commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
a42ae6f2c761809813b9851bc1e449e3dac685ba | Remove mongo _id from resource. | superdesk/items.py | superdesk/items.py |
from datetime import datetime
from flask import request, url_for
from . import mongo
from . import rest
from .auth import auth_required
from .utils import get_random_string
from .io.reuters_token import ReutersTokenProvider
tokenProvider = ReutersTokenProvider()
class ItemConflictException(Exception):
pass
def format_item(item):
item.pop('_id', None)
item.setdefault('self_url', url_for('item', guid=item.get('guid')))
for content in item.get('contents', []):
if content.get('href'):
content['href'] = '%s?auth_token=%s' % (content.get('href'), tokenProvider.get_token())
return item
def save_item(data):
now = datetime.utcnow()
data.setdefault('guid', generate_guid())
data.setdefault('firstCreated', now)
data.setdefault('versionCreated', now)
item = mongo.db.items.find_one({'guid': data.get('guid')})
if item and item.get('versionCreated').time() >= data.get('versionCreated').time():
raise ItemConflictException()
elif item:
data['_id'] = item.get('_id')
mongo.db.items.save(data)
return data
def update_item(data, guid):
data['versionCreated'] = datetime.utcnow()
item = mongo.db.items.find_one({'guid': guid})
item.update(data)
mongo.db.items.save(item)
return item
def generate_guid():
guid = get_random_string()
while mongo.db.items.find_one({'guid': guid}):
guid = get_random_string()
return guid
def get_last_updated():
item = mongo.db.items.find_one(fields=['versionCreated'], sort=[('versionCreated', -1)])
if item:
return item.get('versionCreated')
class ItemListResource(rest.Resource):
def get_query(self):
query = {}
query.setdefault('itemClass', 'icls:composite')
if request.args.get('q'):
query['headline'] = {'$regex': request.args.get('q'), '$options': 'i'}
if request.args.get('itemClass'):
query['itemClass'] = {'$in': request.args.get('itemClass').split(",")}
return query
@auth_required
def get(self):
skip = int(request.args.get('skip', 0))
limit = int(request.args.get('limit', 25))
query = self.get_query()
raw_items = mongo.db.items.find(query).sort('firstCreated', -1).skip(skip).limit(limit + 1)
items = [format_item(item) for item in raw_items]
return {'items': items[:limit], 'has_next': len(items) > limit, 'has_prev': skip > 0}
@auth_required
def post(self):
item = save_item(request.get_json())
return item, 201
class ItemResource(rest.Resource):
def _get_item(self, guid):
return mongo.db.items.find_one_or_404({'guid': guid})
@auth_required
def get(self, guid):
item = self._get_item(guid)
return format_item(item)
@auth_required
def put(self, guid):
data = request.get_json()
item = update_item(data, guid)
return format_item(item)
|
from datetime import datetime
from flask import request, url_for
from . import mongo
from . import rest
from .auth import auth_required
from .utils import get_random_string
from .io.reuters_token import ReutersTokenProvider
tokenProvider = ReutersTokenProvider()
class ItemConflictException(Exception):
pass
def format_item(item):
item.setdefault('self_url', url_for('item', guid=item.get('guid')))
for content in item.get('contents', []):
if content.get('href'):
content['href'] = '%s?auth_token=%s' % (content.get('href'), tokenProvider.get_token())
return item
def save_item(data):
now = datetime.utcnow()
data.setdefault('guid', generate_guid())
data.setdefault('firstCreated', now)
data.setdefault('versionCreated', now)
item = mongo.db.items.find_one({'guid': data.get('guid')})
if item and item.get('versionCreated').time() >= data.get('versionCreated').time():
raise ItemConflictException()
elif item:
data['_id'] = item.get('_id')
mongo.db.items.save(data)
return data
def update_item(data, guid):
data.pop('_id', None)
data['versionCreated'] = datetime.utcnow()
item = mongo.db.items.find_one({'guid': guid})
item.update(data)
mongo.db.items.save(item)
return item
def generate_guid():
guid = get_random_string()
while mongo.db.items.find_one({'guid': guid}):
guid = get_random_string()
return guid
def get_last_updated():
item = mongo.db.items.find_one(fields=['versionCreated'], sort=[('versionCreated', -1)])
if item:
return item.get('versionCreated')
class ItemListResource(rest.Resource):
def get_query(self):
query = {}
query.setdefault('itemClass', 'icls:composite')
if request.args.get('q'):
query['headline'] = {'$regex': request.args.get('q'), '$options': 'i'}
if request.args.get('itemClass'):
query['itemClass'] = {'$in': request.args.get('itemClass').split(",")}
return query
@auth_required
def get(self):
skip = int(request.args.get('skip', 0))
limit = int(request.args.get('limit', 25))
query = self.get_query()
raw_items = mongo.db.items.find(query).sort('versionCreated', -1).skip(skip).limit(limit + 1)
items = [format_item(item) for item in raw_items]
return {'items': items[:limit], 'has_next': len(items) > limit, 'has_prev': skip > 0}
@auth_required
def post(self):
item = save_item(request.get_json())
return item, 201
class ItemResource(rest.Resource):
def _get_item(self, guid):
return mongo.db.items.find_one_or_404({'guid': guid})
@auth_required
def get(self, guid):
item = self._get_item(guid)
return format_item(item)
@auth_required
def put(self, guid):
data = request.get_json()
item = update_item(data, guid)
return format_item(item)
| Python | 0 |
b3761729b156367229b5cd8895d225cb13d3267a | Fix example `Set-Based Column Map Expectation` template import (#6134) | examples/expectations/set_based_column_map_expectation_template.py | examples/expectations/set_based_column_map_expectation_template.py | """
This is a template for creating custom SetBasedColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_set_based_column_map_expectations
"""
from great_expectations.expectations.set_based_column_map_expectation import (
SetBasedColumnMapExpectation,
)
# <snippet>
# This class defines the Expectation itself
class ExpectColumnValuesToBeInSomeSet(SetBasedColumnMapExpectation):
"""TODO: Add a docstring here"""
# These values will be used to configure the metric created by your expectation
set_ = []
set_camel_name = "SetName"
set_semantic_name = None
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = []
# Here your regex is used to create a custom metric for this expectation
map_metric = SetBasedColumnMapExpectation.register_metric(
set_camel_name=set_camel_name,
set_=set_,
)
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": ["set-based"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@your_name_here", # Don't forget to add your github handle here!
],
}
# </snippet>
if __name__ == "__main__":
ExpectColumnValuesToBeInSomeSet().print_diagnostic_checklist()
| """
This is a template for creating custom SetBasedColumnMapExpectations.
For detailed instructions on how to use it, please see:
https://docs.greatexpectations.io/docs/guides/expectations/creating_custom_expectations/how_to_create_custom_set_based_column_map_expectations
"""
from great_expectations.expectations.regex_based_column_map_expectation import (
SetBasedColumnMapExpectation,
)
# <snippet>
# This class defines the Expectation itself
class ExpectColumnValuesToBeInSomeSet(SetBasedColumnMapExpectation):
"""TODO: Add a docstring here"""
# These values will be used to configure the metric created by your expectation
set_ = []
set_camel_name = "SetName"
set_semantic_name = None
# These examples will be shown in the public gallery.
# They will also be executed as unit tests for your Expectation.
examples = []
# Here your regex is used to create a custom metric for this expectation
map_metric = SetBasedColumnMapExpectation.register_metric(
set_camel_name=set_camel_name,
set_=set_,
)
# This object contains metadata for display in the public Gallery
library_metadata = {
"tags": ["set-based"], # Tags for this Expectation in the Gallery
"contributors": [ # Github handles for all contributors to this Expectation.
"@your_name_here", # Don't forget to add your github handle here!
],
}
# </snippet>
if __name__ == "__main__":
ExpectColumnValuesToBeInSomeSet().print_diagnostic_checklist()
| Python | 0 |
407bb78c34b769f8d993853761234c60e1fbeabd | Update util.py | tabpy-server/tabpy_server/app/util.py | tabpy-server/tabpy_server/app/util.py | import csv
import logging
import os
from datetime import datetime
from OpenSSL import crypto
logger = logging.getLogger(__name__)
def log_and_raise(msg, exception_type):
'''
Log the message and raise an exception of specified type
'''
logger.fatal(msg)
raise exception_type(msg)
def validate_cert(cert_file_path):
with open(cert_file_path, 'r') as f:
cert_buf = f.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_buf)
date_format, encoding = '%Y%m%d%H%M%SZ', 'ascii'
not_before = datetime.strptime(
cert.get_notBefore().decode(encoding), date_format)
not_after = datetime.strptime(
cert.get_notAfter().decode(encoding), date_format)
now = datetime.now()
https_error = 'Error using HTTPS: '
if now < not_before:
log_and_raise(https_error +
'The certificate provided is not valid until {}.'.format(
not_before), RuntimeError)
if now > not_after:
log_and_raise(https_error +
f'The certificate provided expired on {not_after}.',
RuntimeError)
def parse_pwd_file(pwd_file_name):
'''
Parses passwords file and returns set of credentials.
Parameters
----------
pwd_file_name : str
Passwords file name.
Returns
-------
succeeded : bool
True if specified file was parsed successfully.
False if there were any issues with parsing specified file.
credentials : dict
Credentials from the file. Empty if succeeded is False.
'''
logger.info('Parsing passwords file {}...'.format(pwd_file_name))
if not os.path.isfile(pwd_file_name):
logger.fatal('Passwords file {} not found'.format(pwd_file_name))
return False, {}
credentials = {}
with open(pwd_file_name) as pwd_file:
pwd_file_reader = csv.reader(pwd_file, delimiter=' ')
for row in pwd_file_reader:
# skip empty lines
if len(row) == 0:
continue
# skip commented lines
if row[0][0] == '#':
continue
if len(row) != 2:
logger.error(
'Incorrect entry "{}" '
'in password file'.format(row))
return False, {}
login = row[0].lower()
if login in credentials:
logger.error(
'Multiple entries for username {} '
'in password file'.format(login))
return False, {}
if(len(row[1]) > 0):
credentials[login] = row[1]
logger.debug('Found username {}'.format(login))
else:
logger.warning('Found username {} but no password'
.format(row[0]))
return False, {}
logger.info("Authentication is enabled")
return True, credentials
| import csv
import logging
import os
from datetime import datetime
from OpenSSL import crypto
logger = logging.getLogger(__name__)
def log_and_raise(msg, exception_type):
'''
Log the message and raise an exception of specified type
'''
logger.fatal(msg)
raise exception_type(msg)
def validate_cert(cert_file_path):
with open(cert_file_path, 'r') as f:
cert_buf = f.read()
cert = crypto.load_certificate(crypto.FILETYPE_PEM, cert_buf)
date_format, encoding = '%Y%m%d%H%M%SZ', 'ascii'
not_before = datetime.strptime(
cert.get_notBefore().decode(encoding), date_format)
not_after = datetime.strptime(
cert.get_notAfter().decode(encoding), date_format)
now = datetime.now()
https_error = 'Error using HTTPS: '
if now < not_before:
log_and_raise(https_error +
'The certificate provided is not valid until {}.'.format(
not_before), RuntimeError)
if now > not_after:
log_and_raise(https_error +
f'The certificate provided expired on {not_after}.',
RuntimeError)
def parse_pwd_file(pwd_file_name):
'''
Parses passwords file and returns set of credentials.
Parameters
----------
pwd_file_name : str
Passwords file name.
Returns
-------
succeeded : bool
True if specified file was parsed successfully.
False if there were any issues with parsing specified file.
credentials : dict
Credentials from the file. Empty if succeeded is False.
'''
logger.info('Parsing passwords file {}...'.format(pwd_file_name))
if not os.path.isfile(pwd_file_name):
logger.fatal('Passwords file {} not found'.format(pwd_file_name))
return False, {}
credentials = {}
with open(pwd_file_name) as pwd_file:
pwd_file_reader = csv.reader(pwd_file, delimiter=' ')
for row in pwd_file_reader:
# skip empty lines
if len(row) == 0:
continue
# skip commented lines
if row[0][0] == '#':
continue
if len(row) != 2:
logger.error(
'Incorrect entry "{}" '
'in password file'.format(row))
return False, {}
login = row[0].lower()
if login in credentials:
logger.error(
'Multiple entries for username {} '
'in password file'.format(login))
return False, {}
if(len(row[1]) > 0):
credentials[login] = row[1]
logger.debug('Found username {}'.format(login))
else:
logger.warning('Found username {} but no password'
.format(row[0]))
return False, {}
logger.info("Authentication is enabled")
return True, credentials
| Python | 0.000001 |
da6650e96523f8be4dc2d95663ec8cf94cd9c3ba | Adjust the Whataburger spider | locations/spiders/whataburger.py | locations/spiders/whataburger.py | # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class WhataburgerSpider(scrapy.Spider):
name = "whataburger"
allowed_domains = ["locations.whataburger.com"]
start_urls = (
'https://locations.whataburger.com/',
)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
for day_info in store_hours:
day = day_info['day'][:2].title()
hour_intervals = []
for interval in day_info['intervals']:
f_time = str(interval['start']).zfill(4)
t_time = str(interval['end']).zfill(4)
hour_intervals.append('{}:{}-{}:{}'.format(
f_time[0:2],
f_time[2:4],
t_time[0:2],
t_time[2:4],
))
hours = ','.join(hour_intervals)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse(self, response):
urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for path in urls:
if len(path.split('/')) > 2:
# If there's only one store, the URL will be longer than <state code>.html
yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
else:
yield scrapy.Request(response.urljoin(path))
def parse_store(self, response):
hours = json.loads(response.xpath('//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days').extract_first())
yield GeojsonPointItem(
lon=float(response.xpath('//span/meta[@itemprop="longitude"]/@content').extract_first()),
lat=float(response.xpath('//span/meta[@itemprop="latitude"]/@content').extract_first()),
addr_full=response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
city=response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
state=response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
postcode=response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(),
phone=response.xpath('//a[@class="c-phone-number-link c-phone-main-number-link"]/text()').extract_first(),
opening_hours=self.store_hours(hours) if hours else None,
ref=response.url,
website=response.url,
)
| # -*- coding: utf-8 -*-
import scrapy
import json
from locations.items import GeojsonPointItem
class WhataburgerSpider(scrapy.Spider):
name = "whataburger"
allowed_domains = ["locations.whataburger.com"]
start_urls = (
'https://locations.whataburger.com/',
)
def store_hours(self, store_hours):
day_groups = []
this_day_group = None
for day_info in store_hours:
day = day_info['day'][:2].title()
hour_intervals = []
for interval in day_info['intervals']:
f_time = str(interval['start']).zfill(4)
t_time = str(interval['end']).zfill(4)
hour_intervals.append('{}:{}-{}:{}'.format(
f_time[0:2],
f_time[2:4],
t_time[0:2],
t_time[2:4],
))
hours = ','.join(hour_intervals)
if not this_day_group:
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] != hours:
day_groups.append(this_day_group)
this_day_group = {
'from_day': day,
'to_day': day,
'hours': hours
}
elif this_day_group['hours'] == hours:
this_day_group['to_day'] = day
day_groups.append(this_day_group)
opening_hours = ""
if len(day_groups) == 1 and day_groups[0]['hours'] in ('00:00-23:59', '00:00-00:00'):
opening_hours = '24/7'
else:
for day_group in day_groups:
if day_group['from_day'] == day_group['to_day']:
opening_hours += '{from_day} {hours}; '.format(**day_group)
elif day_group['from_day'] == 'Su' and day_group['to_day'] == 'Sa':
opening_hours += '{hours}; '.format(**day_group)
else:
opening_hours += '{from_day}-{to_day} {hours}; '.format(**day_group)
opening_hours = opening_hours[:-2]
return opening_hours
def parse(self, response):
urls = response.xpath('//a[@class="c-directory-list-content-item-link"]/@href').extract()
for path in urls:
if len(path.split('/')) > 2:
# If there's only one store, the URL will be longer than <state code>.html
yield scrapy.Request(response.urljoin(path), callback=self.parse_store)
else:
yield scrapy.Request(response.urljoin(path))
def parse_store(self, response):
properties = {
'addr:full': response.xpath('//span[@itemprop="streetAddress"]/text()').extract_first(),
'addr:city': response.xpath('//span[@itemprop="addressLocality"]/text()').extract_first(),
'addr:state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
'addr:postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first().strip(),
'ref': response.url,
'website': response.url,
}
phone = response.xpath('//a[@class="c-phone-number-link c-phone-main-number-link"]/text()').extract_first()
if phone:
properties['phone'] = phone
hours = json.loads(response.xpath('//div[@class="c-location-hours-details-wrapper js-location-hours"]/@data-days').extract_first())
opening_hours = self.store_hours(hours) if hours else None
if opening_hours:
properties['opening_hours'] = opening_hours
lon_lat = [
float(response.xpath('//span/meta[@itemprop="longitude"]/@content').extract_first()),
float(response.xpath('//span/meta[@itemprop="latitude"]/@content').extract_first()),
]
yield GeojsonPointItem(
properties=properties,
lon_lat=lon_lat,
)
| Python | 0.998751 |
99e9ef79178d6e2dffd8ec7ed12b3edbd8b7d0f1 | Add basket total to context | longclaw/longclawbasket/views.py | longclaw/longclawbasket/views.py | from django.shortcuts import render
from django.views.generic import ListView
from longclaw.longclawbasket.models import BasketItem
from longclaw.longclawbasket import utils
class BasketView(ListView):
model = BasketItem
template_name = "longclawbasket/basket.html"
def get_context_data(self, **kwargs):
items, _ = utils.get_basket_items(self.request)
total_price = sum(item.total() for item in items)
return {"basket": items, "total_price": total_price}
| from django.shortcuts import render
from django.views.generic import ListView
from longclaw.longclawbasket.models import BasketItem
from longclaw.longclawbasket import utils
class BasketView(ListView):
model = BasketItem
template_name = "longclawbasket/basket.html"
def get_context_data(self, **kwargs):
items, _ = utils.get_basket_items(self.request)
return {"basket": items}
| Python | 0.99994 |
1d07732e0fae0dca9eae1d89de913a1e124e32fc | Disable some prod optimisations | lutrisweb/settings/production.py | lutrisweb/settings/production.py | import os
from base import * # noqa
DEBUG = False
MEDIA_URL = '//lutris.net/media/'
FILES_ROOT = '/srv/files'
ALLOWED_HOSTS = ['.lutris.net', '.lutris.net.', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'lutris',
'USER': 'lutris',
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
#'CONN_MAX_AGE': 600,
}
}
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
# SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
STEAM_API_KEY = os.environ['STEAM_API_KEY']
| import os
from base import * # noqa
DEBUG = False
MEDIA_URL = '//lutris.net/media/'
FILES_ROOT = '/srv/files'
ALLOWED_HOSTS = ['.lutris.net', '.lutris.net.', ]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'lutris',
'USER': 'lutris',
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
'CONN_MAX_AGE': 600,
}
}
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_USE_TLS = True
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
SESSION_ENGINE = 'django.contrib.sessions.backends.cached_db'
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': '127.0.0.1:11211',
}
}
STEAM_API_KEY = os.environ['STEAM_API_KEY']
| Python | 0 |
cd9fc5a6ea8925de67041408d96a63beccf573a2 | add docopt | taksman.py | taksman.py | #!/usr/bin/env python
""" Assignment management tool for school.
Usage:
taksman.py (-h | --help)
taksman.py add <entry>
taksman.py course
taksman.py date
taksman.py debug
Examples:
taksman.py add 033-reading
Options:
-h, --help
"""
import os
import errno
import re
from pprint import pprint
from docopt import docopt
def show_by_course(tasks):
courses = set(tasks[name].get('course') for name in tasks)
courses -= set([None])
courses = sorted(courses)
for course in courses:
print
print "Course: %s" % course
course_tasks = filter(
lambda name: tasks[name].get('course') == course,
tasks)
for name in course_tasks:
print "> %s" % name
def read_tasks(db_root):
""" Load tasks from db. """
entry_names = os.listdir(os.path.join(db_root, "entry"))
entry_paths = {filename: os.path.join(db_root, "entry", filename) for filename in entry_names}
tasks = {name: read_task(entry_paths[name]) for name in entry_names}
return tasks
def read_task(filepath):
""" Read a task from a file. """
task = {}
task['body'] = ""
with open(filepath, 'r') as f:
reading_headers = True
for line in f.readlines():
header_match = re.match(r"(?P<field>\w+): +(?P<value>.*)$", line)
if reading_headers and header_match:
field = header_match.group('field')
value = header_match.group('value')
assert field != 'body'
assert field not in task
task[field] = value.rstrip()
else:
reading_headers = False
task['body'] += line.rstrip() + "\n"
task['body'] = task['body'].rstrip()
return task
def ensure_db(db_root):
""" Make the storage directories exist. """
mkdir_p(os.path.join(db_root, "entry"))
mkdir_p(os.path.join(db_root, "done"))
mkdir_p(os.path.join(db_root, "template"))
def mkdir_p(path):
""" no error if existing, make parent directories as needed """
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
if __name__ == "__main__":
db_root = "tasks"
ensure_db(db_root)
tasks = read_tasks(db_root)
arguments = docopt(__doc__)
if arguments['debug']:
pprint(tasks)
elif arguments['add']:
raise Exception("not implemented")
elif arguments['course']:
show_by_course(tasks)
elif arguments['course']:
raise Exception("not implemented")
else:
print "Whoops, unhandled input."
| #!/usr/bin/env python
import os
import errno
import re
from pprint import pprint
def show_by_course(tasks):
courses = set(tasks[name].get('course') for name in tasks)
courses -= set([None])
courses = sorted(courses)
for course in courses:
print
print "Course: %s" % course
course_tasks = filter(
lambda name: tasks[name].get('course') == course,
tasks)
for name in course_tasks:
print "> %s" % name
def read_tasks(db_root):
""" Load tasks from db. """
entry_names = os.listdir(os.path.join(db_root, "entry"))
entry_paths = {filename: os.path.join(db_root, "entry", filename) for filename in entry_names}
tasks = {name: read_task(entry_paths[name]) for name in entry_names}
return tasks
def read_task(filepath):
""" Read a task from a file. """
task = {}
task['body'] = ""
with open(filepath, 'r') as f:
reading_headers = True
for line in f.readlines():
header_match = re.match(r"(?P<field>\w+): +(?P<value>.*)$", line)
if reading_headers and header_match:
field = header_match.group('field')
value = header_match.group('value')
assert field != 'body'
assert field not in task
task[field] = value.rstrip()
else:
reading_headers = False
task['body'] += line.rstrip() + "\n"
task['body'] = task['body'].rstrip()
return task
def ensure_db(db_root):
""" Make the storage directories exist. """
mkdir_p(os.path.join(db_root, "entry"))
mkdir_p(os.path.join(db_root, "done"))
mkdir_p(os.path.join(db_root, "template"))
def mkdir_p(path):
""" no error if existing, make parent directories as needed """
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
if __name__ == "__main__":
db_root = "tasks"
ensure_db(db_root)
tasks = read_tasks(db_root)
# pprint(tasks)
show_by_course(tasks)
| Python | 0 |
616f2419774136b6cd98bc6dbee31bf39a99acea | add zhihu special spider | DataHouse/zhihu/zhihu_special_spider.py | DataHouse/zhihu/zhihu_special_spider.py | """
a web spider for Zhihu Special
"""
import random
import os
import time
import logging
import requests
from pymongo import MongoClient
import pandas as pd
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='zhihu_course.log',
filemode='w')
def crawl(pagenum):
url_pattern = 'https://api.zhihu.com/lives/special_lists?limit=%d&offset=10&subtype=special_list' % pagenum
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'api.zhihu.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
cookies = dict(
cookies_are='')
response = requests.get(url=url_pattern, headers=headers, cookies=cookies)
if response.status_code == 200:
live_json = response.json()
time.sleep(random.randint(2, 5)) # a range between 2s and 5s
return live_json
else:
print('ERROR, code is %d' % response.status_code)
return None
def recursive_crawl():
"""
recursively crawl all Zhihu special data
:return:
"Version:1.0
"""
offset = 10
while True:
try:
obj = crawl(offset)
if obj is not None and len(obj['data']) > 0:
for _ in obj['data']:
if _ is not None:
insert_item(_)
print('insert one item successfully~')
offset += 10
else:
break
except:
logging.error('https://api.zhihu.com/lives/special_lists?limit=10&offset=%d&subtype=special_list' % offset)
def insert_item(item):
"""
insert an item into MongoDB
:param item:
:return:
:Version:1.0
"""
client = MongoClient()
db = client.zhihu.special
result = db.insert_one(item)
if __name__ == '__main__':
recursive_crawl()
| """
a web spider for Zhihu Special
"""
import random
import os
import time
import logging
import requests
from pymongo import MongoClient
import pandas as pd
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S',
filename='zhihu_course.log',
filemode='w')
def crawl(pagenum):
url_pattern = 'https://api.zhihu.com/lives/special_lists?limit=%d&offset=10&subtype=special_list' % pagenum
headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',
'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
'Accept-Encoding': 'gzip, deflate, br',
'Host': 'api.zhihu.com',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
cookies = dict(
cookies_are='')
response = requests.get(url=url_pattern, headers=headers, cookies=cookies)
if response.status_code == 200:
live_json = response.json()
time.sleep(random.randint(2, 5)) # a range between 2s and 5s
return live_json
else:
print('ERROR, code is %d' % response.status_code)
return None
def recursive_crawl():
"""
recursively crawl all Zhihu special data
:return:
"Version:1.0
"""
offset = 10
while True:
try:
obj = crawl(offset)
if obj is not None and len(obj['data']) > 0:
for _ in obj['data']:
insert_item(_)
print('insert one item successfully~')
offset += 10
else:
break
except:
logging.error('https://api.zhihu.com/lives/special_lists?limit=10&offset=%d&subtype=special_list' % offset)
def insert_item(item):
"""
insert an item into MongoDB
:param item:
:return:
:Version:1.0
"""
client = MongoClient()
db = client.zhihu.special
result = db.insert_one(item)
if __name__ == '__main__':
recursive_crawl()
| Python | 0 |
362312ad1a26dbecf0c4942c9a6e7042cbaab3bd | Test the rest of Roman masters | test-mm.py | test-mm.py | from psautohint import autohint
from psautohint import psautohint
def getFonts(masters, baseDir):
options = autohint.ACOptions()
options.quiet = True
fonts = []
infos = []
for master in masters:
path = "%s/%s/font.ufo" % (baseDir, master)
font = autohint.openUFOFile(path, None, False, options)
font.useProcessedLayer = False
names = font.getGlyphList()
_, fontDictList = font.getfdInfo(font.getPSName(), path, False, False, [], [], names)
info = fontDictList[0].getFontInfo()
fonts.append(font)
infos.append(info)
return fonts, infos
def getGlyphList(fonts):
glyphList = fonts[0].getGlyphList()
assert all([font.getGlyphList() == glyphList for font in fonts])
return glyphList
def mmHint(masters, fonts, infos, glyphList):
hinted = []
for name in glyphList:
glyphs = []
print("Hinting %s" % name)
for i, (font, info) in enumerate(zip(fonts, infos)):
glyph = font.convertToBez(name, False, True)[0]
if not glyph:
glyph = "%%%s\n" % name
if i == 0:
glyph = psautohint.autohint(info, [glyph], False, False, False, False)[0]
glyphs.append(glyph)
try:
glyphs = _psautohint.autohintmm(infos[0], [glyphs], masters, True)
except:
for i, glyph in enumerate(glyphs):
print(masters[i])
print(glyph)
raise
hinted.append(glyphs)
return hinted
def main():
masters = ["Regular", "Light", "ExtraLight", "Medium", "Semibold", "Bold", "Black"]
fonts, infos = getFonts(masters, "tests/data/source-code-pro")
glyphList = getGlyphList(fonts)
hinted = mmHint(masters, fonts, infos, glyphList)
if __name__ == "__main__":
main()
| from psautohint import autohint
from psautohint import psautohint
def getFonts(masters, baseDir):
options = autohint.ACOptions()
options.quiet = True
fonts = []
infos = []
for master in masters:
path = "%s/%s/font.ufo" % (baseDir, master)
font = autohint.openUFOFile(path, None, False, options)
font.useProcessedLayer = False
names = font.getGlyphList()
_, fontDictList = font.getfdInfo(font.getPSName(), path, False, False, [], [], names)
info = fontDictList[0].getFontInfo()
fonts.append(font)
infos.append(info)
return fonts, infos
def getGlyphList(fonts):
glyphList = fonts[0].getGlyphList()
assert all([font.getGlyphList() == glyphList for font in fonts])
return glyphList
def mmHint(masters, fonts, infos, glyphList):
hinted = []
for name in glyphList:
glyphs = []
print("Hinting %s" % name)
for i, (font, info) in enumerate(zip(fonts, infos)):
glyph = font.convertToBez(name, False, True)[0]
if not glyph:
glyph = "%%%s\n" % name
if i == 0:
glyph = psautohint.autohint(info, [glyph], False, False, False, False)[0]
glyphs.append(glyph)
try:
glyphs = _psautohint.autohintmm(infos[0], [glyphs], masters, True)
except:
for i, glyph in enumerate(glyphs):
print(masters[i])
print(glyph)
raise
hinted.append(glyphs)
return hinted
def main():
masters = ["Black", "ExtraLight"]
fonts, infos = getFonts(masters, "tests/data/source-code-pro")
glyphList = getGlyphList(fonts)
hinted = mmHint(masters, fonts, infos, glyphList)
if __name__ == "__main__":
main()
| Python | 0.000002 |
6aa5e2c95c0f529aa2803395779ca7274d5795b1 | Bump version to 1.0.1-machtfit-67 | src/oscar/__init__.py | src/oscar/__init__.py | import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 67)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
return '{}.{}.{}-{}-{}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.dashboard',
# 3rd-party apps that oscar depends on
'treebeard',
'sorl.thumbnail',
'django_tables2',
]
def get_core_apps(overrides=None):
"""
Return a list of oscar's apps amended with any passed overrides
"""
if not overrides:
return OSCAR_CORE_APPS
def get_app_label(app_label, overrides):
pattern = app_label.replace('oscar.apps.', '')
for override in overrides:
if override.endswith(pattern):
if 'dashboard' in override and 'dashboard' not in pattern:
continue
return override
return app_label
apps = []
for app_label in OSCAR_CORE_APPS:
apps.append(get_app_label(app_label, overrides))
return apps
| import os
# Use 'dev', 'beta', or 'final' as the 4th element to indicate release type.
VERSION = (1, 0, 1, 'machtfit', 66)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
return '{}.{}.{}-{}-{}'.format(*VERSION)
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.dashboard',
# 3rd-party apps that oscar depends on
'treebeard',
'sorl.thumbnail',
'django_tables2',
]
def get_core_apps(overrides=None):
"""
Return a list of oscar's apps amended with any passed overrides
"""
if not overrides:
return OSCAR_CORE_APPS
def get_app_label(app_label, overrides):
pattern = app_label.replace('oscar.apps.', '')
for override in overrides:
if override.endswith(pattern):
if 'dashboard' in override and 'dashboard' not in pattern:
continue
return override
return app_label
apps = []
for app_label in OSCAR_CORE_APPS:
apps.append(get_app_label(app_label, overrides))
return apps
| Python | 0 |
d9c2bb2de79db80bc94509cb6a23de7f85e6e899 | update tests | tests/test_pecanstreet_dataset_adapter.py | tests/test_pecanstreet_dataset_adapter.py | import sys
sys.path.append('../')
from disaggregator import PecanStreetDatasetAdapter
import unittest
class PecanStreetDatasetAdapterTestCase(unittest.TestCase):
def setUp(self):
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
self.psda = PecanStreetDatasetAdapter(db_url)
def test_get_table_names(self):
s_tables = self.psda.get_table_names('shared')
c_tables = self.psda.get_table_names('curated')
r_tables = self.psda.get_table_names('raw')
self.assertIn('group1_disaggregated_2012_12', c_tables,
'curated schema has correct tables')
self.assertIn('egauge_15min_2013', r_tables,
'raw schema has correct tables')
self.assertIn('validated_01_2014', s_tables,
'shared schema has correct tables')
def test_table_metadata(self):
ids,cols = self.psda.get_table_metadata('shared','validated_01_2014')
self.assertIn(744,ids,'shared table 01 2014 has dataid 744')
self.assertIn('use',cols,'shared table 01 2014 has column "use"')
self.assertIn('air1',cols,'shared table 01 2014 has column "air1"')
pass
def test_get_month_traces(self):
# traces = self.pdsa.get_month_traces('shared','validated_01_2014')
# trace = p.get_month_traces_wo_time_align('shared',str(tables[0]),i[0])
pass
fast = TestSuite()
if __name__ == '__main__':
unittest.main()
| import sys
sys.path.append('../')
from disaggregator import PecanStreetDatasetAdapter
import unittest
class PecanStreetDatasetAdapterTestCase(unittest.TestCase):
def setUp(self):
db_url = "postgresql://USERNAME:PASSWORD@db.wiki-energy.org:5432/postgres"
self.psda = PecanStreetDatasetAdapter(db_url)
def test_get_table_names(self):
s_tables = self.psda.get_table_names('shared')
c_tables = self.psda.get_table_names('curated')
r_tables = self.psda.get_table_names('raw')
self.assertIn('group1_disaggregated_2012_12', c_tables,
'curated schema has correct tables')
self.assertIn('egauge_15min_2013', r_tables,
'raw schema has correct tables')
self.assertIn('validated_01_2014', s_tables,
'shared schema has correct tables')
def test_table_metadata(self):
ids,cols = self.psda.get_table_metadata('shared','validated_01_2014')
self.assertIn(744,ids,'shared table 01 2014 has dataid 744')
self.assertIn('use',cols,'shared table 01 2014 has column "use"')
self.assertIn('air1',cols,'shared table 01 2014 has column "air1"')
pass
def test_get_month_traces(self):
# traces = self.pdsa.get_month_traces('shared','validated_01_2014')
# trace = p.get_month_traces_wo_time_align('shared',str(tables[0]),i[0])
pass
if __name__ == '__main__':
unittest.main()
| Python | 0.000001 |
e1da85d46f84a35198959881b55196db4e0a67c4 | Fix loading of description.yaml | lava_results_app/utils.py | lava_results_app/utils.py | import os
import yaml
import logging
import subprocess
from django.utils.translation import ungettext_lazy
from django.conf import settings
from django.http import Http404
from linaro_django_xmlrpc.models import AuthToken
def help_max_length(max_length):
return ungettext_lazy( # pylint: disable=no-member
u"Maximum length: {0} character",
u"Maximum length: {0} characters",
max_length).format(max_length)
class StreamEcho(object): # pylint: disable=too-few-public-methods
def write(self, value): # pylint: disable=no-self-use,
return value
def description_filename(job):
filename = os.path.join(job.output_dir, 'description.yaml')
if not os.path.exists(filename):
return None
return filename
class V2Loader(yaml.Loader):
def remove_pipeline_module(self, suffix, node):
if 'lava_dispatcher.pipeline' in suffix:
suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')
return self.construct_python_object(suffix, node)
def remove_pipeline_module_name(self, suffix, node):
if 'lava_dispatcher.pipeline' in suffix:
suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')
return self.construct_python_name(suffix, node)
def remove_pipeline_module_new(self, suffix, node):
if 'lava_dispatcher.pipeline' in suffix:
suffix = suffix.replace('lava_dispatcher.pipeline', 'lava_dispatcher')
return self.construct_python_object_new(suffix, node)
V2Loader.add_multi_constructor(
u'tag:yaml.org,2002:python/name:',
V2Loader.remove_pipeline_module_name)
V2Loader.add_multi_constructor(
u'tag:yaml.org,2002:python/object:',
V2Loader.remove_pipeline_module)
V2Loader.add_multi_constructor(
u'tag:yaml.org,2002:python/object/new:',
V2Loader.remove_pipeline_module_new)
def description_data(job):
logger = logging.getLogger('lava_results_app')
filename = description_filename(job)
if not filename:
return {}
try:
data = yaml.load(open(filename, 'r'), Loader=V2Loader)
except yaml.YAMLError:
logger.error("Unable to parse description for %s" % job.id)
return {}
if not data:
return {}
return data
# FIXME: relocate these two functions into dbutils to avoid needing django settings here.
# other functions in utils can be run outside django. Remove import of AuthToken.
def anonymous_token(request, job):
querydict = request.GET
user = querydict.get('user', default=None)
token = querydict.get('token', default=None)
# safe to call with (None, None) - returns None
auth_user = AuthToken.get_user_for_secret(username=user, secret=token)
if not user and not job.is_public:
raise Http404("Job %d requires authentication to view." % job.id)
if not auth_user:
raise Http404("User '%s' is not able to view job %d" % (user, job.id))
return auth_user
def check_request_auth(request, job):
if job.is_public:
return
if not request.user.is_authenticated():
# handle anonymous access
auth_user = anonymous_token(request, job)
if not auth_user or not job.can_view(auth_user):
raise Http404("User '%s' is not able to view job %d" % (request.user, job.id))
elif not job.can_view(request.user):
raise Http404("User '%s' is not able to view job %d" % (request.user.username, job.id))
def debian_package_version():
"""
Relies on Debian Policy rules for the existence of the
changelog. Distributions not derived from Debian will
return an empty string.
"""
changelog = '/usr/share/doc/lava-server/changelog.Debian.gz'
if os.path.exists(changelog):
deb_version = subprocess.check_output((
'dpkg-query', '-W', "-f=${Version}\n", 'lava-server')).strip().decode('utf-8')
# example version returned would be '2016.11-1'
return deb_version
| import os
import yaml
import logging
import subprocess
from django.utils.translation import ungettext_lazy
from django.conf import settings
from django.http import Http404
from linaro_django_xmlrpc.models import AuthToken
def help_max_length(max_length):
return ungettext_lazy( # pylint: disable=no-member
u"Maximum length: {0} character",
u"Maximum length: {0} characters",
max_length).format(max_length)
class StreamEcho(object): # pylint: disable=too-few-public-methods
def write(self, value): # pylint: disable=no-self-use,
return value
def description_filename(job):
filename = os.path.join(job.output_dir, 'description.yaml')
if not os.path.exists(filename):
return None
return filename
def description_data(job):
logger = logging.getLogger('lava_results_app')
filename = description_filename(job)
if not filename:
return {}
try:
data = yaml.load(open(filename, 'r'))
except yaml.YAMLError:
logger.error("Unable to parse description for %s" % job.id)
return {}
if not data:
return {}
return data
# FIXME: relocate these two functions into dbutils to avoid needing django settings here.
# other functions in utils can be run outside django. Remove import of AuthToken.
def anonymous_token(request, job):
querydict = request.GET
user = querydict.get('user', default=None)
token = querydict.get('token', default=None)
# safe to call with (None, None) - returns None
auth_user = AuthToken.get_user_for_secret(username=user, secret=token)
if not user and not job.is_public:
raise Http404("Job %d requires authentication to view." % job.id)
if not auth_user:
raise Http404("User '%s' is not able to view job %d" % (user, job.id))
return auth_user
def check_request_auth(request, job):
if job.is_public:
return
if not request.user.is_authenticated():
# handle anonymous access
auth_user = anonymous_token(request, job)
if not auth_user or not job.can_view(auth_user):
raise Http404("User '%s' is not able to view job %d" % (request.user, job.id))
elif not job.can_view(request.user):
raise Http404("User '%s' is not able to view job %d" % (request.user.username, job.id))
def debian_package_version():
"""
Relies on Debian Policy rules for the existence of the
changelog. Distributions not derived from Debian will
return an empty string.
"""
changelog = '/usr/share/doc/lava-server/changelog.Debian.gz'
if os.path.exists(changelog):
deb_version = subprocess.check_output((
'dpkg-query', '-W', "-f=${Version}\n", 'lava-server')).strip().decode('utf-8')
# example version returned would be '2016.11-1'
return deb_version
| Python | 0.000155 |
52ebe157585019c9be01b22638fff924ba328892 | Increase delay (to fix tests that are failing randomly on travis but are always passing on my locale machine) | test/test_modes/test_goto_assignments.py | test/test_modes/test_goto_assignments.py | """
Test the autocomplete mode
"""
from pyqode.core.api import TextHelper
from pyqode.qt import QtCore, QtWidgets
from pyqode.qt.QtTest import QTest
from pyqode.python import modes as pymodes
from test.helpers import editor_open
def get_mode(editor):
return editor.modes.get(pymodes.GoToAssignmentsMode)
@editor_open(__file__)
def test_enabled(editor):
mode = get_mode(editor)
assert mode.enabled
mode.enabled = False
mode.enabled = True
@editor_open(__file__)
def test_goto_variable(editor):
editor.clear()
code = "a = 15\nprint(a)"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(2, len('print(a)') - 2)
mode.request_goto()
QTest.qWait(5000)
assert TextHelper(editor).current_line_nbr() == 0
out = False
def _on_out_of_doc(*args):
global out
out = True
@editor_open(__file__)
def test_goto_out_of_doc(editor):
global out
out = False
editor.clear()
code = "import logging\nlogging.basicConfig()"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, len('logging.basicConfig()') - 4)
mode.out_of_doc.connect(_on_out_of_doc)
assert out is False
mode.request_goto()
QTest.qWait(5000)
assert out is True
flg_multi = False
def accept_dlg():
global flg_multi
flg_multi = True
widgets = QtWidgets.QApplication.instance().topLevelWidgets()
for w in widgets:
if isinstance(w, QtWidgets.QDialog):
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Return)
@editor_open(__file__)
def test_multiple_results(editor):
global flg_multi
editor.clear()
code = "import os\nos.path.abspath('..')"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, 4)
QTest.qWait(1000)
mode.request_goto()
assert flg_multi is False
QtCore.QTimer.singleShot(1000, accept_dlg)
QTest.qWait(1000)
assert flg_multi is True
@editor_open(__file__)
def test_make_unique(editor):
seq = ['a', 'b', 'c', 'a']
mode = get_mode(editor)
new_seq = mode._unique(seq)
assert len(new_seq) == len(seq) - 1
| """
Test the autocomplete mode
"""
from pyqode.core.api import TextHelper
from pyqode.qt import QtCore, QtWidgets
from pyqode.qt.QtTest import QTest
from pyqode.python import modes as pymodes
from test.helpers import editor_open
def get_mode(editor):
return editor.modes.get(pymodes.GoToAssignmentsMode)
@editor_open(__file__)
def test_enabled(editor):
mode = get_mode(editor)
assert mode.enabled
mode.enabled = False
mode.enabled = True
@editor_open(__file__)
def test_goto_variable(editor):
editor.clear()
code = "a = 15\nprint(a)"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(2, len('print(a)') - 2)
mode.request_goto()
QTest.qWait(1000)
assert TextHelper(editor).current_line_nbr() == 0
out = False
def _on_out_of_doc(*args):
global out
out = True
@editor_open(__file__)
def test_goto_out_of_doc(editor):
global out
out = False
editor.clear()
code = "import logging\nlogging.basicConfig()"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, len('logging.basicConfig()') - 4)
mode.out_of_doc.connect(_on_out_of_doc)
assert out is False
mode.request_goto()
QTest.qWait(1000)
assert out is True
flg_multi = False
def accept_dlg():
global flg_multi
flg_multi = True
widgets = QtWidgets.QApplication.instance().topLevelWidgets()
for w in widgets:
if isinstance(w, QtWidgets.QDialog):
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Tab)
QTest.keyPress(w, QtCore.Qt.Key_Return)
@editor_open(__file__)
def test_multiple_results(editor):
global flg_multi
editor.clear()
code = "import os\nos.path.abspath('..')"
editor.setPlainText(code)
mode = get_mode(editor)
TextHelper(editor).goto_line(1, 4)
QTest.qWait(1000)
mode.request_goto()
assert flg_multi is False
QtCore.QTimer.singleShot(1000, accept_dlg)
QTest.qWait(1000)
assert flg_multi is True
@editor_open(__file__)
def test_make_unique(editor):
seq = ['a', 'b', 'c', 'a']
mode = get_mode(editor)
new_seq = mode._unique(seq)
assert len(new_seq) == len(seq) - 1
| Python | 0 |
93f912b9eb3a17ab24b0a7a67ad2297a7bae6e91 | Fix .aar building on Mac | tensorflow/lite/java/aar_with_jni.bzl | tensorflow/lite/java/aar_with_jni.bzl | """Generate zipped aar file including different variants of .so in jni folder."""
load("@build_bazel_rules_android//android:rules.bzl", "android_binary")
def aar_with_jni(
name,
android_library,
headers = None,
flatten_headers = False):
"""Generates an Android AAR given an Android library target.
Args:
name: Name of the generated .aar file.
android_library: The `android_library` target to package. Note that the
AAR will contain *only that library's .jar` sources. It does not
package the transitive closure of all Java source dependencies.
headers: Optional list of headers that will be included in the
generated .aar file. This is useful for distributing self-contained
.aars with native libs that can be used directly by native clients.
flatten_headers: Whether to flatten the output paths of included headers.
"""
# Generate dummy AndroidManifest.xml for dummy apk usage
# (dummy apk is generated by <name>_dummy_app_for_so target below)
native.genrule(
name = name + "_binary_manifest_generator",
outs = [name + "_generated_AndroidManifest.xml"],
cmd = """
cat > $(OUTS) <<EOF
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="dummy.package.for.so">
<uses-sdk android:minSdkVersion="999"/>
</manifest>
EOF
""",
)
# Generate dummy apk including .so files and later we extract out
# .so files and throw away the apk.
android_binary(
name = name + "_dummy_app_for_so",
manifest = name + "_generated_AndroidManifest.xml",
custom_package = "dummy.package.for.so",
deps = [android_library],
# In some platforms we don't have an Android SDK/NDK and this target
# can't be built. We need to prevent the build system from trying to
# use the target in that case.
tags = [
"manual",
"no_cuda_on_cpu_tap",
],
)
srcs = [android_library + ".aar", name + "_dummy_app_for_so_unsigned.apk"]
cmd = """
cp $(location {0}.aar) $(location :{1}.aar)
chmod +w $(location :{1}.aar)
origdir=$$PWD
cd $$(mktemp -d)
unzip $$origdir/$(location :{1}_dummy_app_for_so_unsigned.apk) "lib/*"
cp -r lib jni
zip -r $$origdir/$(location :{1}.aar) jni/*/*.so
""".format(android_library, name)
if headers:
srcs += headers
cmd += """
mkdir headers
"""
for src in headers:
if flatten_headers:
cmd += """
cp -RL $$origdir/$(location {0}) headers/$$(basename $(location {0}))
""".format(src)
else:
cmd += """
mkdir -p headers/$$(dirname $(location {0}))
cp -RL $$origdir/$(location {0}) headers/$(location {0})
""".format(src)
cmd += "zip -r $$origdir/$(location :{0}.aar) headers".format(name)
native.genrule(
name = name,
srcs = srcs,
outs = [name + ".aar"],
tags = ["manual"],
cmd = cmd,
)
| """Generate zipped aar file including different variants of .so in jni folder."""
load("@build_bazel_rules_android//android:rules.bzl", "android_binary")
def aar_with_jni(
name,
android_library,
headers = None,
flatten_headers = False):
"""Generates an Android AAR given an Android library target.
Args:
name: Name of the generated .aar file.
android_library: The `android_library` target to package. Note that the
AAR will contain *only that library's .jar` sources. It does not
package the transitive closure of all Java source dependencies.
headers: Optional list of headers that will be included in the
generated .aar file. This is useful for distributing self-contained
.aars with native libs that can be used directly by native clients.
flatten_headers: Whether to flatten the output paths of included headers.
"""
# Generate dummy AndroidManifest.xml for dummy apk usage
# (dummy apk is generated by <name>_dummy_app_for_so target below)
native.genrule(
name = name + "_binary_manifest_generator",
outs = [name + "_generated_AndroidManifest.xml"],
cmd = """
cat > $(OUTS) <<EOF
<manifest
xmlns:android="http://schemas.android.com/apk/res/android"
package="dummy.package.for.so">
<uses-sdk android:minSdkVersion="999"/>
</manifest>
EOF
""",
)
# Generate dummy apk including .so files and later we extract out
# .so files and throw away the apk.
android_binary(
name = name + "_dummy_app_for_so",
manifest = name + "_generated_AndroidManifest.xml",
custom_package = "dummy.package.for.so",
deps = [android_library],
# In some platforms we don't have an Android SDK/NDK and this target
# can't be built. We need to prevent the build system from trying to
# use the target in that case.
tags = [
"manual",
"no_cuda_on_cpu_tap",
],
)
srcs = [android_library + ".aar", name + "_dummy_app_for_so_unsigned.apk"]
cmd = """
cp $(location {0}.aar) $(location :{1}.aar)
chmod +w $(location :{1}.aar)
origdir=$$PWD
cd $$(mktemp -d)
unzip $$origdir/$(location :{1}_dummy_app_for_so_unsigned.apk) "lib/*"
cp -r lib jni
zip -r $$origdir/$(location :{1}.aar) jni/*/*.so
""".format(android_library, name)
if headers:
srcs += headers
cmd += """
mkdir headers
"""
for src in headers:
if flatten_headers:
cmd += """
cp -rL $$origdir/$(location {0}) headers/$$(basename $(location {0}))
""".format(src)
else:
cmd += """
mkdir -p headers/$$(dirname $(location {0}))
cp -rL $$origdir/$(location {0}) headers/$(location {0})
""".format(src)
cmd += "zip -r $$origdir/$(location :{0}.aar) headers".format(name)
native.genrule(
name = name,
srcs = srcs,
outs = [name + ".aar"],
tags = ["manual"],
cmd = cmd,
)
| Python | 0.000002 |
eb5d7f91286779ff0f3b6d7c829967f74ef1db7a | replace managers by plain functions | testbot.py | testbot.py | # -*- coding: utf-8 -*-
from bot import Tofbot
import unittest
from collections import namedtuple
def print_resp(msg):
print (" -> %s" % msg)
class TestTofbot(Tofbot):
def __init__(self, nick, name, chan, origin):
chans = [chan]
self.nick = nick
Tofbot.__init__(self, nick, name, chans, debug=False)
self.chan = chan
self.origin = origin
self.cb = None
def msg(self, chan, msg):
if self.cb:
self.cb(msg)
else:
print_resp(msg)
def send(self, msg):
print ("<- %s" % msg)
self.dispatch(self.origin, [msg, 'PRIVMSG', self.chan])
def kick(self, msg=None):
if msg is None:
msg = self.nick
self.dispatch(self.origin, [msg, 'KICK', self.chan, self.nick])
def bot_action(bot, action):
msgs = []
def capture_out(msg):
msgs.append(msg)
bot.cb = capture_out
action()
return msgs
def bot_input(bot, msg):
return bot_action(bot, lambda: bot.send(msg))
def bot_kick(bot, msg=None):
return bot_action(bot, lambda: bot.kick(msg))
class TestCase(unittest.TestCase):
def setUp(self):
nick = "testbot"
name = "Test Bot"
chan = "#chan"
Origin = namedtuple('Origin', ['sender', 'nick'])
origin = Origin('sender', 'nick')
self.bot = TestTofbot(nick, name, chan, origin)
cmds = ['!set autoTofadeThreshold 100']
for cmd in cmds:
self.bot.dispatch(origin, [cmd, 'BOTCONFIG', 'PRIVMSG', '#config'])
self.bot.joined = True
def _io(self, inp, outp):
"""
Test that a given input produces a given output.
"""
l = bot_input(self.bot, inp)
if isinstance(outp, str):
outp = [outp]
self.assertEqual(l, outp)
def test_set_allowed(self):
msg = "!set autoTofadeThreshold 9000"
self.bot.send(msg)
self._io("!get autoTofadeThreshold", "autoTofadeThreshold = 9000")
def test_kick(self):
l = bot_kick(self.bot)
self.assertEqual(l, ["respawn, LOL"])
def test_kick_reason(self):
l = bot_kick(self.bot, "tais toi")
self.assertEqual(l, ["comment ça, tais toi ?"])
def test_dassin(self):
self._io("tu sais", "je n'ai jamais été aussi heureux que ce matin-là")
def test_donnezmoi(self):
self._io("donnez moi un lol", ['L', 'O', 'L'])
| # -*- coding: utf-8 -*-
from bot import Tofbot
import unittest
from collections import namedtuple
def print_resp(msg):
print (" -> %s" % msg)
class TestTofbot(Tofbot):
def __init__(self, nick, name, chan, origin):
chans = [chan]
self.nick = nick
Tofbot.__init__(self, nick, name, chans, debug=False)
self.chan = chan
self.origin = origin
self.cb = None
def msg(self, chan, msg):
if self.cb:
self.cb(msg)
else:
print_resp(msg)
def send(self, msg):
print ("<- %s" % msg)
self.dispatch(self.origin, [msg, 'PRIVMSG', self.chan])
def kick(self, msg=None):
if msg is None:
msg = self.nick
self.dispatch(self.origin, [msg, 'KICK', self.chan, self.nick])
class BotAction:
def __init__(self, bot, action):
"""
If length=None, just expect one and return it (not a list).
"""
self.bot = bot
self.action = action
self.msgs = []
def __enter__(self):
def capture_out(msg):
self.msgs.append(msg)
self.bot.cb = capture_out
self.action()
return self.msgs
def __exit__(self, *args):
pass
def bot_input(bot, msg):
return BotAction(bot, lambda: bot.send(msg))
def bot_kick(bot, msg=None):
return BotAction(bot, lambda: bot.kick(msg))
class TestCase(unittest.TestCase):
def setUp(self):
nick = "testbot"
name = "Test Bot"
chan = "#chan"
Origin = namedtuple('Origin', ['sender', 'nick'])
origin = Origin('sender', 'nick')
self.bot = TestTofbot(nick, name, chan, origin)
cmds = ['!set autoTofadeThreshold 100']
for cmd in cmds:
self.bot.dispatch(origin, [cmd, 'BOTCONFIG', 'PRIVMSG', '#config'])
self.bot.joined = True
def _io(self, inp, outp):
"""
Test that a given input produces a given output.
"""
with bot_input(self.bot, inp) as l:
if isinstance(outp, str):
outp = [outp]
self.assertEqual(l, outp)
def test_set_allowed(self):
msg = "!set autoTofadeThreshold 9000"
self.bot.send(msg)
self._io("!get autoTofadeThreshold", "autoTofadeThreshold = 9000")
def test_kick(self):
with bot_kick(self.bot) as l:
self.assertEqual(l, ["respawn, LOL"])
def test_kick_reason(self):
with bot_kick(self.bot, "tais toi") as l:
self.assertEqual(l, ["comment ça, tais toi ?"])
def test_dassin(self):
self._io("tu sais", "je n'ai jamais été aussi heureux que ce matin-là")
def test_donnezmoi(self):
self._io("donnez moi un lol", ['L', 'O', 'L'])
| Python | 0.000041 |
48e280177123902001e4ff6fb3e178190b435054 | fix test for Exscript.workqueue.MainLoop. | tests/Exscript/workqueue/MainLoopTest.py | tests/Exscript/workqueue/MainLoopTest.py | import sys, unittest, re, os.path, threading
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from Exscript.workqueue import MainLoop
from Exscript.workqueue.Job import ProcessJob
class MainLoopTest(unittest.TestCase):
CORRELATE = MainLoop
def setUp(self):
pass
def testMainLoop(self):
lock = threading.Lock()
data = {'sum': 0, 'randsum': 0}
ml = MainLoop.MainLoop(ProcessJob)
nop = lambda x: None
for i in range(12345):
ml.enqueue(nop, name = 'test', times = 1, data = None)
self.assertEqual(0, data['sum'])
# Note: Further testing is done in WorkQueueTest.py
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MainLoopTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| import sys, unittest, re, os.path, threading
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
from Exscript.workqueue import MainLoop
class MainLoopTest(unittest.TestCase):
CORRELATE = MainLoop
def setUp(self):
pass
def testMainLoop(self):
lock = threading.Lock()
data = {'sum': 0, 'randsum': 0}
ml = MainLoop.MainLoop()
nop = lambda x: None
for i in range(12345):
ml.enqueue(nop, name = 'test', times = 1, data = None)
self.assertEqual(0, data['sum'])
# Note: Further testing is done in WorkQueueTest.py
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MainLoopTest)
if __name__ == '__main__':
unittest.TextTestRunner(verbosity = 2).run(suite())
| Python | 0 |
f62c53af583657ee13d220edbb25803bbc3c9c22 | Fix style | tests/cupy_tests/core_tests/test_core.py | tests/cupy_tests/core_tests/test_core.py | import unittest
import numpy
import cupy
from cupy.core import core
class TestGetSize(unittest.TestCase):
def test_none(self):
self.assertEqual(core.get_size(None), ())
def test_list(self):
self.assertEqual(core.get_size([1, 2]), (1, 2))
def test_tuple(self):
self.assertEqual(core.get_size((1, 2)), (1, 2))
def test_int(self):
self.assertEqual(core.get_size(1), (1,))
def test_invalid(self):
with self.assertRaises(ValueError):
core.get_size(1.0)
class TestInternalProd(unittest.TestCase):
def test_empty(self):
self.assertEqual(core.internal_prod([]), 1)
def test_one(self):
self.assertEqual(core.internal_prod([2]), 2)
def test_two(self):
self.assertEqual(core.internal_prod([2, 3]), 6)
class TestGetStridesForNocopyReshape(unittest.TestCase):
def test_different_size(self):
a = core.ndarray((2, 3))
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 5)),
[])
def test_one(self):
a = core.ndarray((1,), dtype=cupy.int32)
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 1, 1)),
[4, 4, 4])
def test_normal(self):
# TODO(nno): write test for normal case
pass
class TestGetContiguousStrides(unittest.TestCase):
def test_zero(self):
self.assertEqual(core._get_contiguous_strides((), 1), [])
def test_one(self):
self.assertEqual(core._get_contiguous_strides((1,), 2), [2])
def test_two(self):
self.assertEqual(core._get_contiguous_strides((1, 2), 3), [6, 3])
def test_three(self):
self.assertEqual(core._get_contiguous_strides((1, 2, 3), 4),
[24, 12, 4])
class TestGetCContiguity(unittest.TestCase):
def test_zero_in_shape(self):
self.assertTrue(core._get_c_contiguity((1, 0, 1), (1, 1, 1), 3))
def test_normal(self):
# TODO(unno): write test for normal case
pass
class TestInferUnknownDimension(unittest.TestCase):
def test_known_all(self):
self.assertEqual(core._infer_unknown_dimension((1, 2, 3), 6),
[1, 2, 3])
def test_multiple_unknown(self):
with self.assertRaises(ValueError):
core._infer_unknown_dimension((-1, 1, -1), 10)
def test_infer(self):
self.assertEqual(core._infer_unknown_dimension((-1, 2, 3), 12),
[2, 2, 3])
class TestArray(unittest.TestCase):
def test_unsupported_type(self):
arr = numpy.ndarray((2, 3), dtype=object)
with self.assertRaises(ValueError):
core.array(arr)
| import unittest
import numpy
import cupy
from cupy.core import core
class TestGetSize(unittest.TestCase):
def test_none(self):
self.assertEqual(core.get_size(None), ())
def test_list(self):
self.assertEqual(core.get_size([1, 2]), (1, 2))
def test_tuple(self):
self.assertEqual(core.get_size((1, 2)), (1, 2))
def test_int(self):
self.assertEqual(core.get_size(1), (1,))
def test_invalid(self):
with self.assertRaises(ValueError):
core.get_size(1.0)
class TestInternalProd(unittest.TestCase):
def test_empty(self):
self.assertEqual(core.internal_prod([]), 1)
def test_one(self):
self.assertEqual(core.internal_prod([2]), 2)
def test_two(self):
self.assertEqual(core.internal_prod([2, 3]), 6)
class TestGetStridesForNocopyReshape(unittest.TestCase):
def test_different_size(self):
a = core.ndarray((2, 3))
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 5)),
[])
def test_one(self):
a = core.ndarray((1,), dtype=cupy.int32)
self.assertEqual(core._get_strides_for_nocopy_reshape(a, (1, 1, 1)),
[4, 4, 4])
def test_normal(self):
# TODO(nno): write test for normal case
pass
class TestGetContiguousStrides(unittest.TestCase):
def test_zero(self):
self.assertEqual(core._get_contiguous_strides((), 1), [])
def test_one(self):
self.assertEqual(core._get_contiguous_strides((1,), 2), [2])
def test_two(self):
self.assertEqual(core._get_contiguous_strides((1, 2), 3), [6, 3])
def test_three(self):
self.assertEqual(core._get_contiguous_strides((1, 2, 3), 4),
[24, 12, 4])
class TestGetCContiguity(unittest.TestCase):
def test_zero_in_shape(self):
self.assertTrue(core._get_c_contiguity((1, 0, 1), (1, 1, 1), 3))
def test_normal(self):
# TODO(unno): write test for normal case
pass
class TestInferUnknownDimension(unittest.TestCase):
def test_known_all(self):
self.assertEqual(core._infer_unknown_dimension((1, 2, 3), 6),
[1, 2, 3])
def test_multiple_unknown(self):
with self.assertRaises(ValueError):
core._infer_unknown_dimension((-1, 1, -1), 10)
def test_infer(self):
self.assertEqual(core._infer_unknown_dimension((-1, 2, 3), 12),
[2, 2, 3])
class TestArray(unittest.TestCase):
def test_unsupported_type(self):
arr = numpy.ndarray((2,3), dtype=object)
with self.assertRaises(ValueError):
core.array(arr)
| Python | 0.000001 |
523216bbf6f21757651e41ac307bc296041b7963 | load nonlinux_config if the platform is not linux | tests/docker/test_async_docker_client.py | tests/docker/test_async_docker_client.py | import os
import sys
import warnings
from tornado.testing import AsyncTestCase, gen_test
from remoteappmanager.docker.async_docker_client import AsyncDockerClient
from tests.docker.config import nonlinux_config
from tests import utils
class TestAsyncDockerClient(AsyncTestCase):
def setUp(self):
super().setUp()
# Due to a python requests design choice, we receive a warning about
# leaking connection. This is expected and pretty much out of our
# authority but it can be annoying in tests, hence we suppress the
# warning. See issue simphony-remote/10
warnings.filterwarnings(action="ignore",
message="unclosed",
category=ResourceWarning)
def tearDown(self):
super().tearDown()
warnings.filterwarnings(action="default",
message="unclosed",
category=ResourceWarning)
@gen_test
def test_info(self):
client = AsyncDockerClient()
client.client = utils.mock_docker_client()
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
@gen_test
def test_real_connection(self):
config = None
if "DOCKER_HOST" not in os.environ and sys.platform != 'linux':
config = nonlinux_config()
if not os.path.exists(config.tls_cert):
self.skipTest("Certificates are not available. Skipping.")
client = AsyncDockerClient(config=config)
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
| import os
import warnings
from tornado.testing import AsyncTestCase, gen_test
from remoteappmanager.docker.async_docker_client import AsyncDockerClient
from tests.docker.config import nonlinux_config
from tests import utils
class TestAsyncDockerClient(AsyncTestCase):
def setUp(self):
super().setUp()
# Due to a python requests design choice, we receive a warning about
# leaking connection. This is expected and pretty much out of our
# authority but it can be annoying in tests, hence we suppress the
# warning. See issue simphony-remote/10
warnings.filterwarnings(action="ignore",
message="unclosed",
category=ResourceWarning)
def tearDown(self):
super().tearDown()
warnings.filterwarnings(action="default",
message="unclosed",
category=ResourceWarning)
@gen_test
def test_info(self):
client = AsyncDockerClient()
client.client = utils.mock_docker_client()
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
@gen_test
def test_real_connection(self):
config = None
if "DOCKER_HOST" not in os.environ:
config = nonlinux_config()
if not os.path.exists(config.tls_cert):
self.skipTest("Certificates are not available. Skipping.")
client = AsyncDockerClient(config=config)
response = yield client.info()
# Test contents of response
self.assertIsInstance(response, dict)
self.assertIn("ID", response)
| Python | 0.000477 |
6bec22cd51288c94dff40cf0c973b975538040d5 | Increase timeout for test_long_running_job test | tests/integration/minion/test_timeout.py | tests/integration/minion/test_timeout.py | # -*- coding: utf-8 -*-
'''
Tests for various minion timeouts
'''
# Import Python libs
from __future__ import absolute_import
import os
import sys
import salt.utils.platform
# Import Salt Testing libs
from tests.support.case import ShellCase
class MinionTimeoutTestCase(ShellCase):
'''
Test minion timing functions
'''
def test_long_running_job(self):
'''
Test that we will wait longer than the job timeout for a minion to
return.
'''
# Launch the command
sleep_length = 30
if salt.utils.platform.is_windows():
popen_kwargs = {'env': dict(os.environ, PYTHONPATH=';'.join(sys.path))}
else:
popen_kwargs = None
ret = self.run_salt(
'minion test.sleep {0}'.format(sleep_length),
timeout=90,
catch_stderr=True,
popen_kwargs=popen_kwargs,
)
self.assertTrue(isinstance(ret[0], list), 'Return is not a list. Minion'
' may have returned error: {0}'.format(ret))
self.assertEqual(len(ret[0]), 2, 'Standard out wrong length {}'.format(ret))
self.assertTrue('True' in ret[0][1], 'Minion did not return True after '
'{0} seconds. ret={1}'.format(sleep_length, ret))
| # -*- coding: utf-8 -*-
'''
Tests for various minion timeouts
'''
# Import Python libs
from __future__ import absolute_import
import os
import sys
import salt.utils.platform
# Import Salt Testing libs
from tests.support.case import ShellCase
class MinionTimeoutTestCase(ShellCase):
'''
Test minion timing functions
'''
def test_long_running_job(self):
'''
Test that we will wait longer than the job timeout for a minion to
return.
'''
# Launch the command
sleep_length = 30
if salt.utils.platform.is_windows():
popen_kwargs = {'env': dict(os.environ, PYTHONPATH=';'.join(sys.path))}
else:
popen_kwargs = None
ret = self.run_salt(
'minion test.sleep {0}'.format(sleep_length),
timeout=45,
catch_stderr=True,
popen_kwargs=popen_kwargs,
)
self.assertTrue(isinstance(ret[0], list), 'Return is not a list. Minion'
' may have returned error: {0}'.format(ret))
self.assertEqual(len(ret[0]), 2, 'Standard out wrong length {}'.format(ret))
self.assertTrue('True' in ret[0][1], 'Minion did not return True after '
'{0} seconds. ret={1}'.format(sleep_length, ret))
| Python | 0.000008 |
ce391c53f46c9eddcc8293081d7b62c8cca91cfc | Add regression test for #44299 | tests/integration/states/test_pkgrepo.py | tests/integration/states/test_pkgrepo.py | # -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
'''
pkgrepo state tests
'''
@requires_system_grains
def test_pkgrepo_01_managed(self, grains):
'''
Test adding a repo
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/managed.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
def test_pkgrepo_02_absent(self):
'''
Test removing the repo from the above test
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/absent.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@requires_system_grains
def test_pkgrepo_03_with_comments(self, grains):
'''
Test adding a repo with comments
'''
os_family = grains['os_family'].lower()
if os_family in ('redhat', 'suse'):
kwargs = {
'name': 'examplerepo',
'baseurl': 'http://example.com/repo',
'enabled': False,
'comments': ['This is a comment']
}
elif os_family in ('debian',):
self.skipTest('Debian/Ubuntu test case needed')
else:
self.skipTest("No test case for os_family '{0}'".format(os_family))
try:
# Run the state to add the repo
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
# Run again with modified comments
kwargs['comments'].append('This is another comment')
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertEqual(
ret['changes'],
{
'comments': {
'old': ['This is a comment'],
'new': ['This is a comment',
'This is another comment']
}
}
)
# Run a third time, no changes should be made
ret = self.run_state('pkgrepo.managed', **kwargs)
self.assertSaltTrueReturn(ret)
ret = ret[next(iter(ret))]
self.assertFalse(ret['changes'])
self.assertEqual(
ret['comment'],
"Package repo '{0}' already configured".format(kwargs['name'])
)
finally:
# Clean up
self.run_state('pkgrepo.absent', name=kwargs['name'])
| # -*- coding: utf-8 -*-
'''
tests for pkgrepo states
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from tests.support.case import ModuleCase
from tests.support.mixins import SaltReturnAssertsMixin
from tests.support.unit import skipIf
from tests.support.helpers import (
destructiveTest,
requires_system_grains
)
# Import salt libs
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
class PkgrepoTest(ModuleCase, SaltReturnAssertsMixin):
'''
pkgrepo state tests
'''
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
@requires_system_grains
def test_pkgrepo_01_managed(self, grains):
'''
This is a destructive test as it adds a repository.
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
if grains['os_family'] == 'Debian':
try:
from aptsources import sourceslist
except ImportError:
self.skipTest(
'aptsources.sourceslist python module not found'
)
ret = self.run_function('state.sls', mods='pkgrepo.managed', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/managed.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
@destructiveTest
@skipIf(salt.utils.is_windows(), 'minion is windows')
def test_pkgrepo_02_absent(self):
'''
This is a destructive test as it removes the repository added in the
above test.
'''
os_grain = self.run_function('grains.item', ['os'])['os']
os_release_info = tuple(self.run_function('grains.item', ['osrelease_info'])['osrelease_info'])
if os_grain == 'Ubuntu' and os_release_info >= (15, 10):
self.skipTest(
'The PPA used for this test does not exist for Ubuntu Wily'
' (15.10) and later.'
)
ret = self.run_function('state.sls', mods='pkgrepo.absent', timeout=120)
# If the below assert fails then no states were run, and the SLS in
# tests/integration/files/file/base/pkgrepo/absent.sls needs to be
# corrected.
self.assertReturnNonEmptySaltType(ret)
for state_id, state_result in six.iteritems(ret):
self.assertSaltTrueReturn(dict([(state_id, state_result)]))
| Python | 0.000001 |
0bb2ebc52e720a3d693ca14f3621fd710ea36d4b | use make_result_iq | tests/twisted/vcard/test-avatar-async.py | tests/twisted/vcard/test-avatar-async.py |
"""
Test support for retrieving avatars asynchronously using RequestAvatars.
"""
import base64
import hashlib
from servicetest import EventPattern
from gabbletest import exec_test, acknowledge_iq, make_result_iq
def test(q, bus, conn, stream):
conn.Connect()
_, iq_event = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'))
acknowledge_iq(stream, iq_event.stanza)
handle = conn.RequestHandles(1, ['bob@foo.com'])[0]
conn.Avatars.RequestAvatars([handle])
iq_event = q.expect('stream-iq', to='bob@foo.com', query_ns='vcard-temp',
query_name='vCard')
iq = make_result_iq(stream, iq_event.stanza)
vcard = iq.firstChildElement()
photo = vcard.addElement('PHOTO')
photo.addElement('TYPE', content='image/png')
photo.addElement('BINVAL', content=base64.b64encode('hello'))
stream.send(iq)
event = q.expect('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
# Request again; this request should be satisfied from the avatar cache.
conn.Avatars.RequestAvatars([handle])
event = q.demand('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
|
"""
Test support for retrieving avatars asynchronously using RequestAvatars.
"""
import base64
import hashlib
from servicetest import EventPattern
from gabbletest import exec_test, acknowledge_iq
def test(q, bus, conn, stream):
conn.Connect()
_, iq_event = q.expect_many(
EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]),
EventPattern('stream-iq', to=None, query_ns='vcard-temp',
query_name='vCard'))
acknowledge_iq(stream, iq_event.stanza)
handle = conn.RequestHandles(1, ['bob@foo.com'])[0]
conn.Avatars.RequestAvatars([handle])
iq_event = q.expect('stream-iq', to='bob@foo.com', query_ns='vcard-temp',
query_name='vCard')
iq = iq_event.stanza
vcard = iq_event.query
photo = vcard.addElement('PHOTO')
photo.addElement('TYPE', content='image/png')
photo.addElement('BINVAL', content=base64.b64encode('hello'))
iq['type'] = 'result'
stream.send(iq)
event = q.expect('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
# Request again; this request should be satisfied from the avatar cache.
conn.Avatars.RequestAvatars([handle])
event = q.demand('dbus-signal', signal='AvatarRetrieved')
assert event.args[0] == handle
assert event.args[1] == hashlib.sha1('hello').hexdigest()
assert event.args[2] == 'hello'
assert event.args[3] == 'image/png'
conn.Disconnect()
q.expect('dbus-signal', signal='StatusChanged', args=[2, 1])
if __name__ == '__main__':
exec_test(test)
| Python | 0.000003 |
cf4d8318557d971cee1869fe8cbac82cc6316020 | Change expected exception | plotly/tests/test_core/test_file/test_file.py | plotly/tests/test_core/test_file/test_file.py | """
test_meta:
==========
A module intended for use with Nose.
"""
import random
import string
import requests
from unittest import TestCase
from nose.plugins.attrib import attr
import plotly.plotly as py
from plotly.exceptions import PlotlyRequestError
@attr('slow')
class FolderAPITestCase(TestCase):
def setUp(self):
py.sign_in('PythonTest', '9v9f20pext')
def _random_filename(self):
choice_chars = string.ascii_letters + string.digits
random_chars = [random.choice(choice_chars) for _ in range(10)]
unique_filename = 'Valid Folder ' + ''.join(random_chars)
return unique_filename
def test_create_folder(self):
try:
py.file_ops.mkdirs(self._random_filename())
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_create_nested_folders(self):
first_folder = self._random_filename()
nested_folder = '{0}/{1}'.format(first_folder, self._random_filename())
try:
py.file_ops.mkdirs(nested_folder)
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_duplicate_folders(self):
first_folder = self._random_filename()
py.file_ops.mkdirs(first_folder)
try:
py.file_ops.mkdirs(first_folder)
except PlotlyRequestError as e:
self.assertTrue(400 <= e.status_code < 500)
else:
self.fail('Expected this to fail!')
| """
test_meta:
==========
A module intended for use with Nose.
"""
import random
import string
import requests
from unittest import TestCase
from nose.plugins.attrib import attr
import plotly.plotly as py
from plotly.exceptions import PlotlyRequestError
@attr('slow')
class FolderAPITestCase(TestCase):
def setUp(self):
py.sign_in('PythonTest', '9v9f20pext')
def _random_filename(self):
choice_chars = string.ascii_letters + string.digits
random_chars = [random.choice(choice_chars) for _ in range(10)]
unique_filename = 'Valid Folder ' + ''.join(random_chars)
return unique_filename
def test_create_folder(self):
try:
py.file_ops.mkdirs(self._random_filename())
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_create_nested_folders(self):
first_folder = self._random_filename()
nested_folder = '{0}/{1}'.format(first_folder, self._random_filename())
try:
py.file_ops.mkdirs(nested_folder)
except PlotlyRequestError as e:
self.fail('Expected this *not* to fail! Status: {}'
.format(e.status_code))
def test_duplicate_folders(self):
first_folder = self._random_filename()
py.file_ops.mkdirs(first_folder)
try:
py.file_ops.mkdirs(first_folder)
except requests.exceptions.RequestException as e:
self.assertTrue(400 <= e.response.status_code < 500)
else:
self.fail('Expected this to fail!')
| Python | 0.000002 |
6cfc94d8a03439c55808090aa5e3a4f35c288887 | Use assert_allclose so we can see the appveyor failure | menpodetect/tests/opencv_test.py | menpodetect/tests/opencv_test.py | from numpy.testing import assert_allclose
from menpodetect.opencv import (load_opencv_frontal_face_detector,
load_opencv_eye_detector)
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
def test_frontal_face_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
def test_frontal_face_detector_min_neighbors():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy, min_neighbours=100)
assert len(pcs) == 0
assert takeo_copy.n_channels == 3
def test_eye_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_eye_detector()
pcs = opencv_detector(takeo_copy, min_size=(5, 5))
assert_allclose(len(pcs), 1)
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
| from menpodetect.opencv import (load_opencv_frontal_face_detector,
load_opencv_eye_detector)
import menpo.io as mio
takeo = mio.import_builtin_asset.takeo_ppm()
def test_frontal_face_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy)
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
def test_frontal_face_detector_min_neighbors():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_frontal_face_detector()
pcs = opencv_detector(takeo_copy, min_neighbours=100)
assert len(pcs) == 0
assert takeo_copy.n_channels == 3
def test_eye_detector():
takeo_copy = takeo.copy()
opencv_detector = load_opencv_eye_detector()
pcs = opencv_detector(takeo_copy, min_size=(5, 5))
assert len(pcs) == 1
assert takeo_copy.n_channels == 3
assert takeo_copy.landmarks['opencv_0'][None].n_points == 4
| Python | 0 |
b8d0a7cbac6ab2415a1d059a1f68428e9312f3cb | Make our error page handlers work on Django 2.0 (#969) | judge/views/error.py | judge/views/error.py | import traceback
from django.shortcuts import render
from django.utils.translation import gettext as _
def error(request, context, status):
return render(request, 'error.html', context=context, status=status)
def error404(request, exception=None):
# TODO: "panic: go back"
return render(request, 'generic-message.html', {
'title': _('404 error'),
'message': _('Could not find page "%s"') % request.path
}, status=404)
def error403(request, exception=None):
return error(request, {'id': 'unauthorized_access',
'description': _('no permission for %s') % request.path,
'code': 403}, 403)
def error500(request):
return error(request, {'id': 'invalid_state',
'description': _('corrupt page %s') % request.path,
'traceback': traceback.format_exc(),
'code': 500}, 500)
| import traceback
from django.shortcuts import render
from django.utils.translation import gettext as _
def error(request, context, status):
return render(request, 'error.html', context=context, status=status)
def error404(request):
# TODO: "panic: go back"
return render(request, 'generic-message.html', {
'title': _('404 error'),
'message': _('Could not find page "%s"') % request.path
}, status=404)
def error403(request):
return error(request, {'id': 'unauthorized_access',
'description': _('no permission for %s') % request.path,
'code': 403}, 403)
def error500(request):
return error(request, {'id': 'invalid_state',
'description': _('corrupt page %s') % request.path,
'traceback': traceback.format_exc(),
'code': 500}, 500)
| Python | 0 |
4e92dabe65416a3a751a0b38e75512b6daa1ba38 | Remove useless imports | ticketshop/ticketapp/tests/test_views.py | ticketshop/ticketapp/tests/test_views.py | from django.test import Client
from django.contrib.auth.models import User
from django.test import TestCase
from ..models import TicketType, TicketPurchase
class TicketPurchaseViewTest(TestCase):
def test_getForm(self):
"""
Test that we can get the purchase form
"""
self.assertContains(self.client.get("/"), "name")
class TestConfirmationView(TestCase):
def setUp(self):
# It appears that client.session only work
# for non annonymous users: setup Test User
User.objects.create_user('user', 'user@site.com', 'password')
# Login
self.client.login(username='user', password='password')
# Create data
tt = TicketType.objects.create( name = "Standard ticket", price = 100 )
self.purchase = TicketPurchase.objects.create(
name = "Bruce Wayne",
email = "bruce@wayneenterprise.com" )
self.purchase.ticket_set.create( name = "Batman", ticket_type = tt )
self.purchase.ticket_set.create( name = "Catwoman", ticket_type = tt )
self.invoice_id = self.purchase.invoice_id
def test_itRedirectToTheHomePageWhenThereIsNoSessionData(self):
"""
Test that /confirm/ redirect to / when the session doesn,t
contain any purchase data
"""
self.assertRedirects(self.client.get('/confirm/'), '/')
def test_itDisplaysTheContactName(self):
"""
Test that the view displays the contact name
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "Bruce Wayne" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
def test_itDisplaysTheTotal(self):
"""
Test that the view displays the total amount
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "<b>Total:</b> 200 SEK" )
class TestPaypalView(TestCase):
def test_2(self):
self.client.get("/paypal/")
| from django.test import Client
from django.contrib.auth.models import User
from django.contrib.messages.storage.base import Message
from django.contrib.messages.constants import ERROR
from django.test import TestCase
from ..models import TicketType, Ticket, TicketPurchase, Coupon
class TicketPurchaseViewTest(TestCase):
def test_getForm(self):
"""
Test that we can get the purchase form
"""
self.assertContains(self.client.get("/"), "name")
class TestConfirmationView(TestCase):
def setUp(self):
# It appears that client.session only work
# for non annonymous users: setup Test User
User.objects.create_user('user', 'user@site.com', 'password')
# Login
self.client.login(username='user', password='password')
# Create data
tt = TicketType.objects.create( name = "Standard ticket", price = 100 )
self.purchase = TicketPurchase.objects.create(
name = "Bruce Wayne",
email = "bruce@wayneenterprise.com" )
self.purchase.ticket_set.create( name = "Batman", ticket_type = tt )
self.purchase.ticket_set.create( name = "Catwoman", ticket_type = tt )
self.invoice_id = self.purchase.invoice_id
def test_itRedirectToTheHomePageWhenThereIsNoSessionData(self):
"""
Test that /confirm/ redirect to / when the session doesn,t
contain any purchase data
"""
self.assertRedirects(self.client.get('/confirm/'), '/')
def test_itDisplaysTheContactName(self):
"""
Test that the view displays the contact name
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "Bruce Wayne" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
self.assertContains(self.client.get('/confirm/'), "bruce@wayneenterprise.com" )
def test_itDisplaysTheTotal(self):
"""
Test that the view displays the total amount
"""
session = self.client.session
session['invoice_id'] = self.invoice_id
session.save()
self.assertContains(self.client.get('/confirm/'), "<b>Total:</b> 200 SEK" )
class TestPaypalView(TestCase):
def test_2(self):
self.client.get("/paypal/")
| Python | 0.000007 |
bec1d224771daefd9ce18c81b14f550e59b1577a | DidelEntity.__getattr__ raises the correct exception | didel/base.py | didel/base.py | # -*- coding: UTF-8 -*-
try:
from urlparse import urljoin
except ImportError: # Python 3
from urllib.parse import urljoin
from bs4 import BeautifulSoup
ROOT_URL = 'http://didel.script.univ-paris-diderot.fr'
class DidelError(Exception):
"""
Base exception for Didel errors
"""
pass
class DidelEntity(object):
"""
Common base for all fetchable entities. It provides a convenient way to
fetch a page describing an entity and populate the object with it.
Usage: ::
class MyEntity(DidelEntity):
def __init__(self, someArg):
self.path = '/foo/bar/qux/%s.html' % someArg
super(MyEntity, self).__init__()
def populate(self, soup, session, **kw):
# populate the object with ``soup``
self.title = soup.select('h1')[0].get_text()
The entity can then be populated: ::
s = Session()
m = MyEntity("foo")
m.fetch(s)
print m.title
"""
def __init__(self, *args, **kwargs):
self._resources = {}
def fetch(self, session):
"""
Fetch ``self.path`` using the given session and call ``self.populate``
on the returned text.
It sets ``self.session`` to the given session and ``self._populated``
to ``True``.
"""
if not hasattr(self, 'populate') or self.is_populated():
return False
if not hasattr(self, 'path'):
return False
url = urljoin(ROOT_URL, self.path)
resp = session.get(url)
if not resp.ok:
return False
soup = BeautifulSoup(resp.text, 'lxml')
setattr(self, 'session', session)
self.populate(soup, session)
setattr(self, '_populated', True)
return True
def populate(self, soup, session, **kwargs):
"""
This should be implemented by subclasses
"""
raise NotImplementedError
def is_populated(self):
"""
Test if the element has been populated
"""
return hasattr(self, '_populated')
def add_resource(self, name, value):
"""
Add a subresource to this element. It should be a ``DidelEntity``.
``name`` will be used as an attribute name which will, when first
acceded, populate the subresource and cache it.
"""
self._resources[name] = value
def __getattr__(self, name):
"""
Lazily populate subresources when they're acceded
"""
if name not in self._resources:
raise AttributeError("'%s' has no attribute '%s'" % (self, name))
if not self.is_populated():
raise DidelError('%s is not populated' % repr(self))
res = self._resources[name]
res.fetch(self.session)
setattr(self, name, res)
return res
def __getitem__(self, idx):
"""
Lazily populate subresources when they're acceded
"""
el = super(DidelEntity, self).__getitem__(idx)
el.fetch(self.session)
return el
| # -*- coding: UTF-8 -*-
try:
from urlparse import urljoin
except ImportError: # Python 3
from urllib.parse import urljoin
from bs4 import BeautifulSoup
ROOT_URL = 'http://didel.script.univ-paris-diderot.fr'
class DidelError(Exception):
"""
Base exception for Didel errors
"""
pass
class DidelEntity(object):
"""
Common base for all fetchable entities. It provides a convenient way to
fetch a page describing an entity and populate the object with it.
Usage: ::
class MyEntity(DidelEntity):
def __init__(self, someArg):
self.path = '/foo/bar/qux/%s.html' % someArg
super(MyEntity, self).__init__()
def populate(self, soup, session, **kw):
# populate the object with ``soup``
self.title = soup.select('h1')[0].get_text()
The entity can then be populated: ::
s = Session()
m = MyEntity("foo")
m.fetch(s)
print m.title
"""
def __init__(self, *args, **kwargs):
self._resources = {}
def fetch(self, session):
"""
Fetch ``self.path`` using the given session and call ``self.populate``
on the returned text.
It sets ``self.session`` to the given session and ``self._populated``
to ``True``.
"""
if not hasattr(self, 'populate') or self.is_populated():
return False
if not hasattr(self, 'path'):
return False
url = urljoin(ROOT_URL, self.path)
resp = session.get(url)
if not resp.ok:
return False
soup = BeautifulSoup(resp.text, 'lxml')
setattr(self, 'session', session)
self.populate(soup, session)
setattr(self, '_populated', True)
return True
def populate(self, soup, session, **kwargs):
"""
This should be implemented by subclasses
"""
raise NotImplementedError
def is_populated(self):
"""
Test if the element has been populated
"""
return hasattr(self, '_populated')
def add_resource(self, name, value):
"""
Add a subresource to this element. It should be a ``DidelEntity``.
``name`` will be used as an attribute name which will, when first
acceded, populate the subresource and cache it.
"""
self._resources[name] = value
def __getattr__(self, name):
"""
Lazily populate subresources when they're acceded
"""
if name not in self._resources:
raise TypeError("'%s' has no attribute '%s'" % (self, name))
if not self.is_populated():
raise DidelError('%s is not populated' % repr(self))
res = self._resources[name]
res.fetch(self.session)
setattr(self, name, res)
return res
def __getitem__(self, idx):
"""
Lazily populate subresources when they're acceded
"""
el = super(DidelEntity, self).__getitem__(idx)
el.fetch(self.session)
return el
| Python | 0.195904 |
31e3f4486eba2d933582a00a643700ac2f51ab56 | add blank string for null colmun | optional/_data_generation/create_SNPChrPosOnRef_bcp_with_allele.py | optional/_data_generation/create_SNPChrPosOnRef_bcp_with_allele.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import gzip
from pyfasta import Fasta
path_to_fasta = sys.argv[1]
path_to_bcp = sys.argv[2]
# GRCh37.p13
# $ wget -r ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/vertebrates_mammals/Homo_sapiens/GRCh37.p13/Primary_Assembly/assembled_chromosomes/FASTA/
# $ for x in {1..22} X Y; do gzip -dc chr${x}.fa.gz >> GRCh37.p13.fa; done
# path_to_fasta = 'path_to_/GRCh37.p13.fa'
r = re.compile('Homo sapiens chromosome ([0-9XY]+),')
fasta = Fasta(path_to_fasta, key_fn=lambda key: r.search(key).group(1))
def get_allele(chrom, pos):
return fasta.sequence({'chr': str(chrom), 'start': int(pos), 'stop': int(pos)}, one_based=True)
if __name__ == '__main__':
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef_GRCh37p13.bcp.gz' # GRCh37.p13
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef.bcp.gz' # GRCh38
with gzip.open(path_to_bcp) as fin:
for line in fin:
record = line.split('\t')
# No chrom & pos
if record[1] in ('NotOn', 'Multi', 'Un'):
print '\t'.join([record[0], record[1], '', '', ''])
# chrom == Pseudoautosomal Region (PAR)
elif record[1] == 'PAR':
allele = get_allele('Y', int(record[2])+1) # chrom = Y (PAR) # TODO: or skip?
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# chrom == MT # TODO: add chrMT.fa
elif record[1] == 'MT':
allele = ''
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# No pos
elif record[2] == '':
print '\t'.join([record[0], record[1], '', '', ''])
else:
allele = get_allele(str(record[1]), int(record[2])+1) # 0-based to 1-based
print '\t'.join([record[0], record[1], record[2], record[3], allele]) # snp_id, chr, pos(0-based), orien, allele
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import gzip
from pyfasta import Fasta
path_to_fasta = sys.argv[1]
path_to_bcp = sys.argv[2]
# GRCh37.p13
# $ wget -r ftp://ftp.ncbi.nlm.nih.gov/genbank/genomes/Eukaryotes/vertebrates_mammals/Homo_sapiens/GRCh37.p13/Primary_Assembly/assembled_chromosomes/FASTA/
# $ for x in {1..22} X Y; do gzip -dc chr${x}.fa.gz >> GRCh37.p13.fa; done
# path_to_fasta = 'path_to_/GRCh37.p13.fa'
r = re.compile('Homo sapiens chromosome ([0-9XY]+),')
fasta = Fasta(path_to_fasta, key_fn=lambda key: r.search(key).group(1))
def get_allele(chrom, pos):
return fasta.sequence({'chr': str(chrom), 'start': int(pos), 'stop': int(pos)}, one_based=True)
if __name__ == '__main__':
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef_GRCh37p13.bcp.gz' # GRCh37.p13
# path_to_bcp = 'path_to_/b141_SNPChrPosOnRef.bcp.gz' # GRCh38
with gzip.open(path_to_bcp) as fin:
for line in fin:
record = line.split('\t')
# No chrom & pos
if record[1] in ('NotOn', 'Multi', 'Un'):
print '\t'.join([record[0], record[1]])
# chrom == Pseudoautosomal Region (PAR)
elif record[1] == 'PAR':
allele = get_allele('Y', int(record[2])+1) # chrom = Y (PAR) # TODO: or skip?
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# chrom == MT # TODO: add chrMT.fa
elif record[1] == 'MT':
allele = ''
print '\t'.join([record[0], record[1], record[2], record[3], allele])
# No pos
elif record[2] == '':
print '\t'.join([record[0], record[1]])
else:
allele = get_allele(str(record[1]), int(record[2])+1) # 0-based to 1-based
print '\t'.join([record[0], record[1], record[2], record[3], allele]) # snp_id, chr, pos(0-based), orien, allele
| Python | 0.999683 |
1f98e497136ce3d9da7e63a6dc7c3f67fedf50b5 | Save the observation if the form was valid. | observations/views.py | observations/views.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from braces.views import LoginRequiredMixin
from .forms import ObservationForm, BatchUploadForm
class AddObservationView(FormView):
"""
Add a single observation.
"""
form_class = ObservationForm
template_name = "observations/add_observation.html"
success_url = reverse_lazy('observations:add_observation')
def form_valid(self, form):
observation = form.save(commit=False)
observation.observer = self.request.observer
observation.save()
return super(AddObservationView, self).form_valid(form)
class UploadObservationsView(LoginRequiredMixin, FormView):
"""
Upload a file of observations.
"""
form_class = BatchUploadForm
template_name = "observations/upload_observations.html"
success_url = reverse_lazy('observations:upload_observations')
def form_valid(self, form):
form.process_file()
messages.success(self.request, _("File uploaded successfully!"))
return super(UploadObservationsView, self).form_valid(form)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import messages
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views.generic.edit import FormView
from braces.views import LoginRequiredMixin
from .forms import ObservationForm, BatchUploadForm
class AddObservationView(FormView):
"""
Add a single observation.
"""
form_class = ObservationForm
template_name = "observations/add_observation.html"
success_url = reverse_lazy('observations:add_observation')
class UploadObservationsView(LoginRequiredMixin, FormView):
"""
Upload a file of observations.
"""
form_class = BatchUploadForm
template_name = "observations/upload_observations.html"
success_url = reverse_lazy('observations:upload_observations')
def form_valid(self, form):
form.process_file()
messages.success(self.request, _("File uploaded successfully!"))
return super(UploadObservationsView, self).form_valid(form)
| Python | 0.000001 |
6353dd8caa3656b8c37280bcccd56cfaa78ff67a | Add API for making authenticated API requests | valohai_cli/api.py | valohai_cli/api.py | import platform
from urllib.parse import urljoin, urlparse
import requests
from click.globals import get_current_context
from requests.auth import AuthBase
from valohai_cli import __version__ as VERSION
from valohai_cli.exceptions import APIError, ConfigurationError
from valohai_cli.settings import settings
from valohai_cli.utils import force_text
class TokenAuth(AuthBase):
def __init__(self, netloc, token):
super(TokenAuth, self).__init__()
self.netloc = netloc
self.token = token
def __call__(self, request):
if not request.headers.get('Authorization') and urlparse(request.url).netloc == self.netloc:
if self.token:
request.headers['Authorization'] = 'Token %s' % self.token
return request
class APISession(requests.Session):
def __init__(self, base_url, token=None):
super(APISession, self).__init__()
self.base_url = base_url
self.base_netloc = urlparse(self.base_url).netloc
self.auth = TokenAuth(self.base_netloc, token)
self.headers['Accept'] = 'application/json'
self.headers['User-Agent'] = 'valohai-cli/%s (%s)' % (
VERSION,
';'.join(platform.uname()),
)
def prepare_request(self, request):
url_netloc = urlparse(request.url).netloc
if not url_netloc:
request.url = urljoin(self.base_url, request.url)
return super(APISession, self).prepare_request(request)
def request(self, method, url, **kwargs):
handle_errors = bool(kwargs.pop('handle_errors', True))
resp = super(APISession, self).request(method, url, **kwargs)
if handle_errors and resp.status_code >= 400:
raise APIError(resp)
return resp
def _get_current_api_session():
"""
Get an API session, either from the Click context cache, or a new one from the config.
:return: API session
:rtype: APISession
"""
host = settings.get('host')
token = settings.get('token')
if not (host and token):
raise ConfigurationError('You\'re not logged in; try `vh login` first.')
ctx = get_current_context(silent=True) or object()
cache_key = force_text('_api_session_%s_%s' % (host, token))
session = getattr(ctx, cache_key, None)
if not session:
session = APISession(host, token)
setattr(ctx, cache_key, session)
return session
def request(method, url, **kwargs):
"""
Make an authenticated API request.
See the documentation for `requests.Session.request()`.
:param method: HTTP Method
:param url: URL
:param kwargs: Other kwargs, see `requests.Session.request()`
:return: requests.Response
:rtype: requests.Response
"""
session = _get_current_api_session()
return session.request(method, url, **kwargs)
| import platform
from urllib.parse import urljoin, urlparse
import requests
from requests.auth import AuthBase
from valohai_cli import __version__ as VERSION
from valohai_cli.exceptions import APIError, ConfigurationError
from valohai_cli.settings import settings
class TokenAuth(AuthBase):
def __init__(self, netloc, token):
super(TokenAuth, self).__init__()
self.netloc = netloc
self.token = token
def __call__(self, request):
if not request.headers.get('Authorization') and urlparse(request.url).netloc == self.netloc:
if self.token:
request.headers['Authorization'] = 'Token %s' % self.token
return request
class APISession(requests.Session):
def __init__(self, base_url, token=None):
super(APISession, self).__init__()
self.base_url = base_url
self.base_netloc = urlparse(self.base_url).netloc
self.auth = TokenAuth(self.base_netloc, token)
self.headers['Accept'] = 'application/json'
self.headers['User-Agent'] = 'valohai-cli/%s (%s)' % (
VERSION,
';'.join(platform.uname()),
)
def prepare_request(self, request):
url_netloc = urlparse(request.url).netloc
if not url_netloc:
request.url = urljoin(self.base_url, request.url)
return super(APISession, self).prepare_request(request)
def request(self, method, url, **kwargs):
handle_errors = bool(kwargs.pop('handle_errors', True))
resp = super(APISession, self).request(method, url, **kwargs)
if handle_errors and resp.status_code >= 400:
raise APIError(resp)
return resp
@classmethod
def from_settings(cls):
host = settings.get('host')
token = settings.get('token')
if not (host and token):
raise ConfigurationError('You\'re not logged in; try `vh login` first.')
return APISession(host, token)
| Python | 0.000001 |
513b2ca1d3499e3786f1769ce67c41ba16b70419 | switch the default prompt to "" from None | virtualenv/core.py | virtualenv/core.py | import sys
import click
from virtualenv import __version__
from virtualenv.builders.legacy import LegacyBuilder
from virtualenv.builders.venv import VenvBuilder
def select_builder(python, builders=None):
# Determine what Python we're going to be using. If this is None we'll use
# the Python which we're currently running under.
if python is None:
python = sys.executable
# If we were not given a list of builders we'll default to one that
# contains both of our builders
if builders is None:
builders = [VenvBuilder, LegacyBuilder]
# Loop over our builders and return the first one that is acceptable for
# the target Python.
for builder in builders:
if builder.check_available(python):
return builder
# If we got to this point then we haven't selected a builder then we need
# to raise an error.
raise RuntimeError("No available builders for the target Python.")
def create(destination, python=None, **kwargs):
# Determine which builder to use based on the capabiltiies of the target
# python.
builder_type = select_builder(python)
# Instantiate our selected builder with the values given to us, and then
# create our virtual environment using the given builder.
builder = builder_type(python=python, **kwargs)
builder.create(destination)
@click.command(
context_settings={
"help_option_names": ["-h", "--help"],
},
epilog=(
"Once an environment has been created, you may wish to activate it by "
"sourcing an activate script in its bin directory."
),
)
@click.version_option(version=__version__)
@click.option("-v", "--verbose", count=True, help="Increase verbosity.")
@click.option("-q", "--quiet", count=True, help="Decrease verbosity.")
@click.option(
"-p", "--python",
help=(
"The Python interpreter to use in the newly created virtual "
"environment."
),
)
@click.option(
"--clear",
is_flag=True,
help="Clear out the virtual environment and start from scratch.",
)
@click.option(
"--system-site-packages/--no-site-packages",
default=False,
help="Give the virtual environment access to the global site-packages.",
)
@click.option(
"--always-copy",
is_flag=True, help="Always copy files rather than symlinking.",
)
@click.option(
"--relocatable",
is_flag=True,
help=(
"Make an EXISTING virtualenv environment relocatable. This fixes up "
"scripts and makes all .pth files relative."
),
)
@click.option(
"--setuptools/--no-setuptools",
default=True,
help="Install setuptools into the new virtual environment.",
)
@click.option(
"--pip/--no-pip",
default=True,
help="Install pip into the new virtual environment.",
)
@click.option(
"--extra-search-dir",
multiple=True,
help=(
"Directory to look for setuptools/pip distributions in. This option "
"can be used multiple times."
),
)
@click.option(
"--prompt",
default="",
help="Provides an alternative prompt prefix for this environment.",
)
@click.argument("destination")
def cli(destination,
verbose=0,
quiet=0,
python=None,
system_site_packages=False,
clear=False,
always_copy=False,
prompt=None,
relocatable=False,
extra_search_dir=None,
pip=True,
setuptools=True):
"""
Creates virtual python environments in a target directory.
"""
create(
destination,
python=python,
system_site_packages=system_site_packages,
clear=clear,
pip=pip,
setuptools=setuptools,
extra_search_dirs=extra_search_dir,
prompt=prompt,
)
| import sys
import click
from virtualenv import __version__
from virtualenv.builders.legacy import LegacyBuilder
from virtualenv.builders.venv import VenvBuilder
def select_builder(python, builders=None):
# Determine what Python we're going to be using. If this is None we'll use
# the Python which we're currently running under.
if python is None:
python = sys.executable
# If we were not given a list of builders we'll default to one that
# contains both of our builders
if builders is None:
builders = [VenvBuilder, LegacyBuilder]
# Loop over our builders and return the first one that is acceptable for
# the target Python.
for builder in builders:
if builder.check_available(python):
return builder
# If we got to this point then we haven't selected a builder then we need
# to raise an error.
raise RuntimeError("No available builders for the target Python.")
def create(destination, python=None, **kwargs):
# Determine which builder to use based on the capabiltiies of the target
# python.
builder_type = select_builder(python)
# Instantiate our selected builder with the values given to us, and then
# create our virtual environment using the given builder.
builder = builder_type(python=python, **kwargs)
builder.create(destination)
@click.command(
context_settings={
"help_option_names": ["-h", "--help"],
},
epilog=(
"Once an environment has been created, you may wish to activate it by "
"sourcing an activate script in its bin directory."
),
)
@click.version_option(version=__version__)
@click.option("-v", "--verbose", count=True, help="Increase verbosity.")
@click.option("-q", "--quiet", count=True, help="Decrease verbosity.")
@click.option(
"-p", "--python",
help=(
"The Python interpreter to use in the newly created virtual "
"environment."
),
)
@click.option(
"--clear",
is_flag=True,
help="Clear out the virtual environment and start from scratch.",
)
@click.option(
"--system-site-packages/--no-site-packages",
default=False,
help="Give the virtual environment access to the global site-packages.",
)
@click.option(
"--always-copy",
is_flag=True, help="Always copy files rather than symlinking.",
)
@click.option(
"--relocatable",
is_flag=True,
help=(
"Make an EXISTING virtualenv environment relocatable. This fixes up "
"scripts and makes all .pth files relative."
),
)
@click.option(
"--setuptools/--no-setuptools",
default=True,
help="Install setuptools into the new virtual environment.",
)
@click.option(
"--pip/--no-pip",
default=True,
help="Install pip into the new virtual environment.",
)
@click.option(
"--extra-search-dir",
multiple=True,
help=(
"Directory to look for setuptools/pip distributions in. This option "
"can be used multiple times."
),
)
@click.option(
"--prompt",
help="Provides an alternative prompt prefix for this environment.",
)
@click.argument("destination")
def cli(destination,
verbose=0,
quiet=0,
python=None,
system_site_packages=False,
clear=False,
always_copy=False,
prompt=None,
relocatable=False,
extra_search_dir=None,
pip=True,
setuptools=True):
"""
Creates virtual python environments in a target directory.
"""
create(
destination,
python=python,
system_site_packages=system_site_packages,
clear=clear,
pip=pip,
setuptools=setuptools,
extra_search_dirs=extra_search_dir,
prompt=prompt,
)
| Python | 0.999907 |
1b2a1bb5f4c99f80c3664a40796939732e9fe91c | bump dev version | bndl/__init__.py | bndl/__init__.py | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging.config
import os.path
from bndl.util.conf import Config, String
from bndl.util.log import install_trace_logging
from bndl.util.objects import LazyObject
# Expose a global BNDL configuration
conf = LazyObject(Config)
# Configure Logging
logging_conf = String('logging.conf')
install_trace_logging()
logging.captureWarnings(True)
if os.path.exists(conf['bndl.logging_conf']):
logging.config.fileConfig(conf['bndl.logging_conf'], disable_existing_loggers=False)
# BNDL version info
__version_info__ = (0, 7, 0, 'dev2')
__version__ = '.'.join(map(str, __version_info__))
| # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging.config
import os.path
from bndl.util.conf import Config, String
from bndl.util.log import install_trace_logging
from bndl.util.objects import LazyObject
# Expose a global BNDL configuration
conf = LazyObject(Config)
# Configure Logging
logging_conf = String('logging.conf')
install_trace_logging()
logging.captureWarnings(True)
if os.path.exists(conf['bndl.logging_conf']):
logging.config.fileConfig(conf['bndl.logging_conf'], disable_existing_loggers=False)
# BNDL version info
__version_info__ = (0, 6, 0)
__version__ = '.'.join(map(str, __version_info__))
| Python | 0 |
ee3ee6810f1f8fcc535e29f0f2a2af425dcea7c4 | add db_handler instance to lint_github | lintable_lintball/lintball.py | lintable_lintball/lintball.py | # Copyright 2015-2016 Capstone Team G
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import List
from uuid import uuid4
from lintable_db.database import DatabaseHandler
from lintable_db.models import User
from lintable_git.git_handler import GitHandler
from lintable_lintball.lint_error import LintError
from lintable_lintball.lint_report import LintReport
from lintable_lintball.lint_wrapper import LintWrapper
from lintable_lintball.runner import runner
from lintable_linters.whitespace_file_linter import WhitespaceFileLinter
from lintable_processes.db_handler import DBHandler
from lintable_processes.log_handler import LogHandler
from lintable_processes.process_handler import ProcessHandler
@runner.task(serializer='json')
def lint_github(payload: json, task_id=uuid4()):
logger = logging.getLogger()
if payload['action'] != 'opened' and payload['action'] != 'synchronized':
return
github_id = payload['repository']['owner']['id']
owner = DatabaseHandler.get_user(github_id)
oauth_key = owner.get_oauth_token() if isinstance(owner, User) else None
if oauth_key is None:
logger.error('Unable to locate oauth_token for {user} with id of {id}'.format(user=owner, id=github_id))
return
repo_url = 'https://{oauth_key}github.com/{full_name}.git'.format(
oauth_key=oauth_key,
full_name=payload['repository']['full_name'])
sha1_a = payload['pull_request']['head']['sha']
sha1_b = payload['pull_request']['base']['sha']
repo_id = payload['pull_request']['repository']['id']
process_handler = ProcessHandler(repo=repo_url,
uuid=task_id,
logger=LogHandler(logger),
db=DBHandler(repo_id=repo_id))
git_handler = GitHandler(process_handler=process_handler,
repo_url=repo_url,
sha1_a=sha1_a,
sha1_b=sha1_b)
lint_process(git_handler, process_handler)
return
def lint_process(git_handler: GitHandler,
process_handler: ProcessHandler,
linters=None):
if linters is None:
linters = [WhitespaceFileLinter()]
git_handler.started()
git_handler.clone_repo()
git_handler.retrieve_changed_files_from_commit()
lintball(process_handler, linters)
return
def lintball(handler: ProcessHandler, linters: List[LintWrapper]):
a_path = os.path.join(handler.local_path, 'a')
b_path = os.path.join(handler.local_path, 'b')
lint_errors = {}
for filename in handler.files:
a_file = os.path.join(a_path, filename)
b_file = os.path.join(b_path, filename)
a_results = lint(a_file, linters, handler) if os.path.exists(
a_file) else []
b_results = lint(b_file, linters, handler) if os.path.exists(
b_file) else []
lint_errors[filename] = [results for results in b_results if
results not in a_results]
lint_report = LintReport(errors=lint_errors)
handler.report(lint_report)
handler.finish()
return
def lint(filename: str, linters: List[LintWrapper], handler: ProcessHandler) -> List[LintError]:
lint_errors = []
for linter in linters:
handler.lint_file(linter=str(linter), file=filename)
lint_errors.extend(linter.lint(filename))
return lint_errors
| # Copyright 2015-2016 Capstone Team G
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import logging
import os
from typing import List
from uuid import uuid4
from lintable_git.git_handler import GitHandler
from lintable_lintball.lint_error import LintError
from lintable_lintball.lint_report import LintReport
from lintable_lintball.lint_wrapper import LintWrapper
from lintable_lintball.runner import runner
from lintable_linters.whitespace_file_linter import WhitespaceFileLinter
from lintable_processes.log_handler import LogHandler
from lintable_processes.process_handler import ProcessHandler
@runner.task(serializer='json')
def lint_github(payload: json, task_id=uuid4()):
if payload['action'] != 'opened' and payload['action'] != 'synchronized':
return
repo_url = 'https://github.com/{full_name}.git'.format(
full_name=payload['repository']['full_name'])
sha1_a = payload['pull_request']['head']['sha']
sha1_b = payload['pull_request']['base']['sha']
process_handler = ProcessHandler(repo=repo_url, uuid=task_id,
logger=LogHandler(logging.getLogger()))
git_handler = GitHandler(process_handler=process_handler,
repo_url=repo_url,
sha1_a=sha1_a,
sha1_b=sha1_b)
lint_process(git_handler, process_handler)
return
def lint_process(git_handler: GitHandler,
process_handler: ProcessHandler,
linters=None):
if linters is None:
linters = [WhitespaceFileLinter()]
git_handler.started()
git_handler.clone_repo()
git_handler.retrieve_changed_files_from_commit()
lintball(process_handler, linters)
return
def lintball(handler: ProcessHandler, linters: List[LintWrapper]):
a_path = os.path.join(handler.local_path, 'a')
b_path = os.path.join(handler.local_path, 'b')
lint_errors = {}
for filename in handler.files:
a_file = os.path.join(a_path, filename)
b_file = os.path.join(b_path, filename)
a_results = lint(a_file, linters, handler) if os.path.exists(
a_file) else []
b_results = lint(b_file, linters, handler) if os.path.exists(
b_file) else []
lint_errors[filename] = [results for results in b_results if
results not in a_results]
lint_report = LintReport(errors=lint_errors)
handler.report(lint_report)
handler.finish()
return
def lint(filename: str, linters: List[LintWrapper], handler: ProcessHandler) -> List[LintError]:
lint_errors = []
for linter in linters:
handler.lint_file(linter=str(linter), file=filename)
lint_errors.extend(linter.lint(filename))
return lint_errors
| Python | 0 |
b159d28dc965e60843f2617b4ae40d6c04cd2604 | Optimize sensitive areas API | geotrek/api/v2/views/sensitivity.py | geotrek/api/v2/views/sensitivity.py | from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F, Case, When
from django_filters.rest_framework.backends import DjangoFilterBackend
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from geotrek.api.v2 import serializers as api_serializers, \
viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Buffer, GeometryType
from geotrek.sensitivity import models as sensitivity_models
from ..filters import GeotrekQueryParamsFilter, GeotrekInBBoxFilter, GeotrekSensitiveAreaFilter
class SensitiveAreaViewSet(api_viewsets.GeotrekViewset):
filter_backends = (
DjangoFilterBackend,
GeotrekQueryParamsFilter,
GeotrekInBBoxFilter,
GeotrekSensitiveAreaFilter,
)
serializer_class = api_serializers.SensitiveAreaListSerializer
serializer_detail_class = api_serializers.SensitiveAreaListSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
authentication_classes = []
queryset = sensitivity_models.SensitiveArea.objects.existing() \
.filter(published=True) \
.select_related('species', 'structure') \
.prefetch_related('species__practices') \
.annotate(geom_type=GeometryType(F('geom'))) \
.annotate(geom2d_transformed=Case(
When(geom_type='POINT', then=Transform(Buffer(F('geom'), F('species__radius'), 4), settings.API_SRID)),
When(geom_type='POLYGON', then=Transform(F('geom'), settings.API_SRID))
))
| from __future__ import unicode_literals
from django.conf import settings
from django.db.models import F, Case, When
from django_filters.rest_framework.backends import DjangoFilterBackend
from rest_framework.permissions import IsAuthenticatedOrReadOnly
from geotrek.api.v2 import serializers as api_serializers, \
viewsets as api_viewsets
from geotrek.api.v2.functions import Transform, Buffer, GeometryType
from geotrek.sensitivity import models as sensitivity_models
from ..filters import GeotrekQueryParamsFilter, GeotrekInBBoxFilter, GeotrekSensitiveAreaFilter
class SensitiveAreaViewSet(api_viewsets.GeotrekViewset):
filter_backends = (
DjangoFilterBackend,
GeotrekQueryParamsFilter,
GeotrekInBBoxFilter,
GeotrekSensitiveAreaFilter,
)
serializer_class = api_serializers.SensitiveAreaListSerializer
serializer_detail_class = api_serializers.SensitiveAreaListSerializer
permission_classes = [IsAuthenticatedOrReadOnly]
authentication_classes = []
queryset = sensitivity_models.SensitiveArea.objects.existing() \
.filter(published=True) \
.prefetch_related('species') \
.annotate(geom_type=GeometryType(F('geom'))) \
.annotate(geom2d_transformed=Case(
When(geom_type='POINT', then=Transform(Buffer(F('geom'), F('species__radius'), 4), settings.API_SRID)),
When(geom_type='POLYGON', then=Transform(F('geom'), settings.API_SRID))
))
| Python | 0.000001 |
9433fa8970341cb2d024bceb0e23e93fbfb71393 | Update python test | solidity/python/FormulaTestSale.py | solidity/python/FormulaTestSale.py | from sys import argv
from decimal import Decimal
from random import randrange
from Formula import calculateSaleReturn
def formulaTest(supply,reserve,ratio,amount):
fixed = Decimal(calculateSaleReturn(supply,reserve,ratio,amount))
real = Decimal(reserve)*(1-(1-Decimal(amount)/Decimal(supply))**(100/Decimal(ratio)))
if fixed > real:
error = []
error.append('error occurred on:')
error.append('supply = {}'.format(supply))
error.append('reserve = {}'.format(reserve))
error.append('ratio = {}'.format(ratio))
error.append('amount = {}'.format(amount))
error.append('fixed = {}'.format(fixed))
error.append('real = {}'.format(real))
raise BaseException('\n'.join(error))
return fixed/real
size = int(argv[1]) if len(argv) > 1 else 0
if size == 0:
size = input('How many test-cases would you like to execute? ')
worstAccuracy = 1
numOfFailures = 0
for n in xrange(size):
supply = randrange(2,10**26)
reserve = randrange(1,10**23)
ratio = randrange(1,99)
amount = randrange(1,supply)
try:
accuracy = formulaTest(supply,reserve,ratio,amount)
worstAccuracy = min(worstAccuracy,accuracy)
except Exception,error:
accuracy = 0
numOfFailures += 1
except BaseException,error:
print error
break
print 'Test #{}: accuracy = {:.12f}, worst accuracy = {:.12f}, num of failures = {}'.format(n,accuracy,worstAccuracy,numOfFailures)
| from sys import argv
from decimal import Decimal
from random import randrange
from Formula import calculateSaleReturn
def formulaTest(supply,reserve,ratio,amount):
fixed = Decimal(calculateSaleReturn(supply,reserve,ratio,amount))
real = Decimal(reserve)*(1-(1-Decimal(amount)/Decimal(supply))**(100/Decimal(ratio)))
if fixed > real:
error = []
error.append('error occurred on:')
error.append('supply = {}'.format(supply))
error.append('reserve = {}'.format(reserve))
error.append('ratio = {}'.format(ratio))
error.append('amount = {}'.format(amount))
error.append('fixed = {}'.format(fixed))
error.append('real = {}'.format(real))
raise BaseException('\n'.join(error))
return fixed/real
size = int(argv[1]) if len(argv) > 1 else 0
if size == 0:
size = input('How many test-cases would you like to execute? ')
n = 0
worstAccuracy = 1
numOfFailures = 0
while n < size: # avoid creating a large range in memory
supply = randrange(2,10**26)
reserve = randrange(1,10**23)
ratio = randrange(1,99)
amount = randrange(1,supply)
try:
accuracy = formulaTest(supply,reserve,ratio,amount)
worstAccuracy = min(worstAccuracy,accuracy)
except Exception,error:
accuracy = 0
numOfFailures += 1
except BaseException,error:
print error
break
print 'Test #{}: accuracy = {:.12f}, worst accuracy = {:.12f}, num of failures = {}'.format(n,accuracy,worstAccuracy,numOfFailures)
n += 1
| Python | 0.000001 |
e364bdf7723ca45ac1000eda13a76cf1b19f0ad8 | Remove a debug print | plugins/plugin_node_manager/src/plugin_node_manager/launch_item.py | plugins/plugin_node_manager/src/plugin_node_manager/launch_item.py | #!/usr/bin/env python
################################################################################
#
# Copyright Airbus Group SAS 2015
# All rigths reserved.
#
# File Name : setup.py
# Authors : Martin Matignon
#
# If you find any bug or if you have any question please contact
# Adolfo Suarez Roos <adolfo.suarez@airbus.com>
# Martin Matignon <martin.matignon.external@airbus.com>
#
#
################################################################################
import rospy
import time
import os
import roslaunch
import subprocess
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
def widget_creator(obj_ui):
widget = QWidget()
layout = QHBoxLayout(widget)
layout.setSpacing(6)
layout.setContentsMargins(0, 0, 0, 0)
spacer_left = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
spacer_right = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
layout.addItem(spacer_left)
layout.addWidget(obj_ui)
layout.addItem(spacer_right)
return widget
class LaunchItem:
def __init__(self, launch, machine):
self.launch_name = QLabel(launch)
self.launch_name.setContentsMargins(0,0,10,0)
self.launch_name.setMinimumHeight(40)
self.combo_machines = QComboBox()
self.combo_machines.setMinimumHeight(40)
self.combo_machines.addItem('cobotgui-dev:127.0.0.1')
self.combo_machines.addItem('cobot:192.168.0.1')
rsc = os.path.join(get_pkg_dir('plugin_node_manager'),'resources')
icon_launch = QIcon(rsc+'/launch.png')
self.button_launch = QPushButton()
self.button_launch.setIcon(icon_launch)
self.button_launch.setIconSize(QSize(30,30))
self.button_launch.setFixedSize(QSize(100,40))
self.button_launch.clicked.connect(self._launch_node_slot)
self.button_launch_widget = widget_creator(self.button_launch)
def _launch_node_slot(self):
rospy.loginfo('%s::_launch_node()'%self.launch_name.text())
subprocess.Popen(['roslaunch',
'node_launchers',
self.launch_name.text()])
#End of file
| #!/usr/bin/env python
################################################################################
#
# Copyright Airbus Group SAS 2015
# All rigths reserved.
#
# File Name : setup.py
# Authors : Martin Matignon
#
# If you find any bug or if you have any question please contact
# Adolfo Suarez Roos <adolfo.suarez@airbus.com>
# Martin Matignon <martin.matignon.external@airbus.com>
#
#
################################################################################
import rospy
import time
import os
import roslaunch
import subprocess
from roslib.packages import get_pkg_dir
from python_qt_binding.QtGui import *
from python_qt_binding.QtCore import *
def widget_creator(obj_ui):
widget = QWidget()
layout = QHBoxLayout(widget)
layout.setSpacing(6)
layout.setContentsMargins(0, 0, 0, 0)
spacer_left = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
spacer_right = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)
layout.addItem(spacer_left)
layout.addWidget(obj_ui)
layout.addItem(spacer_right)
return widget
class LaunchItem:
def __init__(self, launch, machine):
self.launch_name = QLabel(launch)
self.launch_name.setContentsMargins(0,0,10,0)
self.launch_name.setMinimumHeight(40)
self.combo_machines = QComboBox()
self.combo_machines.setMinimumHeight(40)
self.combo_machines.addItem('cobotgui-dev:127.0.0.1')
self.combo_machines.addItem('cobot:192.168.0.1')
rsc = os.path.join(get_pkg_dir('plugin_node_manager'),'resources')
icon_launch = QIcon(rsc+'/launch.png')
self.button_launch = QPushButton()
self.button_launch.setIcon(icon_launch)
self.button_launch.setIconSize(QSize(30,30))
self.button_launch.setFixedSize(QSize(100,40))
self.button_launch.clicked.connect(self._launch_node_slot)
self.button_launch_widget = widget_creator(self.button_launch)
def _launch_node_slot(self):
print 'coucou'
rospy.loginfo('%s::_launch_node()'%self.launch_name.text())
subprocess.Popen(['roslaunch',
'node_launchers',
self.launch_name.text()])
#End of file
| Python | 0.000028 |
3c916451ebb584a72fb0a92c2a577427ff10003c | Make Height Change Also Be A Valid Ping | dataserv/Farmer.py | dataserv/Farmer.py | import hashlib
from dataserv.run import db
from datetime import datetime
from sqlalchemy import DateTime
from dataserv.Validator import is_btc_address
def sha256(content):
"""Finds the sha256 hash of the content."""
content = content.encode('utf-8')
return hashlib.sha256(content).hexdigest()
class Farmer(db.Model):
id = db.Column(db.Integer, primary_key=True)
btc_addr = db.Column(db.String(35), unique=True)
last_seen = db.Column(DateTime, default=datetime.utcnow)
height = db.Column(db.Integer, default=0)
def __init__(self, btc_addr, last_seen=None):
"""
A farmer is a un-trusted client that provides some disk space
in exchange for payment. We use this object to keep track of
farmers connected to this node.
"""
self.btc_addr = btc_addr
self.last_seen = last_seen
def __repr__(self):
return '<Farmer BTC Address: %r>' % self.btc_addr
def is_btc_address(self):
"""Check if the address is a valid Bitcoin public key."""
return is_btc_address(self.btc_addr)
def validate(self, register=False):
"""Make sure this farmer fits the rules for this node."""
# check if this is a valid BTC address or not
if not self.is_btc_address():
raise ValueError("Invalid BTC Address.")
elif self.exists() and register:
raise LookupError("Address Already Is Registered.")
elif not self.exists() and not register:
raise LookupError("Address Not Registered.")
def register(self):
"""Add the farmer to the database."""
self.validate(True)
# If everything works correctly then commit to database.
db.session.add(self)
db.session.commit()
def exists(self):
"""Check to see if this address is already listed."""
query = db.session.query(Farmer.btc_addr)
return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0
def lookup(self):
"""Return the Farmer object for the bitcoin address passed."""
self.validate()
farmer = Farmer.query.filter_by(btc_addr=self.btc_addr).first()
return farmer
def ping(self):
"""
Keep-alive for the farmer. Validation can take a long time, so
we just want to know if they are still there.
"""
farmer = self.lookup()
farmer.last_seen = datetime.utcnow()
db.session.commit()
# TODO: Actually do an audit.
def audit(self):
"""
Complete a cryptographic audit of files stored on the farmer. If
the farmer completes an audit we also update when we last saw them.
"""
self.ping()
def set_height(self, height):
"""Set the farmers advertised height."""
self.validate()
self.ping() # also serves as a valid ping
farmer = self.lookup()
farmer.height = height
db.session.commit()
return self.height
| import hashlib
from dataserv.run import db
from datetime import datetime
from sqlalchemy import DateTime
from dataserv.Validator import is_btc_address
def sha256(content):
"""Finds the sha256 hash of the content."""
content = content.encode('utf-8')
return hashlib.sha256(content).hexdigest()
class Farmer(db.Model):
id = db.Column(db.Integer, primary_key=True)
btc_addr = db.Column(db.String(35), unique=True)
last_seen = db.Column(DateTime, default=datetime.utcnow)
height = db.Column(db.Integer, default=0)
def __init__(self, btc_addr, last_seen=None):
"""
A farmer is a un-trusted client that provides some disk space
in exchange for payment. We use this object to keep track of
farmers connected to this node.
"""
self.btc_addr = btc_addr
self.last_seen = last_seen
def __repr__(self):
return '<Farmer BTC Address: %r>' % self.btc_addr
def is_btc_address(self):
"""Check if the address is a valid Bitcoin public key."""
return is_btc_address(self.btc_addr)
def validate(self, register=False):
"""Make sure this farmer fits the rules for this node."""
# check if this is a valid BTC address or not
if not self.is_btc_address():
raise ValueError("Invalid BTC Address.")
elif self.exists() and register:
raise LookupError("Address Already Is Registered.")
elif not self.exists() and not register:
raise LookupError("Address Not Registered.")
def register(self):
"""Add the farmer to the database."""
self.validate(True)
# If everything works correctly then commit to database.
db.session.add(self)
db.session.commit()
def exists(self):
"""Check to see if this address is already listed."""
query = db.session.query(Farmer.btc_addr)
return query.filter(Farmer.btc_addr == self.btc_addr).count() > 0
def lookup(self):
"""Return the Farmer object for the bitcoin address passed."""
self.validate()
farmer = Farmer.query.filter_by(btc_addr=self.btc_addr).first()
return farmer
def ping(self):
"""
Keep-alive for the farmer. Validation can take a long time, so
we just want to know if they are still there.
"""
farmer = self.lookup()
farmer.last_seen = datetime.utcnow()
db.session.commit()
# TODO: Actually do an audit.
def audit(self):
"""
Complete a cryptographic audit of files stored on the farmer. If
the farmer completes an audit we also update when we last saw them.
"""
self.ping()
def set_height(self, height):
"""Set the farmers advertised height."""
self.validate()
farmer = self.lookup()
farmer.height = height
db.session.commit()
return self.height
| Python | 0 |
31caceefaa2f6b6dc7d2601d8537e613ce600743 | Use account's static groups instead of a conversation's groups for dialogue group state | go/apps/dialogue/view_definition.py | go/apps/dialogue/view_definition.py | import json
from django.http import HttpResponse
from django.forms import Form
from go.api.go_api import client
from go.api.go_api.client import GoApiError
from go.conversation.view_definition import (
ConversationViewDefinitionBase, ConversationTemplateView)
class DialogueEditView(ConversationTemplateView):
"""This app is a unique and special snowflake, so it gets special views.
"""
view_name = 'edit'
path_suffix = 'edit/'
template_base = 'dialogue'
def get(self, request, conversation):
r = client.rpc(
request.session.session_key, 'conversation.dialogue.get_poll',
[request.user_api.user_account_key,
conversation.key])
if r.status_code != 200:
raise GoApiError(
"Failed to load dialogue from Go API:"
" (%r) %r." % (r.status_code, r.text))
contact_store = conversation.user_api.contact_store
groups = contact_store.list_static_groups()
model_data = {
'campaign_id': request.user_api.user_account_key,
'conversation_key': conversation.key,
'groups': [g.get_data() for g in groups],
'urls': {
'show': self.get_view_url(
'show',
conversation_key=conversation.key)
}
}
model_data.update(r.json['result']['poll'])
return self.render_to_response({
'conversation': conversation,
'session_id': request.session.session_key,
'model_data': json.dumps(model_data),
})
class UserDataView(ConversationTemplateView):
view_name = 'user_data'
path_suffix = 'users.csv'
def get(self, request, conversation):
# TODO: write new CSV data export
csv_data = "TODO: write data export."
return HttpResponse(csv_data, content_type='application/csv')
class SendDialogueForm(Form):
# TODO: Something better than this?
pass
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = DialogueEditView
extra_views = (
UserDataView,
)
action_forms = {
'send_jsbox': SendDialogueForm,
}
| import json
from django.http import HttpResponse
from django.forms import Form
from go.api.go_api import client
from go.api.go_api.client import GoApiError
from go.conversation.view_definition import (
ConversationViewDefinitionBase, ConversationTemplateView)
class DialogueEditView(ConversationTemplateView):
"""This app is a unique and special snowflake, so it gets special views.
"""
view_name = 'edit'
path_suffix = 'edit/'
template_base = 'dialogue'
def get(self, request, conversation):
r = client.rpc(
request.session.session_key, 'conversation.dialogue.get_poll',
[request.user_api.user_account_key,
conversation.key])
if r.status_code != 200:
raise GoApiError(
"Failed to load dialogue from Go API:"
" (%r) %r." % (r.status_code, r.text))
model_data = {
'campaign_id': request.user_api.user_account_key,
'conversation_key': conversation.key,
'groups': [g.get_data() for g in conversation.get_groups()],
'urls': {
'show': self.get_view_url(
'show',
conversation_key=conversation.key)
}
}
model_data.update(r.json['result']['poll'])
return self.render_to_response({
'conversation': conversation,
'session_id': request.session.session_key,
'model_data': json.dumps(model_data),
})
class UserDataView(ConversationTemplateView):
view_name = 'user_data'
path_suffix = 'users.csv'
def get(self, request, conversation):
# TODO: write new CSV data export
csv_data = "TODO: write data export."
return HttpResponse(csv_data, content_type='application/csv')
class SendDialogueForm(Form):
# TODO: Something better than this?
pass
class ConversationViewDefinition(ConversationViewDefinitionBase):
edit_view = DialogueEditView
extra_views = (
UserDataView,
)
action_forms = {
'send_jsbox': SendDialogueForm,
}
| Python | 0.000001 |
513817ef4ede24ce7609afb9d025107d8f96532b | Fix test on Windows | gouda/tests/test_decode_barcodes.py | gouda/tests/test_decode_barcodes.py | import unittest
import shutil
from pathlib import Path
from gouda.engines import ZbarEngine
from gouda.scripts.decode_barcodes import main
from utils import temp_directory_with_files
TESTDATA = Path(__file__).parent.joinpath('test_data')
@unittest.skipUnless(ZbarEngine.available(), 'ZbarEngine unavailable')
class TestRename(unittest.TestCase):
def test_rename(self):
"File is renamed with value of barcode"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_multiple(self):
"File with multiple barcodes results in renamed / copied to three files"
with temp_directory_with_files(TESTDATA.joinpath('BM001128287.jpg')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['BM001128286.jpg', 'BM001128287.jpg', 'BM001128288.jpg'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_with_collisions(self):
"Files with same barcode values results in just a single rename"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png', 'first copy.png', 'second copy.png'],
[path.name for path in sorted(tempdir.iterdir(), key=lambda p: p.name)]
)
def test_rename_avoid_collisions(self):
"Files with same barcode values results in new files with suffixes"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir), '--avoid-collisions'])
print([path.name for path in sorted(tempdir.iterdir())])
self.assertEqual(
['Stegosaurus-1.png', 'Stegosaurus-2.png', 'Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
if __name__ == '__main__':
unittest.main()
| import unittest
import shutil
from pathlib import Path
from gouda.engines import ZbarEngine
from gouda.scripts.decode_barcodes import main
from utils import temp_directory_with_files
TESTDATA = Path(__file__).parent.joinpath('test_data')
@unittest.skipUnless(ZbarEngine.available(), 'ZbarEngine unavailable')
class TestRename(unittest.TestCase):
def test_rename(self):
"File is renamed with value of barcode"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_multiple(self):
"File with multiple barcodes results in renamed / copied to three files"
with temp_directory_with_files(TESTDATA.joinpath('BM001128287.jpg')) as tempdir:
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['BM001128286.jpg', 'BM001128287.jpg', 'BM001128288.jpg'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_with_collisions(self):
"Files with same barcode values results in just a single rename"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir)])
self.assertEqual(
['Stegosaurus.png', 'first copy.png', 'second copy.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
def test_rename_avoid_collisions(self):
"Files with same barcode values results in new files with suffixes"
with temp_directory_with_files(TESTDATA.joinpath('code128.png')) as tempdir:
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('first copy.png'))
)
shutil.copy(
unicode(TESTDATA.joinpath('code128.png')),
unicode(tempdir.joinpath('second copy.png'))
)
main(['zbar', '--action=rename', unicode(tempdir), '--avoid-collisions'])
print([path.name for path in sorted(tempdir.iterdir())])
self.assertEqual(
['Stegosaurus-1.png', 'Stegosaurus-2.png', 'Stegosaurus.png'],
[path.name for path in sorted(tempdir.iterdir())]
)
if __name__ == '__main__':
unittest.main()
| Python | 0 |
883cd72c33ae434f8452ca6923eaa2aa8dfd5f3d | Access key tests. | src/encoded/tests/test_access_key.py | src/encoded/tests/test_access_key.py | import pytest
def basic_auth(username, password):
from base64 import b64encode
return 'Basic ' + b64encode('%s:%s' % (username, password))
@pytest.datafixture
def access_keys(app):
from webtest import TestApp
environ = {
'HTTP_ACCEPT': 'application/json',
'REMOTE_USER': 'TEST',
}
testapp = TestApp(app, environ)
from .sample_data import URL_COLLECTION
url = '/users/'
users = []
for item in URL_COLLECTION[url]:
res = testapp.post_json(url, item, status=201)
principals = [
'system.Authenticated',
'system.Everyone',
'userid:' + item['_uuid'],
]
principals.extend('lab:' + lab_uuid for lab_uuid in item['lab_uuids'])
users.append({
'location': res.location,
'effective_principals': sorted(principals),
'_uuid': item['_uuid'],
})
access_keys = []
for user in users:
description = 'My programmatic key'
url = '/access-keys/'
item = {'user_uuid': user['_uuid'], 'description': description}
res = testapp.post_json(url, item, status=201)
access_keys.append({
'location': res.location,
'access_key_id': res.json['access_key_id'],
'secret_access_key': res.json['secret_access_key'],
'auth_header': basic_auth(res.json['access_key_id'], res.json['secret_access_key']),
'user_uuid': user['_uuid'],
'description': description,
'user': user,
})
return access_keys
@pytest.fixture
def access_key(access_keys):
return access_keys[0]
def test_access_key_principals(anontestapp, execute_counter, access_key):
headers = {'Authorization': access_key['auth_header']}
with execute_counter.expect(2):
res = anontestapp.get('/@@testing-user', headers=headers)
assert res.json['authenticated_userid'] == 'accesskey:' + access_key['access_key_id']
assert sorted(res.json['effective_principals']) == [
'accesskey:' + access_key['access_key_id'],
] + access_key['user']['effective_principals']
def test_access_key_reset(anontestapp, access_key):
headers = {'Authorization': access_key['auth_header']}
res = anontestapp.post_json(access_key['location'] + '/@@reset-secret', {}, headers=headers)
new_headers = {'Authorization': basic_auth(access_key['access_key_id'], res.json['secret_access_key'])}
res = anontestapp.get('/@@testing-user', headers=headers)
assert res.json['authenticated_userid'] is None
res = anontestapp.get('/@@testing-user', headers=new_headers)
assert res.json['authenticated_userid'] == 'accesskey:' + access_key['access_key_id']
def test_access_key_disable(anontestapp, access_key):
headers = {'Authorization': access_key['auth_header']}
res = anontestapp.post_json(access_key['location'] + '/@@disable-secret', {}, headers=headers)
res = anontestapp.get('/@@testing-user', headers=headers)
assert res.json['authenticated_userid'] is None
def test_access_key_edit(anontestapp, access_key):
headers = {'Authorization': access_key['auth_header']}
NEW_DESCRIPTION = 'new description'
properties = {'description': NEW_DESCRIPTION}
anontestapp.post_json(access_key['location'], properties, headers=headers)
res = anontestapp.get(access_key['location'], properties, headers=headers)
assert res.json['description'] == NEW_DESCRIPTION
@pytest.mark.xfail
def test_access_key_view_hides_secret_access_key_hash(anontestapp, access_key):
headers = {'Authorization': access_key['auth_header']}
res = anontestapp.get(access_key['location'], headers=headers)
assert 'secret_access_key_hash' not in res.json
| import pytest
@pytest.fixture
def users(testapp):
from .sample_data import URL_COLLECTION
url = '/users/'
users = []
for item in URL_COLLECTION[url]:
res = testapp.post_json(url, item, status=201)
principals = [
'system.Authenticated',
'system.Everyone',
'userid:' + item['_uuid'],
]
principals.extend('lab:' + lab_uuid for lab_uuid in item['lab_uuids'])
users.append({
'location': res.location,
'effective_principals': sorted(principals),
'_uuid': item['_uuid'],
})
return users
@pytest.fixture
def user(users):
return users[0]
@pytest.fixture
def access_keys(testapp, users):
from base64 import b64encode
access_keys = []
for user in users:
description = 'My programmatic key'
url = '/access-keys/'
item = {'user_uuid': user['_uuid'], 'description': description}
res = testapp.post_json(url, item, status=201)
access_keys.append({
'location': res.location,
'access_key_id': res.json['access_key_id'],
'secret_access_key': res.json['secret_access_key'],
'auth_header': 'Basic ' + b64encode(
'%s:%s' % (res.json['access_key_id'], res.json['secret_access_key'])),
'user_uuid': user['_uuid'],
'description': description,
'user': user,
})
return access_keys
@pytest.fixture
def access_key(access_keys):
return access_keys[0]
def test_access_key_post(anontestapp, execute_counter, access_key):
headers = {'Authorization': access_key['auth_header']}
with execute_counter.expect(2):
res = anontestapp.get('/@@testing-user', headers=headers)
assert res.json['authenticated_userid'] == 'accesskey:' + access_key['access_key_id']
assert sorted(res.json['effective_principals']) == [
'accesskey:' + access_key['access_key_id'],
] + access_key['user']['effective_principals']
res = anontestapp.get(access_key['location'], headers=headers)
assert 'description' in res.json
#assert 'secret_access_key_hash' not in res.json
| Python | 0 |
db22f7a508524409f5e03fdbcbf6a394670ebbde | Use built-in auth views | sweettooth/auth/urls.py | sweettooth/auth/urls.py |
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'login/$', 'django.contrib.auth.views.login',
dict(template_name='login.html'), name='login'),
url(r'logout/$', 'django.contrib.auth.views.logout',
dict(template_name='logout.html'), name='logout'),
url(r'register/$', 'auth.views.register', name='register'),
)
|
from django.conf.urls.defaults import patterns, url
from django.views.generic import TemplateView
urlpatterns = patterns('',
url(r'login/$', 'django.contrib.auth.views.login', dict(template_name='login.html'), name='login'),
url(r'logout/$', 'django.contrib.auth.views.logout', name='logout'),
url(r'register/$', 'auth.views.register', name='register'),
)
| Python | 0.000001 |
91a30d8e5cd18e3c5e6c5f00e48f44d6b33346b5 | clean up cache initialization in mail completer | roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py | roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py | from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
import string
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__cache()
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.__cache()
return self.__candidates
def __cache(self):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
| from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
import string
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.on_event(context)
return self.__candidates
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
| Python | 0.000001 |
87d2780a710e98c3b824583a2cf2607461bce35c | remove an unnecessary import | roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py | roles/dotfiles/files/.vim/rplugin/python3/deoplete/sources/mail.py | from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.sorters = ['sorter_smart']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__cache()
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.__cache()
return self.__candidates
def __cache(self):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
| from .base import Base
from itertools import chain
from deoplete.util import parse_buffer_pattern, getlines
import re
from subprocess import PIPE, Popen
import string
class Source(Base):
def __init__(self, vim):
super().__init__(vim)
self.filetypes = ['mail']
self.name = 'mail'
self.mark = '[@]'
self.matchers = ['matcher_length', 'matcher_full_fuzzy']
self.sorters = ['sorter_smart']
self.min_pattern_length = 0
self.limit = 1000000
self.__pattern = re.compile('^(Bcc|Cc|From|Reply-To|To):(.*, ?| ?)')
self.__wrapper = self.__find_reattach_to_user_namespace_binary()
self.__binary = self.__find_lbdbq_binary()
self.__candidates = None
def on_event(self, context):
self.__cache()
def gather_candidates(self, context):
result = self.__pattern.search(context['input'])
if result is not None:
if not self.__candidates:
self.__cache()
return self.__candidates
def __cache(self):
self.__candidates = []
data = self.__lbdbq('.')
if data:
for line in data:
try:
address, name, source = line.strip().split('\t')
if name:
address = name + ' <' + address + '>'
self.__candidates.append({'word': address, 'kind': source})
except:
pass
def __find_lbdbq_binary(self):
return self.vim.call('exepath', 'lbdbq')
def __find_reattach_to_user_namespace_binary(self):
return self.vim.call('exepath', 'reattach-to-user-namespace')
def __lbdbq(self, query):
if not self.__binary:
return None
if self.__wrapper:
command = [self.__wrapper, self.__binary, query]
else:
command = [self.__binary, query]
try:
process = Popen(command, stderr = PIPE, stdout = PIPE)
out, err = process.communicate()
if not process.returncode:
lines = out.decode('utf-8').split('\n')
if len(lines) > 1:
lines.pop(0)
return lines
except:
pass
| Python | 0.000005 |
92f0dd46bbc1f6fa8d9539d026d6ec1e968cbcfc | Drop inaccessible code from singleton.py | sympy/core/singleton.py | sympy/core/singleton.py | """Singleton mechanism"""
from .assumptions import ManagedProperties
class SingletonRegistry(object):
"""
A map from singleton classes to the corresponding instances.
"""
def __init__(self):
self._classes_to_install = {}
# Dict of classes that have been registered, but that have not have been
# installed as an attribute of this SingletonRegistry.
# Installation automatically happens at the first attempt to access the
# attribute.
# The purpose of this is to allow registration during class
# initialization during import, but not trigger object creation until
# actual use (which should not happen until after all imports are
# finished).
def register(self, cls):
self._classes_to_install[cls.__name__] = cls
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __getattr__(self, name):
"""Python calls __getattr__ if no attribute of that name was installed
yet.
This __getattr__ checks whether a class with the requested name was
already registered but not installed; if no, raises an AttributeError.
Otherwise, retrieves the class, calculates its singleton value, installs
it as an attribute of the given name, and unregisters the class."""
if name not in self._classes_to_install:
raise AttributeError(
"Attribute '%s' was not installed on SymPy registry %s" % (
name, self))
class_to_install = self._classes_to_install[name]
value_to_install = class_to_install()
self.__setattr__(name, value_to_install)
del self._classes_to_install[name]
return value_to_install
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(ManagedProperties):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Examples
========
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> class MySingleton(Basic, metaclass=Singleton):
... pass
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
Notes
=====
Instance creation is delayed until the first time the value is accessed.
This metaclass is a subclass of ManagedProperties because that is the
metaclass of many classes that need to be Singletons (Python does not allow
subclasses to have a different metaclass than the superclass, except the
subclass may use a subclassed metaclass).
"""
_instances = {}
"Maps singleton classes to their instances."
def __new__(cls, *args, **kwargs):
result = super(Singleton, cls).__new__(cls, *args, **kwargs)
S.register(result)
return result
def __call__(self, *args, **kwargs):
# Called when application code says SomeClass(), where SomeClass is a
# class of which Singleton is the metaclas.
# __call__ is invoked first, before __new__() and __init__().
if self not in Singleton._instances:
# Invokes the standard constructor of SomeClass.
Singleton._instances[self] = \
super(Singleton, self).__call__(*args, **kwargs)
return Singleton._instances[self]
| """Singleton mechanism"""
from .assumptions import ManagedProperties
class SingletonRegistry(object):
"""
A map from singleton classes to the corresponding instances.
"""
def __init__(self):
self._classes_to_install = {}
# Dict of classes that have been registered, but that have not have been
# installed as an attribute of this SingletonRegistry.
# Installation automatically happens at the first attempt to access the
# attribute.
# The purpose of this is to allow registration during class
# initialization during import, but not trigger object creation until
# actual use (which should not happen until after all imports are
# finished).
def register(self, cls):
self._classes_to_install[cls.__name__] = cls
def __setattr__(self, name, obj):
setattr(self.__class__, name, obj)
def __delattr__(self, name):
delattr(self.__class__, name)
def __getattr__(self, name):
"""Python calls __getattr__ if no attribute of that name was installed
yet.
This __getattr__ checks whether a class with the requested name was
already registered but not installed; if no, raises an AttributeError.
Otherwise, retrieves the class, calculates its singleton value, installs
it as an attribute of the given name, and unregisters the class."""
if name not in self._classes_to_install:
raise AttributeError(
"Attribute '%s' was not installed on SymPy registry %s" % (
name, self))
class_to_install = self._classes_to_install[name]
value_to_install = class_to_install()
self.__setattr__(name, value_to_install)
del self._classes_to_install[name]
return value_to_install
def __repr__(self):
return "S"
S = SingletonRegistry()
class Singleton(ManagedProperties):
"""
Metaclass for singleton classes.
A singleton class has only one instance which is returned every time the
class is instantiated. Additionally, this instance can be accessed through
the global registry object S as S.<class_name>.
Examples
========
>>> from sympy import S, Basic
>>> from sympy.core.singleton import Singleton
>>> class MySingleton(Basic, metaclass=Singleton):
... pass
>>> Basic() is Basic()
False
>>> MySingleton() is MySingleton()
True
>>> S.MySingleton is MySingleton()
True
Notes
=====
Instance creation is delayed until the first time the value is accessed.
This metaclass is a subclass of ManagedProperties because that is the
metaclass of many classes that need to be Singletons (Python does not allow
subclasses to have a different metaclass than the superclass, except the
subclass may use a subclassed metaclass).
"""
_instances = {}
"Maps singleton classes to their instances."
def __new__(cls, *args, **kwargs):
result = super(Singleton, cls).__new__(cls, *args, **kwargs)
S.register(result)
return result
def __call__(self, *args, **kwargs):
# Called when application code says SomeClass(), where SomeClass is a
# class of which Singleton is the metaclas.
# __call__ is invoked first, before __new__() and __init__().
if self not in Singleton._instances:
# Invokes the standard constructor of SomeClass.
Singleton._instances[self] = \
super(Singleton, self).__call__(*args, **kwargs)
return Singleton._instances[self]
# Inject pickling support.
def __getnewargs__(self):
return ()
self.__getnewargs__ = __getnewargs__
| Python | 0.000001 |
32fba62d157953eaeea6e5885a7ea860632a1945 | rename filter function and set the second parameter as required | sync_settings/helper.py | sync_settings/helper.py | # -*- coding: utf-8 -*-
import os, re
from urllib import parse
def getDifference (setA, setB):
return list(filter(lambda el: el not in setB, setA))
def getHomePath (fl = ""):
if isinstance(fl, str) and fl != "":
return joinPath((os.path.expanduser('~'), fl))
return os.path.expanduser('~')
def existsPath(path, isFolder = False):
opath = os.path
if isinstance(path, str) and path != "" and opath.exists(path):
if (isFolder and opath.isdir(path)): return True
if (not isFolder and opath.isfile(path)): return True
return False
def joinPath (pathTuple):
if isinstance(pathTuple, tuple) and len(pathTuple) > 1:
return os.path.join(*pathTuple)
return None
def getFiles (path):
if existsPath(path, True):
f = []
for root, dirs, files in os.walk(path):
f.extend([joinPath((root, file)) for file in files])
return f
return []
def excludeFilesByPatterns (elements, patterns):
isValidElements = isinstance(elements, list) and len(elements) > 0
isValidPattern = isinstance(patterns, list) and len(patterns) > 0
results = []
if isValidElements and isValidPattern:
for element in elements:
for pattern in patterns:
extension = '.' + element.split(os.extsep)[-1]
filename = os.path.basename(element)
if element.startswith(pattern) and existsPath(pattern, True) and existsPath(joinPath((pattern, filename))):
results.append(element)
elif (extension == pattern or element == pattern) and existsPath(element):
results.append(element)
return getDifference(elements, results)
return elements
def encodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.quote(path)
return None
def decodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.unquote(path)
return None
| # -*- coding: utf-8 -*-
import os, re
from urllib import parse
def getDifference (setA, setB):
return list(filter(lambda el: el not in setB, setA))
def getHomePath (fl = ""):
if isinstance(fl, str) and fl != "":
return joinPath((os.path.expanduser('~'), fl))
return os.path.expanduser('~')
def existsPath(path, isFolder = False):
opath = os.path
if isinstance(path, str) and path != "" and opath.exists(path):
if (isFolder and opath.isdir(path)): return True
if (not isFolder and opath.isfile(path)): return True
return False
def joinPath (pathTuple):
if isinstance(pathTuple, tuple) and len(pathTuple) > 1:
return os.path.join(*pathTuple)
return None
def getFiles (path):
if existsPath(path, True):
f = []
for root, dirs, files in os.walk(path):
f.extend([joinPath((root, file)) for file in files])
return f
return []
def excludeByPatterns (elements, patterns = []):
isValidElements = isinstance(elements, list) and len(elements) > 0
isValidPattern = isinstance(patterns, list) and len(patterns) > 0
results = []
if isValidElements and isValidPattern:
for element in elements:
for pattern in patterns:
extension = '.' + element.split(os.extsep)[-1]
filename = os.path.basename(element)
if element.startswith(pattern) and existsPath(pattern, True) and existsPath(joinPath((pattern, filename))):
results.append(element)
elif (extension == pattern or element == pattern) and existsPath(element):
results.append(element)
return getDifference(elements, results)
return elements
def encodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.quote(path)
return None
def decodePath(path):
if isinstance(path, str) and len(path) > 0:
return parse.unquote(path)
return None
| Python | 0 |
9d53e369e9757c659c72ca1e8bbb8eea8080ab2d | Add possiblity to pass an output stream to the update method | updater.py | updater.py | import configparser
import hashlib
import json
import os
import sys
import requests
def go_through_files(cur_dir, data, repo_name, bw_list, is_whitelist, output):
updated = False
for content in data:
path = os.path.join(cur_dir, content['name'])
print(path, file=output)
# check if file is in the black/whitelist
if (content["name"] in bw_list) != is_whitelist:
print("file found in blacklist/not found in whitelist", file=output)
continue
# if there is a directory go through it per recursive call
if(content["type"] == "dir"):
print("file is directory", file=output)
os.makedirs(path, exist_ok=True)
resp = requests.get(url=content['url'])
if go_through_files(path, json.loads(resp.text), repo_name, bw_list, is_whitelist, output):
updated = True
continue
try:
# check if the file is there
# hash the current file
with open(path, "r", encoding="utf-8") as f:
sha1 = hashlib.sha1()
sha1.update(f.read().encode("utf-8"))
hashoff = format(sha1.hexdigest())
except IOError: # if no file is offline always download
hashoff = None
# download the most recent file
resp = requests.get(url=content["download_url"])
if hashoff:
# hash the most recent file
sha1 = hashlib.sha1()
sha1.update(resp.text.encode('utf-8'))
hashon = format(sha1.hexdigest())
# compare hash of the offline and online file and overwrite if they are
# different
if not hashoff or (hashon != hashoff):
updated = True
print("updating {}", path, file=output)
with open(path, "w", encoding="utf-8") as f:
f.write(resp.text)
else:
print("no difference found", file=output)
return updated
def update(output=sys.stdout):
config = configparser.ConfigParser()
config.read_file(open('updater.settings'))
is_whitelist = config.getboolean("Section1", "whitelist")
repo_name = config.get("Section1", "repo")
bw_list = str(config.get("Section1", "list")).split("\n")
# get a list of files in the repo
resp = requests.get(url="https://api.github.com/repos/" + repo_name + "/contents")
data = json.loads(resp.text)
# check these files
return go_through_files("", data, repo_name, bw_list, is_whitelist, output)
if __name__ == '__main__':
update()
| import configparser
import hashlib
import json
import os
import requests
def go_through_files(cur_dir, data, repo_name, bw_list, is_whitelist):
updated = False
for content in data:
path = os.path.join(cur_dir, content['name'])
print(path)
# check if file is in the black/whitelist
if (content["name"] in bw_list) != is_whitelist:
print("file found in blacklist/not found in whitelist")
continue
# if there is a directory go through it per recursive call
if(content["type"] == "dir"):
print("file is directory")
os.makedirs(path, exist_ok=True)
resp = requests.get(url=content['url'])
if go_through_files(path, json.loads(resp.text), repo_name, bw_list, is_whitelist):
updated = True
continue
try:
# check if the file is there
# hash the current file
with open(path, "r", encoding="utf-8") as f:
sha1 = hashlib.sha1()
sha1.update(f.read().encode("utf-8"))
hashoff = format(sha1.hexdigest())
except IOError: # if no file is offline always download
hashoff = None
# download the most recent file
resp = requests.get(url=content["download_url"])
if hashoff:
# hash the most recent file
sha1 = hashlib.sha1()
sha1.update(resp.text.encode('utf-8'))
hashon = format(sha1.hexdigest())
# compare hash of the offline and online file and overwrite if they are
# different
if not hashoff or (hashon != hashoff):
updated = True
print("updating {}", path)
with open(path, "w", encoding="utf-8") as f:
f.write(resp.text)
else:
print("no difference found")
return updated
def update():
config = configparser.ConfigParser()
config.read_file(open('updater.settings'))
is_whitelist = config.getboolean("Section1", "whitelist")
repo_name = config.get("Section1", "repo")
bw_list = str(config.get("Section1", "list")).split("\n")
# get a list of files in the repo
resp = requests.get(url="https://api.github.com/repos/" + repo_name + "/contents")
data = json.loads(resp.text)
# check these files
return go_through_files("", data, repo_name, bw_list, is_whitelist)
if __name__ == '__main__':
update()
| Python | 0 |
c3c703c6d8b434da40beef6202bf2cbdc01e50a1 | Add configured tests | gym/wrappers/tests/test_wrappers.py | gym/wrappers/tests/test_wrappers.py | import gym
from gym import error
from gym import wrappers
from gym.wrappers import SkipWrapper
import tempfile
import shutil
def test_skip():
every_two_frame = SkipWrapper(2)
env = gym.make("FrozenLake-v0")
env = every_two_frame(env)
obs = env.reset()
env.render()
def test_configured():
env = gym.make("FrozenLake-v0")
env = wrappers.TimeLimit(env)
env.configure()
# Make sure all layers of wrapping are configured
assert env._configured
assert env.env._configured
env.close()
def test_double_configured():
env = gym.make("FrozenLake-v0")
every_two_frame = SkipWrapper(2)
env = every_two_frame(env)
env = wrappers.TimeLimit(env)
env.configure()
# Make sure all layers of wrapping are configured
assert env._configured
assert env.env._configured
assert env.env.env._configured
env.close()
def test_no_double_wrapping():
temp = tempfile.mkdtemp()
try:
env = gym.make("FrozenLake-v0")
env = wrappers.Monitor(env, temp)
try:
env = wrappers.Monitor(env, temp)
except error.DoubleWrapperError:
pass
else:
assert False, "Should not allow double wrapping"
env.close()
finally:
shutil.rmtree(temp)
| import gym
from gym import error
from gym import wrappers
from gym.wrappers import SkipWrapper
import tempfile
import shutil
def test_skip():
every_two_frame = SkipWrapper(2)
env = gym.make("FrozenLake-v0")
env = every_two_frame(env)
obs = env.reset()
env.render()
def test_no_double_wrapping():
temp = tempfile.mkdtemp()
try:
env = gym.make("FrozenLake-v0")
env = wrappers.Monitor(env, temp)
try:
env = wrappers.Monitor(env, temp)
except error.DoubleWrapperError:
pass
else:
assert False, "Should not allow double wrapping"
env.close()
finally:
shutil.rmtree(temp)
if __name__ == '__main__':
test_no_double_wrapping()
| Python | 0.000001 |
fa0174185832fac608cc1b65255231a73aac630a | fix evacuate call on branched lient | healing/handler_plugins/evacuate.py | healing/handler_plugins/evacuate.py | from healing.handler_plugins import base
from healing import exceptions
from healing.openstack.common import log as logging
from healing import utils
LOG = logging.getLogger(__name__)
class Evacuate(base.HandlerPluginBase):
"""evacuate VM plugin.
Data format in action_meta is:
'evacuate_host': True if evacuating the entire host
"""
DESCRIPTION = "Evacuate VM (shared storage)"
NAME = "evacuate"
def start(self, ctx, data):
""" do something... spawn thread?
:param data ActionData Object
shared_storage?
"""
if not self.can_execute(data):
raise exceptions.ActionInProgress()
self.register_action(data)
try:
client = utils.get_nova_client(ctx)
lista = client.servers.evacuate(server=data.target_resource,
host=None, on_shared_storage=True)
self.current_action.output = "Output: " + str(lista)
except Exception as e:
LOG.exception(e)
self.current_action.output = e.message
self.stop(data, True)
return None
self.stop(data)
return self.current_action.id
def stop(self, data, error=False, message=None):
#this will work if not in thread probably, if we change this
#add the id to the data and context
if error:
self.current_action.error()
else:
self.current_action.stop()
self.current_action.save()
LOG.debug("Task stopped")
def can_execute(self, data, ctx=None):
"""
:param data ActionData Obj
move to parent?
"""
return super(Evacuate, self).can_execute(data, ctx=ctx)
| from healing.handler_plugins import base
from healing import exceptions
from healing.openstack.common import log as logging
from healing import utils
LOG = logging.getLogger(__name__)
class Evacuate(base.HandlerPluginBase):
"""evacuate VM plugin.
Data format in action_meta is:
'evacuate_host': True if evacuating the entire host
"""
DESCRIPTION = "Evacuate VM (shared storage)"
NAME = "evacuate"
def start(self, ctx, data):
""" do something... spawn thread?
:param data ActionData Object
shared_storage?
"""
if not self.can_execute(data):
raise exceptions.ActionInProgress()
self.register_action(data)
try:
client = utils.get_nova_client(ctx)
lista = client.servers.evacuate(data.target_resource,
on_shared_storage=True,
find_host=True)
self.current_action.output = "Output: " + str(lista)
except Exception as e:
LOG.exception(e)
self.current_action.output = e.message
self.stop(data, True)
return None
self.stop(data)
return self.current_action.id
def stop(self, data, error=False, message=None):
#this will work if not in thread probably, if we change this
#add the id to the data and context
if error:
self.current_action.error()
else:
self.current_action.stop()
self.current_action.save()
LOG.debug("Task stopped")
def can_execute(self, data, ctx=None):
"""
:param data ActionData Obj
move to parent?
"""
return super(Evacuate, self).can_execute(data, ctx=ctx)
| Python | 0 |
5336ff3967f4e297237045ca0914ae5257e3a767 | fix csv output in one autoplot | htdocs/plotting/auto/scripts/p92.py | htdocs/plotting/auto/scripts/p92.py | import psycopg2.extras
import pyiem.nws.vtec as vtec
import datetime
import pandas as pd
def get_description():
""" Return a dict describing how to call this plotter """
d = dict()
d['data'] = True
d['cache'] = 3600
d['description'] = """This map depicts the number of days since a
Weather Forecast Office has issued a given VTEC product."""
d['arguments'] = [
dict(type='phenomena', name='phenomena',
default='TO', label='Select Watch/Warning Phenomena Type:'),
dict(type='significance', name='significance',
default='W', label='Select Watch/Warning Significance Level:'),
]
return d
def plotter(fdict):
""" Go """
import matplotlib
matplotlib.use('agg')
from pyiem.plot import MapPlot
utc = datetime.datetime.utcnow()
bins = [0, 1, 14, 31, 91, 182, 273, 365, 730, 1460, 2920, 3800]
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phenomena = fdict.get('phenomena', 'TO')
significance = fdict.get('significance', 'W')
cursor.execute("""
select wfo, extract(days from ('TODAY'::date - max(issue))) as m
from warnings where significance = %s and phenomena = %s
GROUP by wfo ORDER by m ASC
""", (significance, phenomena))
data = {}
rows = []
for row in cursor:
wfo = row[0] if row[0] != 'JSJ' else 'SJU'
rows.append(dict(wfo=wfo, days=row[1]))
data[wfo] = max([row[1], 0])
df = pd.DataFrame(rows)
df.set_index('wfo', inplace=True)
m = MapPlot(sector='nws', axisbg='white', nocaption=True,
title='Days since Last %s %s by NWS Office' % (
vtec._phenDict.get(phenomena, phenomena),
vtec._sigDict.get(significance, significance)),
subtitle='Valid %s' % (utc.strftime("%d %b %Y %H%M UTC"),))
m.fill_cwas(data, bins=bins, ilabel=True, units='Days',
lblformat='%.0f')
return m.fig, df
| import psycopg2.extras
import pyiem.nws.vtec as vtec
import datetime
import pandas as pd
def get_description():
""" Return a dict describing how to call this plotter """
d = dict()
d['data'] = True
d['cache'] = 3600
d['description'] = """This map depicts the number of days since a
Weather Forecast Office has issued a given VTEC product."""
d['arguments'] = [
dict(type='phenomena', name='phenomena',
default='TO', label='Select Watch/Warning Phenomena Type:'),
dict(type='significance', name='significance',
default='W', label='Select Watch/Warning Significance Level:'),
]
return d
def plotter(fdict):
""" Go """
import matplotlib
matplotlib.use('agg')
from pyiem.plot import MapPlot
utc = datetime.datetime.utcnow()
bins = [0, 1, 14, 31, 91, 182, 273, 365, 730, 1460, 2920, 3800]
pgconn = psycopg2.connect(database='postgis', host='iemdb', user='nobody')
cursor = pgconn.cursor(cursor_factory=psycopg2.extras.DictCursor)
phenomena = fdict.get('phenomena', 'TO')
significance = fdict.get('significance', 'W')
cursor.execute("""
select wfo, extract(days from ('TODAY'::date - max(issue))) as m
from warnings where significance = %s and phenomena = %s
GROUP by wfo ORDER by m ASC
""", (significance, phenomena))
data = {}
rows = []
for row in cursor:
wfo = row[0] if row[0] != 'JSJ' else 'SJU'
rows.append(dict(wfo=wfo, days=row[1]))
data[wfo] = max([row[1], 0])
df = pd.DataFrame(rows)
m = MapPlot(sector='nws', axisbg='white', nocaption=True,
title='Days since Last %s %s by NWS Office' % (
vtec._phenDict.get(phenomena, phenomena),
vtec._sigDict.get(significance, significance)),
subtitle='Valid %s' % (utc.strftime("%d %b %Y %H%M UTC"),))
m.fill_cwas(data, bins=bins, ilabel=True, units='Days',
lblformat='%.0f')
return m.fig, df
| Python | 0.000046 |
a8d639cbac2439c0079b86b72dd3daee6505e9d0 | Update version file | version.py | version.py | """Versioning controlled via Git Tag, check setup.py"""
__version__ = "0.3.3"
| """Versioning controlled via Git Tag, check setup.py"""
__version__ = "0.3.2"
| Python | 0 |
50a5644e2f4ea4bcc425c4a8ae2ebe230ce7af3d | implement the logic | python/docker_tool/detect_big_docker_image.py | python/docker_tool/detect_big_docker_image.py | # -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : detect_big_docker_image.py
## Author : Denny <denny@dennyzhang.com>
## Description : Make sure all docker images you build is small enough
## Usage:
## python /usr/sbin/detect_big_docker_image.py \
## --checklist_file "/tmp/check_list.txt"
## --whitelist_file "/tmp/whitelist.txt"
##
## Example of /tmp/check_list.txt
## # mysql should not exceed 450 MB
## mysql.*:450
## # all images should not exceed 300 MB
## .*:300
##
## Example of /tmp/whitelist_file:
## denny/jenkins.*
## .*<none>.*
## .*_JENKINS_TEST.*
##
## --
## Created : <2017-05-12>
## Updated: Time-stamp: <2017-05-19 10:52:18>
##-------------------------------------------------------------------
import os, sys
import argparse
import docker
import re
import logging
log_file = "/var/log/%s.log" % (os.path.basename(__file__).rstrip('\.py'))
logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
def skip_items_by_whitelist(item_list, whitelist_file):
ret_list = []
skip_list = []
# load skip rules from config file
with open(whitelist_file,'r') as f:
for row in f:
row = row.strip()
if row == "" or row.startswith('#'):
continue
skip_list.append(row)
for item in item_list:
should_skip = False
for skip_rule in skip_list:
if re.search(skip_rule, item):
should_skip = True
logging.info("Skip check for %s" % (item))
break
if should_skip is False:
ret_list.append(item)
return ret_list
def list_all_docker_tag(client = None):
# https://docker-py.readthedocs.io/en/stable/client.html
if client is None:
client = docker.from_env()
tag_list = []
for image in client.images.list():
for tag in image.tags:
tag_list.append(tag)
return tag_list
def get_image_size_by_tag_mb(tag_name, cli_client = None):
# Use Python Low-level API: https://docker-py.readthedocs.io/en/stable/api.html
if cli_client is None:
cli_client = docker.APIClient(base_url='unix://var/run/docker.sock')
# raise exception, if image not found
response_list = cli_client.inspect_image(tag_name)
size_mb = float(response_list['Size'])/(1024*1024)
return round(size_mb, 2)
def list_image_list(tag_list, cli_client = None):
if cli_client is None:
cli_client = docker.APIClient(base_url='unix://var/run/docker.sock')
logging.info("Show image status.\n%s\t%s\n" % ("IMAGE TAG", "SIZE"))
for tag_name in tag_list:
size_mb = get_image_size_by_tag_mb(tag_name)
logging.info("%s\t%sMB" % (tag_name, size_mb))
def examine_docker_images(tag_list, checklist_file, cli_client = None):
problematic_list = []
check_list = []
with open(checklist_file,'r') as f:
for row in f:
row = row.strip()
if row == "" or row.startswith('#'):
continue
check_list.append(row)
for tag_name in tag_list:
has_matched = False
for check_rule in check_list:
l = check_rule.split(":")
tag_name_pattern = l[0]
max_size_mb = float(l[1])
if re.search(check_rule, tag_name):
has_matched = True
image_size_mb = get_image_size_by_tag_mb(tag_name, cli_client)
if image_size_mb > max_size_mb:
problematic_list.append(tag_name)
break
return problematic_list
################################################################################
if __name__ == '__main__':
# get parameters from users
parser = argparse.ArgumentParser()
parser.add_argument('--checklist_file', required=True, \
help="The list of volumes to backup. Separated by comma", type=str)
parser.add_argument('--whitelist_file', required=True, \
help="The list of volumes to backup. Separated by comma", type=str)
l = parser.parse_args()
volume_dir = l.volume_dir
backup_dir = l.backup_dir
docker_volume_list = l.docker_volume_list
## File : detect_big_docker_image.py ends
| # -*- coding: utf-8 -*-
#!/usr/bin/python
##-------------------------------------------------------------------
## @copyright 2017 DennyZhang.com
## Licensed under MIT
## https://raw.githubusercontent.com/DennyZhang/devops_public/master/LICENSE
##
## File : detect_big_docker_image.py
## Author : Denny <denny@dennyzhang.com>
## Description : Make sure all docker images you build is small enough
## Usage:
## python /usr/sbin/detect_big_docker_image.py \
## --check_list_file "/tmp/check_list.txt"
## --whitelist_file "/tmp/whitelist.txt"
## --
## Created : <2017-05-12>
## Updated: Time-stamp: <2017-05-19 10:49:47>
##-------------------------------------------------------------------
import os, sys
import argparse
import docker
import logging
log_file = "/var/log/%s.log" % (os.path.basename(__file__).rstrip('\.py'))
logging.basicConfig(filename=log_file, level=logging.DEBUG, format='%(asctime)s %(message)s')
logging.getLogger().addHandler(logging.StreamHandler())
def skip_items_by_whitelist(item_list, whitelist_file):
import re
ret_list = []
return ret_list
def list_all_docker_tag(client = None):
# https://docker-py.readthedocs.io/en/stable/client.html
if client is None:
client = docker.from_env()
tag_list = []
for image in client.images.list():
for tag in image.tags:
tag_list.append(tag)
return tag_list
def get_image_size_by_tag_mb(tag_name, cli_client = None):
# Use Python Low-level API: https://docker-py.readthedocs.io/en/stable/api.html
if cli_client is None:
cli_client = docker.APIClient(base_url='unix://var/run/docker.sock')
# raise exception, if image not found
response_list = cli_client.inspect_image(tag_name)
size_mb = float(response_list['Size'])/(1024*1024)
return round(size_mb, 2)
################################################################################
if __name__ == '__main__':
# get parameters from users
parser = argparse.ArgumentParser()
parser.add_argument('--check_list_file', required=True, \
help="The list of volumes to backup. Separated by comma", type=str)
parser.add_argument('--whitelist_file', required=True, \
help="The list of volumes to backup. Separated by comma", type=str)
l = parser.parse_args()
volume_dir = l.volume_dir
backup_dir = l.backup_dir
docker_volume_list = l.docker_volume_list
## File : detect_big_docker_image.py ends
| Python | 0.999999 |
137b20e4aa779be3c97c500ab485126085492ce5 | comment format | pywikibot/families/scratchpad_wikia_family.py | pywikibot/families/scratchpad_wikia_family.py | # -*- coding: utf-8 -*-
from pywikibot import family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'scratchpad_wikia'
self.langs = {
'de':'de.mini.wikia.com',
'en':'scratchpad.wikia.com',
'fr':'bloc-notes.wikia.com',
'ja':'ja.scratchpad.wikia.com',
'zh':'zh.scratchpad.wikia.com',
}
# A few selected big languages for things that we do not want to loop
# over all languages. This is only needed by the titletranslate.py
# module, so if you carefully avoid the options, you could get away
# without these for another wikimedia family.
self.languages_by_size = ['en','de']
def version(self, code):
return "1.14.0"
def scriptpath(self, code):
return ''
| # -*- coding: utf-8 -*-
from pywikibot import family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'scratchpad_wikia'
self.langs = {
'de':'de.mini.wikia.com',
'en':'scratchpad.wikia.com',
'fr':'bloc-notes.wikia.com',
'ja':'ja.scratchpad.wikia.com',
'zh':'zh.scratchpad.wikia.com',
}
# A few selected big languages for things that we do not want
# to loop over all languages. This is only needed by the
# titletranslate.py module, so if you carefully avoid the
# options, you could get away without these for another
# wikimedia family.
self.languages_by_size = ['en','de']
def version(self, code):
return "1.14.0"
def scriptpath(self, code):
return ''
| Python | 0.000001 |
c898b68fa8d81963b7a5282e67ecb28764bbd0a3 | Add comment explaining mocking | tests/app/models/test_contact_list.py | tests/app/models/test_contact_list.py | from datetime import datetime
from app.models.contact_list import ContactList
from app.models.job import PaginatedJobs
def test_created_at():
created_at = ContactList({'created_at': '2016-05-06T07:08:09.061258'}).created_at
assert isinstance(created_at, datetime)
assert created_at.isoformat() == '2016-05-06T08:08:09.061258+01:00'
def test_get_jobs(mock_get_jobs):
contact_list = ContactList({'id': 'a', 'service_id': 'b'})
assert isinstance(contact_list.get_jobs(page=123), PaginatedJobs)
# mock_get_jobs mocks the underlying API client method, not
# contact_list.get_jobs
mock_get_jobs.assert_called_once_with(
'b',
contact_list_id='a',
statuses={
'finished',
'sending limits exceeded',
'ready to send',
'scheduled',
'sent to dvla',
'pending',
'in progress',
},
page=123,
)
| from datetime import datetime
from app.models.contact_list import ContactList
from app.models.job import PaginatedJobs
def test_created_at():
created_at = ContactList({'created_at': '2016-05-06T07:08:09.061258'}).created_at
assert isinstance(created_at, datetime)
assert created_at.isoformat() == '2016-05-06T08:08:09.061258+01:00'
def test_get_jobs(mock_get_jobs):
contact_list = ContactList({'id': 'a', 'service_id': 'b'})
assert isinstance(contact_list.get_jobs(page=123), PaginatedJobs)
mock_get_jobs.assert_called_once_with(
'b',
contact_list_id='a',
statuses={
'finished',
'sending limits exceeded',
'ready to send',
'scheduled',
'sent to dvla',
'pending',
'in progress',
},
page=123,
)
| Python | 0 |
cfe2c5b405cc5cc74fed81e506e717698236f608 | debug print lines | yumoter.py | yumoter.py | #!/usr/bin/env python2
import sys, os, json, errno, subprocess, yum
class yumoter:
def __init__(self, configFile, repobasepath):
self.repobasepath = repobasepath
self.reloadConfig(configFile)
self.yb = yum.YumBase()
self.yb.setCacheDir()
def reloadConfig(self, jsonFile):
self.repoConfig = self._getConfig(jsonFile)
self._getPaths()
def _getConfig(self, jsonFile):
fh = open(jsonFile, 'r')
jsonOutput = json.load(fh)
fh.close()
return jsonOutput
def _mkdir_p(self, path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _runRsync(self, rsrc, rdst, args):
# str(rsrc), str(rdst), list(args)
sysCall = ['rsync'] + args + [rsrc, rdst]
rsyncStdout = []
rsyncStderr = []
p = subprocess.Popen(sysCall, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
for line in iter(p.stdout.readline, ""):
stdoutLine = line.strip() + '\r\n'
rsyncStdout.append(stdoutLine)
sys.stdout.write(stdoutLine)
sys.stdout.flush()
for line in iter(p.stderr.readline, ""):
stderrLine = line.strip() + '\r\n'
rsyncStderr.append(stderrLine)
sys.stderr.write(stderrLine)
sys.stderr.flush()
return (stdoutLine, stderrLine)
# TODO check return status please. Stop coding like a 12 year old.
def getDeps(self, pkgObj):
depDicts = yb.findDeps([pkgObj])
return depDicts
def _getPaths(self):
for repo in self.repoConfig:
repopath = []
# Does this repo have a path for promotion?
if 'promotionpath' in self.repoConfig[repo]:
for promopath in self.repoConfig[repo]['promotionpath']:
repopath.append("%s/%s/%s" % (self.repobasepath, self.repoConfig[repo]['path'], promopath))
else:
# repo does not have a path for promotion
repopath.append("%s/%s" % (self.repobasepath, self.repoConfig[repo]['path']))
self.repoConfig[repo]['fullpaths'] = repopath
def _mkPaths(self):
masterPathList = []
for repo in self.repoConfig:
if 'promotionpath' in self.repoConfig[repo]:
for entry in self.repoConfig[repo]['fullpaths']:
masterPathList.append(entry)
for entry in masterPathList:
if not os.path.isdir(entry):
print "creating missing dir: %s" % entry
self._mkdir_p(entry)
def syncRepos(self):
for repo in self.repoConfig:
print repo
# Only repos with upstream set need to be synced.
if 'upstream' in self.repoConfig[repo]:
# If the dst dir doesn't exist, create it.
if not os.path.isdir(self.repoConfig[repo]['fullpaths'][0]):
self._mkPaths()
else:
print "foo"
print self.repoConfig[repo]['fullpaths']
#a = self._runRsync(self.repoConfig[repo]['upstream'], self.repoConfig[repo]['fullpaths'][0], ['-av', '--progress'])
#print a
'''
def repoSearch(self, pkgName, repos):
pkgs = yb.pkgSack.returnNewestByNameArch(patterns=pkgName)
for pkg in pkgs:
print "%s: %s" % (pkg, pkg.summary)
''' | #!/usr/bin/env python2
import sys, os, json, errno, subprocess, yum
class yumoter:
def __init__(self, configFile, repobasepath):
self.repobasepath = repobasepath
self.reloadConfig(configFile)
self.yb = yum.YumBase()
self.yb.setCacheDir()
def reloadConfig(self, jsonFile):
self.repoConfig = self._getConfig(jsonFile)
self._getPaths()
def _getConfig(self, jsonFile):
fh = open(jsonFile, 'r')
jsonOutput = json.load(fh)
fh.close()
return jsonOutput
def _mkdir_p(self, path):
try:
os.makedirs(path)
except OSError as exc:
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def _runRsync(self, rsrc, rdst, args):
# str(rsrc), str(rdst), list(args)
sysCall = ['rsync'] + args + [rsrc, rdst]
rsyncStdout = []
rsyncStderr = []
p = subprocess.Popen(sysCall, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
for line in iter(p.stdout.readline, ""):
stdoutLine = line.strip() + '\r\n'
rsyncStdout.append(stdoutLine)
sys.stdout.write(stdoutLine)
sys.stdout.flush()
for line in iter(p.stderr.readline, ""):
stderrLine = line.strip() + '\r\n'
rsyncStderr.append(stderrLine)
sys.stderr.write(stderrLine)
sys.stderr.flush()
return (stdoutLine, stderrLine)
# TODO check return status please. Stop coding like a 12 year old.
def getDeps(self, pkgObj):
depDicts = yb.findDeps([pkgObj])
return depDicts
def _getPaths(self):
for repo in self.repoConfig:
repopath = []
# Does this repo have a path for promotion?
if 'promotionpath' in self.repoConfig[repo]:
for promopath in self.repoConfig[repo]['promotionpath']:
repopath.append("%s/%s/%s" % (self.repobasepath, self.repoConfig[repo]['path'], promopath))
else:
# repo does not have a path for promotion
repopath.append("%s/%s" % (self.repobasepath, self.repoConfig[repo]['path']))
self.repoConfig[repo]['fullpaths'] = repopath
def _mkPaths(self):
masterPathList = []
for repo in self.repoConfig:
if 'promotionpath' in self.repoConfig[repo]:
for entry in self.repoConfig[repo]['fullpaths']:
masterPathList.append(entry)
for entry in masterPathList:
if not os.path.isdir(entry):
print "creating missing dir: %s" % entry
self._mkdir_p(entry)
def syncRepos(self):
for repo in self.repoConfig:
# Only repos with upstream set need to be synced.
if 'upstream' in self.repoConfig[repo]:
# If the dst dir doesn't exist, create it.
if not os.path.isdir(self.repoConfig[repo]['fullpaths'][0]):
self._mkPaths()
else:
print "foo"
print self.repoConfig[repo]['fullpaths']
#a = self._runRsync(self.repoConfig[repo]['upstream'], self.repoConfig[repo]['fullpaths'][0], ['-av', '--progress'])
#print a
else:
print "wtf"
print self.repoConfig[repo]
'''
def repoSearch(self, pkgName, repos):
pkgs = yb.pkgSack.returnNewestByNameArch(patterns=pkgName)
for pkg in pkgs:
print "%s: %s" % (pkg, pkg.summary)
''' | Python | 0.000003 |
39c34860fa9992f38892aa026c5b0c6547bd4b23 | Fix flaky evergreen test | tests/content/test_content_manager.py | tests/content/test_content_manager.py | from django.test import override_settings
from django.utils import timezone
from bulbs.campaigns.models import Campaign
from bulbs.content.models import Content
from bulbs.utils.test import make_content, BaseIndexableTestCase
from example.testcontent.models import TestContentObj, TestContentObjTwo
class ContentManagerTestCase(BaseIndexableTestCase):
def setUp(self):
super(ContentManagerTestCase, self).setUp()
campaign = Campaign.objects.create(
sponsor_name="TheCobbler",
start_date=timezone.now() - timezone.timedelta(days=5),
end_date=timezone.now() + timezone.timedelta(days=5)
)
make_content(TestReadingListObj, evergreen=True, published=timezone.now(), _quantity=50)
make_content(TestContentObj, campaign=campaign, published=timezone.now(), _quantity=50)
Content.search_objects.refresh()
def test_sponsored(self):
sponsored = Content.search_objects.sponsored().extra(from_=0, size=50)
qs = TestContentObj.objects.filter(campaign__isnull=False)
self.assertEqual(qs.count(), sponsored.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in sponsored])
)
def test_evergreen(self):
evergreen = Content.search_objects.evergreen().extra(from_=0, size=50)
qs = Content.objects.filter(evergreen=True)
self.assertEqual(qs.count(), evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
@override_settings(VIDEO_DOC_TYPE=TestContentObjTwo.search_objects.mapping.doc_type)
def test_evergreen_video(self):
make_content(TestContentObjTwo, evergreen=True, published=self.now, _quantity=12)
make_content(TestContentObjTwo, published=self.now, _quantity=12)
Content.search_objects.refresh()
evergreen = Content.search_objects.evergreen_video().extra(from_=0, size=50)
qs = TestContentObjTwo.objects.filter(evergreen=True)
self.assertEqual(12, evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
| from django.test import override_settings
from django.utils import timezone
from bulbs.campaigns.models import Campaign
from bulbs.content.models import Content
from bulbs.utils.test import make_content, BaseIndexableTestCase
from example.testcontent.models import TestContentObj, TestContentObjTwo
class ContentManagerTestCase(BaseIndexableTestCase):
def setUp(self):
super(ContentManagerTestCase, self).setUp()
campaign = Campaign.objects.create(
sponsor_name="TheCobbler",
start_date=timezone.now() - timezone.timedelta(days=5),
end_date=timezone.now() + timezone.timedelta(days=5)
)
make_content(evergreen=True, published=timezone.now(), _quantity=50)
make_content(TestContentObj, campaign=campaign, published=timezone.now(), _quantity=50)
Content.search_objects.refresh()
def test_sponsored(self):
sponsored = Content.search_objects.sponsored().extra(from_=0, size=50)
qs = TestContentObj.objects.filter(campaign__isnull=False)
self.assertEqual(qs.count(), sponsored.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in sponsored])
)
def test_evergreen(self):
evergreen = Content.search_objects.evergreen().extra(from_=0, size=50)
qs = Content.objects.filter(evergreen=True)
self.assertEqual(qs.count(), evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
@override_settings(VIDEO_DOC_TYPE=TestContentObjTwo.search_objects.mapping.doc_type)
def test_evergreen_video(self):
make_content(TestContentObjTwo, evergreen=True, published=self.now, _quantity=12)
make_content(TestContentObjTwo, published=self.now, _quantity=12)
Content.search_objects.refresh()
evergreen = Content.search_objects.evergreen_video().extra(from_=0, size=50)
qs = TestContentObjTwo.objects.filter(evergreen=True)
self.assertEqual(12, evergreen.count())
self.assertEqual(
sorted([obj.id for obj in qs]),
sorted([obj.id for obj in evergreen])
)
| Python | 0.000003 |
3c82d0ca4a314ffd052b99ece7afec6aaea4e063 | Update BatchKwargs to_id tests | tests/datasource/test_batch_kwargs.py | tests/datasource/test_batch_kwargs.py | import pytest
import os
from freezegun import freeze_time
try:
from unittest import mock
except ImportError:
import mock
from great_expectations.datasource.types import *
def test_batch_kwargs_fingerprint():
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv"
}
)
# When there is only a single "important" key used in batch_kwargs, the ID can prominently include it
assert test_batch_kwargs.to_id() == "path:/data/test.csv"
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"reader_method": "read_csv",
"reader_options": {
"iterator": True,
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
}
)
# When there are multiple relevant keys we use the hash of the batch_kwargs dictionary
print(test_batch_kwargs.to_id())
assert test_batch_kwargs.to_id() == "8607e071c6383509c8cd8f4c1ea65518"
def test_batch_kwargs_attributes_and_keys():
# When BatchKwargs are typed, the required keys should become accessible via dot notation and immutable
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"reader_method": "read_csv",
"reader_options": {
"iterator": True,
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
}
)
assert test_batch_kwargs.path == "/data/test.csv"
assert test_batch_kwargs["path"] == test_batch_kwargs.path
# We do not allow setting the special attributes this way
with pytest.raises(AttributeError):
test_batch_kwargs.path = "/a/new/path.csv"
# Nor do we provide attribute-style access to unreserved names
with pytest.raises(AttributeError):
assert test_batch_kwargs.names == ["start", "type", "quantity", "end"]
# But we can access and set even protected names using dictionary notation
assert test_batch_kwargs["reader_options"]["names"] == ["start", "type", "quantity", "end"]
test_batch_kwargs["path"] = "/a/new/path.csv"
assert test_batch_kwargs.path == "/a/new/path.csv"
| import pytest
import os
from freezegun import freeze_time
try:
from unittest import mock
except ImportError:
import mock
from great_expectations.datasource.types import *
@freeze_time("1955-11-05")
def test_batch_kwargs_fingerprint():
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv"
}
)
#demonstrate *output* kwargs post-datasource/generator
# When there is only a single "important" key used in batch_kwargs, the ID can prominently include it
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="19551105T000000.000000Z",
fingerprint="path:/data/test.csv")
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"partition_id": "20190101"
}
)
# When partition_id is explicitly included, we can extract it and potentially still have a human readable id
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="20190101",
fingerprint="path:/data/test.csv")
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"iterator": True,
"partition_id": "3",
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
)
# When there are multiple relevant keys we use the hash of the batch_kwargs dictionary
assert test_batch_kwargs.batch_fingerprint == BatchFingerprint(
partition_id="3",
fingerprint="a5d67721928ee13317a81459818a556b")
def test_batch_kwargs_from_dict():
test_batch_kwargs = {
"path": "/data/test.csv",
"partition_id": "1"
}
# The build_batch_fingerprint convenience method makes it possible to build a batch_fingerprint from a dict.
# HOWEVER, using it can be difficult since the default-ignored keys may depend on a specific batch_kwargs type
assert BatchKwargs.build_batch_fingerprint(test_batch_kwargs) == BatchFingerprint(
partition_id="1",
fingerprint="path:/data/test.csv")
def test_batch_kwargs_attributes_and_keys():
# When BatchKwargs are typed, the required keys should become accessible via dot notation and immutable
test_batch_kwargs = PathBatchKwargs(
{
"path": "/data/test.csv",
"iterator": True,
"partition_id": "3",
"chunksize": 2e7,
"parse_dates": [0, 3],
"names": ["start", "type", "quantity", "end"]
}
)
assert test_batch_kwargs.path == "/data/test.csv"
assert test_batch_kwargs["path"] == test_batch_kwargs.path
# We do not allow setting the special attributes this way
with pytest.raises(AttributeError):
test_batch_kwargs.path = "/a/new/path.csv"
# Nor do we provide attribute-style access to unreserved names
with pytest.raises(AttributeError):
assert test_batch_kwargs.names == ["start", "type", "quantity", "end"]
# But we can access and set even protected names using dictionary notation
assert test_batch_kwargs["names"] == ["start", "type", "quantity", "end"]
test_batch_kwargs["path"] = "/a/new/path.csv"
assert test_batch_kwargs.path == "/a/new/path.csv"
| Python | 0 |
8c8bc1ef8e3ba7519d4612856a420ed410974e12 | add redactor on installed apps settings | opps/core/__init__.py | opps/core/__init__.py | # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django.conf import settings
trans_app_label = _('Opps')
settings.INSTALLED_APPS += ('redactor',)
| # -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
trans_app_label = _('Opps')
| Python | 0 |
9412b362b649a8eaa62448bef5772b0f001efdbb | Remove the download syncing as it's no longer part of Conveyor | conveyor/core.py | conveyor/core.py | from __future__ import absolute_import
from __future__ import division
import bz2
import csv
import logging
import logging.config
import io
import time
import urlparse
import lxml.html
import redis
import requests
import slumber
import yaml
from apscheduler.scheduler import Scheduler
from conveyor.processor import Processor, get_key
# @@@ Switch all Urls to SSL
# @@@ Switch to better exception classes
logger = logging.getLogger(__name__)
class Conveyor(object):
def __init__(self, config_file=None, *args, **kwargs):
super(Conveyor, self).__init__(*args, **kwargs)
if config_file is None:
config_file = "config.yml"
with open(config_file) as f:
self.config = yaml.safe_load(f.read())
logging.config.dictConfig(self.config["logging"])
self.redis = redis.StrictRedis(**self.config.get("redis", {}).get("connection", {}))
def run(self):
self.scheduler = Scheduler()
if self.config["conveyor"].get("schedule", {}).get("packages", {}):
self.scheduler.add_interval_job(self.packages, **self.config["conveyor"]["schedule"]["packages"])
self.scheduler.start()
try:
while True:
time.sleep(999)
except KeyboardInterrupt:
logger.info("Shutting down Conveyor...")
self.scheduler.shutdown(wait=False)
def packages(self):
if not self.redis.get(get_key(self.config.get("redis", {}).get("prefix", None), "pypi:since")):
# This is the first time we've ran so we need to do a bulk import
raise Exception(" Cannot process changes with no value for the last successful run.")
warehouse = slumber.API(
self.config["conveyor"]["warehouse"]["url"],
auth=(
self.config["conveyor"]["warehouse"]["auth"]["username"],
self.config["conveyor"]["warehouse"]["auth"]["password"],
)
)
session = requests.session(verify=self.config["conveyor"].get("verify", True))
processor = Processor(
index=self.config["conveyor"]["index"],
warehouse=warehouse,
session=session,
store=self.redis,
store_prefix=self.config.get("redis", {}).get("prefix", None)
)
processor.process()
| from __future__ import absolute_import
from __future__ import division
import bz2
import csv
import logging
import logging.config
import io
import time
import urlparse
import lxml.html
import redis
import requests
import slumber
import yaml
from apscheduler.scheduler import Scheduler
from conveyor.processor import Processor, get_key
# @@@ Switch all Urls to SSL
# @@@ Switch to better exception classes
logger = logging.getLogger(__name__)
class Conveyor(object):
def __init__(self, config_file=None, *args, **kwargs):
super(Conveyor, self).__init__(*args, **kwargs)
if config_file is None:
config_file = "config.yml"
with open(config_file) as f:
self.config = yaml.safe_load(f.read())
logging.config.dictConfig(self.config["logging"])
self.redis = redis.StrictRedis(**self.config.get("redis", {}).get("connection", {}))
def run(self):
self.scheduler = Scheduler()
if self.config["conveyor"].get("schedule", {}).get("packages", {}):
self.scheduler.add_interval_job(self.packages, **self.config["conveyor"]["schedule"]["packages"])
if self.config["conveyor"].get("schedule", {}).get("downloads", {}):
self.scheduler.add_interval_job(self.downloads, **self.config["conveyor"]["schedule"]["downloads"])
self.scheduler.start()
try:
while True:
time.sleep(999)
except KeyboardInterrupt:
logger.info("Shutting down Conveyor...")
self.scheduler.shutdown(wait=False)
def packages(self):
if not self.redis.get(get_key(self.config.get("redis", {}).get("prefix", None), "pypi:since")):
# This is the first time we've ran so we need to do a bulk import
raise Exception(" Cannot process changes with no value for the last successful run.")
warehouse = slumber.API(
self.config["conveyor"]["warehouse"]["url"],
auth=(
self.config["conveyor"]["warehouse"]["auth"]["username"],
self.config["conveyor"]["warehouse"]["auth"]["password"],
)
)
session = requests.session(verify=self.config["conveyor"].get("verify", True))
processor = Processor(
index=self.config["conveyor"]["index"],
warehouse=warehouse,
session=session,
store=self.redis,
store_prefix=self.config.get("redis", {}).get("prefix", None)
)
processor.process()
def downloads(self):
session = requests.session(verify=self.config["conveyor"].get("verify", True))
warehouse = slumber.API(
self.config["conveyor"]["warehouse"]["url"],
auth=(
self.config["conveyor"]["warehouse"]["auth"]["username"],
self.config["conveyor"]["warehouse"]["auth"]["password"],
)
)
# Get a listing of all the Files
resp = session.get(self.config["conveyor"]["stats"])
resp.raise_for_status()
html = lxml.html.fromstring(resp.content)
urls = [(urlparse.urljoin(self.config["conveyor"]["stats"], x), x) for x in html.xpath("//a/@href")]
for url, statfile in urls:
if not url.endswith(".bz2"):
continue
date = statfile[:-4]
year, month, day = date.split("-")
last_modified_key = get_key(self.config.get("redis", {}).get("prefix", ""), "pypi:download:last_modified:%s" % url)
last_modified = self.redis.get(last_modified_key)
headers = {"If-Modified-Since": last_modified} if last_modified else None
resp = session.get(url, headers=headers, prefetch=True)
if resp.status_code == 304:
logger.info("Skipping %s, it has not been modified since %s", statfile, last_modified)
continue
resp.raise_for_status()
logger.info("Computing download counts from %s", statfile)
data = bz2.decompress(resp.content)
csv_r = csv.DictReader(io.BytesIO(data), ["project", "filename", "user_agent", "downloads"])
for row in csv_r:
row["date"] = date
row["downloads"] = int(row["downloads"])
# See if we have a Download object for this yet
downloads = warehouse.downloads.get(project=row["project"], filename=row["filename"], date__year=year, date__month=month, date__day=day, user_agent=row["user_agent"])
if downloads["meta"]["total_count"] == 1:
warehouse.downloads(downloads["objects"][0]["id"]).put(row)
elif downloads["meta"]["total_count"] == 0:
warehouse.downloads.post(row)
else:
RuntimeError("There are More than 1 Download items returned")
if "Last-Modified" in resp.headers:
self.redis.set(last_modified_key, resp.headers["Last-Modified"])
else:
self.redis.delete(last_modified_key)
break
| Python | 0 |
6a02c5e1844ad7d1b9ae50cd5dbae6975fb685ee | Make internal error more clear | numba/error.py | numba/error.py | import traceback
def format_pos(node):
if node is not None and hasattr(node, 'lineno'):
return "%s:%s: " % (node.lineno, node.col_offset)
else:
return ""
class NumbaError(Exception):
"Some error happened during compilation"
def __init__(self, node, msg=None, *args):
if msg is None:
node, msg = None, node
self.node = node
self.msg = msg
self.args = args
def __str__(self):
try:
pos = format_pos(self.node)
msg = "%s%s %s" % (pos, self.msg, " ".join(map(str, self.args)))
return msg.rstrip()
except:
traceback.print_exc()
return "<internal error creating numba error message>"
class InternalError(NumbaError):
"Indicates a compiler bug"
class _UnknownAttribute(Exception):
pass
| import traceback
def format_pos(node):
if node is not None and hasattr(node, 'lineno'):
return "%s:%s: " % (node.lineno, node.col_offset)
else:
return ""
class NumbaError(Exception):
"Some error happened during compilation"
def __init__(self, node, msg=None, *args):
if msg is None:
node, msg = None, node
self.node = node
self.msg = msg
self.args = args
def __str__(self):
try:
pos = format_pos(self.node)
msg = "%s%s %s" % (pos, self.msg, " ".join(map(str, self.args)))
return msg.rstrip()
except:
traceback.print_exc()
return ""
class InternalError(NumbaError):
"Indicates a compiler bug"
class _UnknownAttribute(Exception):
pass | Python | 0.000321 |
619269367c9e38fe55ae8667ead8486f63467d2b | Fix case where apache passes DN in the format we expect rather than ssl format. | src/python/apache_utils.py | src/python/apache_utils.py | """
Apache Utils.
Tools for dealing with credential checking from X509 SSL certificates.
These are useful when using Apache as a reverse proxy to check user
credentials against a local DB.
"""
from collections import namedtuple
import cherrypy
from sqlalchemy_utils import create_db, db_session
from tables import Users
VerifiedUser = namedtuple('VerifiedUser', ('id', 'dn', 'ca', 'admin'))
def name_from_dn(client_dn):
"""
Get human-readable name from DN.
Attempt to determine a meaningful name from a
clients DN. Requires the DN to have already been
converted to the more usual slash delimeted style.
Args:
client_dn (str): The client DN
Returns:
str: The human-readable name
"""
cns = (token.strip('CN= ') for token in client_dn.split('/')
if token.startswith('CN='))
return sorted(cns, key=len)[-1]
def apache_client_convert(client_dn, client_ca=None):
"""
Convert Apache style client certs.
Convert from the Apache comma delimited style to the
more usual slash delimited style.
Args:
client_dn (str): The client DN
client_ca (str): [Optional] The client CA
Returns:
tuple: The converted client (DN, CA)
"""
if not client_dn.startswith('/'):
client_dn = '/' + '/'.join(reversed(client_dn.split(',')))
if client_ca is not None:
client_ca = '/' + '/'.join(reversed(client_ca.split(',')))
return client_dn, client_ca
class CredentialDispatcher(object):
"""
Dispatcher that checks SSL credentials.
This dispatcher is a wrapper that simply checks SSL credentials and
then hands off to the wrapped dispatcher.
"""
def __init__(self, users_dburl, dispatcher, admin_only=False):
"""Initialise."""
self._users_dburl = users_dburl
self._dispatcher = dispatcher
self._admin_only = admin_only
def __call__(self, path):
"""Dispatch."""
required_headers = set(['Ssl-Client-S-Dn', 'Ssl-Client-I-Dn', 'Ssl-Client-Verify'])
missing_headers = required_headers.difference(cherrypy.request.headers.iterkeys())
if missing_headers:
raise cherrypy.HTTPError(401, 'Unauthorized: Incomplete certificate information '
'available, required: %s' % list(missing_headers))
client_dn, client_ca = apache_client_convert(cherrypy.request.headers['Ssl-Client-S-Dn'],
cherrypy.request.headers['Ssl-Client-I-Dn'])
client_verified = cherrypy.request.headers['Ssl-Client-Verify']
if client_verified != 'SUCCESS':
raise cherrypy.HTTPError(401, 'Unauthorized: Cert not verified for user DN: %s, CA: %s.'
% (client_dn, client_ca))
create_db(self._users_dburl)
with db_session(self._users_dburl) as session:
users = session.query(Users)\
.filter(Users.dn == client_dn)\
.filter(Users.ca == client_ca)\
.all()
if not users:
raise cherrypy.HTTPError(403, 'Forbidden: Unknown user. user: (%s, %s)'
% (client_dn, client_ca))
if len(users) > 1:
raise cherrypy.HTTPError(500, 'Internal Server Error: Duplicate user detected. user: (%s, %s)'
% (client_dn, client_ca))
if users[0].suspended:
raise cherrypy.HTTPError(403, 'Forbidden: User is suspended by VO. user: (%s, %s)'
% (client_dn, client_ca))
if self._admin_only and not users[0].admin:
raise cherrypy.HTTPError(403, 'Forbidden: Admin users only')
cherrypy.request.verified_user = VerifiedUser(users[0].id,
users[0].dn,
users[0].ca,
users[0].admin)
return self._dispatcher(path)
__all__ = ('VerifiedUser', 'name_from_dn', 'apache_client_convert', 'CredentialDispatcher')
| """
Apache Utils.
Tools for dealing with credential checking from X509 SSL certificates.
These are useful when using Apache as a reverse proxy to check user
credentials against a local DB.
"""
from collections import namedtuple
import cherrypy
from sqlalchemy_utils import create_db, db_session
from tables import Users
VerifiedUser = namedtuple('VerifiedUser', ('id', 'dn', 'ca', 'admin'))
def name_from_dn(client_dn):
"""
Get human-readable name from DN.
Attempt to determine a meaningful name from a
clients DN. Requires the DN to have already been
converted to the more usual slash delimeted style.
Args:
client_dn (str): The client DN
Returns:
str: The human-readable name
"""
cns = (token.strip('CN= ') for token in client_dn.split('/')
if token.startswith('CN='))
return sorted(cns, key=len)[-1]
def apache_client_convert(client_dn, client_ca=None):
"""
Convert Apache style client certs.
Convert from the Apache comma delimited style to the
more usual slash delimited style.
Args:
client_dn (str): The client DN
client_ca (str): [Optional] The client CA
Returns:
tuple: The converted client (DN, CA)
"""
client_dn = '/' + '/'.join(reversed(client_dn.split(',')))
if client_ca is not None:
client_ca = '/' + '/'.join(reversed(client_ca.split(',')))
return client_dn, client_ca
class CredentialDispatcher(object):
"""
Dispatcher that checks SSL credentials.
This dispatcher is a wrapper that simply checks SSL credentials and
then hands off to the wrapped dispatcher.
"""
def __init__(self, users_dburl, dispatcher, admin_only=False):
"""Initialise."""
self._users_dburl = users_dburl
self._dispatcher = dispatcher
self._admin_only = admin_only
def __call__(self, path):
"""Dispatch."""
required_headers = set(['Ssl-Client-S-Dn', 'Ssl-Client-I-Dn', 'Ssl-Client-Verify'])
missing_headers = required_headers.difference(cherrypy.request.headers.iterkeys())
if missing_headers:
raise cherrypy.HTTPError(401, 'Unauthorized: Incomplete certificate information '
'available, required: %s' % list(missing_headers))
client_dn, client_ca = apache_client_convert(cherrypy.request.headers['Ssl-Client-S-Dn'],
cherrypy.request.headers['Ssl-Client-I-Dn'])
client_verified = cherrypy.request.headers['Ssl-Client-Verify']
if client_verified != 'SUCCESS':
raise cherrypy.HTTPError(401, 'Unauthorized: Cert not verified for user DN: %s, CA: %s.'
% (client_dn, client_ca))
create_db(self._users_dburl)
with db_session(self._users_dburl) as session:
users = session.query(Users)\
.filter(Users.dn == client_dn)\
.filter(Users.ca == client_ca)\
.all()
if not users:
raise cherrypy.HTTPError(403, 'Forbidden: Unknown user. user: (%s, %s)'
% (client_dn, client_ca))
if len(users) > 1:
raise cherrypy.HTTPError(500, 'Internal Server Error: Duplicate user detected. user: (%s, %s)'
% (client_dn, client_ca))
if users[0].suspended:
raise cherrypy.HTTPError(403, 'Forbidden: User is suspended by VO. user: (%s, %s)'
% (client_dn, client_ca))
if self._admin_only and not users[0].admin:
raise cherrypy.HTTPError(403, 'Forbidden: Admin users only')
cherrypy.request.verified_user = VerifiedUser(users[0].id,
users[0].dn,
users[0].ca,
users[0].admin)
return self._dispatcher(path)
__all__ = ('VerifiedUser', 'name_from_dn', 'apache_client_convert', 'CredentialDispatcher')
| Python | 0 |
429bf52eb482955cfe195708898ce275e1a72dcb | Validate input. | src/devilry_qualifiesforexam/devilry_qualifiesforexam/rest/preview.py | src/devilry_qualifiesforexam/devilry_qualifiesforexam/rest/preview.py | from djangorestframework.views import View
from djangorestframework.permissions import IsAuthenticated
from djangorestframework.response import ErrorResponse
from djangorestframework import status as statuscodes
from django.shortcuts import get_object_or_404
from devilry_qualifiesforexam.pluginhelpers import create_sessionkey
from devilry.apps.core.models import Period
from devilry.utils.groups_groupedby_relatedstudent_and_assignment import GroupsGroupedByRelatedStudentAndAssignment
from devilry_subjectadmin.rest.auth import IsPeriodAdmin
class Preview(View):
"""
Generate the data required to provide a preview for the qualified for exam wizard.
# GET
## Parameters
The following parameters are required:
- ``periodid``: The ID of the period. Supplied as the last part of the URL-path.
404 is returned unless the user is admin on this period.
- ``pluginsessionid``: Forwarded from the first page of the wizard. It is an ID
used to lookup the output from the plugin.
## Returns
An object/dict with the following attributes:
- ``pluginoutput``: The serialized output from the plugin.
- ``perioddata``: All results for all students on the period.
"""
permissions = (IsAuthenticated, IsPeriodAdmin)
def get(self, request, id):
pluginsessionid = self.request.GET.get('pluginsessionid', None)
if not pluginsessionid:
raise ErrorResponse(statuscodes.HTTP_400_BAD_REQUEST,
{'detail': '``pluginsessionid`` is a required parameter'})
period = get_object_or_404(Period, pk=id)
previewdata = self.request.session[create_sessionkey(pluginsessionid)]
grouper = GroupsGroupedByRelatedStudentAndAssignment(period)
return {
'perioddata': grouper.serialize(),
'pluginoutput': previewdata.serialize()
} | from djangorestframework.views import View
from djangorestframework.permissions import IsAuthenticated
from django.shortcuts import get_object_or_404
from devilry_qualifiesforexam.pluginhelpers import create_sessionkey
from devilry.apps.core.models import Period
from devilry.utils.groups_groupedby_relatedstudent_and_assignment import GroupsGroupedByRelatedStudentAndAssignment
from devilry_subjectadmin.rest.auth import IsPeriodAdmin
class Preview(View):
"""
Generate the data required to provide a preview for the qualified for exam wizard.
# GET
## Parameters
The following parameters are required:
- ``periodid``: The ID of the period. Supplied as the last part of the URL-path.
404 is returned unless the user is admin on this period.
- ``pluginsessionid``: Forwarded from the first page of the wizard. It is an ID
used to lookup the output from the plugin.
## Returns
An object/dict with the following attributes:
- ``pluginoutput``: The serialized output from the plugin.
- ``perioddata``: All results for all students on the period.
"""
permissions = (IsAuthenticated, IsPeriodAdmin)
def get(self, request, id):
pluginsessionid = self.request.GET['pluginsessionid']
period = get_object_or_404(Period, pk=id)
previewdata = self.request.session[create_sessionkey(pluginsessionid)]
grouper = GroupsGroupedByRelatedStudentAndAssignment(period)
return {
'perioddata': grouper.serialize(),
'pluginoutput': previewdata.serialize()
} | Python | 0.000017 |
8ab7ad1f6aee485c64a7e1347c76e628cc820ba8 | add some docker Builder args | src/py/gopythongo/builders/docker.py | src/py/gopythongo/builders/docker.py | # -* encoding: utf-8 *-
import argparse
import gopythongo.shared.docker_args
from gopythongo.utils import print_info, highlight
from gopythongo.builders import BaseBuilder
from typing import Any
class DockerBuilder(BaseBuilder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@property
def builder_name(self) -> str:
return "docker"
def add_args(self, parser: argparse.ArgumentParser) -> None:
gopythongo.shared.docker_args.add_shared_args(parser)
gp_docker = parser.add_argument_group("Docker Builder options")
gp_docker.add_argument("--docker-buildfile", dest="docker_buildfile", default=None,
help="Specify a Dockerfile to build the the build environment. The build commands will "
"then be executed inside the resulting container.")
gp_docker.add_argument("--docker-leave-containers", dest="docker_leave_containers", action="store_true",
default=False, env_var="DOCKER_LEAVE_CONTAINERS",
help="After creating a build environment and a runtime container, if this option is "
"used, GoPythonGo will not use 'docker rm' and 'docker rmi' to clean up the "
"resulting containers.")
def validate_args(self, args: argparse.Namespace) -> None:
gopythongo.shared.docker_args.validate_shared_args(args)
def build(self, args: argparse.Namespace) -> None:
print_info("Building with %s" % highlight("docker"))
builder_class = DockerBuilder
| # -* encoding: utf-8 *-
import argparse
import gopythongo.shared.docker_args
from gopythongo.utils import print_info, highlight
from gopythongo.builders import BaseBuilder
from typing import Any
class DockerBuilder(BaseBuilder):
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
@property
def builder_name(self) -> str:
return u"docker"
def add_args(self, parser: argparse.ArgumentParser) -> None:
gopythongo.shared.docker_args.add_shared_args(parser)
def validate_args(self, args: argparse.Namespace) -> None:
gopythongo.shared.docker_args.validate_shared_args(args)
def build(self, args: argparse.Namespace) -> None:
print_info("Building with %s" % highlight("docker"))
builder_class = DockerBuilder
| Python | 0 |
6248a0b813fc6598d964639ad696ecd506015918 | Rename to TaarifaAPI | taarifa_api/settings.py | taarifa_api/settings.py | """Global API configuration."""
from os import environ
from urlparse import urlparse
from schemas import facility_schema, request_schema, resource_schema, \
service_schema
API_NAME = 'TaarifaAPI'
URL_PREFIX = 'api'
if 'EVE_DEBUG' in environ:
DEBUG = True
if 'MONGOLAB_URI' in environ:
url = urlparse(environ['MONGOLAB_URI'])
MONGO_HOST = url.hostname
MONGO_PORT = url.port
MONGO_USERNAME = url.username
MONGO_PASSWORD = url.password
MONGO_DBNAME = url.path[1:]
else:
MONGO_DBNAME = API_NAME
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH) and deletes of individual items
# (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'DELETE']
services = {
"schema": service_schema,
}
requests = {
"schema": request_schema,
"source": "requests",
"key": "service_code",
}
facilities = {
"item_title": "facility",
"schema": facility_schema,
}
resources = {
"schema": resource_schema,
"versioning": True,
"source": "resources",
"key": "facility_code",
}
DOMAIN = {
'services': services,
'requests': requests,
'facilities': facilities,
'resources': resources,
}
# FIXME: Temporarily allow CORS requests for development purposes
X_DOMAINS = "*"
| """Global API configuration."""
from os import environ
from urlparse import urlparse
from schemas import facility_schema, request_schema, resource_schema, \
service_schema
API_NAME = 'Taarifa'
URL_PREFIX = 'api'
if 'EVE_DEBUG' in environ:
DEBUG = True
if 'MONGOLAB_URI' in environ:
url = urlparse(environ['MONGOLAB_URI'])
MONGO_HOST = url.hostname
MONGO_PORT = url.port
MONGO_USERNAME = url.username
MONGO_PASSWORD = url.password
MONGO_DBNAME = url.path[1:]
else:
MONGO_DBNAME = API_NAME
# Enable reads (GET), inserts (POST) and DELETE for resources/collections
# (if you omit this line, the API will default to ['GET'] and provide
# read-only access to the endpoint).
RESOURCE_METHODS = ['GET', 'POST', 'DELETE']
# Enable reads (GET), edits (PATCH) and deletes of individual items
# (defaults to read-only item access).
ITEM_METHODS = ['GET', 'PATCH', 'DELETE']
services = {
"schema": service_schema,
}
requests = {
"schema": request_schema,
"source": "requests",
"key": "service_code",
}
facilities = {
"item_title": "facility",
"schema": facility_schema,
}
resources = {
"schema": resource_schema,
"versioning": True,
"source": "resources",
"key": "facility_code",
}
DOMAIN = {
'services': services,
'requests': requests,
'facilities': facilities,
'resources': resources,
}
# FIXME: Temporarily allow CORS requests for development purposes
X_DOMAINS = "*"
| Python | 0.999999 |
ab8930c771d71c09186f94fb554ee0e6d82cea43 | Remove ignore source from multi push notification commands #11 | notification/management/commands/multipush.py | notification/management/commands/multipush.py | from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from notification.apns.apns import APNs, Payload, PayloadAlert
from notification.models import DeviceToken, CertFile
import logging
import os.path
import random
import sys
CERT_FILE_UPLOAD_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'files/'
)
class Command(BaseCommand):
help = 'Send Push Notification to multi device tokens.'
def add_arguments(self, parser):
parser.add_argument(
"-s", "--sandbox",
action="store_true",
dest="sandbox",
default=False,
help="Use apple sandbox.",
)
parser.add_argument(
'-t', '--token',
action='store',
nargs='+',
type=str,
dest='device_tokens',
help='Target device tokens.',
)
parser.add_argument(
'--title',
action='store',
type=str,
metavar='TITLE',
dest='title',
help='Title displayed in push notification.',
)
parser.add_argument(
'--subtitle',
action='store',
type=str,
metavar='SUBTITLE',
dest='subtitle',
help='Subtitle displayes in push notification.',
)
parser.add_argument(
'--body',
action='store',
type=str,
metavar='BODY',
dest='body',
help='Body displayed in push notification.',
)
parser.add_argument(
'--sound',
action='store',
type=str,
metavar='SOUND',
dest='sound',
default='default',
help='Sounds to be heard when push notification is received.',
)
parser.add_argument(
'--badge',
action='store',
type=int,
metavar='BADGE',
dest='badge',
default=1,
help='Badge displayed on application icon.',
)
parser.add_argument(
'-c', '--contentavailable',
action='store_true',
dest='content_available',
default=False,
help='Use content-available. (Support for iOS7 or higher)',
)
parser.add_argument(
'-m', '--mutablecontent',
action='store_true',
dest='mutable_content',
default=False,
help='Use mutable-content. (Support for iOS9 or higher)',
)
def handle(self, *args, **options):
error = False
if options['device_tokens'] is None:
try:
raise ValueError('Please specify a device tokens (-t or --token)')
except ValueError as e:
error = True
logging.error(e)
if options['title'] is None:
try:
raise ValueError('Please input title in push notification (--title)')
except ValueError as e:
error = True
logging.error(e)
if error:
sys.exit()
device_tokens = list(filter(lambda device_token:
DeviceToken.objects.filter(device_token=device_token).count() > 0,
options['device_tokens']))
_ = map(lambda item: logging.warning('There is no match for the specified device token: {}'.format(item)),
list(set(options['device_tokens']) - set(device_tokens)))
try:
cert_file = CertFile.objects.get(target_mode=int(not options['sandbox']), is_use=True)
except ObjectDoesNotExist:
sys.exit(logging.error('Certificate file has not been uploaded'))
apns = APNs(use_sandbox=options['sandbox'], cert_file=CERT_FILE_UPLOAD_DIR + cert_file.filename, enhanced=True)
identifier = random.getrandbits(32)
payload_alert = PayloadAlert(title=options['title'], subtitle=options['subtitle'], body=options['body'])
payload = Payload(alert=payload_alert if payload_alert.body is not None else payload_alert.title,
sound=options['sound'],
badge=options['badge'],
content_available=options['content_available'],
mutable_content=options['mutable_content'])
_ = map(lambda device_token:
apns.gateway_server.send_notification(device_token, payload, identifier=identifier),
device_tokens)
| from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand
from notification.apns.apns import APNs, Frame, Payload, PayloadAlert
from notification.models import DeviceToken, CertFile
import logging
import os.path
import random
import sys
import time
CERT_FILE_UPLOAD_DIR = os.path.join(
os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
'files/'
)
class Command(BaseCommand):
help = 'Send Push Notification to multi device tokens.'
def __init__(self):
self.frame = Frame()
self.expiry = int(time.time() + 3600)
self.priority = 10
def add_arguments(self, parser):
parser.add_argument(
"-s", "--sandbox",
action="store_true",
dest="sandbox",
default=False,
help="Use apple sandbox.",
)
parser.add_argument(
'-t', '--token',
action='store',
nargs='+',
type=str,
dest='device_tokens',
help='Target device tokens.',
)
parser.add_argument(
'--title',
action='store',
type=str,
metavar='TITLE',
dest='title',
help='Title displayed in push notification.',
)
parser.add_argument(
'--subtitle',
action='store',
type=str,
metavar='SUBTITLE',
dest='subtitle',
help='Subtitle displayes in push notification.',
)
parser.add_argument(
'--body',
action='store',
type=str,
metavar='BODY',
dest='body',
help='Body displayed in push notification.',
)
parser.add_argument(
'--sound',
action='store',
type=str,
metavar='SOUND',
dest='sound',
default='default',
help='Sounds to be heard when push notification is received.',
)
parser.add_argument(
'--badge',
action='store',
type=int,
metavar='BADGE',
dest='badge',
default=1,
help='Badge displayed on application icon.',
)
parser.add_argument(
'-c', '--contentavailable',
action='store_true',
dest='content_available',
default=False,
help='Use content-available. (Support for iOS7 or higher)',
)
parser.add_argument(
'-m', '--mutablecontent',
action='store_true',
dest='mutable_content',
default=False,
help='Use mutable-content. (Support for iOS9 or higher)',
)
def handle(self, *args, **options):
error = False
if options['device_tokens'] is None:
try:
raise ValueError('Please specify a device tokens (-t or --token)')
except ValueError as e:
error = True
logging.error(e)
if options['title'] is None:
try:
raise ValueError('Please input title in push notification (--title)')
except ValueError as e:
error = True
logging.error(e)
if error:
sys.exit()
device_tokens = list(filter(lambda device_token:
DeviceToken.objects.filter(device_token=device_token).count() > 0,
options['device_tokens']))
_ = map(lambda item: logging.warning('There is no match for the specified device token: {}'.format(item)),
list(set(options['device_tokens']) - set(device_tokens)))
try:
cert_file = CertFile.objects.get(target_mode=int(not options['sandbox']), is_use=True)
except ObjectDoesNotExist:
sys.exit(logging.error('Certificate file has not been uploaded'))
apns = APNs(use_sandbox=options['sandbox'], cert_file=CERT_FILE_UPLOAD_DIR + cert_file.filename, enhanced=True)
identifier = random.getrandbits(32)
payload_alert = PayloadAlert(title=options['title'], subtitle=options['subtitle'], body=options['body'])
payload = Payload(alert=payload_alert if payload_alert.body is not None else payload_alert.title,
sound=options['sound'],
badge=options['badge'],
content_available=options['content_available'],
mutable_content=options['mutable_content'])
_ = map(lambda device_token:
apns.gateway_server.send_notification(device_token, payload, identifier=identifier),
device_tokens)
| Python | 0 |
059a799b9c347b6abfcd2daa3678d98cd0884210 | Add "no cover" to teardown() and handle_address_delete() on TiedModelRealtimeSignalProcessor. These are never called. | ovp_search/signals.py | ovp_search/signals.py | from django.db import models
from haystack import signals
from ovp_projects.models import Project
from ovp_organizations.models import Organization
from ovp_core.models import GoogleAddress
class TiedModelRealtimeSignalProcessor(signals.BaseSignalProcessor):
"""
TiedModelRealTimeSignalProcessor handles updates to a index tied to a model
We need to be able to detect changes to a model a rebuild another index,
such as detecting changes to GoogleAddress and updating the index
for projects and organizations.
"""
attach_to = [
(Project, 'handle_save', 'handle_delete'),
(Organization, 'handle_save', 'handle_delete'),
(GoogleAddress, 'handle_address_save', 'handle_address_delete'),
]
m2m = [
Project.causes.through, Project.skills.through, Organization.causes.through
]
def setup(self):
for item in self.attach_to:
models.signals.post_save.connect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.connect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.connect(self.handle_m2m, sender=item)
# never really called
def teardown(self): # pragma: no cover
for item in self.attach_to:
models.signals.post_save.disconnect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.disconnect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.disconnect(self.handle_m2m, sender=item)
def handle_address_save(self, sender, instance, **kwargs):
""" Custom handler for address save """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_save(obj.__class__, obj)
# this function is never really called on sqlite dbs
def handle_address_delete(self, sender, instance, **kwargs): # pragma: no cover
""" Custom handler for address delete """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_delete(obj.__class__, obj)
def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance)
def find_associated_with_address(self, instance):
""" Returns list with projects and organizations associated with given address """
objects = []
objects += list(Project.objects.filter(address=instance))
objects += list(Organization.objects.filter(address=instance))
return objects
| from django.db import models
from haystack import signals
from ovp_projects.models import Project
from ovp_organizations.models import Organization
from ovp_core.models import GoogleAddress
class TiedModelRealtimeSignalProcessor(signals.BaseSignalProcessor):
"""
TiedModelRealTimeSignalProcessor handles updates to a index tied to a model
We need to be able to detect changes to a model a rebuild another index,
such as detecting changes to GoogleAddress and updating the index
for projects and organizations.
"""
attach_to = [
(Project, 'handle_save', 'handle_delete'),
(Organization, 'handle_save', 'handle_delete'),
(GoogleAddress, 'handle_address_save', 'handle_address_delete'),
]
m2m = [
Project.causes.through, Project.skills.through, Organization.causes.through
]
def setup(self):
for item in self.attach_to:
models.signals.post_save.connect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.connect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.connect(self.handle_m2m, sender=item)
def teardown(self):
for item in self.attach_to:
models.signals.post_save.disconnect(getattr(self, item[1]), sender=item[0])
models.signals.post_delete.disconnect(getattr(self, item[1]), sender=item[0])
for item in self.m2m:
models.signals.m2m_changed.disconnect(self.handle_m2m, sender=item)
def handle_address_save(self, sender, instance, **kwargs):
""" Custom handler for address save """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_save(obj.__class__, obj)
def handle_address_delete(self, sender, instance, **kwargs):
""" Custom handler for address delete """
objects = self.find_associated_with_address(instance)
for obj in objects:
self.handle_delete(obj.__class__, obj)
def handle_m2m(self, sender, instance, **kwargs):
""" Handle many to many relationships """
self.handle_save(instance.__class__, instance)
def find_associated_with_address(self, instance):
""" Returns list with projects and organizations associated with given address """
objects = []
objects += list(Project.objects.filter(address=instance))
objects += list(Organization.objects.filter(address=instance))
return objects
| Python | 0 |
2f357ac185e7728e0a0afec6827500c78a4b2796 | Update SavedModel example to use serialized tf Example. Change: 135378723 | tensorflow/python/saved_model/example/saved_model_half_plus_two.py | tensorflow/python/saved_model/example/saved_model_half_plus_two.py | ## Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an example linear regression inference graph.
Exports a TensorFlow graph to /tmp/saved_model/half_plus_two/ based on the
SavedModel format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise SavedModel load and
execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils
def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
"""Generates SavedModel for half plus two.
Args:
export_dir: The directory to which the SavedModel should be written.
as_text: Writes the SavedModel protocol buffer in text format to disk.
"""
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Set up the model parameters as variables to exercise variable loading
# functionality upon restore.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Create a placeholder for serialized tensorflow.Example messages to be fed.
serialized_tf_example = tf.placeholder(tf.string, name="tf_example")
# Parse the tensorflow.Example looking for a feature named "x" with a single
# floating point value.
feature_configs = {"x": tf.FixedLenFeature([1], dtype=tf.float32),}
tf_example = tf.parse_example(serialized_tf_example, feature_configs)
# Use tf.identity() to assign name
x = tf.identity(tf_example["x"], name="x")
y = tf.add(tf.mul(a, x), b, name="y")
# Set up the signature for regression with input and output tensor
# specification.
input_tensor = meta_graph_pb2.TensorInfo()
input_tensor.name = serialized_tf_example.name
signature_inputs = {"input": input_tensor}
output_tensor = meta_graph_pb2.TensorInfo()
output_tensor.name = tf.identity(y).name
signature_outputs = {"output": output_tensor}
signature_def = utils.build_signature_def(signature_inputs,
signature_outputs, "regression")
# Initialize all variables and then save the SavedModel.
sess.run(tf.initialize_all_variables())
builder.add_meta_graph_and_variables(
sess, [constants.TAG_SERVING],
signature_def_map={"regression": signature_def})
builder.save(as_text)
def main(_):
export_dir_pb = "/tmp/saved_model/half_plus_two"
_generate_saved_model_for_half_plus_two(export_dir_pb)
export_dir_pbtxt = "/tmp/saved_model/half_plus_two_pbtxt"
_generate_saved_model_for_half_plus_two(export_dir_pbtxt, as_text=True)
if __name__ == "__main__":
tf.app.run()
| ## Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exports an example linear regression inference graph.
Exports a TensorFlow graph to /tmp/saved_model/half_plus_two/ based on the
SavedModel format.
This graph calculates,
y = a*x + b
where a and b are variables with a=0.5 and b=2.
Output from this program is typically used to exercise SavedModel load and
execution code.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils
def _generate_saved_model_for_half_plus_two(export_dir, as_text=False):
"""Generates SavedModel for half plus two.
Args:
export_dir: The directory to which the SavedModel should be written.
as_text: Writes the SavedModel protocol buffer in text format to disk.
"""
builder = saved_model_builder.SavedModelBuilder(export_dir)
with tf.Session(graph=tf.Graph()) as sess:
# Set up the model parameters as variables to exercise variable loading
# functionality upon restore.
a = tf.Variable(0.5, name="a")
b = tf.Variable(2.0, name="b")
# Set up placeholders.
x = tf.placeholder(tf.float32, name="x")
y = tf.add(tf.mul(a, x), b, name="y")
# Set up the signature for regression with input and output tensor
# specification.
input_tensor = meta_graph_pb2.TensorInfo()
input_tensor.name = x.name
signature_inputs = {"input": input_tensor}
output_tensor = meta_graph_pb2.TensorInfo()
output_tensor.name = y.name
signature_outputs = {"output": output_tensor}
signature_def = utils.build_signature_def(signature_inputs,
signature_outputs, "regression")
# Initialize all variables and then save the SavedModel.
sess.run(tf.initialize_all_variables())
builder.add_meta_graph_and_variables(
sess, [constants.TAG_SERVING],
signature_def_map={"regression": signature_def})
builder.save(as_text)
def main(_):
export_dir_pb = "/tmp/saved_model/half_plus_two"
_generate_saved_model_for_half_plus_two(export_dir_pb)
export_dir_pbtxt = "/tmp/saved_model/half_plus_two_pbtxt"
_generate_saved_model_for_half_plus_two(export_dir_pbtxt, as_text=True)
if __name__ == "__main__":
tf.app.run()
| Python | 0 |
e8a29a6af8856c2957ed93a2da31b62916b6694d | add git support and support passing project_name in VersionControl __init__ | deps/__init__.py | deps/__init__.py | import os
import sys
import shutil
import logging
import urlparse
logger = logging.getLogger('deps')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
class MissingDependency(Exception):
pass
class VersionControl(object):
def __init__(self, url, root, app_name=None, project_name=None):
self.url = url
self.root = root
tail = os.path.basename((urlparse.urlparse(url)[2]).rstrip('/'))
self.project_name = project_name and project_name or tail
self.app_name = app_name and app_name or tail
self.python_path = os.path.join(
self.root,
self.project_name,
)
self.path = os.path.join(
self.root,
self.project_name,
self.app_name,
)
def __repr__(self):
return "<VersionControl: %s>" % self.app_name
def add_to_python_path(self, position):
if not os.path.exists(self.path):
raise MissingDependency('%s does not exist. Run "./manage.py up" to retrieve this dependency' % self.app_name)
sys.path.insert(position, self.python_path)
class HG(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('hg clone %s %s' % (self.url, self.python_path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.chdir(self.python_path)
os.system('hg pull --update')
class GIT(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('git clone %s %s' % (self.url, self.python_path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.chdir(self.python_path)
os.system('git pull')
class SVN(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('svn co %s %s' % (self.url, self.path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.system('svn up %s' % self.path)
def add_all_to_path(settings, auto_update=False, position=1):
for dependency in settings.DEPENDENCIES:
try:
dependency.add_to_python_path(position)
except MissingDependency:
if auto_update:
dependency.up()
else:
raise
dependency.add_to_python_path(position)
| import os
import sys
import shutil
import logging
import urlparse
logger = logging.getLogger('deps')
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.StreamHandler())
class MissingDependency(Exception):
pass
class VersionControl(object):
def __init__(self, url, root, app_name='', project_name=''):
self.url = url
self.root = root
tail = os.path.basename((urlparse.urlparse(url)[2]).rstrip('/'))
if not app_name:
self.app_name = tail
self.project_name = tail
else:
self.app_name = app_name
self.project_name = tail
self.python_path = os.path.join(
self.root,
self.project_name,
)
self.path = os.path.join(
self.root,
self.project_name,
self.app_name,
)
def __repr__(self):
return "<VersionControl: %s>" % self.app_name
def add_to_python_path(self, position):
if not os.path.exists(self.path):
raise MissingDependency('%s does not exist. Run "./manage.py up" to retrieve this dependency' % self.app_name)
sys.path.insert(position, self.python_path)
class HG(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('hg clone %s %s' % (self.url, self.python_path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.chdir(self.python_path)
os.system('hg pull --update')
class SVN(VersionControl):
def checkout(self):
logger.info('checking out %s' % self.project_name)
os.system('svn co %s %s' % (self.url, self.path))
def up(self):
logger.info('%s' % self)
if not os.path.exists(self.path):
self.checkout()
os.system('svn up %s' % self.path)
def add_all_to_path(settings, auto_update=False, position=1):
for dependency in settings.DEPENDENCIES:
try:
dependency.add_to_python_path(position)
except MissingDependency:
if auto_update:
dependency.up()
else:
raise
dependency.add_to_python_path(position)
| Python | 0 |
b2ed2050fdab7ba1052e33786c0a0868333114c4 | Update treeviz_example.py | open_spiel/python/examples/treeviz_example.py | open_spiel/python/examples/treeviz_example.py | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree visualization example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import pyspiel
from open_spiel.python.visualizations import treeviz
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.png", "Name of output file, e.g., "
"[*.png|*.pdf].")
flags.DEFINE_enum("prog", "dot", ["dot", "neato", "circo"], "Graphviz layout.")
flags.DEFINE_boolean("group_infosets", False, "Whether to group infosets.")
flags.DEFINE_boolean("group_terminal", False, "Whether to group terminal nodes.")
flags.DEFINE_boolean("verbose", False, "Whether to print verbose output.")
def _zero_sum_node_decorator(state):
"""Custom node decorator that only shows the return of the first player."""
attrs = treeviz.default_node_decorator(state) # get default attributes
if state.is_terminal():
attrs["label"] = str(int(state.returns()[0]))
return attrs
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
logging.warn("%s is not turn-based. Trying to reload game as turn-based.",
FLAGS.game)
game = pyspiel.load_game_as_turn_based(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must be sequential, not {}".format(
game_type.dynamics))
if (game_type.utility == pyspiel.GameType.Utility.ZERO_SUM and
game.num_players() == 2):
logging.info("Game is zero-sum: only showing first-player's returns.")
gametree = treeviz.GameTree(
game,
node_decorator=_zero_sum_node_decorator,
group_infosets=FLAGS.group_infosets,
group_terminal=FLAGS.group_terminal)
else:
gametree = treeviz.GameTree(game) # use default decorators
if FLAGS.verbose:
logging.info("Game tree:\n%s", gametree.to_string())
gametree.draw(FLAGS.out, prog=FLAGS.prog)
logging.info("Game tree saved to file: %s", FLAGS.out)
if __name__ == "__main__":
app.run(main)
| # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Game tree visualization example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from absl import logging
import pyspiel
from open_spiel.python.visualizations import treeviz
FLAGS = flags.FLAGS
flags.DEFINE_string("game", "kuhn_poker", "Name of the game")
flags.DEFINE_string("out", "/tmp/gametree.png", "Name of output file, e.g., "
"[*.png|*.pdf].")
flags.DEFINE_enum("prog", "dot", ["dot", "neato", "circo"], "Graphviz layout.")
flags.DEFINE_boolean("group_infosets", False, "Whether to group infosets.")
flags.DEFINE_boolean("group_terminal", False, "Whether to group terminal nodes.")
flags.DEFINE_boolean("verbose", False, "Wether to print verbose output.")
def _zero_sum_node_decorator(state):
"""Custom node decorator that only shows the return of the first player."""
attrs = treeviz.default_node_decorator(state) # get default attributes
if state.is_terminal():
attrs["label"] = str(int(state.returns()[0]))
return attrs
def main(argv):
del argv
game = pyspiel.load_game(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics == pyspiel.GameType.Dynamics.SIMULTANEOUS:
logging.warn("%s is not turn-based. Trying to reload game as turn-based.",
FLAGS.game)
game = pyspiel.load_game_as_turn_based(FLAGS.game)
game_type = game.get_type()
if game_type.dynamics != pyspiel.GameType.Dynamics.SEQUENTIAL:
raise ValueError("Game must be sequential, not {}".format(
game_type.dynamics))
if (game_type.utility == pyspiel.GameType.Utility.ZERO_SUM and
game.num_players() == 2):
logging.info("Game is zero-sum: only showing first-player's returns.")
gametree = treeviz.GameTree(
game,
node_decorator=_zero_sum_node_decorator,
group_infosets=FLAGS.group_infosets,
group_terminal=FLAGS.group_terminal)
else:
gametree = treeviz.GameTree(game) # use default decorators
if FLAGS.verbose:
logging.info("Game tree:\n%s", gametree.to_string())
gametree.draw(FLAGS.out, prog=FLAGS.prog)
logging.info("Game tree saved to file: %s", FLAGS.out)
if __name__ == "__main__":
app.run(main)
| Python | 0.000001 |
9cbdc64bcc1144b8ca7d32d08aa5d36afa7f1e73 | index command - reflected _log_id_short change | pageobject/commands/index.py | pageobject/commands/index.py | def index(self, value):
"""
Return index of the first child containing the specified value.
:param str value: text value to look for
:returns: index of the first child containing the specified value
:rtype: int
:raises ValueError: if the value is not found
"""
self.logger.info('getting index of text "{}" within page object list {}'.format(value, self._log_id_short))
self.logger.debug('getting index of text "{}" within page object list; {}'.format(value, self._log_id_long))
index = self.text_values.index(value)
self.logger.info('index of text "{}" within page object list {} is {}'.format(value, self._log_id_short, index))
self.logger.debug('index of text "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
return index
| def index(self, value):
"""
Return index of the first child containing the specified value.
:param str value: text value to look for
:returns: index of the first child containing the specified value
:rtype: int
:raises ValueError: if the value is not found
"""
self.logger.info('getting index of "{}" within {}'.format(value, self._log_id_short))
self.logger.debug('getting index of "{}" within page object; {}'.format(value, self._log_id_long))
index = self.text_values.index(value)
self.logger.info('index of "{}" within {} is {}'.format(value, self._log_id_short, index))
self.logger.debug('index of "{}" within page object is {}; {}'.format(value, index, self._log_id_long))
return index
| Python | 0.000001 |
6143e6b015ed0435dc747b8d4242d47dca79c7a8 | improve busydialog handling | lib/kodi65/busyhandler.py | lib/kodi65/busyhandler.py | # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmcgui
from kodi65 import utils
import traceback
from functools import wraps
class BusyHandler(object):
"""
Class to deal with busydialog handling
"""
def __init__(self, *args, **kwargs):
self.busy = 0
self.enabled = True
self.dialog = xbmcgui.DialogBusy()
def enable(self):
"""
Enables busydialog handling
"""
self.enabled = True
def disable(self):
"""
Disables busydialog handling
"""
self.enabled = False
def show_busy(self):
"""
Increase busycounter and open busydialog if needed
"""
if not self.enabled:
return None
if self.busy == 0:
self.dialog.create()
self.busy += 1
def set_progress(self, percent):
self.dialog.update(percent)
def hide_busy(self):
"""
Decrease busycounter and close busydialog if needed
"""
if not self.enabled:
return None
self.busy = max(0, self.busy - 1)
if self.busy == 0:
self.dialog.close()
def set_busy(self, func):
"""
Decorator to show busy dialog while function is running
"""
@wraps(func)
def decorator(cls, *args, **kwargs):
self.show_busy()
result = None
try:
result = func(cls, *args, **kwargs)
except Exception:
utils.log(traceback.format_exc())
utils.notify("Error", "please contact add-on author")
finally:
self.hide_busy()
return result
return decorator
busyhandler = BusyHandler()
| # -*- coding: utf8 -*-
# Copyright (C) 2015 - Philipp Temminghoff <phil65@kodi.tv>
# This program is Free Software see LICENSE file for details
import xbmc
from kodi65 import utils
import traceback
from functools import wraps
class BusyHandler(object):
"""
Class to deal with busydialog handling
"""
def __init__(self, *args, **kwargs):
self.busy = 0
self.enabled = True
def enable(self):
"""
Enables busydialog handling
"""
self.enabled = True
def disable(self):
"""
Disables busydialog handling
"""
self.enabled = False
def show_busy(self):
"""
Increase busycounter and open busydialog if needed
"""
if not self.enabled:
return None
if self.busy == 0:
xbmc.executebuiltin("ActivateWindow(busydialog)")
self.busy += 1
def hide_busy(self):
"""
Decrease busycounter and close busydialog if needed
"""
if not self.enabled:
return None
self.busy = max(0, self.busy - 1)
if self.busy == 0:
xbmc.executebuiltin("Dialog.Close(busydialog)")
def set_busy(self, func):
"""
Decorator to show busy dialog while function is running
"""
@wraps(func)
def decorator(cls, *args, **kwargs):
self.show_busy()
result = None
try:
result = func(cls, *args, **kwargs)
except Exception:
utils.log(traceback.format_exc())
utils.notify("Error", "please contact add-on author")
finally:
self.hide_busy()
return result
return decorator
busyhandler = BusyHandler()
| Python | 0.000001 |
99eca228811022281da8c93123d7562e5e5c6acb | Update recommender_system.py | lib/recommender_system.py | lib/recommender_system.py | #!/usr/bin/env python
"""
This is a module that contains the main class and functionalities of the recommender systems.
"""
import numpy
from lib.content_based import ContentBased
from lib.evaluator import Evaluator
from lib.LDA import LDARecommender
from util.data_parser import DataParser
from util.recommender_configuer import RecommenderConfiguration
class RecommenderSystem(object):
"""
A class that will combine the content-based and collaborative-filtering,
in order to provide the main functionalities of recommendations.
"""
def __init__(self):
"""
Constructor of the RecommenderSystem.
"""
DataParser.process()
self.ratings = DataParser.get_ratings_matrix()
# TODO: split abstracts
self.abstracts = DataParser.get_abstracts().values()
self.config = RecommenderConfiguration()
self.n_factors = self.config.get_hyperparameters()['n_factors']
self.n_iterations = self.config.get_options()['n_iterations']
self.content_based = ContentBased(self.abstracts, self.n_factors, self.n_iterations)
if self.config.get_content_based() == 'LDA':
self.content_based = LDARecommender(self.abstracts, self.n_factors, self.n_iterations)
elif self.config.get_content_based() == 'LDA2Vec':
raise NotImplemented('LDA2Vec is not yet implemented.')
else:
raise NameError("Not a valid content based " + self.config.get_content_based())
self.hyperparameters = self.config.get_hyperparameters()
if self.config.get_collaborative_filtering() == 'ALS':
# self.collaborative_filtering = CollaborativeFiltering(ratings, self.n_factors,
# self.hyperparameters['collaborative-filtering-lambda'])
pass
else:
raise NameError("Not a valid collaborative filtering " + self.config.get_collaborative_filtering())
if self.config.get_error_metric() == 'RMS':
# TODO: initialize with abstracts
self.evaluator = Evaluator(self.ratings)
else:
raise NameError("Not a valid error metric " + self.config.get_error_metric())
def process(self):
"""
Process an iteration of the algorithm on the given data.
"""
self.content_based.train()
theta = self.content_based.get_word_distribution()
# TODO: Use collaborative filtering and evaluator
# u, v = self.collaborative_filtering.train(theta)
error = self.evaluator.get_rmse(theta)
return error
def recommend_items(self, user_id, num_recommendations=10):
"""
Get recommendations for a user.
@param(int) user_id: The id of the user.
@param(int) num_recommendations: The number of recommended items.
@returns(list) a list of the best recommendations for a given user_id.
"""
pass
| #!/usr/bin/env python
"""
This is a module that contains the main class and functionalities of the recommender systems.
"""
import numpy
from lib.content_based import ContentBased
from lib.evaluator import Evaluator
from lib.LDA import LDARecommender
from util.data_parser import DataParser
from util.recommender_configuer import RecommenderConfiguration
class RecommenderSystem(object):
"""
A class that will combine the content-based and collaborative-filtering,
in order to provide the main functionalities of recommendations.
"""
def __init__(self):
"""
Constructor of the RecommenderSystem.
"""
DataParser.process()
self.ratings = DataParser.get_ratings_matrix()
# TODO: split abstracts
self.abstracts = DataParser.get_abstracts().values()
self.config = RecommenderConfiguration()
self.n_factors = self.config.get_hyperparameters()['n_factors']
self.n_iterations = self.config.get_options()['n_iterations']
# self.content_based = ContentBased(self.abstracts, self.n_factors, self.n_iterations)
if self.config.get_content_based() == 'LDA':
self.content_based = LDARecommender(self.abstracts, self.n_factors, self.n_iterations)
elif self.config.get_content_based() == 'LDA2Vec':
raise NotImplemented('LDA2Vec is not yet implemented.')
else:
raise NameError("Not a valid content based " + self.config.get_content_based())
self.hyperparameters = self.config.get_hyperparameters()
if self.config.get_collaborative_filtering() == 'ALS':
# self.collaborative_filtering = CollaborativeFiltering(ratings, self.n_factors,
# self.hyperparameters['collaborative-filtering-lambda'])
pass
else:
raise NameError("Not a valid collaborative filtering " + self.config.get_collaborative_filtering())
if self.config.get_error_metric() == 'RMS':
# TODO: initialize with abstracts
self.evaluator = Evaluator(self.ratings)
else:
raise NameError("Not a valid error metric " + self.config.get_error_metric())
def process(self):
"""
Process an iteration of the algorithm on the given data.
"""
self.content_based.train()
theta = self.content_based.get_word_distribution()
# TODO: Use collaborative filtering and evaluator
# u, v = self.collaborative_filtering.train(theta)
error = self.evaluator.get_rmse(theta)
return error
def recommend_items(self, user_id, num_recommendations=10):
"""
Get recommendations for a user.
@param(int) user_id: The id of the user.
@param(int) num_recommendations: The number of recommended items.
@returns(list) a list of the best recommendations for a given user_id.
"""
pass
| Python | 0 |
767a50052895cf10386f01bab83941a2141c30f1 | fix json test and add json from string test | tests/python_tests/datasource_test.py | tests/python_tests/datasource_test.py | #!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, mapnik2
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_field_listing():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
fields = lyr.datasource.fields()
eq_(fields, ['AREA', 'EAS_ID', 'PRFEDEA'])
def test_total_feature_count_shp():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 10)
def test_total_feature_count_json():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Ogr(file='../data/json/points.json',layer_by_index=0)
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 5)
def test_reading_json_from_string():
json = open('../data/json/points.json','r').read()
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Ogr(file=json,layer_by_index=0)
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 5)
def test_feature_envelope():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
for feat in features:
env = feat.envelope()
contains = lyr.envelope().contains(env)
eq_(contains, True)
intersects = lyr.envelope().contains(env)
eq_(intersects, True)
def test_feature_attributes():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
feat = features[0]
attrs = {'PRFEDEA': u'35043411', 'EAS_ID': 168, 'AREA': 215229.266}
eq_(feat.attributes, attrs)
eq_(lyr.datasource.fields(),['AREA', 'EAS_ID', 'PRFEDEA'])
eq_(lyr.datasource.field_types(),[float,int,str])
| #!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, mapnik2
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
def test_field_listing():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
fields = lyr.datasource.fields()
eq_(fields, ['AREA', 'EAS_ID', 'PRFEDEA'])
def test_total_feature_count_shp():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 10)
def test_total_feature_count_json():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Ogr(file='../data/json/points.json',layer_by_index=0)
features = lyr.datasource.all_features()
num_feats = len(features)
eq_(num_feats, 3)
def test_feature_envelope():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
for feat in features:
env = feat.envelope()
contains = lyr.envelope().contains(env)
eq_(contains, True)
intersects = lyr.envelope().contains(env)
eq_(intersects, True)
def test_feature_attributes():
lyr = mapnik2.Layer('test')
lyr.datasource = mapnik2.Shapefile(file='../data/shp/poly.shp')
features = lyr.datasource.all_features()
feat = features[0]
attrs = {'PRFEDEA': u'35043411', 'EAS_ID': 168, 'AREA': 215229.266}
eq_(feat.attributes, attrs)
eq_(lyr.datasource.fields(),['AREA', 'EAS_ID', 'PRFEDEA'])
eq_(lyr.datasource.field_types(),[float,int,str])
| Python | 0.000008 |
05855c934624c667053635a8ab8679c54426e49f | Rewrite the initialization of Release.eol_date. | releases/migrations/0003_populate_release_eol_date.py | releases/migrations/0003_populate_release_eol_date.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
def set_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
# Set the EOL date of all releases to the date of the following release
# except for the final one in the 0 series and in each 1.x series.
releases = list(Release.objects.all().order_by('major', 'minor', 'micro',
'status', 'iteration'))
for previous, current in zip(releases[:-1], releases[1:]):
if current.major != previous.major:
continue
if current.major == 1 and previous.minor != current.minor:
continue
previous.eol_date = current.date
previous.save()
# Set the EOL date of final releases the 0 series and in each 1.x series.
for version, eol_date in [
('0.96.5', datetime.date(2008, 9, 3)), # 1.0 release
('1.0.4', datetime.date(2010, 5, 17)), # 1.2 release
('1.1.4', datetime.date(2011, 3, 23)), # 1.3 release
('1.2.7', datetime.date(2012, 3, 23)), # 1.4 release
('1.3.7', datetime.date(2013, 2, 26)), # 1.5 release
('1.4.22', datetime.date(2015, 10, 1)), # end of LTS support
('1.5.12', datetime.date(2014, 9, 2)), # 1.7 release
('1.6.11', datetime.date(2015, 4, 1)), # 1.8 release
# 1.7.10 and 1.8.5 are still supported at the time of writing.
]:
# This patterns ignores missing releases e.g. during tests.
Release.objects.filter(version=version).update(eol_date=eol_date)
def unset_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
Release.objects.update(eol_date=None)
class Migration(migrations.Migration):
dependencies = [
('releases', '0002_release_eol_date'),
]
operations = [
migrations.RunPython(set_eol_date, unset_eol_date),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
from django.db import migrations, models
def set_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
# List of EOL dates for releases for which docs are published.
for version, eol_date in [
('1.4', datetime.date(2015, 10, 1)),
('1.5', datetime.date(2014, 9, 2)),
('1.6', datetime.date(2015, 4, 1)),
]:
Release.objects.filter(version=version).update(eol_date=eol_date)
def unset_eol_date(apps, schema_editor):
Release = apps.get_model('releases', 'Release')
Release.objects.update(eol_date=None)
class Migration(migrations.Migration):
dependencies = [
('releases', '0002_release_eol_date'),
]
operations = [
migrations.RunPython(set_eol_date, unset_eol_date),
]
| Python | 0 |
ab93ea01dacc0fbd63fac91b1afcf5af1b711c2f | correct latest migration | umklapp/migrations/0009_teller_hasleft.py | umklapp/migrations/0009_teller_hasleft.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 20:38
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('umklapp', '0008_auto_20160528_2332'),
]
operations = [
migrations.AddField(
model_name='teller',
name='hasLeft',
field=models.BooleanField(default=False),
preserve_default=False,
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-05-31 19:39
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('umklapp', '0008_auto_20160528_2332'),
]
operations = [
migrations.AddField(
model_name='teller',
name='hasLeft',
field=models.BooleanField(default=True),
preserve_default=False,
),
]
| Python | 0.000002 |
dbd11fcc20f6770afa097e65d0a81c82b7f0c334 | Update tests with access token | mnubo/tests/test_auth_manager.py | mnubo/tests/test_auth_manager.py | from mnubo.api_manager import APIManager
import requests
import datetime
from requests import Response
from mock import MagicMock
def test_auth_maneger_init():
response = Response()
response._content = '{"access_token":"ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
requests.post.assert_called_with('HOSTNAME/oauth/token?grant_type=client_credentials', headers={'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='})
auth.access_token = auth.fetch_access_token()
auth_authorization_header = auth.get_token_authorization_header()
authorization_header = auth.get_authorization_header()
api_url = auth.get_api_url()
auth_url = auth.get_auth_url()
assert auth.access_token.token == 'ACCESS_TOKEN'
assert auth.access_token.expires_in == datetime.timedelta(seconds=3887999)
assert auth_authorization_header == {'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='}
assert authorization_header == {'content-type': 'application/json', 'Authorization': 'Bearer ACCESS_TOKEN'}
assert api_url == 'HOSTNAME/api/v3/'
assert auth_url == 'HOSTNAME/oauth/token?grant_type=client_credentials'
def test_create_operations():
response = Response()
response._content = '{"access_token":"ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.post = MagicMock(return_value=response)
create = auth.post('ROUTE', None)
assert create == {"message": "SUCCESS"}
def test_put_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.put = MagicMock(return_value=response)
put = auth.put('ROUTE', None)
assert put == {"message": "SUCCESS"}
def test_delete_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.delete = MagicMock(return_value=response)
delete = auth.delete('ROUTE')
assert delete == {"message": "SUCCESS"} | from mnubo.api_manager import APIManager
import requests
import json
from requests import Response
from mock import MagicMock
def test_auth_maneger_init():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
requests.post.assert_called_with('HOSTNAME/oauth/token?grant_type=client_credentials', headers={'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='})
auth.client_access_token = auth.fetch_client_access_token()
auth_authorization_header = auth.get_token_authorization_header()
authorization_header = auth.get_authorization_header()
api_url = auth.get_api_url()
auth_url = auth.get_auth_url()
assert auth.client_access_token == 'CLIENT_ACCESS_TOKEN'
assert auth_authorization_header == {'content-type': 'application/x-www-form-urlencoded', 'Authorization': 'Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ='}
assert authorization_header == {'content-type': 'application/json', 'Authorization': 'Bearer CLIENT_ACCESS_TOKEN'}
assert api_url == 'HOSTNAME/api/v3/'
assert auth_url == 'HOSTNAME/oauth/token?grant_type=client_credentials'
def test_create_operations():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.post = MagicMock(return_value=response)
create = auth.post('ROUTE', None)
assert create == {"message": "SUCCESS"}
def test_put_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.put = MagicMock(return_value=response)
put = auth.put('ROUTE', None)
assert put == {"message": "SUCCESS"}
def test_delete_operation():
response = Response()
response._content = '{"access_token":"CLIENT_ACCESS_TOKEN","token_type":"Bearer","expires_in":3887999}'
requests.post = MagicMock(return_value=response)
auth = APIManager('CLIENT_ID', 'CLIENT_SECRET', 'HOSTNAME')
response = Response()
response._content = '{"message": "SUCCESS"}'
requests.delete = MagicMock(return_value=response)
delete = auth.delete('ROUTE')
assert delete == {"message": "SUCCESS"} | Python | 0 |
aa203b23eec8ff9ccbde3678f01f4ee14f43a09f | Fix typo introduced by code quality patch | src/storage/sqlite.py | src/storage/sqlite.py | import collections
from threading import current_thread, enumerate as threadingEnumerate, RLock
import Queue
import time
from helper_sql import *
from storage import InventoryStorage, InventoryItem
class SqliteInventory(InventoryStorage):
def __init__(self):
super(self.__class__, self).__init__()
self._inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
self._streams = collections.defaultdict(set) # key = streamNumer, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple hours.
self.lock = RLock() # Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
def __contains__(self, hash):
with self.lock:
if hash in self._inventory:
return True
return bool(sqlQuery('SELECT 1 FROM inventory WHERE hash=?', hash))
def __getitem__(self, hash):
with self.lock:
if hash in self._inventory:
return self._inventory[hash]
rows = sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?', hash)
if not rows:
raise KeyError(hash)
return InventoryItem(*rows[0])
def __setitem__(self, hash, value):
with self.lock:
value = InventoryItem(*value)
self._inventory[hash] = value
self._streams[value.stream].add(hash)
def __delitem__(self, hash):
raise NotImplementedError
def __iter__(self):
with self.lock:
hashes = self._inventory.keys()[:]
hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
return hashes.__iter__()
def __len__(self):
with self.lock:
return len(self._inventory) + sqlQuery('SELECT count(*) FROM inventory')[0][0]
def by_type_and_tag(self, objectType, tag):
with self.lock:
values = [value for value in self._inventory.values() if value.type == objectType and value.tag == tag]
values += (InventoryItem(*value) for value in sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE objecttype=? AND tag=?', objectType, tag))
return values
def hashes_by_stream(self, stream):
with self.lock:
return self._streams[stream]
def unexpired_hashes_by_stream(self, stream):
with self.lock:
t = int(time.time())
hashes = [x for x, value in self._inventory.items() if value.stream == stream and value.expires > t]
hashes += (payload for payload, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, t))
return hashes
def flush(self):
with self.lock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
with SqlBulkExecute() as sql:
for objectHash, value in self._inventory.items():
sql.execute('INSERT INTO inventory VALUES (?, ?, ?, ?, ?, ?)', objectHash, *value)
self._inventory.clear()
def clean(self):
with self.lock:
sqlExecute('DELETE FROM inventory WHERE expirestime<?',int(time.time()) - (60 * 60 * 3))
self._streams.clear()
for objectHash, value in self.items():
self._streams[value.stream].add(objectHash)
| import collections
from threading import current_thread, enumerate as threadingEnumerate, RLock
import Queue
import time
from helper_sql import *
from storage import InventoryStorage, InventoryItem
class SqliteInventory(InventoryStorage):
def __init__(self):
super(self.__class__, self).__init__()
self._inventory = {} #of objects (like msg payloads and pubkey payloads) Does not include protocol headers (the first 24 bytes of each packet).
self._streams = collections.defaultdict(set) # key = streamNumer, value = a set which holds the inventory object hashes that we are aware of. This is used whenever we receive an inv message from a peer to check to see what items are new to us. We don't delete things out of it; instead, the singleCleaner thread clears and refills it every couple hours.
self.lock = RLock() # Guarantees that two receiveDataThreads don't receive and process the same message concurrently (probably sent by a malicious individual)
def __contains__(self, hash):
with self.lock:
if hash in self._inventory:
return True
return bool(sqlQuery('SELECT 1 FROM inventory WHERE hash=?', hash))
def __getitem__(self, hash):
with self.lock:
if hash in self._inventory:
return self._inventory[hash]
rows = sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE hash=?', hash)
if not rows:
raise KeyError(hash)
return InventoryItem(*rows[0])
def __setitem__(self, hash, value):
with self.lock:
value = InventoryItem(*value)
self._inventory[hash] = value
self._streams[value.stream].add(hash)
def __delitem__(self, hash):
raise NotImplementedError
def __iter__(self):
with self.lock:
hashes = self._inventory.keys()[:]
hashes += (x for x, in sqlQuery('SELECT hash FROM inventory'))
return hashes.__iter__()
def __len__(self):
with self.lock:
return len(self._inventory) + sqlQuery('SELECT count(*) FROM inventory')[0][0]
def by_type_and_tag(self, objectType, tag):
with self.lock:
values = [value for value in self._inventory.values() if value.type == type and value.tag == tag]
values += (InventoryItem(*value) for value in sqlQuery('SELECT objecttype, streamnumber, payload, expirestime, tag FROM inventory WHERE objecttype=? AND tag=?', type, tag))
return values
def hashes_by_stream(self, stream):
with self.lock:
return self._streams[stream]
def unexpired_hashes_by_stream(self, stream):
with self.lock:
t = int(time.time())
hashes = [x for x, value in self._inventory.items() if value.stream == stream and value.expires > t]
hashes += (payload for payload, in sqlQuery('SELECT hash FROM inventory WHERE streamnumber=? AND expirestime>?', stream, t))
return hashes
def flush(self):
with self.lock: # If you use both the inventoryLock and the sqlLock, always use the inventoryLock OUTSIDE of the sqlLock.
with SqlBulkExecute() as sql:
for objectHash, value in self._inventory.items():
sql.execute('INSERT INTO inventory VALUES (?, ?, ?, ?, ?, ?)', objectHash, *value)
self._inventory.clear()
def clean(self):
with self.lock:
sqlExecute('DELETE FROM inventory WHERE expirestime<?',int(time.time()) - (60 * 60 * 3))
self._streams.clear()
for objectHash, value in self.items():
self._streams[value.stream].add(objectHash)
| Python | 0.000003 |
3405dc54b611b3d12583f0ff14f6b8d9e32a18a9 | Revert "fixed pipeline is dropping frames and GUI can't see any videos" | voctocore/lib/sources/decklinkavsource.py | voctocore/lib/sources/decklinkavsource.py | #!/usr/bin/env python3
import logging
import re
from gi.repository import Gst, GLib
from lib.config import Config
from lib.sources.avsource import AVSource
class DeckLinkAVSource(AVSource):
timer_resolution = 0.5
def __init__(self, name, has_audio=True, has_video=True):
super().__init__('DecklinkAVSource', name, has_audio, has_video)
self.device = Config.getDeckLinkDeviceNumber(name)
self.aconn = Config.getDeckLinkAudioConnection(name)
self.vconn = Config.getDeckLinkVideoConnection(name)
self.vmode = Config.getDeckLinkVideoMode(name)
self.vfmt = Config.getDeckLinkVideoFormat(name)
self.name = name
self.signalPad = None
self.build_pipeline()
def port(self):
return "Decklink #{}".format(self.device)
def attach(self, pipeline):
super().attach(pipeline)
self.signalPad = pipeline.get_by_name(
'decklinkvideosrc-{}'.format(self.name))
GLib.timeout_add(self.timer_resolution * 1000, self.do_timeout)
def do_timeout(self):
self.inputSink.set_property(
'alpha', 1.0 if self.num_connections() > 0 else 0.0)
# just come back
return True
def num_connections(self):
return 1 if self.signalPad and self.signalPad.get_property('signal') else 0
def __str__(self):
return 'DecklinkAVSource[{name}] reading card #{device}'.format(
name=self.name,
device=self.device
)
def build_source(self):
# A video source is required even when we only need audio
pipe = """
decklinkvideosrc
name=decklinkvideosrc-{name}
device-number={device}
connection={conn}
video-format={fmt}
mode={mode}
""".format(name=self.name,
device=self.device,
conn=self.vconn,
mode=self.vmode,
fmt=self.vfmt
)
if self.has_video:
if self.build_deinterlacer():
pipe += """\
! {deinterlacer}
""".format(deinterlacer=self.build_deinterlacer())
pipe += """\
! videoconvert
! videoscale
! videorate
name=vout-{name}
""".format(
deinterlacer=self.build_deinterlacer(),
name=self.name
)
else:
pipe += """\
! fakesink
"""
if self.has_audio:
pipe += """
decklinkaudiosrc
name=decklinkaudiosrc-{name}
device-number={device}
connection={conn}
channels={channels}
""".format( name=self.name,
device=self.device,
conn=self.aconn,
channels=Config.getNumAudioStreams())
return pipe
def build_audioport(self):
return 'decklinkaudiosrc-{name}.'.format(name=self.name)
def build_videoport(self):
return 'vout-{}.'.format(self.name)
| #!/usr/bin/env python3
import logging
import re
from gi.repository import Gst, GLib
from lib.config import Config
from lib.sources.avsource import AVSource
class DeckLinkAVSource(AVSource):
timer_resolution = 0.5
def __init__(self, name, has_audio=True, has_video=True):
super().__init__('DecklinkAVSource', name, has_audio, has_video)
self.device = Config.getDeckLinkDeviceNumber(name)
self.aconn = Config.getDeckLinkAudioConnection(name)
self.vconn = Config.getDeckLinkVideoConnection(name)
self.vmode = Config.getDeckLinkVideoMode(name)
self.vfmt = Config.getDeckLinkVideoFormat(name)
self.name = name
self.signalPad = None
self.build_pipeline()
def port(self):
return "Decklink #{}".format(self.device)
def attach(self, pipeline):
super().attach(pipeline)
self.signalPad = pipeline.get_by_name(
'decklinkvideosrc-{}'.format(self.name))
GLib.timeout_add(self.timer_resolution * 1000, self.do_timeout)
def do_timeout(self):
self.inputSink.set_property(
'alpha', 1.0 if self.num_connections() > 0 else 0.0)
# just come back
return True
def num_connections(self):
return 1 if self.signalPad and self.signalPad.get_property('signal') else 0
def __str__(self):
return 'DecklinkAVSource[{name}] reading card #{device}'.format(
name=self.name,
device=self.device
)
def build_source(self):
# A video source is required even when we only need audio
pipe = """
decklinkvideosrc
name=decklinkvideosrc-{name}
device-number={device}
connection={conn}
video-format={fmt}
mode={mode}
drop-no-signal-frames=true
""".format(name=self.name,
device=self.device,
conn=self.vconn,
mode=self.vmode,
fmt=self.vfmt
)
if self.has_video:
if self.build_deinterlacer():
pipe += """\
! {deinterlacer}
""".format(deinterlacer=self.build_deinterlacer())
pipe += """\
! videoconvert
! videoscale
! videorate
name=vout-{name}
""".format(
deinterlacer=self.build_deinterlacer(),
name=self.name
)
else:
pipe += """\
! fakesink
"""
if self.has_audio:
pipe += """
decklinkaudiosrc
name=decklinkaudiosrc-{name}
device-number={device}
connection={conn}
channels={channels}
""".format( name=self.name,
device=self.device,
conn=self.aconn,
channels=Config.getNumAudioStreams())
return pipe
def build_audioport(self):
return 'decklinkaudiosrc-{name}.'.format(name=self.name)
def build_videoport(self):
return 'vout-{}.'.format(self.name)
| Python | 0 |
c5bfd55147e7fb18264f601c34e180453974f55e | DEBUG messages deleted | vt_manager/src/python/agent/provisioning/ProvisioningDispatcher.py | vt_manager/src/python/agent/provisioning/ProvisioningDispatcher.py | '''
@author: msune
Provisioning dispatcher. Selects appropiate Driver for VT tech
'''
from communications.XmlRpcClient import XmlRpcClient
from utils.VmMutexStore import VmMutexStore
import threading
class ProvisioningDispatcher:
@staticmethod
def __getProvisioningDispatcher(vtype):
#Import of Dispatchers must go here to avoid import circular dependecy
from xen.provisioning.XenProvisioningDispatcher import XenProvisioningDispatcher
if vtype == "xen":
return XenProvisioningDispatcher
else:
raise Exception("Virtualization type not supported by the agent")
@staticmethod
def __dispatchAction(dispatcher,action,vm):
#Inventory
if action.type_ == "create":
return dispatcher.createVMfromImage(action.id,vm)
if action.type_ == "modify" :
return dispatcher.modifyVM(action.id,vm)
if action.type_ == "delete" :
return dispatcher.deleteVM(action.id,vm)
#Scheduling
if action.type_ == "start":
return dispatcher.startVM(action.id,vm)
if action.type_ == "reboot" :
return dispatcher.rebootVM(action.id,vm)
if action.type_ == "stop" :
return dispatcher.stopVM(action.id,vm)
if action.type_ == "hardStop" :
return dispatcher.hardStopVM(action.id,vm)
raise Exception("Unknown action type")
@staticmethod
def processProvisioning(provisioning):
for action in provisioning.action:
vm = action.server.virtual_machines[0]
try:
dispatcher = ProvisioningDispatcher.__getProvisioningDispatcher(vm.virtualization_type)
except Exception as e:
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"FAILED",str(e))
print e
return
try:
#Acquire VM lock
VmMutexStore.lock(vm)
#Send async notification
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"ONGOING","")
ProvisioningDispatcher.__dispatchAction(dispatcher,action,vm)
except Exception as e:
#TODO improve this trace
print e
raise e
finally:
#Release VM lock
VmMutexStore.unlock(vm)
##Abstract methods definition for ProvisioningDispatchers
#Inventory
@staticmethod
def createVMfromImage(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def modifyVM(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def deleteVM(id,vm):
raise Exception("Abstract method cannot be called")
#Scheduling
def startVM(id,vm):
raise Exception("Abstract method cannot be called")
def rebootVM(id,vm):
raise Exception("Abstract method cannot be called")
def stopVM(id,vm):
raise Exception("Abstract method cannot be called")
def hardStopVM(id,vm):
raise Exception("Abstract method cannot be called")
| '''
@author: msune
Provisioning dispatcher. Selects appropiate Driver for VT tech
'''
from communications.XmlRpcClient import XmlRpcClient
from utils.VmMutexStore import VmMutexStore
import threading
class ProvisioningDispatcher:
@staticmethod
def __getProvisioningDispatcher(vtype):
#Import of Dispatchers must go here to avoid import circular dependecy
from xen.provisioning.XenProvisioningDispatcher import XenProvisioningDispatcher
if vtype == "xen":
return XenProvisioningDispatcher
else:
raise Exception("Virtualization type not supported by the agent")
@staticmethod
def __dispatchAction(dispatcher,action,vm):
#Inventory
if action.type_ == "create":
return dispatcher.createVMfromImage(action.id,vm)
if action.type_ == "modify" :
return dispatcher.modifyVM(action.id,vm)
if action.type_ == "delete" :
return dispatcher.deleteVM(action.id,vm)
#Scheduling
if action.type_ == "start":
return dispatcher.startVM(action.id,vm)
if action.type_ == "reboot" :
return dispatcher.rebootVM(action.id,vm)
if action.type_ == "stop" :
return dispatcher.stopVM(action.id,vm)
if action.type_ == "hardStop" :
return dispatcher.hardStopVM(action.id,vm)
raise Exception("Unknown action type")
@staticmethod
def processProvisioning(provisioning):
print "******************************************************************\n
LEODEBUG: CURRENT THREAD: "+str(threading.currentThread().get_ident())+"\n
*******************************************************************"
for action in provisioning.action:
vm = action.server.virtual_machines[0]
try:
dispatcher = ProvisioningDispatcher.__getProvisioningDispatcher(vm.virtualization_type)
except Exception as e:
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"FAILED",str(e))
print e
return
try:
#Acquire VM lock
VmMutexStore.lock(vm)
#Send async notification
XmlRpcClient.sendAsyncProvisioningActionStatus(action.id,"ONGOING","")
ProvisioningDispatcher.__dispatchAction(dispatcher,action,vm)
except Exception as e:
#TODO improve this trace
print e
raise e
finally:
#Release VM lock
VmMutexStore.unlock(vm)
##Abstract methods definition for ProvisioningDispatchers
#Inventory
@staticmethod
def createVMfromImage(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def modifyVM(id,vm):
raise Exception("Abstract method cannot be called")
@staticmethod
def deleteVM(id,vm):
raise Exception("Abstract method cannot be called")
#Scheduling
def startVM(id,vm):
raise Exception("Abstract method cannot be called")
def rebootVM(id,vm):
raise Exception("Abstract method cannot be called")
def stopVM(id,vm):
raise Exception("Abstract method cannot be called")
def hardStopVM(id,vm):
raise Exception("Abstract method cannot be called")
| Python | 0.000001 |
b78165d68e1e01e722b746e926a36b5680debdfa | remove email filter and rfactor | web/impact/impact/v1/views/mentor_program_office_hour_list_view.py | web/impact/impact/v1/views/mentor_program_office_hour_list_view.py | # MIT License
# Copyright (c) 2019 MassChallenge, Inc.
from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
MentorProgramOfficeHourHelper,
)
class MentorProgramOfficeHourListView(BaseListView):
view_name = "office_hour"
helper_class = MentorProgramOfficeHourHelper
def filter(self, queryset):
allowed_params = ['mentor_id', 'finalist_d']
param_items = self.request.query_params.items()
if not param_items:
return queryset
filter_values = {
key: value for (key, value) in param_items
if key in allowed_params}
return queryset.filter(**filter_values)
| # MIT License
# Copyright (c) 2019 MassChallenge, Inc.
from impact.v1.views.base_list_view import BaseListView
from impact.v1.helpers import (
MentorProgramOfficeHourHelper,
)
LOOKUPS = {
'mentor_email': 'mentor__email__icontains',
'mentor_id': 'mentor_id',
'finalist_email': 'finalist__email__icontains',
'finalist_id': 'finalist_id',
}
class MentorProgramOfficeHourListView(BaseListView):
view_name = "office_hour"
helper_class = MentorProgramOfficeHourHelper
def filter(self, queryset):
if self.request.query_params.keys():
filter_values = self._get_filter()
return queryset.filter(**filter_values)
return queryset
def _get_filter(self):
query_params = self.request.query_params.dict()
query_filter = {
LOOKUPS[key]: value for key, value in query_params.items()
if key in LOOKUPS.keys()
}
return query_filter
| Python | 0 |
05b7f56bdfa600e72d4cca5a4c51324ff3c94d4d | Update file distancematrixtest.py | pymsascoring/distancematrix/test/distancematrixtest.py | pymsascoring/distancematrix/test/distancematrixtest.py | import unittest
from pymsascoring.distancematrix.distancematrix import DistanceMatrix
__author__ = "Antonio J. Nebro"
class TestMethods(unittest.TestCase):
def setUp(self):
pass
def test_should_default_gap_penalty_be_minus_eight(self):
matrix = DistanceMatrix()
self.assertEqual(-8, matrix.get_gap_penalty())
def test_should_constructor__modify_the_gap_penalty(self):
matrix = DistanceMatrix(-10)
self.assertEqual(-10, matrix.get_gap_penalty())
def test_should_get_distance_return_the_gap_penalty_if_a_char_is_a_gap(self):
matrix = DistanceMatrix()
self.assertEqual(matrix.get_gap_penalty(), matrix.get_distance('A', '-'))
self.assertEqual(matrix.get_gap_penalty(), matrix.get_distance('-', 'B'))
def test_should_get_distance_return_one_if_the_two_chars_are_gaps(self):
matrix = DistanceMatrix()
self.assertEqual(1, matrix.get_distance('-', '-'))
if __name__ == '__main__':
unittest.main() | import unittest
__author__ = "Antonio J. Nebro"
class TestMethods(unittest.TestCase):
def setUp(self):
pass
if __name__ == '__main__':
unittest.main() | Python | 0.000001 |
bd32faf934bd26957a16a0aa2ac092c5759d2342 | annotate new test | python/ql/test/experimental/dataflow/fieldflow/test.py | python/ql/test/experimental/dataflow/fieldflow/test.py | # These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):
return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j
def SINK(x):
if is_source(x):
print("OK")
else:
print("Unexpected flow", x)
def SINK_F(x):
if is_source(x):
print("Unexpected flow", x)
else:
print("OK")
# Preamble
class MyObj(object):
def __init__(self, foo):
self.foo = foo
def setFoo(self, foo):
self.foo = foo
class NestedObj(object):
def __init__(self):
self.obj = MyObj("OK")
def getObj(self):
return self.obj
def setFoo(obj, x):
SINK_F(obj.foo)
obj.foo = x
def test_example1():
myobj = MyObj("OK")
setFoo(myobj, SOURCE)
SINK(myobj.foo)
def test_example1_method():
myobj = MyObj("OK")
myobj.setFoo(SOURCE)
SINK(myobj.foo) # Flow not found
def test_example2():
x = SOURCE
a = NestedObj()
a.obj.foo = x
SINK(a.obj.foo)
def test_example2_method():
x = SOURCE
a = NestedObj()
a.getObj().foo = x
SINK(a.obj.foo) # Flow missing
def test_example3():
obj = MyObj(SOURCE)
SINK(obj.foo)
def test_example3_kw():
obj = MyObj(foo=SOURCE)
SINK(obj.foo)
def fields_with_local_flow(x):
obj = MyObj(x)
a = obj.foo
return a
def test_fields():
SINK(fields_with_local_flow(SOURCE))
| # These are defined so that we can evaluate the test code.
NONSOURCE = "not a source"
SOURCE = "source"
def is_source(x):
return x == "source" or x == b"source" or x == 42 or x == 42.0 or x == 42j
def SINK(x):
if is_source(x):
print("OK")
else:
print("Unexpected flow", x)
def SINK_F(x):
if is_source(x):
print("Unexpected flow", x)
else:
print("OK")
# Preamble
class MyObj(object):
def __init__(self, foo):
self.foo = foo
def setFoo(self, foo):
self.foo = foo
class NestedObj(object):
def __init__(self):
self.obj = MyObj("OK")
def getObj(self):
return self.obj
def setFoo(obj, x):
SINK_F(obj.foo)
obj.foo = x
def test_example1():
myobj = MyObj("OK")
setFoo(myobj, SOURCE)
SINK(myobj.foo)
def test_example1_method():
myobj = MyObj("OK")
myobj.setFoo(SOURCE)
SINK(myobj.foo)
def test_example2():
x = SOURCE
a = NestedObj()
a.obj.foo = x
SINK(a.obj.foo)
def test_example2_method():
x = SOURCE
a = NestedObj()
a.getObj().foo = x
SINK(a.obj.foo) # Flow missing
def test_example3():
obj = MyObj(SOURCE)
SINK(obj.foo)
def test_example3_kw():
obj = MyObj(foo=SOURCE)
SINK(obj.foo)
def fields_with_local_flow(x):
obj = MyObj(x)
a = obj.foo
return a
def test_fields():
SINK(fields_with_local_flow(SOURCE))
| Python | 0.004804 |
091ebd935c6145ac233c03bedeb52c65634939f4 | Include the version-detecting code to allow PyXML to override the "standard" xml package. Require at least PyXML 0.6.1. | Lib/xml/__init__.py | Lib/xml/__init__.py | """Core XML support for Python.
This package contains three sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
"""
__all__ = ["dom", "parsers", "sax"]
__version__ = "$Revision$"[1:-1].split()[1]
_MINIMUM_XMLPLUS_VERSION = (0, 6, 1)
try:
import _xmlplus
except ImportError:
pass
else:
try:
v = _xmlplus.version_info
except AttributeError:
# _xmlplue is too old; ignore it
pass
else:
if v >= _MINIMUM_XMLPLUS_VERSION:
import sys
sys.modules[__name__] = _xmlplus
else:
del v
| """Core XML support for Python.
This package contains three sub-packages:
dom -- The W3C Document Object Model. This supports DOM Level 1 +
Namespaces.
parsers -- Python wrappers for XML parsers (currently only supports Expat).
sax -- The Simple API for XML, developed by XML-Dev, led by David
Megginson and ported to Python by Lars Marius Garshol. This
supports the SAX 2 API.
"""
try:
import _xmlplus
except ImportError:
pass
else:
import sys
sys.modules[__name__] = _xmlplus
| Python | 0 |
3f0fc980629f0645acb813b2ef8ed5d91761cbcc | add missing pkgconfig dependency and fix boost version range (#9835) | var/spack/repos/builtin/packages/wt/package.py | var/spack/repos/builtin/packages/wt/package.py | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Wt(CMakePackage):
"""Wt, C++ Web Toolkit.
Wt is a C++ library for developing web applications."""
homepage = "http://www.webtoolkit.eu/wt"
url = "https://github.com/emweb/wt/archive/3.3.7.tar.gz"
git = "https://github.com/emweb/wt.git"
version('master', branch='master')
version('3.3.7', '09858901f2dcf5c3d36a9237daba3e3f')
# wt builds in parallel, but requires more than 5 GByte RAM per -j <njob>
# which most machines do not provide and crash the build
parallel = False
variant('openssl', default=True,
description='SSL and WebSockets support in the built-in httpd, '
'the HTTP(S) client, and additional cryptographic '
'hashes in the authentication module')
variant('libharu', default=True, description='painting to PDF')
# variant('graphicsmagick', default=True,
# description='painting to PNG, GIF')
variant('sqlite', default=False, description='create SQLite3 DBO')
variant('mariadb', default=False, description='create MariaDB/MySQL DBO')
variant('postgresql', default=False, description='create PostgreSQL DBO')
# variant('firebird', default=False, description='create Firebird DBO')
variant('pango', default=True,
description='improved font support in PDF and raster image '
'painting')
variant('zlib', default=True,
description='compression in the built-in httpd')
# variant('fastcgi', default=False,
# description='FastCGI connector via libfcgi++')
depends_on('pkgconfig', type='build')
depends_on('boost@1.46.1:1.65')
depends_on('openssl', when='+openssl')
depends_on('libharu', when='+libharu')
depends_on('sqlite', when='+sqlite')
depends_on('mariadb', when='+mariadb')
depends_on('postgresql', when='+postgresql')
depends_on('pango', when='+pango')
depends_on('zlib', when='+zlib')
def cmake_args(self):
spec = self.spec
cmake_args = [
'-DBUILD_EXAMPLES:BOOL=OFF',
'-DCONNECTOR_FCGI:BOOL=OFF',
'-DENABLE_OPENGL:BOOL=OFF',
'-DENABLE_QT4:BOOL=OFF'
]
cmake_args.extend([
'-DENABLE_SSL:BOOL={0}'.format((
'ON' if '+openssl' in spec else 'OFF')),
'-DENABLE_HARU:BOOL={0}'.format((
'ON' if '+libharu' in spec else 'OFF')),
'-DENABLE_PANGO:BOOL={0}'.format((
'ON' if '+pango' in spec else 'OFF')),
'-DENABLE_SQLITE:BOOL={0}'.format((
'ON' if '+sqlite' in spec else 'OFF')),
'-DENABLE_MYSQL:BOOL={0}'.format((
'ON' if '+mariadb' in spec else 'OFF')),
'-DENABLE_POSTGRES:BOOL={0}'.format((
'ON' if '+postgres' in spec else 'OFF'))
])
return cmake_args
| # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Wt(CMakePackage):
"""Wt, C++ Web Toolkit.
Wt is a C++ library for developing web applications."""
homepage = "http://www.webtoolkit.eu/wt"
url = "https://github.com/emweb/wt/archive/3.3.7.tar.gz"
git = "https://github.com/emweb/wt.git"
version('master', branch='master')
version('3.3.7', '09858901f2dcf5c3d36a9237daba3e3f')
# wt builds in parallel, but requires more than 5 GByte RAM per -j <njob>
# which most machines do not provide and crash the build
parallel = False
variant('openssl', default=True,
description='SSL and WebSockets support in the built-in httpd, '
'the HTTP(S) client, and additional cryptographic '
'hashes in the authentication module')
variant('libharu', default=True, description='painting to PDF')
# variant('graphicsmagick', default=True,
# description='painting to PNG, GIF')
variant('sqlite', default=False, description='create SQLite3 DBO')
variant('mariadb', default=False, description='create MariaDB/MySQL DBO')
variant('postgresql', default=False, description='create PostgreSQL DBO')
# variant('firebird', default=False, description='create Firebird DBO')
variant('pango', default=True,
description='improved font support in PDF and raster image '
'painting')
variant('zlib', default=True,
description='compression in the built-in httpd')
# variant('fastcgi', default=False,
# description='FastCGI connector via libfcgi++')
depends_on('boost@1.46.1:')
depends_on('openssl', when='+openssl')
depends_on('libharu', when='+libharu')
depends_on('sqlite', when='+sqlite')
depends_on('mariadb', when='+mariadb')
depends_on('postgresql', when='+postgresql')
depends_on('pango', when='+pango')
depends_on('zlib', when='+zlib')
def cmake_args(self):
spec = self.spec
cmake_args = [
'-DBUILD_EXAMPLES:BOOL=OFF',
'-DCONNECTOR_FCGI:BOOL=OFF',
'-DENABLE_OPENGL:BOOL=OFF',
'-DENABLE_QT4:BOOL=OFF'
]
cmake_args.extend([
'-DENABLE_SSL:BOOL={0}'.format((
'ON' if '+openssl' in spec else 'OFF')),
'-DENABLE_HARU:BOOL={0}'.format((
'ON' if '+libharu' in spec else 'OFF')),
'-DENABLE_PANGO:BOOL={0}'.format((
'ON' if '+pango' in spec else 'OFF')),
'-DENABLE_SQLITE:BOOL={0}'.format((
'ON' if '+sqlite' in spec else 'OFF')),
'-DENABLE_MYSQL:BOOL={0}'.format((
'ON' if '+mariadb' in spec else 'OFF')),
'-DENABLE_POSTGRES:BOOL={0}'.format((
'ON' if '+postgres' in spec else 'OFF'))
])
return cmake_args
| Python | 0 |
308b3f9b2b8a4f2be9bfc09f0c026b54880ec94c | Remove unwanted print statement | gemdeps/views.py | gemdeps/views.py | import json
import os
from flask import Markup, render_template, request
from gemdeps import app
@app.route('/', methods=['GET', 'POST'])
def index():
completedeplist = {}
gemnames = []
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
for app in ['diaspora', 'gitlab', 'asciinema']:
appname = app + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appname)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
completedeplist[app] = deps
gemnames += [str(x['name']) for x in deps]
gemnames = list(set(gemnames))
gemnames = Markup(gemnames)
if request.method == 'GET':
return render_template('index.html', gemnames=gemnames)
else:
apps = request.form.getlist('appname')
gemname = request.form.get('gemname')
gems = {}
flag = 0
for app in apps:
gem = [x for x in completedeplist[app] if x['name'] == gemname]
if gem:
flag = 1
gems[app] = gem
return render_template('index.html',
gemnames=gemnames,
gemname=gemname,
gemlist=gems,
flag=flag)
@app.route('/status/<appname>')
def status(appname):
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
appfilename = appname + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appfilename)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
packaged_count = 0
unpackaged_count = 0
itp_count = 0
total = 0
for n in deps:
if n['status'] == 'Packaged' or n['status'] == 'NEW':
packaged_count += 1
elif n['status'] == 'ITP':
itp_count += 1
else:
unpackaged_count += 1
total = len(deps)
percent_complete = (packaged_count * 100) / total
return render_template('status.html',
appname=appname.title(),
deps=deps,
packaged_count=packaged_count,
unpackaged_count=unpackaged_count,
itp_count=itp_count,
total=total,
percent_complete=percent_complete
)
| import json
import os
from flask import Markup, render_template, request
from gemdeps import app
@app.route('/', methods=['GET', 'POST'])
def index():
completedeplist = {}
gemnames = []
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
for app in ['diaspora', 'gitlab', 'asciinema']:
appname = app + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appname)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
completedeplist[app] = deps
gemnames += [str(x['name']) for x in deps]
gemnames = list(set(gemnames))
gemnames = Markup(gemnames)
print completedeplist
if request.method == 'GET':
return render_template('index.html', gemnames=gemnames)
else:
apps = request.form.getlist('appname')
gemname = request.form.get('gemname')
gems = {}
flag = 0
for app in apps:
gem = [x for x in completedeplist[app] if x['name'] == gemname]
if gem:
flag = 1
gems[app] = gem
return render_template('index.html',
gemnames=gemnames,
gemname=gemname,
gemlist=gems,
flag=flag)
@app.route('/status/<appname>')
def status(appname):
SITE_ROOT = os.path.realpath(os.path.dirname(__file__))
appfilename = appname + "_debian_status.json"
filepath = os.path.join(SITE_ROOT, "static", appfilename)
inputfile = open(filepath)
filecontent = inputfile.read()
inputfile.close()
deps = json.loads(filecontent)
packaged_count = 0
unpackaged_count = 0
itp_count = 0
total = 0
for n in deps:
if n['status'] == 'Packaged' or n['status'] == 'NEW':
packaged_count += 1
elif n['status'] == 'ITP':
itp_count += 1
else:
unpackaged_count += 1
total = len(deps)
percent_complete = (packaged_count * 100) / total
return render_template('status.html',
appname=appname.title(),
deps=deps,
packaged_count=packaged_count,
unpackaged_count=unpackaged_count,
itp_count=itp_count,
total=total,
percent_complete=percent_complete
)
| Python | 0.000034 |
49fafd2107719f0d0c588e85bb8c37a9d60a0845 | Fix PEP8 and remove pdb | sponsorship_tracking/wizard/sub_sponsorship_wizard.py | sponsorship_tracking/wizard/sub_sponsorship_wizard.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, exceptions, _
class sub_sponsorship_wizard(models.TransientModel):
_name = "sds.subsponsorship.wizard"
state = fields.Selection([
('sub', 'sub'),
('no_sub', 'no_sub')])
child_id = fields.Many2one(
'compassion.child', 'Child')
channel = fields.Selection('_get_channels')
no_sub_default_reasons = fields.Selection(
'_get_no_sub_reasons', 'No sub reason')
no_sub_reason = fields.Char('No sub reason')
def _get_no_sub_reasons(self):
return [
('other_sponsorship', _('Sponsors other children')),
('financial', _('Financial reasons')),
('old', _('Is too old to sponsor another child')),
('other_support', _('Wants to support with fund donations')),
('other_organization', _('Supports another organization')),
('not_now', _("Doesn't want to take another child right now")),
('not_given', _('Not given')),
('other', _('Other...'))
]
def _get_channels(self):
"""Returns the available channel through the new sponsor
reached Compassion.
"""
return self.env['recurring.contract']._get_channels()
@api.multi
def create_subsponsorship(self):
""" Creates a subsponsorship. """
self.ensure_one()
child = self.child_id
if not child:
raise exceptions.Warning(
_("No child selected"),
_("Please select a child"))
sponsorship_id = self.env.context.get('active_id')
contract_obj = self.env['recurring.contract']
contract = contract_obj.browse(sponsorship_id)
origin_obj = self.env['recurring.contract.origin']
sub_origin_id = origin_obj.search([('type', '=', 'sub')], limit=1).id
sub_contract = contract.copy({
'parent_id': sponsorship_id,
'origin_id': sub_origin_id,
'channel': self.channel,
})
sub_contract.write({'child_id': child.id})
sub_contract.signal_workflow('contract_validated')
return True
@api.multi
def no_sub(self):
""" No SUB for the sponsorship. """
self.ensure_one()
sponsorship_id = self.env.context.get('active_id')
contract = self.env['recurring.contract'].browse(sponsorship_id)
default_reason = self.no_sub_default_reasons
reason = False
if default_reason == 'other':
reason = self.no_sub_reason
else:
reason = dict(self._get_no_sub_reasons()).get(default_reason)
contract.write({'no_sub_reason': reason})
contract.signal_workflow('no_sub')
return True
| # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import api, models, fields, exceptions, _
import pdb
class sub_sponsorship_wizard(models.TransientModel):
_name = "sds.subsponsorship.wizard"
state = fields.Selection([
('sub', 'sub'),
('no_sub', 'no_sub')])
child_id = fields.Many2one(
'compassion.child', 'Child')
channel = fields.Selection('_get_channels')
no_sub_default_reasons = fields.Selection(
'_get_no_sub_reasons', 'No sub reason')
no_sub_reason = fields.Char('No sub reason')
def _get_no_sub_reasons(self):
return [
('other_sponsorship', _('Sponsors other children')),
('financial', _('Financial reasons')),
('old', _('Is too old to sponsor another child')),
('other_support', _('Wants to support with fund donations')),
('other_organization', _('Supports another organization')),
('not_now', _("Doesn't want to take another child right now")),
('not_given', _('Not given')),
('other', _('Other...'))
]
def _get_channels(self):
"""Returns the available channel through the new sponsor
reached Compassion.
"""
return self.env['recurring.contract']._get_channels()
@api.multi
def create_subsponsorship(self):
""" Creates a subsponsorship. """
self.ensure_one()
child = self.child_id
if not child:
raise exceptions.Warning(
_("No child selected"),
_("Please select a child"))
sponsorship_id = self.env.context.get('active_id')
contract_obj = self.env['recurring.contract']
contract = contract_obj.browse(sponsorship_id)
origin_obj = self.env['recurring.contract.origin']
sub_origin_id = origin_obj.search([('type', '=', 'sub')], limit=1).id
pdb.set_trace()
sub_contract = contract.copy({
'parent_id': sponsorship_id,
'origin_id': sub_origin_id,
'channel': self.channel,
})
sub_contract.write({'child_id': child.id})
sub_contract.signal_workflow('contract_validated')
return True
@api.multi
def no_sub(self):
""" No SUB for the sponsorship. """
self.ensure_one()
sponsorship_id = self.env.context.get('active_id')
contract = self.env['recurring.contract'].browse(sponsorship_id)
default_reason = self.no_sub_default_reasons
reason = False
if default_reason == 'other':
reason = self.no_sub_reason
else:
reason = dict(self._get_no_sub_reasons()).get(default_reason)
contract.write({'no_sub_reason': reason})
contract.signal_workflow('no_sub')
return True
| Python | 0.000001 |
48f593bae26e1a587789a41aa82f9f984271bb4c | add check mode to dhcp_server | library/mt_dhcp_server.py | library/mt_dhcp_server.py | # -*- coding: utf-8 -*-
DOCUMENTATION = '''
module: mt_dhcp_server.py
author:
- "Valentin Gurmeza"
version_added: "2.4"
short_description: Manage mikrotik dhcp-server endpoints
requirements:
- mt_api
description:
- Mikrotik dhcp-server generic module
options:
hostname:
description:
- hotstname of mikrotik router
required: True
username:
description:
- username used to connect to mikrotik router
required: True
password:
description:
- password used for authentication to mikrotik router
required: True
parameter:
description:
- sub endpoint for mikrotik tool
required: True
options:
- netwatch
- e-mail
settings:
description:
- All Mikrotik compatible parameters for this particular endpoint.
Any yes/no values must be enclosed in double quotes
state:
description:
- absent or present
'''
EXAMPLES = '''
- mt_dhcp_server:
hostname: "{{ inventory_hostname }}"
username: "{{ mt_user }}"
password: "{{ mt_pass }}"
parameter: network
settings:
address: 192.168.1.0/24
dns: 192.168.1.20
'''
from mt_common import clean_params, MikrotikIdempotent
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec = dict(
hostname = dict(required=True),
username = dict(required=True),
password = dict(required=True),
settings = dict(required=False, type='dict'),
parameter = dict(
required = True,
choices = ['network', 'option', 'dhcp-server'],
type = 'str'
),
state = dict(
required = False,
choices = ['present', 'absent'],
type = 'str'
),
),
supports_check_mode=True
)
idempotent_parameter = None
params = module.params
if params['parameter'] == 'network':
idempotent_parameter = 'address'
params['parameter'] = "dhcp-server/network"
if params['parameter'] == 'option':
idempotent_parameter = 'name'
params['parameter'] = "dhcp-server/option"
if params['parameter'] == 'dhcp-server':
idempotent_parameter = 'name'
mt_obj = MikrotikIdempotent(
hostname = params['hostname'],
username = params['username'],
password = params['password'],
state = params['state'],
desired_params = params['settings'],
idempotent_param = idempotent_parameter,
api_path = '/ip/' + str(params['parameter']),
check_mode = module.check_mode,
)
mt_obj.sync_state()
if mt_obj.failed:
module.fail_json(
msg = mt_obj.failed_msg
)
elif mt_obj.changed:
module.exit_json(
failed=False,
changed=True,
msg=mt_obj.changed_msg,
diff={ "prepared": {
"old": mt_obj.old_params,
"new": mt_obj.new_params,
}},
)
else:
module.exit_json(
failed=False,
changed=False,
#msg='',
msg=params['settings'],
)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
DOCUMENTATION = '''
module: mt_dhcp_server.py
author:
- "Valentin Gurmeza"
version_added: "2.4"
short_description: Manage mikrotik dhcp-server endpoints
requirements:
- mt_api
description:
- Mikrotik dhcp-server generic module
options:
hostname:
description:
- hotstname of mikrotik router
required: True
username:
description:
- username used to connect to mikrotik router
required: True
password:
description:
- password used for authentication to mikrotik router
required: True
parameter:
description:
- sub endpoint for mikrotik tool
required: True
options:
- netwatch
- e-mail
settings:
description:
- All Mikrotik compatible parameters for this particular endpoint.
Any yes/no values must be enclosed in double quotes
state:
description:
- absent or present
'''
EXAMPLES = '''
- mt_dhcp_server:
hostname: "{{ inventory_hostname }}"
username: "{{ mt_user }}"
password: "{{ mt_pass }}"
parameter: network
settings:
address: 192.168.1.0/24
dns: 192.168.1.20
'''
from mt_common import clean_params, MikrotikIdempotent
from ansible.module_utils.basic import AnsibleModule
def main():
module = AnsibleModule(
argument_spec = dict(
hostname = dict(required=True),
username = dict(required=True),
password = dict(required=True),
settings = dict(required=False, type='dict'),
parameter = dict(
required = True,
choices = ['network', 'option', 'dhcp-server'],
type = 'str'
),
state = dict(
required = False,
choices = ['present', 'absent'],
type = 'str'
),
)
)
idempotent_parameter = None
params = module.params
if params['parameter'] == 'network':
idempotent_parameter = 'address'
params['parameter'] = "dhcp-server/network"
if params['parameter'] == 'option':
idempotent_parameter = 'name'
params['parameter'] = "dhcp-server/option"
if params['parameter'] == 'dhcp-server':
idempotent_parameter = 'name'
mt_obj = MikrotikIdempotent(
hostname = params['hostname'],
username = params['username'],
password = params['password'],
state = params['state'],
desired_params = params['settings'],
idempotent_param = idempotent_parameter,
api_path = '/ip/' + str(params['parameter']),
)
mt_obj.sync_state()
if mt_obj.failed:
module.fail_json(
msg = mt_obj.failed_msg
)
elif mt_obj.changed:
module.exit_json(
failed=False,
changed=True,
msg=mt_obj.changed_msg,
diff={ "prepared": {
"old": mt_obj.old_params,
"new": mt_obj.new_params,
}},
)
else:
module.exit_json(
failed=False,
changed=False,
#msg='',
msg=params['settings'],
)
if __name__ == '__main__':
main()
| Python | 0 |
7cebbd615544dc165d6711833747bc978c3bd2d6 | fix call | dihedral_mutinf.py | dihedral_mutinf.py | import numpy as np
import mdtraj as md
import argparse
import cPickle
import time
from multiprocessing import Pool
from itertools import combinations_with_replacement as combinations
from sklearn.metrics import mutual_info_score
from contextlib import closing
class timing(object):
"Context manager for printing performance"
def __init__(self, iter):
self.iter = iter
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
print("Round %s : %0.3f seconds" %
(self.iter, end-self.start))
return False
def rbins(n=30):
return np.linspace(-np.pi, np.pi, n+3)[1:-1]
def mi(X, Y, r=rbins()):
H = np.histogram2d(X, Y, [r, r])[0]
return mutual_info_score(None, None, contingency=H)
def dihedrals(traj):
kinds = [md.compute_phi,
md.compute_psi]
return [kind(traj)[1].T for kind in kinds]
class f(object):
def __call__(self, i):
return sum([mi(d[0][i[0]], d[1][i[1]])
for d in combinations(self.D, 2)])
def __init__(self, D):
self.D = D
def run(traj, iter, N):
D = dihedrals(traj)
n = D[0].shape[0]
R = []
for i in range(iter+1):
r = np.zeros((n, n))
g = f(D)
with timing(i):
with closing(Pool(processes=N)) as pool:
r[np.triu_indices(n)] = pool.map(g, combinations(range(n), 2))
pool.terminate()
r[np.triu_indices(n)[::-1]] = r[np.triu_indices(n)]
R.append(r)
[np.random.shuffle(d) for d in D]
return R[0] - np.mean(R[1:], axis=0)
def parse_cmdln():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--input', dest='traj',
help='File containing trajectory.')
parser.add_argument('-s', '--shuffle-iter', dest='iter',
help='Number of shuffle iterations.',
default=100, type=int)
parser.add_argument('-t', '--topology', dest='top',
help='File containing topology.', default=None)
parser.add_argument('-n', '--n-proc', dest='N',
help='Number of processors to be used.',
default=4, type=int)
parser.add_argument('-o', '--output', dest='out',
help='Name of output file.', default='mutinf.pkl')
args = parser.parse_args()
return args
if __name__ == "__main__":
options = parse_cmdln()
traj = md.load(options.traj, top=options.top)
M = run(traj, options.iter, options.N)
cPickle.dump(M, open(options.out, 'wb'))
| import numpy as np
import mdtraj as md
import argparse
import cPickle
import time
from multiprocessing import Pool
from itertools import combinations_with_replacement as combinations
from sklearn.metrics import mutual_info_score
from contextlib import closing
class timing(object):
"Context manager for printing performance"
def __init__(self, iter):
self.iter = iter
def __enter__(self):
self.start = time.time()
def __exit__(self, ty, val, tb):
end = time.time()
print("Round %s : %0.3f seconds" %
(self.iter, end-self.start))
return False
def rbins(n=30):
return np.linspace(-np.pi, np.pi, n+3)[1:-1]
def mi(X, Y, r=rbins()):
H = np.histogram2d(X, Y, [r, r])[0]
return mutual_info_score(None, None, contingency=H)
def dihedrals(traj):
kinds = [md.compute_phi,
md.compute_psi]
return [kind(traj)[1].T for kind in kinds]
class f(object):
def __class__(self, i):
return sum([mi(d[0][i[0]], d[1][i[1]])
for d in combinations(self.D, 2)])
def __init__(self, D):
self.D = D
def run(traj, iter, N):
D = dihedrals(traj)
n = D[0].shape[0]
R = []
for i in range(iter+1):
r = np.zeros((n, n))
g = f(D)
with timing(i):
with closing(Pool(processes=N)) as pool:
r[np.triu_indices(n)] = pool.map(g, combinations(range(n), 2))
pool.terminate()
r[np.triu_indices(n)[::-1]] = r[np.triu_indices(n)]
R.append(r)
[np.random.shuffle(d) for d in D]
return R[0] - np.mean(R[1:], axis=0)
def parse_cmdln():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-i', '--input', dest='traj',
help='File containing trajectory.')
parser.add_argument('-s', '--shuffle-iter', dest='iter',
help='Number of shuffle iterations.',
default=100, type=int)
parser.add_argument('-t', '--topology', dest='top',
help='File containing topology.', default=None)
parser.add_argument('-n', '--n-proc', dest='N',
help='Number of processors to be used.',
default=4, type=int)
parser.add_argument('-o', '--output', dest='out',
help='Name of output file.', default='mutinf.pkl')
args = parser.parse_args()
return args
if __name__ == "__main__":
options = parse_cmdln()
traj = md.load(options.traj, top=options.top)
M = run(traj, options.iter, options.N)
cPickle.dump(M, open(options.out, 'wb'))
| Python | 0.000001 |
3a27568211c07cf614aa9865a2f08d2a9b9bfb71 | Return errors in json only | dinosaurs/views.py | dinosaurs/views.py | import os
import json
import httplib as http
import tornado.web
import tornado.ioloop
from dinosaurs import api
from dinosaurs import settings
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
class DomainAPIHandler(tornado.web.RequestHandler):
def get(self):
self.write({
'availableDomains': settings.DOMAINS.keys()
})
class EmailAPIHandler(tornado.web.RequestHandler):
def write_error(self, status_code, **kwargs):
self.finish({
"code": status_code,
"message": self._reason,
})
def post(self):
try:
req_json = json.loads(self.request.body)
except ValueError:
raise tornado.web.HTTPError(http.BAD_REQUEST)
email = req_json.get('email')
domain = req_json.get('domain')
connection = api.get_connection(domain)
if not email or not domain or not connection:
raise tornado.web.HTTPError(http.BAD_REQUEST)
try:
ret, passwd = api.create_email(connection, email)
except api.YandexException as e:
if e.message != 'occupied':
raise
self.write({})
raise tornado.web.HTTPError(http.FORBIDDEN)
self.write({
'password': passwd,
'email': ret['login'],
'domain': ret['domain']
})
self.set_status(http.CREATED)
| import os
import json
import httplib as http
import tornado.web
import tornado.ioloop
from dinosaurs import api
from dinosaurs import settings
class SingleStatic(tornado.web.StaticFileHandler):
def initialize(self, path):
self.dirname, self.filename = os.path.split(path)
super(SingleStatic, self).initialize(self.dirname)
def get(self, path=None, include_body=True):
super(SingleStatic, self).get(self.filename, include_body)
class DomainAPIHandler(tornado.web.RequestHandler):
def get(self):
self.write({
'availableDomains': settings.DOMAINS.keys()
})
class EmailAPIHandler(tornado.web.RequestHandler):
def post(self):
try:
req_json = json.loads(self.request.body)
except ValueError:
raise tornado.web.HTTPError(http.BAD_REQUEST)
email = req_json.get('email')
domain = req_json.get('domain')
connection = api.get_connection(domain)
if not email or not domain or not connection:
raise tornado.web.HTTPError(http.BAD_REQUEST)
ret, passwd = api.create_email(connection, email)
self.write({
'password': passwd,
'email': ret['login'],
'domain': ret['domain']
})
self.set_status(http.CREATED)
| Python | 0.000002 |
c9f25b7fb983c3d635ab7f13f350a53422059a8c | Handle errors in reloaded code | cpp/pineal-run.py | cpp/pineal-run.py | #!/usr/bin/env python
from __future__ import print_function
import os
from time import sleep
from sys import argv
import logging
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import hy
from pineal.hy_utils import run_hy_code
logger = logging.getLogger("pineal-run")
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
def run_code(ns, history):
"Run last code in the history, if available"
if history:
try:
run_hy_code(history[-1], ns)
except Exception as e:
logger.info("Error evaluating code")
logger.error(e)
history.pop()
run_code(ns, history)
else:
logger.error("Empty history, there is no valid code")
def update_file(file_name, ns, history):
"Update running code, saving in the history"
logger.info("Updating file")
with open(file_name) as f:
code = f.read()
history.append(code)
run_code(ns, history)
def watch_file(file_name, action, *args, **kwargs):
"Return a watchdog observer, it will call the action callback"
def on_modified(event):
"File-changed event"
logger.info(file_name, " changed")
if event.src_path == file_name:
action(file_name, *args, **kwargs)
handler = FileSystemEventHandler()
handler.on_modified = on_modified
observer = Observer()
base_path = os.path.split(file_name)[0]
observer.schedule(handler, path=base_path)
observer.start()
return observer
def main(file_name):
"Main function"
ns = {} # namespace
history = [] # handle old versions of code
update_file(file_name, ns, history)
watcher = watch_file(file_name, update_file, ns, history)
try:
while True:
try:
ns["loop"]()
except Exception as e:
logger.error(e)
history.pop()
run_code(ns, history)
sleep(1.0/120)
except KeyboardInterrupt:
watcher.stop()
watcher.join()
if __name__ == "__main__":
if argv[1:]:
main(argv[1])
else:
print("Usage: ", argv[0], "filename")
| #!/usr/bin/env python
from __future__ import print_function
import os
from time import sleep
from sys import argv
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
import hy
from pineal.hy_utils import run_hy_code
def update_file(file_name, ns, history):
"Update running code, saving in the history"
print("Updating file") # TODO logging
with open(file_name) as f:
code = f.read()
history.append(code)
try:
run_hy_code(code, ns)
except e:
print(e)
history.pop() # TODO test and debug this
def watch_file(file_name, action, *args, **kwargs):
"Return a watchdog observer, it will call the action callback"
def on_modified(event):
"File-changed event"
print("File changed") # TODO logging
if event.src_path == file_name:
action(file_name, *args, **kwargs)
handler = FileSystemEventHandler()
handler.on_modified = on_modified
observer = Observer()
base_path = os.path.split(file_name)[0]
observer.schedule(handler, path=base_path)
observer.start()
return observer
def main(file_name):
"Main function"
ns = {} # namespace
history = [] # handle old versions of code
update_file(file_name, ns, history)
watcher = watch_file(file_name, update_file, ns, history)
try:
while True:
ns["loop"]()
sleep(1.0/120)
except KeyboardInterrupt:
watcher.stop()
watcher.join()
if __name__ == "__main__":
if argv[1:]:
main(argv[1])
else:
print("Usage: ", argv[0], "filename")
| Python | 0.000001 |
f574e19b14ff861c45f6c66c64a2570bdb0e3a3c | Apply change of file name | crawl_comments.py | crawl_comments.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
crawl_comments.py [--sqlite <sqlite>] [--csv <csv>]
Options:
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
sqlite_path = args['--sqlite']
csv_path = args['--csv']
ncrawler = NicoCrawler()
ncrawler.connect_sqlite(sqlite_path)
url = 'http://ch.nicovideo.jp/2016winter_anime'
df = ncrawler.get_all_video_url_of_season(url)
ncrawler.initialize_csv_from_db(csv_path)
# # デイリーランキング1~300位の動画を取得する
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, csv_path, max_page=3)
# ncrawler.get_all_comments_of_csv(csv_path, max_n_iter=1)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__doc__ = '''
Crawl comment from nicovideo.jp
Usage:
main_crawl.py [--sqlite <sqlite>] [--csv <csv>]
Options:
--sqlite <sqlite> (optional) path of comment DB [default: comments.sqlite3]
--csv <csv> (optional) path of csv file contains urls of videos [default: crawled.csv]
'''
from docopt import docopt
from nicocrawler.nicocrawler import NicoCrawler
if __name__ == '__main__':
# コマンドライン引数の取得
args = docopt(__doc__)
sqlite_path = args['--sqlite']
csv_path = args['--csv']
ncrawler = NicoCrawler()
ncrawler.connect_sqlite(sqlite_path)
url = 'http://ch.nicovideo.jp/2016winter_anime'
df = ncrawler.get_all_video_url_of_season(url)
ncrawler.initialize_csv_from_db(csv_path)
# # デイリーランキング1~300位の動画を取得する
# url = 'http://www.nicovideo.jp/ranking/fav/daily/all'
# ncrawler.initialize_csv_from_url(url, csv_path, max_page=3)
# ncrawler.get_all_comments_of_csv(csv_path, max_n_iter=1)
| Python | 0.000001 |
3bc4fa33c3ec9272fed565260677518dcf5957fe | change version to 0.10.0.dev0 | csaps/_version.py | csaps/_version.py | # -*- coding: utf-8 -*-
__version__ = '0.10.0.dev0'
| # -*- coding: utf-8 -*-
__version__ = '0.9.0'
| Python | 0.000006 |
3bb9c0aacdfff372e41d7a8d4c43e71535bff930 | Remove perf regression in not yet finished size estimation code | sdks/python/google/cloud/dataflow/worker/opcounters.py | sdks/python/google/cloud/dataflow/worker/opcounters.py | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counters collect the progress of the Worker for reporting to the service."""
from __future__ import absolute_import
from google.cloud.dataflow.utils.counters import Counter
class OperationCounters(object):
"""The set of basic counters to attach to an Operation."""
def __init__(self, counter_factory, step_name, coder, output_index):
self.element_counter = counter_factory.get_counter(
'%s-out%d-ElementCount' % (step_name, output_index), Counter.SUM)
self.mean_byte_counter = counter_factory.get_counter(
'%s-out%d-MeanByteCount' % (step_name, output_index), Counter.MEAN)
self.coder = coder
def update_from(self, windowed_value, coder=None):
"""Add one value to this counter."""
self.element_counter.update(1)
# TODO(silviuc): Implement estimated size sampling.
# TODO(gildea):
# Actually compute the encoded size of this value.
# In spirit, something like this:
# if coder is None:
# coder = self.coder
# coder.store_estimated_size(windowed_value, byte_size_accumulator)
# but will need to do sampling.
def update_collect(self):
"""Collects the accumulated size estimates.
Now that the element has been processed, we ask our accumulator
for the total and store the result in a counter.
"""
# TODO(silviuc): Implement estimated size sampling.
pass
def __str__(self):
return '<%s [%s]>' % (self.__class__.__name__,
', '.join([str(x) for x in self.__iter__()]))
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__,
[x for x in self.__iter__()], hex(id(self)))
| # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Counters collect the progress of the Worker for reporting to the service."""
from __future__ import absolute_import
from google.cloud.dataflow.utils.counters import Accumulator
from google.cloud.dataflow.utils.counters import Counter
class OperationCounters(object):
"""The set of basic counters to attach to an Operation."""
def __init__(self, counter_factory, step_name, coder, output_index):
self.element_counter = counter_factory.get_counter(
'%s-out%d-ElementCount' % (step_name, output_index), Counter.SUM)
self.mean_byte_counter = counter_factory.get_counter(
'%s-out%d-MeanByteCount' % (step_name, output_index), Counter.MEAN)
self.coder = coder
self._active_accumulators = []
def update_from(self, windowed_value, coder=None):
"""Add one value to this counter."""
self.element_counter.update(1)
byte_size_accumulator = Accumulator(self.mean_byte_counter.name)
self._active_accumulators.append(byte_size_accumulator)
# TODO(gildea):
# Actually compute the encoded size of this value.
# In spirit, something like this:
# if coder is None:
# coder = self.coder
# coder.store_estimated_size(windowed_value, byte_size_accumulator)
# but will need to do sampling.
def update_collect(self):
"""Collects the accumulated size estimates.
Now that the element has been processed, we ask our accumulator
for the total and store the result in a counter.
"""
for pending in self._active_accumulators:
self.mean_byte_counter.update(pending.total)
self._active_accumulators = []
def __str__(self):
return '<%s [%s]>' % (self.__class__.__name__,
', '.join([str(x) for x in self.__iter__()]))
def __repr__(self):
return '<%s %s at %s>' % (self.__class__.__name__,
[x for x in self.__iter__()], hex(id(self)))
| Python | 0.000001 |
4920391c4e6d690264ebc0bb829ad9b9a374917d | math is hard | services/extract-entities/entityextractor/aggregate.py | services/extract-entities/entityextractor/aggregate.py | import logging
from banal import ensure_list
from collections import Counter
from alephclient.services.entityextract_pb2 import ExtractedEntity
from entityextractor.extract import extract_polyglot, extract_spacy
from entityextractor.patterns import extract_patterns
from entityextractor.cluster import Cluster
log = logging.getLogger(__name__)
class EntityAggregator(object):
MAX_COUNTRIES = 3
CUTOFF = 0.01
def __init__(self):
self.clusters = []
self._countries = Counter()
self.record = 0
def extract(self, text, languages):
self.record += 1
for result in extract_polyglot(self, text, languages):
self.add(result)
for result in extract_spacy(self, text, languages):
self.add(result)
for result in extract_patterns(self, text):
self.add(result)
def add(self, result):
countries = [c.lower() for c in ensure_list(result.countries)]
self._countries.update(countries)
if not result.valid:
return
# TODO: make a hash?
for cluster in self.clusters:
if cluster.match(result):
return cluster.add(result)
self.clusters.append(Cluster(result))
@property
def countries(self):
cs = self._countries.most_common(n=self.MAX_COUNTRIES)
return [c for (c, n) in cs]
@property
def entities(self):
total_weight = sum([c.weight for c in self.clusters if not c.strict])
total_weight = float(max(1, total_weight))
for cluster in self.clusters:
# only using locations for country detection at the moment:
if cluster.category == ExtractedEntity.LOCATION:
continue
# skip entities that do not meet a threshold of relevance:
if not cluster.strict:
if (cluster.weight / total_weight) < self.CUTOFF:
continue
# log.info('%s: %s: %s', group.label, group.category, group.weight)
yield cluster.label, cluster.category, cluster.weight
for (country, weight) in self._countries.items():
yield country, ExtractedEntity.COUNTRY, weight
def __len__(self):
return len(self.clusters)
| import logging
from banal import ensure_list
from collections import Counter
from alephclient.services.entityextract_pb2 import ExtractedEntity
from entityextractor.extract import extract_polyglot, extract_spacy
from entityextractor.patterns import extract_patterns
from entityextractor.cluster import Cluster
log = logging.getLogger(__name__)
class EntityAggregator(object):
MAX_COUNTRIES = 3
CUTOFF = 0.01
def __init__(self):
self.clusters = []
self._countries = Counter()
self.record = 0
def extract(self, text, languages):
self.record += 1
for result in extract_polyglot(self, text, languages):
self.add(result)
for result in extract_spacy(self, text, languages):
self.add(result)
for result in extract_patterns(self, text):
self.add(result)
def add(self, result):
countries = [c.lower() for c in ensure_list(result.countries)]
self._countries.update(countries)
if not result.valid:
return
# TODO: make a hash?
for cluster in self.clusters:
if cluster.match(result):
return cluster.add(result)
self.clusters.append(Cluster(result))
@property
def countries(self):
cs = self._countries.most_common(n=self.MAX_COUNTRIES)
return [c for (c, n) in cs]
@property
def entities(self):
total_weight = sum([c.weight for c in self.clusters if c.strict])
for cluster in self.clusters:
# only using locations for country detection at the moment:
if cluster.category == ExtractedEntity.LOCATION:
continue
# skip entities that do not meet a threshold of relevance:
if not cluster.strict:
if (cluster.weight / total_weight) < self.CUTOFF:
continue
# log.info('%s: %s: %s', group.label, group.category, group.weight)
yield cluster.label, cluster.category, cluster.weight
for (country, weight) in self._countries.items():
yield country, ExtractedEntity.COUNTRY, weight
def __len__(self):
return len(self.clusters)
| Python | 0.998297 |
aab7c01c94088594258e33e3074f76d8735b8c2e | Add default config and config schema | mopidy/frontends/mpd/__init__.py | mopidy/frontends/mpd/__init__.py | from __future__ import unicode_literals
import mopidy
from mopidy import ext
from mopidy.utils import config, formatting
default_config = """
[ext.mpd]
# If the MPD extension should be enabled or not
enabled = true
# Which address the MPD server should bind to
#
# 127.0.0.1
# Listens only on the IPv4 loopback interface
# ::1
# Listens only on the IPv6 loopback interface
# 0.0.0.0
# Listens on all IPv4 interfaces
# ::
# Listens on all interfaces, both IPv4 and IPv6
hostname = 127.0.0.1
# Which TCP port the MPD server should listen to
port = 6600
# The password required for connecting to the MPD server
password =
# The maximum number of concurrent connections the MPD server will accept
max_connections = 20
# Number of seconds an MPD client can stay inactive before the connection is
# closed by the server
connection_timeout = 60
"""
__doc__ = """The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Default config:**
.. code-block:: ini
%(config)s
**Usage:**
The frontend is enabled by default.
**Limitations:**
This is a non exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
""" % {'config': formatting.indent(default_config)}
class Extension(ext.Extension):
name = 'Mopidy-MPD'
version = mopidy.__version__
def get_default_config(self):
return default_config
def get_config_schema(self):
schema = config.ExtensionConfigSchema()
schema['hostname'] = config.Hostname()
schema['port'] = config.Port()
schema['password'] = config.String(optional=True, secret=True)
schema['max_connections'] = config.Integer(minimum=1)
schema['connection_timeout'] = config.Integer(minimum=1)
return schema
def validate_environment(self):
pass
def get_frontend_classes(self):
from .actor import MpdFrontend
return [MpdFrontend]
| from __future__ import unicode_literals
import mopidy
from mopidy import ext
__doc__ = """The MPD server frontend.
MPD stands for Music Player Daemon. MPD is an independent project and server.
Mopidy implements the MPD protocol, and is thus compatible with clients for the
original MPD server.
**Dependencies:**
- None
**Settings:**
- :attr:`mopidy.settings.MPD_SERVER_HOSTNAME`
- :attr:`mopidy.settings.MPD_SERVER_PORT`
- :attr:`mopidy.settings.MPD_SERVER_PASSWORD`
**Usage:**
The frontend is enabled by default.
**Limitations:**
This is a non exhaustive list of MPD features that Mopidy doesn't support.
Items on this list will probably not be supported in the near future.
- Toggling of audio outputs is not supported
- Channels for client-to-client communication are not supported
- Stickers are not supported
- Crossfade is not supported
- Replay gain is not supported
- ``count`` does not provide any statistics
- ``stats`` does not provide any statistics
- ``list`` does not support listing tracks by genre
- ``decoders`` does not provide information about available decoders
The following items are currently not supported, but should be added in the
near future:
- Modifying stored playlists is not supported
- ``tagtypes`` is not supported
- Browsing the file system is not supported
- Live update of the music database is not supported
"""
class Extension(ext.Extension):
name = 'Mopidy-MPD'
version = mopidy.__version__
def get_default_config(self):
return '[ext.mpd]'
def validate_config(self, config):
pass
def validate_environment(self):
pass
def get_frontend_classes(self):
from .actor import MpdFrontend
return [MpdFrontend]
| Python | 0 |
43653246bfdcf78e76bb41846fbf80ac2e5dc0f2 | Use declared_attr for ColorMixin columns | indico/core/db/sqlalchemy/colors.py | indico/core/db/sqlalchemy/colors.py | # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import string
from collections import namedtuple
from sqlalchemy.ext.declarative import declared_attr
from indico.core.db import db
class ColorTuple(namedtuple('ColorTuple', ('text', 'background'))):
"""A tuple that contains text and background color.
Both colors are unified to 'rrggbb' notation (in case 'rgb' is
passed) and leading ``#`` is stripped.
When a text/background color is specifie, the other color needs
to be specified too. If no color is specified, the ColorTuple
is falsy.
"""
def __new__(cls, text, background):
colors = [text, background]
for i, color in enumerate(colors):
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
color = ''.join(x * 2 for x in color)
colors[i] = color.lower()
if any(colors):
if not all(colors):
raise ValueError('Both colors must be specified')
if not all(len(x) == 6 for x in colors):
raise ValueError('Colors must be be `rgb` or `rrggbb`')
if not all(c in string.hexdigits for color in colors for c in color):
raise ValueError('Colors must only use hex digits')
return super(ColorTuple, cls).__new__(cls, *colors)
def __nonzero__(self):
return all(self)
class ColorMixin(object):
"""Mixin to store text+background colors in a model.
For convenience (e.g. for WTForms integrations when selecting both
colors at the same time from a palette or in a compound field) it
provides a `colors` property which returns/accepts a `ColorTuple`
holding text color and background color.
"""
@declared_attr
def text_color(cls):
return db.Column(
db.String,
nullable=False,
default=''
)
@declared_attr
def background_color(cls):
return db.Column(
db.String,
nullable=False,
default=''
)
@property
def colors(self):
return ColorTuple(self.text_color, self.background_color)
@colors.setter
def colors(self, value):
if value is None:
value = '', ''
self.text_color, self.background_color = value
| # This file is part of Indico.
# Copyright (C) 2002 - 2015 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import string
from collections import namedtuple
from indico.core.db import db
class ColorTuple(namedtuple('ColorTuple', ('text', 'background'))):
"""A tuple that contains text and background color.
Both colors are unified to 'rrggbb' notation (in case 'rgb' is
passed) and leading ``#`` is stripped.
When a text/background color is specifie, the other color needs
to be specified too. If no color is specified, the ColorTuple
is falsy.
"""
def __new__(cls, text, background):
colors = [text, background]
for i, color in enumerate(colors):
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
color = ''.join(x * 2 for x in color)
colors[i] = color.lower()
if any(colors):
if not all(colors):
raise ValueError('Both colors must be specified')
if not all(len(x) == 6 for x in colors):
raise ValueError('Colors must be be `rgb` or `rrggbb`')
if not all(c in string.hexdigits for color in colors for c in color):
raise ValueError('Colors must only use hex digits')
return super(ColorTuple, cls).__new__(cls, *colors)
def __nonzero__(self):
return all(self)
class ColorMixin(object):
"""Mixin to store text+background colors in a model.
For convenience (e.g. for WTForms integrations when selecting both
colors at the same time from a palette or in a compound field) it
provides a `colors` property which returns/accepts a `ColorTuple`
holding text color and background color.
"""
text_color = db.Column(
db.String,
nullable=False,
default=''
)
background_color = db.Column(
db.String,
nullable=False,
default=''
)
@property
def colors(self):
return ColorTuple(self.text_color, self.background_color)
@colors.setter
def colors(self, value):
if value is None:
value = '', ''
self.text_color, self.background_color = value
| Python | 0 |
2b50fd475829aa25889b49da4d4a2dcdcece9893 | Remove unused imports. | src/Products/UserAndGroupSelectionWidget/at/widget.py | src/Products/UserAndGroupSelectionWidget/at/widget.py | import types
from zope.component import ComponentLookupError
from AccessControl import ClassSecurityInfo
from Products.Archetypes.Widget import TypesWidget
from Products.Archetypes.Registry import registerWidget
from Products.UserAndGroupSelectionWidget.interfaces import IGenericGroupTranslation
class UserAndGroupSelectionWidget(TypesWidget):
_properties = TypesWidget._properties.copy()
_properties.update({
'macro' : "userandgroupselect",
'helper_js' : ('userandgroupselect.js',),
'size' : 7, # size of form-element taking the users
'groupName' : '', # takes the given group as default,
# a group id
'usersOnly' : False, # only allow user selection
'groupsOnly' : False, # allow only group selection
'groupIdFilter' : '*', # allow all groups
'searchableProperties' : (), # which properties you want to search as well
# eg. ('email', 'fullname', 'location')
})
security = ClassSecurityInfo()
security.declarePublic('process_form')
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=None,):
"""process the form data and return it."""
result = TypesWidget.process_form (self, instance, field, form,
empty_marker, emptyReturnsMarker, )
if result is empty_marker:
return result
value, kwargs = result
# The widget always returns a empty item (strange) when we use the
# multival option.
# Remove the empty items manually
if type(value) is types.ListType:
value = [item for item in value if item]
return value, kwargs
security.declarePublic('getGroupId')
def getGroupId(self, instance):
groupid = self.groupName
try:
translator = IGenericGroupTranslation(instance)
except ComponentLookupError:
pass
except TypeError, e:
if e[0] == 'Could not adapt':
pass
else:
raise
else:
groupid = translator.translateToRealGroupId(self.groupName)
return groupid
registerWidget(
UserAndGroupSelectionWidget,
title='User and Group Selection Widget',
description=('You can select users searched from a popup window.'),
used_for=('Products.Archetypes.Field.LinesField',
'Products.Archetypes.Field.StringField', ))
| import types
from zope.component import ComponentLookupError
from Globals import InitializeClass
from AccessControl import ClassSecurityInfo
from Products.Archetypes.Widget import TypesWidget
from Products.Archetypes.Registry import registerWidget
from Products.Archetypes.utils import shasattr
from Products.UserAndGroupSelectionWidget.interfaces import IGenericGroupTranslation
class UserAndGroupSelectionWidget(TypesWidget):
_properties = TypesWidget._properties.copy()
_properties.update({
'macro' : "userandgroupselect",
'helper_js' : ('userandgroupselect.js',),
'size' : 7, # size of form-element taking the users
'groupName' : '', # takes the given group as default,
# a group id
'usersOnly' : False, # only allow user selection
'groupsOnly' : False, # allow only group selection
'groupIdFilter' : '*', # allow all groups
'searchableProperties' : (), # which properties you want to search as well
# eg. ('email', 'fullname', 'location')
})
security = ClassSecurityInfo()
security.declarePublic('process_form')
def process_form(self, instance, field, form, empty_marker=None,
emptyReturnsMarker=None,):
"""process the form data and return it."""
result = TypesWidget.process_form (self, instance, field, form,
empty_marker, emptyReturnsMarker, )
if result is empty_marker:
return result
value, kwargs = result
# The widget always returns a empty item (strange) when we use the
# multival option.
# Remove the empty items manually
if type(value) is types.ListType:
value = [item for item in value if item]
return value, kwargs
security.declarePublic('getGroupId')
def getGroupId(self, instance):
groupid = self.groupName
try:
translator = IGenericGroupTranslation(instance)
except ComponentLookupError:
pass
except TypeError, e:
if e[0] == 'Could not adapt':
pass
else:
raise
else:
groupid = translator.translateToRealGroupId(self.groupName)
return groupid
registerWidget(
UserAndGroupSelectionWidget,
title='User and Group Selection Widget',
description=('You can select users searched from a popup window.'),
used_for=('Products.Archetypes.Field.LinesField',
'Products.Archetypes.Field.StringField', ))
| Python | 0 |
317926c18ac2e139d2018acd767d10b4f53428f3 | Remove unneeded post method from CreateEnvProfile view | installer/installer_config/views.py | installer/installer_config/views.py | from django.shortcuts import render
from django.shortcuts import render_to_response
from django.views.generic import CreateView, UpdateView, DeleteView
from installer_config.models import EnvironmentProfile, UserChoice, Step
from installer_config.forms import EnvironmentForm
from django.core.urlresolvers import reverse
class CreateEnvironmentProfile(CreateView):
model = EnvironmentProfile
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
def form_valid(self, form):
form.instance.user = self.request.user
return super(CreateEnvironmentProfile, self).form_valid(form)
class UpdateEnvironmentProfile(UpdateView):
model = EnvironmentProfile
context_object_name = 'profile'
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
class DeleteEnvironmentProfile(DeleteView):
model = EnvironmentProfile
success_url = '/profile'
def download_profile_view(request, **kwargs):
choices = UserChoice.objects.filter(profiles=kwargs['pk']).all()
response = render_to_response('installer_template.py', {'choices': choices},
content_type='application')
response['Content-Disposition'] = 'attachment; filename=something.py'
return response
| from django.shortcuts import render
from django.shortcuts import render_to_response
from django.views.generic import CreateView, UpdateView, DeleteView
from installer_config.models import EnvironmentProfile, UserChoice, Step
from installer_config.forms import EnvironmentForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
class CreateEnvironmentProfile(CreateView):
model = EnvironmentProfile
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
def form_valid(self, form):
form.instance.user = self.request.user
return super(CreateEnvironmentProfile, self).form_valid(form)
def post(self, request, *args, **kwargs):
form_class = self.get_form_class()
form = form_class(request.POST)
if form.is_valid():
config_profile = form.save(commit=False)
config_profile.user = request.user
config_profile.save()
return HttpResponseRedirect(reverse('profile:profile'))
return self.render_to_response({'form': form})
class UpdateEnvironmentProfile(UpdateView):
model = EnvironmentProfile
context_object_name = 'profile'
template_name = 'env_profile_form.html'
form_class = EnvironmentForm
success_url = '/profile'
class DeleteEnvironmentProfile(DeleteView):
model = EnvironmentProfile
success_url = '/profile'
def download_profile_view(request, **kwargs):
choices = UserChoice.objects.filter(profiles=kwargs['pk']).all()
# import pdb; pdb.set_trace()
response = render_to_response('installer_template.py', {'choices': choices},
content_type='application')
response['Content-Disposition'] = 'attachment; filename=something.py'
return response
| Python | 0 |
c24dbc2d4d8b59a62a68f326edb350b3c633ea25 | Change the comment of InterleavingMethod.evaluate | interleaving/interleaving_method.py | interleaving/interleaving_method.py | class InterleavingMethod(object):
'''
Interleaving
'''
def interleave(self, k, a, b):
'''
k: the maximum length of resultant interleaving
a: a list of document IDs
b: a list of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def multileave(self, k, *lists):
'''
k: the maximum length of resultant multileaving
*lists: lists of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def evaluate(self, ranking, clicks):
'''
ranking: an instance of Ranking generated by Balanced.interleave
clicks: a list of indices clicked by a user
Return a list of pairs of ranker indices
in which element (i, j) indicates i won j.
e.g. a result [(1, 0), (2, 1), (2, 0)] indicates
ranker 1 won ranker 0, and ranker 2 won ranker 0 as well as ranker 1.
'''
raise NotImplementedError()
| class InterleavingMethod(object):
'''
Interleaving
'''
def interleave(self, k, a, b):
'''
k: the maximum length of resultant interleaving
a: a list of document IDs
b: a list of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def multileave(self, k, *lists):
'''
k: the maximum length of resultant multileaving
*lists: lists of document IDs
Return an instance of Ranking
'''
raise NotImplementedError()
def evaluate(self, ranking, clicks):
'''
ranking: an instance of Ranking generated by Balanced.interleave
clicks: a list of indices clicked by a user
Return one of the following tuples:
- (1, 0): Ranking 'a' won
- (0, 1): Ranking 'b' won
- (0, 0): Tie
'''
raise NotImplementedError()
| Python | 0.000001 |
e94af78bbeae26933d987494e628b18e201f8da2 | fix logger error message | spotseeker_server/management/commands/sync_techloan.py | spotseeker_server/management/commands/sync_techloan.py | # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from schema import Schema
from .techloan.techloan import Techloan
from .techloan.spotseeker import Spots
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Sync techloan data from the cte"
_settings_scheme = Schema({
'server_host': str,
'oauth_key': str,
'oauth_secret': str,
'oauth_user': str,
})
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
try:
self._settings_scheme.validate(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
except Exception as ex:
logger.error(f"Settings misconfigured: {str(ex)}")
return
techloan = self.get_techloan()
spots = self.get_spots()
self.sync_techloan_to_spots(techloan, spots)
def get_techloan(self):
return Techloan.from_cte_api()
def get_spots(self):
return Spots.from_spotseeker_server(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
def sync_techloan_to_spots(self, techloan, spots):
spots.sync_with_techloan(techloan)
spots.upload_data()
| # Copyright 2022 UW-IT, University of Washington
# SPDX-License-Identifier: Apache-2.0
import logging
from django.core.management.base import BaseCommand
from django.conf import settings
from schema import Schema
from .techloan.techloan import Techloan
from .techloan.spotseeker import Spots
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = "Sync techloan data from the cte"
_settings_scheme = Schema({
'server_host': str,
'oauth_key': str,
'oauth_secret': str,
'oauth_user': str,
})
def __init__(self, *args, **kwargs):
super(Command, self).__init__(*args, **kwargs)
def handle(self, *args, **options):
try:
self._settings_scheme.validate(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
except Exception as ex:
logger.error("Settings misconfigured: ", ex)
return
techloan = self.get_techloan()
spots = self.get_spots()
self.sync_techloan_to_spots(techloan, spots)
def get_techloan(self):
return Techloan.from_cte_api()
def get_spots(self):
return Spots.from_spotseeker_server(
settings.SPOTSEEKER_TECHLOAN_UPDATER)
def sync_techloan_to_spots(self, techloan, spots):
spots.sync_with_techloan(techloan)
spots.upload_data()
| Python | 0.000005 |
a9bcbe8bf69403dbf7780843fe362cf8e1f02c95 | update tree topo | mininet/tree/tree.py | mininet/tree/tree.py | #!/usr/bin/env python
from mininet.cli import CLI
from mininet.node import Link
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.term import makeTerm
from functools import partial
def ofp_version(switch, protocols):
protocols_str = ','.join(protocols)
command = 'ovs-vsctl set Bridge %s protocols=%s' % (switch, protocols)
switch.cmd(command.split(' '))
if '__main__' == __name__:
net = Mininet(controller=partial( RemoteController, ip='10.42.0.27', port=6633 ))
c0 = net.addController('c0')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
s3 = net.addSwitch('s3')
s4 = net.addSwitch('s4')
s5 = net.addSwitch('s5')
s6 = net.addSwitch('s6')
h1 = net.addHost('h1')
h2 = net.addHost('h2')
h3 = net.addHost('h3')
h4 = net.addHost('h4')
net.addLink(s1, h1)
net.addLink(s2, h2)
net.addLink(s5, h3)
net.addLink(s6, h4)
net.addLink(s1, s2)
net.addLink(s2, s3)
net.addLink(s2, s4)
net.addLink(s4, s5)
net.addLink(s4, s6)
net.build()
c0.start()
s1.start([c0])
s2.start([c0])
s3.start([c0])
s4.start([c0])
s5.start([c0])
s6.start([c0])
ofp_version(s1, ['OpenFlow13'])
ofp_version(s2, ['OpenFlow13'])
ofp_version(s3, ['OpenFlow13'])
ofp_version(s4, ['OpenFlow13'])
ofp_version(s5, ['OpenFlow13'])
ofp_version(s6, ['OpenFlow13'])
CLI(net)
net.stop()
| #!/usr/bin/env python
from mininet.cli import CLI
from mininet.link import Link
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.term import makeTerm
def ofp_version(switch, protocols):
protocols_str = ','.join(protocols)
command = 'ovs-vsctl set Bridge %s protocols=%s' % (switch, protocols)
switch.cmd(command.split(' '))
if '__main__' == __name__:
net = Mininet(controller=RemoteController)
c0 = net.addController('c0')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
s3 = net.addSwitch('s3')
s4 = net.addSwitch('s4')
s5 = net.addSwitch('s5')
s6 = net.addSwitch('s6')
h1 = net.addHost('h1')
h2 = net.addHost('h2')
h3 = net.addHost('h3')
h4 = net.addHost('h4')
Link(s1, h1)
Link(s2, h2)
Link(s5, h3)
Link(s6, h4)
Link(s1, s2)
Link(s2, s3)
Link(s2, s4)
Link(s4, s5)
Link(s4, s6)
net.build()
c0.start()
s1.start([c0])
s2.start([c0])
s3.start([c0])
s4.start([c0])
s5.start([c0])
s6.start([c0])
ofp_version(s1, ['OpenFlow13'])
ofp_version(s2, ['OpenFlow13'])
ofp_version(s3, ['OpenFlow13'])
ofp_version(s4, ['OpenFlow13'])
ofp_version(s5, ['OpenFlow13'])
ofp_version(s6, ['OpenFlow13'])
CLI(net)
net.stop()
| Python | 0.000001 |
e2e57a89b63943857eb2954d0c5bdcf8e2191ff4 | simplify logic for player count requirement | mk2/plugins/alert.py | mk2/plugins/alert.py | import os
import random
from mk2.plugins import Plugin
from mk2.events import Hook, StatPlayerCount
class Alert(Plugin):
interval = Plugin.Property(default=200)
command = Plugin.Property(default="say {message}")
path = Plugin.Property(default="alerts.txt")
min_pcount = Plugin.Property(default=0)
messages = []
def setup(self):
self.register(self.count_check, StatPlayerCount)
if self.path and os.path.exists(self.path):
f = open(self.path, 'r')
for l in f:
l = l.strip()
if l:
self.messages.append(l)
f.close()
def count_check(self, event):
self.requirements_met = event.players_current >= self.min_pcount
def server_started(self, event):
if self.messages:
self.repeating_task(self.repeater, self.interval)
def repeater(self, event):
if self.requirements_met:
self.send_format(self.command, message=random.choice(self.messages))
| import os
import random
from mk2.plugins import Plugin
from mk2.events import Hook, StatPlayerCount
class Alert(Plugin):
interval = Plugin.Property(default=200)
command = Plugin.Property(default="say {message}")
path = Plugin.Property(default="alerts.txt")
min_pcount = Plugin.Property(default=0)
messages = []
def setup(self):
self.register(self.count_check, StatPlayerCount)
if self.path and os.path.exists(self.path):
f = open(self.path, 'r')
for l in f:
l = l.strip()
if l:
self.messages.append(l)
f.close()
def count_check(self, event):
if event.players_current >= self.min_pcount:
self.requirements_met = True
else:
self.requirements_met = False
def server_started(self, event):
if self.messages:
self.repeating_task(self.repeater, self.interval)
def repeater(self, event):
if self.requirements_met:
self.send_format(self.command, message=random.choice(self.messages))
| Python | 0.000011 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.