commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
bf24abb4ffba4f63f641cc61e22357253cdca956 | Fix migration script | src/adhocracy/migration/versions/053_add_newsservice.py | src/adhocracy/migration/versions/053_add_newsservice.py | from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import Boolean, DateTime, Integer, Unicode, UnicodeText
metadata = MetaData()
message_table = Table(
'message', metadata,
Column('id', Integer, primary_key=True),
Column('subject', Unicode(140), nullable=False),
Column('body', UnicodeText(), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('sender_email', Unicode(255), nullable=False),
)
message_recipient_table = Table(
'message_recipient', metadata,
Column('id', Integer, primary_key=True),
Column('message_id', Integer, ForeignKey('message.id'), nullable=False),
Column('recipient_id', Integer, ForeignKey('user.id'), nullable=False),
Column('email_sent', Boolean, default=False),
)
user_table = Table(
'user', metadata,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=True),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=False),
Column('locale', Unicode(7), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime),
Column('banned', Boolean, default=False),
Column('no_help', Boolean, default=False, nullable=True),
Column('page_size', Integer, default=10, nullable=True),
Column('proposal_sort_order', Unicode(50), default=None, nullable=True),
Column('gender', Unicode(1), default=None),
)
def upgrade(migrate_engine):
metadata.bind = migrate_engine
message_table.create()
message_recipient_table.create()
email_messages = Column('email_messages', Boolean, default=True)
email_messages.create(user_table)
def downgrade(migrate_engine):
raise NotImplementedError()
| from datetime import datetime
from sqlalchemy import MetaData, Column, ForeignKey, Table
from sqlalchemy import Boolean, DateTime, Integer, Unicode, UnicodeText
metadata = MetaData()
message_table = Table(
'message', metadata,
Column('id', Integer, primary_key=True),
Column('subject', Unicode(140), nullable=False),
Column('body', UnicodeText(), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime, nullable=True),
Column('creator_id', Integer, ForeignKey('user.id'), nullable=False),
Column('sender_email', Unicode(255), nullable=False),
)
message_recipient_table = Table(
'message_recipient', metadata,
Column('id', Integer, primary_key=True),
Column('message_id', Integer, ForeignKey('message.id'), nullable=False),
Column('recipient_id', Integer, ForeignKey('user.id'), nullable=False),
Column('email_sent', Boolean, default=False),
)
user_table = Table(
'user', metadata,
Column('id', Integer, primary_key=True),
Column('user_name', Unicode(255), nullable=False, unique=True, index=True),
Column('display_name', Unicode(255), nullable=True, index=True),
Column('bio', UnicodeText(), nullable=True),
Column('email', Unicode(255), nullable=True, unique=True),
Column('email_priority', Integer, default=3),
Column('activation_code', Unicode(255), nullable=True, unique=False),
Column('reset_code', Unicode(255), nullable=True, unique=False),
Column('password', Unicode(80), nullable=False),
Column('locale', Unicode(7), nullable=True),
Column('create_time', DateTime, default=datetime.utcnow),
Column('access_time', DateTime, default=datetime.utcnow,
onupdate=datetime.utcnow),
Column('delete_time', DateTime),
Column('banned', Boolean, default=False),
Column('no_help', Boolean, default=False, nullable=True),
Column('page_size', Integer, default=10, nullable=True),
Column('proposal_sort_order', Unicode(50), default=None, nullable=True),
Column('gender', Unicode(1), default=None),
)
def upgrade(migrate_engine):
meta.bind = migrate_engine
message_table.create()
message_recipient_table.create()
email_messages = Column('email_messages', Boolean, default=True)
email_messages.create(user_table)
def downgrade(migrate_engine):
raise NotImplementedError()
| Python | 0.000008 |
307d866bb6538a78effcc44e005a4dcb90a2a4b5 | Increment to 0.5.4 | sanic/__init__.py | sanic/__init__.py | from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.4'
__all__ = ['Sanic', 'Blueprint']
| from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.3'
__all__ = ['Sanic', 'Blueprint']
| Python | 0.999999 |
5fd62098bd2f2722876a0873d5856d70046d3889 | Increment to 0.5.2 | sanic/__init__.py | sanic/__init__.py | from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.2'
__all__ = ['Sanic', 'Blueprint']
| from sanic.app import Sanic
from sanic.blueprints import Blueprint
__version__ = '0.5.1'
__all__ = ['Sanic', 'Blueprint']
| Python | 0.999999 |
035938d8c0f3cc2cda353286c0089ee02ffe3b87 | Use dj six | likert_field/models.py | likert_field/models.py | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.six import string_types
from django.utils.translation import ugettext_lazy as _
import likert_field.forms as forms
@python_2_unicode_compatible
class LikertField(models.IntegerField):
"""A Likert field is simply stored as an IntegerField"""
description = _('Likert item field')
def __init__(self, *args, **kwargs):
"""LikertField stores items with no answer as NULL"""
if 'null' not in kwargs and not kwargs.get('null'):
kwargs['null'] = True
super(LikertField, self).__init__(*args, **kwargs)
def __str__(self):
return "%s" % force_text(self.description)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
The field expects a number as a string (ie. '2'). Unscored fields are
empty strings and are stored as NULL
"""
if value is None:
return None
if isinstance(value, string_types) and len(value) == 0:
return None
value = int(value)
if value < 0:
value = 0
return value
def formfield(self, **kwargs):
defaults = {
'min_value': 0,
'form_class': forms.LikertField
}
defaults.update(kwargs)
return super(LikertField, self).formfield(**defaults)
| #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import string_types
from django.db import models
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
import likert_field.forms as forms
@python_2_unicode_compatible
class LikertField(models.IntegerField):
"""A Likert field is simply stored as an IntegerField"""
description = _('Likert item field')
def __init__(self, *args, **kwargs):
if 'null' not in kwargs and not kwargs.get('null'):
kwargs['null'] = True
super(LikertField, self).__init__(*args, **kwargs)
def __str__(self):
return "%s" % force_text(self.description)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
The field expects a number as a string (ie. '2'). Unscored fields are
empty strings and are stored as NULL
"""
if value is None:
return None
if isinstance(value, string_types) and len(value) == 0:
return None
value = int(value)
if value < 0:
value = 0
return value
def formfield(self, **kwargs):
defaults = {
'min_value': 0,
'form_class': forms.LikertField
}
defaults.update(kwargs)
return super(LikertField, self).formfield(**defaults)
| Python | 0.000001 |
2194cc4e96fb2168b55c23a1c7a71636074ae8bf | Fix a comment | scout/adapter/mongo/rank_model.py | scout/adapter/mongo/rank_model.py | # -*- coding: utf-8 -*-
import logging
from io import StringIO
import requests
from configobj import ConfigObj
LOG = logging.getLogger(__name__)
TIMEUT = 20
class RankModelHandler(object):
def fetch_rank_model(self, rank_model_url):
"""Send HTTP request to retrieve rank model config file
Args:
rank_model_url(str): URL to resource containing rank model configuration
Returns:
StringIO(response.text): A StringIO containing the content of the config file
"""
try:
response = requests.get(rank_model_url, timeout=TIMEUT)
return StringIO(response.text)
except Exception as ex:
LOG.warning(ex)
def parse_rank_model(self, stringio):
"""Use configobj lib to extract RankModel key/values and return them in a dictionary
Args:
stringio(StringIO): Content of model from a file as a StringIO
Returns:
ConfigObj.dict(dictionary): dictionary with variant rank model key/values
"""
try:
return ConfigObj(stringio).dict()
except Exception as ex:
LOG.error(ex)
def add_rank_model(self, rank_model_url):
"""Fetch a rank model from remote.
Args:
rank_model_url(string): A string with the url to the rank model ini file to fetch.
Returns:
rank_model(dict): a copy of what was inserted, or None if failed
"""
response = self.fetch_rank_model(rank_model_url)
config = self.parse_rank_model(response)
if config:
config.update({"_id": rank_model_url})
config_id = self.rank_model_collection.insert_one(config).inserted_id
return self.rank_model_collection.find_one(config_id)
return {}
def rank_model_from_url(
self, rank_model_link_prefix, rank_model_version, rank_model_file_extension
):
"""Fetch a rank model configuration for A SNV or SV variant of a case
Args:
rank_model_link_prefix(str): specified in app config file
rank_model_version(string)
rank_model_file_extension(str): specified in app config file
Returns:
rank_model(dict)
"""
rank_model_url = "".join(
[rank_model_link_prefix, str(rank_model_version), rank_model_file_extension]
)
# Check if rank model document is already present in scout database
rank_model = self.rank_model_collection.find_one(rank_model_url)
if not rank_model: # Otherwise fetch it with HTTP request and save it to database
rank_model = self.add_rank_model(rank_model_url)
return rank_model
def get_ranges_info(self, rank_model, category):
"""Extract Rank model params value ranges from a database model.
These numbers will be used to describe model scores on variant page.
Args:
rank_model(dict)
category(string) examples: "Variant_call_quality_filter", "Deleteriousness" ..
Returns:
info(list): list of dictionaries containing "key", "description" and "score_ranges" key/values
"""
info = []
for _, item in rank_model.items():
if (
isinstance(item, dict) is False
or not item.get("category")
or item.get("category").casefold() != category.casefold()
):
continue
rank_info = {
"key": item.get("info_key"),
"description": item.get("description"),
"score_ranges": {},
}
for key, value in item.items():
if isinstance(value, dict) and "score" in value:
rank_info["score_ranges"][key] = value
info.append(rank_info)
return info
| # -*- coding: utf-8 -*-
import logging
from io import StringIO
import requests
from configobj import ConfigObj
LOG = logging.getLogger(__name__)
TIMEUT = 20
class RankModelHandler(object):
def fetch_rank_model(self, rank_model_url):
"""Send HTTP request to retrieve rank model config file
Args:
rank_model_url(str): URL to resource containing rank model configuration
Returns:
StringIO(response.text): A StringIO containing the content of the config file
"""
try:
response = requests.get(rank_model_url, timeout=TIMEUT)
return StringIO(response.text)
except Exception as ex:
LOG.warning(ex)
def parse_rank_model(self, stringio):
"""Use configobj lib to extract RankModel key/values and return them in a dictionary
Args:
stringio(StringIO): Content of model from a file as a StringIO
Returns:
ConfigObj.dict(dictionary): dictionary with variant rank model key/values
"""
try:
return ConfigObj(stringio).dict()
except Exception as ex:
LOG.error(ex)
def add_rank_model(self, rank_model_url):
"""Fetch a rank model from remote.
Args:
rank_model_url(string): A string with the url to the rank model ini file to fetch.
Returns:
rank_model(dict): a copy of what was inserted, or None if failed
"""
response = self.fetch_rank_model(rank_model_url)
config = self.parse_rank_model(response)
if config:
config.update({"_id": rank_model_url})
config_id = self.rank_model_collection.insert_one(config).inserted_id
return self.rank_model_collection.find_one(config_id)
return {}
def rank_model_from_url(
self, rank_model_link_prefix, rank_model_version, rank_model_file_extension
):
"""Fetch a rank model configuration for A SNV or SV variant of a case
Args:
rank_model_link_prefix(str): specified in app config file
rank_model_version(string)
rank_model_file_extension(str): specified in app config file
Returns:
rank_model(dict)
"""
rank_model_url = "".join(
[rank_model_link_prefix, str(rank_model_version), rank_model_file_extension]
)
# Check if rank model document is already present in scout database
rank_model = self.rank_model_collection.find_one(rank_model_url)
if not rank_model: # Otherwise fetch it with HTTP request and save it to database
rank_model = self.add_rank_model(rank_model_url)
return rank_model
def get_ranges_info(self, rank_model, category):
"""Extract Rank model params value ranges from a database model.
These numbers will be used to describe model scores on variant page.
Args:
rank_model(dict)
category(string) examples: "Variant_call_quality_filter", "Deleteriousness" ..
Returns:
info(list) example:
"""
info = []
for _, item in rank_model.items():
if (
isinstance(item, dict) is False
or not item.get("category")
or item.get("category").casefold() != category.casefold()
):
continue
rank_info = {
"key": item.get("info_key"),
"description": item.get("description"),
"score_ranges": {},
}
for key, value in item.items():
if isinstance(value, dict) and "score" in value:
rank_info["score_ranges"][key] = value
info.append(rank_info)
return info
| Python | 0.999759 |
e8254ced75ce9d0df1033b6e4acb8e33f9b00e93 | ''.join want strings | scheduler/send.py | scheduler/send.py | #!/usr/bin/env python
import logging
logger = logging.getLogger('')
from models import Task
def send(function, args=None):
if args is None:
args = []
Task.objects.create(function=function, args=args)
logging.info("[x] Sent %s(%s)" % (function, ", ".join(map(lambda x: "%s" % x, args))))
| #!/usr/bin/env python
import logging
logger = logging.getLogger('')
from models import Task
def send(function, args=None):
if args is None:
args = []
Task.objects.create(function=function, args=args)
logging.info("[x] Sent %s(%s)" % (function, ", ".join(args)))
| Python | 0.999958 |
60156236836944205f3993badcf179aaa6e7ae54 | Add an (unexposed) ResourceHandler so inheriting objects serialise better | ehriportal/portal/api/handlers.py | ehriportal/portal/api/handlers.py | """
Piston handlers for notable resources.
"""
from piston.handler import BaseHandler
from portal import models
class ResourceHandler(BaseHandler):
model = models.Resource
class RepositoryHandler(BaseHandler):
model = models.Repository
class CollectionHandler(BaseHandler):
model = models.Collection
class PlaceHandler(BaseHandler):
model = models.Place
class ContactHandler(BaseHandler):
model = models.Contact
class AuthorityHandler(BaseHandler):
model = models.Authority
| """
Piston handlers for notable resources.
"""
from piston.handler import BaseHandler
from portal import models
class RepositoryHandler(BaseHandler):
model = models.Repository
class CollectionHandler(BaseHandler):
model = models.Collection
class PlaceHandler(BaseHandler):
model = models.Place
class ContactHandler(BaseHandler):
model = models.Contact
class AuthorityHandler(BaseHandler):
model = models.Authority
| Python | 0 |
488e5dd9bcdcba26de98fdbcaba1e23e8b4a8188 | use csv writer for listing scraper | scrape_listing.py | scrape_listing.py | #!/usr/bin/env python
import csv
import sys
import requests
from models.listing import Listing
def scrape_listing(url):
writer = csv.writer(sys.stdout)
response = requests.get(url)
listing = Listing(response.content)
# print('Title: ' + listing.title)
# print('Price: ' + listing.price)
# print('Image URLs: ' + listing.imgs)
# print('Location: ' + listing.location)
# print('Description: ' + listing.description)
# print('Category: ' + listing.category)
# print('Manufacturer: ' + listing.manufacturer)
# print('Caliber: ' + listing.caliber)
# print('Action: ' + listing.action)
# print('Firearm Type: ' + listing.firearm_type)
# print('Listing Date: ' + listing.listed_date)
# print('Post ID: ' + listing.post_id)
# print('Registration: ' + str(listing.registered))
# print('Party Type: ' + listing.party)
writer.writerow([
listing.post_id,
listing.title,
listing.listed_date,
listing.price,
listing.location,
listing.description,
listing.registered,
listing.category,
listing.manufacturer,
listing.caliber,
listing.action,
listing.firearm_type,
listing.party,
listing.imgs
])
print('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13}'.format(listing.title, listing.listed_date, listing.post_id, listing.price, listing.location, listing.description, listing.registered, listing.category, listing.manufacturer, listing.caliber, listing.action, listing.firearm_type, listing.party, listing.imgs))
if __name__ == '__main__':
if len(sys.argv) == 1:
print('url required')
sys.exit()
url = str(sys.argv[1])
scrape_listing(url=url)
| #!/usr/bin/env python
import sys
import requests
from models.listing import Listing
def scrape_listing(url):
response = requests.get(url)
listing = Listing(response.content)
# print('Title: ' + listing.title)
# print('Price: ' + listing.price)
# print('Image URLs: ' + listing.imgs)
# print('Location: ' + listing.location)
# print('Description: ' + listing.description)
# print('Category: ' + listing.category)
# print('Manufacturer: ' + listing.manufacturer)
# print('Caliber: ' + listing.caliber)
# print('Action: ' + listing.action)
# print('Firearm Type: ' + listing.firearm_type)
# print('Listing Date: ' + listing.listed_date)
# print('Post ID: ' + listing.post_id)
# print('Registration: ' + str(listing.registered))
# print('Party Type: ' + listing.party)
print('{0},{1},{2},{3},{4},{5},{6},{7},{8},{9},{10},{11},{12},{13}'.format(listing.title, listing.listed_date, listing.post_id, listing.price, listing.location, listing.description, listing.registered, listing.category, listing.manufacturer, listing.caliber, listing.action, listing.firearm_type, listing.party, listing.imgs))
if __name__ == '__main__':
if len(sys.argv) == 1:
print('url required')
sys.exit()
url = str(sys.argv[1])
scrape_listing(url=url)
| Python | 0 |
ca356ae7b85c9d88f42c5adc6227d0125ff49399 | Update settings.py | udbproject/settings.py | udbproject/settings.py | """
Django settings for udbproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c5d$g#)x!2s91v2nr@h9d21opa*p1&65z)i(#4%@62fm#f!!l-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'udb',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'udbproject.urls'
WSGI_APPLICATION = 'udbproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Seattle'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| """
Django settings for udbproject project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'c5d$g#)x!2s91v2nr@h9d21opa*p1&65z)i(#4%@62fm#f!!l-'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'udb',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'udbproject.urls'
WSGI_APPLICATION = 'udbproject.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'PST'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
| Python | 0 |
844e1917e971e834f7c95064dc7ea31fc7cc0947 | Make build_plugins.py bail on error | build/build_plugins.py | build/build_plugins.py | from __future__ import print_function
import glob, os.path, sys
from mergeex import mergeex
try:
import simplejson as json
except ImportError:
import json
plugins = []
filters = []
for fileName in sorted(glob.glob('../plugins/*.json')):
try:
with open(fileName, 'rb') as f:
content = f.read().decode('utf-8')
plugin = json.loads(content)
plugin['date'] = int(os.path.getmtime(fileName) * 1000)
plugins.append(plugin)
filters.append(plugin['match'])
except IOError as e:
print('Could not open file {0}: {1}'.format(fileName, e), file=sys.stderr)
sys.exit(1)
except ValueError as e:
print('Could not load JSON from file {0}: {1}'.format(fileName, *e.args), file=sys.stderr)
sys.exit(1)
print('Writing combined plugins.')
with open('../modules/plugins.json', 'w') as f:
json.dump(plugins, f)
| from __future__ import print_function
import glob, os.path, sys
from mergeex import mergeex
try:
import simplejson as json
except ImportError:
import json
plugins = []
filters = []
for fileName in sorted(glob.glob('../plugins/*.json')):
try:
with open(fileName, 'rb') as f:
content = f.read().decode('utf-8')
plugin = json.loads(content)
plugin['date'] = int(os.path.getmtime(fileName) * 1000)
plugins.append(plugin)
filters.append(plugin['match'])
except IOError as e:
print('Could not open file {0}: {1}'.format(fileName, e), file=sys.stderr)
except ValueError as e:
print('Could not load JSON from file {0}: {1}'.format(fileName, *e.args), file=sys.stderr)
print('Writing combined plugins.')
with open('../modules/plugins.json', 'w') as f:
json.dump(plugins, f) | Python | 0.000001 |
3b3a7d482b3091959533c6de3138af349a8af558 | Tidy and comment spreadsheet reader module | autumn_model/spreadsheet.py | autumn_model/spreadsheet.py |
from __future__ import print_function
from xlrd import open_workbook
from numpy import nan
import numpy
import os
import tool_kit
#######################################
### Individual spreadsheet readers ###
#######################################
class GlobalTbReportReader:
"""
Reader object for the WHO's Global TB Report 2016. Illustrates general structure for spreadsheet readers.
"""
def __init__(self, country_to_read):
self.data = {}
self.tab_name = 'TB_burden_countries_2016-04-19'
self.key = 'tb'
self.parlist = []
self.filename = 'xls/gtb_data.xlsx'
self.start_row = 1
self.horizontal = False
self.start_column = 0
self.indices = []
self.year_indices = {}
self.country_to_read = tool_kit.adjust_country_name(country_to_read)
def parse_col(self, col):
"""
Read and interpret a column of the spreadsheet
Args:
col: The column to be read
"""
col = tool_kit.replace_specified_value(col, nan, '')
# if it's the country column (the first one), find the indices for the country being simulated
if col[0] == 'country':
for i in range(len(col)):
if col[i] == self.country_to_read: self.indices += [i]
# ignore irrelevant columns
elif 'iso' in col[0] or 'g_who' in col[0] or 'source' in col[0]:
pass
# find years to read from year column
elif col[0] == 'year':
for i in self.indices:
self.year_indices[int(col[i])] = i
# get data from the remaining (data) columns
else:
self.data[str(col[0])] = {}
for year in self.year_indices:
if not numpy.isnan(col[self.year_indices[year]]):
self.data[col[0]][year] = col[self.year_indices[year]]
def get_data(self):
"""
Return the read data.
"""
return self.data
#########################
### Master functions ###
#########################
def read_xls_with_sheet_readers(sheet_readers):
"""
Runs each of the individual readers (currently only one) to gather all the data from the input spreadsheets.
Args:
sheet_readers: The sheet readers that have been collated into a list
Returns:
All the data from the reading process as a single object
"""
result = {}
for reader in sheet_readers:
# check that the spreadsheet to be read exists
try:
print('Reading file', os.getcwd(), reader.filename)
workbook = open_workbook(reader.filename)
# if sheet unavailable, print error message but continue
except:
print('Unable to open spreadsheet')
# if the workbook was found to be available available, read the sheet in question
else:
sheet = workbook.sheet_by_name(reader.tab_name)
# read in the direction that the reader expects (either horizontal or vertical)
if reader.horizontal:
for i_row in range(reader.start_row, sheet.nrows):
reader.parse_row(sheet.row_values(i_row))
else:
for i_col in range(reader.start_column, sheet.ncols):
reader.parse_col(sheet.col_values(i_col))
result[reader.key] = reader.get_data()
return result
def read_input_data_xls(sheets_to_read, country=None):
"""
Compile sheet readers into a list according to which ones have been selected.
Note that most readers now take the country in question as an input,
while only the fixed parameters sheet reader does not.
Args:
sheets_to_read: A list containing the strings that are also the 'keys' attribute of each reader
country: Country being read
Returns:
A single data structure containing all the data to be read
"""
sheet_readers = []
if 'tb' in sheets_to_read: sheet_readers.append(GlobalTbReportReader(country))
for reader in sheet_readers: reader.filename = os.path.join(reader.filename)
return read_xls_with_sheet_readers(sheet_readers)
|
from __future__ import print_function
from xlrd import open_workbook
from numpy import nan
import numpy
import os
import tool_kit
#######################################
### Individual spreadsheet readers ###
#######################################
class GlobalTbReportReader:
def __init__(self, country_to_read):
self.data = {}
self.tab_name = 'TB_burden_countries_2016-04-19'
self.key = 'tb'
self.parlist = []
self.filename = 'xls/gtb_data.xlsx'
self.start_row = 1
self.horizontal = False
self.start_column = 0
self.indices = []
self.year_indices = {}
self.country_to_read = tool_kit.adjust_country_name(country_to_read)
def parse_col(self, col):
col = tool_kit.replace_specified_value(col, nan, '')
# if it's the country column (the first one), find the indices for the country being simulated
if col[0] == 'country':
for i in range(len(col)):
if col[i] == self.country_to_read: self.indices += [i]
# ignore irrelevant columns
elif 'iso' in col[0] or 'g_who' in col[0] or 'source' in col[0]:
pass
# find years to read from year column
elif col[0] == 'year':
for i in self.indices:
self.year_indices[int(col[i])] = i
# get data from remaining columns
else:
self.data[str(col[0])] = {}
for year in self.year_indices:
if not numpy.isnan(col[self.year_indices[year]]):
self.data[col[0]][year] = col[self.year_indices[year]]
def get_data(self):
return self.data
#########################
### Master functions ###
#########################
def read_xls_with_sheet_readers(sheet_readers):
"""
Runs the individual readers to gather all the data from the sheets
Args:
sheet_readers: The sheet readers that were previously collated into a list
Returns:
All the data for reading as a single object
"""
result = {}
for reader in sheet_readers:
# check that the spreadsheet to be read exists
try:
print('Reading file', os.getcwd(), reader.filename)
workbook = open_workbook(reader.filename)
# if sheet unavailable, print error message but continue
except:
print('Unable to open spreadsheet')
# if the workbook was found to be available available, read the sheet in question
else:
sheet = workbook.sheet_by_name(reader.tab_name)
# read in the direction that the reader expects (either horizontal or vertical)
if reader.horizontal:
for i_row in range(reader.start_row, sheet.nrows):
reader.parse_row(sheet.row_values(i_row))
else:
for i_col in range(reader.start_column, sheet.ncols):
reader.parse_col(sheet.col_values(i_col))
result[reader.key] = reader.get_data()
return result
def read_input_data_xls(sheets_to_read, country=None):
"""
Compile sheet readers into a list according to which ones have been selected.
Note that most readers now take the country in question as an input,
while only the fixed parameters sheet reader does not.
Args:
from_test: Whether being called from the directory above
sheets_to_read: A list containing the strings that are also the
'keys' attribute of the reader
country: Country being read for
Returns:
A single data structure containing all the data to be read
(by calling the read_xls_with_sheet_readers method)
"""
sheet_readers = []
if 'tb' in sheets_to_read:
sheet_readers.append(GlobalTbReportReader(country))
for reader in sheet_readers:
reader.filename = os.path.join(reader.filename)
return read_xls_with_sheet_readers(sheet_readers)
| Python | 0 |
6aa7acba495648b710635b465d5b7cd955d9f476 | remove tmp line | api/__database.py | api/__database.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
import os
from core.config import _core_config
from core.config_builder import _core_default_config
from core.config_builder import _builder
from core.alert import warn
from core.alert import messages
def create_connection(language):
try:
return sqlite3.connect(os.path.join(os.path.dirname(os.path.dirname(__file__)),
_builder(_core_config(), _core_default_config())["api_db_name"]))
except:
warn(messages(language, 168))
return False
def submit_report_to_db(date, scan_id, report_filename, events_num, verbose, api_flag, report_type, graph_flag,
category, profile, scan_method, language, scan_cmd):
conn = create_connection(language)
if not conn:
return False
try:
c = conn.cursor()
c.execute("""
INSERT INTO reports (
date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd
)
VALUES (
'{0}', '{1}', '{2}', '{3}', '{4}',
'{5}', '{6}', '{7}', '{8}', '{9}',
'{10}', '{11}', '{12}'
);
""".format(date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd))
conn.commit()
conn.close()
except:
warn(messages(language, 168))
return False
return True
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sqlite3
import os
from core.config import _core_config
from core.config_builder import _core_default_config
from core.config_builder import _builder
from core.alert import warn
from core.alert import messages
def create_connection(language):
try:
return sqlite3.connect(os.path.join(os.path.dirname(os.path.dirname(__file__)),
_builder(_core_config(), _core_default_config())["api_db_name"]))
except:
warn(messages(language, 168))
return False
def submit_report_to_db(date, scan_id, report_filename, events_num, verbose, api_flag, report_type, graph_flag,
category, profile, scan_method, language, scan_cmd):
conn = create_connection(language)
if not conn:
return False
try:
c = conn.cursor()
c.execute("""
INSERT INTO reports (
date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd
)
VALUES (
'{0}', '{1}', '{2}', '{3}', '{4}',
'{5}', '{6}', '{7}', '{8}', '{9}',
'{10}', '{11}', '{12}'
);
""".format(date, scan_id, report_filename, events_num, verbose,
api_flag, report_type, graph_flag, category, profile,
scan_method, language, scan_cmd))
conn.commit()
conn.close()
except:
warn(messages(language, 168))
print 2
return False
return True
| Python | 0.000008 |
4b75e23687c3629d197cbdf0edac23d90e9c52b7 | Add Sample and Observation models | varda/models.py | varda/models.py | """
Models backed by SQL using SQLAlchemy.
"""
from datetime import date
from sqlalchemy import Index
from varda import db
class Variant(db.Model):
"""
Genomic variant.
"""
id = db.Column(db.Integer, primary_key=True)
chromosome = db.Column(db.String(2))
begin = db.Column(db.Integer)
end = db.Column(db.Integer)
reference = db.Column(db.String(200))
variant = db.Column(db.String(200))
def __init__(self, chromosome, begin, end, reference, variant):
self.chromosome = chromosome
self.begin = begin
self.end = end
self.reference = reference
self.variant = variant
def __repr__(self):
return '<Variant chr%s:%i %s>' % (
self.chromosome, self.begin, self.variant)
def to_dict(self):
return {'id': self.id,
'chromosome': self.chromosome,
'begin': self.begin,
'end': self.end,
'reference': self.reference,
'variant': self.variant}
Index('index_variant_position',
Variant.chromosome, Variant.begin, Variant.end)
Index('index_variant_unique',
Variant.chromosome, Variant.begin, Variant.end,
Variant.reference, Variant.variant, unique=True)
class Population(db.Model):
"""
Population study.
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
added = db.Column(db.Date)
size = db.Column(db.Integer)
def __init__(self, name, size=0):
self.name = name
self.size = size
self.added = date.today()
def __repr__(self):
return '<Population %r>' % self.name
def to_dict(self):
return {'id': self.id,
'name': self.name,
'added': str(self.added),
'size': self.size}
class MergedObservation(db.Model):
"""
Observation in a population.
Todo: Add genotype.
"""
population_id = db.Column(db.Integer, db.ForeignKey('population.id'), primary_key=True)
variant_id = db.Column(db.Integer, db.ForeignKey('variant.id'), primary_key=True)
support = db.Column(db.Integer)
population = db.relationship(Population, backref=db.backref('merged_observations', lazy='dynamic'))
variant = db.relationship(Variant, backref=db.backref('merged_observations', lazy='dynamic'))
def __init__(self, population, variant, support=0):
self.population = population
self.variant = variant
self.support = support
def __repr__(self):
return '<MergedObservation %s %r %i>' % (self.population.name, self.variant, self.support)
def to_dict(self):
return {'population': self.population.id,
'variant': self.variant.id,
'support': self.support}
class Sample(db.Model):
"""
Sample.
Todo: do we still need a poolSize in Sample now that we split population
studies to a separate model?
"""
id = db.Column(db.Integer, primary_key=True)
threshold = db.Column(db.Integer)
added = db.Column(db.Date)
comment = db.Column(db.String(200))
def __init__(self, threshold=None, comment=None):
self.threshold = threshold
self.comment = comment
self.added = date.today()
def __repr__(self):
return '<Sample %r>' % self.id
def to_dict(self):
return {'id': self.id,
'threshold': self.threshold,
'added': str(self.added),
'comment': self.comment}
class Observation(db.Model):
"""
Observation in a sample
"""
sample_id = db.Column(db.Integer, db.ForeignKey('sample.id'), primary_key=True)
variant_id = db.Column(db.Integer, db.ForeignKey('variant.id'), primary_key=True)
coverage = db.Column(db.Integer)
support = db.Column(db.Integer)
sample = db.relationship(Sample, backref=db.backref('observations', lazy='dynamic'))
variant = db.relationship(Variant, backref=db.backref('observations', lazy='dynamic'))
def __init__(self, sample, variant, coverage=None, support=None):
self.sample = sample
self.variant = variant
self.coverage = coverage
self.support = support
def __repr__(self):
return '<Observation %i %r %i %i>' % (self.sample.id, self.variant, self.coverage, self.support)
def to_dict(self):
return {'sample': self.sample.id,
'variant': self.variant.id,
'coverage': self.coverage,
'support': self.support}
| """
Models backed by SQL using SQLAlchemy.
"""
from datetime import date
from sqlalchemy import Index
from varda import db
class Variant(db.Model):
"""
Genomic variant.
"""
id = db.Column(db.Integer, primary_key=True)
chromosome = db.Column(db.String(2))
begin = db.Column(db.Integer)
end = db.Column(db.Integer)
reference = db.Column(db.String(200))
variant = db.Column(db.String(200))
def __init__(self, chromosome, begin, end, reference, variant):
self.chromosome = chromosome
self.begin = begin
self.end = end
self.reference = reference
self.variant = variant
def __repr__(self):
return '<Variant chr%s:%i %s>' % (
self.chromosome, self.begin, self.variant)
def to_dict(self):
return {'id': self.id,
'chromosome': self.chromosome,
'begin': self.begin,
'end': self.end,
'reference': self.reference,
'variant': self.variant}
Index('index_variant_position',
Variant.chromosome, Variant.begin, Variant.end)
Index('index_variant_unique',
Variant.chromosome, Variant.begin, Variant.end,
Variant.reference, Variant.variant, unique=True)
class Population(db.Model):
"""
Population study.
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100))
added = db.Column(db.Date)
size = db.Column(db.Integer)
def __init__(self, name, size=0):
self.name = name
self.size = size
self.added = date.today()
def __repr__(self):
return '<Population %r>' % self.name
def to_dict(self):
return {'id': self.id,
'name': self.name,
'added': str(self.added),
'size': self.size}
class MergedObservation(db.Model):
"""
Observation in a population.
Todo: Add genotype.
"""
population_id = db.Column(db.Integer, db.ForeignKey('population.id'), primary_key=True)
variant_id = db.Column(db.Integer, db.ForeignKey('variant.id'), primary_key=True)
support = db.Column(db.Integer)
population = db.relationship(Population, backref=db.backref('merged_observations', lazy='dynamic'))
variant = db.relationship(Variant, backref=db.backref('merged_observations', lazy='dynamic'))
def __init__(self, population, variant, support=0):
self.population = population
self.variant = variant
self.support = support
def __repr__(self):
return '<MergedObservation %s %r %i>' % (self.population.name, self.variant, self.support)
def to_dict(self):
return {'population': self.population.id,
'variant': self.variant.id,
'support': self.support}
| Python | 0 |
591b0550e0724f3e515974fee02d8d40e070e52a | Bump version | lintreview/__init__.py | lintreview/__init__.py | __version__ = '2.25.1'
| __version__ = '2.25.0'
| Python | 0 |
c0b3a1b40149e939e91c5483383f1a1c715a9b9c | Update ipc_lista1.7.py | lista1/ipc_lista1.7.py | lista1/ipc_lista1.7.py | #ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura em
| #ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura
| Python | 0 |
4fb6112552ab7969bddca7193dd51910be51d8b2 | Update ipc_lista1.7.py | lista1/ipc_lista1.7.py | lista1/ipc_lista1.7.py | #ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura do quadrado em
| #ipc_lista1.7
#Professor: Jucimar Junior
#Any Mendes Carvalho
#
#
#
#
#Faça um programa que calcule a área de um quadrado, em seguida mostre o dobro desta área para o #usuário.
altura = input("Digite a altura do quadrado em metros: ")
largura = input("Digite a largura do em
| Python | 0 |
f7d8d58393cf2e9fa69dfde58e5da18758408105 | move order_with_respect_to to correct location (Meta class of models) | api/api/models.py | api/api/models.py | # REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Dict
from django.db import models
from ordered_model.models import OrderedModel
class Song(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.IntegerField()
class Playlist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(Song, through="PlaylistOrder")
@property
def length(self) -> float:
return sum([song.length for song in self.songs.all()])
def __unicode__(self):
return f"{self.name}"
class PlaylistOrder(OrderedModel):
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
song = models.ForeignKey(Song, on_delete=models.CASCADE)
class Meta:
order_with_respect_to = 'playlist'
# distinct entry in the schedule (from pause to pause)
class ScheduleEntry(models.Model):
begin_datetime = models.DateTimeField(max_length=128, unique=True)
playlists = models.ManyToManyField(Playlist, through='ScheduleEntryOrder')
task_id = models.CharField(max_length=256)
# this method is on the model's manager
@staticmethod
def get_closest_to(target_datetime) -> Dict[str, 'ScheduleEntry']:
closest_after = ScheduleEntry.objects.filter(begin_datetime__gt=target_datetime).order_by('begin_datetime')
closest_before = ScheduleEntry.objects.filter(begin_datetime__lt=target_datetime).order_by('-begin_datetime')
closest_entries = {
'before': closest_before.first(),
'after': closest_after.first()
}
return closest_entries
@property
def length(self):
return sum([playlist.length for playlist in self.playlists.all()])
class ScheduleEntryOrder(OrderedModel):
schedule_entry = models.ForeignKey(ScheduleEntry, on_delete=models.CASCADE)
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
class Meta:
order_with_respect_to = 'schedule_entry'
# songs that may not yet be available
class DraftSong(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.FloatField(blank=True)
# generated from uploaded playlists and not all songs may yet be available
# when all songs are available, it can be stored as a Playlist
class DraftPlaylist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(DraftSong, through='DraftPlaylistOrder')
class DraftPlaylistOrder(OrderedModel):
playlist = models.ForeignKey(DraftPlaylist, on_delete=models.CASCADE)
song = models.ForeignKey(DraftSong, on_delete=models.CASCADE)
class Meta:
order_with_respect_to = 'playlist'
| # REST API Backend for the Radiocontrol Project
#
# Copyright (C) 2017 Stefan Derkits <stefan@derkits.at>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from typing import Dict
from django.db import models
from ordered_model.models import OrderedModel
class Song(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.IntegerField()
class Playlist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(Song, through="PlaylistOrder")
@property
def length(self) -> float:
return sum([song.length for song in self.songs.all()])
def __unicode__(self):
return f"{self.name}"
class PlaylistOrder(OrderedModel):
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
song = models.ForeignKey(Song, on_delete=models.CASCADE)
order_with_respect_to = 'playlist'
class Meta:
ordering = ('playlist', 'order')
# distinct entry in the schedule (from pause to pause)
class ScheduleEntry(models.Model):
begin_datetime = models.DateTimeField(max_length=128, unique=True)
playlists = models.ManyToManyField(Playlist, through='ScheduleEntryOrder')
task_id = models.CharField(max_length=256)
# this method is on the model's manager
@staticmethod
def get_closest_to(target_datetime) -> Dict[str, 'ScheduleEntry']:
closest_after = ScheduleEntry.objects.filter(begin_datetime__gt=target_datetime).order_by('begin_datetime')
closest_before = ScheduleEntry.objects.filter(begin_datetime__lt=target_datetime).order_by('-begin_datetime')
closest_entries = {
'before': closest_before.first(),
'after': closest_after.first()
}
return closest_entries
@property
def length(self):
return sum([playlist.length for playlist in self.playlists.all()])
class ScheduleEntryOrder(OrderedModel):
schedule_entry = models.ForeignKey(ScheduleEntry, on_delete=models.CASCADE)
playlist = models.ForeignKey(Playlist, on_delete=models.CASCADE)
order_with_respect_to = 'schedule_entry'
class Meta:
ordering = ('schedule_entry', 'order')
# songs that may not yet be available
class DraftSong(models.Model):
artist = models.CharField(max_length=128)
title = models.CharField(max_length=128)
filename = models.CharField(max_length=256, unique=True)
length = models.FloatField(blank=True)
# generated from uploaded playlists and not all songs may yet be available
# when all songs are available, it can be stored as a Playlist
class DraftPlaylist(models.Model):
name = models.CharField(max_length=128, unique=True)
songs = models.ManyToManyField(DraftSong, through='DraftPlaylistOrder')
class DraftPlaylistOrder(OrderedModel):
playlist = models.ForeignKey(DraftPlaylist, on_delete=models.CASCADE)
song = models.ForeignKey(DraftSong, on_delete=models.CASCADE)
order_with_respect_to = 'playlist'
class Meta:
ordering = ('playlist', 'order')
| Python | 0 |
360ef0dec991d4486ec51f23ffb065d0225347fa | Update ipc_lista1.8.py | lista1/ipc_lista1.8.py | lista1/ipc_lista1.8.py | #ipc_lista1.8
#Professor: Jucimar
| #ipc_lista1.8
#Professor:
| Python | 0 |
93a91ac118ab4e7280562bd0cfac0ea964ae0a7e | remove auth_check import | plstackapi/core/api/sites.py | plstackapi/core/api/sites.py | from types import StringTypes
from django.contrib.auth import authenticate
from plstackapi.openstack.manager import OpenStackManager
from plstackapi.core.models import Site
def _get_sites(filter):
if isinstance(filter, StringTypes) and filter.isdigit():
filter = int(filter)
if isinstance(filter, int):
sites = Site.objects.filter(id=filter)
elif isinstance(filter, StringTypes):
sites = Site.objects.filter(login_base=filter)
elif isinstance(filter, dict):
sites = Site.objects.filter(**filter)
else:
sites = []
return sites
def add_site(auth, fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
site = Site(**fields)
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.save()
return site
def update_site(auth, id, **fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
if not sites:
return
site = Site[0]
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.update(**fields)
return site
def delete_site(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
for site in sites:
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.delete()
return 1
def get_sites(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
sites = _get_sites(filter)
return sites
| from types import StringTypes
from django.contrib.auth import authenticate
from plstackapi.openstack.manager import OpenStackManager
from plstackapi.core.api.auth import auth_check
from plstackapi.core.models import Site
def _get_sites(filter):
if isinstance(filter, StringTypes) and filter.isdigit():
filter = int(filter)
if isinstance(filter, int):
sites = Site.objects.filter(id=filter)
elif isinstance(filter, StringTypes):
sites = Site.objects.filter(login_base=filter)
elif isinstance(filter, dict):
sites = Site.objects.filter(**filter)
else:
sites = []
return sites
def add_site(auth, fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
site = Site(**fields)
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.save()
return site
def update_site(auth, id, **fields):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
if not sites:
return
site = Site[0]
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.update(**fields)
return site
def delete_site(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
auth['tenant'] = user.site.login_base
sites = _get_sites(id)
for site in sites:
site.os_manager = OpenStackManager(auth=auth, caller = user)
site.delete()
return 1
def get_sites(auth, filter={}):
user = authenticate(username=auth.get('username'),
password=auth.get('password'))
sites = _get_sites(filter)
return sites
| Python | 0.000002 |
273aeda221aa12aac7fe1eea51e0aed859cd9098 | move fixme to right pos | sim.py | sim.py | import logging
from cardroom import Game, Table, Player, Stock, Waste, Card
log = logging.getLogger(__name__)
def play_game(players=3, cardsPerPlayer=5):
game = start_new_game(players, cardsPerPlayer)
while not game.over:
game.next_turn()
play_turn(game.player, game.table)
return game
def start_new_game(players, cardsPerPlayer):
players = invite_players(players)
deck = fetch_fresh_deck_of_cards()
ensure_sure_we_are_ok_to_play(players, cardsPerPlayer, deck)
table = set_the_table(deck)
for player in players:
deal_cards(player, table.stock, cardsPerPlayer)
return Game(players, table)
def invite_players(players):
"""Invite players to the game.
:type players: int or list of str
"""
try:
players = [Player(name) for name in players]
except TypeError:
players = [Player("Player %s" % (n)) for n in range(1, players + 1)]
log.debug("invited players are: %s", players)
return players
def fetch_fresh_deck_of_cards():
"""Magic a fresh deck of cards out of nothing from a definition"""
class Def:
values = [7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace']
suits = ['diamonds', 'hearts', 'spades', 'clubs']
deck = Stock([Card(v, s) for v in Def.values for s in Def.suits])
log.debug(str(deck))
return deck
def ensure_sure_we_are_ok_to_play(players, cardsPerPlayer, deck):
assert len(players) > 1
assert len(players) * cardsPerPlayer <= len(deck)
def set_the_table(deck):
deck.shuffle()
stock = deck
upcard = stock.fetch_card()
waste = Waste()
return Table(stock, waste, upcard)
def deal_cards(player, stock, cardsPerPlayer):
deal = stock.fetch_cards(cardsPerPlayer)
player.hand = deal
log.debug(str(player))
def play_turn(player, table):
log.debug("upcard: %s; hand: %s", table.upcard, player.hand)
if not player.play_card(table.upcard, table):
# FIXME this could be more symmetric to what happens in play_card
# - draw_card returns boolean
# - if False (stock empty)
# - replenish stock
# - draw again
ensure_stock_is_replenished(table)
player.draw_card(table.stock)
def ensure_stock_is_replenished(table):
if table.stock.isEmpty:
table.stock = Stock(table.waste.cards)
table.waste = Waste()
table.stock.shuffle()
| import logging
from cardroom import Game, Table, Player, Stock, Waste, Card
log = logging.getLogger(__name__)
def play_game(players=3, cardsPerPlayer=5):
game = start_new_game(players, cardsPerPlayer)
while not game.over:
game.next_turn()
play_turn(game.player, game.table)
return game
def start_new_game(players, cardsPerPlayer):
players = invite_players(players)
deck = fetch_fresh_deck_of_cards()
make_sure_we_are_ok_to_play(players, cardsPerPlayer, deck)
table = set_the_table(deck)
for player in players:
deal_cards(player, table.stock, cardsPerPlayer)
return Game(players, table)
def invite_players(players):
"""Invite players to the game.
:type players: int or list of str
"""
try:
players = [Player(name) for name in players]
except TypeError:
players = [Player("Player %s" % (n)) for n in range(1, players + 1)]
log.debug("invited players are: %s", players)
return players
def fetch_fresh_deck_of_cards():
"""Magic a fresh deck of cards out of nothing from a definition"""
class Def:
values = [7, 8, 9, 10, 'Jack', 'Queen', 'King', 'Ace']
suits = ['diamonds', 'hearts', 'spades', 'clubs']
deck = Stock([Card(v, s) for v in Def.values for s in Def.suits])
log.debug(str(deck))
return deck
def make_sure_we_are_ok_to_play(players, cardsPerPlayer, deck):
assert len(players) > 1
assert len(players) * cardsPerPlayer <= len(deck)
def set_the_table(deck):
deck.shuffle()
stock = deck
upcard = stock.fetch_card()
waste = Waste()
return Table(stock, waste, upcard)
def deal_cards(player, stock, cardsPerPlayer):
deal = stock.fetch_cards(cardsPerPlayer)
player.hand = deal
log.debug(str(player))
def play_turn(player, table):
log.debug("upcard: %s; hand: %s", table.upcard, player.hand)
if not player.play_card(table.upcard, table):
# FIXME this could be more symmetric to what happens in play_card
# - draw_card returns boolean
# - if False (stock empty)
# - replenish stock
# - draw again
ensure_stock_is_replenished(table)
player.draw_card(table.stock)
def ensure_stock_is_replenished(table):
if table.stock.isEmpty:
table.stock = Stock(table.waste.cards)
table.waste = Waste()
table.stock.shuffle()
| Python | 0 |
36ae43735ed899b0ecb7b5679e60e4b0b2496d80 | Move pdf under chromiumcontent | chromiumcontent/chromiumcontent.gyp | chromiumcontent/chromiumcontent.gyp | {
'targets': [
{
'target_name': 'chromiumcontent_all',
'type': 'none',
'dependencies': [
'chromiumcontent',
'<(DEPTH)/chrome/chrome.gyp:chromedriver',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'chromiumviews',
'<(DEPTH)/build/linux/system.gyp:libspeechd',
'<(DEPTH)/third_party/mesa/mesa.gyp:osmesa',
],
}],
['OS=="win"', {
'dependencies': [
'chromiumviews',
],
}],
],
},
{
'target_name': 'chromiumcontent',
# Build chromiumcontent as shared_library otherwise some static libraries
# will not build.
'type': 'shared_library',
'dependencies': [
'<(DEPTH)/base/base.gyp:base_prefs',
'<(DEPTH)/content/content.gyp:content',
'<(DEPTH)/content/content.gyp:content_app_both',
'<(DEPTH)/content/content_shell_and_tests.gyp:content_shell_pak',
'<(DEPTH)/net/net.gyp:net_with_v8',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_host',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_proxy',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_ipc',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_shared',
],
'sources': [
'empty.cc',
],
'conditions': [
['OS=="win"', {
'dependencies': [
'<(DEPTH)/pdf/pdf.gyp:pdf',
],
}],
],
},
],
'conditions': [
['OS in ["win", "linux"]', {
'targets': [
{
'target_name': 'chromiumviews',
'type': 'none',
'dependencies': [
'<(DEPTH)/ui/content_accelerators/ui_content_accelerators.gyp:ui_content_accelerators',
'<(DEPTH)/ui/display/display.gyp:display',
'<(DEPTH)/ui/display/display.gyp:display_util',
'<(DEPTH)/ui/views/controls/webview/webview.gyp:webview',
'<(DEPTH)/ui/views/views.gyp:views',
'<(DEPTH)/ui/wm/wm.gyp:wm',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'<(DEPTH)/chrome/browser/ui/libgtk2ui/libgtk2ui.gyp:gtk2ui',
],
}], # OS=="linux"
],
},
],
}],
],
}
| {
'targets': [
{
'target_name': 'chromiumcontent_all',
'type': 'none',
'dependencies': [
'chromiumcontent',
'<(DEPTH)/chrome/chrome.gyp:chromedriver',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'chromiumviews',
'<(DEPTH)/build/linux/system.gyp:libspeechd',
'<(DEPTH)/third_party/mesa/mesa.gyp:osmesa',
],
}],
['OS=="win"', {
'dependencies': [
'chromiumviews',
'<(DEPTH)/pdf/pdf.gyp:pdf',
],
}],
],
},
{
'target_name': 'chromiumcontent',
# Build chromiumcontent as shared_library otherwise some static libraries
# will not build.
'type': 'shared_library',
'dependencies': [
'<(DEPTH)/base/base.gyp:base_prefs',
'<(DEPTH)/content/content.gyp:content',
'<(DEPTH)/content/content.gyp:content_app_both',
'<(DEPTH)/content/content_shell_and_tests.gyp:content_shell_pak',
'<(DEPTH)/net/net.gyp:net_with_v8',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_host',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_proxy',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_ipc',
'<(DEPTH)/ppapi/ppapi_internal.gyp:ppapi_shared',
],
'sources': [
'empty.cc',
],
},
],
'conditions': [
['OS in ["win", "linux"]', {
'targets': [
{
'target_name': 'chromiumviews',
'type': 'none',
'dependencies': [
'<(DEPTH)/ui/content_accelerators/ui_content_accelerators.gyp:ui_content_accelerators',
'<(DEPTH)/ui/display/display.gyp:display',
'<(DEPTH)/ui/display/display.gyp:display_util',
'<(DEPTH)/ui/views/controls/webview/webview.gyp:webview',
'<(DEPTH)/ui/views/views.gyp:views',
'<(DEPTH)/ui/wm/wm.gyp:wm',
],
'conditions': [
['OS=="linux"', {
'dependencies': [
'<(DEPTH)/chrome/browser/ui/libgtk2ui/libgtk2ui.gyp:gtk2ui',
],
}], # OS=="linux"
],
},
],
}],
],
}
| Python | 0 |
2656e59215e0f94892a79e8f94cd90b8717fe8d6 | change list style | archivebox/cli/archivebox_add.py | archivebox/cli/archivebox_add.py | #!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox add'
import sys
import argparse
from typing import List, Optional, IO
from ..main import add
from ..util import docstring
from ..parsers import PARSERS
from ..config import OUTPUT_DIR, ONLY_NEW
from ..logging_util import SmartFormatter, accept_stdin, stderr
@docstring(add.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=add.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
parser.add_argument(
'--update-all', #'-n',
action='store_true',
default=not ONLY_NEW, # when ONLY_NEW=True we skip updating old links
help="Also retry previously skipped/failed links when adding new links",
)
parser.add_argument(
'--index-only', #'-o',
action='store_true',
help="Add the links to the main index without archiving them",
)
parser.add_argument(
'urls',
nargs='*',
type=str,
default=None,
help=(
'URLs or paths to archive e.g.:\n'
' https://getpocket.com/users/USERNAME/feed/all\n'
' https://example.com/some/rss/feed.xml\n'
' https://example.com\n'
' ~/Downloads/firefox_bookmarks_export.html\n'
' ~/Desktop/sites_list.csv\n'
)
)
parser.add_argument(
"--depth",
action="store",
default=0,
choices=[0, 1],
type=int,
help="Recursively archive all linked pages up to this many hops away"
)
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help="Re-archive URLs from scratch, overwriting any existing files"
)
parser.add_argument(
"--init", #'-i',
action='store_true',
help="Init/upgrade the curent data directory before adding",
)
parser.add_argument(
"--extract",
type=str,
help="Pass a list of the extractors to be used. If the method name is not correct, it will be ignored. \
This does not take precedence over the configuration",
default=""
)
parser.add_argument(
"--parser",
type=str,
help="Parser used to read inputted URLs.",
default="auto",
choices=["auto", *PARSERS.keys()],
)
command = parser.parse_args(args or ())
urls = command.urls
stdin_urls = accept_stdin(stdin)
if (stdin_urls and urls) or (not stdin and not urls):
stderr(
'[X] You must pass URLs/paths to add via stdin or CLI arguments.\n',
color='red',
)
raise SystemExit(2)
add(
urls=stdin_urls or urls,
depth=command.depth,
update_all=command.update_all,
index_only=command.index_only,
overwrite=command.overwrite,
init=command.init,
extractors=command.extract,
parser=command.parser,
out_dir=pwd or OUTPUT_DIR,
)
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
# TODO: Implement these
#
# parser.add_argument(
# '--mirror', #'-m',
# action='store_true',
# help='Archive an entire site (finding all linked pages below it on the same domain)',
# )
# parser.add_argument(
# '--crawler', #'-r',
# choices=('depth_first', 'breadth_first'),
# help='Controls which crawler to use in order to find outlinks in a given page',
# default=None,
# )
| #!/usr/bin/env python3
__package__ = 'archivebox.cli'
__command__ = 'archivebox add'
import sys
import argparse
from typing import List, Optional, IO
from ..main import add
from ..util import docstring
from ..parsers import PARSERS
from ..config import OUTPUT_DIR, ONLY_NEW
from ..logging_util import SmartFormatter, accept_stdin, stderr
@docstring(add.__doc__)
def main(args: Optional[List[str]]=None, stdin: Optional[IO]=None, pwd: Optional[str]=None) -> None:
parser = argparse.ArgumentParser(
prog=__command__,
description=add.__doc__,
add_help=True,
formatter_class=SmartFormatter,
)
parser.add_argument(
'--update-all', #'-n',
action='store_true',
default=not ONLY_NEW, # when ONLY_NEW=True we skip updating old links
help="Also retry previously skipped/failed links when adding new links",
)
parser.add_argument(
'--index-only', #'-o',
action='store_true',
help="Add the links to the main index without archiving them",
)
parser.add_argument(
'urls',
nargs='*',
type=str,
default=None,
help=(
'URLs or paths to archive e.g.:\n'
' https://getpocket.com/users/USERNAME/feed/all\n'
' https://example.com/some/rss/feed.xml\n'
' https://example.com\n'
' ~/Downloads/firefox_bookmarks_export.html\n'
' ~/Desktop/sites_list.csv\n'
)
)
parser.add_argument(
"--depth",
action="store",
default=0,
choices=[0, 1],
type=int,
help="Recursively archive all linked pages up to this many hops away"
)
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help="Re-archive URLs from scratch, overwriting any existing files"
)
parser.add_argument(
"--init", #'-i',
action='store_true',
help="Init/upgrade the curent data directory before adding",
)
parser.add_argument(
"--extract",
type=str,
help="Pass a list of the extractors to be used. If the method name is not correct, it will be ignored. \
This does not take precedence over the configuration",
default=""
)
parser.add_argument(
"--parser",
type=str,
help="Parser used to read inputted URLs.",
default="auto",
choices=["auto"] + list(PARSERS.keys())
)
command = parser.parse_args(args or ())
urls = command.urls
stdin_urls = accept_stdin(stdin)
if (stdin_urls and urls) or (not stdin and not urls):
stderr(
'[X] You must pass URLs/paths to add via stdin or CLI arguments.\n',
color='red',
)
raise SystemExit(2)
add(
urls=stdin_urls or urls,
depth=command.depth,
update_all=command.update_all,
index_only=command.index_only,
overwrite=command.overwrite,
init=command.init,
extractors=command.extract,
parser=command.parser,
out_dir=pwd or OUTPUT_DIR,
)
if __name__ == '__main__':
main(args=sys.argv[1:], stdin=sys.stdin)
# TODO: Implement these
#
# parser.add_argument(
# '--mirror', #'-m',
# action='store_true',
# help='Archive an entire site (finding all linked pages below it on the same domain)',
# )
# parser.add_argument(
# '--crawler', #'-r',
# choices=('depth_first', 'breadth_first'),
# help='Controls which crawler to use in order to find outlinks in a given page',
# default=None,
# )
| Python | 0.000002 |
07f9edc5764d3002fd3d4c1018a6ec43d5046dd0 | Fix unused import. | kinto2xml/tests/test_verifier.py | kinto2xml/tests/test_verifier.py | import json
import mock
import os
from six import StringIO
from kinto2xml.verifier import sort_lists_in_dict, main
def build_path(filename):
return os.path.join(os.path.dirname(__file__), 'fixtures', filename)
def test_sort_lists_in_dict_handles_recursion():
assert json.dumps(sort_lists_in_dict({
'@name': 'judith',
'validators': [{
'@id': 'gbc',
'toto': ['b', 'a']
}, {
'@id': 'abc',
'toto': ['c', 'd', 'a'],
'apps': [{
'@guid': 'cde',
'minVersion': 2,
}, {
'@guid': 'abc',
'minVersion': 3
}]
}]
}), sort_keys=True) == (
'{"@name": "judith", "validators": [{'
'"@id": "abc", '
'"apps": [{"@guid": "abc", "minVersion": 3}, '
'{"@guid": "cde", "minVersion": 2}], '
'"toto": ["a", "c", "d"]}, '
'{"@id": "gbc", "toto": ["a", "b"]}]}'
)
def test_files_checking():
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stdout.getvalue() == ''
assert stderr.getvalue() == ''
def test_fails_if_file_does_not_exists():
assert main(['unknown']) == 1
def test_verifier_supports_http_links():
with open(build_path('blocklist.xml')) as f:
blocklist_content = f.read()
response = mock.MagicMock(text=blocklist_content)
with mock.patch('requests.get', return_value=response) as mocked_request:
main(['http://first_server/url/', 'http://second_server/url/'])
mocked_request.assert_any_call('http://first_server/url/')
mocked_request.assert_any_call('http://second_server/url/')
def test_clean_option_does_not_remove_tmp_files():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml'), '-k'])
assert stderr.getvalue().startswith('$ diff -u'), stderr.getvalue()
def test_in_case_diff_fails_display_the_error():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('fennec-blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stderr.getvalue() != ''
| import json
import mock
import os
import sys
from six import StringIO
from kinto2xml.verifier import sort_lists_in_dict, main
def build_path(filename):
return os.path.join(os.path.dirname(__file__), 'fixtures', filename)
def test_sort_lists_in_dict_handles_recursion():
assert json.dumps(sort_lists_in_dict({
'@name': 'judith',
'validators': [{
'@id': 'gbc',
'toto': ['b', 'a']
}, {
'@id': 'abc',
'toto': ['c', 'd', 'a'],
'apps': [{
'@guid': 'cde',
'minVersion': 2,
}, {
'@guid': 'abc',
'minVersion': 3
}]
}]
}), sort_keys=True) == (
'{"@name": "judith", "validators": [{'
'"@id": "abc", '
'"apps": [{"@guid": "abc", "minVersion": 3}, '
'{"@guid": "cde", "minVersion": 2}], '
'"toto": ["a", "c", "d"]}, '
'{"@id": "gbc", "toto": ["a", "b"]}]}'
)
def test_files_checking():
with mock.patch('sys.stdout', new_callable=StringIO) as stdout:
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stdout.getvalue() == ''
assert stderr.getvalue() == ''
def test_fails_if_file_does_not_exists():
assert main(['unknown']) == 1
def test_verifier_supports_http_links():
with open(build_path('blocklist.xml')) as f:
blocklist_content = f.read()
response = mock.MagicMock(text=blocklist_content)
with mock.patch('requests.get', return_value=response) as mocked_request:
main(['http://first_server/url/', 'http://second_server/url/'])
mocked_request.assert_any_call('http://first_server/url/')
mocked_request.assert_any_call('http://second_server/url/')
def test_clean_option_does_not_remove_tmp_files():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('blocklist.xml'),
build_path('generated-blocklist.xml'), '-k'])
assert stderr.getvalue().startswith('$ diff -u'), stderr.getvalue()
def test_in_case_diff_fails_display_the_error():
with mock.patch('sys.stderr', new_callable=StringIO) as stderr:
main([build_path('fennec-blocklist.xml'),
build_path('generated-blocklist.xml')])
assert stderr.getvalue() != ''
| Python | 0 |
51f4d40cf6750d35f10f37d939a2c30c5f26d300 | Update script to write results to the database. | backend/scripts/updatedf.py | backend/scripts/updatedf.py | #!/usr/bin/env python
import hashlib
import os
import rethinkdb as r
def main():
conn = r.connect('localhost', 28015, db='materialscommons')
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
path = os.path.join(root, f)
with open(path) as fd:
data = fd.read()
hash = hashlib.md5(data).hexdigest()
s = os.stat(path).st_size
r.table('datafiles').get(f).update({'size':s, 'checksum':hash}).run(conn)
print "%s:%s:%d" %(path, hash, s)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
#import hashlib
import os
def main():
for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
for f in files:
print f
if __name__ == "__main__":
main()
| Python | 0 |
599672acbf925cab634bc15ab47055aabb131efd | Fix xkcd text regex. Closes #46 | dosagelib/plugins/x.py | dosagelib/plugins/x.py | # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2013 Bastian Kleineidam
from re import compile
from ..scraper import _BasicScraper
from ..helpers import bounceStarter
from ..util import tagre
class xkcd(_BasicScraper):
url = 'http://xkcd.com/'
starter = bounceStarter(url, compile(tagre("a", "href", r'(/\d+/)', before="next")))
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'(http://imgs\.xkcd\.com/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(/\d+/)', before="prev"))
help = 'Index format: n (unpadded)'
description = u'A webcomic of romance, sarcasm, math, and language.'
textSearch = compile(tagre("img", "title", r'([^"]+)', before=r'http://imgs\.xkcd\.com/comics/'))
adult = True
@classmethod
def namer(cls, imageUrl, pageUrl):
index = int(pageUrl.rstrip('/').rsplit('/', 1)[-1])
name = imageUrl.rsplit('/', 1)[-1].split('.')[0]
return '%03d-%s' % (index, name)
@classmethod
def imageUrlModifier(cls, url, data):
if url and '/large/' in data:
return url.replace(".png", "_large.png")
return url
| # -*- coding: iso-8859-1 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2013 Bastian Kleineidam
from re import compile
from ..scraper import _BasicScraper
from ..helpers import bounceStarter
from ..util import tagre
class xkcd(_BasicScraper):
url = 'http://xkcd.com/'
starter = bounceStarter(url, compile(tagre("a", "href", r'(/\d+/)', before="next")))
stripUrl = url + '%s/'
firstStripUrl = stripUrl % '1'
imageSearch = compile(tagre("img", "src", r'(http://imgs\.xkcd\.com/comics/[^"]+)'))
prevSearch = compile(tagre("a", "href", r'(/\d+/)', before="prev"))
help = 'Index format: n (unpadded)'
description = u'A webcomic of romance, sarcasm, math, and language.'
textSearch = compile(tagre("img", "title", r'([^"]+)'))
adult = True
@classmethod
def namer(cls, imageUrl, pageUrl):
index = int(pageUrl.rstrip('/').rsplit('/', 1)[-1])
name = imageUrl.rsplit('/', 1)[-1].split('.')[0]
return '%03d-%s' % (index, name)
@classmethod
def imageUrlModifier(cls, url, data):
if url and '/large/' in data:
return url.replace(".png", "_large.png")
return url
| Python | 0.999991 |
f0593b2d69730441b5a486e27ed6eb7001939bf4 | Include unlimited features for enterprise | corehq/apps/accounting/bootstrap/config/user_buckets_august_2018.py | corehq/apps/accounting/bootstrap/config/user_buckets_august_2018.py | from __future__ import absolute_import
from __future__ import unicode_literals
from decimal import Decimal
from corehq.apps.accounting.models import (
FeatureType,
SoftwarePlanEdition,
UNLIMITED_FEATURE_USAGE
)
BOOTSTRAP_CONFIG = {
(SoftwarePlanEdition.COMMUNITY, False, False): {
'role': 'community_plan_v1',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
},
(SoftwarePlanEdition.STANDARD, False, False): {
'role': 'standard_plan_v0',
'product_rate_monthly_fee': Decimal('300.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=50, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.PRO, False, False): {
'role': 'pro_plan_v0',
'product_rate_monthly_fee': Decimal('600.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=250, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, False, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('1200.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=500, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, True, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=UNLIMITED_FEATURE_USAGE, per_excess_fee=Decimal('0.00')),
FeatureType.SMS: dict(monthly_limit=UNLIMITED_FEATURE_USAGE),
}
}
}
| from __future__ import absolute_import
from __future__ import unicode_literals
from decimal import Decimal
from corehq.apps.accounting.models import (
FeatureType,
SoftwarePlanEdition,
)
BOOTSTRAP_CONFIG = {
(SoftwarePlanEdition.COMMUNITY, False, False): {
'role': 'community_plan_v1',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
},
(SoftwarePlanEdition.STANDARD, False, False): {
'role': 'standard_plan_v0',
'product_rate_monthly_fee': Decimal('300.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=50, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.PRO, False, False): {
'role': 'pro_plan_v0',
'product_rate_monthly_fee': Decimal('600.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=250, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, False, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('1200.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=500, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=50),
}
},
(SoftwarePlanEdition.ADVANCED, True, False): {
'role': 'advanced_plan_v0',
'product_rate_monthly_fee': Decimal('0.00'),
'feature_rates': {
FeatureType.USER: dict(monthly_limit=10, per_excess_fee=Decimal('2.00')),
FeatureType.SMS: dict(monthly_limit=0),
}
}
}
| Python | 0 |
205f3fb2f36f33c6d13b4541ad49522b799d358d | simplify the call to make file list | src/actions/server.py | src/actions/server.py | import sys
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from . import utils
class Broadcaster(DatagramProtocol):
"""
Broadcast the ip to all of the listeners on the channel
"""
def __init__(self, address):
self.ip = address # shouldn't this be passed in
self.host = '224.0.0.5'
self.port = 8005
def startProtocol(self):
log.msg("Serving on {0}:8888 and broadcasting IP on 224.0.0.5:8005".format(self.ip))
self.transport.joinGroup(self.host)
self._call = task.LoopingCall(self.sendHeartbeat)
self._loop = self._call.start(5)
def sendHeartbeat(self):
message ='{0}:8888'.format(self.ip)
self.transport.write(message, (self.host, self.port))
def stopProtocol(self):
self._call.stop()
def main(serve_dir):
from twisted.internet import reactor
resource = File(serve_dir)
factory = Site(resource)
log.startLogging(sys.stdout)
serve_at = utils.get_live_interface()
# this is messy
# the program should expect to serve files at a specific location everytime.
utils.make_file_list(serve_dir)
log.msg("Starting fileserver on{0}:8888".format(serve_at))
reactor.listenTCP(8888, factory)
log.msg("Broadcasting")
reactor.listenMulticast(8005, Broadcaster(serve_at))
reactor.run()
if __name__ == "__main__":
main('./')
| import sys
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from twisted.internet import task
from twisted.internet.protocol import DatagramProtocol
from . import utils
class Broadcaster(DatagramProtocol):
"""
Broadcast the ip to all of the listeners on the channel
"""
def __init__(self, address):
self.ip = address # shouldn't this be passed in
self.host = '224.0.0.5'
self.port = 8005
def startProtocol(self):
log.msg("Serving on {0}:8888 and broadcasting IP on 224.0.0.5:8005".format(self.ip))
self.transport.joinGroup(self.host)
self._call = task.LoopingCall(self.sendHeartbeat)
self._loop = self._call.start(5)
def sendHeartbeat(self):
message ='{0}:8888'.format(self.ip)
self.transport.write(message, (self.host, self.port))
def stopProtocol(self):
self._call.stop()
def main(serve_dir):
from twisted.internet import reactor
resource = File(serve_dir)
factory = Site(resource)
log.startLogging(sys.stdout)
serve_at = utils.get_live_interface()
# this is messy
# the program should expect to serve files at a specific location everytime.
utils.make_file_list(utils.list_files(serve_dir),
utils.list_dirs(serve_dir),
serve_dir)
log.msg("Starting fileserver on{0}:8888".format(serve_at))
reactor.listenTCP(8888, factory)
log.msg("Broadcasting")
reactor.listenMulticast(8005, Broadcaster(serve_at))
reactor.run()
if __name__ == "__main__":
main('./')
| Python | 0.000129 |
923d49c753acf7d8945d6b79efbdb08363e130a2 | Bring test_frame_of_test_null_file up to date with new signature of frame_of_test(). | noseprogressive/tests/test_utils.py | noseprogressive/tests/test_utils.py | from os import chdir, getcwd
from os.path import dirname, basename
from unittest import TestCase
from nose.tools import eq_
from noseprogressive.utils import human_path, frame_of_test
class UtilsTests(TestCase):
"""Tests for independent little bits and pieces"""
def test_human_path(self):
chdir(dirname(__file__))
eq_(human_path(__file__, getcwd()), basename(__file__))
def test_frame_of_test_null_file(self):
"""Make sure frame_of_test() doesn't crash when test_file is None."""
try:
frame_of_test((None, None, None), NotImplementedError,
NotImplementedError(), [('file', 333)])
except AttributeError:
self.fail('frame_of_test() raised AttributeError.')
| from os import chdir, getcwd
from os.path import dirname, basename
from unittest import TestCase
from nose.tools import eq_
from noseprogressive.utils import human_path, frame_of_test
class UtilsTests(TestCase):
"""Tests for independent little bits and pieces"""
def test_human_path(self):
chdir(dirname(__file__))
eq_(human_path(__file__, getcwd()), basename(__file__))
def test_frame_of_test_null_file(self):
"""Make sure frame_of_test() doesn't crash when test_file is None."""
try:
frame_of_test((None, None, None), [('file', 333)])
except AttributeError:
self.fail('frame_of_test() raised AttributeError.')
| Python | 0 |
0658a099a386791b3bde27f8e76c240253310890 | Update pplot.py | src/analysis/pplot.py | src/analysis/pplot.py | #-*- coding:utf-8 -*-
#!/usr/bin/python
''' This file is designed to plot the cost curve, maybe deprecated.
author:
iiiiiiiiiiii iiiiiiiiiiii !!!!!!! !!!!!!
# ### # ### ### I# #:
# ### # I##; ##; ## ##
### ### !## #### #
### ### ### ## ### #'
!##; `##% ##; ## ### ##
### ### $## `# ## #
### # ### # #### ####;
`### -# ### `# ### ###
############## ############## `# #
date:2016-11-09
'''
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
class Analysis(object):
'''
class Analysis for ASR results
'''
def __init__(self,logFile,saveFig=True,showFig=False):
self.logFile = logFile
self.saveFig = saveFig
self.showFig = showFig
def getContent(self):
try:
with open(self.logFile) as f:
content = f.read().splitlines()
except RuntimeError as err:
print err
return content
def parse(self):
indexCostList = []
index1 = 0
indexValidateList = []
index2 = 0
costList = []
validateCostList = []
content = self.getContent()
keep = 0
model = ' '
dir_mfcc = ' '
learning_rate = 0
update = ''
for line in content:
if line.startswith('model'):
model = line.split(':')[1]
if line.startswith('penalty'):
penalty = line.split(':')[1]
if line.startswith('input_dim'):
input_dim = line.split(':')[1]
if line.startswith('n_hid'):
n_hid = line.split(':')[1]
if line.startswith('dataset'):
dir_mfcc = line.split(':')[1]
if line.startswith('learning_rate'):
learning_rate = line.split(':')[1]
if line.startswith('update'):
update = line.split(' ')[2]
if line.startswith('keep'):
keep = line.split(':')[1]
if line.startswith('Epoch'):
if 'validate cost' in line:
index2 = index2 + 1
cost = line.split(':')[2]
indexValidateList.append(index2)
validateCostList.append(float(cost))
elif 'train cost' in line:
index1 = index1+1
cost = line.split(':')[2]
indexCostList.append(index1)
costList.append(float(cost))
title = 'model:'+model+',dataset:'+dir_mfcc+',lr:'+ \
str(learning_rate)+'\nupdate:'+update
return title,indexCostList,indexValidateList,costList,validateCostList
def plot(self):
title,indexCostList,indexValidateList,costList,validateCostList = self.parse()
p1 = plt.plot(indexCostList,costList,marker='o',color='b',label='train cost')
p2 = plt.plot(indexValidateList,validateCostList,marker='o',color='r',label='validate cost')
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.legend()
plt.grid()
plt.title(title)
if self.saveFig:
plt.savefig(self.logFile+'.png',dpi=100)
#plt.savefig(self.logFile+'.eps',dpi=100)
if self.showFig:
plt.show()
if __name__ == '__main__':
dir_ = '/home/pony/acousticModeling/results/retest/'
for subdir, dirs, files in os.walk(dir_):
for f in files:
fullFilename = os.path.join(subdir, f)
if fullFilename.endswith('.txt'):
a = Analysis(fullFilename)
a.plot()
plt.clf()
| #!/usr/bin/python
# -*- coding:utf-8 -*-
'''Result analysis for automatic speech recognition
@Date:2016-4-9
@Author:zhang zewang
'''
import os
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
class Analysis(object):
'''
class Analysis for ASR results
'''
def __init__(self,logFile,saveFig=True,showFig=False):
self.logFile = logFile
self.saveFig = saveFig
self.showFig = showFig
def getContent(self):
try:
with open(self.logFile) as f:
content = f.read().splitlines()
except RuntimeError as err:
print err
return content
def parse(self):
indexCostList = []
index1 = 0
indexValidateList = []
index2 = 0
costList = []
validateCostList = []
content = self.getContent()
keep = 0
model = ' '
dir_mfcc = ' '
learning_rate = 0
update = ''
for line in content:
if line.startswith('model'):
model = line.split(':')[1]
if line.startswith('penalty'):
penalty = line.split(':')[1]
if line.startswith('input_dim'):
input_dim = line.split(':')[1]
if line.startswith('n_hid'):
n_hid = line.split(':')[1]
if line.startswith('dataset'):
dir_mfcc = line.split(':')[1]
if line.startswith('learning_rate'):
learning_rate = line.split(':')[1]
if line.startswith('update'):
update = line.split(' ')[2]
if line.startswith('keep'):
keep = line.split(':')[1]
if line.startswith('Epoch'):
if 'validate cost' in line:
index2 = index2 + 1
cost = line.split(':')[2]
indexValidateList.append(index2)
validateCostList.append(float(cost))
elif 'train cost' in line:
index1 = index1+1
cost = line.split(':')[2]
indexCostList.append(index1)
costList.append(float(cost))
title = 'model:'+model+',dataset:'+dir_mfcc+',lr:'+ \
str(learning_rate)+'\nupdate:'+update
return title,indexCostList,indexValidateList,costList,validateCostList
def plot(self):
title,indexCostList,indexValidateList,costList,validateCostList = self.parse()
p1 = plt.plot(indexCostList,costList,marker='o',color='b',label='train cost')
p2 = plt.plot(indexValidateList,validateCostList,marker='o',color='r',label='validate cost')
plt.xlabel('Epoch')
plt.ylabel('Cost')
plt.legend()
plt.grid()
plt.title(title)
if self.saveFig:
plt.savefig(self.logFile+'.png',dpi=100)
#plt.savefig(self.logFile+'.eps',dpi=100)
if self.showFig:
plt.show()
if __name__ == '__main__':
dir_ = '/home/pony/acousticModeling/results/retest/'
for subdir, dirs, files in os.walk(dir_):
for f in files:
fullFilename = os.path.join(subdir, f)
if fullFilename.endswith('.txt'):
a = Analysis(fullFilename)
a.plot()
plt.clf()
| Python | 0.000002 |
ac5053ada316e46d4286b1944c2fb957c42c3975 | truncate superfluous trailing zeros | durationpy/duration.py | durationpy/duration.py | # -*- coding: UTF-8 -*-
import re
import datetime
_nanosecond_size = 1
_microsecond_size = 1000 * _nanosecond_size
_millisecond_size = 1000 * _microsecond_size
_second_size = 1000 * _millisecond_size
_minute_size = 60 * _second_size
_hour_size = 60 * _minute_size
_day_size = 24 * _hour_size
_week_size = 7 * _day_size
_month_size = 30 * _day_size
_year_size = 365 * _day_size
units = {
"ns": _nanosecond_size,
"us": _microsecond_size,
"µs": _microsecond_size,
"μs": _microsecond_size,
"ms": _millisecond_size,
"s": _second_size,
"m": _minute_size,
"h": _hour_size,
"d": _day_size,
"w": _week_size,
"mm": _month_size,
"y": _year_size,
}
def from_str(duration):
"""Parse a duration string to a datetime.timedelta"""
if duration in ("0", "+0", "-0"):
return datetime.timedelta()
pattern = re.compile('([\d\.]+)([a-zµμ]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("Invalid duration {}".format(duration))
for (value, unit) in matches:
if unit not in units:
raise Exception(
"Unknown unit {} in duration {}".format(unit, duration))
try:
total += float(value) * units[unit]
except:
raise Exception(
"Invalid value {} in duration {}".format(value, duration))
microseconds = total / _microsecond_size
return datetime.timedelta(microseconds=sign * microseconds)
def to_str(delta):
"""Format a datetime.timedelta to a duration string"""
total_seconds = delta.total_seconds()
sign = "-" if total_seconds < 0 else ""
nanoseconds = abs(total_seconds * _second_size)
if total_seconds < 1:
result_str = _to_str_small(nanoseconds)
else:
result_str = _to_str_large(nanoseconds)
return "{}{}".format(sign, result_str)
def _to_str_small(nanoseconds):
result_str = ""
if not nanoseconds:
return "0"
milliseconds = int(nanoseconds / _millisecond_size)
if milliseconds:
nanoseconds -= _millisecond_size * milliseconds
result_str += "{:g}ms".format(milliseconds)
microseconds = int(nanoseconds / _microsecond_size)
if microseconds:
nanoseconds -= _microsecond_size * microseconds
result_str += "{:g}us".format(microseconds)
if nanoseconds:
result_str += "{:g}ns".format(nanoseconds)
return result_str
def _to_str_large(nanoseconds):
result_str = ""
hours = int(nanoseconds / _hour_size)
if hours:
nanoseconds -= _hour_size * hours
result_str += "{:g}h".format(hours)
minutes = int(nanoseconds / _minute_size)
if minutes:
nanoseconds -= _minute_size * minutes
result_str += "{:g}m".format(minutes)
seconds = float(nanoseconds) / float(_second_size)
if seconds:
nanoseconds -= _second_size * seconds
result_str += "{:g}s".format(seconds)
return result_str
| # -*- coding: UTF-8 -*-
import re
import datetime
_nanosecond_size = 1
_microsecond_size = 1000 * _nanosecond_size
_millisecond_size = 1000 * _microsecond_size
_second_size = 1000 * _millisecond_size
_minute_size = 60 * _second_size
_hour_size = 60 * _minute_size
_day_size = 24 * _hour_size
_week_size = 7 * _day_size
_month_size = 30 * _day_size
_year_size = 365 * _day_size
units = {
"ns": _nanosecond_size,
"us": _microsecond_size,
"µs": _microsecond_size,
"μs": _microsecond_size,
"ms": _millisecond_size,
"s": _second_size,
"m": _minute_size,
"h": _hour_size,
"d": _day_size,
"w": _week_size,
"mm": _month_size,
"y": _year_size,
}
def from_str(duration):
"""Parse a duration string to a datetime.timedelta"""
if duration in ("0", "+0", "-0"):
return datetime.timedelta()
pattern = re.compile('([\d\.]+)([a-zµμ]+)')
total = 0
sign = -1 if duration[0] == '-' else 1
matches = pattern.findall(duration)
if not len(matches):
raise Exception("Invalid duration {}".format(duration))
for (value, unit) in matches:
if unit not in units:
raise Exception(
"Unknown unit {} in duration {}".format(unit, duration))
try:
total += float(value) * units[unit]
except:
raise Exception(
"Invalid value {} in duration {}".format(value, duration))
microseconds = total / _microsecond_size
return datetime.timedelta(microseconds=sign * microseconds)
def to_str(delta):
"""Format a datetime.timedelta to a duration string"""
total_seconds = delta.total_seconds()
sign = "-" if total_seconds < 0 else ""
nanoseconds = abs(total_seconds * _second_size)
if total_seconds < 1:
result_str = _to_str_small(nanoseconds)
else:
result_str = _to_str_large(nanoseconds)
return "{}{}".format(sign, result_str)
def _to_str_small(nanoseconds):
result_str = ""
if not nanoseconds:
return "0"
milliseconds = int(nanoseconds / _millisecond_size)
if milliseconds:
nanoseconds -= _millisecond_size * milliseconds
result_str += "{}ms".format(milliseconds)
microseconds = int(nanoseconds / _microsecond_size)
if microseconds:
nanoseconds -= _microsecond_size * microseconds
result_str += "{}us".format(microseconds)
if nanoseconds:
result_str += "{}ns".format(nanoseconds)
return result_str
def _to_str_large(nanoseconds):
result_str = ""
hours = int(nanoseconds / _hour_size)
if hours:
nanoseconds -= _hour_size * hours
result_str += "{}h".format(hours)
minutes = int(nanoseconds / _minute_size)
if minutes:
nanoseconds -= _minute_size * minutes
result_str += "{}m".format(minutes)
seconds = float(nanoseconds) / float(_second_size)
if seconds:
nanoseconds -= _second_size * seconds
result_str += "{}s".format(seconds)
return result_str
| Python | 0.004364 |
d9800c562b81f4e118e9db96a68e301396af46f9 | Add abstract job serializer | polyaxon/jobs/serializers.py | polyaxon/jobs/serializers.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from rest_framework import serializers, fields
from jobs.models import JobResources
class JobResourcesSerializer(serializers.ModelSerializer):
class Meta:
model = JobResources
exclude = ('id',)
class JobSerializer(serializers.ModelSerializer):
cpu = fields.DictField(allow_null=True)
memory = fields.DictField(allow_null=True)
gpu = fields.DictField(allow_null=True)
resources = JobResourcesSerializer(read_only=True)
class Meta:
fields = ('image', 'resources', 'cpu', 'memory', 'gpu')
extra_kwargs = {
'cpu': {'write_only': True},
'memory': {'write_only': True},
'gpu': {'write_only': True}}
@staticmethod
def _has_resources(validated_data):
cpu = validated_data['cpu']
memory = validated_data['memory']
gpu = validated_data['gpu']
if cpu is None and memory is None and gpu is None:
return False
return True
@staticmethod
def _get_resources(validated_data):
cpu = validated_data['cpu']
memory = validated_data['memory']
gpu = validated_data['gpu']
return {'cpu': cpu, 'memory': memory, 'gpu': gpu}
def _create_resources(self, validated_data):
if self._has_resources(validated_data):
resources = JobResourcesSerializer(data=self._get_resources(validated_data))
resources.is_valid(raise_exception=True)
return resources.save()
return None
def _update_resources(self, resources_instance, validated_data):
if self._has_resources(validated_data):
resources = JobResourcesSerializer(instance=resources_instance,
data=self._get_resources(validated_data))
resources.is_valid(raise_exception=True)
return resources.save()
return None
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from rest_framework import serializers
from jobs.models import JobResources
class JobResourcesSerializer(serializers.ModelSerializer):
class Meta:
model = JobResources
exclude = ('id',)
| Python | 0.003237 |
77c4b5a72ddad68717b6fb1291ce643f20a63e2d | Update SeleniumBase exceptions | seleniumbase/common/exceptions.py | seleniumbase/common/exceptions.py | """ SeleniumBase Exceptions
NoSuchFileException => Called when self.assert_downloaded_file(...) fails.
NotUsingChromeException => Used by Chrome-only methods if not using Chrome.
OutOfScopeException => Used by BaseCase methods when setUp() is skipped.
TextNotVisibleException => Called when expected text fails to appear.
TimeLimitExceededException => Called when exceeding "--time-limit=SECONDS".
"""
from selenium.common.exceptions import WebDriverException
class NoSuchFileException(Exception):
pass
class NotUsingChromeException(WebDriverException):
pass
class OutOfScopeException(Exception):
pass
class TextNotVisibleException(WebDriverException):
pass
class TimeLimitExceededException(Exception):
pass
| """ SeleniumBase Exceptions
NoSuchFileException => Used by self.assert_downloaded_file(...)
NotUsingChromeException => Used by Chrome-only methods if not using Chrome
OutOfScopeException => Used by BaseCase methods when setUp() is skipped
TimeLimitExceededException => Used by "--time-limit=SECONDS"
"""
class NoSuchFileException(Exception):
pass
class NotUsingChromeException(Exception):
pass
class OutOfScopeException(Exception):
pass
class TimeLimitExceededException(Exception):
pass
class TextNotVisibleException(Exception):
pass
| Python | 0 |
e6af9d901f26fdf779a6a13319face483fe48a3b | Disable clickjacking protection on demos to display them in iframes | dwitter/dweet/views.py | dwitter/dweet/views.py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from dwitter.models import Dweet
from django.views.decorators.clickjacking import xframe_options_exempt
@xframe_options_exempt
def fullscreen_dweet(request, dweet_id):
dweet = get_object_or_404(Dweet, id=dweet_id)
context = {'dweet': dweet
}
return render(request, 'dweet/dweet-id.html', context );
| from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect, HttpResponse, Http404
from django.core.urlresolvers import reverse
from django.contrib.auth.decorators import login_required
from dwitter.models import Dweet
def fullscreen_dweet(request, dweet_id):
dweet = get_object_or_404(Dweet, id=dweet_id)
context = {'dweet': dweet
}
return render(request, 'dweet/dweet-id.html', context );
| Python | 0 |
2e72be703998b2d0d9fdc06bfffddccec8fb11e3 | use rps balancing | scripts/cycler.py | scripts/cycler.py | #!/usr/bin/python
from itertools import cycle
import subprocess
import time
targetsize = 16
def start():
# subprocess.call("sudo gcloud components update --quiet", shell=True)
# For completeness this should also create the backend, HTTP load balancer, template, and network
# Get the available zones
zones = subprocess.check_output("gcloud compute zones list --format='value(NAME)'", shell=True)
zoneList = zones.strip().split('\n')
# zoneList = sorted(zoneList)
# sort by zone letter (last character)
zoneList = sorted(zoneList, key=lambda x: x[-1])
print zoneList
for i, zone in enumerate(zoneList):
backendname = "retriever"
templatename = "retriever-1"
instancegroupname = "retriever-group-" + zone
print i, zone, instancegroupname
# Create the instance group
subprocess.call("gcloud compute instance-groups managed create {} --quiet --zone={} --size=0 --template={}".format(instancegroupname, zone, templatename), shell=True)
# Set instance template
subprocess.call("gcloud compute instance-groups managed set-instance-template {} --quiet --zone={} --template={}".format(instancegroupname, zone, templatename), shell=True)
# Add it to backend
subprocess.call("gcloud compute backend-services add-backend {} --quiet --instance-group={} --instance-group-zone={}".format(backendname, instancegroupname, zone), shell=True)
# Configure load balancing policy
subprocess.call("gcloud compute backend-services update-backend {} --quiet --instance-group={} --instance-group-zone={} --balancing-mode=RATE --max-rate-per-instance=1".format(backendname, instancegroupname, zone), shell=True)
pool = cycle(zoneList)
while True:
# Consider all instances in each instance group connected in a chain
# Every iteration, slide the current window one slot
# Create the new instance in the next group first, then delete an instance in the current group
zone = next(pool)
instancegroupname = "retriever-group-" + zone
currentsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(instancegroupname, zone), shell=True))
if currentsize > 0:
nextzone = next(pool)
nextinstancegroupname = "retriever-group-" + nextzone
nextsize = max(targetsize - currentsize, 0)
subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize), shell=True)
time.sleep(60)
# nextsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(nextinstancegroupname, nextzone), shell=True))
# # Scale up the next zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize + 1), shell=True)
# # Find the oldest instance in this group
# delete = subprocess.check_output("gcloud compute instances list --sort-by=creationTimestamp --format='table[no-heading](name)' | grep {} | head -n 1".format(instancegroupname), shell=True)
# if (delete.startswith(instancegroupname)):
# # Delete old one
# subprocess.call("gcloud compute instance-groups managed delete-instances {} --quiet --zone={} --instances={}".format(instancegroupname, zone, delete), shell=True)
# # Scale down the current zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(instancegroupname, zone, currentsize - 1), shell=True)
# We want to cycle fast enough that each instance lives for 20 minutes
# time.sleep(1200 // (currentsize + nextsize))
while True:
try:
start()
except:
pass | #!/usr/bin/python
from itertools import cycle
import subprocess
import time
targetsize = 16
def start():
# subprocess.call("sudo gcloud components update --quiet", shell=True)
# For completeness this should also create the backend, HTTP load balancer, template, and network
# Get the available zones
zones = subprocess.check_output("gcloud compute zones list --format='value(NAME)'", shell=True)
zoneList = zones.strip().split('\n')
# zoneList = sorted(zoneList)
# sort by zone letter (last character)
zoneList = sorted(zoneList, key=lambda x: x[-1])
print zoneList
for i, zone in enumerate(zoneList):
backendname = "retriever"
templatename = "retriever-1"
instancegroupname = "retriever-group-" + zone
print i, zone, instancegroupname
# Create the instance group
subprocess.call("gcloud compute instance-groups managed create {} --quiet --zone={} --size=0 --template={}".format(instancegroupname, zone, templatename), shell=True)
# Set instance template
subprocess.call("gcloud compute instance-groups managed set-instance-template {} --quiet --zone={} --template={}".format(instancegroupname, zone, templatename), shell=True)
# Add it to backend
subprocess.call("gcloud compute backend-services add-backend {} --quiet --instance-group={} --instance-group-zone={}".format(backendname, instancegroupname, zone), shell=True)
pool = cycle(zoneList)
while True:
# Consider all instances in each instance group connected in a chain
# Every iteration, slide the current window one slot
# Create the new instance in the next group first, then delete an instance in the current group
zone = next(pool)
instancegroupname = "retriever-group-" + zone
currentsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(instancegroupname, zone), shell=True))
if currentsize > 0:
nextzone = next(pool)
nextinstancegroupname = "retriever-group-" + nextzone
nextsize = max(targetsize - currentsize, 0)
subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize), shell=True)
time.sleep(60)
# nextsize = int(subprocess.check_output("gcloud compute instance-groups managed describe {} --quiet --zone={} --format='value(targetSize)'".format(nextinstancegroupname, nextzone), shell=True))
# # Scale up the next zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(nextinstancegroupname, nextzone, nextsize + 1), shell=True)
# # Find the oldest instance in this group
# delete = subprocess.check_output("gcloud compute instances list --sort-by=creationTimestamp --format='table[no-heading](name)' | grep {} | head -n 1".format(instancegroupname), shell=True)
# if (delete.startswith(instancegroupname)):
# # Delete old one
# subprocess.call("gcloud compute instance-groups managed delete-instances {} --quiet --zone={} --instances={}".format(instancegroupname, zone, delete), shell=True)
# # Scale down the current zone
# subprocess.call("gcloud compute instance-groups managed resize {} --quiet --zone={} --size={}".format(instancegroupname, zone, currentsize - 1), shell=True)
# We want to cycle fast enough that each instance lives for 20 minutes
# time.sleep(1200 // (currentsize + nextsize))
while True:
try:
start()
except:
pass | Python | 0 |
4d5a15a4a087ea8bcf458243da947f5e0934013b | Fix html not loading the initial value (#569) | src/blocks/widgets.py | src/blocks/widgets.py | from django import forms
from wagtail.utils.widgets import WidgetWithScript
class CodeMirrorWidget(WidgetWithScript, forms.Textarea):
def render_js_init(self, id, name, value):
js = """
document.addEventListener('DOMContentLoaded', function(){{
CodeMirror.fromTextArea(
document.getElementById("{id}"),
{{
lineWrapping: true,
indentUnit: 4,
mode: "htmlmixed",
autoRefresh: true
}}
)
}});
"""
return js.format(id=id)
@property
def media(self):
return forms.Media(
css={'all': ('libraries/codemirror/codemirror.css',)},
js=(
'libraries/codemirror/codemirror.js',
'libraries/codemirror/autorefresh.js',
'libraries/codemirror/xml.js',
'libraries/codemirror/css.js',
'libraries/codemirror/javascript.js',
'libraries/codemirror/htmlmixed.js',
)
)
| from django import forms
from wagtail.utils.widgets import WidgetWithScript
class CodeMirrorWidget(WidgetWithScript, forms.Textarea):
def render_js_init(self, id, name, value):
js = """
CodeMirror.fromTextArea(
document.getElementById("{id}"),
{{
lineWrapping: true,
indentUnit: 4,
mode: "htmlmixed",
autoRefresh: true
}}
);
"""
return js.format(id=id)
@property
def media(self):
return forms.Media(
css={'all': ('libraries/codemirror/codemirror.css',)},
js=('libraries/codemirror/codemirror.js',
'libraries/codemirror/autorefresh.js',
'libraries/codemirror/xml.js',
'libraries/codemirror/css.js',
'libraries/codemirror/javascript.js',
'libraries/codemirror/htmlmixed.js')
)
| Python | 0 |
84c07019572d8945bd2d4c7473c2b86c314107d0 | Attempt to return better json | zpr.py | zpr.py | #!/var/lib/zpr/api/bin/python
import json
import lib_zpr
#import logging
#from logging.handlers import RotatingFileHandler
from flask import Flask, jsonify, make_response
app = Flask(__name__)
# app.logger.setLevel(logging.INFO)
# app.logger.disabled = False
# handler = logging.handlers.RotatingFileHandler(
# '/var/log/zpr_flask.log',
# 'a',
# maxBytes=1024 * 1024 * 100,
# backupCount=20
# )
# log = logging.getLogger('werkzeug')
# log.setLevel(logging.DEBUG)
# app.logger.addHandler(handler)
api_version = 'v1.0'
api_base = str('/zpr/{v}'.format(v=api_version))
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
# @app.route('/zpr/job')
# def ls_test():
# return json.dumps(call('ls'))
# @app.route('{a}/job/rsync/<backup_host>'.format(a=api_base), methods=['GET'])
# def check_job(backup_host):
# job = str(lib_zpr.check_zpr_rsync_job(backup_host))
# return json.dumps(job)
@app.route('{a}/job/<backup_host>'.format(a=api_base), methods=['GET'])
def check_zpr_job(backup_host):
lib_zpr.check_tsp_job(backup_host)
return json.dumps(str(lib_zpr.check_tsp_job_out[0]))
@app.route('{a}/job/<backup_host>/output'.format(a=api_base), methods=['GET'])
def check_zpr_job_summary(backup_host):
lib_zpr.check_tsp_job(backup_host, show_changes=True)
job_checked = [
{
'name': backup_host,
'response': lib_zpr.check_tsp_job_out[0],
'changes': lib_zpr.check_job_changes
}
]
return jsonify({'job_checked': job_checked})
#return json.dumps(join_summary, indent=2)
if __name__ == '__main__':
# formatter = logging.Formatter(\
# "%(asctime)s - %(levelname)s - %(name)s: \t%(messages)s")
# handler.setFormatter(formatter)
app.run(debug=True, extra_files='/var/lib/zpr/api/lib_zpr.py')
| #!/var/lib/zpr/api/bin/python
import json
import lib_zpr
#import logging
#from logging.handlers import RotatingFileHandler
from flask import Flask, jsonify, make_response
app = Flask(__name__)
# app.logger.setLevel(logging.INFO)
# app.logger.disabled = False
# handler = logging.handlers.RotatingFileHandler(
# '/var/log/zpr_flask.log',
# 'a',
# maxBytes=1024 * 1024 * 100,
# backupCount=20
# )
# log = logging.getLogger('werkzeug')
# log.setLevel(logging.DEBUG)
# app.logger.addHandler(handler)
api_version = 'v1.0'
api_base = str('/zpr/{v}'.format(v=api_version))
@app.errorhandler(404)
def not_found(error):
return make_response(jsonify({'error': 'Not found'}), 404)
# @app.route('/zpr/job')
# def ls_test():
# return json.dumps(call('ls'))
# @app.route('{a}/job/rsync/<backup_host>'.format(a=api_base), methods=['GET'])
# def check_job(backup_host):
# job = str(lib_zpr.check_zpr_rsync_job(backup_host))
# return json.dumps(job)
@app.route('{a}/job/<backup_host>'.format(a=api_base), methods=['GET'])
def check_zpr_job(backup_host):
lib_zpr.check_tsp_job(backup_host)
return json.dumps(str(lib_zpr.check_tsp_job_out[0]))
@app.route('{a}/job/<backup_host>/output'.format(a=api_base), methods=['GET'])
def check_zpr_job_summary(backup_host):
lib_zpr.check_tsp_job(backup_host, show_changes=True)
for i in [lib_zpr.check_tsp_job_out[0]]:
json.dumps(i)
json.dumps(
lib_zpr.check_job_changes[lib_zpr.check_tsp_job_out.index(i)]
)
#return json.dumps(join_summary, indent=2)
if __name__ == '__main__':
# formatter = logging.Formatter(\
# "%(asctime)s - %(levelname)s - %(name)s: \t%(messages)s")
# handler.setFormatter(formatter)
app.run(debug=True, extra_files='/var/lib/zpr/api/lib_zpr.py')
| Python | 0.999868 |
938d255db088ff721e69659db1afdd5cfa109c3f | Save temp as C and F | webapp/app/views/api/v1/points.py | webapp/app/views/api/v1/points.py | import time
from flask import request, abort
from app.views.api.v1 import APIView_v1
class PointsParser(object):
def __init__(self, data):
self.points = []
self.interval = None
self.state = 'root'
lines = map(lambda l: l.split(), filter(None, map(str.strip, data.split('\n'))))
self.parse(lines)
def parse(self, lines):
for line in lines:
try:
reparse = False
while True:
self.state, reparse = getattr(self, 'parse_' + self.state + '_line')(*line)
if not reparse:
break
except Exception as e:
abort(400, "Malformed line '{}' - {}: {}".format(' '.join(line), e.__class__.__name__, str(e)))
def require_args(self, num, args):
if len(args) != num:
raise ValueError("Expected {} args, got {}".format(num, len(args)))
def parse_int(self, value):
try:
return int(value)
except ValueError:
raise ValueError("Invalid integer value")
def parse_float(self, value):
try:
return float(value)
except ValueError:
raise ValueError("Invalid float value")
def parse_root_line(self, keyword, *args):
if keyword == 'INTERVAL':
self.require_args(1, args)
self.interval = self.parse_int(args[0])
return 'root', False
elif keyword == 'POINT':
self.require_args(0, args)
self.points.append([])
return 'point', False
else:
raise ValueError("Invalid root keyword")
def parse_point_line(self, keyword, *args):
if keyword == 'SENSOR':
self.require_args(1, args)
self.points[-1].append({'sensor': args[0]})
return 'sensor', False
else:
raise ValueError("Invalid point keyword")
def parse_sensor_line(self, keyword, *args):
if keyword in ('HUMIDITY', 'TEMPERATURE'):
self.require_args(1, args)
if keyword == 'TEMPERATURE':
self.points[-1][-1]['temperature_c'] = self.parse_float(args[0])
self.points[-1][-1]['temperature_f'] = (self.parse_float(args[0]) * 1.8) + 32
else:
self.points[-1][-1][keyword.lower()] = self.parse_float(args[0])
return 'sensor', False
elif keyword == 'SENSOR':
return 'point', True
elif keyword == 'POINT':
return 'root', True
else:
raise ValueError("Invalid sensor keyword")
class PointsView(APIView_v1):
def post(self):
"""\
Expected data format:
INTERVAL 1000
POINT
SENSOR top
HUMIDITY 75.123455
TEMPERATURE 29.243513
"INTERVAL" is the interval between points in ms. The points sections
are ordered by time, ascending - the last section is assumed to be at
the current time.
A "POINT" section is a collection of datapoints for a variety of sensors
at a given point in time. The point section may be repeated any number
of times, and leading whitespace and blank lines are ignored.
A "SENSOR" section is the data from one sensor for a given point in
time. A point may contain data from many sensors. The sensor name,
given after the keyword, may be any string not containing a newline.
Each "SENSOR" section should contain a "TEMPERATURE" and "HUMIDITY". The
temperature is a floating point number specified in degrees celsius, and
the humidity is the percent humidity, also floating point
"""
now = time.time()
res = PointsParser(request.data)
for point in reversed(res.points):
for sensor in point:
sensor['timestamp'] = now
now += (res.interval / 1000.0)
return "OK"
| import time
from flask import request, abort
from app.views.api.v1 import APIView_v1
class PointsParser(object):
def __init__(self, data):
self.points = []
self.interval = None
self.state = 'root'
lines = map(lambda l: l.split(), filter(None, map(str.strip, data.split('\n'))))
self.parse(lines)
def parse(self, lines):
for line in lines:
try:
reparse = False
while True:
self.state, reparse = getattr(self, 'parse_' + self.state + '_line')(*line)
if not reparse:
break
except Exception as e:
abort(400, "Malformed line '{}' - {}: {}".format(' '.join(line), e.__class__.__name__, str(e)))
def require_args(self, num, args):
if len(args) != num:
raise ValueError("Expected {} args, got {}".format(num, len(args)))
def parse_int(self, value):
try:
return int(value)
except ValueError:
raise ValueError("Invalid integer value")
def parse_float(self, value):
try:
return float(value)
except ValueError:
raise ValueError("Invalid float value")
def parse_root_line(self, keyword, *args):
if keyword == 'INTERVAL':
self.require_args(1, args)
self.interval = self.parse_int(args[0])
return 'root', False
elif keyword == 'POINT':
self.require_args(0, args)
self.points.append([])
return 'point', False
else:
raise ValueError("Invalid root keyword")
def parse_point_line(self, keyword, *args):
if keyword == 'SENSOR':
self.require_args(1, args)
self.points[-1].append({'sensor': args[0]})
return 'sensor', False
else:
raise ValueError("Invalid point keyword")
def parse_sensor_line(self, keyword, *args):
if keyword in ('HUMIDITY', 'TEMPERATURE'):
self.require_args(1, args)
self.points[-1][-1][keyword.lower()] = self.parse_float(args[0])
return 'sensor', False
elif keyword == 'SENSOR':
return 'point', True
elif keyword == 'POINT':
return 'root', True
else:
raise ValueError("Invalid sensor keyword")
class PointsView(APIView_v1):
def post(self):
"""\
Expected data format:
INTERVAL 1000
POINT
SENSOR top
HUMIDITY 75.123455
TEMPERATURE 29.243513
"INTERVAL" is the interval between points in ms. The points sections
are ordered by time, ascending - the last section is assumed to be at
the current time.
A "POINT" section is a collection of datapoints for a variety of sensors
at a given point in time. The point section may be repeated any number
of times, and leading whitespace and blank lines are ignored.
A "SENSOR" section is the data from one sensor for a given point in
time. A point may contain data from many sensors. The sensor name,
given after the keyword, may be any string not containing a newline.
Each "SENSOR" section should contain a "TEMPERATURE" and "HUMIDITY". The
temperature is a floating point number specified in degrees celsius, and
the humidity is the percent humidity, also floating point
"""
now = time.time()
res = PointsParser(request.data)
for point in reversed(res.points):
for sensor in point:
sensor['timestamp'] = now
now += (res.interval / 1000.0)
return "OK"
| Python | 0.000001 |
a9bf968facd2a89017ef258e5afead093d1054f7 | add method execute | CURD.py | CURD.py | # coding=utf8
# Permission to use, copy, modify,
# and distribute this software for any purpose with
# or without fee is hereby granted,
# provided that the above copyright notice
# and this permission notice appear in all copies.
#
"""
CURD.py
~~~~~~~
Tiny Python ORM for MySQL
:Author: Hit9
:Email: nz2324[at]126.com
:URL: https://github.com/hit9/CURD.py
:License: BSD
"""
__version__ = '0.2.5'
import re
import sys
import MySQLdb
import MySQLdb.cursors
class Database(object):
"""Database connection manager"""
# configuration for connection with default values
configs = {
'host': 'localhost',
'port': 3306,
'db': '',
'user': '',
'passwd': '',
'charset': 'utf8'
}
# It is strongly recommended that you set this True
autocommit = True
# MySQL connection object
conn = None
@classmethod
def config(cls, autocommit=True, **configs):
"""
Configure the database connection.
The connection will be auto established with these configs.
Keyword parameters for this method:
host
string, host to connect
user
string, user to connect as
passwd
string, password for this user
db
string, database to use
port
integer, TCP/IP port to connect
charset
string, charset of connection
See the MySQLdb documentation for more information,
the parameters of `MySQLdb.connect` are all supported.
"""
cls.configs.update(configs)
cls.autocommit = autocommit
@classmethod
def connect(cls):
"""
Connect to database, this method will new a connect object
"""
cls.conn = MySQLdb.connect(
cursorclass=MySQLdb.cursors.DictCursor, **cls.configs
)
cls.conn.autocommit(cls.autocommit)
@classmethod
def get_conn(cls):
"""
Get MySQL connection object.
if the conn is open and working, return it.
else new another one and return it.
"""
# singleton
if not cls.conn or not cls.conn.open:
cls.connect()
try:
# ping to test if this conn is working
cls.conn.ping()
except MySQLdb.OperationalError:
cls.connect()
return cls.conn
@classmethod
def execute(cls, sql):
"""
Execute one sql
parameters
sql
string, sql command to run
"""
cursor = cls.get_conn().cursor()
cursor.execute(sql)
return cursor
| # coding=utf8
# Permission to use, copy, modify,
# and distribute this software for any purpose with
# or without fee is hereby granted,
# provided that the above copyright notice
# and this permission notice appear in all copies.
#
"""
CURD.py
~~~~~~~
Tiny Python ORM for MySQL
:Author: Hit9
:Email: nz2324[at]126.com
:URL: https://github.com/hit9/CURD.py
:License: BSD
"""
__version__ = '0.2.5'
import re
import sys
import MySQLdb
import MySQLdb.cursors
class Database(object):
"""Database connection manager"""
# configuration for connection with default values
configs = {
'host': 'localhost',
'port': 3306,
'db': '',
'user': '',
'passwd': '',
'charset': 'utf8'
}
# It is strongly recommended that you set this True
autocommit = True
# MySQL connection object
conn = None
@classmethod
def config(cls, autocommit=True, **configs):
"""
Configure the database connection.
The connection will be auto established with these configs.
Keyword parameters for this method:
host
string, host to connect
user
string, user to connect as
passwd
string, password for this user
db
string, database to use
port
integer, TCP/IP port to connect
charset
string, charset of connection
See the MySQLdb documentation for more information,
the parameters of `MySQLdb.connect` are all supported.
"""
cls.configs.update(configs)
cls.autocommit = autocommit
@classmethod
def connect(cls):
"""
Connect to database, this method will new a connect object
"""
cls.conn = MySQLdb.connect(
cursorclass=MySQLdb.cursors.DictCursor, **cls.configs
)
cls.conn.autocommit(cls.autocommit)
@classmethod
def get_conn(cls):
"""
Get MySQL connection object.
if the conn is open and working, return it.
else new another one and return it.
"""
# singleton
if not cls.conn or not cls.conn.open:
cls.connect()
try:
# ping to test if this conn is working
cls.conn.ping()
except MySQLdb.OperationalError:
cls.connect()
return cls.conn
| Python | 0.000005 |
452899e183c6a8dcb2e7eb10a34a9a560e99145f | test problems | basic_cms/tests/test_api.py | basic_cms/tests/test_api.py | """Django page CMS functionnal tests suite module."""
from basic_cms.models import Page
from basic_cms.tests.testcase import TestCase
import json
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
class CMSPagesApiTests(TestCase):
fixtures = ['pages_tests.json', 'api.json']
# def setUp(self):
# self.original_data = Page.objects.from_path('terms', 'eng')
# self.original_json_data = json.dumps(self.original_data.dump_json_data())
# self.original_html_data = render_to_string(self.original_data.template,
# {"current_page": self.original_data})
def tests_basic_cms_api_access(self):
from django.test.client import Client
self.client = Client()
self.original_data = Page.objects.from_path('terms', 'en-us')
self.original_json_data = json.dumps(self.original_data.dump_json_data())
self.original_html_data = render_to_string(self.original_data.template,
{"current_page": self.original_data})
data = {
'format': 'json'
}
response = self.client.get(reverse('basic_cms_api', args=['alamakota']), data)
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('basic_cms_api', args=['terms']), data)
self.assertEqual(response.status_code, 200)
# self.assertJSONEqual(self.original_json_data, response.content)
self.assertEqual(self.original_json_data, response.content)
response = self.client.get(reverse('basic_cms_api', args=['terms']))
self.assertEqual(response.status_code, 200)
self.assertIn('Please read these Terms of Use', response.content)
response = self.client.get(reverse('basic_cms_api', args=['coaches']), {'format': 'json'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['title']['en-us'], 'coaches')
self.assertEqual(len(response.data['children']), 3)
self.assertEqual(response.data['children'][0]['title']['en-us'], 'Judith Singer')
self.assertEqual(response.data['children'][1]['title']['en-us'], 'Melissa Litwak')
self.assertEqual(response.data['children'][2]['title']['en-us'], 'Joanna Schaffler')
def test_urls(self):
from utils import links_append_domain
body = """
<a href="http://google.com">google.com</a>
<a href="foo">foo</a>
<a href="#a">#a</a>
<a href="/#a">/#a</a>
<img src="http://x.com/x.jpg"/>
<img src="a.jpg"/>
"""
return_body = """
<html><body><a href="http://google.com">google.com</a>
<a href="http://a.com/foo">foo</a>
<a href="#a">#a</a>
<a href="/#a">/#a</a>
<img src="http://x.com/x.jpg"/>
<img src="http://a.com/a.jpg"/></body></html>
"""
self.assertEqual(links_append_domain(body, 'http://a.com').strip(), return_body.strip())
| """Django page CMS functionnal tests suite module."""
from basic_cms.models import Page
from basic_cms.tests.testcase import TestCase
import json
from django.template.loader import render_to_string
from django.core.urlresolvers import reverse
class CMSPagesApiTests(TestCase):
fixtures = ['pages_tests.json', 'api.json']
# def setUp(self):
# self.original_data = Page.objects.from_path('terms', 'eng')
# self.original_json_data = json.dumps(self.original_data.dump_json_data())
# self.original_html_data = render_to_string(self.original_data.template,
# {"current_page": self.original_data})
def tests_basic_cms_api_access(self):
from django.test.client import Client
self.client = Client()
self.original_data = Page.objects.from_path('terms', 'en-us')
self.original_json_data = json.dumps(self.original_data.dump_json_data())
self.original_html_data = render_to_string(self.original_data.template,
{"current_page": self.original_data})
data = {
'format': 'json'
}
response = self.client.get(reverse('basic_cms_api', args=['alamakota']), data)
self.assertEqual(response.status_code, 404)
response = self.client.get(reverse('basic_cms_api', args=['terms']), data)
self.assertEqual(response.status_code, 200)
# self.assertJSONEqual(self.original_json_data, response.content)
self.assertEqual(self.original_json_data, response.content)
response = self.client.get(reverse('basic_cms_api', args=['terms']))
self.assertEqual(response.status_code, 200)
self.assertIn('Please read these Terms of Use', response.content)
response = self.client.get(reverse('basic_cms_api', args=['coaches']), {'format': 'json'})
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data['title']['en-us'], 'coaches')
self.assertEqual(len(response.data['children']), 3)
self.assertEqual(response.data['children'][0]['title']['en-us'], 'Judith Singer')
self.assertEqual(response.data['children'][1]['title']['en-us'], 'Melissa Litwak')
self.assertEqual(response.data['children'][2]['title']['en-us'], 'Joanna Schaffler')
def test_urls(self):
from utils import links_append_domain
body = """
<a href="http://google.com">google.com</a>
<a href="foo">foo</a>
<a href="#a">#a</a>
<a href="/#a">/#a</a>
<img src="http://x.com/x.jpg"/>
<img src="a.jpg"/>
"""
return_body = """
<a href="http://google.com">google.com</a>
<a href="http://a.com/foo">foo</a>
<a href="#a">#a</a>
<a href="/#a">/#a</a>
<img src="http://x.com/x.jpg"/>
<img src="http://a.com/a.jpg"/>
"""
print links_append_domain(body, 'http://a.com').strip()
self.assertIn(links_append_domain(body, 'http://a.com').strip(), return_body.strip())
| Python | 0.000011 |
c358f467bbab9bd0366347f9a1bd10cb2e027bb8 | use moksha widget template | fedoracommunity/mokshaapps/packagemaintresource/controllers/root.py | fedoracommunity/mokshaapps/packagemaintresource/controllers/root.py | from moksha.lib.base import Controller
from moksha.lib.helpers import MokshaApp
from tg import expose, tmpl_context
from fedoracommunity.widgets import SubTabbedContainer
class TabbedNav(SubTabbedContainer):
tabs= (MokshaApp('Overview', 'fedoracommunity.packagemaint.overview'),
MokshaApp('Builds', 'fedoracommunity.builds'),
MokshaApp('Updates', 'fedoracommunity.updates'),
MokshaApp('Packages', 'fedoracommunity.packagemaint.packages'),
MokshaApp('Package Groups', 'fedoracommunity.packagemaint.packagegroups'),
)
class RootController(Controller):
def __init__(self):
self.widget = TabbedNav('packagemaintnav')
@expose('mako:moksha.templates.widget')
def index(self):
tmpl_context.widget = self.widget
return {'options':{}}
| from moksha.lib.base import Controller
from moksha.lib.helpers import MokshaApp
from tg import expose, tmpl_context
from fedoracommunity.widgets import SubTabbedContainer
class TabbedNav(SubTabbedContainer):
tabs= (MokshaApp('Overview', 'fedoracommunity.packagemaint.overview'),
MokshaApp('Builds', 'fedoracommunity.builds'),
MokshaApp('Updates', 'fedoracommunity.updates'),
MokshaApp('Packages', 'fedoracommunity.packagemaint.packages'),
MokshaApp('Package Groups', 'fedoracommunity.packagemaint.packagegroups'),
)
class RootController(Controller):
def __init__(self):
self.widget = TabbedNav('packagemaintnav')
@expose('mako:fedoracommunity.mokshaapps.packagemaintresource.templates.index')
def index(self):
tmpl_context.widget = self.widget
return {}
| Python | 0 |
367b28277b03473e6453ad9aa26c734136db4105 | use compound dimensions | ktbh/modelling.py | ktbh/modelling.py |
import json
class AutoModellingException(Exception): pass
def make_model(amount_field, date_field, fields):
currency = "GBP"
dataset_name = "new-dataset"
description = "Dataset description"
label = "Dataset label"
dataset = {
"description": description,
"temporal_granularity": "day",
"schema_version": "2011-12-07",
"name": dataset_name,
"category": "other",
"currency": currency,
"label": label
}
def dimension(name, column_id, dim_type, data_type):
assert dim_type in ["date", "attribute", "measure"]
assert data_type in ["float", "string", "date"]
return (name, {
"default_value": "",
"description": name.title(),
"column": column_id,
"label": name.title(),
"datatype": data_type,
"type": dim_type
})
def compound_dimension(name, column_id):
return (name,
{
"attributes": {
"name": {
"datatype": "id",
"column": column_id
},
"label": {
"column": column_id,
"datatype": "string"
}
},
"type": "compound",
"description": column_id,
"label": column_id
})
def as_os_type(t):
if t in ["integer", "number"]:
return "float"
else:
return "string"
dimensions_list = [
dimension("amount", amount_field["label"], "measure", "float"),
dimension("time", date_field["label"], "date", "date"),
]
for f in fields:
#dim = dimension(f["id"], f["label"],
# "attribute", as_os_type(f["type"]))
dim = compound_dimension(f["id"], f["label"])
dimensions_list.append(dim)
dimensions_list.append(("unique_rowid",
{"default_value": "",
"description": "Nonce Row ID",
"column": "unique_rowid",
"label": "RowID",
"datatype": "string",
"key": True,
"type": "attribute"
}))
return {
"dataset": dataset,
"mapping": dict(dimensions_list)
}
def infer_model_callback(body):
args = json.loads(body)
fields = args["schema"]["fields"]
# we need a date
# an amount
types = [ field["type"] for field in fields ]
numbers = filter(lambda s: s == "number", types)
if len(numbers) != 1:
raise AutoModellingException("Found more than one numerical field")
dates = filter(lambda s: s == "date", types)
if len(dates) != 1:
raise AutoModellingException("Found more than one date field")
other_fields = filter(lambda s: s not in ["number", "date"], types)
model = make_model(
[ f for f in fields if f["type"] == "number"][0],
[ f for f in fields if f["type"] == "date" ][0],
[ f for f in fields if f["type"] not in ["number", "date"]]
)
args["model"] = model
return [ ("import", args) ]
|
import json
class AutoModellingException(Exception): pass
def make_model(amount_field, date_field, fields):
currency = "GBP"
dataset_name = "new-dataset"
description = "Dataset description"
label = "Dataset label"
dataset = {
"description": description,
"temporal_granularity": "day",
"schema_version": "2011-12-07",
"name": dataset_name,
"category": "other",
"currency": currency,
"label": label
}
def dimension(name, column_id, dim_type, data_type):
assert dim_type in ["date", "attribute", "measure"]
assert data_type in ["float", "string", "date"]
return (name, {
"default_value": "",
"description": name.title(),
"column": column_id,
"label": name.title(),
"datatype": data_type,
"type": dim_type
})
def as_os_type(t):
if t in ["integer", "number"]:
return "float"
else:
return "string"
dimensions_list = [
dimension("amount", amount_field["label"], "measure", "float"),
dimension("time", date_field["label"], "date", "date"),
]
for f in fields:
dim = dimension(f["id"], f["label"], "attribute", as_os_type(f["type"]))
dimensions_list.append(dim)
dimensions_list.append(("unique_rowid",
{"default_value": "",
"description": "Nonce Row ID",
"column": "unique_rowid",
"label": "RowID",
"datatype": "string",
"key": True,
"type": "attribute"
}))
return {
"dataset": dataset,
"mapping": dict(dimensions_list)
}
def infer_model_callback(body):
args = json.loads(body)
fields = args["schema"]["fields"]
# we need a date
# an amount
types = [ field["type"] for field in fields ]
numbers = filter(lambda s: s == "number", types)
if len(numbers) != 1:
raise AutoModellingException("Found more than one numerical field")
dates = filter(lambda s: s == "date", types)
if len(dates) != 1:
raise AutoModellingException("Found more than one date field")
other_fields = filter(lambda s: s not in ["number", "date"], types)
model = make_model(
[ f for f in fields if f["type"] == "number"][0],
[ f for f in fields if f["type"] == "date" ][0],
[ f for f in fields if f["type"] not in ["number", "date"]]
)
args["model"] = model
return [ ("import", args) ]
| Python | 0.000001 |
5b6aa3f6cca7ea83a53178be7b9e58892597ac0b | Add some logging to Auth | opwen_email_server/services/auth.py | opwen_email_server/services/auth.py | from abc import ABCMeta
from abc import abstractmethod
from functools import lru_cache
from typing import Callable
from typing import Optional
from azure.storage.table import TableService
from opwen_email_server.utils.log import LogMixin
class Auth(metaclass=ABCMeta):
@abstractmethod
def domain_for(self, client_id: str) -> Optional[str]:
raise NotImplementedError # pramga: no cover
class AzureAuth(Auth, LogMixin):
def __init__(self, account: str, key: str, table: str,
client: TableService=None,
client_factory: Callable[..., TableService]=TableService
) -> None:
self._account = account
self._key = key
self._table = table
self.__client = client
self._client_factory = client_factory
@property
def _client(self) -> TableService:
if self.__client is not None:
return self.__client
client = self._client_factory(self._account, self._key)
client.create_table(self._table)
self.__client = client
return client
def insert(self, client_id: str, domain: str):
self._client.insert_entity(self._table, {
'RowKey': client_id,
'PartitionKey': client_id,
'domain': domain,
})
self.log_debug('Registered client %s at domain %s', client_id, domain)
def domain_for(self, client_id):
try:
return self._domain_for_cached(client_id)
except KeyError:
return None
@lru_cache(maxsize=128)
def _domain_for_cached(self, client_id: str) -> str:
query = "PartitionKey eq '{0}' and RowKey eq '{0}'".format(client_id)
entities = self._client.query_entities(self._table, query)
for entity in entities:
domain = entity.get('domain')
if domain:
self.log_debug('Got domain %s for client %s', domain, client_id)
return domain
self.log_debug('Unrecognized client %s', client_id)
raise KeyError
| from abc import ABCMeta
from abc import abstractmethod
from functools import lru_cache
from typing import Callable
from typing import Optional
from azure.storage.table import TableService
class Auth(metaclass=ABCMeta):
@abstractmethod
def domain_for(self, client_id: str) -> Optional[str]:
raise NotImplementedError # pramga: no cover
class AzureAuth(Auth):
def __init__(self, account: str, key: str, table: str,
client: TableService=None,
client_factory: Callable[..., TableService]=TableService
) -> None:
self._account = account
self._key = key
self._table = table
self.__client = client
self._client_factory = client_factory
@property
def _client(self) -> TableService:
if self.__client is not None:
return self.__client
client = self._client_factory(self._account, self._key)
client.create_table(self._table)
self.__client = client
return client
def insert(self, client_id: str, domain: str):
self._client.insert_entity(self._table, {
'RowKey': client_id,
'PartitionKey': client_id,
'domain': domain,
})
def domain_for(self, client_id):
try:
return self._domain_for_cached(client_id)
except KeyError:
return None
@lru_cache(maxsize=128)
def _domain_for_cached(self, client_id: str) -> str:
query = "PartitionKey eq '{0}' and RowKey eq '{0}'".format(client_id)
entities = self._client.query_entities(self._table, query)
for entity in entities:
domain = entity.get('domain')
if domain:
return domain
raise KeyError
| Python | 0.000001 |
4b2a29c484ddd5e2dfb4ad91bb0ae5c7681553c1 | Bump version to 0.1.5 | lacrm/_version.py | lacrm/_version.py | __version_info__ = (0, 1, 5)
__version__ = '.'.join(map(str, __version_info__))
| __version_info__ = (0, 1, 4)
__version__ = '.'.join(map(str, __version_info__))
| Python | 0.000001 |
0cdac10ee51cc3e812ae9188606301e6be0644ee | Fix default url bug | web/project/main/urls.py | web/project/main/urls.py | from django.conf.urls import url, include
from rest_framework.authtoken import views as authviews
from rest_framework_jwt import views as jwt_views
from . import views
urlpatterns = [
url(r'^home/', views.index, name='index'),
# Authentication APIs
url(r'^api/auth', jwt_views.obtain_jwt_token, name="auth"),
url(r'^api/token-verify', jwt_views.verify_jwt_token, name="token-verify"),
url(r'^api/token-refresh', jwt_views.refresh_jwt_token, name="token-refresh"),
# User APIs
url(r'^api/register', views.UserCreateView.as_view(), name="register"),
url(r'^api/entity', views.EntityCreateView.as_view(), name="entity"),
url(r'^api/doctor', views.DoctorCreateView.as_view(), name="doctor"),
url(r'^api/login', views.UserLoginView.as_view(), name="login"),
url(r'^api/user', views.CurrentUserView.as_view(), name="user"),
url(r'^api/profile', views.UserProfileView.as_view(), name="profile"),
url(r'^api/record', views.RecordAPIView.as_view(), name="record"),
url(r'^api/questions', views.QuestionGetAPIView.as_view(), name="questions"),
url(r'^api/answer', views.AnswerAPIView.as_view(), name="answer"),
url(r'^api/symptom', views.SymptomAPIView.as_view(), name="symptom"),
url(r'^api/edit_symptom/(?P<record>\d+)/(?P<symptom>\d+)$', views.SymptomUpdateView.as_view(), name="edit_symptom"),
url(r'^api/edit_answer/(?P<record>\d+)/(?P<question>\d+)$', views.AnswerUpdateView.as_view(), name="edit_answer"),
url(r'^api/edit_record/(?P<pk>\d+)$', views.RecordUpdateView.as_view(), name="edit_record"),
url(r'^api/edit_question/(?P<pk>\d+)$', views.QuestionUpdateView.as_view(), name="edit_question"),
# Default URL
url(r'', views.index, name='index'),
]
| from django.conf.urls import url, include
from rest_framework.authtoken import views as authviews
from rest_framework_jwt import views as jwt_views
from . import views
urlpatterns = [
url(r'', views.index, name='index'),
url(r'^home/', views.index, name='index'),
# Authentication APIs
url(r'^api/auth', jwt_views.obtain_jwt_token, name="auth"),
url(r'^api/token-verify', jwt_views.verify_jwt_token, name="token-verify"),
url(r'^api/token-refresh', jwt_views.refresh_jwt_token, name="token-refresh"),
# User APIs
url(r'^api/register', views.UserCreateView.as_view(), name="register"),
url(r'^api/entity', views.EntityCreateView.as_view(), name="entity"),
url(r'^api/doctor', views.DoctorCreateView.as_view(), name="doctor"),
url(r'^api/login', views.UserLoginView.as_view(), name="login"),
url(r'^api/user', views.CurrentUserView.as_view(), name="user"),
url(r'^api/profile', views.UserProfileView.as_view(), name="profile"),
url(r'^api/record', views.RecordAPIView.as_view(), name="record"),
url(r'^api/questions', views.QuestionGetAPIView.as_view(), name="questions"),
url(r'^api/answer', views.AnswerAPIView.as_view(), name="answer"),
url(r'^api/symptom', views.SymptomAPIView.as_view(), name="symptom"),
url(r'^api/edit_symptom/(?P<record>\d+)/(?P<symptom>\d+)$', views.SymptomUpdateView.as_view(), name="edit_symptom"),
url(r'^api/edit_answer/(?P<record>\d+)/(?P<question>\d+)$', views.AnswerUpdateView.as_view(), name="edit_answer"),
url(r'^api/edit_record/(?P<pk>\d+)$', views.RecordUpdateView.as_view(), name="edit_record"),
url(r'^api/edit_question/(?P<pk>\d+)$', views.QuestionUpdateView.as_view(), name="edit_question"),
]
| Python | 0.000003 |
9f7837f572017a4a8176c4e74b0aaba0625905ed | Add support for custom import apps | parachute/management/commands/import_from.py | parachute/management/commands/import_from.py | import logging
from optparse import make_option
from django.db.models.loading import load_app
from django.core.management.base import LabelCommand
class Command(LabelCommand):
import_app = 'parachute'
option_list = LabelCommand.option_list + (
make_option('--database',
dest='database',
default=None,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--force-update',
action='store_true',
dest='force_update',
default=False,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--import-customers',
action='store_true',
dest='import_customers',
default=False,
help='Import only the customers.'),
make_option('--import-catalogue',
action='store_true',
dest='import_catalogue',
default=False,
help='Import only the catalogue.'),
make_option('--import-orders',
action='store_true',
dest='import_orders',
default=False,
help='Import only the orders.'),
make_option('--import-old-urls',
action='store_true',
dest='import_old_urls',
default=False,
help='Import the old urls of categories into url-tracker.'),
)
def handle_label(self, label, **options):
platform_app = None
logger = self._get_logger()
logger.debug('attempting to import app: %s', label)
try:
load_app(label)
except ImportError:
logger.debug("could not import custom backend '%s'", label)
else:
platform_app = label
if not platform_app:
logger.debug('trying to import from parachute default apps')
# import the correct app for the desired backend
label = "%s.%s" % (self.import_app, label)
try:
load_app(label)
except ImportError:
logger.error("invalid import backend '%s' specified", label)
return
else:
platform_app = label
logger.info("succesfully loaded importer app for '%s'", platform_app)
try:
backend = __import__(platform_app, globals(), locals(), ['Importer'])
importer = backend.Importer(
force_update=options.get('force_update', False),
verbosity=int(options.get('verbosity', logging.INFO)),
)
except AttributeError:
logger.error("no importer available in backend '%s'", platform_app)
return
logger.debug("found importer object for '%s'", platform_app)
importer.prepare_import(**options)
if options.get('import_customers'):
importer.import_customers()
if options.get('import_catalogue'):
importer.import_catalogue()
if options.get('import_orders'):
importer.import_orders()
if options.get('import_old_urls'):
importer.import_old_urls()
def _get_logger(self):
logger = logging.getLogger(__file__)
stream = logging.StreamHandler(self.stdout)
logger.addHandler(stream)
logger.setLevel(logging.DEBUG)
return logger
| import logging
from optparse import make_option
from django.db.models.loading import load_app
from django.core.management.base import LabelCommand
class Command(LabelCommand):
import_app = 'importer'
option_list = LabelCommand.option_list + (
make_option('--importer',
dest='force_update',
help='Specify you own importer app to be used by parachute.'),
make_option('--database',
dest='database',
default=None,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--force-update',
action='store_true',
dest='force_update',
default=False,
help='Force importer to updated existing DB entries with imported ones.'),
make_option('--import-customers',
action='store_true',
dest='import_customers',
default=False,
help='Import only the customers.'),
make_option('--import-catalogue',
action='store_true',
dest='import_catalogue',
default=False,
help='Import only the catalogue.'),
make_option('--import-orders',
action='store_true',
dest='import_orders',
default=False,
help='Import only the orders.'),
make_option('--import-old-urls',
action='store_true',
dest='import_old_urls',
default=False,
help='Import the old urls of categories into url-tracker.'),
)
def handle_label(self, label, **options):
logger = self._get_logger()
# import the correct app for the desired backend
platform_app = options.importer
if not platform_app:
platform_app = "%s.%s" % (self.import_app, label)
logger.debug('trying to import platform app: %s', platform_app)
try:
load_app("%s.%s" % (self.import_app, label))
except ImportError:
logger.error("invalid import backend '%s' specified", label)
return
logger.info("succesfully loaded importer app for '%s'", label)
try:
backend = __import__(platform_app, globals(), locals(), ['Importer'])
importer = backend.Importer(
force_update=options.get('force_update', False),
verbosity=int(options.get('verbosity', logging.INFO)),
)
except AttributeError:
logger.error("no importer available in backend '%s'", platform_app)
return
logger.debug("found importer object for '%s'", platform_app)
importer.prepare_import(**options)
if options.get('import_customers'):
importer.import_customers()
if options.get('import_catalogue'):
importer.import_catalogue()
if options.get('import_orders'):
importer.import_orders()
if options.get('import_old_urls'):
importer.import_old_urls()
def _get_logger(self):
logger = logging.getLogger(__file__)
stream = logging.StreamHandler(self.stdout)
logger.addHandler(stream)
logger.setLevel(logging.DEBUG)
return logger
| Python | 0 |
27a39812088b9312314b44a013483b49a77d8dfb | update set of modules to install for pyquickhelper | src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py | src/pymyinstall/packaged/packaged_config_0_pyquickhelper.py | #-*- coding: utf-8 -*-
"""
@file
@brief Defines different a set of usual modules for Python.
"""
import sys
def pyquickhelper_set():
"""
list of modules needed to run unit test of module *pyquickhelper*
"""
names = [
"alabaster",
"autopep8",
"babel",
"certifi",
"colorama",
"coverage",
"Cython",
"cycler",
"decorator",
"docutils",
"flake8",
"futures",
"husl",
"ipython",
"ipykernel",
"ipystata" if sys.version_info[0] == 2 else None,
"ipython_genutils",
"ipywidgets",
"jinja2",
"jsonschema",
"jupyter-console",
"jupyter",
"jupyter_core",
"jupyter_client",
"jupyter-pip",
"lxml",
"matplotlib",
"metakernel",
"micropython-libc" if not sys.platform.startswith("win") else None,
"micropython-ffilib" if not sys.platform.startswith(
"win") else None,
"micropython-fcntl" if not sys.platform.startswith(
"win") else None,
'markupsafe',
"mccabe",
"mistune",
"multi_key_dict",
"nbformat",
"nbconvert",
"nose",
"backports_abc",
"notebook",
"notedown",
"numpy",
"onedrive-sdk-python",
"openpyxl",
"path.py",
"pbr",
"pandas",
"pep8",
"pexpect" if not sys.platform.startswith("win") else None,
"pickleshare",
"pipdeptree",
"psutil",
"ptyprocess" if not sys.platform.startswith("win") else None,
"pycparser",
"pycryptodome",
"pyflakes",
"pygments",
"pyparsing",
'pypiserver',
"python-dateutil",
"python-jenkins",
"pytz",
"pywin32" if sys.platform.startswith("win") else None,
"pyzmq",
"qtconsole",
"requests",
"simplegeneric",
"six",
"sphinx",
'sphinxcontrib-images',
'sphinxcontrib-imagesvg',
'sphinxcontrib-jsdemo',
'snowballstemmer',
'sphinx-rtd-theme',
"sphinxjp.themes.revealjs",
"terminado" if not sys.platform.startswith("win") else None,
"tornado",
"traitlets",
"virtualenv",
"wheel",
"wild_sphinx_theme",
"winshell" if sys.platform.startswith("win") else None,
]
from . import find_module_install
return [find_module_install(_) for _ in names if _ is not None]
| #-*- coding: utf-8 -*-
"""
@file
@brief Defines different a set of usual modules for Python.
"""
import sys
def pyquickhelper_set():
"""
list of modules needed to run unit test of module *pyquickhelper*
"""
names = [
"alabaster",
"autopep8",
"babel",
"certifi",
"colorama",
"coverage",
"Cython",
"cycler",
"decorator",
"docutils",
"flake8",
"futures",
"husl",
"ipython",
"ipykernel",
"ipystata" if sys.version_info[0] == 2 else None,
"ipython_genutils",
"ipywidgets",
"jinja2",
"jsonschema",
"jupyter-console",
"jupyter",
"jupyter_core",
"jupyter_client",
"jupyter-pip",
"lxml",
"matplotlib",
"metakernel",
"micropython-libc" if not sys.platform.startswith("win") else None,
"micropython-ffilib" if not sys.platform.startswith(
"win") else None,
"micropython-fcntl" if not sys.platform.startswith(
"win") else None,
'markupsafe',
"mccabe",
"mistune",
"multi_key_dict",
"nbformat",
"nbconvert",
"nose",
"backports_abc",
"notebook",
"notedown",
"numpy",
"onedrive-sdk-python",
"openpyxl",
"path.py",
"pbr",
"pandas",
"pep8",
"pexpect" if not sys.platform.startswith("win") else None,
"pickleshare",
"pipdeptree",
"psutil",
"ptyprocess" if not sys.platform.startswith("win") else None,
"pycparser",
"pyflakes",
"pygments",
"pyparsing",
'pypiserver',
"python-dateutil",
"python-jenkins",
"pytz",
"pywin32" if sys.platform.startswith("win") else None,
"pyzmq",
"qtconsole",
"requests",
"simplegeneric",
"six",
"sphinx",
'sphinxcontrib-images',
'sphinxcontrib-imagesvg',
'sphinxcontrib-jsdemo',
'snowballstemmer',
'sphinx-rtd-theme',
"sphinxjp.themes.revealjs",
"terminado" if not sys.platform.startswith("win") else None,
"tornado",
"traitlets",
"virtualenv",
"wheel",
"wild_sphinx_theme",
"winshell" if sys.platform.startswith("win") else None,
]
from . import find_module_install
return [find_module_install(_) for _ in names if _ is not None]
| Python | 0 |
ba73e1e06dae26716da29a02c1705458d402a9be | update PRISM model to take CDDs into account for electricity | eemeter/meter/prism.py | eemeter/meter/prism.py | from eemeter.meter.base import MeterBase
from eemeter.config.yaml_parser import load
class PRISMMeter(MeterBase):
"""Implementation of Princeton Scorekeeping Method.
"""
def __init__(self,**kwargs):
super(self.__class__, self).__init__(**kwargs)
self.meter = load(self._meter_yaml())
def _meter_yaml(self):
meter_yaml = """
!obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.FuelTypePresenceMeter {
fuel_types: [electricity,natural_gas]
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: electricity_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "kWh",
fuel_type: "electricity",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDCDDBalancePointModel &elec_model {
x0: [1.,1.,1.,60.,5],
bounds: [[0,100],[0,100],[0,100],[55,65],[2,10]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "electricity",
temperature_unit_str: "degF",
model: *elec_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_electricity,
annualized_usage: annualized_usage_electricity,
},
},
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: natural_gas_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "therms",
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDBalancePointModel &gas_model {
x0: [60,1.,1.],
bounds: [[55,65],[0,100],[0,100]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: *gas_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_natural_gas,
annualized_usage: annualized_usage_natural_gas,
},
},
},
]
}
"""
return meter_yaml
def evaluate_mapped_inputs(self,**kwargs):
return self.meter.evaluate(**kwargs)
def _get_child_inputs(self):
return self.meter.get_inputs()
| from eemeter.meter.base import MeterBase
from eemeter.config.yaml_parser import load
class PRISMMeter(MeterBase):
"""Implementation of Princeton Scorekeeping Method.
"""
def __init__(self,**kwargs):
super(self.__class__, self).__init__(**kwargs)
self.meter = load(self._meter_yaml())
def _meter_yaml(self):
meter_yaml = """
!obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.FuelTypePresenceMeter {
fuel_types: [electricity,natural_gas]
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: electricity_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "kWh",
fuel_type: "electricity",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDBalancePointModel &elec_model {
x0: [60,1.,1.],
bounds: [[55,65],[0,100],[0,100]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "electricity",
temperature_unit_str: "degF",
model: *elec_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_electricity,
annualized_usage: annualized_usage_electricity,
},
},
},
!obj:eemeter.meter.ConditionalMeter {
condition_parameter: natural_gas_presence,
success: !obj:eemeter.meter.SequentialMeter {
sequence: [
!obj:eemeter.meter.TemperatureSensitivityParameterOptimizationMeter {
fuel_unit_str: "therms",
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: !obj:eemeter.models.HDDBalancePointModel &gas_model {
x0: [60,1.,1.],
bounds: [[55,65],[0,100],[0,100]],
},
},
!obj:eemeter.meter.AnnualizedUsageMeter {
fuel_type: "natural_gas",
temperature_unit_str: "degF",
model: *gas_model,
},
],
output_mapping: {
temp_sensitivity_params: temp_sensitivity_params_natural_gas,
annualized_usage: annualized_usage_natural_gas,
},
},
},
]
}
"""
return meter_yaml
def evaluate_mapped_inputs(self,**kwargs):
return self.meter.evaluate(**kwargs)
def _get_child_inputs(self):
return self.meter.get_inputs()
| Python | 0 |
9e0c83e751e72e3396a4729392b972834b25c8b7 | Add TODO | v2/aws_secgroup_ids_from_names.py | v2/aws_secgroup_ids_from_names.py | # (c) 2015, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and a list of one or more
security Group Names and returns a list of matching security Group IDs.
Example Usage:
{{ lookup('aws_secgroup_ids_from_names', ('eu-west-1', ['nginx_group', 'mysql_group'])) }}
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto
import boto.ec2
except ImportError:
raise AnsibleError("aws_secgroup_ids_from_names lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
sg_list = []
region = terms[0]
group_names = terms[1]
conn = boto.ec2.connect_to_region(region)
#TODO: Use OR filter rather than making multiple calls
for group_name in group_names:
filters = {'group_name': group_name}
sg = conn.get_all_security_groups(filters=filters)
if sg and sg[0]:
sg_list.append(sg[0].id)
return sg_list
| # (c) 2015, Jon Hadfield <jon@lessknown.co.uk>
"""
Description: This lookup takes an AWS region and a list of one or more
security Group Names and returns a list of matching security Group IDs.
Example Usage:
{{ lookup('aws_secgroup_ids_from_names', ('eu-west-1', ['nginx_group', 'mysql_group'])) }}
"""
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import codecs
from ansible.errors import *
from ansible.plugins.lookup import LookupBase
try:
import boto
import boto.ec2
except ImportError:
raise AnsibleError("aws_secgroup_ids_from_names lookup cannot be run without boto installed")
class LookupModule(LookupBase):
def run(self, terms, variables=None, **kwargs):
if isinstance(terms, basestring):
terms = [terms]
sg_list = []
region = terms[0]
group_names = terms[1]
conn = boto.ec2.connect_to_region(region)
for group_name in group_names:
filters = {'group_name': group_name}
sg = conn.get_all_security_groups(filters=filters)
if sg and sg[0]:
sg_list.append(sg[0].id)
return sg_list
| Python | 0.000002 |
0c731bf993eea346421d9dbcd5eaa61484e84018 | fix bug in site_hrn() | sfa/util/plxrn.py | sfa/util/plxrn.py | # specialized Xrn class for PlanetLab
import re
from sfa.util.xrn import Xrn
# temporary helper functions to use this module instead of namespace
def hostname_to_hrn (auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
def hostname_to_urn(auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
def slicename_to_hrn (auth_hrn, slicename):
return PlXrn(auth=auth_hrn,slicename=slicename).get_hrn()
def email_to_hrn (auth_hrn, email):
return PlXrn(auth=auth_hrn, email=email).get_hrn()
def hrn_to_pl_slicename (hrn):
return PlXrn(xrn=hrn,type='slice').pl_slicename()
def hrn_to_pl_login_base (hrn):
return PlXrn(xrn=hrn,type='slice').pl_login_base()
def hrn_to_pl_authname (hrn):
return PlXrn(xrn=hrn,type='any').pl_authname()
class PlXrn (Xrn):
@staticmethod
def site_hrn (auth, login_base):
return '.'.join([auth,login_base])
def __init__ (self, auth=None, hostname=None, slicename=None, email=None, **kwargs):
#def hostname_to_hrn(auth_hrn, login_base, hostname):
if hostname is not None:
self.type='node'
# keep only the first part of the DNS name
#self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
# escape the '.' in the hostname
self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
self.hrn_to_urn()
#def slicename_to_hrn(auth_hrn, slicename):
elif slicename is not None:
self.type='slice'
# split at the first _
parts = slicename.split("_",1)
self.hrn = ".".join([auth] + parts )
self.hrn_to_urn()
#def email_to_hrn(auth_hrn, email):
elif email is not None:
self.type='person'
# keep only the part before '@' and replace special chars into _
self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
self.hrn_to_urn()
else:
Xrn.__init__ (self,**kwargs)
#def hrn_to_pl_slicename(hrn):
def pl_slicename (self):
self._normalize()
leaf = self.leaf
leaf = re.sub('[^a-zA-Z0-9_]', '', leaf)
return self.pl_login_base() + '_' + leaf
#def hrn_to_pl_authname(hrn):
def pl_authname (self):
self._normalize()
return self.authority[-1]
#def hrn_to_pl_login_base(hrn):
def pl_login_base (self):
self._normalize()
base = self.authority[-1]
# Fix up names of GENI Federates
base = base.lower()
base = re.sub('\\\[^a-zA-Z0-9]', '', base)
if len(base) > 20:
base = base[len(base)-20:]
return base
| # specialized Xrn class for PlanetLab
import re
from sfa.util.xrn import Xrn
# temporary helper functions to use this module instead of namespace
def hostname_to_hrn (auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_hrn()
def hostname_to_urn(auth, login_base, hostname):
return PlXrn(auth=auth+'.'+login_base,hostname=hostname).get_urn()
def slicename_to_hrn (auth_hrn, slicename):
return PlXrn(auth=auth_hrn,slicename=slicename).get_hrn()
def email_to_hrn (auth_hrn, email):
return PlXrn(auth=auth_hrn, email=email).get_hrn()
def hrn_to_pl_slicename (hrn):
return PlXrn(xrn=hrn,type='slice').pl_slicename()
def hrn_to_pl_login_base (hrn):
return PlXrn(xrn=hrn,type='slice').pl_login_base()
def hrn_to_pl_authname (hrn):
return PlXrn(xrn=hrn,type='any').pl_authname()
class PlXrn (Xrn):
@staticmethod
def site_hrn (auth, login_base):
return '.'.join(auth,login_base)
def __init__ (self, auth=None, hostname=None, slicename=None, email=None, **kwargs):
#def hostname_to_hrn(auth_hrn, login_base, hostname):
if hostname is not None:
self.type='node'
# keep only the first part of the DNS name
#self.hrn='.'.join( [auth,hostname.split(".")[0] ] )
# escape the '.' in the hostname
self.hrn='.'.join( [auth,Xrn.escape(hostname)] )
self.hrn_to_urn()
#def slicename_to_hrn(auth_hrn, slicename):
elif slicename is not None:
self.type='slice'
# split at the first _
parts = slicename.split("_",1)
self.hrn = ".".join([auth] + parts )
self.hrn_to_urn()
#def email_to_hrn(auth_hrn, email):
elif email is not None:
self.type='person'
# keep only the part before '@' and replace special chars into _
self.hrn='.'.join([auth,email.split('@')[0].replace(".", "_").replace("+", "_")])
self.hrn_to_urn()
else:
Xrn.__init__ (self,**kwargs)
#def hrn_to_pl_slicename(hrn):
def pl_slicename (self):
self._normalize()
leaf = self.leaf
leaf = re.sub('[^a-zA-Z0-9_]', '', leaf)
return self.pl_login_base() + '_' + leaf
#def hrn_to_pl_authname(hrn):
def pl_authname (self):
self._normalize()
return self.authority[-1]
#def hrn_to_pl_login_base(hrn):
def pl_login_base (self):
self._normalize()
base = self.authority[-1]
# Fix up names of GENI Federates
base = base.lower()
base = re.sub('\\\[^a-zA-Z0-9]', '', base)
if len(base) > 20:
base = base[len(base)-20:]
return base
| Python | 0 |
1337c5269d97dc6f1cd47aed838cf26c6b488be2 | bump version | shell/__init__.py | shell/__init__.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
__title__ = 'shell'
__version__ = '0.0.7'
__author__ = 'Qingping Hou'
__license__ = 'MIT'
from .run_cmd import RunCmd
from .input_stream import InputStream
from .api import instream, cmd, pipe_all, ex, p, ex_all
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
__title__ = 'shell'
__version__ = '0.0.6'
__author__ = 'Qingping Hou'
__license__ = 'MIT'
from .run_cmd import RunCmd
from .input_stream import InputStream
from .api import instream, cmd, pipe_all, ex, p, ex_all
| Python | 0 |
5873bb323d21ab8f9373518a5dd9688df4b38a9a | Break line before 80 columns. | shell/src/main.py | shell/src/main.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import bayeslite
import bayeslite.crosscat
import bayeslite.shell.core as shell
import bayeslite.shell.hook as hook
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('bdbpath', type=str, nargs='?', default=':memory:',
help="bayesdb database file")
parser.add_argument('-j', '--njob', type=int, default=None,
help="Max number of jobs (processes) useable.")
parser.add_argument('-s', '--seed', type=int, default=None,
help="Random seed for the default generator.")
parser.add_argument('-f', '--file', type=str, nargs="+", default=None,
help="Path to commands file. May be used to specify a "
"project-specific init file.")
parser.add_argument('--batch', action='store_true',
help="Exit after executing file specified with -f.")
parser.add_argument('--debug', action='store_true', help="For unit tests.")
parser.add_argument('--no-init-file', action='store_true',
help="Do not load ~/.bayesliterc")
args = parser.parse_args(argv)
return args
def run(stdin, stdout, stderr, argv):
args = parse_args(argv[1:])
bdb = bayeslite.bayesdb_open(pathname=args.bdbpath)
# People shouldn't have to ask to go fast, they should have to ask to
# slow down.
if args.njob not in [0, 1]:
import crosscat.MultiprocessingEngine as ccme
crosscat = ccme.MultiprocessingEngine(seed=args.seed,
cpu_count=args.njob)
else:
import crosscat.LocalEngine as ccle
crosscat = ccle.LocalEngine(seed=args.seed)
metamodel = bayeslite.crosscat.CrosscatMetamodel(crosscat)
bayeslite.bayesdb_register_metamodel(bdb, metamodel)
bdbshell = shell.Shell(bdb, 'crosscat', debug=args.debug)
with hook.set_current_shell(bdbshell):
if not args.no_init_file:
init_file = os.path.join(os.path.expanduser('~/.bayesliterc'))
if os.path.isfile(init_file):
bdbshell.dot_read(init_file)
if args.file is not None:
for path in args.file:
if os.path.isfile(path):
bdbshell.dot_read(path)
else:
bdbshell.stdout.write('%s is not a file. Aborting.\n' %
str(path))
break
bdbshell.cmdloop()
return 0
def main():
import sys
sys.exit(run(sys.stdin, sys.stdout, sys.stderr, sys.argv))
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
# Copyright (c) 2010-2014, MIT Probabilistic Computing Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import bayeslite
import bayeslite.crosscat
import bayeslite.shell.core as shell
import bayeslite.shell.hook as hook
def parse_args(argv):
parser = argparse.ArgumentParser()
parser.add_argument('bdbpath', type=str, nargs='?', default=':memory:',
help="bayesdb database file")
parser.add_argument('-j', '--njob', type=int, default=None,
help="Max number of jobs (processes) useable.")
parser.add_argument('-s', '--seed', type=int, default=None,
help="Random seed for the default generator.")
parser.add_argument('-f', '--file', type=str, nargs="+", default=None,
help="Path to commands file. May be used to specify a "
"project-specific init file.")
parser.add_argument('--batch', action='store_true',
help="Exit after executing file specified with -f.")
parser.add_argument('--debug', action='store_true', help="For unit tests.")
parser.add_argument('--no-init-file', action='store_true',
help="Do not load ~/.bayesliterc")
args = parser.parse_args(argv)
return args
def run(stdin, stdout, stderr, argv):
args = parse_args(argv[1:])
bdb = bayeslite.bayesdb_open(pathname=args.bdbpath)
# People shouldn't have to ask to go fast, they should have to ask to
# slow down.
if args.njob not in [0, 1]:
import crosscat.MultiprocessingEngine as ccme
crosscat = ccme.MultiprocessingEngine(seed=args.seed,
cpu_count=args.njob)
else:
import crosscat.LocalEngine as ccle
crosscat = ccle.LocalEngine(seed=args.seed)
metamodel = bayeslite.crosscat.CrosscatMetamodel(crosscat)
bayeslite.bayesdb_register_metamodel(bdb, metamodel)
bdbshell = shell.Shell(bdb, 'crosscat', debug=args.debug)
with hook.set_current_shell(bdbshell):
if not args.no_init_file:
init_file = os.path.join(os.path.expanduser('~/.bayesliterc'))
if os.path.isfile(init_file):
bdbshell.dot_read(init_file)
if args.file is not None:
for path in args.file:
if os.path.isfile(path):
bdbshell.dot_read(path)
else:
bdbshell.stdout.write('%s is not a file. Aborting.\n' % str(path))
break
bdbshell.cmdloop()
return 0
def main():
import sys
sys.exit(run(sys.stdin, sys.stdout, sys.stderr, sys.argv))
if __name__ == '__main__':
main()
| Python | 0.000001 |
3ccd648ba58fd7e6a84b94e464094d0c5e3a8e55 | Add line to separate results | states/bootstrap/bootstrap.dir/modules/utils/salt_output.py | states/bootstrap/bootstrap.dir/modules/utils/salt_output.py | #!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_file_data(file_path):
"""
Load YAML formated data from file_path.
"""
# Instead of using `with` keyword, perform standard `try`/`finally`
# to support Python 2.5 on RHEL5.
yaml_file = open(file_path, 'r')
try:
loaded_data = yaml.load(yaml_file)
finally:
yaml_file.close()
return loaded_data
###############################################################################
def load_yaml_string_data(text_content):
"""
Load YAML formated data from string.
"""
loaded_data = yaml.load(text_content)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
success_counter = 0
total_counter = 0
for state_key in local_result.keys():
# Separate visually one result from another.
logging.info("---")
total_counter = total_counter + 1
logging.info("`comment`: " + str(local_result[state_key]['comment']))
if 'name' in local_result[state_key]:
logging.info("`name`: " + str(local_result[state_key]['name']))
result_value = local_result[state_key]['result']
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
success_counter = success_counter + 1
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
if overall_result:
logging.info("SUCCESS: " + str(success_counter) + " of " + str(total_counter))
else:
logging.critical("FAILURE: " + str(success_counter) + " of " + str(total_counter))
return overall_result
###############################################################################
# MAIN
# Execute futher only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_file_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
| #!/usr/bin/env python
#
import sys
import yaml
import logging
###############################################################################
def load_yaml_file_data(file_path):
"""
Load YAML formated data from file_path.
"""
# Instead of using `with` keyword, perform standard `try`/`finally`
# to support Python 2.5 on RHEL5.
yaml_file = open(file_path, 'r')
try:
loaded_data = yaml.load(yaml_file)
finally:
yaml_file.close()
return loaded_data
###############################################################################
def load_yaml_string_data(text_content):
"""
Load YAML formated data from string.
"""
loaded_data = yaml.load(text_content)
return loaded_data
###############################################################################
def check_result(salt_output):
"""
Check result provided by Salt for local (see `salt-call`) execution.
"""
local_result = salt_output['local']
overall_result = True
success_counter = 0
total_counter = 0
for state_key in local_result.keys():
total_counter = total_counter + 1
logging.info("`comment`: " + str(local_result[state_key]['comment']))
if 'name' in local_result[state_key]:
logging.info("`name`: " + str(local_result[state_key]['name']))
result_value = local_result[state_key]['result']
if result_value is None:
logging.critical("unexpected `result` value: " + str(result_value))
overall_result = False
elif result_value == False:
logging.info("result: " + str(result_value))
overall_result = False
# Do not break the loop.
# Instead, keep on generating log output
elif result_value == True:
success_counter = success_counter + 1
logging.info("result: " + str(result_value))
else:
logging.info("unexpected `result` value: " + str(result_value))
overall_result = False
if overall_result:
logging.info("SUCCESS: " + str(success_counter) + " of " + str(total_counter))
else:
logging.critical("FAILURE: " + str(success_counter) + " of " + str(total_counter))
return overall_result
###############################################################################
# MAIN
# Execute futher only if this file is executed as a script (not imported
# as a module).
if __name__ == '__main__':
logging.getLogger().setLevel(0)
file_path = sys.argv[1]
salt_output = load_yaml_file_data(file_path)
overall_result = check_result(salt_output)
if overall_result:
sys.exit(0)
else:
sys.exit(1)
| Python | 0 |
21ab430368ee262377c77f1ecc24b645377dd520 | Revert "Bug Fix: sort keys when creating json data to send" | generic_request_signer/client.py | generic_request_signer/client.py | import six
from datetime import date
import json
import decimal
if six.PY3:
import urllib.request as urllib
else:
import urllib2 as urllib
from generic_request_signer import response, factory
def json_encoder(obj):
if isinstance(obj, date):
return str(obj.isoformat())
if isinstance(obj, decimal.Decimal):
return str(obj)
class Client(object):
def __init__(self, api_credentials):
self.api_credentials = api_credentials
def get_factory(self, files):
if files:
return factory.MultipartSignedRequestFactory
return factory.SignedRequestFactory
def _get_response(self, http_method, endpoint, data=None, files=None, timeout=15, **request_kwargs):
headers = request_kwargs.get("headers", {})
if not isinstance(data, str) and headers.get("Content-Type") == "application/json":
data = json.dumps(data, default=json_encoder)
try:
http_response = urllib.urlopen(
self._get_request(http_method, endpoint, data, files, **request_kwargs), timeout=timeout)
except urllib.HTTPError as e:
http_response = e
return response.Response(http_response)
def _get_request(self, http_method, endpoint, data=None, files=None, **request_kwargs):
factory_class = self.get_factory(files)
request_factory = factory_class(http_method, self._client_id, self._private_key, data, files)
service_url = self._get_service_url(endpoint)
return request_factory.create_request(service_url, **request_kwargs)
def _get_service_url(self, endpoint):
return self._base_url + endpoint
@property
def _base_url(self):
return self.api_credentials.base_url
@property
def _client_id(self):
return self.api_credentials.client_id
@property
def _private_key(self):
return self.api_credentials.private_key
| import six
from datetime import date
import json
import decimal
from apysigner import DefaultJSONEncoder
if six.PY3:
import urllib.request as urllib
else:
import urllib2 as urllib
from generic_request_signer import response, factory
def json_encoder(obj):
if isinstance(obj, date):
return str(obj.isoformat())
if isinstance(obj, decimal.Decimal):
return str(obj)
class Client(object):
def __init__(self, api_credentials):
self.api_credentials = api_credentials
def get_factory(self, files):
if files:
return factory.MultipartSignedRequestFactory
return factory.SignedRequestFactory
def _get_response(self, http_method, endpoint, data=None, files=None, timeout=15, **request_kwargs):
headers = request_kwargs.get("headers", {})
if not isinstance(data, str) and headers.get("Content-Type") == "application/json":
data = json.dumps(data, default=DefaultJSONEncoder, sort_keys=True)
try:
http_response = urllib.urlopen(
self._get_request(http_method, endpoint, data, files, **request_kwargs), timeout=timeout)
except urllib.HTTPError as e:
http_response = e
return response.Response(http_response)
def _get_request(self, http_method, endpoint, data=None, files=None, **request_kwargs):
factory_class = self.get_factory(files)
request_factory = factory_class(http_method, self._client_id, self._private_key, data, files)
service_url = self._get_service_url(endpoint)
return request_factory.create_request(service_url, **request_kwargs)
def _get_service_url(self, endpoint):
return self._base_url + endpoint
@property
def _base_url(self):
return self.api_credentials.base_url
@property
def _client_id(self):
return self.api_credentials.client_id
@property
def _private_key(self):
return self.api_credentials.private_key
| Python | 0 |
67dfbfa250cd5de550a493c9951d456e05b05454 | Make ModelImporter.model static for flexibility of usage | girder/utility/model_importer.py | girder/utility/model_importer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import importlib
from . import camelcase
# We want the models to essentially be singletons, so we keep this centralized
# cache of instantiated models that have been lazy-loaded.
_modelInstances = {}
def _loadModel(model, module, plugin):
global _modelInstances
className = camelcase(model)
try:
imported = importlib.import_module(module)
except ImportError: # pragma: no cover
raise Exception('Could not load model "{}".'.format(module))
try:
constructor = getattr(imported, className)
except AttributeError: # pragma: no cover
raise Exception('Incorrect model class name "{}" for model "{}".'
.format(className, module))
_modelInstances[plugin][model] = constructor()
def clearModels():
"""
Force reloading of all models by clearing the singleton cache. This is
used by the test suite to ensure that indices are built properly
at startup.
"""
global _modelInstances
_modelInstances = {}
class ModelImporter(object):
"""
Any class that wants to have convenient model importing semantics
should extend/mixin this class.
"""
@staticmethod
def model(model, plugin='_core'):
"""
Call this to get the instance of the specified model. It will be
lazy-instantiated.
:param model: The name of the model to get. This is the module
name, e.g. "folder". The class name must be the
upper-camelcased version of that module name, e.g.
"Folder".
:type model: string
:param plugin: If the model you wish to load is a model within a plugin,
set this to the name of the plugin containing the model.
:returns: The instantiated model, which is a singleton.
"""
global _modelInstances
if plugin not in _modelInstances:
_modelInstances[plugin] = {}
if model not in _modelInstances[plugin]:
if plugin == '_core':
module = 'girder.models.{}'.format(model)
else:
module = 'girder.plugins.{}.models.{}'.format(plugin, model)
_loadModel(model, module, plugin)
return _modelInstances[plugin][model]
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright 2013 Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import importlib
from . import camelcase
# We want the models to essentially be singletons, so we keep this centralized
# cache of instantiated models that have been lazy-loaded.
_modelInstances = {}
def _loadModel(model, module, plugin):
global _modelInstances
className = camelcase(model)
try:
imported = importlib.import_module(module)
except ImportError: # pragma: no cover
raise Exception('Could not load model "{}".'.format(module))
try:
constructor = getattr(imported, className)
except AttributeError: # pragma: no cover
raise Exception('Incorrect model class name "{}" for model "{}".'
.format(className, module))
_modelInstances[plugin][model] = constructor()
def clearModels():
"""
Force reloading of all models by clearing the singleton cache. This is
used by the test suite to ensure that indices are built properly
at startup.
"""
global _modelInstances
_modelInstances = {}
class ModelImporter(object):
"""
Any class that wants to have convenient model importing semantics
should extend/mixin this class.
"""
def model(self, model, plugin='_core'):
"""
Call this to get the instance of the specified model. It will be
lazy-instantiated.
:param model: The name of the model to get. This is the module
name, e.g. "folder". The class name must be the
upper-camelcased version of that module name, e.g.
"Folder".
:type model: string
:param plugin: If the model you wish to load is a model within a plugin,
set this to the name of the plugin containing the model.
:returns: The instantiated model, which is a singleton.
"""
global _modelInstances
if plugin not in _modelInstances:
_modelInstances[plugin] = {}
if model not in _modelInstances[plugin]:
if plugin == '_core':
module = 'girder.models.{}'.format(model)
else:
module = 'girder.plugins.{}.models.{}'.format(plugin, model)
_loadModel(model, module, plugin)
return _modelInstances[plugin][model]
| Python | 0 |
b28ca4abf8a6986b96bfb89cf8737c8f737fee4e | update boto import to use boto3 (#1000) | global_settings/wagtail_hooks.py | global_settings/wagtail_hooks.py | import boto3
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler
from wagtail.core import hooks
from django.urls import reverse
from wagtail.admin.menu import MenuItem
from .models import CloudfrontDistribution
@hooks.register('register_rich_text_features')
def register_strikethrough_feature(features):
"""
Registering the `superscript` feature, which uses the `SUPERSCRIPT` Draft.js inline style type,
and is stored as HTML with an `<sup>` tag.
"""
feature_name = 'superscript'
type_ = 'SUPERSCRIPT'
tag = 'sup'
control = {
'type': type_,
'label': '^',
'description': 'Superscript',
}
features.register_editor_plugin(
'draftail', feature_name, draftail_features.InlineStyleFeature(control)
)
db_conversion = {
'from_database_format': {tag: InlineStyleElementHandler(type_)},
'to_database_format': {'style_map': {type_: tag}},
}
features.default_features.append(feature_name)
features.register_converter_rule('contentstate', feature_name, db_conversion)
@hooks.register('after_edit_page')
def purge_cloudfront_caches(page, request):
try:
distribution = CloudfrontDistribution.objects.all()[0]
client = boto3.client('cloudfront')
response = client.create_invalidation(
DistributionId=distribution.distribution_id,
InvalidationBatch={
'Paths': {
'Quantity': 1,
'Items': [
'/apps/cms/api/*' # invalidate the entire cache for the website
],
},
'CallerReference': str(time()).replace(".", "")
}
)
except CloudfrontDistribution.DoesNotExist:
return
@hooks.register('register_settings_menu_item')
def register_500_menu_item():
return MenuItem('Generate 500', reverse('throw_error'), classnames='icon icon-warning', order=10000)
| import boto
import wagtail.admin.rich_text.editors.draftail.features as draftail_features
from wagtail.admin.rich_text.converters.html_to_contentstate import InlineStyleElementHandler
from wagtail.core import hooks
from django.urls import reverse
from wagtail.admin.menu import MenuItem
from .models import CloudfrontDistribution
@hooks.register('register_rich_text_features')
def register_strikethrough_feature(features):
"""
Registering the `superscript` feature, which uses the `SUPERSCRIPT` Draft.js inline style type,
and is stored as HTML with an `<sup>` tag.
"""
feature_name = 'superscript'
type_ = 'SUPERSCRIPT'
tag = 'sup'
control = {
'type': type_,
'label': '^',
'description': 'Superscript',
}
features.register_editor_plugin(
'draftail', feature_name, draftail_features.InlineStyleFeature(control)
)
db_conversion = {
'from_database_format': {tag: InlineStyleElementHandler(type_)},
'to_database_format': {'style_map': {type_: tag}},
}
features.default_features.append(feature_name)
features.register_converter_rule('contentstate', feature_name, db_conversion)
@hooks.register('after_edit_page')
def purge_cloudfront_caches(page, request):
try:
distribution = CloudfrontDistribution.objects.all()[0]
client = boto3.client('cloudfront')
response = client.create_invalidation(
DistributionId=distribution.distribution_id,
InvalidationBatch={
'Paths': {
'Quantity': 1,
'Items': [
'/apps/cms/api/*' # invalidate the entire cache for the website
],
},
'CallerReference': str(time()).replace(".", "")
}
)
except CloudfrontDistribution.DoesNotExist:
return
@hooks.register('register_settings_menu_item')
def register_500_menu_item():
return MenuItem('Generate 500', reverse('throw_error'), classnames='icon icon-warning', order=10000)
| Python | 0 |
c437074ee3ee15fc29790ca4de5413bbdd19728c | delete unused imports | autograd/convenience_wrappers.py | autograd/convenience_wrappers.py | """Convenience functions built on top of `grad`."""
from __future__ import absolute_import
import autograd.numpy as np
from autograd.core import grad, getval
def multigrad(fun, argnums=0):
"""Takes gradients wrt multiple arguments simultaneously."""
original_fun = fun
def combined_arg_fun(multi_arg, *args, **kwargs):
extra_args_list = list(args)
for argnum_ix, arg_ix in enumerate(argnums):
extra_args_list[arg_ix] = multi_arg[argnum_ix]
return original_fun(*extra_args_list, **kwargs)
gradfun = grad(combined_arg_fun, argnum=0)
def gradfun_rearranged(*args, **kwargs):
multi_arg = tuple([args[i] for i in argnums])
return gradfun(multi_arg, *args, **kwargs)
return gradfun_rearranged
def grad_and_aux(fun, argnum=0):
"""Builds a function that returns the gradient of the first output and the
(unmodified) second output of a function that returns two outputs."""
def grad_and_aux_fun(*args, **kwargs):
saved_aux = []
def return_val_save_aux(*args, **kwargs):
val, aux = fun(*args, **kwargs)
saved_aux.append(aux)
return val
gradval = grad(return_val_save_aux, argnum)(*args, **kwargs)
return gradval, saved_aux[0]
return grad_and_aux_fun
def value_and_grad(fun, argnum=0):
"""Returns a function that returns both value and gradient. Suitable for use
in scipy.optimize"""
def double_val_fun(*args, **kwargs):
val = fun(*args, **kwargs)
return val, getval(val)
gradval_and_val = grad_and_aux(double_val_fun, argnum)
def value_and_grad_fun(*args, **kwargs):
gradval, val = gradval_and_val(*args, **kwargs)
return val, gradval
return value_and_grad_fun
def elementwise_grad(fun, argnum=0):
"""Like `jacobian`, but produces a function which computes just the diagonal
of the Jacobian, and does the computation in one pass rather than in a loop.
Note: this is only valid if the Jacobian is diagonal. Only arrays are
currently supported."""
def sum_output(*args, **kwargs):
return np.sum(fun(*args, **kwargs))
return grad(sum_output, argnum=argnum)
def hessian_vector_product(fun, argnum=0):
"""Builds a function that returns the exact Hessian-vector product.
The returned function has arguments (*args, vector, **kwargs), and takes
roughly 4x as long to evaluate as the original function."""
fun_grad = grad(fun, argnum)
def vector_dot_grad(*args, **kwargs):
args, vector = args[:-1], args[-1]
return np.dot(vector, fun_grad(*args, **kwargs))
return grad(vector_dot_grad, argnum) # Grad wrt original input.
def hessian(fun, argnum=0):
"""Returns a function that computes the exact Hessian.
The Hessian is computed by calling hessian_vector_product separately for
each row. For a function with N inputs, this takes roughly 4N times as
long as a single evaluation of the original function."""
hvp = hessian_vector_product(fun, argnum)
def hessian_fun(*args, **kwargs):
arg_in = args[argnum]
directions = np.eye(arg_in.size) # axis-aligned directions.
hvp_list = [hvp(*(args+(direction,)), **kwargs) for direction in directions]
return np.array(hvp_list)
return hessian_fun
| """Convenience functions built on top of `grad`."""
from __future__ import absolute_import
import itertools as it
import autograd.numpy as np
from autograd.core import grad, getval
from builtins import map
def multigrad(fun, argnums=0):
"""Takes gradients wrt multiple arguments simultaneously."""
original_fun = fun
def combined_arg_fun(multi_arg, *args, **kwargs):
extra_args_list = list(args)
for argnum_ix, arg_ix in enumerate(argnums):
extra_args_list[arg_ix] = multi_arg[argnum_ix]
return original_fun(*extra_args_list, **kwargs)
gradfun = grad(combined_arg_fun, argnum=0)
def gradfun_rearranged(*args, **kwargs):
multi_arg = tuple([args[i] for i in argnums])
return gradfun(multi_arg, *args, **kwargs)
return gradfun_rearranged
def grad_and_aux(fun, argnum=0):
"""Builds a function that returns the gradient of the first output and the
(unmodified) second output of a function that returns two outputs."""
def grad_and_aux_fun(*args, **kwargs):
saved_aux = []
def return_val_save_aux(*args, **kwargs):
val, aux = fun(*args, **kwargs)
saved_aux.append(aux)
return val
gradval = grad(return_val_save_aux, argnum)(*args, **kwargs)
return gradval, saved_aux[0]
return grad_and_aux_fun
def value_and_grad(fun, argnum=0):
"""Returns a function that returns both value and gradient. Suitable for use
in scipy.optimize"""
def double_val_fun(*args, **kwargs):
val = fun(*args, **kwargs)
return val, getval(val)
gradval_and_val = grad_and_aux(double_val_fun, argnum)
def value_and_grad_fun(*args, **kwargs):
gradval, val = gradval_and_val(*args, **kwargs)
return val, gradval
return value_and_grad_fun
def elementwise_grad(fun, argnum=0):
"""Like `jacobian`, but produces a function which computes just the diagonal
of the Jacobian, and does the computation in one pass rather than in a loop.
Note: this is only valid if the Jacobian is diagonal. Only arrays are
currently supported."""
def sum_output(*args, **kwargs):
return np.sum(fun(*args, **kwargs))
return grad(sum_output, argnum=argnum)
def hessian_vector_product(fun, argnum=0):
"""Builds a function that returns the exact Hessian-vector product.
The returned function has arguments (*args, vector, **kwargs), and takes
roughly 4x as long to evaluate as the original function."""
fun_grad = grad(fun, argnum)
def vector_dot_grad(*args, **kwargs):
args, vector = args[:-1], args[-1]
return np.dot(vector, fun_grad(*args, **kwargs))
return grad(vector_dot_grad, argnum) # Grad wrt original input.
def hessian(fun, argnum=0):
"""Returns a function that computes the exact Hessian.
The Hessian is computed by calling hessian_vector_product separately for
each row. For a function with N inputs, this takes roughly 4N times as
long as a single evaluation of the original function."""
hvp = hessian_vector_product(fun, argnum)
def hessian_fun(*args, **kwargs):
arg_in = args[argnum]
directions = np.eye(arg_in.size) # axis-aligned directions.
hvp_list = [hvp(*(args+(direction,)), **kwargs) for direction in directions]
return np.array(hvp_list)
return hessian_fun
| Python | 0.000001 |
550133348a09b197025bc1352439cb055bf50c7b | Make sure mocks in place for setUp command. | autopush/tests/test_websocket.py | autopush/tests/test_websocket.py | import json
import twisted.internet.base
from mock import Mock
from moto import mock_dynamodb2
from txstatsd.metrics.metrics import Metrics
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.trial import unittest
from autopush.settings import AutopushSettings
from autopush.websocket import SimplePushServerProtocol
class WebsocketTestCase(unittest.TestCase):
@mock_dynamodb2
def setUp(self):
twisted.internet.base.DelayedCall.debug = True
self.proto = SimplePushServerProtocol()
settings = AutopushSettings(
crypto_key="i_CYcNKa2YXrF_7V1Y-2MFfoEl7b6KX55y_9uvOKfJQ=",
hostname="localhost",
statsd_host=None,
)
self.proto.settings = settings
self.proto.sendMessage = self.send_mock = Mock()
self.proto.sendClose = self.close_mock = Mock()
self.proto.transport = self.transport_mock = Mock()
settings.metrics = Mock(spec=Metrics)
def _connect(self):
self.proto.onConnect(None)
def _send_message(self, msg):
self.proto.onMessage(json.dumps(msg).encode('utf8'), False)
def _wait_for_message(self, d):
args = self.send_mock.call_args
if args:
self.send_mock.reset_mock()
d.callback(args)
return
reactor.callLater(0.1, self._wait_for_message, d)
def _wait_for_close(self, d):
if self.close_mock.call_args is not None:
d.callback(True)
return
reactor.callLater(0.1, self._wait_for_close, d)
def _check_response(self, func):
"""Waits for a message to be sent, and runs the func with it"""
def handle_message(result):
args, _ = result
func(json.loads(args[0]))
d = Deferred()
d.addCallback(handle_message)
self._wait_for_message(d)
return d
@mock_dynamodb2
def test_hello(self):
self._connect()
self._send_message(dict(messageType="hello", channelIDs=[]))
def check_result(msg):
assert "messageType" in msg
return self._check_response(check_result)
@mock_dynamodb2
def test_hello_dupe(self):
self._connect()
self._send_message(dict(messageType="hello", channelIDs=[]))
def check_second_hello(msg):
self.assert_("messageType" in msg)
self.assertEqual(msg["status"], 401)
def check_first_hello(msg):
assert "messageType" in msg
# Send another hello
self._send_message(dict(messageType="hello", channelIDs=[]))
return self._check_response(check_second_hello)
return self._check_response(check_first_hello)
@mock_dynamodb2
def test_not_hello(self):
self._connect()
self._send_message(dict(messageType="wooooo"))
def check_result(result):
assert result is True
d = Deferred()
d.addCallback(check_result)
self._wait_for_close(d)
return d
| import json
import twisted.internet.base
from mock import Mock
from moto import mock_dynamodb2
from txstatsd.metrics.metrics import Metrics
from twisted.internet import reactor
from twisted.internet.defer import Deferred
from twisted.trial import unittest
from autopush.settings import AutopushSettings
from autopush.websocket import SimplePushServerProtocol
class WebsocketTestCase(unittest.TestCase):
def setUp(self):
twisted.internet.base.DelayedCall.debug = True
self.proto = SimplePushServerProtocol()
settings = AutopushSettings(
crypto_key="i_CYcNKa2YXrF_7V1Y-2MFfoEl7b6KX55y_9uvOKfJQ=",
hostname="localhost",
statsd_host=None,
)
self.proto.settings = settings
self.proto.sendMessage = self.send_mock = Mock()
self.proto.sendClose = self.close_mock = Mock()
self.proto.transport = self.transport_mock = Mock()
settings.metrics = Mock(spec=Metrics)
def _connect(self):
self.proto.onConnect(None)
def _send_message(self, msg):
self.proto.onMessage(json.dumps(msg).encode('utf8'), False)
def _wait_for_message(self, d):
args = self.send_mock.call_args
if args:
self.send_mock.reset_mock()
d.callback(args)
return
reactor.callLater(0.1, self._wait_for_message, d)
def _wait_for_close(self, d):
if self.close_mock.call_args is not None:
d.callback(True)
return
reactor.callLater(0.1, self._wait_for_close, d)
def _check_response(self, func):
"""Waits for a message to be sent, and runs the func with it"""
def handle_message(result):
args, _ = result
func(json.loads(args[0]))
d = Deferred()
d.addCallback(handle_message)
self._wait_for_message(d)
return d
@mock_dynamodb2
def test_hello(self):
self._connect()
self._send_message(dict(messageType="hello", channelIDs=[]))
def check_result(msg):
assert "messageType" in msg
return self._check_response(check_result)
@mock_dynamodb2
def test_hello_dupe(self):
self._connect()
self._send_message(dict(messageType="hello", channelIDs=[]))
def check_second_hello(msg):
self.assert_("messageType" in msg)
self.assertEqual(msg["status"], 401)
def check_first_hello(msg):
assert "messageType" in msg
# Send another hello
self._send_message(dict(messageType="hello", channelIDs=[]))
return self._check_response(check_second_hello)
return self._check_response(check_first_hello)
@mock_dynamodb2
def test_not_hello(self):
self._connect()
self._send_message(dict(messageType="wooooo"))
def check_result(result):
assert result is True
d = Deferred()
d.addCallback(check_result)
self._wait_for_close(d)
return d
| Python | 0 |
009ab26737923cfff97ba37a035dcff7639135b1 | Replace all_pages_in_directory with concat_pdf_pages | Util.py | Util.py | """Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
"""Test whether or the the filename ends with '.pdf'."""
return fnmatch(filename, '*.pdf')
def all_pdf_files_in_directory(path):
"""Return a list of of PDF files in a directory."""
return [filename for filename in os.listdir(path) if pdf_file(filename)]
def concat_pdf_pages(files):
"""A generator that yields one PDF page a time for all pages in the PDF files."""
for input_file in files:
for page in PdfFileReader(input_file).pages:
yield page
def split_on_condition(iterable, predicate):
"""Split a iterable into chunks, where the first item in the chunk will be the
evaluate to True with predicate function, and the rest of the items in the chunk
evaluates to False."""
it = iter(iterable)
# Initialize the chunk list with an item
# StopIteration will be thrown if there are no further items in the iterator
chunk = [it.next()]
while True:
try:
item = it.next()
if predicate(item):
# If the next item should be in a new chunk then return the current chunk
yield chunk
# Then rest the chunk list
chunk = [item]
else:
# Simply append the item to current chunk if it doesn't match the predicate
chunk.append(item)
except StopIteration:
# If the end of the iterator is reached then simply return the current chunk
yield chunk
break
| """Collection of Helper Functions"""
import os
from fnmatch import fnmatch
from PyPDF2 import PdfFileReader
def pdf_file(filename):
"""Test whether or the the filename ends with '.pdf'."""
return fnmatch(filename, '*.pdf')
def all_pdf_files_in_directory(path):
"""Return a list of of PDF files in a directory."""
return [filename for filename in os.listdir(path) if pdf_file(filename)]
def all_pages_in_directory(path):
"""A generator that yields one PDF page a time for all the PDF in the directory."""
for filename in sorted(all_pdf_files_in_directory(path)):
with open(filename, 'rb') as input_file:
for page in PdfFileReader(input_file).pages:
yield page
def split_on_condition(iterable, predicate):
"""Split a iterable into chunks, where the first item in the chunk will be the
evaluate to True with predicate function, and the rest of the items in the chunk
evaluates to False."""
it = iter(iterable)
# Initialize the chunk list with an item
# StopIteration will be thrown if there are no further items in the iterator
chunk = [it.next()]
while True:
try:
item = it.next()
if predicate(item):
# If the next item should be in a new chunk then return the current chunk
yield chunk
# Then rest the chunk list
chunk = [item]
else:
# Simply append the item to current chunk if it doesn't match the predicate
chunk.append(item)
except StopIteration:
# If the end of the iterator is reached then simply return the current chunk
yield chunk
break
| Python | 0 |
d16373609b2f30c6ffa576c1269c529f12c9622c | Switch to fast method for personal timetable | backend/uclapi/timetable/urls.py | backend/uclapi/timetable/urls.py | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^personal$', views.get_personal_timetable_fast),
url(r'^bymodule$', views.get_modules_timetable),
]
| from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^personal_fast$', views.get_personal_timetable_fast),
url(r'^personal$', views.get_personal_timetable),
url(r'^bymodule$', views.get_modules_timetable),
]
| Python | 0 |
22785c709956365ac51bc3b79135e6debc6418ae | Exclude legacy objc API tests properly. | all.gyp | all.gyp | # Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'variables': {
'include_examples%': 1,
'include_tests%': 1,
'webrtc_root_additional_dependencies': [],
},
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'webrtc/webrtc.gyp:*',
'<@(webrtc_root_additional_dependencies)',
],
'conditions': [
['include_examples==1', {
'dependencies': [
'webrtc/webrtc_examples.gyp:*',
],
}],
['(OS=="ios" or (OS=="mac" and target_arch!="ia32")) and include_tests==1', {
'dependencies': [
'talk/app/webrtc/legacy_objc_api_tests.gyp:*',
],
}],
],
},
],
}
| # Copyright (c) 2013 The WebRTC project authors. All Rights Reserved.
#
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file in the root of the source
# tree. An additional intellectual property rights grant can be found
# in the file PATENTS. All contributing project authors may
# be found in the AUTHORS file in the root of the source tree.
{
'variables': {
'include_examples%': 1,
'include_tests%': 1,
'webrtc_root_additional_dependencies': [],
},
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'webrtc/webrtc.gyp:*',
'<@(webrtc_root_additional_dependencies)',
],
'conditions': [
['include_examples==1', {
'dependencies': [
'webrtc/webrtc_examples.gyp:*',
],
}],
['OS=="ios" or (OS=="mac" and target_arch!="ia32") and include_tests==1', {
'dependencies': [
'talk/app/webrtc/legacy_objc_api_tests.gyp:*',
],
}],
],
},
],
}
| Python | 0.000026 |
f6e6c10fe3a83be491eae7d1b675be0f49e639b6 | add key_watcher | MellPlayer/mell_controller.py | MellPlayer/mell_controller.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Netease Music MellController
Created on 2017-02-21
@author: Mellcap
'''
import threading
import time
import queue
import getch
import ui
import player
from directory import create_directory
CONFIG = {
# 主页
'q': 'quit',
'j': 'next_line',
'k': 'prev_line',
# 音乐
' ': 'play',
'n': 'next_song',
'p': 'prev_song',
'f': 'next_playlist',
'b': 'prev_playlist',
# 歌词
'l': 'lyric',
# 帮助
'h': 'help'
}
def my_log(loglevel, component, message):
if loglevel == 'error':
print('>>>>> I got an error')
print('[{}] {}: {}'.format(loglevel, component, message))
mell_ui = ui.UI()
mell_player = player.Player(log_handler=my_log, ytdl=True)
q = queue.Queue()
def watcher():
while 1:
key = getch.getch()
q.put(key)
if key == 'q':
break
def executor():
while 1:
try:
key = q.get_nowait()
action = CONFIG.get(key, None)
if action == 'quit':
break
elif action:
try:
func = 'handler_%s' % action
eval(func)()
except Exception:
pass
except Exception:
pass
def key_watcher():
t1 = threading.Thread(target=watcher)
t2 = threading.Thread(target=executor)
t1.start()
t2.start()
t1.join()
t2.join()
def handler_next_line():
mell_ui.next_line()
def handler_prev_line():
mell_ui.prev_line()
def handler_play():
print('Start Playing...')
def handler_next_song():
pass
def handler_prev_song():
pass
def handler_next_playlist():
pass
def handler_prev_playlist():
pass
def handler_lyric():
pass
def handler_help():
pass
# def run():
# p = '/Users/zhaoye/.MellPlayer/playlist.m3u'
# mell_player.loadlist(p)
# mell_player.wait_for_playback()
# print('wait 5 seconds')
# time.sleep(5)
# mell_player.pause = True
# print('wait another 5 seconds')
# time.sleep(5)
# mell_player.pause = False
# print('wait another 5 seconds')
# time.sleep(5)
# mell_player.play('http://m2.music.126.net/LJOP6drGL9Vo2cmkjbFazQ==/3393092919088491.mp3')
if __name__ == '__main__':
mell_ui.display()
key_watcher()
# create_directory()
# mell_player.category = '流行'
# mell_player.get_category_playlists()
# mell_player.run_playlist()
# t = threading.Thread(target=run)
# t.start()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Netease Music MellController
Created on 2017-02-21
@author: Mellcap
'''
import threading
import time
import getch
import ui
import player
from directory import create_directory
CONFIG = {
# 主页
'q': 'quit',
'j': 'next_line',
'k': 'prev_line',
# 音乐
' ': 'play',
'n': 'next_song',
'p': 'prev_song',
'f': 'next_playlist',
'b': 'prev_playlist',
# 歌词
'l': 'lyric',
# 帮助
'h': 'help'
}
def my_log(loglevel, component, message):
if loglevel == 'error':
print('>>>>> I got an error')
# print('>>>>> player: %s' % mell_player.playlist)
# mell_player.pause = True
# mell_player.next_song()
print('[{}] {}: {}'.format(loglevel, component, message))
mell_ui = ui.UI()
mell_player = player.Player(log_handler=my_log, ytdl=True)
def watch_key():
while 1:
key = getch.getch()
# print('key:%s' % key)
action = CONFIG.get(key, None)
if action == 'quit':
break
elif action:
try:
func = 'handler_%s' % action
eval(func)()
except Exception:
pass
def handler_next_line():
mell_ui.next_line()
def handler_prev_line():
mell_ui.prev_line()
def handler_play():
print('Start Playing...')
def handler_next_song():
pass
def handler_prev_song():
pass
def handler_next_playlist():
pass
def handler_prev_playlist():
pass
def handler_lyric():
pass
def handler_help():
pass
def run():
p = '/Users/zhaoye/.MellPlayer/playlist.m3u'
mell_player.loadlist(p)
mell_player.wait_for_playback()
print('wait 5 seconds')
time.sleep(5)
mell_player.pause = True
print('wait another 5 seconds')
time.sleep(5)
mell_player.pause = False
if __name__ == '__main__':
# mell_ui.display()
# watch_key()
# create_directory()
# mell_player.category = '流行'
# mell_player.get_category_playlists()
# mell_player.run_playlist()
t = threading.Thread(target=run)
t.start()
# mell_player.playlist_next()
| Python | 0.000002 |
127434cdc04ae3655747ff1e3530148404dbf849 | fix flush | blaz.py | blaz.py | from os import environ, chdir, getenv
from os.path import abspath, basename, dirname
from subprocess import check_call
from sys import argv
from colors import bold
from hashlib import md5
import sys
class Blaz(object):
def __init__(self, **kwargs):
self.__dict__ = kwargs
self.file = abspath(argv[0])
self.script = basename(self.file)
self.argv = ' '.join(argv[1:])
self.__dict__.update({
'dir': dirname(self.file),
'image': getenv('BLAZ_IMAGE', 'alpine-blaz'),
'docker_exe': getenv('DOCKER_EXE', '/usr/local/bin/docker'),
'docker_sock': getenv('DOCKER_SOCK', '/var/run/docker.sock')
})
chdir(self.dir)
self._create_lock()
def _create_lock(self):
m = md5()
m.update(bytes('{0.dir}/{0.script} {0.argv}'.format(self), 'utf-8'))
self.lock = m.hexdigest()
def _fresh(self):
if 'BLAZ_LOCK' in environ:
return environ['BLAZ_LOCK'] == self.lock
else:
return False
def invoke(self, main):
if self._fresh():
main(self)
else:
self._docker_run()
def log(self, msg='', fg='yellow'):
sys.stdout.flush()
sys.stderr.write(bold(msg + '\n', fg=fg))
sys.stderr.flush()
def run(self, cmd, fg='green'):
while True:
prev = cmd
cmd = cmd.format(self)
if prev == cmd:
break
self.log(cmd, fg=fg)
check_call(cmd, shell=True)
sys.stdout.flush()
sys.stderr.flush()
def _forward_blaz_env_vars(self):
result = []
for k in environ.keys():
if k.find('BLAZ_') == 0:
result.append('''
--env={}={}
'''.format(k, environ[k]))
elif k.find('_BLAZ_') == 0:
result.append('''
--env={0}=${0}
'''.format(k))
return ''.join(result)
def _docker_run(self):
cmd = '''
docker run
--rm
--privileged
--net=host
'''
cmd = cmd + self._forward_blaz_env_vars()
cmd = cmd + '''
--env=DOCKER_EXE={0.docker_exe}
--env=DOCKER_SOCK={0.docker_sock}
--env=BLAZ_LOCK={0.lock}
--volume={0.dir}:{0.dir}
--volume={0.docker_exe}:{0.docker_exe}
--volume={0.docker_sock}:{0.docker_sock}
{0.image}
{0.dir}/{0.script} {0.argv}
'''
cmd = '\n '.join([x.strip() + ' \\' for x in cmd.split('\n') if
x.strip() is not ''])[:-2]
self.run(cmd, fg='blue')
| from os import environ, chdir, getenv
from os.path import abspath, basename, dirname
from subprocess import check_call
from sys import argv
from colors import bold
from hashlib import md5
import sys
class Blaz(object):
def __init__(self, **kwargs):
self.__dict__ = kwargs
self.file = abspath(argv[0])
self.script = basename(self.file)
self.argv = ' '.join(argv[1:])
self.__dict__.update({
'dir': dirname(self.file),
'image': getenv('BLAZ_IMAGE', 'alpine-blaz'),
'docker_exe': getenv('DOCKER_EXE', '/usr/local/bin/docker'),
'docker_sock': getenv('DOCKER_SOCK', '/var/run/docker.sock')
})
chdir(self.dir)
self._create_lock()
def _create_lock(self):
m = md5()
m.update(bytes('{0.dir}/{0.script} {0.argv}'.format(self), 'utf-8'))
self.lock = m.hexdigest()
def _fresh(self):
if 'BLAZ_LOCK'.format(self) in environ:
return environ['BLAZ_LOCK'] == self.lock
else:
return False
def invoke(self, main):
if self._fresh():
main(self)
else:
self._docker_run()
def log(self, msg='', fg='yellow'):
sys.stdout.flush()
sys.stderr.write(bold(msg + '\n', fg=fg))
sys.stderr.flush()
def run(self, cmd, fg='green'):
while True:
prev = cmd
cmd = cmd.format(self)
if prev == cmd:
break
self.log(cmd, fg=fg)
check_call(cmd, shell=True)
def _forward_blaz_env_vars(self):
result = []
for k in environ.keys():
if k.find('BLAZ_') == 0:
result.append('''
--env={}={}
'''.format(k, environ[k]))
elif k.find('_BLAZ_') == 0:
result.append('''
--env={0}=${0}
'''.format(k))
return ''.join(result)
def _docker_run(self):
cmd = '''
docker run
--rm
--privileged
--net=host
'''
cmd = cmd + self._forward_blaz_env_vars()
cmd = cmd + '''
--env=DOCKER_EXE={0.docker_exe}
--env=DOCKER_SOCK={0.docker_sock}
--env=BLAZ_LOCK={0.lock}
--volume={0.dir}:{0.dir}
--volume={0.docker_exe}:{0.docker_exe}
--volume={0.docker_sock}:{0.docker_sock}
{0.image}
{0.dir}/{0.script} {0.argv}
'''
cmd = '\n '.join([x.strip() + ' \\' for x in cmd.split('\n') if
x.strip() is not ''])[:-2]
self.run(cmd, fg='blue')
| Python | 0.000001 |
bb679edf2b7030de07e3d3688327c5e13851232e | Troubleshoot CI | kevlar/__init__.py | kevlar/__init__.py | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
# Core libraries
from __future__ import print_function
try:
import __builtin__ as builtins
except:
import builtins
from collections import namedtuple
from gzip import open as gzopen
import re
import sys
# Third-party libraries
import khmer
import screed
# Internal modules
from kevlar import seqio
from kevlar import overlap
from kevlar import counting
from kevlar import sketch
from kevlar.seqio import parse_augmented_fastx, print_augmented_fastx
from kevlar.variantset import VariantSet
from kevlar.timer import Timer
# Subcommands and command-line interface
from kevlar import dump
from kevlar import novel
from kevlar import collect
from kevlar import filter
from kevlar import reaugment
from kevlar import mutate
from kevlar import assemble
from kevlar import count
from kevlar import partition
from kevlar import localize
from kevlar import cli
# C extension(s)
import kevlar.alignment
from kevlar.alignment import contig_align as align
from kevlar._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
if mode not in ['r', 'w']:
raise ValueError('invalid mode "{}"'.format(mode))
if filename in ['-', None]:
filehandle = sys.stdin if mode == 'r' else sys.stdout
return filehandle
openfunc = builtins.open
if filename.endswith('.gz'):
openfunc = gzopen
mode += 't'
return openfunc(filename, mode)
def revcom(seq):
return screed.dna.reverse_complement(str(seq))
def revcommin(seq):
rc = revcom(seq)
minseq = sorted((seq, rc))[0]
return minseq
def same_seq(seq1, seq2, seq2revcom=None):
if seq2revcom is None:
seq2revcom = revcom(seq2)
return seq1 == seq2 or seq1 == seq2revcom
def to_gml(graph, outfilename, logfile=sys.stderr):
"""Write the given read graph to a GML file."""
if not outfilename.endswith('.gml'):
print('[kevlar] WARNING: GML files usually need extension .gml',
file=logfile)
networkx.write_gml(graph, outfilename)
message = '[kevlar] graph written to {}'.format(args.gml)
print(message, file=logfile)
def multi_file_iter_screed(filenames):
for filename in filenames:
for record in screed.open(filename):
yield record
def multi_file_iter_khmer(filenames):
for filename in filenames:
for record in khmer.ReadParser(filename):
yield record
def clean_subseqs(sequence, ksize):
for subseq in re.split('[^ACGT]', sequence):
if len(subseq) >= ksize:
yield subseq
KmerOfInterest = namedtuple('KmerOfInterest', 'sequence offset abund')
| #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/dib-lab/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
# Core libraries
from __future__ import print_function
try:
import __builtin__ as builtins
except:
import builtins
from collections import namedtuple
from gzip import open as gzopen
import re
import sys
# Third-party libraries
import khmer
import screed
# Internal modules
from kevlar import seqio
from kevlar import overlap
from kevlar import counting
from kevlar import sketch
from kevlar.seqio import parse_augmented_fastx, print_augmented_fastx
from kevlar.variantset import VariantSet
from kevlar.timer import Timer
# Subcommands and command-line interface
from kevlar import dump
from kevlar import novel
from kevlar import collect
from kevlar import filter
from kevlar import reaugment
from kevlar import mutate
from kevlar import assemble
from kevlar import count
from kevlar import partition
from kevlar import localize
from kevlar import cli
# C extension(s)
from kevlar.alignment import contig_align as align
from kevlar._version import get_versions
__version__ = get_versions()['version']
del get_versions
def open(filename, mode):
if mode not in ['r', 'w']:
raise ValueError('invalid mode "{}"'.format(mode))
if filename in ['-', None]:
filehandle = sys.stdin if mode == 'r' else sys.stdout
return filehandle
openfunc = builtins.open
if filename.endswith('.gz'):
openfunc = gzopen
mode += 't'
return openfunc(filename, mode)
def revcom(seq):
return screed.dna.reverse_complement(str(seq))
def revcommin(seq):
rc = revcom(seq)
minseq = sorted((seq, rc))[0]
return minseq
def same_seq(seq1, seq2, seq2revcom=None):
if seq2revcom is None:
seq2revcom = revcom(seq2)
return seq1 == seq2 or seq1 == seq2revcom
def to_gml(graph, outfilename, logfile=sys.stderr):
"""Write the given read graph to a GML file."""
if not outfilename.endswith('.gml'):
print('[kevlar] WARNING: GML files usually need extension .gml',
file=logfile)
networkx.write_gml(graph, outfilename)
message = '[kevlar] graph written to {}'.format(args.gml)
print(message, file=logfile)
def multi_file_iter_screed(filenames):
for filename in filenames:
for record in screed.open(filename):
yield record
def multi_file_iter_khmer(filenames):
for filename in filenames:
for record in khmer.ReadParser(filename):
yield record
def clean_subseqs(sequence, ksize):
for subseq in re.split('[^ACGT]', sequence):
if len(subseq) >= ksize:
yield subseq
KmerOfInterest = namedtuple('KmerOfInterest', 'sequence offset abund')
| Python | 0.000001 |
c46e2053c0c093c2ee82f13f48787584d48664af | Fix reorder unit test for Django 1.8 | shuup_tests/front/test_reorder.py | shuup_tests/front/test_reorder.py | # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.core.urlresolvers import reverse
from django.test.client import Client
from shuup.simple_supplier.module import SimpleSupplierModule
from shuup.testing import factories
from shuup.core.models import ShippingMode
@pytest.mark.django_db
def test_reorder_view():
shop = factories.get_default_shop()
factories.get_default_shipping_method()
factories.get_default_payment_method()
supplier1 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
supplier2 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
assert supplier1.pk != supplier2.pk
product_supplier1 = factories.create_product(
"product_supplier1",
shop=shop,
supplier=supplier1,
default_price=10,
shipping_mode=ShippingMode.NOT_SHIPPED
)
product_supplier2 = factories.create_product(
"product_supplier2",
shop=shop,
supplier=supplier2,
default_price=20,
shipping_mode=ShippingMode.NOT_SHIPPED
)
user = factories.create_random_user("en")
user.set_password("user")
user.save()
customer = factories.create_random_person("en")
customer.user = user
customer.save()
order = factories.create_random_order(
customer=customer,
shop=shop,
products=[product_supplier1, product_supplier2],
completion_probability=0,
random_products=False
)
suppliers = [line.supplier for line in order.lines.products()]
assert supplier1 in suppliers
assert supplier2 in suppliers
client = Client()
client.login(username=user.username, password="user")
# list orders
response = client.get(reverse("shuup:personal-orders"))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "<td>%d</td>" % order.id in content
assert "<td>Received</td>" in content
# go to order detail
response = client.get(reverse("shuup:show-order", kwargs=dict(pk=order.pk)))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "Add all products to cart" in content
reorder_url = reverse("shuup:reorder-order", kwargs=dict(pk=order.pk))
assert reorder_url in content
# reorder products
response = client.get(reorder_url)
assert response.status_code == 302
assert response.url.endswith(reverse("shuup:basket"))
# go to basket
response = client.get(response.url)
assert response.status_code == 200
content = response.content.decode("utf-8")
# ensure the basket contain those products and suppliers
basket_key = client.session["basket_basket_key"]["key"]
from shuup.front.models import StoredBasket
basket = StoredBasket.objects.get(key=basket_key)
lines = basket.data["lines"]
product_supplier = [(line["product_id"], line["supplier_id"]) for line in lines]
assert (product_supplier1.pk, supplier1.pk) in product_supplier
assert (product_supplier2.pk, supplier2.pk) in product_supplier
assert product_supplier1.name in content
assert product_supplier2.name in content
assert "You are unable to proceed to checkout!" not in content
| # -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2018, Shuup Inc. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from django.core.urlresolvers import reverse
from django.test.client import Client
from shuup.simple_supplier.module import SimpleSupplierModule
from shuup.testing import factories
from shuup.core.models import ShippingMode
@pytest.mark.django_db
def test_reorder_view():
shop = factories.get_default_shop()
factories.get_default_shipping_method()
factories.get_default_payment_method()
supplier1 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
supplier2 = factories.get_supplier(SimpleSupplierModule.identifier, shop=shop)
assert supplier1.pk != supplier2.pk
product_supplier1 = factories.create_product(
"product_supplier1",
shop=shop,
supplier=supplier1,
default_price=10,
shipping_mode=ShippingMode.NOT_SHIPPED
)
product_supplier2 = factories.create_product(
"product_supplier2",
shop=shop,
supplier=supplier2,
default_price=20,
shipping_mode=ShippingMode.NOT_SHIPPED
)
user = factories.create_random_user("en")
user.set_password("user")
user.save()
customer = factories.create_random_person("en")
customer.user = user
customer.save()
order = factories.create_random_order(
customer=customer,
shop=shop,
products=[product_supplier1, product_supplier2],
completion_probability=0,
random_products=False
)
suppliers = [line.supplier for line in order.lines.products()]
assert supplier1 in suppliers
assert supplier2 in suppliers
client = Client()
client.login(username=user.username, password="user")
# list orders
response = client.get(reverse("shuup:personal-orders"))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "<td>%d</td>" % order.id in content
assert "<td>Received</td>" in content
# go to order detail
response = client.get(reverse("shuup:show-order", kwargs=dict(pk=order.pk)))
assert response.status_code == 200
content = response.content.decode("utf-8")
assert "Add all products to cart" in content
reorder_url = reverse("shuup:reorder-order", kwargs=dict(pk=order.pk))
assert reorder_url in content
# reorder products
response = client.get(reorder_url)
assert response.status_code == 302
assert response.url == reverse("shuup:basket")
# go to basket
response = client.get(response.url)
assert response.status_code == 200
content = response.content.decode("utf-8")
# ensure the basket contain those products and suppliers
basket_key = client.session["basket_basket_key"]["key"]
from shuup.front.models import StoredBasket
basket = StoredBasket.objects.get(key=basket_key)
lines = basket.data["lines"]
product_supplier = [(line["product_id"], line["supplier_id"]) for line in lines]
assert (product_supplier1.pk, supplier1.pk) in product_supplier
assert (product_supplier2.pk, supplier2.pk) in product_supplier
assert product_supplier1.name in content
assert product_supplier2.name in content
assert "You are unable to proceed to checkout!" not in content
| Python | 0.000002 |
8092ac34f95280adf884336999b481ef5241c2cb | update data container description to make sure that scalar values are returned as scalar | simphony/scripts/cuba-generate.py | simphony/scripts/cuba-generate.py | import click
import yaml
# Cuba keywords that are excludes from DataContainers
CUBA_DATA_CONTAINER_EXLCUDE = ['Id', 'Position']
@click.group()
def cli():
""" Auto-generate code from cuba yaml description. """
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def python(input, output):
""" Create the CUBA Enum for the DataContainer.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'from enum import IntEnum, unique\n',
'\n',
'\n',
'@unique\n',
'class CUBA(IntEnum):\n',
'\n']
template = " {} = {}\n"
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
lines.append(template.format(keyword['key'], keyword['number']))
output.writelines(lines)
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def table(input, output):
""" Create the CUBA DataContainer Table descriptions.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'import tables\n',
'\n',
'\n']
# create Data table description
lines.extend([
'class Data(tables.IsDescription):\n',
'\n'])
data_types = {
'string': 'String',
'double': 'Float64',
'integer': 'Int32'}
position = 0
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
if keyword['type'] == 'string':
template = " {} = tables.{}Col(pos={}, itemsize={})\n"
shape = keyword['shape'][0]
else:
template = " {} = tables.{}Col(pos={}{})\n"
shape = keyword['shape']
if len(shape) == 1:
if shape[0] == 1:
shape = ''
else:
shape = ', shape={}'.format(shape[0])
else:
shape = ', shape=({})'.format(
','.join(map(str, keyword['shape'])))
lines.append(template.format(
keyword['key'].lower(),
data_types[keyword['type']],
position,
shape))
position += 1
lines.append('\n\n')
# create Mask table description
mask_size = position
lines.extend([
'class Mask(tables.IsDescription):\n',
' mask = tables.BoolCol(shape=({},))\n'.format(mask_size)])
output.writelines(lines)
if __name__ == '__main__':
cli()
| import click
import yaml
# Cuba keywords that are excludes from DataContainers
CUBA_DATA_CONTAINER_EXLCUDE = ['Id', 'Position']
@click.group()
def cli():
""" Auto-generate code from cuba yaml description. """
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def python(input, output):
""" Create the CUBA Enum for the DataContainer.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'from enum import IntEnum, unique\n',
'\n',
'\n',
'@unique\n',
'class CUBA(IntEnum):\n',
'\n']
template = " {} = {}\n"
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
lines.append(template.format(keyword['key'], keyword['number']))
output.writelines(lines)
@cli.command()
@click.argument('input', type=click.File('rb'))
@click.argument('output', type=click.File('wb'))
def table(input, output):
""" Create the CUBA DataContainer Table descriptions.
"""
keywords = yaml.safe_load(input)
lines = [
'# code auto-generated by the cuba-generate.py script.\n',
'import tables\n',
'\n',
'\n']
# create Data table description
lines.extend([
'class Data(tables.IsDescription):\n',
'\n'])
template = " {} = tables.{}Col(pos={}, shape=({}))\n"
data_types = {
'string': 'String',
'double': 'Float64',
'integer': 'Int32'}
position = 0
for keyword in keywords:
if keyword['name'] in CUBA_DATA_CONTAINER_EXLCUDE:
continue
if len(keyword['shape']) == 1:
shape = str(keyword['shape'][0]) + ','
else:
shape = ','.join(map(str, keyword['shape']))
lines.append(template.format(
keyword['key'].lower(),
data_types[keyword['type']],
position,
shape))
position += 1
lines.append('\n\n')
# create Mask table description
mask_size = position
lines.extend([
'class Mask(tables.IsDescription):\n',
' mask = tables.BoolCol(shape=({},))\n'.format(mask_size)])
output.writelines(lines)
if __name__ == '__main__':
cli()
| Python | 0.000001 |
3159f3fa6d4d055e8a53a0b4f1d798397cc3c3a3 | The alteration of the context has no effect | base_report_to_printer/report.py | base_report_to_printer/report.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, exceptions, _
class Report(models.Model):
_inherit = 'report'
def _can_send_report(self, cr, uid, ids, behaviour, printer, document,
context=None):
"""Predicate that decide if report can be sent to printer
If you want to prevent `get_pdf` to send report you can set
the `must_skip_sent_to_printer` key to True in the context
"""
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
if context.get('must_skip_sent_to_printer'):
return False
if behaviour['action'] == 'server' and printer and document:
return True
return False
def print_document(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Print a document, do not return the document file """
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
local_context = dict(context)
local_context['must_skip_sent_to_printer'] = True
document = self.get_pdf(cr, uid, ids, report_name,
html=html, data=data, context=local_context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
if not printer:
raise exceptions.Warning(
_('No printer configured to print this report.')
)
return printer.print_document(report, document, report.report_type)
def get_pdf(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Generate a PDF and returns it.
If the action configured on the report is server, it prints the
generated document as well.
"""
document = super(Report, self).get_pdf(cr, uid, ids, report_name,
html=html, data=data,
context=context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
can_send_report = self._can_send_report(cr, uid, ids,
behaviour, printer, document,
context=context)
if can_send_report:
printer.print_document(report, document, report.report_type)
return document
| # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Guewen Baconnier
# Copyright 2014 Camptocamp SA
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, exceptions, _
class Report(models.Model):
_inherit = 'report'
def _can_send_report(self, cr, uid, ids, behaviour, printer, document,
context=None):
"""Predicate that decide if report can be sent to printer
If you want to prevent `get_pdf` to send report you can set
the `must_skip_sent_to_printer` key to True in the context
"""
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
if context.get('must_skip_sent_to_printer'):
return False
if behaviour['action'] == 'server' and printer and document:
return True
return False
def print_document(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Print a document, do not return the document file """
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
local_context = dict(context)
local_context['must_skip_sent_to_printer'] = True
document = self.get_pdf(cr, uid, ids, report_name,
html=html, data=data, context=local_context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
if not printer:
raise exceptions.Warning(
_('No printer configured to print this report.')
)
return printer.print_document(report, document, report.report_type)
def get_pdf(self, cr, uid, ids, report_name, html=None,
data=None, context=None):
""" Generate a PDF and returns it.
If the action configured on the report is server, it prints the
generated document as well.
"""
if context is None:
context = self.pool['res.users'].context_get(cr, uid)
document = super(Report, self).get_pdf(cr, uid, ids, report_name,
html=html, data=data,
context=context)
report = self._get_report_from_name(cr, uid, report_name)
behaviour = report.behaviour()[report.id]
printer = behaviour['printer']
can_send_report = self._can_send_report(cr, uid, ids,
behaviour, printer, document,
context=context)
if can_send_report:
printer.print_document(report, document, report.report_type)
context['must_skip_sent_to_printer'] = True
return document
| Python | 0.999999 |
e89e721225e916f4c2514f4a6568571abfc2acc0 | Add slides frame simibar | lib/plotter/matching/__init__.py | lib/plotter/matching/__init__.py | __all__ = ["core", "single_matching_plotter"]
from lib.exp.evaluator.ground_truth import GroundTruth as GT
from core import MatchingPlotterBase
class MatchingPlotter(MatchingPlotterBase):
def __init__(self, root, name):
"""
Try to show one matching pairs
use set_data to set matched results:
array of `sid`, `fid`, `matches`
"""
MatchingPlotterBase.__init__(self, root, name)
def __match_info(self, ax, df=None, sid=-1, fid=0):
self.set_matched_pair(sid, fid)
view = self.get_view()
ax.imshow(view[:, :, [2, 1, 0]])
info = "S-{}, F-{}, df: {:5.2f}({})".\
format(sid, fid, df.dist.mean(), len(df))
ax.set_title(info)
def result_grid(self, fig, row=4, col=4, from_=1):
start = from_ - 1
end = from_+(row*col) - 1
for mi, mc in enumerate(self.df[start:end], 1):
ax = fig.add_subplot(15, 4, mi)
self.__match_info(ax, **mc)
def frame_slides_relation(self, ax, matches, answer):
"""
Print frame to slides relation by input function
"""
x = [s["sid"] for s in matches]
y = [s["df"].dist.mean() for s in matches]
ax.plot(x, y)
def slides_frames_similarity(self, sids, fids, sims):
pass
def slice_bar(self, ax, x, y, z, start, size, cmm):
end = start+size
gt = GT(self.root, self.name)
for fi, mv, fid in zip(range(1, size+1), z[start: end], y[start:end]):
cr = [cmm(fi*3./size)]*len(mv)
asid = int(gt.answer(fid))
fac = 1
if asid > 0:
print asid, fid
cr[asid-1] = '#FF5698'
else:
cr = ['#aa77FF']*len(mv)
mv = mv/max(mv)
fac = max(mv)
ax.bar(x, mv, fid, zdir='y', color=cr, alpha=0.4)
mi = min(xrange(len(mv)), key=mv.__getitem__)
ax.bar([x[mi]], [mv[mi]*fac/2.0], fid,
zdir='y', color=['#44FF32'], alpha=.8)
ax.view_init(elev=60., azim=120)
| __all__ = ["core", "single_matching_plotter"]
from core import MatchingPlotterBase
class MatchingPlotter(MatchingPlotterBase):
def __init__(self, root, name):
"""
Try to show one matching pairs
use set_data to set matched results:
array of `sid`, `fid`, `matches`
"""
MatchingPlotterBase.__init__(self, root, name)
def __match_info(self, ax, df=None, sid=-1, fid=0):
self.set_matched_pair(sid, fid)
view = self.get_view()
ax.imshow(view[:, :, [2, 1, 0]])
info = "S-{}, F-{}, df: {:5.2f}({})".\
format(sid, fid, df.dist.mean(), len(df))
ax.set_title(info)
def result_grid(self, fig, row=4, col=4, from_=1):
start = from_ - 1
end = from_+(row*col) - 1
for mi, mc in enumerate(self.df[start:end], 1):
ax = fig.add_subplot(15, 4, mi)
self.__match_info(ax, **mc)
def frame_slides_relation(self, ax, matches, answer):
"""
Print frame to slides relation by input function
"""
x = [s["sid"] for s in matches]
y = [s["df"].dist.mean() for s in matches]
ax.plot(x, y)
def slides_frames_similarity(self, sids, fids, sims):
pass
| Python | 0 |
95b08f0cb82fa376a6f07d5395bcba343a131dea | update labels | plantcv/plantcv/hyperspectral/analyze_spectral.py | plantcv/plantcv/hyperspectral/analyze_spectral.py | # Analyze signal data in Thermal image
import os
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plotnine import ggplot, aes, geom_line, scale_x_continuous
def analyze_spectral(array, header_dict, mask, histplot=True):
"""This extracts the hyperspectral reflectance values of each pixel writes the values out to
a file. It can also print out a histogram plot of pixel intensity
and a pseudocolor image of the plant.
Inputs:
array = numpy array of thermal values
header_dict =
mask = Binary mask made from selected contours
histplot = if True plots histogram of intensity values
Returns:
analysis_img = output image
:param array: numpy array
:param header_dict: dict
:param mask: numpy array
:param histplot: bool
:return analysis_img: ggplot
"""
params.device += 1
# Store debug mode
debug = params.debug
params.debug = None
# List of wavelengths recorded created from parsing the header file will be string, make list of floats
wavelength_data = array[np.where(mask > 0)]
wavelength_freq = wavelength_data.mean(axis=0)
min_wavelength = int(np.ceil(float(header_dict["wavelength"][0])))
max_wavelength = int(np.ceil(float(header_dict["wavelength"][-1])))
new_wavelengths = []
new_freq = []
for i, wavelength in enumerate(header_dict["wavelength"]):
new_wavelengths.append(float(wavelength))
new_freq.append((wavelength_freq[i]).astype(np.float))
maxreflectance = np.amax(wavelength_data)
minreflectance = np.amin(wavelength_data)
avgreflectance = np.average(wavelength_data)
medianreflectance = np.median(wavelength_data)
# Store data into outputs class
outputs.add_observation(variable='max_reflectance', trait='maximum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='none', datatype=float,
value=float(maxreflectance), label='reflectance')
outputs.add_observation(variable='min_reflectance', trait='minimum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='none', datatype=float,
value=float(minreflectance), label='reflectance')
outputs.add_observation(variable='mean_reflectance', trait='mean reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='none', datatype=float,
value=float(avgreflectance), label='reflectance')
outputs.add_observation(variable='median_reflectance', trait='median reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='none', datatype=float,
value=float(medianreflectance), label='reflectance')
outputs.add_observation(variable='spectral_frequencies', trait='spectral frequencies',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='frequency', datatype=list,
value=new_freq, label=new_wavelengths)
params.debug = debug
analysis_img = None
if histplot is True:
dataset = pd.DataFrame({'Wavelength': new_wavelengths,
'Reflectance': wavelength_freq})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Wavelength',
y='Reflectance'))
+ geom_line(color='purple')
+ scale_x_continuous(breaks=list(range(min_wavelength, max_wavelength, 50)))
)
analysis_img = fig_hist
if params.debug == "print":
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_therm_histogram.png'))
elif params.debug == "plot":
print(fig_hist)
return analysis_img
| # Analyze signal data in Thermal image
import os
import numpy as np
import pandas as pd
from plantcv.plantcv import params
from plantcv.plantcv import outputs
from plotnine import ggplot, aes, geom_line, scale_x_continuous
def analyze_spectral(array, header_dict, mask, histplot=True):
"""This extracts the hyperspectral reflectance values of each pixel writes the values out to
a file. It can also print out a histogram plot of pixel intensity
and a pseudocolor image of the plant.
Inputs:
array = numpy array of thermal values
header_dict =
mask = Binary mask made from selected contours
histplot = if True plots histogram of intensity values
Returns:
analysis_img = output image
:param array: numpy array
:param header_dict: dict
:param mask: numpy array
:param histplot: bool
:return analysis_img: ggplot
"""
params.device += 1
# Store debug mode
debug = params.debug
params.debug = None
# List of wavelengths recorded created from parsing the header file will be string, make list of floats
wavelength_data = array[np.where(mask > 0)]
wavelength_freq = wavelength_data.mean(axis=0)
min_wavelength = int(np.ceil(float(header_dict["wavelength"][0])))
max_wavelength = int(np.ceil(float(header_dict["wavelength"][-1])))
new_wavelengths = []
new_freq = []
for i, wavelength in enumerate(header_dict["wavelength"]):
new_wavelengths.append(float(wavelength))
new_freq.append((wavelength_freq[i]).astype(np.float))
maxreflectance = np.amax(wavelength_data)
minreflectance = np.amin(wavelength_data)
avgreflectance = np.average(wavelength_data)
medianreflectance = np.median(wavelength_data)
# Store data into outputs class
outputs.add_observation(variable='max_reflectance', trait='maximum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(maxreflectance), label='reflectance')
outputs.add_observation(variable='min_reflectance', trait='minimum reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(minreflectance), label='reflectance')
outputs.add_observation(variable='mean_reflectance', trait='mean_reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(avgreflectance), label='reflectance')
outputs.add_observation(variable='median_reflectance', trait='median_reflectance',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='degrees', datatype=float,
value=float(medianreflectance), label='reflectance')
outputs.add_observation(variable='spectral_frequencies', trait='thermal spectral_frequencies',
method='plantcv.plantcv.hyperspectral.analyze_spectral', scale='frequency', datatype=list,
value=new_freq, label=new_wavelengths)
params.debug = debug
analysis_img = None
if histplot is True:
dataset = pd.DataFrame({'Wavelength': new_wavelengths,
'Reflectance': wavelength_freq})
fig_hist = (ggplot(data=dataset,
mapping=aes(x='Wavelength',
y='Reflectance'))
+ geom_line(color='purple')
+ scale_x_continuous(breaks=list(range(min_wavelength, max_wavelength, 50)))
)
analysis_img = fig_hist
if params.debug == "print":
fig_hist.save(os.path.join(params.debug_outdir, str(params.device) + '_therm_histogram.png'))
elif params.debug == "plot":
print(fig_hist)
return analysis_img
| Python | 0.000001 |
cd59979ab446d7613ec7df5d5737539464918edf | Fix span boundary handling in Spanish noun_chunks (#5860) | spacy/lang/es/syntax_iterators.py | spacy/lang/es/syntax_iterators.py | # coding: utf8
from __future__ import unicode_literals
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
from ...errors import Errors
def noun_chunks(doclike):
doc = doclike.doc
if not doc.is_parsed:
raise ValueError(Errors.E029)
if not len(doc):
return
np_label = doc.vocab.strings.add("NP")
left_labels = ["det", "fixed", "neg"] # ['nunmod', 'det', 'appos', 'fixed']
right_labels = ["flat", "fixed", "compound", "neg"]
stop_labels = ["punct"]
np_left_deps = [doc.vocab.strings.add(label) for label in left_labels]
np_right_deps = [doc.vocab.strings.add(label) for label in right_labels]
stop_deps = [doc.vocab.strings.add(label) for label in stop_labels]
for token in doclike:
if token.pos in [PROPN, NOUN, PRON]:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
yield left.i, right.i + 1, np_label
token = right
token = next_token(token)
def is_verb_token(token):
return token.pos in [VERB, AUX]
def next_token(token):
try:
return token.nbor()
except IndexError:
return None
def noun_bounds(doc, root, np_left_deps, np_right_deps, stop_deps):
left_bound = root
for token in reversed(list(root.lefts)):
if token.dep in np_left_deps:
left_bound = token
right_bound = root
for token in root.rights:
if token.dep in np_right_deps:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
if list(
filter(
lambda t: is_verb_token(t) or t.dep in stop_deps,
doc[left_bound.i : right.i],
)
):
break
else:
right_bound = right
return left_bound, right_bound
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| # coding: utf8
from __future__ import unicode_literals
from ...symbols import NOUN, PROPN, PRON, VERB, AUX
from ...errors import Errors
def noun_chunks(doclike):
doc = doclike.doc
if not doc.is_parsed:
raise ValueError(Errors.E029)
if not len(doc):
return
np_label = doc.vocab.strings.add("NP")
left_labels = ["det", "fixed", "neg"] # ['nunmod', 'det', 'appos', 'fixed']
right_labels = ["flat", "fixed", "compound", "neg"]
stop_labels = ["punct"]
np_left_deps = [doc.vocab.strings.add(label) for label in left_labels]
np_right_deps = [doc.vocab.strings.add(label) for label in right_labels]
stop_deps = [doc.vocab.strings.add(label) for label in stop_labels]
token = doc[0]
while token and token.i < len(doclike):
if token.pos in [PROPN, NOUN, PRON]:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
yield left.i, right.i + 1, np_label
token = right
token = next_token(token)
def is_verb_token(token):
return token.pos in [VERB, AUX]
def next_token(token):
try:
return token.nbor()
except IndexError:
return None
def noun_bounds(doc, root, np_left_deps, np_right_deps, stop_deps):
left_bound = root
for token in reversed(list(root.lefts)):
if token.dep in np_left_deps:
left_bound = token
right_bound = root
for token in root.rights:
if token.dep in np_right_deps:
left, right = noun_bounds(
doc, token, np_left_deps, np_right_deps, stop_deps
)
if list(
filter(
lambda t: is_verb_token(t) or t.dep in stop_deps,
doc[left_bound.i : right.i],
)
):
break
else:
right_bound = right
return left_bound, right_bound
SYNTAX_ITERATORS = {"noun_chunks": noun_chunks}
| Python | 0 |
d07b48c018d8edf5c8dc3689e22a0c4e551f79a7 | Add single file output option | cube.py | cube.py | #!/usr/bin/env python
import numpy as np
from scipy import ndimage, misc
import sys, math, os
import argparse
parser = argparse.ArgumentParser(description='Turn a panorama image into a cube map (6 images)')
parser.add_argument("--size", default=512, type=int, help="Size of output image sides")
parser.add_argument("--prefix", default="side_", help="Prefix of output images")
parser.add_argument("--type", default="jpg", help="File Type to save as, jpg, png etc.")
parser.add_argument("--dir", default="./", help="Directory in which to put the output files")
parser.add_argument("--onefile", help="Save output as one concatenated file, still uses intermediate files as temp storage.")
parser.add_argument("input", help="Input panorama file")
args = parser.parse_args()
SIZE = args.size
HSIZE = SIZE / 2.0
im = ndimage.imread(args.input)
side_im = np.zeros((SIZE, SIZE, 3), np.uint8)
pids = []
for i in range(0,6):
pid = os.fork()
if pid != 0:
pids.append(pid)
continue
it = np.nditer(side_im, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
axA = it.multi_index[0]
axB = it.multi_index[1]
c = it.multi_index[2]
z = -axA + HSIZE
if i == 0:
x = HSIZE
y = -axB + HSIZE
elif i == 1:
x = -HSIZE
y = axB - HSIZE
elif i == 2:
x = axB - HSIZE
y = HSIZE
elif i == 3:
x = -axB + HSIZE
y = -HSIZE
elif i == 4:
z = HSIZE
x = axB - HSIZE
y = axA - HSIZE
elif i == 5:
z = -HSIZE
x = axB - HSIZE
y = -axA + HSIZE
r = math.sqrt(float(x*x + y*y + z*z))
theta = math.acos(float(z)/r)
phi = math.atan2(float(y),x)
ix = (im.shape[1]-1)*phi/(2*math.pi)
iy = (im.shape[0]-1)*(theta)/math.pi
it[0] = im[iy, ix, c]
it.iternext()
misc.imsave(os.path.join(args.dir, "%s%d.%s"%(args.prefix,i,args.type)), side_im)
#Children Exit here
sys.exit(0)
# Thise seems to work better than waitpid(-1, 0), in that case sometimes the
# files still don't exist and we get an error.
for pid in pids:
os.waitpid(pid, 0)
if args.onefile:
ifiles = []
for i in range(0,6):
ifiles.append(misc.imread(os.path.join(args.dir, "%s%d.%s"%(args.prefix,i,args.type))))
onefile = np.concatenate(ifiles, axis=1)
misc.imsave(args.onefile, onefile)
for i in range(0,6):
os.unlink(os.path.join(args.dir, "%s%d.%s"%(args.prefix,i,args.type)))
| #!/usr/bin/env python
import numpy as np
from scipy import ndimage, misc
import sys, math, os
import argparse
parser = argparse.ArgumentParser(description='Turn a panorama image into a cube map (6 images)')
parser.add_argument("--size", default=512, type=int, help="Size of output image sides")
parser.add_argument("--prefix", default="side_", help="Prefix of output images")
parser.add_argument("--type", default="jpg", help="File Type to save as, jpg, png etc.")
parser.add_argument("--dir", default="./", help="Directory in which to put the output files")
parser.add_argument("input", help="Input panorama file")
args = parser.parse_args()
SIZE = args.size
HSIZE = SIZE / 2.0
im = ndimage.imread(args.input)
side_im = np.zeros((SIZE, SIZE, 3), np.uint8)
for i in range(0,6):
pid = os.fork()
if pid != 0:
continue
it = np.nditer(side_im, flags=['multi_index'], op_flags=['readwrite'])
while not it.finished:
axA = it.multi_index[0]
axB = it.multi_index[1]
c = it.multi_index[2]
z = -axA + HSIZE
if i == 0:
x = HSIZE
y = -axB + HSIZE
elif i == 1:
x = -HSIZE
y = axB - HSIZE
elif i == 2:
x = axB - HSIZE
y = HSIZE
elif i == 3:
x = -axB + HSIZE
y = -HSIZE
elif i == 4:
z = HSIZE
x = axB - HSIZE
y = axA - HSIZE
elif i == 5:
z = -HSIZE
x = axB - HSIZE
y = -axA + HSIZE
r = math.sqrt(float(x*x + y*y + z*z))
theta = math.acos(float(z)/r)
phi = math.atan2(float(y),x)
ix = (im.shape[1]-1)*phi/(2*math.pi)
iy = (im.shape[0]-1)*(theta)/math.pi
it[0] = im[iy, ix, c]
it.iternext()
misc.imsave(os.path.join(args.dir, "%s%d.%s"%(args.prefix,i,args.type)), side_im)
#Children Exit here
sys.exit(0)
os.waitpid(-1, 0)
| Python | 0.000004 |
75cb305c025ca3549c721faacb5ea51297c80052 | Use GitPython | buster.py | buster.py | """Ghost Buster. Static site generator for Ghost.
Usage:
buster.py generate [--domain=<local-address>] [--dir=<path>]
buster.py preview [--dir=<path>]
buster.py setup [--gh-repo=<repo-url>] [--dir=<path>]
buster.py deploy [--dir=<path>]
buster.py (-h | --help)
buster.py --version
Options:
-h --help Show this screen.
--version Show version.
--dir=<path> Path of directory to store static pages.
--domain=<local-address> Address of local ghost installation [default: local.tryghost.org].
--gh-repo=<repo-url> URL of your gh-pages repository.
"""
import os
import re
import shutil
import SocketServer
import SimpleHTTPServer
from docopt import docopt
from time import gmtime, strftime
from git import Repo
arguments = docopt(__doc__, version='0.1')
if arguments['dir']:
STATIC_PATH = arguments['dir']
else:
STATIC_PATH = os.path.join(os.path.dirname(__file__), 'static')
if arguments['generate']:
command = ("wget \\"
"--recursive \\" # follow links to download entire site
"--page-requisites \\" # grab everything: css / inlined images
"--domains {0} \\" # don't grab anything outside ghost
"--no-parent \\" # don't go to parent level
"--directory-prefix {1} \\" # download contents to static/ folder
"--no-host-directories \\" # don't create domain named folder
"{0}").format(arguments['--domain'], STATIC_PATH)
os.system(command)
elif arguments['preview']:
os.chdir(STATIC_PATH)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", 9000), Handler)
print "Serving at port 9000"
# gracefully handle interrupt here
httpd.serve_forever()
elif arguments['setup']:
if arguments['--gh-repo']:
repo_url = arguments['--gh-repo']
else:
repo_url = raw_input("Enter the Github repository URL:\n").strip()
# Create a fresh new static files directory
if os.path.isdir(STATIC_PATH):
confirm = raw_input("This will destroy everything inside static/."
" Are you sure you want to continue? (y/N)").strip()
if confirm != 'y' or confirm != 'Y':
sys.exit(0)
shutil.rmtree(STATIC_PATH)
# User/Organization page -> master branch
# Project page -> gh-pages branch
branch = 'gh-pages'
regex = re.compile(".*[\w-]+\.github\.(?:io|com).*")
if regex.match(repo_url):
branch = 'master'
# Prepare git repository
repo = Repo.init(STATIC_PATH)
git = repo.git
if branch == 'gh-pages':
git.checkout(b='gh-pages')
repo.create_remote('origin', repo_url)
print "All set! You can generate and deploy now."
elif arguments['deploy']:
repo = Repo(STATIC_PATH)
index = repo.index
index.add('.')
current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
index.commit('Blog update at {}'.format(current_time))
origin = repo.remote.origin
origin.push()
os.system("git push origin {}".format(branch))
print "Good job! Deployed to Github Pages."
elif arguments['domain']:
pass
| """Ghost Buster. Static site generator for Ghost.
Usage:
buster.py generate [--domain=<local-address>]
buster.py preview
buster.py setup [--gh-repo=<repo-url>]
buster.py deploy
buster.py (-h | --help)
buster.py --version
Options:
-h --help Show this screen.
--version Show version.
--domain=<local-address> Address of local ghost installation [default: local.tryghost.org].
--gh-repo=<repo-url> URL of your gh-pages repository.
"""
# XXX Assume static dir to be current dir if not specified in args
import os
import re
import shutil
import SocketServer
import SimpleHTTPServer
from docopt import docopt
from time import gmtime, strftime
STATIC_DIR = 'static'
arguments = docopt(__doc__, version='0.1')
static_path = os.path.join(os.path.dirname(__file__), STATIC_DIR)
if arguments['generate']:
command = ("wget \\"
"--recursive \\" # follow links to download entire site
"--page-requisites \\" # grab everything: css / inlined images
"--domains {0} \\" # don't grab anything outside ghost
"--no-parent \\" # don't go to parent level
"--directory-prefix {1} \\" # download contents to static/ folder
"--no-host-directories \\" # don't create domain named folder
"{0}").format(arguments['--domain'], STATIC_DIR)
os.system(command)
elif arguments['preview']:
os.chdir(static_path)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", 9000), Handler)
print "Serving at port 9000"
# gracefully handle interrupt here
httpd.serve_forever()
elif arguments['setup']:
if arguments['--gh-repo']:
repo_url = arguments['--gh-repo']
else:
repo_url = raw_input("Enter the Github repository URL:\n").strip()
# Create a fresh new static files directory
if os.path.isdir(static_path):
confirm = raw_input("This will destroy everything inside static/."
" Are you sure you want to continue? (y/N)").strip()
if confirm != 'y' or confirm != 'Y':
sys.exit(0)
shutil.rmtree(static_path)
os.mkdir(static_path)
os.chdir(static_path)
# User/Organization page -> master branch
# Project page -> gh-pages branch
branch = 'gh-pages'
regex = re.compile(".*[\w-]+\.github\.(?:io|com).*")
if regex.match(repo_url):
branch = 'master'
# Prepare git repository
os.system("git init")
if branch == 'gh-pages':
os.system("git checkout -b gh-pages")
os.system("git remote add origin {}".format(repo_url))
print "All set! You can generate and deploy now."
elif arguments['deploy']:
os.chdir(static_path)
os.system("git add -A .")
current_time = strftime("%Y-%m-%d %H:%M:%S", gmtime())
os.system("git commit -m 'Blog update at {}'".format(current_time))
os.system("git push origin {}".format(branch))
print "Good job! Deployed to Github Pages."
elif arguments['domain']:
pass
| Python | 0.000001 |
a0afdc5f38c237918b2bb6906c977e83ba1574a0 | allow to define a mandatory output extension | carpet.py | carpet.py | import tempfile
import os
class TempFileContext:
remove_at_exit = True
removable_files = []
"""
Base class to create 'with' contexts.
The __init__ method must define:
- self.removable_files <list>. This list will hold a list of filenames which will
removed at the end of the context, or when calling self.delete().
- self.tempfile <string>. Temporary file of interest, returned by "with" statement.
"""
def __init__(self, file_extension=""):
self.tempfile = tempfile.mktemp() + file_extension
def __enter__(self):
return self.tempfile
def __exit__(self, exc_type, exc_val, exc_tb):
if self.remove_at_exit:
self.delete()
else:
self.remove_intermediate_files()
def delete(self):
self.remove_tempfile()
self.remove_intermediate_files()
def remove_tempfile(self):
os.remove(self.tempfile)
def remove_intermediate_files(self):
map(os.remove, self.removable_files)
def create_context_class(core_function, output_extension=""):
"""
This function is used to create context classes using a function
(core_function) to provide some functionality.
By context class I mean classes that can be used like this:
with ContextClass(whatever) as something:
do whatever with "something"
where "something" is only available within the scope of the with block.
core_function must accept at least two arguments:
- input file -> input file to process
- output fila -> output file of the processing
A typical example of core_function will be a function that transforms
between two data formats. For example, say we have a function jpg2png.
Normally we would use it this way: jpg2png("photo.jpg", "photo.png"). Now
suppose that we only need "photo.png" for a temporal step in a pipeline. In
this case we would have to care about choosing a location for "photo.png",
and about deleting it at the end.
Context classes allow to handle the temporal storage and removal, and would
be used like this:
with Jpg2Png("photo.jpg") as tmp_png_file:
do_whatever(tmp_png_file)...
Here tmp_png_file would be a pathname refering to a .png file created from
our "photo.jpg". We can use this pathname to open it, copy it, process it,
etc. without taking care of where it is. Also, once we exit the 'with'
block it will be deleted and we won't have to care about it anymore.
"""
# This is our mold of Context Class :)
class GenericContextClass(TempFileContext):
def __init__(self, *args, **kwargs):
self.removable_files = []
self.tempfile = tempfile.mktemp() + "." + output_extension
core_function(args[0], self.tempfile, *args[1:], **kwargs)
self.remove_at_exit = True
return GenericContextClass
| import tempfile
import os
class TempFileContext:
remove_at_exit = True
removable_files = []
"""
Base class to create 'with' contexts.
The __init__ method must define:
- self.removable_files <list>. This list will hold a list of filenames which will
removed at the end of the context, or when calling self.delete().
- self.tempfile <string>. Temporary file of interest, returned by "with" statement.
"""
def __init__(self, file_extension=""):
self.tempfile = tempfile.mktemp() + file_extension
def __enter__(self):
return self.tempfile
def __exit__(self, exc_type, exc_val, exc_tb):
if self.remove_at_exit:
self.delete()
else:
self.remove_intermediate_files()
def delete(self):
self.remove_tempfile()
self.remove_intermediate_files()
def remove_tempfile(self):
os.remove(self.tempfile)
def remove_intermediate_files(self):
map(os.remove, self.removable_files)
def create_context_class(core_function):
"""
This function is used to create context classes using a function
(core_function) to provide some functionality.
By context class I mean classes that can be used like this:
with ContextClass(whatever) as something:
do whatever with "something"
where "something" is only available within the scope of the with block.
core_function must accept at least two arguments:
- input file -> input file to process
- output fila -> output file of the processing
A typical example of core_function will be a function that transforms
between two data formats. For example, say we have a function jpg2png.
Normally we would use it this way: jpg2png("photo.jpg", "photo.png"). Now
suppose that we only need "photo.png" for a temporal step in a pipeline. In
this case we would have to care about choosing a location for "photo.png",
and about deleting it at the end.
Context classes allow to handle the temporal storage and removal, and would
be used like this:
with Jpg2Png("photo.jpg") as tmp_png_file:
do_whatever(tmp_png_file)...
Here tmp_png_file would be a pathname refering to a .png file created from
our "photo.jpg". We can use this pathname to open it, copy it, process it,
etc. without taking care of where it is. Also, once we exit the 'with'
block it will be deleted and we won't have to care about it anymore.
"""
# This is our mold of Context Class :)
class GenericContextClass(TempFileContext):
def __init__(self, *args, **kwargs):
self.removable_files = []
self.tempfile = tempfile.mktemp()
core_function(args[0], self.tempfile, *args[1:], **kwargs)
self.remove_at_exit = True
return GenericContextClass
| Python | 0.000001 |
db67db3cea880e40d1982149fea86699c15b5f75 | change append to add (for the set in part 1) | day3.py | day3.py | #!/usr/local/bin/python3
from collections import namedtuple
with open('day3_input.txt') as f:
instructions = f.read().rstrip()
Point = namedtuple('Point', ['x', 'y'])
location = Point(0, 0)
visited = {location}
def new_loc(current_loc, instruction):
if instruction == '^':
xy = current_loc.x, current_loc.y + 1
elif instruction == 'v':
xy = current_loc.x, current_loc.y - 1
elif instruction == '>':
xy = current_loc.x + 1, current_loc.y
elif instruction == '<':
xy = current_loc.x - 1, current_loc.y
return Point(*xy)
for char in instructions:
location = new_loc(location, char)
visited.add(location)
print('At least one present:', len(visited))
# Part two
santa_loc = Point(0, 0)
robo_loc = Point(0, 0)
visited = {santa_loc, robo_loc}
for idx, char in enumerate(instructions):
if idx % 2 == 0: # Santa
santa_loc = new_loc(santa_loc, char)
visited.add(santa_loc)
else: # robot
robo_loc = new_loc(robo_loc, char)
visited.add(robo_loc)
print('At least one present with santa and robot:', len(visited))
| #!/usr/local/bin/python3
from collections import namedtuple
with open('day3_input.txt') as f:
instructions = f.read().rstrip()
Point = namedtuple('Point', ['x', 'y'])
location = Point(0, 0)
visited = {location}
def new_loc(current_loc, instruction):
if instruction == '^':
xy = current_loc.x, current_loc.y + 1
elif instruction == 'v':
xy = current_loc.x, current_loc.y - 1
elif instruction == '>':
xy = current_loc.x + 1, current_loc.y
elif instruction == '<':
xy = current_loc.x - 1, current_loc.y
return Point(*xy)
for char in instructions:
location = new_loc(location, char)
visited.append(location)
print('At least one present:', len(visited))
# Part two
santa_loc = Point(0, 0)
robo_loc = Point(0, 0)
visited = {santa_loc, robo_loc}
for idx, char in enumerate(instructions):
if idx % 2 == 0: # Santa
santa_loc = new_loc(santa_loc, char)
visited.add(santa_loc)
else: # robot
robo_loc = new_loc(robo_loc, char)
visited.add(robo_loc)
print('At least one present with santa and robot:', len(visited))
| Python | 0 |
db713e62eafb29c1a968e16b997a4e8f49156c78 | Correct config for touchscreen | config.py | config.py | __author__ = 'Florian'
from util import get_lan_ip
#################
# CONFIGURATION #
#################
# CHANGE FROM HERE
#
UDP_PORT = 18877
IP = get_lan_ip()
BUF_SIZE = 4096
TIMEOUT_IN_SECONDS = 0.1
#
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
SCREEN_DEEP = 32
#
LABEL_RIGHT = 0
LABEL_LEFT = 1
ALIGN_CENTER = 0
ALIGN_RIGHT = 1
ALIGN_LEFT = 2
VALIGN_CENTER = 0
VALIGN_TOP = 1
VALIGN_BOTTOM = 2
#
# Stop changing. Of course - you can do, but it should not be necessary
#
FONT = 'assets/DroidSansMono.ttf'
# set up the colors
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
CYAN = ( 0, 255, 255)
MAGENTA= (255, 0, 255)
YELLOW = (255, 255, 0)
RPM_YELLOW = (230, 230, 40)
GREY = (214, 214, 214)
BACKGROUND_COLOR = BLACK
FOREGROUND_COLOR = WHITE
#
#
#
import os, sys
if sys.platform == 'darwin':
# Display on Laptop Screen on the left
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (-400,100)
from datastream import MockBaseDataStream
datastream = MockBaseDataStream()
#from datastream import PDU1800DataStream
#datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
elif sys.platform == 'linux2':
if os.path.isfile('/etc/pointercal'):
os.environ["TSLIB_CALIBFILE"] = '/etc/pointercal'
os.putenv('SDL_VIDEODRIVER', 'fbcon')
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
from evdev import InputDevice, list_devices
devices = map(InputDevice, list_devices())
eventX=""
for dev in devices:
if dev.name == "ADS7846 Touchscreen":
eventX = dev.fn
os.environ["SDL_MOUSEDEV"] = eventX
from datastream import PDU1800DataStream
datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
#
| __author__ = 'Florian'
from util import get_lan_ip
#################
# CONFIGURATION #
#################
# CHANGE FROM HERE
#
UDP_PORT = 18877
IP = get_lan_ip()
BUF_SIZE = 4096
TIMEOUT_IN_SECONDS = 0.1
#
SCREEN_WIDTH = 320
SCREEN_HEIGHT = 240
SCREEN_DEEP = 32
#
LABEL_RIGHT = 0
LABEL_LEFT = 1
ALIGN_CENTER = 0
ALIGN_RIGHT = 1
ALIGN_LEFT = 2
VALIGN_CENTER = 0
VALIGN_TOP = 1
VALIGN_BOTTOM = 2
#
# Stop changing. Of course - you can do, but it should not be necessary
#
FONT = 'assets/DroidSansMono.ttf'
# set up the colors
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
RED = (255, 0, 0)
GREEN = ( 0, 255, 0)
BLUE = ( 0, 0, 255)
CYAN = ( 0, 255, 255)
MAGENTA= (255, 0, 255)
YELLOW = (255, 255, 0)
RPM_YELLOW = (230, 230, 40)
GREY = (214, 214, 214)
BACKGROUND_COLOR = BLACK
FOREGROUND_COLOR = WHITE
#
#
#
import os, sys
if sys.platform == 'darwin':
# Display on Laptop Screen on the left
os.environ['SDL_VIDEO_WINDOW_POS'] = "%d,%d" % (-400,100)
#from datastream import MockBaseDataStream
#datastream = MockBaseDataStream()
from datastream import PDU1800DataStream
datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
elif sys.platform == 'linux2':
from evdev import InputDevice, list_devices
devices = map(InputDevice, list_devices())
eventX=""
for dev in devices:
if dev.name == "ADS7846 Touchscreen":
eventX = dev.fn
os.environ["SDL_FBDEV"] = "/dev/fb1"
os.environ["SDL_MOUSEDRV"] = "TSLIB"
os.environ["SDL_MOUSEDEV"] = eventX
if os.path.isfile('/etc/pointercal'):
os.environ["TSLIB_CALIBFILE"] = '/etc/pointercal'
from datastream import PDU1800DataStream
datastream = PDU1800DataStream(ip=IP, port=UDP_PORT)
#
| Python | 0.000002 |
68593e359d5bb79c096d584c83df1ff55262a686 | use with | config.py | config.py | # coding=utf-8
from configparser import ConfigParser
import os
__author__ = 'Victor Häggqvist'
class Config:
confdir = os.path.dirname(os.path.realpath(__file__))
config_file = os.path.join(confdir, 'ledman.conf')
default = """
[gpio]
red=22
green=27
blue=17
[default_level]
red=0
green=0.3
blue=0.5
[server]
keys=testkeychangeme
"""
def __init__(self):
config = ConfigParser()
if not os.path.isfile(self.config_file):
self.init_config()
config.read(self.config_file)
self.GPIO_RED = config.get('gpio', 'red') # 22
self.GPIO_GREEN = config.get('gpio', 'green') # 27
self.GPIO_BLUE = config.get('gpio', 'blue') # 17
self.RED_DEFAULT = config.get('default_level', 'red') # 0
self.GREEN_DEFAULT = config.get('default_level', 'green') # 0.3
self.BLUE_DEFAULT = config.get('default_level', 'blue') # 0.5
keys = config.get('server', 'keys')
self.keys = []
for k in keys.split(','):
self.keys.append(k)
def init_config(self):
with open(self.config_file, 'w+') as f:
f.write(self.default)
| # coding=utf-8
from configparser import ConfigParser
import os
__author__ = 'Victor Häggqvist'
class Config:
confdir = os.path.dirname(os.path.realpath(__file__))
config_file = os.path.join(confdir, 'ledman.conf')
default = """
[gpio]
red=22
green=27
blue=17
[default_level]
red=0
green=0.3
blue=0.5
[server]
keys=testkeychangeme
"""
def __init__(self):
config = ConfigParser()
if not os.path.isfile(self.config_file):
self.init_config()
config.read(self.config_file)
self.GPIO_RED = config.get('gpio', 'red') # 22
self.GPIO_GREEN = config.get('gpio', 'green') # 27
self.GPIO_BLUE = config.get('gpio', 'blue') # 17
self.RED_DEFAULT = config.get('default_level', 'red') # 0
self.GREEN_DEFAULT = config.get('default_level', 'green') # 0.3
self.BLUE_DEFAULT = config.get('default_level', 'blue') # 0.5
keys = config.get('server', 'keys')
self.keys = []
for k in keys.split(','):
self.keys.append(k)
def init_config(self):
f = open(self.config_file, 'w+')
f.write(self.default)
f.close()
| Python | 0 |
0812ec319291b709613152e9e1d781671047a428 | Make server ignore missing environment variables | config.py | config.py | import os
SQLALCHEMY_DATABASE_URI = os.environ.get('DATABASE_URL', 'sqlite://')
ACCESS_TOKEN = os.environ.get('ACCESS_TOKEN')
PAGE_ID = os.environ.get('PAGE_ID')
APP_ID = os.environ.get('APP_ID')
VERIFY_TOKEN = os.environ.get('VERIFY_TOKEN')
| import os
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
ACCESS_TOKEN = os.environ['ACCESS_TOKEN']
PAGE_ID = os.environ['PAGE_ID']
APP_ID = os.environ['APP_ID']
VERIFY_TOKEN = os.environ['VERIFY_TOKEN']
| Python | 0 |
d7e03596f8bf1e886e984c0ea98334af878a15e2 | Use __future__.print_function so syntax is valid on Python 3 | meta/bytecodetools/print_code.py | meta/bytecodetools/print_code.py | '''
Created on May 10, 2012
@author: sean
'''
from __future__ import print_function
from .bytecode_consumer import ByteCodeConsumer
from argparse import ArgumentParser
class ByteCodePrinter(ByteCodeConsumer):
def generic_consume(self, instr):
print(instr)
def main():
parser = ArgumentParser()
parser.add_argument()
if __name__ == '__main__':
main()
| '''
Created on May 10, 2012
@author: sean
'''
from .bytecode_consumer import ByteCodeConsumer
from argparse import ArgumentParser
class ByteCodePrinter(ByteCodeConsumer):
def generic_consume(self, instr):
print instr
def main():
parser = ArgumentParser()
parser.add_argument()
if __name__ == '__main__':
main() | Python | 0.9985 |
1f343e52abb67ab2f85836b10dadb3cb34a95379 | fix login issue with django 1.7: check_for_test_cookie is deprecated and removed in django 1.7. | xadmin/forms.py | xadmin/forms.py | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy, ugettext as _
from xadmin.util import User
ERROR_MESSAGE = ugettext_lazy("Please enter the correct username and password "
"for a staff account. Note that both fields are case-sensitive.")
class AdminAuthenticationForm(AuthenticationForm):
"""
A custom authentication form used in the admin app.
"""
this_is_the_login_form = forms.BooleanField(
widget=forms.HiddenInput, initial=1,
error_messages={'required': ugettext_lazy("Please log in again, because your session has expired.")})
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
message = ERROR_MESSAGE
if username and password:
self.user_cache = authenticate(
username=username, password=password)
if self.user_cache is None:
if u'@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
try:
user = User.objects.get(email=username)
except (User.DoesNotExist, User.MultipleObjectsReturned):
# Nothing to do here, moving along.
pass
else:
if user.check_password(password):
message = _("Your e-mail address is not your username."
" Try '%s' instead.") % user.username
raise forms.ValidationError(message)
elif not self.user_cache.is_active or not self.user_cache.is_staff:
raise forms.ValidationError(message)
return self.cleaned_data
| from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy, ugettext as _
from xadmin.util import User
ERROR_MESSAGE = ugettext_lazy("Please enter the correct username and password "
"for a staff account. Note that both fields are case-sensitive.")
class AdminAuthenticationForm(AuthenticationForm):
"""
A custom authentication form used in the admin app.
"""
this_is_the_login_form = forms.BooleanField(
widget=forms.HiddenInput, initial=1,
error_messages={'required': ugettext_lazy("Please log in again, because your session has expired.")})
def clean(self):
username = self.cleaned_data.get('username')
password = self.cleaned_data.get('password')
message = ERROR_MESSAGE
if username and password:
self.user_cache = authenticate(
username=username, password=password)
if self.user_cache is None:
if u'@' in username:
# Mistakenly entered e-mail address instead of username? Look it up.
try:
user = User.objects.get(email=username)
except (User.DoesNotExist, User.MultipleObjectsReturned):
# Nothing to do here, moving along.
pass
else:
if user.check_password(password):
message = _("Your e-mail address is not your username."
" Try '%s' instead.") % user.username
raise forms.ValidationError(message)
elif not self.user_cache.is_active or not self.user_cache.is_staff:
raise forms.ValidationError(message)
self.check_for_test_cookie()
return self.cleaned_data
| Python | 0 |
55dec7060ff988468499dbce1f2c56c65f2f4f81 | Add support for running from CLI without Gtk | zram-monitor.py | zram-monitor.py | #!/usr/bin/env python
import os
import psutil
import sys
try:
from gi.repository import Gtk, GLib
from gi.repository import AppIndicator3 as appindicator
gtk = True
except ImportError:
gtk = False
def sizeof_fmt(num):
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class ZramUsage(object):
def getzramusage(self):
blockpath = '/sys/block'
statfiles = [
'compr_data_size', 'orig_data_size', 'mem_used_total',
'disksize', 'notify_free', 'size', 'zero_pages']
zramblocks = [z for z in os.listdir(blockpath) if z.startswith('zram')]
stats = dict((k, 0) for k in statfiles)
for block in zramblocks:
for statfile in statfiles:
f = os.path.join(blockpath, block, statfile)
if os.path.exists(f):
data = 0
sumdata = stats.get(statfile, 0)
with open(f, 'r') as handle:
data = handle.read()
# print '{0} = {1}'.format(f, sizeof_fmt(int(data)))
stats[statfile] = sumdata + int(data)
percentage = 100 * \
float(stats[statfiles[0]]) / float(stats[statfiles[1]])
stats['percent'] = percentage
stats['blocks'] = zramblocks
return stats
def __repr__(self):
output = ''
stats = self.getzramusage()
swap = psutil.swap_memory()
output += 'swap in use: {0}\n'.format(sizeof_fmt(swap.used))
output += 'number of zram devices: {0}\n'.format(len(stats['blocks']))
for k in ['compr_data_size', 'orig_data_size',
'mem_used_total', 'disksize']:
output += '{0}: {1}\n'.format(k, sizeof_fmt(stats.get(k)))
output += 'zram compressed size: {0:.2f}%\n'.format(stats['percent'])
return output
if gtk:
png = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'zram.png')
def cb_exit(w, data):
Gtk.main_quit()
def cb_readzram(ind_app):
stats = ZramUsage().getzramusage()
compressed = 100 - float(stats['percent'])
output = 'ZRAM: {0:.2f}%, Total {1}'.format(
compressed, sizeof_fmt(stats['orig_data_size']))
ind_app.set_label(output, '')
return 1
ind_app = appindicator.Indicator.new_with_path(
"zram-indicator",
png,
appindicator.IndicatorCategory.APPLICATION_STATUS,
os.path.dirname(os.path.realpath(__file__)))
ind_app.set_status(appindicator.IndicatorStatus.ACTIVE)
# create a menu
menu = Gtk.Menu()
menu_items = Gtk.MenuItem("Exit")
menu.append(menu_items)
menu_items.connect("activate", cb_exit, '')
menu_items.show()
ind_app.set_menu(menu)
GLib.timeout_add(1000, cb_readzram, ind_app)
Gtk.main()
else:
print str(ZramUsage())
| #!/usr/bin/env python
import os
import psutil
import sys
from gi.repository import Gtk, GLib
from gi.repository import AppIndicator3 as appindicator
def sizeof_fmt(num):
for x in ['bytes', 'KB', 'MB', 'GB']:
if num < 1024.0 and num > -1024.0:
return "%3.1f%s" % (num, x)
num /= 1024.0
return "%3.1f%s" % (num, 'TB')
class ZramUsage(object):
def getzramusage(self):
blockpath = '/sys/block'
statfiles = [
'compr_data_size', 'orig_data_size', 'mem_used_total',
'disksize', 'notify_free', 'size', 'zero_pages']
zramblocks = [z for z in os.listdir(blockpath) if z.startswith('zram')]
stats = dict((k, 0) for k in statfiles)
for block in zramblocks:
for statfile in statfiles:
f = os.path.join(blockpath, block, statfile)
if os.path.exists(f):
data = 0
sumdata = stats.get(statfile, 0)
with open(f, 'r') as handle:
data = handle.read()
# print '{0} = {1}'.format(f, sizeof_fmt(int(data)))
stats[statfile] = sumdata + int(data)
percentage = 100 * \
float(stats[statfiles[0]]) / float(stats[statfiles[1]])
stats['percent'] = percentage
stats['blocks'] = zramblocks
return stats
def __repr__(self):
output = ''
stats = self.getzramusage()
swap = psutil.swap_memory()
output += 'swap in use: {0}\n'.format(sizeof_fmt(swap.used))
output += 'number of zram devices: {0}\n'.format(len(stats['blocks']))
for k in ['compr_data_size', 'orig_data_size',
'mem_used_total', 'disksize']:
output += '{0}: {1}\n'.format(k, sizeof_fmt(stats.get(k)))
output += 'zram compressed size: {0:.2f}%\n'.format(stats['percent'])
return output
png = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'zram.png')
def cb_exit(w, data):
Gtk.main_quit()
def cb_readzram(ind_app):
stats = ZramUsage().getzramusage()
compressed = 100 - float(stats['percent'])
output = 'ZRAM: {0:.2f}%, {1}/{2}'.format(
compressed, sizeof_fmt(stats['compr_data_size']), sizeof_fmt(stats['orig_data_size']))
ind_app.set_label(output, '')
return 1
ind_app = appindicator.Indicator.new_with_path(
"zram-indicator",
png,
appindicator.IndicatorCategory.APPLICATION_STATUS,
os.path.dirname(os.path.realpath(__file__)))
ind_app.set_status(appindicator.IndicatorStatus.ACTIVE)
# create a menu
menu = Gtk.Menu()
menu_items = Gtk.MenuItem("Exit")
menu.append(menu_items)
menu_items.connect("activate", cb_exit, '')
menu_items.show()
ind_app.set_menu(menu)
GLib.timeout_add(1000, cb_readzram, ind_app)
Gtk.main()
| Python | 0 |
19e59e90cd44f6375d81c971bb5005efc1165a08 | Fix security issue in filter_non_video_iframes | website/utils/filters.py | website/utils/filters.py | def filter_non_video_iframes(html, testing = False):
"""
Given an HTML string, strips iframe tags that do not
(just) contain an embedded video.
Returns the remaining HTML string.
"""
from bs4 import BeautifulSoup
import re
# Tuple of regexes that define allowed URL patterns
matchers = ("^(https?:)?//www\.youtube\.com/embed/[a-zA-Z0-9-_]{8,15}$",)
# Tuple of allowed attributes in an iframe
allowed_attributes = ('height', 'src', 'width')
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
for iframe in dom.findAll("iframe"):
src = iframe.get("src", "")
matched = False
# Check whether any one matcher matches
for matcher in matchers:
exp = re.compile(matcher)
if exp.match(src):
matched = True
break
# If no matcher matched, remove the iframe
if not matched:
iframe.extract()
continue
# If iframe tag contains something, remove the iframe
if len(iframe.contents) > 0:
iframe.extract()
continue
# Check for illegal iframe attributes
for attr in iframe.attrs:
# If iframe contains illegal attribute, remove the iframe
if attr not in allowed_attributes:
iframe.extract()
break
return str(dom)
def obfuscate_email_addresses(html):
"""
Given an HTML string, will obfuscate e-mail addresses using HTML entities.
Works on mailto links and plain e-mail addresses.
Returns the HTML string with obfuscated e-mail addresses.
"""
from bs4 import BeautifulSoup
import re
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# First, look for mailto: links and obfuscate them
for link in dom.findAll("a"):
href = link.get("href", "")
if href.startswith("mailto:"):
link['href'] = "".join(['&#%i;' % ord(char) for char in href])
# The intermediate HTML has all mailto: links obfuscated. Plaintext
# e-mail addresses are next.
intermediate_html = str(dom)
email_seeker = re.compile("([\w._%+-]+@[\w.-]+\.[A-Za-z]{2,4})")
resulting_html = ""
for index, fragment in enumerate(email_seeker.split(intermediate_html)):
if index % 2 != 0:
resulting_html += "".join(['&#%i;' % ord(char) for char in fragment])
else:
resulting_html += fragment
return resulting_html
def strip_scripts_not_in_whitelist(html):
"""
Given an HTML string, will strip all script tags that do not conform to
one of the whitelist patterns as defined in settings.py.
"""
from bs4 import BeautifulSoup
from mezzanine.conf import settings
import logging
logger = logging.getLogger(__name__)
# Parse the whitelist into a list of tags (to make sure format matches exactly)
allowed_tags = []
for allowed_tag_str in settings.RICHTEXT_SCRIPT_TAG_WHITELIST:
allowed_tags.append(str(BeautifulSoup(allowed_tag_str, "html.parser").find("script")))
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# Look for all script tags and match them to the whitelist
for script_tag in dom.findAll("script"):
if str(script_tag) not in allowed_tags:
script_tag.extract()
logger.debug("Found non-whitelisted script tag. Stripped.")
logger.debug("CONF: stripped tag is "+str(script_tag))
else:
logger.debug("Found whitelisted script tag. Did not strip.")
return str(dom)
| def filter_non_video_iframes(html, testing = False):
"""
Given an HTML string, strips iframe tags that do not
(just) contain an embedded video.
Returns the remaining HTML string.
"""
from bs4 import BeautifulSoup
import re
# Tuple of regexes that define allowed URL patterns
matchers = ("^(https?:)?//www\.youtube\.com/embed/[a-zA-Z0-9-_]{8,15}$",)
# Tuple of allowed attributes in an iframe
allowed_attributes = ('height', 'src', 'width')
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
for iframe in dom.findAll("iframe"):
src = iframe.get("src", "")
matched = False
# Check whether any one matcher matches
for matcher in matchers:
exp = re.compile(matcher)
if exp.match(src):
matched = True
break
# If no matcher matched, remove the iframe
if not matched:
iframe.extract()
break
# If iframe tag contains something, remove the iframe
if len(iframe.contents) > 0:
iframe.extract()
break
# Check for illegal iframe attributes
for attr in iframe.attrs:
# If iframe contains illegal attribute, remove the iframe
if attr not in allowed_attributes:
iframe.extract()
break
return str(dom)
def obfuscate_email_addresses(html):
"""
Given an HTML string, will obfuscate e-mail addresses using HTML entities.
Works on mailto links and plain e-mail addresses.
Returns the HTML string with obfuscated e-mail addresses.
"""
from bs4 import BeautifulSoup
import re
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# First, look for mailto: links and obfuscate them
for link in dom.findAll("a"):
href = link.get("href", "")
if href.startswith("mailto:"):
link['href'] = "".join(['&#%i;' % ord(char) for char in href])
# The intermediate HTML has all mailto: links obfuscated. Plaintext
# e-mail addresses are next.
intermediate_html = str(dom)
email_seeker = re.compile("([\w._%+-]+@[\w.-]+\.[A-Za-z]{2,4})")
resulting_html = ""
for index, fragment in enumerate(email_seeker.split(intermediate_html)):
if index % 2 != 0:
resulting_html += "".join(['&#%i;' % ord(char) for char in fragment])
else:
resulting_html += fragment
return resulting_html
def strip_scripts_not_in_whitelist(html):
"""
Given an HTML string, will strip all script tags that do not conform to
one of the whitelist patterns as defined in settings.py.
"""
from bs4 import BeautifulSoup
from mezzanine.conf import settings
import logging
logger = logging.getLogger(__name__)
# Parse the whitelist into a list of tags (to make sure format matches exactly)
allowed_tags = []
for allowed_tag_str in settings.RICHTEXT_SCRIPT_TAG_WHITELIST:
allowed_tags.append(str(BeautifulSoup(allowed_tag_str, "html.parser").find("script")))
# Parse the input HTML into a DOM
dom = BeautifulSoup(html, "html.parser")
# Look for all script tags and match them to the whitelist
for script_tag in dom.findAll("script"):
if str(script_tag) not in allowed_tags:
script_tag.extract()
logger.debug("Found non-whitelisted script tag. Stripped.")
logger.debug("CONF: stripped tag is "+str(script_tag))
else:
logger.debug("Found whitelisted script tag. Did not strip.")
return str(dom)
| Python | 0.000001 |
f99246cb8a41f9271d4d531c036975c9d105d973 | Add ignored exceptions | polyaxon/polyaxon/config_settings/logging.py | polyaxon/polyaxon/config_settings/logging.py | import os
from polyaxon.config_manager import ROOT_DIR, config
LOG_DIRECTORY = ROOT_DIR.child('logs')
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] %(levelname)s %(message)s [%(name)s:%(lineno)s]',
'datefmt': '%d/%b/%Y %H:%M:%S'
},
'simple': {
'format': '%(levelname)8s %(message)s [%(name)s]'
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'logfile': {
'level': config.log_level,
'class': 'logging.handlers.RotatingFileHandler',
'filename': '{}/polyaxon_{}.log'.format(LOG_DIRECTORY, os.getpid()),
'maxBytes': 1024 * 1024 * 8, # 8 MByte
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': config.log_level,
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'polyaxon.streams': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.monitors': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.dockerizer': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'django.request': {
'level': config.log_level,
'propagate': True,
'handlers': ['console', ],
},
},
}
RAVEN_CONFIG = {}
if not (config.is_testing_env or config.is_local_env) and config.platform_dsn:
RAVEN_CONFIG['dsn'] = config.platform_dsn
RAVEN_CONFIG['transport'] = "raven.transport.threaded_requests.ThreadedRequestsHTTPTransport"
RAVEN_CONFIG['release'] = config.get_string('POLYAXON_CHART_VERSION',
is_optional=True,
default='0.0.0')
RAVEN_CONFIG['IGNORE_EXCEPTIONS'] = ['django.db.ProgrammingError',
'django.db.OperationalError',
'django.db.InterfaceError']
RAVEN_CONFIG['environment'] = config.env
| import os
from polyaxon.config_manager import ROOT_DIR, config
LOG_DIRECTORY = ROOT_DIR.child('logs')
if not os.path.exists(LOG_DIRECTORY):
os.makedirs(LOG_DIRECTORY)
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'formatters': {
'standard': {
'format': '[%(asctime)s] %(levelname)s %(message)s [%(name)s:%(lineno)s]',
'datefmt': '%d/%b/%Y %H:%M:%S'
},
'simple': {
'format': '%(levelname)8s %(message)s [%(name)s]'
},
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(process)d %(thread)d %(message)s'
},
},
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
},
'handlers': {
'logfile': {
'level': config.log_level,
'class': 'logging.handlers.RotatingFileHandler',
'filename': '{}/polyaxon_{}.log'.format(LOG_DIRECTORY, os.getpid()),
'maxBytes': 1024 * 1024 * 8, # 8 MByte
'backupCount': 5,
'formatter': 'standard',
},
'console': {
'level': config.log_level,
'class': 'logging.StreamHandler',
'formatter': 'verbose'
},
'sentry': {
'level': 'ERROR',
'class': 'raven.contrib.django.raven_compat.handlers.SentryHandler',
},
},
'loggers': {
'polyaxon.streams': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.monitors': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'polyaxon.dockerizer': {
'handlers': ['console', ],
'propagate': True,
'level': config.log_level,
},
'django.request': {
'level': config.log_level,
'propagate': True,
'handlers': ['console', ],
},
},
}
RAVEN_CONFIG = {}
if not (config.is_testing_env or config.is_local_env) and config.platform_dsn:
RAVEN_CONFIG['dsn'] = config.platform_dsn
RAVEN_CONFIG['transport'] = "raven.transport.threaded_requests.ThreadedRequestsHTTPTransport"
RAVEN_CONFIG['release'] = config.get_string('POLYAXON_CHART_VERSION',
is_optional=True,
default='0.0.0')
RAVEN_CONFIG['environment'] = config.env
| Python | 0.000032 |
54282058900b473b1e1211f8e0b68c1d36280788 | Fix investigations if no user input | core/web/frontend/investigations.py | core/web/frontend/investigations.py | from __future__ import unicode_literals
from flask_classy import route
from flask_login import current_user
from flask import render_template, request, flash, redirect, url_for
from mongoengine import DoesNotExist
from core.web.frontend.generic import GenericView
from core.investigation import Investigation, ImportMethod, ImportResults
from core.web.helpers import get_object_or_404
from core.web.helpers import requires_permissions
from core.database import AttachedFile
from core.entities import Entity
from core.indicators import Indicator
from core.observables import Observable
from core.web.api.api import bson_renderer
class InvestigationView(GenericView):
klass = Investigation
@route("/graph/<id>")
@requires_permissions("read", "investigation")
def graph(self, id):
investigation = get_object_or_404(Investigation, id=id)
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/graph/<klass>/<id>")
@requires_permissions("read", "investigation")
def graph_node(self, klass, id):
if klass == 'entity':
node = get_object_or_404(Entity, id=id)
elif klass == 'indicator':
node = get_object_or_404(Indicator, id=id)
else:
node = get_object_or_404(Observable, id=id)
investigation = Investigation(created_by=current_user.username).save()
investigation.add([], [node])
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/import/<id>", methods=['GET'])
@requires_permissions("write", "investigation")
def import_wait(self, id):
results = get_object_or_404(ImportResults, id=id)
return render_template(
"{}/import_wait.html".format(self.klass.__name__.lower()),
import_results=results)
@route("/import", methods=['GET', 'POST'])
@requires_permissions("write", "investigation")
def inv_import(self):
if request.method == "GET":
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
else:
text = request.form.get('text')
url = request.form.get('url')
if text:
investigation = Investigation(
created_by=current_user.username, import_text=text)
investigation.save()
return redirect(
url_for(
'frontend.InvestigationView:import_from',
id=investigation.id))
else:
try:
if url:
import_method = ImportMethod.objects.get(acts_on="url")
results = import_method.run(url)
elif "file" in request.files:
target = AttachedFile.from_upload(request.files['file'])
import_method = ImportMethod.objects.get(
acts_on=target.content_type)
results = import_method.run(target)
else:
flash("You need to provide an input", "danger")
return redirect(request.referrer)
return redirect(
url_for(
'frontend.InvestigationView:import_wait',
id=results.id))
except DoesNotExist:
flash("This file type is not supported.", "danger")
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
@route("/<id>/import", methods=['GET'])
@requires_permissions("write", "investigation")
def import_from(self, id):
investigation = get_object_or_404(Investigation, id=id)
observables = Observable.from_string(investigation.import_text)
return render_template(
"{}/import_from.html".format(self.klass.__name__.lower()),
investigation=investigation,
observables=bson_renderer(observables))
def handle_form(self, *args, **kwargs):
kwargs['skip_validation'] = True
return super(InvestigationView, self).handle_form(*args, **kwargs)
| from __future__ import unicode_literals
from flask_classy import route
from flask_login import current_user
from flask import render_template, request, flash, redirect, url_for
from mongoengine import DoesNotExist
from core.web.frontend.generic import GenericView
from core.investigation import Investigation, ImportMethod, ImportResults
from core.web.helpers import get_object_or_404
from core.web.helpers import requires_permissions
from core.database import AttachedFile
from core.entities import Entity
from core.indicators import Indicator
from core.observables import Observable
from core.web.api.api import bson_renderer
class InvestigationView(GenericView):
klass = Investigation
@route("/graph/<id>")
@requires_permissions("read", "investigation")
def graph(self, id):
investigation = get_object_or_404(Investigation, id=id)
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/graph/<klass>/<id>")
@requires_permissions("read", "investigation")
def graph_node(self, klass, id):
if klass == 'entity':
node = get_object_or_404(Entity, id=id)
elif klass == 'indicator':
node = get_object_or_404(Indicator, id=id)
else:
node = get_object_or_404(Observable, id=id)
investigation = Investigation(created_by=current_user.username).save()
investigation.add([], [node])
return render_template(
"{}/graph.html".format(self.klass.__name__.lower()),
investigation=bson_renderer(investigation.info()))
@route("/import/<id>", methods=['GET'])
@requires_permissions("write", "investigation")
def import_wait(self, id):
results = get_object_or_404(ImportResults, id=id)
return render_template(
"{}/import_wait.html".format(self.klass.__name__.lower()),
import_results=results)
@route("/import", methods=['GET', 'POST'])
@requires_permissions("write", "investigation")
def inv_import(self):
if request.method == "GET":
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
else:
text = request.form.get('text')
url = request.form.get('url')
if text:
investigation = Investigation(
created_by=current_user.username, import_text=text)
investigation.save()
return redirect(
url_for(
'frontend.InvestigationView:import_from',
id=investigation.id))
else:
try:
if url:
import_method = ImportMethod.objects.get(acts_on="url")
results = import_method.run(url)
else:
target = AttachedFile.from_upload(request.files['file'])
import_method = ImportMethod.objects.get(
acts_on=target.content_type)
results = import_method.run(target)
return redirect(
url_for(
'frontend.InvestigationView:import_wait',
id=results.id))
except DoesNotExist:
flash("This file type is not supported.", "danger")
return render_template(
"{}/import.html".format(self.klass.__name__.lower()))
@route("/<id>/import", methods=['GET'])
@requires_permissions("write", "investigation")
def import_from(self, id):
investigation = get_object_or_404(Investigation, id=id)
observables = Observable.from_string(investigation.import_text)
return render_template(
"{}/import_from.html".format(self.klass.__name__.lower()),
investigation=investigation,
observables=bson_renderer(observables))
def handle_form(self, *args, **kwargs):
kwargs['skip_validation'] = True
return super(InvestigationView, self).handle_form(*args, **kwargs)
| Python | 0.014118 |
7b6542d58bbe788587b47e282ef393eda461f267 | add get method in UserAPI | api/route/user.py | api/route/user.py | from flask import request
from flask.ext import restful
from flask.ext.restful import marshal_with
from route.base import api
from flask.ext.bcrypt import generate_password_hash
from model.base import db
from model.user import User, user_marshaller
class UserAPI(restful.Resource):
@marshal_with(user_marshaller)
def post(self):
data = request.get_json()
hashed_password = generate_password_hash(data['password'])
user = User(data['first_name'], data['last_name'], data['email'], hashed_password, data['birthday'])
db.session.add(user)
db.session.commit()
return user
@marshal_with(user_marshaller)
def get(self):
user = User.query.all()
return user
api.add_resource(UserAPI, "/user")
| from flask import request
from flask.ext import restful
from flask.ext.restful import marshal_with
from route.base import api
from flask.ext.bcrypt import generate_password_hash
from model.base import db
from model.user import User, user_marshaller
class UserAPI(restful.Resource):
@marshal_with(user_marshaller)
def post(self):
data = request.get_json()
hashed_password = generate_password_hash(data['password'])
user = User(data['first_name'], data['last_name'], data['email'], hashed_password, data['birthday'])
db.session.add(user)
db.session.commit()
return user
api.add_resource(UserAPI, "/user") | Python | 0.000001 |
346a7d18ef6dc063e2802a0347709700a1543902 | update 影视列表 | 1/showics/models.py | 1/showics/models.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last modified: Wang Tai (i@wangtai.me)
"""docstring
"""
__revision__ = '0.1'
from django.db import models
class ShowTableIcs(models.Model):
uid = models.CharField(max_length=255, unique=True, primary_key=True)
title = models.CharField(max_length=255, null=False)
description = models.CharField(max_length=255)
date = models.DateField()
class Meta(object):
db_table = 'show_table_ics'
class ShowList(models.Model):
show_id = models.CharField(max_length=255, primary_key=True)
title = models.CharField(max_length=255, unique=True, null=False)
class Meta(object):
db_table = 'show_list' | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Last modified: Wang Tai (i@wangtai.me)
"""docstring
"""
__revision__ = '0.1'
from django.db import models
class ShowTableIcs(models.Model):
# uid
uid = models.CharField(max_length=255, unique=True, primary_key=True)
# title
title = models.CharField(max_length=255, null=False)
# description
description = models.CharField(max_length=255)
# date
date = models.DateField()
class Meta(object):
db_table = 'show_table_ics' | Python | 0 |
5b2cc6ed06045bbe219f9cf81317c1c1a5bac714 | add missing docstring in ttls | biggraphite/drivers/ttls.py | biggraphite/drivers/ttls.py | #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time constants and functions used by accessors."""
import dateutil
import time
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
DEFAULT_UPDATED_ON_TTL_SEC = 3 * DAY
def str_to_datetime(str_repr):
"""Convert a string into a datetime."""
if not str_repr:
return None
return dateutil.parser.parse(str_repr)
def str_to_timestamp(str_repr):
"""Convert a string into a timestamp."""
if not str_repr:
return None
datetime_tuple = str_to_datetime(str_repr)
ts = time.mktime(datetime_tuple.timetuple())
return ts
| #!/usr/bin/env python
# Copyright 2016 Criteo
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Time constants and functions used by accessors."""
import dateutil
import time
MINUTE = 60
HOUR = 60 * MINUTE
DAY = 24 * HOUR
DEFAULT_UPDATED_ON_TTL_SEC = 3 * DAY
def str_to_datetime(str_repr):
if not str_repr:
return None
return dateutil.parser.parse(str_repr)
def str_to_timestamp(str_repr):
if not str_repr:
return None
datetime_tuple = str_to_datetime(str_repr)
ts = time.mktime(datetime_tuple.timetuple())
return ts
| Python | 0.000007 |
808a5b14fc0bfff8d8c23cb4e1f125ef84de6d91 | Remove deprecated oslotest.mockpatch usage | bilean/tests/common/base.py | bilean/tests/common/base.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import fixtures
import testscenarios
import testtools
from bilean.common import messaging
from bilean.tests.common import utils
TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN,
'sqlalchemy': logging.WARN}
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class FakeLogMixin(object):
def setup_logging(self):
# Assign default logs to self.LOG so we can still
# assert on bilean logs.
default_level = logging.INFO
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
default_level = logging.DEBUG
self.LOG = self.useFixture(
fixtures.FakeLogger(level=default_level, format=_LOG_FORMAT))
base_list = set([nlog.split('.')[0]
for nlog in logging.Logger.manager.loggerDict])
for base in base_list:
if base in TEST_DEFAULT_LOGLEVELS:
self.useFixture(fixtures.FakeLogger(
level=TEST_DEFAULT_LOGLEVELS[base],
name=base, format=_LOG_FORMAT))
elif base != 'bilean':
self.useFixture(fixtures.FakeLogger(
name=base, format=_LOG_FORMAT))
class BileanTestCase(testscenarios.WithScenarios,
testtools.TestCase, FakeLogMixin):
def setUp(self):
super(BileanTestCase, self).setUp()
self.setup_logging()
self.useFixture(fixtures.MonkeyPatch(
'bilean.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS',
True))
messaging.setup("fake://", optional=True)
self.addCleanup(messaging.cleanup)
utils.setup_dummy_db()
self.addCleanup(utils.reset_dummy_db)
def patchobject(self, obj, attr, **kwargs):
mockfixture = self.useFixture(fixtures.MockPatchObject(obj, attr,
**kwargs))
return mockfixture.mock
# NOTE(pshchelo): this overrides the testtools.TestCase.patch method
# that does simple monkey-patching in favor of mock's patching
def patch(self, target, **kwargs):
mockfixture = self.useFixture(fixtures.MockPatch(target, **kwargs))
return mockfixture.mock
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import fixtures
from oslotest import mockpatch
import testscenarios
import testtools
from bilean.common import messaging
from bilean.tests.common import utils
TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN,
'sqlalchemy': logging.WARN}
_LOG_FORMAT = "%(levelname)8s [%(name)s] %(message)s"
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class FakeLogMixin(object):
def setup_logging(self):
# Assign default logs to self.LOG so we can still
# assert on bilean logs.
default_level = logging.INFO
if os.environ.get('OS_DEBUG') in _TRUE_VALUES:
default_level = logging.DEBUG
self.LOG = self.useFixture(
fixtures.FakeLogger(level=default_level, format=_LOG_FORMAT))
base_list = set([nlog.split('.')[0]
for nlog in logging.Logger.manager.loggerDict])
for base in base_list:
if base in TEST_DEFAULT_LOGLEVELS:
self.useFixture(fixtures.FakeLogger(
level=TEST_DEFAULT_LOGLEVELS[base],
name=base, format=_LOG_FORMAT))
elif base != 'bilean':
self.useFixture(fixtures.FakeLogger(
name=base, format=_LOG_FORMAT))
class BileanTestCase(testscenarios.WithScenarios,
testtools.TestCase, FakeLogMixin):
def setUp(self):
super(BileanTestCase, self).setUp()
self.setup_logging()
self.useFixture(fixtures.MonkeyPatch(
'bilean.common.exception._FATAL_EXCEPTION_FORMAT_ERRORS',
True))
messaging.setup("fake://", optional=True)
self.addCleanup(messaging.cleanup)
utils.setup_dummy_db()
self.addCleanup(utils.reset_dummy_db)
def patchobject(self, obj, attr, **kwargs):
mockfixture = self.useFixture(mockpatch.PatchObject(obj, attr,
**kwargs))
return mockfixture.mock
# NOTE(pshchelo): this overrides the testtools.TestCase.patch method
# that does simple monkey-patching in favor of mock's patching
def patch(self, target, **kwargs):
mockfixture = self.useFixture(mockpatch.Patch(target, **kwargs))
return mockfixture.mock
| Python | 0.000006 |
c1d35c37bb51943c28f58b4dc8005b775b7076c4 | Clean the terp file | bin/addons/base/__terp__.py | bin/addons/base/__terp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Base',
'version': '1.1',
'category': 'Generic Modules/Base',
'description': """The kernel of OpenERP, needed for all installation.""",
'author': 'Tiny',
'website': 'http://www.openerp.com',
'depends': [],
'init_xml': [
'base_data.xml',
'base_menu.xml',
'security/base_security.xml',
'res/res_security.xml',
'maintenance/maintenance_security.xml'
],
'update_xml': [
'base_update.xml',
'ir/wizard/wizard_menu_view.xml',
'ir/ir.xml',
'ir/workflow/workflow_view.xml',
'module/module_wizard.xml',
'module/module_view.xml',
'module/module_data.xml',
'module/module_report.xml',
'res/res_request_view.xml',
'res/res_lang_view.xml',
'res/partner/partner_report.xml',
'res/partner/partner_view.xml',
'res/partner/partner_wizard.xml',
'res/bank_view.xml',
'res/country_view.xml',
'res/res_currency_view.xml',
'res/partner/crm_view.xml',
'res/partner/partner_data.xml',
'res/ir_property_view.xml',
'security/base_security.xml',
'maintenance/maintenance_view.xml',
'security/ir.model.access.csv'
],
'demo_xml': ['base_demo.xml', 'res/partner/partner_demo.xml', 'res/partner/crm_demo.xml'],
'installable': True,
'active': True,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Base",
"version" : "1.1",
"author" : "Tiny",
"website" : "http://www.openerp.com",
"category" : "Generic Modules/Base",
"description": "The kernel of OpenERP, needed for all installation.",
"depends" : [],
"init_xml" : [
"base_data.xml",
"base_menu.xml",
"security/base_security.xml",
"res/res_security.xml",
"maintenance/maintenance_security.xml",
],
"demo_xml" : [
"base_demo.xml",
"res/partner/partner_demo.xml",
"res/partner/crm_demo.xml",
],
"update_xml" : [
"base_update.xml",
"ir/wizard/wizard_menu_view.xml",
"ir/ir.xml",
"ir/workflow/workflow_view.xml",
"module/module_wizard.xml",
"module/module_view.xml",
"module/module_data.xml",
"module/module_report.xml",
"res/res_request_view.xml",
"res/res_lang_view.xml",
"res/partner/partner_report.xml",
"res/partner/partner_view.xml",
"res/partner/partner_wizard.xml",
"res/bank_view.xml",
"res/country_view.xml",
"res/res_currency_view.xml",
"res/partner/crm_view.xml",
"res/partner/partner_data.xml",
"res/ir_property_view.xml",
"security/base_security.xml",
"maintenance/maintenance_view.xml",
"security/ir.model.access.csv",
],
"active": True,
"installable": True,
}
| Python | 0 |
ca5c3648ad5f28090c09ecbbc0e008c51a4ce708 | Add a new dev (optional) parameter and use it | bin/push/silent_ios_push.py | bin/push/silent_ios_push.py | import json
import logging
import argparse
import emission.net.ext_service.push.notify_usage as pnu
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(prog="silent_ios_push")
parser.add_argument("interval",
help="specify the sync interval that the phones have subscribed to",
type=int)
parser.add_argument("-d", "--dev", action="store_true", default=False)
args = parser.parse_args()
logging.debug("About to send notification to phones with interval %d" % args.interval)
response = pnu.send_silent_notification_to_ios_with_interval(args.interval, dev=args.dev)
pnu.display_response(response)
| import json
import logging
import argparse
import emission.net.ext_service.push.notify_usage as pnu
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(prog="silent_ios_push")
parser.add_argument("interval",
help="specify the sync interval that the phones have subscribed to",
type=int)
args = parser.parse_args()
logging.debug("About to send notification to phones with interval %d" % args.interval)
response = pnu.send_silent_notification_to_ios_with_interval(args.interval, dev=True)
pnu.display_response(response)
| Python | 0.000001 |
a33b8222959cc14a4c89658e6d7aa6ff07f27c0c | remove commented code | ephypype/import_ctf.py | ephypype/import_ctf.py | """Import ctf."""
# -------------------- nodes (Function)
def convert_ds_to_raw_fif(ds_file):
"""CTF .ds to .fif and save result in pipeline folder structure."""
import os
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from mne.io import read_raw_ctf
_, basename, ext = split_f(ds_file)
# print(subj_path, basename, ext)
raw = read_raw_ctf(ds_file)
raw_fif_file = os.path.abspath(basename + "_raw.fif")
if not op.isfile(raw_fif_file):
raw = read_raw_ctf(ds_file)
raw.save(raw_fif_file)
else:
print(('*** RAW FIF file %s exists!!!' % raw_fif_file))
return raw_fif_file
| """Import ctf."""
# -------------------- nodes (Function)
def convert_ds_to_raw_fif(ds_file):
"""CTF .ds to .fif and save result in pipeline folder structure."""
import os
import os.path as op
from nipype.utils.filemanip import split_filename as split_f
from mne.io import read_raw_ctf
_, basename, ext = split_f(ds_file)
# print(subj_path, basename, ext)
raw = read_raw_ctf(ds_file)
# raw_fif_file = os.path.abspath(basename + "_raw.fif")
# raw.save(raw_fif_file)
# return raw_fif_file
raw_fif_file = os.path.abspath(basename + "_raw.fif")
if not op.isfile(raw_fif_file):
raw = read_raw_ctf(ds_file)
raw.save(raw_fif_file)
else:
print(('*** RAW FIF file %s exists!!!' % raw_fif_file))
return raw_fif_file
| Python | 0 |
697d3c4c80574d82e8aa37e2a13cbaeefdad255c | bump version | kuyruk/__init__.py | kuyruk/__init__.py | from __future__ import absolute_import
import logging
from kuyruk.kuyruk import Kuyruk
from kuyruk.worker import Worker
from kuyruk.task import Task
from kuyruk.config import Config
__version__ = '0.13.2'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logging.getLogger('kuyruk').addHandler(null_handler)
logging.getLogger('pika').addHandler(null_handler)
| from __future__ import absolute_import
import logging
from kuyruk.kuyruk import Kuyruk
from kuyruk.worker import Worker
from kuyruk.task import Task
from kuyruk.config import Config
__version__ = '0.13.1'
try:
# not available in python 2.6
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
# Add NullHandler to prevent logging warnings on startup
null_handler = NullHandler()
logging.getLogger('kuyruk').addHandler(null_handler)
logging.getLogger('pika').addHandler(null_handler)
| Python | 0 |
50b189888a0ff68f1cc4db1615991d1afe364854 | Update cigar_party.py | Python/Logic_1/cigar_party.py | Python/Logic_1/cigar_party.py | # When squirrels get together for a party, they like to have cigars. A squirrel
# party is successful when the number of cigars is between 40 and 60, inclusive.
# Unless it is the weekend, in which case there is no upper bound on the number
# of cigars. Return True if the party with the given values is successful, or
# False otherwise.
# cigar_party(30, False) --> False
# cigar_party(50, False) --> True
# cigar_party(70, True) --> True
def cigar_party(cigars, is_weekend):
return (is_weekend and cigars >= 40) or (39 < cigars < 61)
print(cigar_party(30, False))
print(cigar_party(50, False))
print(cigar_party(70, True))
| # When squirrels get together for a party, they like to have cigars. A squirrel
# party is successful when the number of cigars is between 40 and 60, inclusive.
# Unless it is the weekend, in which case there is no upper bound on the number
# of cigars. Return True if the party with the given values is successful, or
# False otherwise.
# cigar_party(30, False) → False
# cigar_party(50, False) → True
# cigar_party(70, True) → True
def cigar_party(cigars, is_weekend):
return (is_weekend and cigars >= 40) or (39 < cigars < 61)
print(cigar_party(30, False))
print(cigar_party(50, False))
print(cigar_party(70, True))
| Python | 0.000002 |
4d3d4e457c5886ace69250de1c5f4f696604d43b | Fix cal_seqs with no delay | QGL/BasicSequences/helpers.py | QGL/BasicSequences/helpers.py | # coding=utf-8
from itertools import product
import operator
from ..PulsePrimitives import Id, X, MEAS
from ..ControlFlow import qwait
from functools import reduce
def create_cal_seqs(qubits, numRepeats, measChans=None, waitcmp=False, delay=None):
"""
Helper function to create a set of calibration sequences.
Parameters
----------
qubits : logical channels, e.g. (q1,) or (q1,q2) (tuple)
numRepeats = number of times to repeat calibration sequences (int)
waitcmp = True if the sequence contains branching
delay: optional time between state preparation and measurement (s)
"""
if measChans is None:
measChans = qubits
calSet = [Id, X]
#Make all combination for qubit calibration states for n qubits and repeat
cal_seqs = [reduce(operator.mul, [p(q) for p, q in zip(pulseSet, qubits)])
for pulseSet in product(calSet, repeat=len(qubits))
for _ in range(numRepeats)]
#Add on the measurement operator.
measBlock = reduce(operator.mul, [MEAS(q) for q in qubits])
#Add optional delay
full_cal_seqs = [[seq, Id(qubits[0], delay), measBlock] if delay else [seq, measBlock] for seq in cal_seqs]
if waitcmp:
[cal_seq.append(qwait('CMP')) for cal_seq in full_cal_seqs]
return full_cal_seqs
def cal_descriptor(qubits, numRepeats):
states = ['0', '1']
# generate state set in same order as we do above in create_cal_seqs()
state_set = [reduce(operator.add, s) for s in product(states, repeat=len(qubits))]
descriptor = {
'name': 'calibration',
'unit': 'state',
'partition': 2,
'points': []
}
for state in state_set:
descriptor['points'] += [state] * numRepeats
return descriptor
def time_descriptor(times, desired_units="us"):
if desired_units == "s":
scale = 1
elif desired_units == "ms":
scale = 1e3
elif desired_units == "us" or desired_units == u"μs":
scale = 1e6
elif desired_units == "ns":
scale = 1e9
axis_descriptor = {
'name': 'time',
'unit': desired_units,
'points': list(scale * times),
'partition': 1
}
return axis_descriptor
| # coding=utf-8
from itertools import product
import operator
from ..PulsePrimitives import Id, X, MEAS
from ..ControlFlow import qwait
from functools import reduce
def create_cal_seqs(qubits, numRepeats, measChans=None, waitcmp=False, delay=None):
"""
Helper function to create a set of calibration sequences.
Parameters
----------
qubits : logical channels, e.g. (q1,) or (q1,q2) (tuple)
numRepeats = number of times to repeat calibration sequences (int)
waitcmp = True if the sequence contains branching
delay: optional time between state preparation and measurement (s)
"""
if measChans is None:
measChans = qubits
calSet = [Id, X]
#Make all combination for qubit calibration states for n qubits and repeat
calSeqs = [reduce(operator.mul, [p(q) for p, q in zip(pulseSet, qubits)])
for pulseSet in product(calSet, repeat=len(qubits))
for _ in range(numRepeats)]
#Add on the measurement operator.
measBlock = reduce(operator.mul, [MEAS(q) for q in qubits])
return [[seq, Id(qubits[0], delay), measBlock, qwait('CMP')] if waitcmp else [seq, Id(qubits[0], delay), measBlock]
for seq in calSeqs]
def cal_descriptor(qubits, numRepeats):
states = ['0', '1']
# generate state set in same order as we do above in create_cal_seqs()
state_set = [reduce(operator.add, s) for s in product(states, repeat=len(qubits))]
descriptor = {
'name': 'calibration',
'unit': 'state',
'partition': 2,
'points': []
}
for state in state_set:
descriptor['points'] += [state] * numRepeats
return descriptor
def time_descriptor(times, desired_units="us"):
if desired_units == "s":
scale = 1
elif desired_units == "ms":
scale = 1e3
elif desired_units == "us" or desired_units == u"μs":
scale = 1e6
elif desired_units == "ns":
scale = 1e9
axis_descriptor = {
'name': 'time',
'unit': desired_units,
'points': list(scale * times),
'partition': 1
}
return axis_descriptor
| Python | 0.000002 |
22f6ecc5b61dae0a638b4191eb2ae3ddf1b13895 | fix organization events don't have repository/owner/login | bioconda_utils/bot/views.py | bioconda_utils/bot/views.py | """
HTTP Views (pages)
"""
import logging
from aiohttp import web
from .events import event_routes
from ..githubhandler import Event
from .. import __version__ as VERSION
from .worker import celery
from .config import APP_SECRET
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
web_routes = web.RouteTableDef() # pylint: disable=invalid-name
@web_routes.post('/_gh')
async def webhook_dispatch(request):
"""Accepts webhooks from Github and dispatches them to event handlers"""
try:
body = await request.read()
secret = APP_SECRET
if secret == "IGNORE":
logger.error("IGNORING WEBHOOK SECRET (DEBUG MODE)")
secret = None
event = Event.from_http(request.headers, body, secret=secret)
# Respond to liveness check
if event.event == "ping":
return web.Response(status=200)
# Log Event
installation = event.get('installation/id')
to_user = event.get('repository/owner/login', None)
to_repo = event.get('repository/name', None)
action = event.get('action', None)
action_msg = '/' + action if action else ''
logger.info("Received GH Event '%s%s' (%s) for %s (%s/%s)",
event.event, action_msg,
event.delivery_id,
installation, to_user, to_repo)
# Get GithubAPI object for this installation
ghapi = await request.app['ghappapi'].get_github_api(
dry_run=False, installation=installation, to_user=to_user, to_repo=to_repo
)
# Dispatch the Event
try:
await event_routes.dispatch(event, ghapi)
logger.info("Event '%s%s' (%s) done", event.event, action_msg, event.delivery_id)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to dispatch %s", event.delivery_id)
request.app['gh_rate_limit'] = ghapi.rate_limit
try:
events_remaining = ghapi.rate_limit.remaining
except AttributeError:
events_remaining = "Unknown"
logger.info('GH requests remaining: %s', events_remaining)
return web.Response(status=200)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in webhook dispatch")
return web.Response(status=500)
@web_routes.get("/")
async def show_status(request):
"""Shows the index page
This is rendered at eg https://bioconda.herokuapps.com/
"""
try:
logger.info("Status: getting celery data")
msg = f"""
Running version {VERSION}
{request.app.get('gh_rate_limit')}
"""
worker_status = celery.control.inspect(timeout=0.1)
if not worker_status:
msg += """
no workers online
"""
else:
for worker in sorted(worker_status.ping().keys()):
active = worker_status.active(worker)
reserved = worker_status.reserved(worker)
msg += f"""
Worker: {worker}
active: {len(active[worker])}
queued: {len(reserved[worker])}
"""
return web.Response(text=msg)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in show status")
return web.Response(status=500)
| """
HTTP Views (pages)
"""
import logging
from aiohttp import web
from .events import event_routes
from ..githubhandler import Event
from .. import __version__ as VERSION
from .worker import celery
from .config import APP_SECRET
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
web_routes = web.RouteTableDef() # pylint: disable=invalid-name
@web_routes.post('/_gh')
async def webhook_dispatch(request):
"""Accepts webhooks from Github and dispatches them to event handlers"""
try:
body = await request.read()
secret = APP_SECRET
if secret == "IGNORE":
logger.error("IGNORING WEBHOOK SECRET (DEBUG MODE)")
secret = None
event = Event.from_http(request.headers, body, secret=secret)
# Respond to liveness check
if event.event == "ping":
return web.Response(status=200)
# Log Event
installation = event.get('installation/id')
to_user = event.get('repository/owner/login')
to_repo = event.get('repository/name')
action = event.get('action', None)
action_msg = '/' + action if action else ''
logger.info("Received GH Event '%s%s' (%s) for %s (%s/%s)",
event.event, action_msg,
event.delivery_id,
installation, to_user, to_repo)
# Get GithubAPI object for this installation
ghapi = await request.app['ghappapi'].get_github_api(
dry_run=False, installation=installation, to_user=to_user, to_repo=to_repo
)
# Dispatch the Event
try:
await event_routes.dispatch(event, ghapi)
logger.info("Event '%s%s' (%s) done", event.event, action_msg, event.delivery_id)
except Exception: # pylint: disable=broad-except
logger.exception("Failed to dispatch %s", event.delivery_id)
request.app['gh_rate_limit'] = ghapi.rate_limit
try:
events_remaining = ghapi.rate_limit.remaining
except AttributeError:
events_remaining = "Unknown"
logger.info('GH requests remaining: %s', events_remaining)
return web.Response(status=200)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in webhook dispatch")
return web.Response(status=500)
@web_routes.get("/")
async def show_status(request):
"""Shows the index page
This is rendered at eg https://bioconda.herokuapps.com/
"""
try:
logger.info("Status: getting celery data")
msg = f"""
Running version {VERSION}
{request.app.get('gh_rate_limit')}
"""
worker_status = celery.control.inspect(timeout=0.1)
if not worker_status:
msg += """
no workers online
"""
else:
for worker in sorted(worker_status.ping().keys()):
active = worker_status.active(worker)
reserved = worker_status.reserved(worker)
msg += f"""
Worker: {worker}
active: {len(active[worker])}
queued: {len(reserved[worker])}
"""
return web.Response(text=msg)
except Exception: # pylint: disable=broad-except
logger.exception("Failure in show status")
return web.Response(status=500)
| Python | 0.000713 |
667a87988d168a4dbd9b0d86267b445d91f1460b | Fix Daikin sensor temperature_unit & cleanup (#34116) | homeassistant/components/daikin/sensor.py | homeassistant/components/daikin/sensor.py | """Support for Daikin AC sensors."""
import logging
from homeassistant.const import CONF_ICON, CONF_NAME, TEMP_CELSIUS
from homeassistant.helpers.entity import Entity
from . import DOMAIN as DAIKIN_DOMAIN
from .const import ATTR_INSIDE_TEMPERATURE, ATTR_OUTSIDE_TEMPERATURE, SENSOR_TYPES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up the Daikin sensors.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id)
sensors = [ATTR_INSIDE_TEMPERATURE]
if daikin_api.device.support_outside_temperature:
sensors.append(ATTR_OUTSIDE_TEMPERATURE)
async_add_entities([DaikinClimateSensor(daikin_api, sensor) for sensor in sensors])
class DaikinClimateSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, api, monitored_state) -> None:
"""Initialize the sensor."""
self._api = api
self._sensor = SENSOR_TYPES[monitored_state]
self._name = f"{api.name} {self._sensor[CONF_NAME]}"
self._device_attribute = monitored_state
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.mac}-{self._device_attribute}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._device_attribute == ATTR_INSIDE_TEMPERATURE:
return self._api.device.inside_temperature
if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE:
return self._api.device.outside_temperature
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
| """Support for Daikin AC sensors."""
import logging
from homeassistant.const import CONF_ICON, CONF_NAME, CONF_TYPE
from homeassistant.helpers.entity import Entity
from homeassistant.util.unit_system import UnitSystem
from . import DOMAIN as DAIKIN_DOMAIN
from .const import (
ATTR_INSIDE_TEMPERATURE,
ATTR_OUTSIDE_TEMPERATURE,
SENSOR_TYPE_TEMPERATURE,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Old way of setting up the Daikin sensors.
Can only be called when a user accidentally mentions the platform in their
config. But even in that case it would have been ignored.
"""
pass
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Daikin climate based on config_entry."""
daikin_api = hass.data[DAIKIN_DOMAIN].get(entry.entry_id)
sensors = [ATTR_INSIDE_TEMPERATURE]
if daikin_api.device.support_outside_temperature:
sensors.append(ATTR_OUTSIDE_TEMPERATURE)
async_add_entities(
[
DaikinClimateSensor(daikin_api, sensor, hass.config.units)
for sensor in sensors
]
)
class DaikinClimateSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, api, monitored_state, units: UnitSystem, name=None) -> None:
"""Initialize the sensor."""
self._api = api
self._sensor = SENSOR_TYPES.get(monitored_state)
if name is None:
name = f"{self._sensor[CONF_NAME]} {api.name}"
self._name = f"{name} {monitored_state.replace('_', ' ')}"
self._device_attribute = monitored_state
if self._sensor[CONF_TYPE] == SENSOR_TYPE_TEMPERATURE:
self._unit_of_measurement = units.temperature_unit
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self._api.mac}-{self._device_attribute}"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._sensor[CONF_ICON]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
if self._device_attribute == ATTR_INSIDE_TEMPERATURE:
return self._api.device.inside_temperature
if self._device_attribute == ATTR_OUTSIDE_TEMPERATURE:
return self._api.device.outside_temperature
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
async def async_update(self):
"""Retrieve latest state."""
await self._api.async_update()
@property
def device_info(self):
"""Return a device description for device registry."""
return self._api.device_info
| Python | 0 |
fc975bd573d439490a65bb72ff5f6c69b2b0a771 | Update loudness_zwicker_lowpass_intp.py | mosqito/functions/loudness_zwicker/loudness_zwicker_lowpass_intp.py | mosqito/functions/loudness_zwicker/loudness_zwicker_lowpass_intp.py | # -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import math
import numpy as np
#Needed for the loudness_zwicker_lowpass_intp_ea function
from scipy import signal
def loudness_zwicker_lowpass_intp(loudness, tau, sample_rate):
"""1st order low-pass with linear interpolation of signal for
increased precision
Parameters
----------
loudness : numpy.ndarray
Loudness vs. time
tau : float
Filter parameter
sample_rate : int
Louness signal sampling frequency
Outputs
-------
filt_loudness : numpy.ndarray
Filtered loudness
"""
filt_loudness = np.zeros(np.shape(loudness))
# Factor for virtual upsampling/inner iterations
lp_iter = 24
num_samples = np.shape(loudness)[0]
a1 = math.exp(-1 / (sample_rate * lp_iter * tau))
b0 = 1 - a1
y1 = 0
for i in range(num_samples):
x0 = loudness[i]
y1 = b0 * x0 + a1 * y1
filt_loudness[i] = y1
# Linear interpolation steps between current and next sample
if i < num_samples - 1:
xd = (loudness[i + 1] - x0) / lp_iter
# Inner iterations/interpolation
# Must add a -1 because is repeating the twice the first value at the initial of the first for loop.
for ii in range(lp_iter-1):
x0 += xd
y1 = b0 * x0 + a1 * y1
return filt_loudness
def loudness_zwicker_lowpass_intp_ea(loudness, tau, sample_rate):
"""1st order low-pass with linear interpolation of signal for
increased precision
Parameters
----------
loudness : numpy.ndarray
Loudness vs. time
tau : float
Filter parameter
sample_rate : int
Louness signal sampling frequency
Outputs
-------
filt_loudness : numpy.ndarray
Filtered loudness
"""
filt_loudness = np.zeros(np.shape(loudness))
# Factor for virtual upsampling/inner iterations
lp_iter = 24
num_samples = np.shape(loudness)[0]
a1 = math.exp(-1 / (sample_rate * lp_iter * tau))
b0 = 1 - a1
y1 = 0
delta = np.copy(loudness)
delta = np.roll(delta,-1)
delta [-1] = 0
delta = (delta - loudness) / lp_iter
ui_delta = np.zeros(loudness.shape[0]*lp_iter).reshape(loudness.shape[0],lp_iter)
ui_delta [:,0] = loudness
#Create the array complete of deltas to apply the filter.
for i_in in np.arange(1, lp_iter):
ui_delta [:,i_in] = delta + ui_delta [:,i_in-1]
# Rechape into a vector.
ui_delta = ui_delta.reshape(lp_iter*num_samples)
# Sustituir este bucle for por scipy.signal.lfilter https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.lfilter.html
# ui_delta_filt = scipy.signal.lfilter (b0 , a0, ui_delta )
#filt_loudness = ui_delta_filt.reshape(loudness.shape[0],lp_iter).T[:,0]
# Apply the filter.
ui_delta = signal.lfilter([b0], [1,-a1], ui_delta, axis=- 1, zi=None)
# Reshape again to recover the first col.
ui_delta = ui_delta.reshape(loudness.shape[0],lp_iter)
filt_loudness = ui_delta[:,0]
return filt_loudness
| # -*- coding: utf-8 -*-
"""
@date Created on Fri May 22 2020
@author martin_g for Eomys
"""
# Standard library imports
import math
import numpy as np
def loudness_zwicker_lowpass_intp(loudness, tau, sample_rate):
"""1st order low-pass with linear interpolation of signal for
increased precision
Parameters
----------
loudness : numpy.ndarray
Loudness vs. time
tau : float
Filter parameter
sample_rate : int
Louness signal sampling frequency
Outputs
-------
filt_loudness : numpy.ndarray
Filtered loudness
"""
filt_loudness = np.zeros(np.shape(loudness))
# Factor for virtual upsampling/inner iterations
lp_iter = 24
num_samples = np.shape(loudness)[0]
a1 = math.exp(-1 / (sample_rate * lp_iter * tau))
b0 = 1 - a1
y1 = 0
for i in range(num_samples):
x0 = loudness[i]
y1 = b0 * x0 + a1 * y1
filt_loudness[i] = y1
# Linear interpolation steps between current and next sample
if i < num_samples - 1:
xd = (loudness[i + 1] - x0) / lp_iter
# Inner iterations/interpolation
for ii in range(lp_iter):
x0 += xd
y1 = b0 * x0 + a1 * y1
return filt_loudness
| Python | 0.014013 |
260f5ba0b74cfbad9ed13809c62a1a942cc2be7a | fix style | tests/links_tests/connection_tests/test_conv_2d_bn_activ.py | tests/links_tests/connection_tests/test_conv_2d_bn_activ.py | import unittest
import numpy as np
import chainer
from chainer import cuda
from chainer.functions import relu
from chainer import testing
from chainer.testing import attr
from chainercv.links import Conv2DBNActiv
def _add_one(x):
return x + 1
@testing.parameterize(*testing.product({
'args_style': ['explicit', 'None', 'omit'],
'activ': ['relu', 'add_one'],
}))
class TestConv2DBNActiv(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
def setUp(self):
if self.activ == 'relu':
activ = relu
elif self.activ == 'add_one':
activ = _add_one
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape((1, 1, 3, 3))
bn_kwargs = {'decay': 0.8}
initial_bias = 0
if self.args_style == 'explicit':
self.l = Conv2DBNActiv(
self.in_channels, self.out_channels, self.ksize,
self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'None':
self.l = Conv2DBNActiv(
None, self.out_channels, self.ksize, self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'omit':
self.l = Conv2DBNActiv(
self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
# Make the batch normalization to be the identity function.
self.l.bn.avg_var[:] = 1
self.l.bn.avg_mean[:] = 0
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, self.l.xp.ndarray)
if self.activ == 'relu':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), np.maximum(cuda.to_cpu(x_data), 0),
decimal=4
)
elif self.activ == 'add_one':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), cuda.to_cpu(x_data) + 1,
decimal=4
)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| import unittest
import numpy as np
import chainer
from chainer import cuda
from chainer.functions import relu
from chainer import testing
from chainer.testing import attr
from chainercv.links import Conv2DBNActiv
def _add_one(x):
return x + 1
@testing.parameterize(*testing.product({
'args_style': ['explicit', 'None', 'omit'],
'activ': ['relu', 'add_one'],
}))
class TestConv2DBNActiv(unittest.TestCase):
in_channels = 1
out_channels = 1
ksize = 3
stride = 1
pad = 1
def setUp(self):
if self.activ == 'relu':
activ = relu
elif self.activ == 'add_one':
activ = _add_one
self.x = np.random.uniform(
-1, 1, (5, self.in_channels, 5, 5)).astype(np.float32)
self.gy = np.random.uniform(
-1, 1, (5, self.out_channels, 5, 5)).astype(np.float32)
# Convolution is the identity function.
initialW = np.array([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
dtype=np.float32).reshape(1, 1, 3, 3)
bn_kwargs = {'decay': 0.8}
initial_bias = 0
if self.args_style == 'explicit':
self.l = Conv2DBNActiv(
self.in_channels, self.out_channels, self.ksize,
self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'None':
self.l = Conv2DBNActiv(
None, self.out_channels, self.ksize, self.stride, self.pad,
initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
elif self.args_style == 'omit':
self.l = Conv2DBNActiv(
self.out_channels, self.ksize, stride=self.stride,
pad=self.pad, initialW=initialW, initial_bias=initial_bias,
activ=activ, bn_kwargs=bn_kwargs)
def check_forward(self, x_data):
x = chainer.Variable(x_data)
# Make the batch normalization to be the identity function.
self.l.bn.avg_var[:] = 1
self.l.bn.avg_mean[:] = 0
with chainer.using_config('train', False):
y = self.l(x)
self.assertIsInstance(y, chainer.Variable)
self.assertIsInstance(y.data, self.l.xp.ndarray)
if self.activ == 'relu':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), np.maximum(cuda.to_cpu(x_data), 0),
decimal=4
)
elif self.activ == 'add_one':
np.testing.assert_almost_equal(
cuda.to_cpu(y.data), cuda.to_cpu(x_data) + 1,
decimal=4
)
def test_forward_cpu(self):
self.check_forward(self.x)
@attr.gpu
def test_forward_gpu(self):
self.l.to_gpu()
self.check_forward(cuda.to_gpu(self.x))
def check_backward(self, x_data, y_grad):
x = chainer.Variable(x_data)
y = self.l(x)
y.grad = y_grad
y.backward()
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.l.to_gpu()
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
testing.run_module(__name__, __file__)
| Python | 0.000001 |
4748fd514fcafd9a0536b24069bf3365cb60a926 | Bump development version number | debreach/__init__.py | debreach/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.3.1'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '1.3.0'
version_info = version.StrictVersion(__version__).version
default_app_config = 'debreach.apps.DebreachConfig'
| Python | 0 |
f4a010660aecaccf24fe2afdd5a568e06e65dd1e | Fix adding board_collaborator to invite | blimp_boards/boards/serializers.py | blimp_boards/boards/serializers.py | from django.core.exceptions import ValidationError
from rest_framework import serializers
from ..accounts.models import AccountCollaborator
from ..invitations.models import InvitedUser
from ..accounts.permissions import AccountPermission
from ..users.serializers import UserSimpleSerializer
from .models import Board, BoardCollaborator, BoardCollaboratorRequest
class BoardSerializer(serializers.ModelSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Board
read_only_fields = ('slug', )
def validate_account(self, attrs, source):
account = attrs[source]
request = self.context['request']
view = self.context['view']
permission = AccountPermission()
has_object_permission = permission.has_object_permission(
request, view, account)
if not has_object_permission:
msg = 'You are not a collaborator in this account.'
raise serializers.ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
obj.created_by = self.context['request'].user
return super(BoardSerializer, self).save_object(obj, **kwargs)
class BoardCollaboratorSimpleSerializer(serializers.ModelSerializer):
board = BoardSerializer()
class Meta:
model = BoardCollaborator
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'date_created', 'date_modified',)
class BoardCollaboratorSerializer(serializers.ModelSerializer):
email = serializers.EmailField(write_only=True, required=False)
user_data = serializers.SerializerMethodField('get_user_data')
class Meta:
model = BoardCollaborator
read_only_fields = ('board', )
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'email', 'user_data', 'date_created', 'date_modified',)
def get_user_data(self, obj):
from ..invitations.serializers import InvitedUserSimpleSerializer
if obj.invited_user:
serializer = InvitedUserSimpleSerializer(obj.invited_user)
else:
serializer = UserSimpleSerializer(obj.user)
return serializer.data
def validate_email(self, attrs, source):
email = attrs.get(source)
if not email:
return attrs
del attrs[source]
board = self.context['board']
account = board.account
try:
account_collaborator = AccountCollaborator.objects.get(
account=account, user__email=email)
attrs['user'] = account_collaborator.user
except AccountCollaborator.DoesNotExist:
invited_user_data = {
'email': email,
'account': account,
'created_by': self.context['request'].user,
}
self.invited_user, created = InvitedUser.objects.get_or_create(
email=email, account=account, defaults=invited_user_data)
attrs['invited_user'] = self.invited_user
return attrs
def validate_user(self, attrs, source):
user = attrs.get(source)
board = attrs.get('board')
if user and board and board.is_user_collaborator(user):
msg = 'User is already a collaborator in this board.'
raise ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
created = bool(obj.pk)
board = self.context.get('board')
if not created and board:
obj.board = board
super(BoardCollaboratorSerializer, self).save_object(obj, **kwargs)
if not created and obj.invited_user:
self.invited_user.board_collaborator = obj
self.invited_user.save()
self.invited_user.send_invite()
class BoardCollaboratorPublicSerializer(BoardCollaboratorSerializer):
"""
BoardCollaborator serializer that removes email from
user and invited_user fields.
"""
class Meta:
model = BoardCollaborator
def get_user_data(self, obj):
data = super(BoardCollaboratorPublicSerializer,
self).get_user_data(obj)
data.pop('email', None)
return data
class BoardCollaboratorRequestSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = BoardCollaboratorRequest
| from django.core.exceptions import ValidationError
from rest_framework import serializers
from ..accounts.models import AccountCollaborator
from ..invitations.models import InvitedUser
from ..accounts.permissions import AccountPermission
from ..users.serializers import UserSimpleSerializer
from .models import Board, BoardCollaborator, BoardCollaboratorRequest
class BoardSerializer(serializers.ModelSerializer):
created_by = serializers.PrimaryKeyRelatedField(read_only=True)
class Meta:
model = Board
read_only_fields = ('slug', )
def validate_account(self, attrs, source):
account = attrs[source]
request = self.context['request']
view = self.context['view']
permission = AccountPermission()
has_object_permission = permission.has_object_permission(
request, view, account)
if not has_object_permission:
msg = 'You are not a collaborator in this account.'
raise serializers.ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
obj.created_by = self.context['request'].user
return super(BoardSerializer, self).save_object(obj, **kwargs)
class BoardCollaboratorSimpleSerializer(serializers.ModelSerializer):
board = BoardSerializer()
class Meta:
model = BoardCollaborator
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'date_created', 'date_modified',)
class BoardCollaboratorSerializer(serializers.ModelSerializer):
email = serializers.EmailField(write_only=True, required=False)
user_data = serializers.SerializerMethodField('get_user_data')
class Meta:
model = BoardCollaborator
read_only_fields = ('board', )
fields = ('id', 'board', 'user', 'invited_user', 'permission',
'email', 'user_data', 'date_created', 'date_modified',)
def get_user_data(self, obj):
from ..invitations.serializers import InvitedUserSimpleSerializer
if obj.invited_user:
serializer = InvitedUserSimpleSerializer(obj.invited_user)
else:
serializer = UserSimpleSerializer(obj.user)
return serializer.data
def validate_email(self, attrs, source):
email = attrs.get(source)
if not email:
return attrs
del attrs[source]
board = self.context['board']
account = board.account
try:
account_collaborator = AccountCollaborator.objects.get(
account=account, user__email=email)
attrs['user'] = account_collaborator.user
except AccountCollaborator.DoesNotExist:
invited_user_data = {
'email': email,
'account': account,
'created_by': self.context['request'].user,
}
self.invited_user, created = InvitedUser.objects.get_or_create(
email=email, account=account, defaults=invited_user_data)
attrs['invited_user'] = self.invited_user
return attrs
def validate_user(self, attrs, source):
user = attrs.get(source)
board = attrs.get('board')
if user and board and board.is_user_collaborator(user):
msg = 'User is already a collaborator in this board.'
raise ValidationError(msg)
return attrs
def save_object(self, obj, **kwargs):
created = bool(obj.pk)
board = self.context.get('board')
if not created and board:
obj.board = board
super(BoardCollaboratorSerializer, self).save_object(obj, **kwargs)
if not created and obj.invited_user:
self.invited_user.board_collaborator = obj
self.invited_user.send_invite()
class BoardCollaboratorPublicSerializer(BoardCollaboratorSerializer):
"""
BoardCollaborator serializer that removes email from
user and invited_user fields.
"""
class Meta:
model = BoardCollaborator
def get_user_data(self, obj):
data = super(BoardCollaboratorPublicSerializer,
self).get_user_data(obj)
data.pop('email', None)
return data
class BoardCollaboratorRequestSerializer(serializers.ModelSerializer):
email = serializers.EmailField()
class Meta:
model = BoardCollaboratorRequest
| Python | 0 |
6e3cd31c7efbea71b5f731429c24e946ce6fc476 | Bump version | debreach/__init__.py | debreach/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '0.2.0'
version_info = version.StrictVersion(__version__).version
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from distutils import version
__version__ = '0.1.1'
version_info = version.StrictVersion(__version__).version
| Python | 0 |
21149eb8d128c405d0b69991d1855e99ced951c7 | Test fixed: WorkbenchUser is auto created by signal, so creating it separately is not required | ExperimentsManager/tests.py | ExperimentsManager/tests.py | from django.test import TestCase
from .models import Experiment
from UserManager.models import WorkbenchUser
from django.contrib.auth.models import User
from django.test import Client
class ExperimentTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.nl', 'test')
self.workbench_user = WorkbenchUser.objects.get(user=self.user)
self.experiment = Experiment.objects.create(title='Experiment', description='test', version='1.0', owner=self.workbench_user)
def test_index_not_signed_in(self):
c = Client()
response = c.get('/experiments/')
self.assertEqual(response.status_code, 302)
def test_index_signed_in(self):
c = Client()
c.login(username='test', password='test')
response = c.get('/experiments/')
self.assertIsNotNone(response.context['table'])
| from django.test import TestCase
from .models import Experiment
from UserManager.models import WorkbenchUser
from django.contrib.auth.models import User
from django.test import Client
class ExperimentTestCase(TestCase):
def setUp(self):
self.user = User.objects.create_user('test', 'test@test.nl', 'test')
self.workbench_user = WorkbenchUser.objects.create(netid='jlmdegoede', user=self.user)
self.experiment = Experiment.objects.create(title='Experiment', description='test', version='1.0', owner=self.workbench_user)
def test_index_not_signed_in(self):
c = Client()
response = c.get('/experiments/')
self.assertEqual(response.status_code, 302)
def test_index_signed_in(self):
c = Client()
c.login(username='test', password='test')
response = c.get('/experiments/')
self.assertIsNotNone(response.context['table']) | Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.