commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
af073d3cfaddb33d9cb4675c33707a223348e3b8 | fix nans from logs in large models | models/distributions/distributions.py | models/distributions/distributions.py | import math
import theano.tensor as T
# ----------------------------------------------------------------------------
# this is all taken from the parmesan lib
c = - 0.5 * math.log(2*math.pi)
def log_bernoulli(x, p, eps=1e-5):
"""
Compute log pdf of a Bernoulli distribution with success probability p, at values x.
.. math:: \log p(x; p) = \log \mathcal{B}(x; p)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
p : Theano tensor
Success probability :math:`p(x=1)`, which is also the mean of the Bernoulli distribution.
eps : float
Small number used to avoid NaNs by clipping p in range [eps;1-eps].
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
"""
p = T.clip(p, eps, 1.0 - eps)
return -T.nnet.binary_crossentropy(p, x)
def log_normal(x, mean, std, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as standard deviation.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
std : Theano tensor
Standard deviation of the diagonal covariance Gaussian.
eps : float
Small number added to standard deviation to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
See also
--------
log_normal1 : using variance parameterization
log_normal2 : using log variance parameterization
"""
std += eps
return c - T.log(T.abs_(std)) - (x - mean)**2 / (2 * std**2)
def log_normal2(x, mean, log_var, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as log variance rather than standard deviation, which ensures :math:`\sigma > 0`.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
log_var : Theano tensor
Log variance of the diagonal covariance Gaussian.
eps : float
Small number added to denominator to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
See also
--------
log_normal : using standard deviation parameterization
log_normal1 : using variance parameterization
"""
lv_clip = T.clip(log_var, -10., 10.)
return c - log_var/2 - (x - mean)**2 / (2 * T.exp(lv_clip) + eps) | import math
import theano.tensor as T
# ----------------------------------------------------------------------------
# this is all taken from the parmesan lib
c = - 0.5 * math.log(2*math.pi)
def log_bernoulli(x, p, eps=1e-5):
"""
Compute log pdf of a Bernoulli distribution with success probability p, at values x.
.. math:: \log p(x; p) = \log \mathcal{B}(x; p)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
p : Theano tensor
Success probability :math:`p(x=1)`, which is also the mean of the Bernoulli distribution.
eps : float
Small number used to avoid NaNs by clipping p in range [eps;1-eps].
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
"""
p = T.clip(p, eps, 1.0 - eps)
return -T.nnet.binary_crossentropy(p, x)
def log_normal(x, mean, std, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as standard deviation.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
std : Theano tensor
Standard deviation of the diagonal covariance Gaussian.
eps : float
Small number added to standard deviation to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
See also
--------
log_normal1 : using variance parameterization
log_normal2 : using log variance parameterization
"""
std += eps
return c - T.log(T.abs_(std)) - (x - mean)**2 / (2 * std**2)
def log_normal2(x, mean, log_var, eps=1e-5):
"""
Compute log pdf of a Gaussian distribution with diagonal covariance, at values x.
Variance is parameterized as log variance rather than standard deviation, which ensures :math:`\sigma > 0`.
.. math:: \log p(x) = \log \mathcal{N}(x; \mu, \sigma^2I)
Parameters
----------
x : Theano tensor
Values at which to evaluate pdf.
mean : Theano tensor
Mean of the Gaussian distribution.
log_var : Theano tensor
Log variance of the diagonal covariance Gaussian.
eps : float
Small number added to denominator to avoid NaNs.
Returns
-------
Theano tensor
Element-wise log probability, this has to be summed for multi-variate distributions.
See also
--------
log_normal : using standard deviation parameterization
log_normal1 : using variance parameterization
"""
# lv_clip = T.clip(log_var, -10., 10.)
return c - log_var/2 - (x - mean)**2 / (2 * T.exp(log_var) + eps) | Python | 0 |
993d08b0ca0bcf90af77709e58698b7ecc5ba6b5 | Update log.py | django_tenants/log.py | django_tenants/log.py | import logging
from django.db import connection
class TenantContextFilter(logging.Filter):
"""
Add the current ``schema_name`` and ``domain_url`` to log records.
Thanks to @regolith for the snippet on https://github.com/bernardopires/django-tenant-schemas/issues/248
"""
def filter(self, record):
record.schema_name = connection.tenant.schema_name
record.domain_url = getattr(connection.tenant, 'domain_url', None)
return True
| import logging
from django.db import connection
class TenantContextFilter(logging.Filter):
"""
Add the current ``schema_name`` and ``domain_url`` to log records.
Thanks to @regolith for the snippet on https://github.com/bernardopires/django-tenant-schemas/issues/248
"""
def filter(self, record):
record.schema_name = connection.tenant.schema_name
record.domain_url = getattr(connection.tenant, 'domain_url', 'none')
return True
| Python | 0.000001 |
463e6563bcfa63e672ec23231b1a16870b68c56d | Fix __str__ method | pathvalidate/error.py | pathvalidate/error.py | """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import enum
from typing import Optional, cast
from ._common import Platform
@enum.unique
class ErrorReason(enum.Enum):
FOUND_ABS_PATH = "FOUND_ABS_PATH"
NULL_NAME = "NULL_NAME"
INVALID_CHARACTER = "INVALID_CHARACTER"
INVALID_LENGTH = "INVALID_LENGTH"
MALFORMED_ABS_PATH = "MALFORMED_ABS_PATH"
RESERVED_NAME = "RESERVED_NAME"
class ValidationError(ValueError):
"""
Base exception class that indicates invalid name errors.
"""
@property
def platform(self) -> Platform:
return self.__platform
@property
def reason(self) -> Optional[ErrorReason]:
return self.__reason
@property
def description(self) -> str:
return self.__description
@property
def reusable_name(self) -> bool:
return self.__reusable_name
def __init__(self, *args, **kwargs):
self.__platform = kwargs.pop("platform", None)
self.__reason = kwargs.pop("reason", None)
self.__description = kwargs.pop("description", None)
self.__reusable_name = kwargs.pop("reusable_name", None)
try:
super().__init__(*args[0], **kwargs)
except IndexError:
super().__init__(*args, **kwargs)
def __str__(self) -> str:
item_list = []
if Exception.__str__(self):
item_list.append(Exception.__str__(self))
if self.reason:
item_list.append("reason={}".format(cast(ErrorReason, self.reason).value))
if self.platform:
item_list.append("target-platform={}".format(self.platform.value))
if self.description:
item_list.append("description={}".format(self.description))
if self.__reusable_name is not None:
item_list.append("reusable_name={}".format(self.reusable_name))
return ", ".join(item_list).strip()
def __repr__(self, *args, **kwargs):
return self.__str__(*args, **kwargs)
class NullNameError(ValidationError):
"""
Exception raised when a name is empty.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.NULL_NAME
super().__init__(args, **kwargs)
class InvalidCharError(ValidationError):
"""
Exception raised when includes invalid character(s) within a string.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.INVALID_CHARACTER
super().__init__(args, **kwargs)
class InvalidLengthError(ValidationError):
"""
Exception raised when a string too long/short.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.INVALID_LENGTH
super().__init__(args, **kwargs)
class ReservedNameError(ValidationError):
"""
Exception raised when a string matched a reserved name.
"""
@property
def reserved_name(self) -> str:
return self.__reserved_name
def __init__(self, *args, **kwargs) -> None:
self.__reserved_name = kwargs.pop("reserved_name", None)
kwargs["reason"] = ErrorReason.RESERVED_NAME
super().__init__(args, **kwargs)
class ValidReservedNameError(ReservedNameError):
"""
Exception raised when a string matched a reserved name.
However, it can be used as a name.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reusable_name"] = True
super().__init__(args, **kwargs)
class InvalidReservedNameError(ReservedNameError):
"""
Exception raised when a string matched a reserved name.
Moreover, the reserved name is invalid as a name.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reusable_name"] = False
super().__init__(args, **kwargs)
| """
.. codeauthor:: Tsuyoshi Hombashi <tsuyoshi.hombashi@gmail.com>
"""
import enum
from typing import Optional, cast
from ._common import Platform
@enum.unique
class ErrorReason(enum.Enum):
FOUND_ABS_PATH = "FOUND_ABS_PATH"
NULL_NAME = "NULL_NAME"
INVALID_CHARACTER = "INVALID_CHARACTER"
INVALID_LENGTH = "INVALID_LENGTH"
MALFORMED_ABS_PATH = "MALFORMED_ABS_PATH"
RESERVED_NAME = "RESERVED_NAME"
class ValidationError(ValueError):
"""
Base exception class that indicates invalid name errors.
"""
@property
def platform(self) -> Platform:
return self.__platform
@property
def reason(self) -> Optional[ErrorReason]:
return self.__reason
@property
def description(self) -> str:
return self.__description
@property
def reusable_name(self) -> bool:
return self.__reusable_name
def __init__(self, *args, **kwargs):
self.__platform = kwargs.pop("platform", None)
self.__reason = kwargs.pop("reason", None)
self.__description = kwargs.pop("description", None)
self.__reusable_name = kwargs.pop("reusable_name", None)
try:
super().__init__(*args[0], **kwargs)
except IndexError:
super().__init__(*args, **kwargs)
def __str__(self) -> str:
item_list = []
if Exception.__str__(self):
item_list.append(Exception.__str__(self))
if self.reason:
item_list.append("reason={}".format(cast(ErrorReason, self.reason).value))
if self.platform:
item_list.append("target-platform={}".format(self.platform.value))
if self.description:
item_list.append("description={}".format(self.description))
if self.reusable_name:
item_list.append("reusable_name={}".format(self.reusable_name))
return ", ".join(item_list).strip()
def __repr__(self, *args, **kwargs):
return self.__str__(*args, **kwargs)
class NullNameError(ValidationError):
"""
Exception raised when a name is empty.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.NULL_NAME
super().__init__(args, **kwargs)
class InvalidCharError(ValidationError):
"""
Exception raised when includes invalid character(s) within a string.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.INVALID_CHARACTER
super().__init__(args, **kwargs)
class InvalidLengthError(ValidationError):
"""
Exception raised when a string too long/short.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reason"] = ErrorReason.INVALID_LENGTH
super().__init__(args, **kwargs)
class ReservedNameError(ValidationError):
"""
Exception raised when a string matched a reserved name.
"""
@property
def reserved_name(self) -> str:
return self.__reserved_name
def __init__(self, *args, **kwargs) -> None:
self.__reserved_name = kwargs.pop("reserved_name", None)
kwargs["reason"] = ErrorReason.RESERVED_NAME
super().__init__(args, **kwargs)
class ValidReservedNameError(ReservedNameError):
"""
Exception raised when a string matched a reserved name.
However, it can be used as a name.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reusable_name"] = True
super().__init__(args, **kwargs)
class InvalidReservedNameError(ReservedNameError):
"""
Exception raised when a string matched a reserved name.
Moreover, the reserved name is invalid as a name.
"""
def __init__(self, *args, **kwargs) -> None:
kwargs["reusable_name"] = False
super().__init__(args, **kwargs)
| Python | 0.020279 |
2756326b134acc6c343be8458870121baed963cb | fix db url | pergamena/settings.py | pergamena/settings.py | # -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
SECRET_KEY = os_env.get('PERGAMENA_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = os_env.get('DATABASE_URL')
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db' # TODO: Change me
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 1 # For faster tests
WTF_CSRF_ENABLED = False # Allows form testing
| # -*- coding: utf-8 -*-
import os
os_env = os.environ
class Config(object):
SECRET_KEY = os_env.get('PERGAMENA_SECRET', 'secret-key') # TODO: Change me
APP_DIR = os.path.abspath(os.path.dirname(__file__)) # This directory
PROJECT_ROOT = os.path.abspath(os.path.join(APP_DIR, os.pardir))
BCRYPT_LOG_ROUNDS = 13
ASSETS_DEBUG = False
DEBUG_TB_ENABLED = False # Disable Debug toolbar
DEBUG_TB_INTERCEPT_REDIRECTS = False
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class ProdConfig(Config):
"""Production configuration."""
ENV = 'prod'
DEBUG = False
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db' # TODO: Change me
DEBUG_TB_ENABLED = False # Disable Debug toolbar
class DevConfig(Config):
"""Development configuration."""
ENV = 'dev'
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/pergamena_db' # TODO: Change me
DEBUG_TB_ENABLED = True
ASSETS_DEBUG = True # Don't bundle/minify static assets
CACHE_TYPE = 'simple' # Can be "memcached", "redis", etc.
class TestConfig(Config):
TESTING = True
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite://'
BCRYPT_LOG_ROUNDS = 1 # For faster tests
WTF_CSRF_ENABLED = False # Allows form testing
| Python | 0.999203 |
b7c531220fe7a46ad56eeeb160effe94510ba4b0 | Use handler registration in listener | pg_bawler/listener.py | pg_bawler/listener.py | #!/usr/bin/env python
'''
Listen on given channel for notification.
$ python -m pg_bawler.listener mychannel
If you installed notification trigger with ``pg_bawler.gen_sql`` then
channel is the same as ``tablename`` argument.
'''
import argparse
import asyncio
import importlib
import logging
import sys
import pg_bawler.core
LOGGER = logging.getLogger('pg_bawler.listener')
class DefaultHandler:
def __init__(self):
self.count = 0
async def handle_notification(self, notification):
self.count += 1
notification_number = self.count
LOGGER.info(
'Received notification #%s pid %s from channel %s: %s',
notification_number, notification.pid,
notification.channel, notification.payload)
def get_default_cli_args_parser():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--dsn',
metavar='DSN',
help='Connection string. e.g. `dbname=test user=postgres`')
parser.add_argument(
'--handler',
metavar='HANDLER', default='pg_bawler.listener:default_handler',
help=(
'Module and name of python callable.'
' e.g. `pg_bawler.listener:default_handler`'))
parser.add_argument(
'channel',
metavar='CHANNEL', type=str,
help='Name of Notify/Listen channel to listen on.')
return parser
def resolve_handler(handler_str):
module_name, callable_name = handler_str.split(':')
return getattr(importlib.import_module(module_name), callable_name)
default_handler = DefaultHandler().handle_notification
class NotificationListener(
pg_bawler.core.BawlerBase,
pg_bawler.core.ListenerMixin
):
pass
def main():
args = get_default_cli_args_parser().parse_args()
logging.basicConfig(
format='[%(asctime)s][%(name)s][%(levelname)s]: %(message)s',
level=logging.DEBUG)
LOGGER.info('Starting pg_bawler listener for channel: %s', args.channel)
loop = asyncio.get_event_loop()
listener = NotificationListener(connection_params={'dsn': args.dsn})
listener.listen_timeout = 5
listener.register_handler(resolve_handler(args.handler))
loop.run_until_complete(listener.register_channel(args.channel))
loop.run_until_complete(listener.listen())
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/env python
'''
Listen on given channel for notification.
$ python -m pg_bawler.listener mychannel
If you installed notification trigger with ``pg_bawler.gen_sql`` then
channel is the same as ``tablename`` argument.
'''
import argparse
import asyncio
import importlib
import logging
import sys
import pg_bawler.core
LOGGER = logging.getLogger('pg_bawler.listener')
class DefaultHandler:
def __init__(self):
self.count = 0
async def handle_notification(self, notification):
self.count += 1
notification_number = self.count
LOGGER.info(
'Received notification #%s pid %s from channel %s: %s',
notification_number, notification.pid,
notification.channel, notification.payload)
def get_default_cli_args_parser():
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument(
'--dsn',
metavar='DSN',
help='Connection string. e.g. `dbname=test user=postgres`')
parser.add_argument(
'--handler',
metavar='HANDLER', default='pg_bawler.listener:default_handler',
help=(
'Module and name of python callable.'
' e.g. `pg_bawler.listener:default_handler`'))
parser.add_argument(
'channel',
metavar='CHANNEL', type=str,
help='Name of Notify/Listen channel to listen on.')
return parser
def resolve_handler(handler_str):
module_name, callable_name = handler_str.split(':')
return getattr(importlib.import_module(module_name), callable_name)
default_handler = DefaultHandler().handle_notification
class NotificationListener(
pg_bawler.core.BawlerBase,
pg_bawler.core.ListenerMixin
):
pass
def main():
args = get_default_cli_args_parser().parse_args()
logging.basicConfig(
format='[%(asctime)s][%(name)s][%(levelname)s]: %(message)s',
level=logging.DEBUG)
LOGGER.info('Starting pg_bawler listener for channel: %s', args.channel)
loop = asyncio.get_event_loop()
listener = NotificationListener(connection_params={'dsn': args.dsn})
listener.listen_timeout = 5
listener.handler = resolve_handler(args.handler)
loop.run_until_complete(listener.register_channel(args.channel))
loop.run_until_complete(listener.listen())
if __name__ == '__main__':
sys.exit(main())
| Python | 0 |
36ae5c9502d8aa7189d2e89c094a18c9891cbb6a | Use PID, which represents stable ID, over ID, which is instance dependent | pg_bridge/pgbridge.py | pg_bridge/pgbridge.py | """
PostGIS bridge
"""
import psycopg2
import json
class PGBMABridge(object):
def __init__(self, layer, conn_args):
self.layer = layer
self.connect(conn_args)
def connect(self, conn_args):
self.conn = psycopg2.connect(host=conn_args['host'],
user=conn_args['user'],
database=conn_args['database'],
password=conn_args['password'],
)
self.cursor = self.conn.cursor()
self.cursor.execute("SELECT column_name, udt_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name=%s",(self.layer,));
res = self.cursor.fetchall()
self.cnames = []
self.geometry_col = None
for r in res:
if r[1] == 'geometry':
self.geometry_col = r[0]
else:
self.cnames.append(r[0])
def get_all(self):
query = ' '.join(['SELECT',','.join(self.cnames),'FROM',self.layer,';'])
self.cursor.execute(query)
rows = self.cursor.fetchall()
ret = []
for row in rows:
dr = {}
for c in range(len(self.cnames)):
dr[self.cnames[c]] = row[c]
ret.append(dr)
return json.dumps(ret)
def get_pos(self, id):
query = 'SELECT ST_AsGeoJSON(ST_Centroid(the_geom)) FROM '+self.layer+ ' WHERE pid=%s'
self.cursor.execute(query,(id,))
res = self.cursor.fetchone()
return res[0]
def find_in_rect(self, N, E, S, W, srid):
c0 = '%f %f'%(W,N)
c1 = '%f %f'%(E,N)
c2 = '%f %f'%(E,S)
c3 = '%f %f'%(W,S)
polygon = 'POLYGON(('+ ','.join([c0,c1,c2,c3,c0]) +'))'
st_polygon = "ST_GeomFromText('" + polygon + "', "+ str(srid) +")"
query = 'SELECT pid FROM %s WHERE ST_Contains(ST_Transform(%s,ST_SRID(%s)), %s)'%(self.layer, st_polygon, self.geometry_col, self.geometry_col)
#print('[RECT] %s'%query)
self.cursor.execute(query)
ret = []
for row in self.cursor.fetchall():
ret.append(row[0])
return json.dumps(ret)
| """
PostGIS bridge
"""
import psycopg2
import json
class PGBMABridge(object):
def __init__(self, layer, conn_args):
self.layer = layer
self.connect(conn_args)
def connect(self, conn_args):
self.conn = psycopg2.connect(host=conn_args['host'],
user=conn_args['user'],
database=conn_args['database'],
password=conn_args['password'],
)
self.cursor = self.conn.cursor()
self.cursor.execute("SELECT column_name, udt_name FROM INFORMATION_SCHEMA.COLUMNS WHERE table_name=%s",(self.layer,));
res = self.cursor.fetchall()
self.cnames = []
self.geometry_col = None
for r in res:
if r[1] == 'geometry':
self.geometry_col = r[0]
else:
self.cnames.append(r[0])
def get_all(self):
query = ' '.join(['SELECT',','.join(self.cnames),'FROM',self.layer,';'])
self.cursor.execute(query)
rows = self.cursor.fetchall()
ret = []
for row in rows:
dr = {}
for c in range(len(self.cnames)):
dr[self.cnames[c]] = row[c]
ret.append(dr)
return json.dumps(ret)
def get_pos(self, id):
query = 'SELECT ST_AsGeoJSON(ST_Centroid(the_geom)) FROM '+self.layer+ ' WHERE pid=%s'
self.cursor.execute(query,(id,))
res = self.cursor.fetchone()
return res[0]
def find_in_rect(self, N, E, S, W, srid):
c0 = '%f %f'%(W,N)
c1 = '%f %f'%(E,N)
c2 = '%f %f'%(E,S)
c3 = '%f %f'%(W,S)
polygon = 'POLYGON(('+ ','.join([c0,c1,c2,c3,c0]) +'))'
st_polygon = "ST_GeomFromText('" + polygon + "', "+ str(srid) +")"
query = 'SELECT id FROM %s WHERE ST_Contains(ST_Transform(%s,ST_SRID(%s)), %s)'%(self.layer, st_polygon, self.geometry_col, self.geometry_col)
#print('[RECT] %s'%query)
self.cursor.execute(query)
ret = []
for row in self.cursor.fetchall():
ret.append(row[0])
return json.dumps(ret)
| Python | 0 |
77170407ad61370dda87c1ed3f24aa2a50cb4ccc | Access the current line directly from the Document instance instead of calculating it manually | pgcli/key_bindings.py | pgcli/key_bindings.py | import logging
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.keys import Keys
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.filters import Condition
from .filters import HasSelectedCompletion
_logger = logging.getLogger(__name__)
def pgcli_bindings(get_vi_mode_enabled, set_vi_mode_enabled, expand_tab):
"""Custom key bindings for pgcli."""
assert callable(get_vi_mode_enabled)
assert callable(set_vi_mode_enabled)
tab_insert_text = ' ' * 4 if expand_tab else '\t'
key_binding_manager = KeyBindingManager(
enable_open_in_editor=True,
enable_system_bindings=True,
enable_auto_suggest_bindings=True,
enable_search=True,
enable_abort_and_exit_bindings=True)
@key_binding_manager.registry.add_binding(Keys.F2)
def _(event):
"""
Enable/Disable SmartCompletion Mode.
"""
_logger.debug('Detected F2 key.')
buf = event.cli.current_buffer
buf.completer.smart_completion = not buf.completer.smart_completion
@key_binding_manager.registry.add_binding(Keys.F3)
def _(event):
"""
Enable/Disable Multiline Mode.
"""
_logger.debug('Detected F3 key.')
buf = event.cli.current_buffer
buf.always_multiline = not buf.always_multiline
@key_binding_manager.registry.add_binding(Keys.F4)
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
_logger.debug('Detected F4 key.')
vi_mode = not get_vi_mode_enabled()
set_vi_mode_enabled(vi_mode)
event.cli.editing_mode = EditingMode.VI if vi_mode else EditingMode.EMACS
@key_binding_manager.registry.add_binding(Keys.Tab)
def _(event):
"""Force autocompletion at cursor on non-empty lines."""
_logger.debug('Detected <Tab> key.')
buff = event.cli.current_buffer
doc = buff.document
if doc.current_line.strip():
if buff.complete_state:
buff.complete_next()
else:
event.cli.start_completion(select_first=True)
else:
buff.insert_text(tab_insert_text, fire_event=False)
@key_binding_manager.registry.add_binding(Keys.ControlSpace)
def _(event):
"""
Initialize autocompletion at cursor.
If the autocompletion menu is not showing, display it with the
appropriate completions for the context.
If the menu is showing, select the next completion.
"""
_logger.debug('Detected <C-Space> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=False)
@key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion())
def _(event):
"""
Makes the enter key work as the tab key only when showing the menu.
"""
_logger.debug('Detected <C-J> key.')
event.current_buffer.complete_state = None
b = event.cli.current_buffer
b.complete_state = None
return key_binding_manager
| import logging
from prompt_toolkit.enums import EditingMode
from prompt_toolkit.keys import Keys
from prompt_toolkit.key_binding.manager import KeyBindingManager
from prompt_toolkit.filters import Condition
from .filters import HasSelectedCompletion
_logger = logging.getLogger(__name__)
def pgcli_bindings(get_vi_mode_enabled, set_vi_mode_enabled, expand_tab):
"""Custom key bindings for pgcli."""
assert callable(get_vi_mode_enabled)
assert callable(set_vi_mode_enabled)
tab_insert_text = ' ' * 4 if expand_tab else '\t'
key_binding_manager = KeyBindingManager(
enable_open_in_editor=True,
enable_system_bindings=True,
enable_auto_suggest_bindings=True,
enable_search=True,
enable_abort_and_exit_bindings=True)
@key_binding_manager.registry.add_binding(Keys.F2)
def _(event):
"""
Enable/Disable SmartCompletion Mode.
"""
_logger.debug('Detected F2 key.')
buf = event.cli.current_buffer
buf.completer.smart_completion = not buf.completer.smart_completion
@key_binding_manager.registry.add_binding(Keys.F3)
def _(event):
"""
Enable/Disable Multiline Mode.
"""
_logger.debug('Detected F3 key.')
buf = event.cli.current_buffer
buf.always_multiline = not buf.always_multiline
@key_binding_manager.registry.add_binding(Keys.F4)
def _(event):
"""
Toggle between Vi and Emacs mode.
"""
_logger.debug('Detected F4 key.')
vi_mode = not get_vi_mode_enabled()
set_vi_mode_enabled(vi_mode)
event.cli.editing_mode = EditingMode.VI if vi_mode else EditingMode.EMACS
@key_binding_manager.registry.add_binding(Keys.Tab)
def _(event):
"""Force autocompletion at cursor on non-empty lines."""
_logger.debug('Detected <Tab> key.')
buff = event.cli.current_buffer
doc = buff.document
line_start = doc.cursor_position + doc.get_start_of_line_position()
line_end = doc.cursor_position + doc.get_end_of_line_position()
current_line = doc.text[line_start:line_end]
if current_line.strip():
if buff.complete_state:
buff.complete_next()
else:
event.cli.start_completion(select_first=True)
else:
buff.insert_text(tab_insert_text, fire_event=False)
@key_binding_manager.registry.add_binding(Keys.ControlSpace)
def _(event):
"""
Initialize autocompletion at cursor.
If the autocompletion menu is not showing, display it with the
appropriate completions for the context.
If the menu is showing, select the next completion.
"""
_logger.debug('Detected <C-Space> key.')
b = event.cli.current_buffer
if b.complete_state:
b.complete_next()
else:
event.cli.start_completion(select_first=False)
@key_binding_manager.registry.add_binding(Keys.ControlJ, filter=HasSelectedCompletion())
def _(event):
"""
Makes the enter key work as the tab key only when showing the menu.
"""
_logger.debug('Detected <C-J> key.')
event.current_buffer.complete_state = None
b = event.cli.current_buffer
b.complete_state = None
return key_binding_manager
| Python | 0 |
e3f7b73ee06301484dbb97209508c5f36a88236f | split Polar/Airfoil data. added more general modification slots for airfoil preprocessing | fusedwind/src/fusedwind/basic_airfoil.py | fusedwind/src/fusedwind/basic_airfoil.py | #!/usr/bin/env python
# encoding: utf-8
from openmdao.main.api import Component, Assembly, VariableTree
from openmdao.main.datatypes.api import Float, Array, Slot, Str, List
# ------- variable trees ---------
class PolarDataVT(VariableTree):
"""airfoil data at a given Reynolds number"""
alpha = Array(units='deg', desc='angles of attack')
cl = Array(desc='corresponding lift coefficients')
cd = Array(desc='corresponding drag coefficients')
cm = Array(desc='corresponding pitching moment coefficients')
class AirfoilDataVT(VariableTree):
Re = Array(desc='Reynolds number')
polars = List(Slot(PolarDataVT), desc='corresponding Polar data')
# ------------------------------------
# ------- base classes ----------
class BasicAirfoilBase(Component):
"""Evaluation of airfoil at angle of attack and Reynolds number"""
# inputs
alpha = Float(iotype='in', units='deg', desc='angle of attack')
Re = Float(iotype='in', desc='Reynolds number')
# outputs
cl = Float(iotype='out', desc='lift coefficient')
cd = Float(iotype='out', desc='drag coefficient')
cm = Float(iotype='out', desc='pitching moment coefficient')
def airfoilForces(airfoil, alpha, Re):
"""convenience method to use BasicAirfoilBase
as a regular python function as opposed to a component"""
airfoil.alpha = alpha
airfoil.Re = Re
airfoil.run()
return airfoil.cl, airfoil.cd, airfoil.cm
class ModifyAirfoilBase(Component):
"""Used for extrapolation, 3D corrections, etc."""
# inputs
afIn = Slot(AirfoilDataVT, iotype='in', desc='tabulated airfoil data')
# outputs
afOut = Slot(AirfoilDataVT, iotype='out', desc='tabulated airfoil data')
def __init__(self):
super(ModifyAirfoilBase, self).__init__()
self.afIn = AirfoilDataVT()
self.afOut = AirfoilDataVT()
class NoModification(ModifyAirfoilBase):
def execute(self):
self.afOut = self.afIn
class ReadAirfoilBase(Component):
"""Read airfoil data from a file"""
# inputs
fileIn = Str(iotype='in', desc='name of file')
# outputs
afOut = Slot(AirfoilDataVT, iotype='out', desc='tabulated airfoil data')
def __init__(self):
super(ReadAirfoilBase, self).__init__()
self.afOut = AirfoilDataVT()
class WriteAirfoilBase(Component):
"""Write airfoil data to a file"""
# inputs
afIn = Slot(AirfoilDataVT, iotype='in', desc='tabulated airfoil data')
fileOut = Str(iotype='in', desc='name of file')
def __init__(self):
super(WriteAirfoilBase, self).__init__()
self.afIn = AirfoilDataVT()
# ---------------------------
# ------- assemblies -------------
class AirfoilPreprocessingAssembly(Assembly):
    """Pipeline assembly: read airfoil data, apply up to three
    modifications in sequence, then write the result to a file."""

    # slots declared explicitly for the benefit of the GUI
    read = Slot(ReadAirfoilBase)
    mod1 = Slot(ModifyAirfoilBase)
    mod2 = Slot(ModifyAirfoilBase)
    mod3 = Slot(ModifyAirfoilBase)
    write = Slot(WriteAirfoilBase)

    def configure(self):
        # mod1-mod3 default to no-ops; users replace them with real modifiers
        self.add('read', ReadAirfoilBase())
        self.add('mod1', NoModification())
        self.add('mod2', NoModification())
        self.add('mod3', NoModification())
        self.add('write', WriteAirfoilBase())
        self.driver.workflow.add(['read', 'mod1', 'mod2', 'mod3', 'write'])
        # chain the airfoil data through the pipeline
        self.connect('read.afOut', 'mod1.afIn')
        self.connect('mod1.afOut', 'mod2.afIn')
        self.connect('mod2.afOut', 'mod3.afIn')
        self.connect('mod3.afOut', 'write.afIn')
        # expose the file names at the assembly boundary
        self.create_passthrough('read.fileIn')
        self.create_passthrough('write.fileOut')
# ---------------------------------
| #!/usr/bin/env python
# encoding: utf-8
from openmdao.main.api import Component, Assembly, VariableTree
from openmdao.main.datatypes.api import Float, Array, Slot, Str, List
# ------- variable trees ---------
class PolarDataVT(VariableTree):
    """airfoil data at a given Reynolds number"""
    alpha = Array(units='deg', desc='angles of attack')
    cl = Array(desc='corresponding lift coefficients')
    cd = Array(desc='corresponding drag coefficients')
    cm = Array(desc='corresponding pitching moment coefficients')


class AirfoilDataVT(VariableTree):
    """Tabulated airfoil data: one polar per Reynolds number."""
    Re = Array(desc='Reynolds number')
    polars = List(PolarDataVT, desc='corresponding Polar data')


# ------------------------------------
# ------- base classes ----------

class BasicAirfoilBase(Component):
    """Evaluation of airfoil at angle of attack and Reynolds number"""

    # inputs
    alpha = Float(iotype='in', units='deg', desc='angle of attack')
    Re = Float(iotype='in', desc='Reynolds number')

    # outputs
    cl = Float(iotype='out', desc='lift coefficient')
    cd = Float(iotype='out', desc='drag coefficient')
    cm = Float(iotype='out', desc='pitching moment coefficient')

    def airfoilForces(airfoil, alpha, Re):
        """convenience method to use BasicAirfoilBase
        as a regular python function as opposed to a component

        NOTE: the first parameter is deliberately named ``airfoil``
        rather than ``self``.
        """
        airfoil.alpha = alpha
        airfoil.Re = Re
        airfoil.run()
        return airfoil.cl, airfoil.cd, airfoil.cm


class ModifyAirfoilBase(Component):
    """Used for extrapolation, 3D corrections, etc."""

    # inputs
    afIn = Slot(AirfoilDataVT, iotype='in', desc='tabulated airfoil data')

    # outputs
    afOut = Slot(AirfoilDataVT, iotype='out', desc='tabulated airfoil data')

    def __init__(self):
        super(ModifyAirfoilBase, self).__init__()
        # default (empty) variable trees so the slots are never None
        self.afIn = AirfoilDataVT()
        self.afOut = AirfoilDataVT()


class ReadAirfoilBase(Component):
    """Read airfoil data from a file"""

    # inputs
    fileIn = Str(iotype='in', desc='name of file')

    # outputs
    afOut = Slot(AirfoilDataVT, iotype='out', desc='tabulated airfoil data')

    def __init__(self):
        super(ReadAirfoilBase, self).__init__()
        self.afOut = AirfoilDataVT()


class WriteAirfoilBase(Component):
    """Write airfoil data to a file"""

    # inputs
    afIn = Slot(AirfoilDataVT, iotype='in', desc='tabulated airfoil data')
    fileOut = Str(iotype='in', desc='name of file')

    def __init__(self):
        super(WriteAirfoilBase, self).__init__()
        self.afIn = AirfoilDataVT()
# ---------------------------
# ------- assemblies -------------
class AirfoilPreprocessingAssembly(Assembly):
    """Pipeline: read airfoil data, apply a 3D correction and an
    extrapolation, then write the result."""

    # slots declared explicitly for the benefit of the GUI
    read = Slot(ReadAirfoilBase)
    correct3D = Slot(ModifyAirfoilBase)
    extrapolate = Slot(ModifyAirfoilBase)
    write = Slot(WriteAirfoilBase)

    def configure(self):
        # NOTE(review): the defaults are the abstract base classes, which
        # define no execute(); users presumably must replace every slot
        # before running -- confirm intended.
        self.add('read', ReadAirfoilBase())
        self.add('correct3D', ModifyAirfoilBase())
        self.add('extrapolate', ModifyAirfoilBase())
        self.add('write', WriteAirfoilBase())
        self.driver.workflow.add(['read', 'correct3D', 'extrapolate', 'write'])
        # chain the airfoil data through the pipeline
        self.connect('read.afOut', 'correct3D.afIn')
        self.connect('correct3D.afOut', 'extrapolate.afIn')
        self.connect('extrapolate.afOut', 'write.afIn')
        # expose the file names at the assembly boundary
        self.create_passthrough('read.fileIn')
        self.create_passthrough('write.fileOut')
# ---------------------------------
| Python | 0 |
ef53285ce0777650dbbadce92ddfdb15e401887a | Add some error tracking hints for sentry | mainapp/functions/geo_functions.py | mainapp/functions/geo_functions.py | import logging
import re
from typing import Optional, Dict, Any, List, Tuple
from django.conf import settings
from geopy import OpenCage, Nominatim, MapBox
from geopy.exc import GeocoderServiceError
from geopy.geocoders.base import Geocoder
from slugify import slugify
logger = logging.getLogger(__name__)
def get_geolocators() -> List[Tuple[str, Geocoder]]:
    """Build the ordered list of ``(name, geocoder)`` backends to try.

    OpenCage comes first when selected as the engine (its key must then be
    configured), followed by Mapbox when a token is available; Nominatim is
    always appended last as the fallback.
    """
    backends: List[Tuple[str, Geocoder]] = []

    if settings.GEOEXTRACT_ENGINE.lower() == "opencage":
        if not settings.OPENCAGE_KEY:
            raise ValueError(
                "OpenCage Data is selected as Geocoder, however no OPENCAGE_KEY is set"
            )
        backends.append(("opencage", OpenCage(settings.OPENCAGE_KEY)))

    if settings.MAPBOX_TOKEN:
        backends.append(("mapbox", MapBox(settings.MAPBOX_TOKEN)))

    fallback = Nominatim(user_agent=slugify(settings.PRODUCT_NAME) + "/1.0")
    backends.append(("nominatim", fallback))
    return backends
def geocode(search: str) -> Optional[Dict[str, Any]]:
    """Geocode *search* to a GeoJSON Point dict, or None when nothing found.

    Backends are tried in the order returned by get_geolocators().  A backend
    that raises GeocoderServiceError is skipped; a backend that answers
    successfully but with no matches ends the search immediately.
    """
    for name, geolocator in get_geolocators():
        try:
            if name == "mapbox":
                # Mapbox's geopy wrapper does not take a ``language`` kwarg
                location = geolocator.geocode(search, exactly_one=False)
            else:
                # noinspection PyArgumentList
                location = geolocator.geocode(
                    search, language=settings.GEOEXTRACT_LANGUAGE, exactly_one=False
                )
        except GeocoderServiceError as e:
            # service failure: fall through to the next backend
            logger.warning(f"Geocoding with {name} failed: {e}")
            continue
        if location:
            # take the first (best-ranked) match
            return {
                "type": "Point",
                "coordinates": [location[0].longitude, location[0].latitude],
            }
        else:
            # NOTE(review): an empty-but-successful response returns None
            # without consulting the remaining backends -- confirm intended.
            logger.debug(f"No location found for {search}")
            return None
    # exc_info to help sentry with grouping
    logger.error(f"All geocoding attempts failed. Search string was {search}", exc_info=True)
    return None
def _format_opencage_location(location) -> str:
components = location.raw["components"]
if "road" in components:
address = components["road"]
if "house_number" in components:
address += " " + components["house_number"]
elif "pedestrian" in components:
address = components["pedestrian"]
else:
address = location.address
return address
def _format_nominatim_location(location) -> str:
if re.match("^\d", location.split(",")[0]):
# Number at the beginning: probably a house number
return location.split(",")[1] + " " + location.split(",")[0]
else:
return location.split(",")[0]
def latlng_to_address(lat, lng) -> str:
    """Reverse-geocode a coordinate pair to a short address string.

    Uses OpenCage when configured as the engine (key required), otherwise
    Nominatim.  Falls back to the raw "lat, lng" string when the reverse
    lookup yields no result.
    """
    search_str = str(lat) + ", " + str(lng)
    if settings.GEOEXTRACT_ENGINE.lower() == "opencage":
        if not settings.OPENCAGE_KEY:
            raise ValueError(
                "OpenCage Data is selected as Geocoder, however no OPENCAGE_KEY is set"
            )
        location = OpenCage(settings.OPENCAGE_KEY).reverse(search_str)
        if len(location) > 0:
            return _format_opencage_location(location[0])
    else:
        location = Nominatim(
            user_agent=slugify(settings.PRODUCT_NAME) + "/1.0"
        ).reverse(search_str)
        if len(location) > 0:
            # NOTE(review): location[0] is a geopy Location, but the
            # formatter splits it like a string -- verify this code path.
            return _format_nominatim_location(location[0])
    return search_str
| import logging
import re
from typing import Optional, Dict, Any, List, Tuple
from django.conf import settings
from geopy import OpenCage, Nominatim, MapBox
from geopy.exc import GeocoderServiceError
from geopy.geocoders.base import Geocoder
from slugify import slugify
logger = logging.getLogger(__name__)
def get_geolocators() -> List[Tuple[str, Geocoder]]:
    """Build the ordered list of (name, geocoder) backends to try.

    OpenCage first (when configured as the engine), then Mapbox (when a
    token is set); Nominatim is always the final fallback.
    """
    geolocators = []
    if settings.GEOEXTRACT_ENGINE.lower() == "opencage":
        if not settings.OPENCAGE_KEY:
            raise ValueError(
                "OpenCage Data is selected as Geocoder, however no OPENCAGE_KEY is set"
            )
        geolocators.append(("opencage", OpenCage(settings.OPENCAGE_KEY)))
    if settings.MAPBOX_TOKEN:
        geolocators.append(("mapbox", MapBox(settings.MAPBOX_TOKEN)))
    geolocators.append(
        ("nominatim", Nominatim(user_agent=slugify(settings.PRODUCT_NAME) + "/1.0"))
    )
    return geolocators


def geocode(search: str) -> Optional[Dict[str, Any]]:
    """Geocode *search* to a GeoJSON Point dict, or None when nothing found.

    A backend raising GeocoderServiceError is skipped; a backend answering
    with no matches ends the search immediately.
    """
    for name, geolocator in get_geolocators():
        try:
            if name == "mapbox":
                # Mapbox's geopy wrapper does not take a ``language`` kwarg
                location = geolocator.geocode(search, exactly_one=False)
            else:
                # noinspection PyArgumentList
                location = geolocator.geocode(
                    search, language=settings.GEOEXTRACT_LANGUAGE, exactly_one=False
                )
        except GeocoderServiceError as e:
            logger.warning(f"Geocoding with {name} failed: {e}")
            continue
        if location:
            # take the first (best-ranked) match
            return {
                "type": "Point",
                "coordinates": [location[0].longitude, location[0].latitude],
            }
        else:
            # NOTE(review): returns None without trying remaining backends
            logger.debug(f"No location found for {search}")
            return None
    logger.error(f"All geocoding attempts failed. Search string was {search}")
    return None


def _format_opencage_location(location) -> str:
    """Derive a short street address from an OpenCage geopy result."""
    components = location.raw["components"]
    if "road" in components:
        address = components["road"]
        if "house_number" in components:
            address += " " + components["house_number"]
    elif "pedestrian" in components:
        address = components["pedestrian"]
    else:
        address = location.address
    return address


def _format_nominatim_location(location) -> str:
    """Format a Nominatim display name (a comma-separated string)."""
    # NOTE(review): "^\d" is a non-raw regex string (invalid escape on
    # modern Python); prefer r"^\d".
    if re.match("^\d", location.split(",")[0]):
        # Number at the beginning: probably a house number
        return location.split(",")[1] + " " + location.split(",")[0]
    else:
        return location.split(",")[0]


def latlng_to_address(lat, lng) -> str:
    """Reverse-geocode (lat, lng); fall back to the raw coordinate string."""
    search_str = str(lat) + ", " + str(lng)
    if settings.GEOEXTRACT_ENGINE.lower() == "opencage":
        if not settings.OPENCAGE_KEY:
            raise ValueError(
                "OpenCage Data is selected as Geocoder, however no OPENCAGE_KEY is set"
            )
        location = OpenCage(settings.OPENCAGE_KEY).reverse(search_str)
        if len(location) > 0:
            return _format_opencage_location(location[0])
    else:
        location = Nominatim(
            user_agent=slugify(settings.PRODUCT_NAME) + "/1.0"
        ).reverse(search_str)
        if len(location) > 0:
            return _format_nominatim_location(location[0])
    return search_str
| Python | 0 |
5ff6dffeaf757e360a42e22a9df6d74345a4f418 | Fix panda part imports | malcolm/parts/pandabox/__init__.py | malcolm/parts/pandabox/__init__.py | # Find all subpackages, MethodMeta decorated callables, and YAML files
from malcolm.packageutil import prepare_package
__all__ = prepare_package(globals(), __name__)
del prepare_package
| # Don't import all the parts as they need to be created from
# includes.pandabox.hardware_collection()
from malcolm.parts.pandabox.pandaboxdriverpart import PandABoxDriverPart
| Python | 0.000001 |
370731942a2b5cdc6e0f712f5ee307f1ee45e488 | Improve memory usage | markovify/chain.py | markovify/chain.py | import random
import operator
import bisect
import json
BEGIN = "___BEGIN__"
END = "___END__"
def accumulate(iterable, func=operator.add):
    """
    Cumulative calculations. (Summation, by default.)
    Via: https://docs.python.org/3/library/itertools.html#itertools.accumulate

    Yields the running fold of *func* over *iterable*.  An empty iterable
    yields nothing: the original let ``next(it)`` raise StopIteration inside
    the generator, which PEP 479 turns into a RuntimeError on Python 3.7+.
    """
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = func(total, element)
        yield total
class Chain(object):
    """
    A Markov chain representing processes that have both beginnings and ends.
    For example: Sentences.
    """

    def __init__(self, corpus, state_size, model=None):
        """
        `corpus`: A list of lists, where each outer list is a "run"
        of the process (e.g., a single sentence), and each inner list
        contains the steps (e.g., words) in the run. If you want to simulate
        an infinite process, you can come very close by passing just one, very
        long run.

        `state_size`: An integer indicating the number of items the model
        uses to represent its state. For text generation, 2 or 3 are typical.

        `model`: Optional pre-built transition table (as produced by `build`
        or `from_json`); when given, `corpus` is ignored.
        """
        self.state_size = state_size
        self.model = model or self.build(corpus, state_size)

    def build(self, corpus, state_size):
        """
        Build a Python representation of the Markov model. Returns a dict
        of dicts where the keys of the outer dict represent all possible states,
        and point to the inner dicts. The inner dicts represent all possibilities
        for the "next" item in the chain, along with the count of times it
        appears.
        """
        if (type(corpus) != list) or (type(corpus[0]) != list):
            raise Exception("`corpus` must be list of lists")

        # Using a DefaultDict here would be a lot more convenient, however the memory
        # usage is far higher.
        model = {}

        for run in corpus:
            # pad with state_size BEGIN markers and one END marker so the
            # walk has well-defined entry and exit states
            items = ([ BEGIN ] * state_size) + run + [ END ]
            for i in range(len(run) + 1):
                state = tuple(items[i:i+state_size])
                follow = items[i+state_size]
                if state not in model:
                    model[state] = {}

                if follow not in model[state]:
                    model[state][follow] = 0

                model[state][follow] += 1
        return model

    def move(self, state):
        """
        Given a state, choose the next item at random.
        """
        # weighted choice: sample a point on the cumulative distribution
        # and locate its bucket by binary search
        choices, weights = zip(*self.model[state].items())
        cumdist = list(accumulate(weights))
        r = random.random() * cumdist[-1]
        selection = choices[bisect.bisect(cumdist, r)]
        return selection

    def gen(self, init_state=None):
        """
        Starting either with a naive BEGIN state, or the provided `init_state`
        (as a tuple), return a generator that will yield successive items
        until the chain reaches the END state.
        """
        state = init_state or (BEGIN,) * self.state_size
        while True:
            next_word = self.move(state)
            if next_word == END: break
            yield next_word
            # slide the state window forward by one item
            state = tuple(state[1:]) + (next_word,)

    def walk(self, init_state=None):
        """
        Return a list representing a single run of the Markov model, either
        starting with a naive BEGIN state, or the provided `init_state`
        (as a tuple).
        """
        return list(self.gen(init_state))

    def to_json(self):
        """
        Dump the model as a JSON object, for loading later.
        """
        # tuples can't be JSON keys, so serialize as [state, followers] pairs
        return json.dumps(list(self.model.items()))

    @classmethod
    def from_json(cls, json_thing):
        """
        Given a JSON object or JSON string that was created by `self.to_json`,
        return the corresponding markovify.Chain.
        """
        # Python3 compatibility
        try:
            basestring
        except NameError:
            basestring = str

        if isinstance(json_thing, basestring):
            obj = json.loads(json_thing)
        else:
            obj = json_thing

        # recover state_size from the key width and re-tuple the keys
        state_size = len(obj[0][0])
        rehydrated = {tuple(item[0]): item[1] for item in obj}
        inst = cls(None, state_size, rehydrated)
        return inst
| import random
import itertools
import operator
import bisect
import json
from collections import defaultdict
BEGIN = "___BEGIN__"
END = "___END__"
def accumulate(iterable, func=operator.add):
    """
    Cumulative calculations. (Summation, by default.)
    Via: https://docs.python.org/3/library/itertools.html#itertools.accumulate

    Yields the running fold of *func* over *iterable*.  An empty iterable
    yields nothing: the original let ``next(it)`` raise StopIteration inside
    the generator, which PEP 479 turns into a RuntimeError on Python 3.7+.
    """
    it = iter(iterable)
    try:
        total = next(it)
    except StopIteration:
        return
    yield total
    for element in it:
        total = func(total, element)
        yield total
class Chain(object):
    """
    A Markov chain representing processes that have both beginnings and ends.
    For example: Sentences.
    """

    def __init__(self, corpus, state_size, model=None):
        """
        `corpus`: A list of lists, where each outer list is a "run"
        of the process (e.g., a single sentence), and each inner list
        contains the steps (e.g., words) in the run. If you want to simulate
        an infinite process, you can come very close by passing just one, very
        long run.

        `state_size`: An integer indicating the number of items the model
        uses to represent its state. For text generation, 2 or 3 are typical.

        `model`: Optional pre-built transition table; when given, `corpus`
        is ignored.
        """
        self.state_size = state_size
        self.model = model or self.build(corpus, state_size)

    def build(self, corpus, state_size):
        """
        Build a Python representation of the Markov model. Returns a dict
        of dicts where the keys of the outer dict represent all possible states,
        and point to the inner dicts. The inner dicts represent all possibilities
        for the "next" item in the chain, along with the count of times it
        appears.
        """
        if (type(corpus) != list) or (type(corpus[0]) != list):
            raise Exception("`corpus` must be list of lists")

        # nested defaultdict: state -> follower -> occurrence count
        model = defaultdict(lambda: defaultdict(int))

        for run in corpus:
            # pad with state_size BEGIN markers and one END marker
            items = ([ BEGIN ] * state_size) + run + [ END ]
            for i in range(len(run) + 1):
                state = tuple(items[i:i+state_size])
                follow = items[i+state_size]
                model[state][follow] += 1
        return model

    def move(self, state):
        """
        Given a state, choose the next item at random.
        """
        # weighted choice via cumulative distribution + binary search
        choices, weights = zip(*self.model[state].items())
        cumdist = list(accumulate(weights))
        r = random.random() * cumdist[-1]
        selection = choices[bisect.bisect(cumdist, r)]
        return selection

    def gen(self, init_state=None):
        """
        Starting either with a naive BEGIN state, or the provided `init_state`
        (as a tuple), return a generator that will yield successive items
        until the chain reaches the END state.
        """
        state = init_state or (BEGIN,) * self.state_size
        while True:
            next_word = self.move(state)
            if next_word == END: break
            yield next_word
            # slide the state window forward by one item
            state = tuple(state[1:]) + (next_word,)

    def walk(self, init_state=None):
        """
        Return a list representing a single run of the Markov model, either
        starting with a naive BEGIN state, or the provided `init_state`
        (as a tuple).
        """
        return list(self.gen(init_state))

    def to_json(self):
        """
        Dump the model as a JSON object, for loading later.
        """
        # tuples can't be JSON keys, so serialize as [state, followers] pairs
        return json.dumps(list(self.model.items()))

    @classmethod
    def from_json(cls, json_thing):
        """
        Given a JSON object or JSON string that was created by `self.to_json`,
        return the corresponding markovify.Chain.
        """
        # Python3 compatibility
        try:
            basestring
        except NameError:
            basestring = str

        if isinstance(json_thing, basestring):
            obj = json.loads(json_thing)
        else:
            obj = json_thing

        # recover state_size from the key width and re-tuple the keys
        state_size = len(obj[0][0])
        rehydrated = {tuple(item[0]): item[1] for item in obj}
        inst = cls(None, state_size, rehydrated)
        return inst
| Python | 0.000228 |
d2fdf0d91f41350347ba460e33cc04aa1e59eb96 | Call the run script from the analysis driver | analysis_driver.py | analysis_driver.py | #! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import re
import shutil
from subprocess import call
import numpy as np
def read(output_file, variable=None):
    """Read data from a MATfile. Returns a numpy array, or None on an error."""
    from scipy.io import loadmat
    try:
        contents = loadmat(output_file)
    except IOError:
        # missing or unreadable file: signal failure with None
        return None
    # a missing variable name still raises KeyError, as before
    return contents[variable]
def write(results_file, array, labels):
    """Write a Dakota results file from an input array.

    Each output line is "<value>\\t<label>".  Raises IOError when the file
    cannot be opened for writing.
    """
    # ``with`` replaces the old try/finally, which raised UnboundLocalError
    # in ``finally`` (fp unbound) whenever open() itself failed.
    with open(results_file, 'w') as fp:
        for i in range(len(array)):
            fp.write(str(array[i]) + '\t' + labels[i] + '\n')
def get_labels(params_file):
    """Extract response labels from a Dakota parameters file.

    A label line looks like ``1 ASV_1:response_fn_1``; everything after the
    colon is collected.  Raises IOError when the file cannot be read.
    """
    labels = []
    # ``with`` replaces the old try/finally, which raised UnboundLocalError
    # in ``finally`` (fp unbound) whenever open() itself failed; raw strings
    # avoid the invalid "\S" escape warning.
    with open(params_file, 'r') as fp:
        for line in fp:
            if re.search(r'ASV_', line):
                labels.append(''.join(re.findall(r':(\S+)', line)))
    return labels
if __name__ == '__main__':
    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    input_template = 'INPUT.template'
    input_file = 'INPUT'
    output_file = 'bot07.mat'
    output_file_var = 'Botlev'
    run_script = 'run_swash.sh'

    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # incorporate the parameters from Dakota into the SWASH input
    # template, creating a new SWASH input file.
    shutil.copy(os.path.join(start_dir, input_template), os.curdir)
    call(['dprepro', sys.argv[1], input_template, input_file])

    # Call SWASH with a script containing PBS commands.
    # NOTE(review): qsub returns as soon as the job is queued, but the code
    # below reads bot07.mat immediately -- confirm the submission blocks (or
    # that a stale/missing file is acceptable here).
    job_name = 'SWASH-Dakota' + os.path.splitext(os.getcwd())[1]
    call(['qsub', '-N', job_name, run_script])

    # Calculate the mean and standard deviation of the 'Botlev' output
    # values for the simulation. Write the output to a Dakota results
    # file.
    labels = get_labels(sys.argv[1])
    series = read(output_file, output_file_var)
    if series is not None:
        m_series = [np.mean(series), np.std(series)]
    else:
        # simulation output missing: report zeros rather than crash
        m_series = [0, 0]
    write(sys.argv[2], m_series, labels)
| #! /usr/bin/env python
# Brokers communication between Dakota and SWASH through files.
#
# Arguments:
# $1 is 'params.in' from Dakota
# $2 is 'results.out' returned to Dakota
import sys
import os
import re
import shutil
from subprocess import call
import numpy as np
def read(output_file, variable=None):
    """Read data from a MATfile. Returns a numpy array, or None on an error."""
    from scipy.io import loadmat
    try:
        mat = loadmat(output_file)
        var = mat[variable]
    except IOError:
        return None
    else:
        return(var)


def write(results_file, array, labels):
    """Write a Dakota results file from an input array."""
    # NOTE(review): if open() itself raises, ``fp`` is unbound and the
    # finally clause raises UnboundLocalError instead of the IOError.
    try:
        fp = open(results_file, 'w')
        for i in range(len(array)):
            fp.write(str(array[i]) + '\t' + labels[i] + '\n')
    except IOError:
        raise
    finally:
        fp.close()


def get_labels(params_file):
    """Extract labels from a Dakota parameters file."""
    labels = []
    # NOTE(review): same unbound-``fp`` hazard as in write() when open() fails.
    try:
        fp = open(params_file, 'r')
        for line in fp:
            if re.search('ASV_', line):
                labels.append(''.join(re.findall(':(\S+)', line)))
    except IOError:
        raise
    finally:
        fp.close()
    return(labels)


if __name__ == '__main__':
    # Files and directories.
    start_dir = os.path.dirname(os.path.realpath(__file__))
    input_template = 'INPUT.template'
    input_file = 'INPUT'
    output_file = 'bot07.mat'
    output_file_var = 'Botlev'

    # Use the parsing utility `dprepro` (from $DAKOTA_DIR/bin) to
    # incorporate the parameters from Dakota into the SWASH input
    # template, creating a new SWASH input file.
    shutil.copy(os.path.join(start_dir, input_template), os.curdir)
    call(['dprepro', sys.argv[1], input_template, input_file])

    # Call SWASH with the new input file.
    call(['swash_mpi.exe', input_file])

    # Calculate the mean and standard deviation of the 'Botlev' output
    # values for the simulation. Write the output to a Dakota results
    # file.
    labels = get_labels(sys.argv[1])
    series = read(output_file, output_file_var)
    if series is not None:
        m_series = [np.mean(series), np.std(series)]
    else:
        # simulation output missing: report zeros rather than crash
        m_series = [0, 0]
    write(sys.argv[2], m_series, labels)
| Python | 0 |
9e95522c847b12a19cff54737a44f569fe2cf6b7 | Add method for getting Candidacy.party_name | opencivicdata/elections/admin/candidacy.py | opencivicdata/elections/admin/candidacy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom administration panels for Candidacy-related models.
"""
from django import VERSION as django_version
from django.contrib import admin
from opencivicdata.core.admin import base
from .. import models
class CandidacySourceInline(base.LinkInline):
    """
    Custom inline administrative panel for the CandidacySource model.
    """
    model = models.CandidacySource
@admin.register(models.Candidacy)
class CandidacyAdmin(base.ModelAdmin):
    """
    Custom administrative panel for the Candidacy model.
    """
    raw_id_fields = (
        'person',
        'contest',
        'top_ticket_candidacy',
    )
    fields = (
        'candidate_name',
        'post',
        'filed_date',
        'is_incumbent',
        'registration_status',
        'party',
    ) + raw_id_fields
    list_display = (
        'candidate_name',
        'contest',
        'is_incumbent',
        'registration_status',
        'id',
        'party_name',
        'updated_at',
    )
    search_fields = ('candidate_name', 'contest__name', 'post__label', )
    list_filter = (
        'party__name',
        'is_incumbent',
        'registration_status',
        'updated_at',
    )
    # date_hierarchy across relations was added to django 1.11
    # Tuple comparison: the previous "major >= 1 and minor >= 11" test was
    # False for Django 2.0+ (the minor version resets to 0).
    if django_version >= (1, 11):
        date_hierarchy = 'contest__election__date'
    inlines = [
        CandidacySourceInline,
    ]

    def party_name(self, obj):
        """
        Return the name of the Party associated with the Candidacy,
        or None when no party is set.
        """
        if obj.party:
            name = obj.party.name
        else:
            name = None
        return name
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Custom administration panels for Candidacy-related models.
"""
from django import VERSION as django_version
from django.contrib import admin
from opencivicdata.core.admin import base
from .. import models
class CandidacySourceInline(base.LinkInline):
    """
    Custom inline administrative panel for the CandidacySource model.
    """
    model = models.CandidacySource


@admin.register(models.Candidacy)
class CandidacyAdmin(base.ModelAdmin):
    """
    Custom inline administrative panel for the Candidacy model.
    """
    raw_id_fields = (
        'person',
        'contest',
        'top_ticket_candidacy',
    )
    fields = (
        'candidate_name',
        'post',
        'filed_date',
        'is_incumbent',
        'registration_status',
        'party',
    ) + raw_id_fields
    list_display = (
        'candidate_name',
        'contest',
        'is_incumbent',
        'registration_status',
        'id',
        # NOTE(review): list_display does not traverse '__' relations in
        # Django; 'party__name' should fail the admin system checks and
        # needs a method/callable wrapper instead -- confirm.
        'party__name',
        'updated_at',
    )
    search_fields = ('candidate_name', 'contest__name', 'post__label', )
    list_filter = (
        'party__name',
        'is_incumbent',
        'registration_status',
        'updated_at',
    )
    # date_hierarchy across relations was added to django 1.11
    # NOTE(review): this guard is False for Django 2.0+ (minor version
    # resets to 0); a version-tuple comparison would be robust.
    if django_version[0] >= 1 and django_version[1] >= 11:
        date_hierarchy = 'contest__election__start_time'
    inlines = [
        CandidacySourceInline,
    ]
| Python | 0.000001 |
ce266cec800fd921f9b4de82fd9f9666ed2df053 | Fix another shit | modules/gy-271/core/get.py | modules/gy-271/core/get.py | # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC5883
# This code is designed to work with the HMC5883_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Compass?sku=HMC5883_I2CS#tabs-0-product_tabset-2
# Modified by Broda Noel (@BrodaNoel in all social networks)
import smbus
import time
import sys

# Get I2C bus
bus = smbus.SMBus(1)

address = 0x1E

# HMC5883 address, 0x1E(30)
# Select configuration register A, 0x00(00)
# 0x60(96) Normal measurement configuration, Data output rate = 0.75 Hz
bus.write_byte_data(address, 0x00, 0x60)
# HMC5883 address, 0x1E(30)
# Select mode register, 0x02(02)
# 0x00(00) Continuous measurement mode
bus.write_byte_data(address, 0x02, 0x00)

# give the sensor time to produce its first sample
time.sleep(0.5)

# HMC5883 address, 0x1E(30)
# Read data back from 0x03(03), 6 bytes
# X-Axis MSB, X-Axis LSB, Z-Axis MSB, Z-Axis LSB, Y-Axis MSB, Y-Axis LSB
data = bus.read_i2c_block_data(address, 0x03, 6)

# Convert the data: each axis is a big-endian 16-bit value; values above
# 32767 are negative in two's complement, hence the -65536 adjustment.
xMag = data[0] * 256 + data[1]
if xMag > 32767 :
    xMag -= 65536

zMag = data[2] * 256 + data[3]
if zMag > 32767 :
    zMag -= 65536

yMag = data[4] * 256 + data[5]
if yMag > 32767 :
    yMag -= 65536

# Output data to screen as a JSON object (no trailing newline)
sys.stdout.write('{ "x": ' + str(xMag) + ', "y": ' + str(yMag) + ', "z": ' + str(zMag) + ' }')
| # Distributed with a free-will license.
# Use it any way you want, profit or free, provided it fits in the licenses of its associated works.
# HMC5883
# This code is designed to work with the HMC5883_I2CS I2C Mini Module available from ControlEverything.com.
# https://www.controleverything.com/content/Compass?sku=HMC5883_I2CS#tabs-0-product_tabset-2
# Modified by Broda Noel (@BrodaNoel in all social networks)
import smbus
import time
import sys  # fix: sys.stdout is used at the end but sys was never imported (NameError)

# Get I2C bus
bus = smbus.SMBus(1)

address = 0x1E

# HMC5883 address, 0x1E(30)
# Select configuration register A, 0x00(00)
# 0x60(96) Normal measurement configuration, Data output rate = 0.75 Hz
bus.write_byte_data(address, 0x00, 0x60)
# HMC5883 address, 0x1E(30)
# Select mode register, 0x02(02)
# 0x00(00) Continuous measurement mode
bus.write_byte_data(address, 0x02, 0x00)

# give the sensor time to produce its first sample
time.sleep(0.5)

# HMC5883 address, 0x1E(30)
# Read data back from 0x03(03), 6 bytes
# X-Axis MSB, X-Axis LSB, Z-Axis MSB, Z-Axis LSB, Y-Axis MSB, Y-Axis LSB
data = bus.read_i2c_block_data(address, 0x03, 6)

# Convert the data: each axis is a big-endian 16-bit value; values above
# 32767 are negative in two's complement, hence the -65536 adjustment.
xMag = data[0] * 256 + data[1]
if xMag > 32767 :
    xMag -= 65536

zMag = data[2] * 256 + data[3]
if zMag > 32767 :
    zMag -= 65536

yMag = data[4] * 256 + data[5]
if yMag > 32767 :
    yMag -= 65536

# Output data to screen as a JSON object (no trailing newline)
sys.stdout.write('{ "x": ' + str(xMag) + ', "y": ' + str(yMag) + ', "z": ' + str(zMag) + ' }')
| Python | 0.000005 |
4fccaeefd67c3c736861870a8fe711a934c96e6d | Add some documentation | mythril/laser/ethereum/transaction.py | mythril/laser/ethereum/transaction.py | import logging
from mythril.laser.ethereum.state import GlobalState, Environment, CalldataType
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from z3 import BitVec
class CallTransaction:
    """ Represents a call value transaction """

    def __init__(self, callee_address):
        """
        Constructor for Call transaction, sets up all symbolic parameters
        :param callee_address: Address of the contract that will be called
        """
        self.callee_address = callee_address
        # unconstrained 256-bit symbolic transaction fields
        self.caller = BitVec("caller", 256)
        self.gas_price = BitVec("gasprice", 256)
        self.call_value = BitVec("callvalue", 256)
        self.origin = BitVec("origin", 256)
        pass

    def run(self, open_world_states, evm):
        """ Runs this transaction on the evm starting from the open world states

        :param open_world_states: iterable of world states to start from
        :param evm: symbolic EVM; its worklist, nodes and edges are mutated
        """
        for open_world_state in open_world_states:
            # Initialize the execution environment
            environment = Environment(
                open_world_state[self.callee_address],
                self.caller,
                [],
                self.gas_price,
                self.call_value,
                self.origin,
                calldata_type=CalldataType.SYMBOLIC,
            )

            # one fresh CFG node per transaction entry point
            new_node = Node(environment.active_account.contract_name)
            evm.instructions_covered = [False for _ in environment.code.instruction_list]
            evm.nodes[new_node.uid] = new_node
            if open_world_state.node:
                # link back to the state that produced this transaction
                evm.edges.append(Edge(open_world_state.node.uid, new_node.uid, edge_type=JumpType.Transaction, condition=None))

            global_state = GlobalState(open_world_state, environment, new_node)
            new_node.states.append(global_state)

            evm.work_list.append(global_state)

        evm.exec()
        logging.info("Execution complete")
        logging.info("Achieved {0:.3g}% coverage".format(evm.coverage))
| import logging
from mythril.laser.ethereum.state import GlobalState, Environment, CalldataType
from mythril.laser.ethereum.cfg import Node, Edge, JumpType
from z3 import BitVec
class CallTransaction:
    """Represents a symbolic message-call transaction."""

    def __init__(self, callee_address):
        """Set up unconstrained 256-bit symbolic transaction parameters.

        :param callee_address: address of the contract that will be called
        """
        self.callee_address = callee_address
        self.caller = BitVec("caller", 256)
        self.gas_price = BitVec("gasprice", 256)
        self.call_value = BitVec("callvalue", 256)
        self.origin = BitVec("origin", 256)
        pass

    def run(self, open_world_states, evm):
        """Run this transaction on *evm* starting from each open world state."""
        for open_world_state in open_world_states:
            # Initialize the execution environment
            environment = Environment(
                open_world_state[self.callee_address],
                self.caller,
                [],
                self.gas_price,
                self.call_value,
                self.origin,
                calldata_type=CalldataType.SYMBOLIC,
            )

            # one fresh CFG node per transaction entry point
            new_node = Node(environment.active_account.contract_name)
            evm.instructions_covered = [False for _ in environment.code.instruction_list]
            evm.nodes[new_node.uid] = new_node
            if open_world_state.node:
                # link back to the state that produced this transaction
                evm.edges.append(Edge(open_world_state.node.uid, new_node.uid, edge_type=JumpType.Transaction, condition=None))

            global_state = GlobalState(open_world_state, environment, new_node)
            new_node.states.append(global_state)

            evm.work_list.append(global_state)

        evm.exec()
        logging.info("Execution complete")
        logging.info("Achieved {0:.3g}% coverage".format(evm.coverage))
| Python | 0.000001 |
9f3bf2756debb4534ddcbf538577044e2bae6528 | remove unused import | memopol2/search.py | memopol2/search.py | # -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import logging
from django.db.models import signals
from django.conf import settings
from whoosh import fields
from whoosh.filedb.filestore import FileStorage
log = logging.getLogger(__name__)
# Whoosh index schema: title and url are stored (returned with hits),
# content is indexed for search only; url is the unique document key.
WHOOSH_SCHEMA = fields.Schema(title=fields.TEXT(stored=True),
                              content=fields.TEXT,
                              url=fields.ID(stored=True, unique=True))


def create_index(sender=None, **kwargs):
    """Create the Whoosh index directory and schema on first use."""
    if not os.path.exists(settings.WHOOSH_INDEX):
        os.mkdir(settings.WHOOSH_INDEX)
        storage = FileStorage(settings.WHOOSH_INDEX)
        storage.create_index(WHOOSH_SCHEMA, indexname='memopol')

signals.post_syncdb.connect(create_index)
def update_index(sender, instance, created, **kwargs):
    """post_save handler: (re)index a single model instance.

    Indexed content comes from the instance's ``content`` attribute
    (called when callable) and falls back to unicode(instance).
    """
    try:
        url = unicode(instance.get_absolute_url())
    except Exception, e:
        # without a url there is no unique key; skip indexing
        log.critical('Cant resolve url. Content %r not indexed' % instance)
        return
    content = getattr(instance, 'content', None)
    if content is None:
        content = unicode(instance)
    elif callable(content):
        content = content()
    storage = FileStorage(settings.WHOOSH_INDEX)
    ix = storage.open_index(indexname='memopol')
    writer = ix.writer()
    if created:
        writer.add_document(title=unicode(instance), content=content,
                            url=url)
        writer.commit()
    else:
        # update_document upserts on the unique ``url`` field
        writer.update_document(title=unicode(instance), content=content,
                               url=url)
        writer.commit()
_searchables = []  # model classes registered via @searchable


def searchable(klass):
    """Class decorator: hook *klass* into the search index via post_save."""
    if hasattr(klass, 'get_absolute_url'):
        signals.post_save.connect(update_index, sender=klass)
        _searchables.append(klass)
        if not hasattr(klass, 'content'):
            log.warn('%s is declared as searchable but has no content attribute' % klass)
    else:
        log.warn('%s is declared as searchable but has no get_absolute_url' % klass)
    return klass
def update():
    """Rebuild the whole index: import every app's models (so their
    @searchable registrations run), then re-index all registered instances."""
    # fix: the three imports all bound the same name ``models``, shadowing
    # each other; aliases keep the intent (side-effect imports) explicit.
    from meps import models as meps_models  # noqa: F401
    from mps import models as mps_models  # noqa: F401
    from reps import models as reps_models  # noqa: F401
    create_index()
    for klass in _searchables:
        for i in klass.objects.all():
            update_index(None, i, created=False)
if __name__ == '__main__':
update()
| # -*- coding: utf-8 -*-
import os
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
import logging
from django.db.models import signals
from django.conf import settings
from whoosh import fields, index
from whoosh.filedb.filestore import FileStorage
log = logging.getLogger(__name__)
# Whoosh index schema: title and url are stored (returned with hits),
# content is indexed for search only; url is the unique document key.
WHOOSH_SCHEMA = fields.Schema(title=fields.TEXT(stored=True),
                              content=fields.TEXT,
                              url=fields.ID(stored=True, unique=True))


def create_index(sender=None, **kwargs):
    """Create the Whoosh index directory and schema on first use."""
    if not os.path.exists(settings.WHOOSH_INDEX):
        os.mkdir(settings.WHOOSH_INDEX)
        storage = FileStorage(settings.WHOOSH_INDEX)
        storage.create_index(WHOOSH_SCHEMA, indexname='memopol')

signals.post_syncdb.connect(create_index)


def update_index(sender, instance, created, **kwargs):
    """post_save handler: (re)index a single model instance."""
    try:
        url = unicode(instance.get_absolute_url())
    except Exception, e:
        # without a url there is no unique key; skip indexing
        log.critical('Cant resolve url. Content %r not indexed' % instance)
        return
    content = getattr(instance, 'content', None)
    if content is None:
        content = unicode(instance)
    elif callable(content):
        content = content()
    storage = FileStorage(settings.WHOOSH_INDEX)
    ix = storage.open_index(indexname='memopol')
    writer = ix.writer()
    if created:
        writer.add_document(title=unicode(instance), content=content,
                            url=url)
        writer.commit()
    else:
        # update_document upserts on the unique ``url`` field
        writer.update_document(title=unicode(instance), content=content,
                               url=url)
        writer.commit()


_searchables = []  # model classes registered via @searchable


def searchable(klass):
    """Class decorator: hook *klass* into the search index via post_save."""
    if hasattr(klass, 'get_absolute_url'):
        signals.post_save.connect(update_index, sender=klass)
        _searchables.append(klass)
        if not hasattr(klass, 'content'):
            log.warn('%s is declared as searchable but has no content attribute' % klass)
    else:
        log.warn('%s is declared as searchable but has no get_absolute_url' % klass)
    return klass


def update():
    """Rebuild the index for every registered searchable model."""
    # NOTE(review): these three imports all bind the same name ``models``,
    # shadowing one another; only the import side effects matter here, but
    # aliasing them would make that explicit.
    from meps import models
    from mps import models
    from reps import models
    create_index()
    for klass in _searchables:
        for i in klass.objects.all():
            update_index(None, i, created=False)


if __name__ == '__main__':
    update()
| Python | 0 |
e995a4725873f0587300aa1d0df6d05c7eaf412c | Move package folder deletion to start of execution | matador/commands/deploy_package.py | matador/commands/deploy_package.py | #!/usr/bin/env python
from .command import Command
from .deploy_ticket import execute_ticket
from matador.session import Session
import subprocess
import os
import shutil
import yaml
from importlib.machinery import SourceFileLoader
class ActionPackage(Command):
def _add_arguments(self, parser):
parser.prog = 'matador deploy-package'
parser.add_argument(
'-e', '--environment',
type=str,
required=True,
help='Agresso environment name')
parser.add_argument(
'-p', '--package',
type=str,
required=True,
help='Package name')
parser.add_argument(
'-c', '--commit',
type=str,
default='none',
help='Commit or tag ID')
@staticmethod
def _checkout_package(package, commit):
proj_folder = Session.project_folder
repo_folder = Session.matador_repository_folder
package_folder = os.path.join(
Session.matador_packages_folder, package)
shutil.rmtree(package_folder, ignore_errors=True)
Session.update_repository()
if commit == 'none':
commit = subprocess.check_output(
['git', '-C', proj_folder, 'rev-parse', 'HEAD'],
stderr=subprocess.STDOUT).decode('utf-8').strip('\n')
subprocess.run([
'git', '-C', repo_folder, 'checkout', commit],
stderr=subprocess.STDOUT,
stdout=open(os.devnull, 'w'),
check=True)
src = os.path.join(repo_folder, 'deploy', 'packages', package)
shutil.copytree(src, package_folder)
def _execute(self):
Session.set_environment(self.args.environment)
self._checkout_package(self.args.package, self.args.commit)
class DeployPackage(ActionPackage):
def _execute(self):
super(DeployPackage, self)._execute()
package_folder = os.path.join(
Session.matador_packages_folder, self.args.package)
Session.deployment_folder = package_folder
ticketsFile = os.path.join(package_folder, 'tickets.yml')
tickets = yaml.load(open(ticketsFile, 'r'))
for ticket in tickets:
execute_ticket(str(ticket), 'deploy', self.args.commit, True)
class RemovePackage(ActionPackage):
def _execute(self):
super(RemovePackage, self)._execute()
package_folder = os.path.join(
Session.matador_packages_folder, self.args.package)
Session.deployment_folder = package_folder
sourceFile = os.path.join(package_folder, 'remove.py')
SourceFileLoader('remove', sourceFile).load_module()
| #!/usr/bin/env python
from .command import Command
from .deploy_ticket import execute_ticket
from matador.session import Session
import subprocess
import os
import shutil
import yaml
from importlib.machinery import SourceFileLoader
class ActionPackage(Command):
def _add_arguments(self, parser):
parser.prog = 'matador deploy-package'
parser.add_argument(
'-e', '--environment',
type=str,
required=True,
help='Agresso environment name')
parser.add_argument(
'-p', '--package',
type=str,
required=True,
help='Package name')
parser.add_argument(
'-c', '--commit',
type=str,
default='none',
help='Commit or tag ID')
@staticmethod
def _checkout_package(package, commit):
proj_folder = Session.project_folder
repo_folder = Session.matador_repository_folder
package_folder = os.path.join(
Session.matador_packages_folder, package)
Session.update_repository()
if commit == 'none':
commit = subprocess.check_output(
['git', '-C', proj_folder, 'rev-parse', 'HEAD'],
stderr=subprocess.STDOUT).decode('utf-8').strip('\n')
subprocess.run([
'git', '-C', repo_folder, 'checkout', commit],
stderr=subprocess.STDOUT,
stdout=open(os.devnull, 'w'),
check=True)
src = os.path.join(repo_folder, 'deploy', 'packages', package)
shutil.copytree(src, package_folder)
def _execute(self):
Session.set_environment(self.args.environment)
self._checkout_package(self.args.package, self.args.commit)
class DeployPackage(ActionPackage):
def _execute(self):
super(DeployPackage, self)._execute()
package_folder = os.path.join(
Session.matador_packages_folder, self.args.package)
Session.deployment_folder = package_folder
ticketsFile = os.path.join(package_folder, 'tickets.yml')
try:
tickets = yaml.load(open(ticketsFile, 'r'))
for ticket in tickets:
execute_ticket(str(ticket), 'deploy', self.args.commit, True)
finally:
shutil.rmtree(package_folder)
class RemovePackage(ActionPackage):
def _execute(self):
super(RemovePackage, self)._execute()
package_folder = os.path.join(
Session.matador_packages_folder, self.args.package)
Session.deployment_folder = package_folder
sourceFile = os.path.join(package_folder, 'remove.py')
try:
SourceFileLoader('remove', sourceFile).load_module()
finally:
shutil.rmtree(package_folder)
| Python | 0.000001 |
534437a0d55fccae50a86a95182a0460d07c64da | Increment version number. | mopidy_pandora/__init__.py | mopidy_pandora/__init__.py | from __future__ import absolute_import, division, print_function, unicode_literals
import os
from mopidy import config, ext
__version__ = '0.2.1'
class Extension(ext.Extension):
dist_name = 'Mopidy-Pandora'
ext_name = 'pandora'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
from pandora import BaseAPIClient
schema = super(Extension, self).get_config_schema()
schema['api_host'] = config.String()
schema['partner_encryption_key'] = config.String()
schema['partner_decryption_key'] = config.String()
schema['partner_username'] = config.String()
schema['partner_password'] = config.String()
schema['partner_device'] = config.String()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['preferred_audio_quality'] = config.String(choices=[BaseAPIClient.LOW_AUDIO_QUALITY,
BaseAPIClient.MED_AUDIO_QUALITY,
BaseAPIClient.HIGH_AUDIO_QUALITY])
schema['sort_order'] = config.String(choices=['date', 'A-Z', 'a-z'])
schema['auto_setup'] = config.Boolean()
schema['auto_set_repeat'] = config.Deprecated()
schema['cache_time_to_live'] = config.Integer(minimum=0)
schema['event_support_enabled'] = config.Boolean()
schema['double_click_interval'] = config.String()
schema['on_pause_resume_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_next_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_previous_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_resume_pause_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
return schema
def setup(self, registry):
from .backend import PandoraBackend
from .frontend import EventMonitorFrontend, PandoraFrontend
registry.add('backend', PandoraBackend)
registry.add('frontend', PandoraFrontend)
registry.add('frontend', EventMonitorFrontend)
| from __future__ import absolute_import, division, print_function, unicode_literals
import os
from mopidy import config, ext
__version__ = '0.2.0'
class Extension(ext.Extension):
dist_name = 'Mopidy-Pandora'
ext_name = 'pandora'
version = __version__
def get_default_config(self):
conf_file = os.path.join(os.path.dirname(__file__), 'ext.conf')
return config.read(conf_file)
def get_config_schema(self):
from pandora import BaseAPIClient
schema = super(Extension, self).get_config_schema()
schema['api_host'] = config.String()
schema['partner_encryption_key'] = config.String()
schema['partner_decryption_key'] = config.String()
schema['partner_username'] = config.String()
schema['partner_password'] = config.String()
schema['partner_device'] = config.String()
schema['username'] = config.String()
schema['password'] = config.Secret()
schema['preferred_audio_quality'] = config.String(choices=[BaseAPIClient.LOW_AUDIO_QUALITY,
BaseAPIClient.MED_AUDIO_QUALITY,
BaseAPIClient.HIGH_AUDIO_QUALITY])
schema['sort_order'] = config.String(choices=['date', 'A-Z', 'a-z'])
schema['auto_setup'] = config.Boolean()
schema['auto_set_repeat'] = config.Deprecated()
schema['cache_time_to_live'] = config.Integer(minimum=0)
schema['event_support_enabled'] = config.Boolean()
schema['double_click_interval'] = config.String()
schema['on_pause_resume_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_next_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_previous_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
schema['on_pause_resume_pause_click'] = config.String(choices=['thumbs_up',
'thumbs_down',
'sleep',
'add_artist_bookmark',
'add_song_bookmark',
'delete_station'])
return schema
def setup(self, registry):
from .backend import PandoraBackend
from .frontend import EventMonitorFrontend, PandoraFrontend
registry.add('backend', PandoraBackend)
registry.add('frontend', PandoraFrontend)
registry.add('frontend', EventMonitorFrontend)
| Python | 0.000001 |
fa67f0326f9f57bc01b023a266e1f896da617ff7 | make send_mail mockable by importing the module | osmaxx-py/excerptconverter/converter_helper.py | osmaxx-py/excerptconverter/converter_helper.py | from django.contrib import messages
from django.core import mail
from django.utils.translation import ugettext_lazy as _
import stored_messages
from osmaxx.excerptexport import models
def module_converter_configuration(name, export_formats, export_options):
"""
:param export_formats example:
{
'txt': {
'name': 'Text',
'file_extension': 'txt',
'mime_type': 'text/plain'
},
'markdown': {
'name': 'Markdown',
'file_extension': 'md',
'mime_type': 'text/markdown'
}
}
:param export_options example:
{
'image_resolution': {
'label': 'Resolution',
'type': 'number',
'default': '500'
},
'quality': {
'label': 'Quality',
'type': 'number',
'default': '10'
}
}
"""
return {
'name': name,
'formats': export_formats,
'options': export_options
}
# functions using database (extraction_order) must be instance methods of a class
# -> free functions will not work: database connection error
class ConverterHelper:
def __init__(self, extraction_order):
self.extraction_order = extraction_order
self.user = extraction_order.orderer
def file_conversion_finished(self):
if self.extraction_order.output_files.count() >= len(self.extraction_order.extraction_formats):
self.inform_user(
messages.SUCCESS,
_('The extraction of the order "{order_id}" has been finished.').format(
order_id=self.extraction_order.id,
),
email=True
)
self.extraction_order.state = models.ExtractionOrderState.FINISHED
self.extraction_order.save()
def inform_user(self, message_type, message_text, email=True):
stored_messages.api.add_message_for(
users=[self.user],
level=message_type,
message_text=message_text
)
if email:
if hasattr(self.user, 'email'):
mail.send_mail(
'[OSMAXX] '+message_text,
message_text,
'no-reply@osmaxx.hsr.ch',
[self.user.email]
)
else:
self.inform_user(
messages.WARNING,
_("There is no email address assigned to your account. "
"You won't be notified by email on process finish!"),
email=False
)
| from django.contrib import messages
from django.core.mail import send_mail
from django.utils.translation import ugettext_lazy as _
import stored_messages
from osmaxx.excerptexport import models
def module_converter_configuration(name, export_formats, export_options):
"""
:param export_formats example:
{
'txt': {
'name': 'Text',
'file_extension': 'txt',
'mime_type': 'text/plain'
},
'markdown': {
'name': 'Markdown',
'file_extension': 'md',
'mime_type': 'text/markdown'
}
}
:param export_options example:
{
'image_resolution': {
'label': 'Resolution',
'type': 'number',
'default': '500'
},
'quality': {
'label': 'Quality',
'type': 'number',
'default': '10'
}
}
"""
return {
'name': name,
'formats': export_formats,
'options': export_options
}
# functions using database (extraction_order) must be instance methods of a class
# -> free functions will not work: database connection error
class ConverterHelper:
def __init__(self, extraction_order):
self.extraction_order = extraction_order
self.user = extraction_order.orderer
def file_conversion_finished(self):
if self.extraction_order.output_files.count() >= len(self.extraction_order.extraction_formats):
self.inform_user(
messages.SUCCESS,
_('The extraction of the order "{order_id}" has been finished.').format(
order_id=self.extraction_order.id,
),
email=True
)
self.extraction_order.state = models.ExtractionOrderState.FINISHED
self.extraction_order.save()
def inform_user(self, message_type, message_text, email=True):
stored_messages.api.add_message_for(
users=[self.user],
level=message_type,
message_text=message_text
)
if email:
if hasattr(self.user, 'email'):
send_mail(
'[OSMAXX] '+message_text,
message_text,
'no-reply@osmaxx.hsr.ch',
[self.user.email]
)
else:
self.inform_user(
messages.WARNING,
_("There is no email address assigned to your account. "
"You won't be notified by email on process finish!"),
email=False
)
| Python | 0 |
bac0b5e09fc172a991fb6b7172025c698c1a23d9 | Add validation that type is type of Rule into MultipleRulesGrammar | grammpy/Grammars/MultipleRulesGrammar.py | grammpy/Grammars/MultipleRulesGrammar.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 15.08.2017 14:40
:Licence GNUv3
Part of grammpy
"""
import inspect
from grammpy.exceptions import NotRuleException
from .StringGrammar import StringGrammar
from ..HashContainer import HashContainer
from ..IsMethodsRuleExtension import IsMethodsRuleExtension as Rule, IsMethodsRuleExtension
class MultipleRulesGrammar(StringGrammar):
def __init__(self,
terminals=None,
nonterminals=None,
rules=None,
start_symbol=None):
super().__init__(terminals, nonterminals, rules, start_symbol)
self._count = 0
def _create_class(self, rule):
name = 'SplitRules' + str(self._count)
self._count += 1
return type(name,
(Rule,),
{"rule": rule})
def _transform_rules(self, rules):
rules = HashContainer.to_iterable(rules)
r = []
for i in rules:
if not inspect.isclass(i) or not issubclass(i, IsMethodsRuleExtension):
raise NotRuleException(i)
if i.is_valid(self) and i.count() > 1:
for rule in i.rules:
r.append(self._create_class(rule))
else:
r.append(i)
return r
def get_rule(self, rules=None):
if rules is None:
return super().get_rule()
results = super().get_rule(self._transform_rules(rules))
if not HashContainer.is_iterable(rules):
return results[0]
return results
def have_rule(self, rules):
return super().have_rule(self._transform_rules(rules))
def remove_rule(self, rules=None):
if rules is None:
return super().remove_rule()
super().remove_rule(self._transform_rules(rules))
def add_rule(self, rules):
super().add_rule(self._transform_rules(rules))
| #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 15.08.2017 14:40
:Licence GNUv3
Part of grammpy
"""
from .StringGrammar import StringGrammar
from ..HashContainer import HashContainer
from ..IsMethodsRuleExtension import IsMethodsRuleExtension as Rule
class MultipleRulesGrammar(StringGrammar):
def __init__(self,
terminals=None,
nonterminals=None,
rules=None,
start_symbol=None):
super().__init__(terminals, nonterminals, rules, start_symbol)
self._count = 0
def _create_class(self, rule):
name = 'SplitRules' + str(self._count)
self._count += 1
return type(name,
(Rule,),
{"rule": rule})
def _transform_rules(self, rules):
rules = HashContainer.to_iterable(rules)
r = []
for i in rules:
if i.is_valid(self) and i.count() > 1:
for rule in i.rules:
r.append(self._create_class(rule))
else:
r.append(i)
return rules
def get_rule(self, rules=None):
if rules is None:
return super().get_rule()
results = super().get_rule(self._transform_rules(rules))
if not HashContainer.is_iterable(rules):
return results[0]
return results
def have_rule(self, rules):
return super().have_rule(self._transform_rules(rules))
def remove_rule(self, rules=None):
if rules is None:
return super().remove_rule()
super().remove_rule(self._transform_rules(rules))
def add_rule(self, rules):
super().add_rule(self._transform_rules(rules))
| Python | 0.000045 |
f09470679ee831272c97dc0765a43faca5f28e75 | Remove extra newline in bordered() | dodo_commands/framework/util.py | dodo_commands/framework/util.py | # -*- coding: utf-8 -*-
"""Utilities."""
from six.moves import input as raw_input
import os
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
_global_config = """
[DodoCommands]
projects_dir=~/projects
python_interpreter=python
diff_tool=diff
"""
def create_global_config():
"""Create config file and default_commands dir."""
base_dir = os.path.expanduser('~/.dodo_commands')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
config_filename = os.path.join(base_dir, "config")
if not os.path.exists(config_filename):
with open(config_filename, 'w') as f:
f.write(_global_config)
default_commands_dir = os.path.join(base_dir, "default_commands")
if not os.path.exists(default_commands_dir):
os.mkdir(default_commands_dir)
init_py = os.path.join(default_commands_dir, "__init__.py")
if not os.path.exists(init_py):
with open(init_py, 'w') as f:
pass
def remove_trailing_dashes(args):
"""Removes first -- item from args."""
return args[1:] if args[:1] == ['--'] else args
def bordered(text):
lines = text.splitlines()
width = max(len(s) for s in lines)
res = ['┌' + '─' * width + '┐']
for s in lines:
res.append('│' + (s + ' ' * width)[:width] + '│')
res.append('└' + '─' * width + '┘')
return '\n'.join(res)
| # -*- coding: utf-8 -*-
"""Utilities."""
from six.moves import input as raw_input
import os
import sys
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
_global_config = """
[DodoCommands]
projects_dir=~/projects
python_interpreter=python
diff_tool=diff
"""
def create_global_config():
"""Create config file and default_commands dir."""
base_dir = os.path.expanduser('~/.dodo_commands')
if not os.path.exists(base_dir):
os.mkdir(base_dir)
config_filename = os.path.join(base_dir, "config")
if not os.path.exists(config_filename):
with open(config_filename, 'w') as f:
f.write(_global_config)
default_commands_dir = os.path.join(base_dir, "default_commands")
if not os.path.exists(default_commands_dir):
os.mkdir(default_commands_dir)
init_py = os.path.join(default_commands_dir, "__init__.py")
if not os.path.exists(init_py):
with open(init_py, 'w') as f:
pass
def remove_trailing_dashes(args):
"""Removes first -- item from args."""
return args[1:] if args[:1] == ['--'] else args
def bordered(text):
lines = text.splitlines()
width = max(len(s) for s in lines)
res = ['┌' + '─' * width + '┐']
for s in lines:
res.append('│' + (s + ' ' * width)[:width] + '│')
res.append('└' + '─' * width + '┘')
return '\n'.join(res) + '\n'
| Python | 0.000001 |
6d8b1ea0e459bd3383528fb32e6b1a348b00a9bc | Remove unknown attributes. | phoxpy/server/auth.py | phoxpy/server/auth.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from random import randint
from phoxpy import exceptions
from phoxpy.messages import PhoxRequest, PhoxResponse
from phoxpy.messages import auth
from phoxpy.server.main import ServerExtension, request_type
__all__ = ['AuthExt']
class AuthExt(ServerExtension):
def __init__(self, db):
db.update({
'licenses': set([]),
'users': {},
'sessions': set([])
})
super(AuthExt, self).__init__(db)
def get_session_id(self):
return str(randint(10000, 50000))
def add_license(self, key):
self.db['licenses'].add(key)
def add_user(self, login, password):
self.db['users'][login] = password
@request_type(auth.AuthRequest)
def handle_login(self, request):
if request.client_id not in self.db['licenses']:
raise exceptions.LicenseNotFound(request.client_id)
if request.instance_count is None:
raise exceptions.LisBaseException(654)
if request.login not in self.db['users']:
raise exceptions.UnknownUser()
if self.db['users'][request.login] != request.password:
raise exceptions.AuthentificationError()
sessionid = self.get_session_id()
self.db['sessions'].add(sessionid)
return auth.AuthResponse(
sessionid=sessionid,
buildnumber=self.build_number,
version=self.server_version
)
@request_type(PhoxRequest)
def handle_logout(self, request):
if request.sessionid not in self.db['sessions']:
raise exceptions.UnknownSession()
self.db['sessions'].remove(request.sessionid)
return PhoxResponse(sessionid=request.sessionid)
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2011 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#
from random import randint
from phoxpy import exceptions
from phoxpy.messages import PhoxRequest, PhoxResponse
from phoxpy.messages import auth
from phoxpy.server.main import ServerExtension, request_type
__all__ = ['AuthExt']
class AuthExt(ServerExtension):
def __init__(self, db):
db.update({
'licenses': set([]),
'users': {},
'sessions': set([])
})
super(AuthExt, self).__init__(db)
def get_session_id(self):
return str(randint(10000, 50000))
def add_license(self, key):
self.db['licenses'].add(key)
def add_user(self, login, password):
self.db['users'][login] = password
@request_type(auth.AuthRequest)
def handle_login(self, request):
if request.client_id not in self.db['licenses']:
raise exceptions.LicenseNotFound(request.client_id)
if request.instance_count is None:
raise exceptions.LisBaseException(654)
if request.login not in self.db['users']:
raise exceptions.UnknownUser()
if self.db['users'][request.login] != request.password:
raise exceptions.AuthentificationError()
sessionid = self.get_session_id()
self.db['sessions'].add(sessionid)
return auth.AuthResponse(
sessionid=sessionid,
buildnumber=self.build_number,
version=self.server_version
)
@request_type(PhoxRequest)
def handle_logout(self, request):
if request.sessionid not in self.db['sessions']:
raise exceptions.UnknownSession()
self.db['sessions'].remove(request.sessionid)
return PhoxResponse(
buildnumber=request.buildnumber,
sessionid=request.sessionid,
version=request.version
)
| Python | 0.000002 |
e2cbc0a3acf793ca8c45eb17cb0071a254a7e2b7 | Update parse_indepexpends.py | server/src/datasource/parse_indepexpends.py | server/src/datasource/parse_indepexpends.py | from datasource import fec
from datasource import propublica
import os
FEC_APIKEY = os.getenv('FEC_API_KEY', '')
ProPublica_APIKEY = os.getenv('PP_API_KEY', '')
FecApiObj = fec.FECAPI(FEC_APIKEY)
committees = FecApiObj.get_committees()
PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY)
PPCongressApi = propublica.CongressAPI(ProPublica_APIKEY)
legislator_index = list()
legislators = PPCongressApi.list_members('house')["results"][0]["members"]
for legislator in legislators:
name = str(legislator['first_name']) + " " + str(legislator['last_name'])
legislator_index.append(name)
legislators = PPCongressApi.list_members('senate')["results"][0]["members"]
for legislator in legislators:
name = str(legislator['first_name']) + " " + str(legislator['last_name'])
legislator_index.append(name)
for committee in committees:
if(2016 in committee['cycles']):
indepExpend = PPCampFinObj.get_indep_expends(str(committee['committee_id']))
for expend in indepExpend["results"]:
if(expend['candidate_name'] in legislator_index):
#expend fo a particular expenditure
| from datasource import fec
from datasource import propublica
import os
FEC_APIKEY = os.getenv('FEC_API_KEY', '')
ProPublica_APIKEY = os.getenv('PP_API_KEY', '')
FecApiObj = fec.FECAPI(FEC_APIKEY)
committees = FecApiObj.get_committees()
PPCampFinObj = propublica.CampaignFinanceAPI(ProPublica_APIKEY)
datafile = open("IndepExpends.json", 'w')
for committee in committees:
if(2016 in committee['cycles']):
indepExpend = PPCampFinObj.get_indep_expends(str(committee['committee_id']))
datafile.write(str(indepExpend))
datafile.close()
| Python | 0 |
0c450f52bfd30b694cea19a80fed900b22a39b90 | Update nbgrader/plugins/export.py | nbgrader/plugins/export.py | nbgrader/plugins/export.py | from traitlets import Unicode, List
from .base import BasePlugin
from ..api import MissingEntry
class ExportPlugin(BasePlugin):
"""Base class for export plugins."""
to = Unicode("", help="destination to export to").tag(config=True)
student = List([],
help="list of students to export").tag(config=True)
assignment = List([],
help="list of assignments to export").tag(config=True)
def export(self, gradebook):
"""Export grades to another format.
This method MUST be implemented by subclasses. Users should be able to
pass the ``--to`` flag on the command line, which will set the
``self.to`` variable. By default, this variable will be an empty string,
which allows you to specify whatever default you would like.
Arguments
---------
gradebook: :class:`nbgrader.api.Gradebook`
An instance of the gradebook
"""
raise NotImplementedError
class CsvExportPlugin(ExportPlugin):
"""CSV exporter plugin."""
def export(self, gradebook):
if self.to == "":
dest = "grades.csv"
else:
dest = self.to
if len(self.student) == 0:
allstudents = False
else:
# make sure studentID(s) are a list of strings
allstudents = [str(item) for item in self.student]
if len(self.assignment) == 0:
allassignments = []
else:
# make sure assignment(s) are a list of strings
allassignments = [str(item) for item in self.assignment]
self.log.info("Exporting grades to %s", dest)
if allassignments:
self.log.info("Exporting only assignments: %s", allassignments)
if allstudents:
self.log.info("Exporting only students: %s", allstudents)
fh = open(dest, "w")
keys = [
"assignment",
"duedate",
"timestamp",
"student_id",
"last_name",
"first_name",
"email",
"raw_score",
"late_submission_penalty",
"score",
"max_score"
]
fh.write(",".join(keys) + "\n")
fmt = ",".join(["{" + x + "}" for x in keys]) + "\n"
# Loop over each assignment in the database
for assignment in gradebook.assignments:
# only continue if assignment is required
if allassignments and assignment.name not in allassignments:
continue
# Loop over each student in the database
for student in gradebook.students:
# only continue if student is required
if allstudents and student.id not in allstudents:
continue
# Create a dictionary that will store information
# about this student's submitted assignment
score = {}
score['assignment'] = assignment.name
score['duedate'] = assignment.duedate
score['student_id'] = student.id
score['last_name'] = student.last_name
score['first_name'] = student.first_name
score['email'] = student.email
score['max_score'] = assignment.max_score
# Try to find the submission in the database. If it
# doesn't exist, the `MissingEntry` exception will be
# raised, which means the student didn't submit
# anything, so we assign them a score of zero.
try:
submission = gradebook.find_submission(
assignment.name, student.id)
except MissingEntry:
score['timestamp'] = ''
score['raw_score'] = 0.0
score['late_submission_penalty'] = 0.0
score['score'] = 0.0
else:
penalty = submission.late_submission_penalty
score['timestamp'] = submission.timestamp
score['raw_score'] = submission.score
score['late_submission_penalty'] = penalty
score['score'] = max(0.0, submission.score - penalty)
for key in score:
if score[key] is None:
score[key] = ''
if not isinstance(score[key], str):
score[key] = str(score[key])
fh.write(fmt.format(**score))
fh.close()
| from traitlets import Unicode, List
from .base import BasePlugin
from ..api import MissingEntry
class ExportPlugin(BasePlugin):
"""Base class for export plugins."""
to = Unicode("", help="destination to export to").tag(config=True)
student = List([],
help="list of students to export").tag(config=True)
assignment = List([],
help="list of assignments to export").tag(config=True)
def export(self, gradebook):
"""Export grades to another format.
This method MUST be implemented by subclasses. Users should be able to
pass the ``--to`` flag on the command line, which will set the
``self.to`` variable. By default, this variable will be an empty string,
which allows you to specify whatever default you would like.
Arguments
---------
gradebook: :class:`nbgrader.api.Gradebook`
An instance of the gradebook
"""
raise NotImplementedError
class CsvExportPlugin(ExportPlugin):
"""CSV exporter plugin."""
def export(self, gradebook):
if self.to == "":
dest = "grades.csv"
else:
dest = self.to
if len(self.student) == 0:
allstudents = False
else:
# make sure studentID(s) are a list of strings
allstudents = [str(item) for item in self.student]
if len(self.assignment) == 0:
allassignments = False
else:
# make sure assignment(s) are a list of strings
allassignments = [str(item) for item in self.assignment]
self.log.info("Exporting grades to %s", dest)
if allassignments:
self.log.info("Exporting only assignments: %s", allassignments)
if allstudents:
self.log.info("Exporting only students: %s", allstudents)
fh = open(dest, "w")
keys = [
"assignment",
"duedate",
"timestamp",
"student_id",
"last_name",
"first_name",
"email",
"raw_score",
"late_submission_penalty",
"score",
"max_score"
]
fh.write(",".join(keys) + "\n")
fmt = ",".join(["{" + x + "}" for x in keys]) + "\n"
# Loop over each assignment in the database
for assignment in gradebook.assignments:
# only continue if assignment is required
if allassignments and assignment.name not in allassignments:
continue
# Loop over each student in the database
for student in gradebook.students:
# only continue if student is required
if allstudents and student.id not in allstudents:
continue
# Create a dictionary that will store information
# about this student's submitted assignment
score = {}
score['assignment'] = assignment.name
score['duedate'] = assignment.duedate
score['student_id'] = student.id
score['last_name'] = student.last_name
score['first_name'] = student.first_name
score['email'] = student.email
score['max_score'] = assignment.max_score
# Try to find the submission in the database. If it
# doesn't exist, the `MissingEntry` exception will be
# raised, which means the student didn't submit
# anything, so we assign them a score of zero.
try:
submission = gradebook.find_submission(
assignment.name, student.id)
except MissingEntry:
score['timestamp'] = ''
score['raw_score'] = 0.0
score['late_submission_penalty'] = 0.0
score['score'] = 0.0
else:
penalty = submission.late_submission_penalty
score['timestamp'] = submission.timestamp
score['raw_score'] = submission.score
score['late_submission_penalty'] = penalty
score['score'] = max(0.0, submission.score - penalty)
for key in score:
if score[key] is None:
score[key] = ''
if not isinstance(score[key], str):
score[key] = str(score[key])
fh.write(fmt.format(**score))
fh.close()
| Python | 0 |
e751329b8aacdf51b70537be47172386deaded63 | Fix alembic env | alembic/env.py | alembic/env.py | from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
import os,sys
parentdir = os.path.abspath(os.path.join('.', '.'))
sys.path.insert(0,parentdir)
from rootio.app import create_app
from rootio.extensions import db
app = create_app()
config.set_main_option("sqlalchemy.url", app.config["SQLALCHEMY_DATABASE_URI"])
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
import os,sys
parentdir = os.path.abspath(os.path.join('.', '.'))
sys.path.insert(0,parentdir)
from rootio import create_app
from rootio.extensions import db
app = create_app()
config.set_main_option("sqlalchemy.url", app.config["SQLALCHEMY_DATABASE_URI"])
target_metadata = db.Model.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
compare_type=True
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
| Python | 0.999689 |
564b434c2fd7fadc5d467fe884e5bd88b794acc3 | Fix config. | sample-config.py | sample-config.py | # -*- coding: utf-8 -*-
"""
Example configuration for GEAStarterKit
"""
##
## Authentication/authorizationc config
import authomatic
from authomatic.providers import oauth2
from collections import OrderedDict
AUTHOMATIC_CONFIG = OrderedDict([
('google', {
'name': 'Google',
'id': 1000,
'icon': 'google'
}),
# ('github', {
# 'name': 'Github',
#
# 'class_': oauth2.GitHub,
# 'consumer_key': 'ADD YOURS',
# 'consumer_secret': 'AD YOURS',
#
# 'id': 2000,
#
# 'icon': 'github',
#
# 'scope': ['user:email']
# }),
])
import os
if os.environ.get('SERVER_SOFTWARE', '').startswith('Development') or os.environ.get('SERVER_SOFTWARE', '') == '':
SECRET_STRING = 'YOUR SECRET KEY'
DEVELOPMENT = True
else:
SECRET_STRING = 'YOUR SECRET KEY'
DEVELOPMENT = False
#
# Talisman security
import talisman
csp_policy = {
# Fonts from fonts.google.com
'font-src': "'self' themes.googleusercontent.com *.gstatic.com",
# <iframe> based embedding for Maps and Youtube.
'frame-src': "'self' www.google.com www.youtube.com",
# Assorted Google-hosted Libraries/APIs.
'script-src': "'self' ajax.googleapis.com *.googleanalytics.com "
"*.google-analytics.com",
# Used by generated code from http://www.google.com/fonts
'style-src': "'self' ajax.googleapis.com fonts.googleapis.com "
"*.gstatic.com",
# gravatar
'img-src': "'self' *.gravatar.com",
# Other
'default-src': "'self' *.gstatic.com",
}
enable_talisman = False
talisman_config = dict(
force_https=True,
force_https_permanent=False,
frame_options=talisman.SAMEORIGIN,
frame_options_allow_from=None,
strict_transport_security=True,
strict_transport_security_max_age=31556926, # One year in seconds
strict_transport_security_include_subdomains=True,
content_security_policy=csp_policy,
session_cookie_secure=True,
session_cookie_http_only=True
)
#
# Origin address for system emails.
email_from_address = 'root@localhost'
#
# Options for login manager
max_days_verification = 30
max_hours_password_reset = 48
#
# How long to time.sleep() when an invalid login, token, or similar is tried.
security_wait = 3
#
# Languages application supports
languages = OrderedDict([
('en', u'English'),
('es', u'Español'),
('fr', u'Français')
])
#
# Whether to use Paste debug panel while in development
enable_debug_panel = DEVELOPMENT
#
# Where to send user when he logs in if nothing else is set.
default_view = 'users.profile'
#
# Name of the site/product
site_name = 'GAEStarterKit'
#
# Domain name for email links
email_domain = 'http://localhost:8080'
#
# What to import automatically
install_apps = [
'apps.welcomekit',
'apps.simplecms',
'apps.error_pages',
'apps.users',
'apps.tenants',
'apps.email',
'apps.admin',
]
| # -*- coding: utf-8 -*-
"""
Example configuration for GEAStarterKit
"""
##
## Authentication/authorizationc config
import authomatic
from authomatic.providers import oauth2
from collections import OrderedDict
AUTHOMATIC_CONFIG = OrderedDict([
('google', {
'name': 'Google',
'id': 1000,
'icon': 'google'
}),
# ('github', {
# 'name': 'Github',
#
# 'class_': oauth2.GitHub,
# 'consumer_key': 'ADD YOURS',
# 'consumer_secret': 'AD YOURS',
#
# 'id': 2000,
#
# 'icon': 'github',
#
# 'scope': ['user:email']
# }),
])
import os
if os.environ.get('SERVER_SOFTWARE', '').startswith('Development') or os.environ.get('SERVER_SOFTWARE', '') == '':
SECRET_STRING = 'YOUR SECRET KEY'
DEVELOPMENT = True
else:
SECRET_STRING = 'YOUR SECRET KEY'
DEVELOPMENT = False
#
# Talisman security
import talisman
csp_policy = {
# Fonts from fonts.google.com
'font-src': "'self' themes.googleusercontent.com *.gstatic.com",
# <iframe> based embedding for Maps and Youtube.
'frame-src': "'self' www.google.com www.youtube.com",
# Assorted Google-hosted Libraries/APIs.
'script-src': "'self' ajax.googleapis.com *.googleanalytics.com "
"*.google-analytics.com",
# Used by generated code from http://www.google.com/fonts
'style-src': "'self' ajax.googleapis.com fonts.googleapis.com "
"*.gstatic.com",
# gravatar
'img-src': "'self' *.gravatar.com",
# Other
'default-src': "'self' *.gstatic.com",
}
enable_talisman = False
talisman_config = dict(
force_https=True,
force_https_permanent=False,
frame_options=talisman.SAMEORIGIN,
frame_options_allow_from=None,
strict_transport_security=True,
strict_transport_security_max_age=31556926, # One year in seconds
strict_transport_security_include_subdomains=True,
content_security_policy=csp_policy,
session_cookie_secure=True,
session_cookie_http_only=True
)
#
# Origin address for system emails.
email_from_address = 'root@localhost'
#
# Options for login manager
max_days_verification = 30
max_hours_password_reset = 48
#
# How long to time.sleep() when an invalid login, token, or similar is tried.
security_wait = 3
#
# Languages application supports
languages = OrderedDict([
('en', 'English'),
('es', 'Español'),
('fr', 'Français')
])
#
# Whether to use Paste debug panel while in development
enable_debug_panel = DEVELOPMENT
#
# Where to send user when he logs in if nothing else is set.
default_view = 'users.profile'
#
# Name of the site/product
site_name = 'GAEStarterKit'
#
# Domain name for email links
email_domain = 'http://localhost:8080'
#
# What to import automatically
install_apps = [
'apps.welcomekit',
'apps.simplecms',
'apps.error_pages',
'apps.users',
'apps.tenants',
'apps.email',
'apps.admin',
]
| Python | 0 |
b76e91c4517e52528f8543fce276ff4b5af9a4f6 | fix temp file creation to something more multiplatform friendly | burp_reports/lib/files.py | burp_reports/lib/files.py | import tempfile
import os
def temp_file(file='temporal'):
"""
return: str with tempfilename
"""
# Append uid to end of filename
file += '_{}'.format(os.getlogin())
# Simplified and reutilized core funtionally from python
cache_path = os.path.join(tempfile.gettempdir(), file)
return cache_path
| import tempfile
import os
def temp_file(file='temporal'):
"""
return: str with tempfilename
"""
# Append uid to end of filename
file += '_{}'.format(os.getuid())
# Simplified and reutilized core funtionally from python
cache_path = os.path.join(tempfile.gettempdir(), file)
return cache_path
| Python | 0 |
632b86289ef643381c954adeca1f58c78e2aa8d5 | Add documentation for plugins | cactus/plugin/defaults.py | cactus/plugin/defaults.py | #coding:utf-8
# Define no-op plugin methods
def preBuildPage(page, context, data):
"""
Called prior to building a page.
:param page: The page about to be built
:param context: The context for this page (you can modify this, but you must return it)
:param data: The raw body for this page (you can modify this).
:returns: Modified (or not) context and data.
"""
return context, data
def postBuildPage(page):
"""
Called after building a page.
:param page: The page that was just built.
:returns: None
"""
pass
def preBuildStatic(static):
"""
Called before building (copying to the build folder) a static file.
:param static: The static file about to be built.
:returns: None
"""
pass
def postBuildStatic(static):
"""
Called after building (copying to the build folder) a static file.
:param static: The static file that was just built.
:returns: None
"""
pass
def preBuild(site):
"""
Called prior to building the site, after loading configuration, plugins and externals.
:param site: The site about to be built.
:returns: None
"""
pass
def postBuild(site):
"""
Called after building the site.
:param site: The site that was just built.
:returns: None
"""
pass
def preDeploy(site):
"""
Called prior to deploying the site (built files)
:param site: The site about to be deployed.
:returns: None
"""
pass
def postDeploy(site):
"""
Called after deploying the site (built files)
:param site: The site that was just built.
:returns: None
"""
pass
def preDeployFile(file):
"""
Called prior to deploying a single built file
:param file: The file about to be deployed.
:returns: None
"""
pass
ORDER = -1
DEFAULTS = [
'preBuildPage',
'postBuildPage',
'preBuildStatic',
'postBuildStatic',
'preBuild',
'postBuild',
'preDeploy',
'postDeploy',
'preDeployFile',
] | #coding:utf-8
# Define no-op plugin methods
def preBuildPage(page, context, data):
return context, data
def postBuildPage(page):
pass
def preBuildStatic(static):
pass
def postBuildStatic(static):
pass
def preBuild(site):
pass
def postBuild(site):
pass
def preDeploy(site):
pass
def postDeploy(site):
pass
def preDeployFile(file):
pass
ORDER = -1
DEFAULTS = [
'preBuildPage',
'postBuildPage',
'preBuildStatic',
'postBuildStatic',
'preBuild',
'postBuild',
'preDeploy',
'postDeploy',
'preDeployFile',
] | Python | 0 |
04fd80cda56a911289bca20c7ee1bd70ac263bd4 | set readonly from true to false because the cursor is hidded if readonly is true. | call_seq/TextEdit/rich.py | call_seq/TextEdit/rich.py | from PySide import QtCore
import pyqode.python
# public API
from pyqode.python.bootstrapper import Bootstrapper
from pyqode.python.modes import PyAutoCompleteMode
from pyqode.python.modes import CalltipsMode
from pyqode.python.modes import CommentsMode
from pyqode.python.modes import PyCodeCompletionMode, JediCompletionProvider
from pyqode.python.modes import PEP8CheckerMode
from pyqode.python.modes import PyAutoIndentMode
from pyqode.python.modes import PyFlakesCheckerMode
from pyqode.python.modes import PyHighlighterMode
from pyqode.python.modes import PyIndenterMode
from pyqode.python.modes import DEFAULT_DARK_STYLES
from pyqode.python.modes import DEFAULT_LIGHT_STYLES
from pyqode.python.modes import GoToAssignmentsMode
from pyqode.python.modes import DocumentAnalyserMode
from pyqode.python.panels import PreLoadPanel
from pyqode.python.panels import SymbolBrowserPanel
from pyqode.core.modes import CaretLineHighlighterMode
from pyqode.python.panels import QuickDocPanel
class RichTextEdit(pyqode.core.QCodeEdit):
def __init__(self):
super(RichTextEdit, self).__init__()
self.setLineWrapMode(self.NoWrap)
self.installPanel(pyqode.core.LineNumberPanel(),
pyqode.core.PanelPosition.LEFT)
self.installMode(pyqode.core.ZoomMode())
#self.installMode(pyqode.core.FileWatcherMode())
self.installMode(pyqode.core.SymbolMatcherMode())
self.installMode(pyqode.core.WordClickMode())
self.installMode(PyHighlighterMode(self.document()))
self.installMode(PyAutoIndentMode())
self.installMode(PyFlakesCheckerMode())
self.installMode(PEP8CheckerMode())
self.installMode(CalltipsMode())
self.installMode(PyIndenterMode())
self.installMode(GoToAssignmentsMode())
self.installPanel(QuickDocPanel(), pyqode.core.PanelPosition.BOTTOM)
self.installMode(CommentsMode())
self.installMode(CaretLineHighlighterMode())
self.setReadOnly(False) | from PySide import QtCore
import pyqode.python
# public API
from pyqode.python.bootstrapper import Bootstrapper
from pyqode.python.modes import PyAutoCompleteMode
from pyqode.python.modes import CalltipsMode
from pyqode.python.modes import CommentsMode
from pyqode.python.modes import PyCodeCompletionMode, JediCompletionProvider
from pyqode.python.modes import PEP8CheckerMode
from pyqode.python.modes import PyAutoIndentMode
from pyqode.python.modes import PyFlakesCheckerMode
from pyqode.python.modes import PyHighlighterMode
from pyqode.python.modes import PyIndenterMode
from pyqode.python.modes import DEFAULT_DARK_STYLES
from pyqode.python.modes import DEFAULT_LIGHT_STYLES
from pyqode.python.modes import GoToAssignmentsMode
from pyqode.python.modes import DocumentAnalyserMode
from pyqode.python.panels import PreLoadPanel
from pyqode.python.panels import SymbolBrowserPanel
from pyqode.core.modes import CaretLineHighlighterMode
from pyqode.python.panels import QuickDocPanel
class RichTextEdit(pyqode.core.QCodeEdit):
def __init__(self):
super(RichTextEdit, self).__init__()
self.setLineWrapMode(self.NoWrap)
self.installPanel(pyqode.core.LineNumberPanel(),
pyqode.core.PanelPosition.LEFT)
self.installMode(pyqode.core.ZoomMode())
#self.installMode(pyqode.core.FileWatcherMode())
self.installMode(pyqode.core.SymbolMatcherMode())
self.installMode(pyqode.core.WordClickMode())
self.installMode(PyHighlighterMode(self.document()))
self.installMode(PyAutoIndentMode())
self.installMode(PyFlakesCheckerMode())
self.installMode(PEP8CheckerMode())
self.installMode(CalltipsMode())
self.installMode(PyIndenterMode())
self.installMode(GoToAssignmentsMode())
self.installPanel(QuickDocPanel(), pyqode.core.PanelPosition.BOTTOM)
self.installMode(CommentsMode())
self.installMode(CaretLineHighlighterMode())
self.setReadOnly(True) | Python | 0 |
29aed8ce12734ac0489a8b4e4aa9b48ff4a320a7 | fix fail | client/cli.py | client/cli.py | #!/usr/bin/env python
import base64
import sys
import logging
import firehose.common as common
class CLI(common.FirehoseClient):
def __select(self, chums, prompt):
print prompt
for n, chum in enumerate(chums):
print "%02d> %s (%s)" % (n, chum.name, chum.keyid)
inp = raw_input("Enter ID number> ")
return chums[int(inp)]
def main(self, args=sys.argv):
common.FirehoseClient.__init__(self)
self.load_config()
try:
my_self = self.__select(self.get_identities(), "Select an identity to send as:")
my_chum = self.__select(self.get_chums(), "Select somebody to send to:")
self.set_identity(my_self)
self.start_recv_thread()
while True:
data = raw_input("Send to %s> " % my_chum.name)
cmd, _, args = data.partition(" ")
if cmd == "/me":
data = "ACT " + args
elif cmd == "/ping":
data = "PING 0"
else:
data = "MSG " + data
my_chum.send(data)
except (EOFError, KeyboardInterrupt):
pass
def on_msg(self, chum, target, message):
print "%s: %s" % (chum.name, message)
def on_act(self, chum, target, message):
print "* %s %s" % (chum.name, message)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(asctime)19.19s %(levelname)4.4s %(name)s: %(message)s")
module_log = logging.getLogger("firehose")
module_log.setLevel(logging.DEBUG)
module_log = logging.getLogger("gnupg")
module_log.setLevel(logging.INFO)
sys.exit(CLI().main(sys.argv))
| #!/usr/bin/env python
import base64
import sys
import logging
import firehose.common as common
class CLI(common.FirehoseClient):
def __select(self, chums, prompt):
print prompt
for n, chum in enumerate(chums):
print "%02d> %s (%s)" % (n, chum.name, chum.keyid)
inp = raw_input("Enter ID number> ")
return chums[int(inp)]
def main(self, args=sys.argv):
common.FirehoseClient.__init__(self)
self.load_config()
try:
my_self = self.__select(self.get_identities(), "Select an identity to send as:")
my_chum = self.__select(self.get_chums(), "Select somebody to send to:")
self.set_identity(my_self)
self.start_recv_thread()
while True:
data = raw_input("Send to %s> " % chum.name)
cmd, _, args = data.partition(" ")
if cmd == "/me":
data = "ACT " + args
elif cmd == "/ping":
data = "PING 0"
else:
data = "MSG " + data
my_chum.send(data)
except (EOFError, KeyboardInterrupt):
pass
def on_msg(self, chum, target, message):
print "%s: %s" % (chum.name, message)
def on_act(self, chum, target, message):
print "* %s %s" % (chum.name, message)
if __name__ == "__main__":
logging.basicConfig(level=logging.DEBUG, format="%(asctime)19.19s %(levelname)4.4s %(name)s: %(message)s")
module_log = logging.getLogger("firehose")
module_log.setLevel(logging.DEBUG)
module_log = logging.getLogger("gnupg")
module_log.setLevel(logging.INFO)
sys.exit(CLI().main(sys.argv))
| Python | 0.000003 |
f8ce7d7709c3b83e02dde352b8888f462be572ce | Make event handlers for Debugger non-filters with a priority of 100.0 (they aren't donig any filtering) | circuits/core/debugger.py | circuits/core/debugger.py | # Module: debugger
# Date: 2nd April 2006
# Author: James Mills, prologic at shortcircuit dot net dot au
"""
Debugger component used to debug each event in a system by printing
each event to sys.stderr or to a Logger Component instnace.
"""
import os
import sys
from cStringIO import StringIO
from handlers import handler
from components import Component
from circuits.tools import reprhandler
class Debugger(Component):
"""Create a new Debugger Component
Creates a new Debugger Component that filters all events in teh system
printing each event to sys.stderr or a Logger Component.
:var IgnoreEvents: list of events (str) to ignore
:var IgnoreChannels: list of channels (str) to ignore
:var enabled: Enabled/Disabled flag
:param log: Logger Component instnace or None (*default*)
"""
IgnoreEvents = []
IgnoreChannels = []
def __init__(self, errors=True, events=True, file=None, logger=None,
chop=False, **kwargs):
"initializes x; see x.__class__.__doc__ for signature"
super(Debugger, self).__init__()
self.errors = errors
self.events = events
if type(file) is str:
self.file = open(os.path.abspath(os.path.expanduser(file)), "a")
elif type(file) is file or hasattr(file, "write"):
self.file = file
else:
self.file = sys.stderr
self.logger = logger
self.chop = chop
self.IgnoreEvents.extend(kwargs.get("IgnoreEvents", []))
self.IgnoreChannels.extend(kwargs.get("IgnoreChannels", []))
@handler("exception", priority=100.0)
def exception(self, type, value, traceback, handler=None):
if not self.errors:
return
s = StringIO()
if handler is None:
handler = ""
else:
handler = reprhandler(self.root, handler)
s.write("ERROR %s(%s): %s\n" % ("%s " % handler, type, value))
s.write("%s\n" % "".join(traceback))
s.seek(0)
if self.logger is not None:
self.logger.error(s.getvalue())
else:
self.file.write(s.read())
self.file.flush()
s.close()
@handler(priority=100.0)
def event(self, event, *args, **kwargs):
"""Global Event Handler
Event handler to listen and filter all events printing each event
to self.file or a Logger Component instnace by calling self.logger.debug
"""
if not self.events:
return
channel = event.channel
if True in [event.name == x.__name__ for x in self.IgnoreEvents]:
return
elif channel in self.IgnoreChannels:
return
else:
if self.logger is not None:
self.logger.debug(repr(event))
else:
s = repr(event)
if self.file is sys.stderr and len(s) > 80 and self.chop:
s = "%s ...>" % s[:75]
self.file.write("%s\n" % s)
self.file.flush()
| # Module: debugger
# Date: 2nd April 2006
# Author: James Mills, prologic at shortcircuit dot net dot au
"""
Debugger component used to debug each event in a system by printing
each event to sys.stderr or to a Logger Component instnace.
"""
import os
import sys
from cStringIO import StringIO
from handlers import handler
from components import Component
from circuits.tools import reprhandler
class Debugger(Component):
"""Create a new Debugger Component
Creates a new Debugger Component that filters all events in teh system
printing each event to sys.stderr or a Logger Component.
:var IgnoreEvents: list of events (str) to ignore
:var IgnoreChannels: list of channels (str) to ignore
:var enabled: Enabled/Disabled flag
:param log: Logger Component instnace or None (*default*)
"""
IgnoreEvents = []
IgnoreChannels = []
def __init__(self, errors=True, events=True, file=None, logger=None,
chop=False, **kwargs):
"initializes x; see x.__class__.__doc__ for signature"
super(Debugger, self).__init__()
self.errors = errors
self.events = events
if type(file) is str:
self.file = open(os.path.abspath(os.path.expanduser(file)), "a")
elif type(file) is file or hasattr(file, "write"):
self.file = file
else:
self.file = sys.stderr
self.logger = logger
self.chop = chop
self.IgnoreEvents.extend(kwargs.get("IgnoreEvents", []))
self.IgnoreChannels.extend(kwargs.get("IgnoreChannels", []))
@handler("exception", filter=True)
def exception(self, type, value, traceback, handler=None):
if not self.errors:
return
s = StringIO()
if handler is None:
handler = ""
else:
handler = reprhandler(self.root, handler)
s.write("ERROR %s(%s): %s\n" % ("%s " % handler, type, value))
s.write("%s\n" % "".join(traceback))
s.seek(0)
if self.logger is not None:
self.logger.error(s.getvalue())
else:
self.file.write(s.read())
self.file.flush()
s.close()
@handler(filter=True)
def event(self, event, *args, **kwargs):
"""Global Event Handler
Event handler to listen and filter all events printing each event
to self.file or a Logger Component instnace by calling self.logger.debug
"""
if not self.events:
return
channel = event.channel
if True in [event.name == x.__name__ for x in self.IgnoreEvents]:
return
elif channel in self.IgnoreChannels:
return
else:
if self.logger is not None:
self.logger.debug(repr(event))
else:
s = repr(event)
if self.file is sys.stderr and len(s) > 80 and self.chop:
s = "%s ...>" % s[:75]
self.file.write("%s\n" % s)
self.file.flush()
| Python | 0 |
3157bbd5cca51ea2ac0c086a9337296c6652fafc | fix url order | citizendialer3000/urls.py | citizendialer3000/urls.py | from django.conf.urls.defaults import *
urlpatterns = patterns('citizendialer3000.views',
url(r'^$', 'callcampaign_list', name='call_list'),
url(r'^(?P<slug>[\w\-]+)/$', 'callcampaign_detail', name='call_campaign'),
url(r'^(?P<slug>[\w\-]+)/thankyou/$', 'complete', name='call_complete'),
url(r'^(?P<slug>[\w\-]+)/results/$', 'results', name='results'),
url(r'^(?P<slug>[\w\-]+)/results/calls.csv$', 'results_calls', name='results_calls'),
url(r'^(?P<slug>[\w\-]+)/results/summary.csv$', 'results_summary', name='results_summary'),
url(r'^(?P<slug>[\w\-]+)/(?P<bioguide_id>\w+)/$', 'contact_detail', name='call_contact'),
)
| from django.conf.urls.defaults import *
urlpatterns = patterns('citizendialer3000.views',
url(r'^$', 'callcampaign_list', name='call_list'),
url(r'^(?P<slug>[\w\-]+)/$', 'callcampaign_detail', name='call_campaign'),
url(r'^(?P<slug>[\w\-]+)/(?P<bioguide_id>\w+)/$', 'contact_detail', name='call_contact'),
url(r'^(?P<slug>[\w\-]+)/thankyou/$', 'complete', name='call_complete'),
url(r'^(?P<slug>[\w\-]+)/results/$', 'results', name='results'),
url(r'^(?P<slug>[\w\-]+)/results/calls.csv$', 'results_calls', name='results_calls'),
url(r'^(?P<slug>[\w\-]+)/results/summary.csv$', 'results_summary', name='results_summary'),
)
| Python | 0.982361 |
4e42da241c5edc43990778225ad84ae241973770 | Convert unicode in sa engine | ckanserviceprovider/db.py | ckanserviceprovider/db.py | import sqlalchemy as sa
engine = None
metadata = None
jobs_table = None
metadata_table = None
logs_table = None
def setup_db(app):
global engine, metadata
engine = sa.create_engine(app.config.get('SQLALCHEMY_DATABASE_URI'),
echo=app.config.get('SQLALCHEMY_ECHO'),
convert_unicode=True)
metadata = sa.MetaData(engine)
make_task_table()
metadata.create_all(engine)
def make_task_table():
global jobs_table, metadata_table, logs_table
jobs_table = sa.Table('jobs', metadata,
sa.Column('job_id', sa.UnicodeText,
primary_key=True),
sa.Column('job_type', sa.UnicodeText),
sa.Column('status', sa.UnicodeText,
index=True),
sa.Column('data', sa.UnicodeText),
sa.Column('error', sa.UnicodeText),
sa.Column('requested_timestamp', sa.DateTime),
sa.Column('finished_timestamp', sa.DateTime),
sa.Column('sent_data', sa.UnicodeText),
# Callback url
sa.Column('result_url', sa.UnicodeText),
# CKAN API key
sa.Column('api_key', sa.UnicodeText),
# Key to administer job
sa.Column('job_key', sa.UnicodeText)
)
metadata_table = sa.Table('metadata', metadata,
sa.Column('job_id', sa.UnicodeText,
primary_key=True),
sa.Column('key', sa.UnicodeText,
primary_key=True),
sa.Column('value', sa.UnicodeText,
index=True),
sa.Column('type', sa.UnicodeText),
)
logs_table = sa.Table('logs', metadata,
sa.Column('job_id', sa.UnicodeText,
index=True),
sa.Column('timestamp', sa.DateTime),
sa.Column('message', sa.UnicodeText),
sa.Column('name', sa.UnicodeText),
sa.Column('level', sa.UnicodeText),
sa.Column('module', sa.UnicodeText),
sa.Column('funcName', sa.UnicodeText),
sa.Column('lineno', sa.Integer)
)
| import sqlalchemy as sa
engine = None
metadata = None
jobs_table = None
metadata_table = None
logs_table = None
def setup_db(app):
global engine, metadata
engine = sa.create_engine(app.config.get('SQLALCHEMY_DATABASE_URI'),
echo=app.config.get('SQLALCHEMY_ECHO'))
metadata = sa.MetaData(engine)
make_task_table()
metadata.create_all(engine)
def make_task_table():
global jobs_table, metadata_table, logs_table
jobs_table = sa.Table('jobs', metadata,
sa.Column('job_id', sa.UnicodeText,
primary_key=True),
sa.Column('job_type', sa.UnicodeText),
sa.Column('status', sa.UnicodeText,
index=True),
sa.Column('data', sa.UnicodeText),
sa.Column('error', sa.UnicodeText),
sa.Column('requested_timestamp', sa.DateTime),
sa.Column('finished_timestamp', sa.DateTime),
sa.Column('sent_data', sa.UnicodeText),
# Callback url
sa.Column('result_url', sa.UnicodeText),
# CKAN API key
sa.Column('api_key', sa.UnicodeText),
# Key to administer job
sa.Column('job_key', sa.UnicodeText)
)
metadata_table = sa.Table('metadata', metadata,
sa.Column('job_id', sa.UnicodeText,
primary_key=True),
sa.Column('key', sa.UnicodeText,
primary_key=True),
sa.Column('value', sa.UnicodeText,
index=True),
sa.Column('type', sa.UnicodeText),
)
logs_table = sa.Table('logs', metadata,
sa.Column('job_id', sa.UnicodeText,
index=True),
sa.Column('timestamp', sa.DateTime),
sa.Column('message', sa.UnicodeText),
sa.Column('name', sa.UnicodeText),
sa.Column('level', sa.UnicodeText),
sa.Column('module', sa.UnicodeText),
sa.Column('funcName', sa.UnicodeText),
sa.Column('lineno', sa.Integer)
)
| Python | 0.999999 |
302934bfd8b30ee1b33cdfb60ca36021df153746 | improve cleanup process of test by removing the downloaded file | quantecon/util/tests/test_notebooks.py | quantecon/util/tests/test_notebooks.py | """
Tests for Notebook Utilities
Functions
---------
fetch_nb_dependencies
"""
from quantecon.util import fetch_nb_dependencies
import unittest
import os
FILES = ['README.md']
REPO = "https://github.com/QuantEcon/QuantEcon.py"
RAW = "raw"
BRANCH = "master"
class TestNotebookUtils(unittest.TestCase):
def test_fetch_nb_dependencies(self):
"""
Run First and Test Download
"""
status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH)
self.assertFalse(False in status)
def test_fetch_nb_dependencies_overwrite(self):
"""
Run Second and Ensure file is skipped by checking a False is found in status
"""
status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) #First will succeed
status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH) #Second should skip
self.assertTrue(False in status)
def tearDown(self):
os.remove("README.md") | """
Tests for Notebook Utilities
Functions
---------
fetch_nb_dependencies
"""
from quantecon.util import fetch_nb_dependencies
import unittest
FILES = ['README.md']
REPO = "https://github.com/QuantEcon/QuantEcon.py"
RAW = "raw"
BRANCH = "master"
class TestNotebookUtils(unittest.TestCase):
def test_fetch_nb_dependencies(self):
"""
Run First and Test Download
"""
status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH)
self.assertFalse(False in status)
def test_fetch_nb_dependencies_overwrite(self):
"""
Run Second and Ensure file is skipped by checking a False is found in status
"""
status = fetch_nb_dependencies(files=FILES, repo=REPO, raw=RAW, branch=BRANCH)
self.assertTrue(False in status) | Python | 0 |
02b7d5416ad55b78e256e58ed6a282681d1df48d | Add required get_model for Haystack 2.0 | readthedocs/projects/search_indexes.py | readthedocs/projects/search_indexes.py | # -*- coding: utf-8-*-
import codecs
import os
from django.utils.html import strip_tags
#from haystack import site
from haystack import indexes
from haystack.fields import CharField
#from celery_haystack.indexes import SearchIndex
from projects.models import File, ImportedFile, Project
import logging
log = logging.getLogger(__name__)
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True, use_template=True)
author = CharField()
title = CharField(model_attr='name')
description = CharField(model_attr='description')
repo_type = CharField(model_attr='repo_type')
def prepare_author(self, obj):
return obj.users.all()[0]
def get_model(self):
return Project
class FileIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True, use_template=True)
author = CharField()
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='heading')
def prepare_author(self, obj):
return obj.project.users.all()[0]
def get_model(self):
return File
#Should prob make a common subclass for this and FileIndex
class ImportedFileIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True)
author = CharField()
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='name')
def prepare_author(self, obj):
return obj.project.users.all()[0]
def prepare_text(self, obj):
"""
Prepare the text of the html file.
This only works on machines that have the html
files for the projects checked out.
"""
#Import this here to hopefully fix tests for now.
from pyquery import PyQuery
full_path = obj.project.rtd_build_path()
file_path = os.path.join(full_path, obj.path.lstrip('/'))
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('Unable to index file: %s, error :%s' % (file_path, e))
return
log.debug('Indexing %s' % obj.slug)
try:
to_index = strip_tags(PyQuery(content)("div.document").html()).replace(u'¶', '')
except ValueError:
#Pyquery returns ValueError if div.document doesn't exist.
return
return to_index
def get_model(self):
return ImportedFile
| # -*- coding: utf-8-*-
import codecs
import os
from django.utils.html import strip_tags
#from haystack import site
from haystack import indexes
from haystack.fields import CharField
#from celery_haystack.indexes import SearchIndex
from projects.models import File, ImportedFile, Project
import logging
log = logging.getLogger(__name__)
class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True, use_template=True)
author = CharField()
title = CharField(model_attr='name')
description = CharField(model_attr='description')
repo_type = CharField(model_attr='repo_type')
def prepare_author(self, obj):
return obj.users.all()[0]
class FileIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True, use_template=True)
author = CharField()
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='heading')
def prepare_author(self, obj):
return obj.project.users.all()[0]
#Should prob make a common subclass for this and FileIndex
class ImportedFileIndex(indexes.SearchIndex, indexes.Indexable):
text = CharField(document=True)
author = CharField()
project = CharField(model_attr='project__name', faceted=True)
title = CharField(model_attr='name')
def prepare_author(self, obj):
return obj.project.users.all()[0]
def prepare_text(self, obj):
"""
Prepare the text of the html file.
This only works on machines that have the html
files for the projects checked out.
"""
#Import this here to hopefully fix tests for now.
from pyquery import PyQuery
full_path = obj.project.rtd_build_path()
file_path = os.path.join(full_path, obj.path.lstrip('/'))
try:
with codecs.open(file_path, encoding='utf-8', mode='r') as f:
content = f.read()
except IOError as e:
log.info('Unable to index file: %s, error :%s' % (file_path, e))
return
log.debug('Indexing %s' % obj.slug)
try:
to_index = strip_tags(PyQuery(content)("div.document").html()).replace(u'¶', '')
except ValueError:
#Pyquery returns ValueError if div.document doesn't exist.
return
return to_index
| Python | 0 |
96877f2cb706a465c5e7fb4d316dbd82ff2cb432 | add comment | purelyjs/interpreter.py | purelyjs/interpreter.py | from .io import invoke
class Interpreter(object):
known_engines = ['js', 'rhino']
def __init__(self, exes=None):
engines = exes if exes else self.known_engines
self.exe = self.detect(engines)
if not self.exe:
raise ValueError("No js engine could be found, tried: %s"
% ', '.join(engines))
def detect(self, engines):
found = None
for engine in engines:
# NOTE: Very platform specific
success, stdout, stderr = invoke(['which', engine])
if success:
found = stdout
break
return found
def run_module(self, filepath):
success, stdout, stderr = invoke([self.exe, filepath])
return success, stderr
| from .io import invoke
class Interpreter(object):
known_engines = ['js', 'rhino']
def __init__(self, exes=None):
engines = exes if exes else self.known_engines
self.exe = self.detect(engines)
if not self.exe:
raise ValueError("No js engine could be found, tried: %s"
% ', '.join(engines))
def detect(self, engines):
found = None
for engine in engines:
success, stdout, stderr = invoke(['which', engine])
if success:
found = stdout
break
return found
def run_module(self, filepath):
success, stdout, stderr = invoke([self.exe, filepath])
return success, stderr
| Python | 0 |
b99ded7ddd0166d88111ced1a648bd9c79a8bbbe | mark xfail of test_get_psm3 (#803) | pvlib/test/test_psm3.py | pvlib/test/test_psm3.py | """
test iotools for PSM3
"""
import os
from pvlib.iotools import psm3
from conftest import needs_pandas_0_22
import numpy as np
import pandas as pd
import pytest
from requests import HTTPError
BASEDIR = os.path.abspath(os.path.dirname(__file__))
PROJDIR = os.path.dirname(BASEDIR)
DATADIR = os.path.join(PROJDIR, 'data')
TEST_DATA = os.path.join(DATADIR, 'test_psm3.csv')
LATITUDE, LONGITUDE = 40.5137, -108.5449
HEADER_FIELDS = [
'Source', 'Location ID', 'City', 'State', 'Country', 'Latitude',
'Longitude', 'Time Zone', 'Elevation', 'Local Time Zone',
'Dew Point Units', 'DHI Units', 'DNI Units', 'GHI Units',
'Temperature Units', 'Pressure Units', 'Wind Direction Units',
'Wind Speed', 'Surface Albedo Units', 'Version']
PVLIB_EMAIL = 'pvlib-admin@googlegroups.com'
DEMO_KEY = 'DEMO_KEY'
@pytest.mark.xfail(strict=True)
@needs_pandas_0_22
def test_get_psm3():
"""test get_psm3"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL)
expected = pd.read_csv(TEST_DATA)
# check datevec columns
assert np.allclose(data.Year, expected.Year)
assert np.allclose(data.Month, expected.Month)
assert np.allclose(data.Day, expected.Day)
assert np.allclose(data.Hour, expected.Hour)
# XXX: unclear if NSRDB changes to timesteps are permanent or temporary
# assert np.allclose(data.Minute, expected.Minute)
# check data columns
assert np.allclose(data.GHI, expected.GHI)
assert np.allclose(data.DNI, expected.DNI)
assert np.allclose(data.DHI, expected.DHI)
assert np.allclose(data.Temperature, expected.Temperature)
assert np.allclose(data.Pressure, expected.Pressure)
assert np.allclose(data['Dew Point'], expected['Dew Point'])
assert np.allclose(data['Surface Albedo'], expected['Surface Albedo'])
assert np.allclose(data['Wind Speed'], expected['Wind Speed'])
assert np.allclose(data['Wind Direction'], expected['Wind Direction'])
# check header
for hf in HEADER_FIELDS:
assert hf in header
# check timezone
assert (data.index.tzinfo.zone == 'Etc/GMT%+d' % -header['Time Zone'])
# check errors
with pytest.raises(HTTPError):
# HTTP 403 forbidden because api_key is rejected
psm3.get_psm3(LATITUDE, LONGITUDE, api_key='BAD', email=PVLIB_EMAIL)
with pytest.raises(HTTPError):
# coordinates were not found in the NSRDB
psm3.get_psm3(51, -5, DEMO_KEY, PVLIB_EMAIL)
with pytest.raises(HTTPError):
# names is not one of the available options
psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL, names='bad')
with pytest.raises(HTTPError):
# intervals can only be 30 or 60 minutes
psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL, interval=15)
| """
test iotools for PSM3
"""
import os
from pvlib.iotools import psm3
from conftest import needs_pandas_0_22
import numpy as np
import pandas as pd
import pytest
from requests import HTTPError
BASEDIR = os.path.abspath(os.path.dirname(__file__))
PROJDIR = os.path.dirname(BASEDIR)
DATADIR = os.path.join(PROJDIR, 'data')
TEST_DATA = os.path.join(DATADIR, 'test_psm3.csv')
LATITUDE, LONGITUDE = 40.5137, -108.5449
HEADER_FIELDS = [
'Source', 'Location ID', 'City', 'State', 'Country', 'Latitude',
'Longitude', 'Time Zone', 'Elevation', 'Local Time Zone',
'Dew Point Units', 'DHI Units', 'DNI Units', 'GHI Units',
'Temperature Units', 'Pressure Units', 'Wind Direction Units',
'Wind Speed', 'Surface Albedo Units', 'Version']
PVLIB_EMAIL = 'pvlib-admin@googlegroups.com'
DEMO_KEY = 'DEMO_KEY'
@needs_pandas_0_22
def test_get_psm3():
"""test get_psm3"""
header, data = psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL)
expected = pd.read_csv(TEST_DATA)
# check datevec columns
assert np.allclose(data.Year, expected.Year)
assert np.allclose(data.Month, expected.Month)
assert np.allclose(data.Day, expected.Day)
assert np.allclose(data.Hour, expected.Hour)
# XXX: unclear if NSRDB changes to timesteps are permanent or temporary
# assert np.allclose(data.Minute, expected.Minute)
# check data columns
assert np.allclose(data.GHI, expected.GHI)
assert np.allclose(data.DNI, expected.DNI)
assert np.allclose(data.DHI, expected.DHI)
assert np.allclose(data.Temperature, expected.Temperature)
assert np.allclose(data.Pressure, expected.Pressure)
assert np.allclose(data['Dew Point'], expected['Dew Point'])
assert np.allclose(data['Surface Albedo'], expected['Surface Albedo'])
assert np.allclose(data['Wind Speed'], expected['Wind Speed'])
assert np.allclose(data['Wind Direction'], expected['Wind Direction'])
# check header
for hf in HEADER_FIELDS:
assert hf in header
# check timezone
assert (data.index.tzinfo.zone == 'Etc/GMT%+d' % -header['Time Zone'])
# check errors
with pytest.raises(HTTPError):
# HTTP 403 forbidden because api_key is rejected
psm3.get_psm3(LATITUDE, LONGITUDE, api_key='BAD', email=PVLIB_EMAIL)
with pytest.raises(HTTPError):
# coordinates were not found in the NSRDB
psm3.get_psm3(51, -5, DEMO_KEY, PVLIB_EMAIL)
with pytest.raises(HTTPError):
# names is not one of the available options
psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL, names='bad')
with pytest.raises(HTTPError):
# intervals can only be 30 or 60 minutes
psm3.get_psm3(LATITUDE, LONGITUDE, DEMO_KEY, PVLIB_EMAIL, interval=15)
| Python | 0 |
494e7ae13c7b8c0ef4a65cb0b005578f8a0d2857 | Fix canary command | pwndbg/commands/misc.py | pwndbg/commands/misc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import errno as _errno
import struct
import gdb
import pwndbg as _pwndbg
import pwndbg.arch as _arch
import pwndbg.auxv
import pwndbg.commands
import pwndbg.regs
import pwndbg.symbol
_errno.errorcode[0] = 'OK'
parser = argparse.ArgumentParser(description='''
Converts errno (or argument) to its string representation.
''')
parser.add_argument('err', type=int, nargs='?', default=None, help='Errno; if not passed, it is retrieved from __errno_location')
@_pwndbg.commands.ArgparsedCommand(parser)
def errno(err):
if err is None:
# Dont ask.
errno_location = pwndbg.symbol.get('__errno_location')
err = pwndbg.memory.int(errno_location)
# err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))
err = abs(int(err))
if err >> 63:
err -= (1<<64)
elif err >> 31:
err -= (1<<32)
msg = _errno.errorcode.get(int(err), "Unknown error code")
print("Errno %i: %s" % (err, msg))
parser = argparse.ArgumentParser(description='''
Prints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.
''')
parser.add_argument('filter_pattern', type=str, nargs='?', default=None, help='Filter to apply to commands names/docs')
@_pwndbg.commands.ArgparsedCommand(parser)
def pwndbg(filter_pattern):
sorted_commands = list(_pwndbg.commands._Command.commands)
sorted_commands.sort(key=lambda x: x.__name__)
if filter_pattern:
filter_pattern = filter_pattern.lower()
for c in sorted_commands:
name = c.__name__
docs = c.__doc__
if docs: docs = docs.strip()
if docs: docs = docs.splitlines()[0]
if not filter_pattern or filter_pattern in name.lower() or (docs and filter_pattern in docs.lower()):
print("%-20s %s" % (name, docs))
@_pwndbg.commands.ParsedCommand
def distance(a, b):
'''Print the distance between the two arguments'''
a = int(a) & _arch.ptrmask
b = int(b) & _arch.ptrmask
distance = (b-a)
print("%#x->%#x is %#x bytes (%#x words)" % (a, b, distance, distance // _arch.ptrsize))
@_pwndbg.commands.Command
def canary():
"""Print out the current stack canary"""
auxv = _pwndbg.auxv.get()
at_random = auxv.get('AT_RANDOM', None)
if at_secure is not None:
print("AT_RANDOM=%#x" % at_secure)
else:
print("Couldn't find AT_RANDOM")
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import errno as _errno
import struct
import gdb
import pwndbg as _pwndbg
import pwndbg.arch as _arch
import pwndbg.auxv
import pwndbg.commands
import pwndbg.regs
import pwndbg.symbol
_errno.errorcode[0] = 'OK'
parser = argparse.ArgumentParser(description='''
Converts errno (or argument) to its string representation.
''')
parser.add_argument('err', type=int, nargs='?', default=None, help='Errno; if not passed, it is retrieved from __errno_location')
@_pwndbg.commands.ArgparsedCommand(parser)
def errno(err):
if err is None:
# Dont ask.
errno_location = pwndbg.symbol.get('__errno_location')
err = pwndbg.memory.int(errno_location)
# err = int(gdb.parse_and_eval('*((int *(*) (void)) __errno_location) ()'))
err = abs(int(err))
if err >> 63:
err -= (1<<64)
elif err >> 31:
err -= (1<<32)
msg = _errno.errorcode.get(int(err), "Unknown error code")
print("Errno %i: %s" % (err, msg))
parser = argparse.ArgumentParser(description='''
Prints out a list of all pwndbg commands. The list can be optionally filtered if filter_pattern is passed.
''')
parser.add_argument('filter_pattern', type=str, nargs='?', default=None, help='Filter to apply to commands names/docs')
@_pwndbg.commands.ArgparsedCommand(parser)
def pwndbg(filter_pattern):
sorted_commands = list(_pwndbg.commands._Command.commands)
sorted_commands.sort(key=lambda x: x.__name__)
if filter_pattern:
filter_pattern = filter_pattern.lower()
for c in sorted_commands:
name = c.__name__
docs = c.__doc__
if docs: docs = docs.strip()
if docs: docs = docs.splitlines()[0]
if not filter_pattern or filter_pattern in name.lower() or (docs and filter_pattern in docs.lower()):
print("%-20s %s" % (name, docs))
@_pwndbg.commands.ParsedCommand
def distance(a, b):
'''Print the distance between the two arguments'''
a = int(a) & _arch.ptrmask
b = int(b) & _arch.ptrmask
distance = (b-a)
print("%#x->%#x is %#x bytes (%#x words)" % (a, b, distance, distance // _arch.ptrsize))
@_pwndbg.commands.Command
def canary():
"""Print out the current stack canary"""
auxv = pwndbg.auxv.get()
if 'AT_SECURE' in auxv:
print("AT_SECURE=%#x" % auxv['AT_SECURE'])
else:
print("Couldn't find AT_SECURE")
| Python | 0.000011 |
5aa90e98abcfafa9036f8cc19cd49b33aa638181 | update dev version after 0.26.0 tag [skip ci] | py/desispec/_version.py | py/desispec/_version.py | __version__ = '0.26.0.dev3104'
| __version__ = '0.26.0'
| Python | 0 |
b59b0e12a0f5fc83d69d9eaa1f7652e8e1b4ac81 | Improve tuple and list converters | pybinding/utils/misc.py | pybinding/utils/misc.py | from functools import wraps
import numpy as np
def to_tuple(o):
try:
return tuple(o)
except TypeError:
return (o,) if o is not None else ()
def to_list(o):
try:
return list(o)
except TypeError:
return [o] if o is not None else []
def with_defaults(options: dict, defaults_dict: dict=None, **defaults_kwargs):
"""Return a dict where missing keys are filled in by defaults
>>> options = dict(hello=0)
>>> with_defaults(options, hello=4, world=5) == dict(hello=0, world=5)
True
>>> defaults = dict(hello=4, world=5)
>>> with_defaults(options, defaults) == dict(hello=0, world=5)
True
>>> with_defaults(options, defaults, world=7, yes=3) == dict(hello=0, world=5, yes=3)
True
"""
options = options if options else {}
if defaults_dict:
options = dict(defaults_dict, **options)
return dict(defaults_kwargs, **options)
def x_pi(value):
"""Return str of value in 'multiples of pi' latex representation
>>> x_pi(6.28) == r"$2\pi$"
True
>>> x_pi(3) == r"$0.95\pi$"
True
>>> x_pi(-np.pi) == r"$-\pi$"
True
>>> x_pi(0) == "0"
True
"""
n = value / np.pi
if np.isclose(n, 0):
return "0"
elif np.isclose(abs(n), 1):
return r"$\pi$" if n > 0 else r"$-\pi$"
else:
return r"${:.2g}\pi$".format(n)
def decorator_decorator(decorator_wrapper):
"""A decorator decorator which allows it to be used with or without arguments
Parameters
----------
decorator_wrapper : Callable[[Any], Callable]
Examples
--------
>>> @decorator_decorator
... def decorator_wrapper(optional="default"):
... def actual_decorator(func):
... return lambda x: func(x, optional)
... return actual_decorator
>>> @decorator_wrapper("hello")
... def foo(x, y):
... print(x, y)
>>> foo(1)
1 hello
>>> @decorator_wrapper
... def bar(x, y):
... print(x, y)
>>> bar(2)
2 default
"""
@wraps(decorator_wrapper)
def new_wrapper(*args, **kwargs):
if len(args) == 1 and not kwargs and (isinstance(args[0], type) or callable(args[0])):
return decorator_wrapper()(args[0])
else:
return lambda cls_or_func: decorator_wrapper(*args, **kwargs)(cls_or_func)
return new_wrapper
| from functools import wraps
import numpy as np
def to_tuple(o):
if isinstance(o, (tuple, list)):
return tuple(o)
else:
return o,
def with_defaults(options: dict, defaults_dict: dict=None, **defaults_kwargs):
"""Return a dict where missing keys are filled in by defaults
>>> options = dict(hello=0)
>>> with_defaults(options, hello=4, world=5) == dict(hello=0, world=5)
True
>>> defaults = dict(hello=4, world=5)
>>> with_defaults(options, defaults) == dict(hello=0, world=5)
True
>>> with_defaults(options, defaults, world=7, yes=3) == dict(hello=0, world=5, yes=3)
True
"""
options = options if options else {}
if defaults_dict:
options = dict(defaults_dict, **options)
return dict(defaults_kwargs, **options)
def x_pi(value):
"""Return str of value in 'multiples of pi' latex representation
>>> x_pi(6.28) == r"$2\pi$"
True
>>> x_pi(3) == r"$0.95\pi$"
True
>>> x_pi(-np.pi) == r"$-\pi$"
True
>>> x_pi(0) == "0"
True
"""
n = value / np.pi
if np.isclose(n, 0):
return "0"
elif np.isclose(abs(n), 1):
return r"$\pi$" if n > 0 else r"$-\pi$"
else:
return r"${:.2g}\pi$".format(n)
def decorator_decorator(decorator_wrapper):
"""A decorator decorator which allows it to be used with or without arguments
Parameters
----------
decorator_wrapper : Callable[[Any], Callable]
Examples
--------
>>> @decorator_decorator
... def decorator_wrapper(optional="default"):
... def actual_decorator(func):
... return lambda x: func(x, optional)
... return actual_decorator
>>> @decorator_wrapper("hello")
... def foo(x, y):
... print(x, y)
>>> foo(1)
1 hello
>>> @decorator_wrapper
... def bar(x, y):
... print(x, y)
>>> bar(2)
2 default
"""
@wraps(decorator_wrapper)
def new_wrapper(*args, **kwargs):
if len(args) == 1 and not kwargs and (isinstance(args[0], type) or callable(args[0])):
return decorator_wrapper()(args[0])
else:
return lambda cls_or_func: decorator_wrapper(*args, **kwargs)(cls_or_func)
return new_wrapper
| Python | 0.000001 |
ee5a85df1d2db8babd8d6df6a188137051c3a48e | Change the improvement policies due to reorganizing reggie. | pybo/policies/simple.py | pybo/policies/simple.py | """
Acquisition functions based on the probability or expected value of
improvement.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = ['EI', 'PI', 'UCB', 'Thompson']
def EI(model, _, xi=0.0):
"""
Expected improvement policy with an exploration parameter of `xi`.
"""
target = model.predict(model.data[0])[0].max() + xi
def index(X, grad=False):
"""EI policy instance."""
return model.get_improvement(X, target, grad)
return index
def PI(model, _, xi=0.05):
"""
Probability of improvement policy with an exploration parameter of `xi`.
"""
target = model.predict(model.data[0])[0].max() + xi
def index(X, grad=False):
"""PI policy instance."""
return model.get_tail(X, target, grad)
return index
def Thompson(model, _, n=100, rng=None):
"""
Thompson sampling policy.
"""
return model.sample_f(n, rng).get
def UCB(model, _, delta=0.1, xi=0.2):
"""
The (GP)UCB acquisition function where `delta` is the probability that the
upper bound holds and `xi` is a multiplicative modification of the
exploration factor.
"""
d = model.ndata
a = xi * 2 * np.log(np.pi**2 / 3 / delta)
b = xi * (4 + d)
def index(X, grad=False):
"""UCB policy instance."""
posterior = model.predict(X, grad=grad)
mu, s2 = posterior[:2]
beta = a + b * np.log(model.ndata + 1)
if grad:
dmu, ds2 = posterior[2:]
return (mu + np.sqrt(beta * s2),
dmu + 0.5 * np.sqrt(beta / s2[:, None]) * ds2)
else:
return mu + np.sqrt(beta * s2)
return index
| """
Acquisition functions based on the probability or expected value of
improvement.
"""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
import numpy as np
__all__ = ['EI', 'PI', 'UCB', 'Thompson']
def EI(model, _, xi=0.0):
"""
Expected improvement policy with an exploration parameter of `xi`.
"""
X = model.data[0]
x = X[model.predict(X)[0].argmax()]
def index(X, grad=False):
"""EI policy instance."""
return model.get_improvement(X, x, xi, grad)
return index
def PI(model, _, xi=0.05):
"""
Probability of improvement policy with an exploration parameter of `xi`.
"""
X = model.data[0]
x = X[model.predict(X)[0].argmax()]
def index(X, grad=False):
"""PI policy instance."""
return model.get_improvement(X, x, xi, grad, pi=True)
return index
def Thompson(model, _, n=100, rng=None):
"""
Thompson sampling policy.
"""
return model.sample_f(n, rng).get
def UCB(model, _, delta=0.1, xi=0.2):
"""
The (GP)UCB acquisition function where `delta` is the probability that the
upper bound holds and `xi` is a multiplicative modification of the
exploration factor.
"""
d = model.ndata
a = xi * 2 * np.log(np.pi**2 / 3 / delta)
b = xi * (4 + d)
def index(X, grad=False):
"""UCB policy instance."""
posterior = model.predict(X, grad=grad)
mu, s2 = posterior[:2]
beta = a + b * np.log(model.ndata + 1)
if grad:
dmu, ds2 = posterior[2:]
return (mu + np.sqrt(beta * s2),
dmu + 0.5 * np.sqrt(beta / s2[:, None]) * ds2)
else:
return mu + np.sqrt(beta * s2)
return index
| Python | 0 |
1a8d7797e691bd5959fc8f7cdc0371e39208aee7 | Update version # | pyhindsight/__init__.py | pyhindsight/__init__.py | __author__ = "Ryan Benson"
__version__ = "2.1.0"
__email__ = "ryan@obsidianforensics.com"
| __author__ = "Ryan Benson"
__version__ = "2.0.5"
__email__ = "ryan@obsidianforensics.com"
| Python | 0 |
ec6191d63236a130e6a39f2383b7e8a6ae8ec672 | Remove the unexisting import. | pytask/profile/forms.py | pytask/profile/forms.py | import os
from django import forms
from registration.forms import RegistrationFormUniqueEmail
from registration.models import RegistrationProfile
from pytask.profile.models import GENDER_CHOICES, Profile
class CustomRegistrationForm(RegistrationFormUniqueEmail):
"""Used instead of RegistrationForm used by default django-registration
backend, this adds aboutme, dob, gender, address, phonenum to the default
django-registration RegistrationForm"""
full_name = forms.CharField(required=True, max_length=50,
label="Name as on your bank account",
help_text="Any DD/Cheque will be issued on \
this name")
aboutme = forms.CharField(required=True, widget=forms.Textarea,
max_length=1000, label=u"About Me",
help_text="A write up about yourself to aid the\
reviewer in judging your eligibility for a task.\
It can have your educational background, CGPA,\
field of interests etc.,"
)
dob = forms.DateField(help_text = "YYYY-MM-DD", required=True, label=u'date of birth')
gender = forms.ChoiceField(choices = GENDER_CHOICES, required=True, label=u'gender')
address = forms.CharField(required=True, max_length=200,
widget=forms.Textarea, help_text="This \
information will be used while sending DD/Cheque")
phonenum = forms.CharField(required=True, max_length=10,
label="Phone Number")
def clean_aboutme(self):
""" Empty not allowed """
data = self.cleaned_data['aboutme']
if not data.strip():
raise forms.ValidationError("Please write something about\
yourself")
return data
def clean_address(self):
""" Empty not allowed """
data = self.cleaned_data['address']
if not data.strip():
raise forms.ValidationError("Please enter an address")
return data
def clean_phonenum(self):
""" should be of 10 digits """
data = self.cleaned_data['phonenum']
if (not data.strip()) or \
(data.strip("1234567890")) or \
(len(data)!= 10):
raise forms.ValidationError("This is not a valid phone number")
return data
def save(self,profile_callback=None):
new_user = RegistrationProfile.objects.create_inactive_user(
username=self.cleaned_data['username'],
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'])
new_profile = Profile(user=new_user,
aboutme=self.cleaned_data['aboutme'],
dob=self.cleaned_data['dob'],
gender=self.cleaned_data['gender'],
address=self.cleaned_data['address'],
phonenum=self.cleaned_data['phonenum'],
uniq_key=make_key(Profile),
)
new_profile.save()
return new_user
class CreateProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['pynts', 'rights']
class EditProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['full_name', 'aboutme', 'gender', 'dob', 'address', 'phonenum']
def clean_aboutme(self):
""" Empty not allowed """
data = self.cleaned_data['aboutme']
if not data.strip():
raise forms.ValidationError("Please write something about\
yourself")
return data
def clean_address(self):
""" Empty not allowed """
data = self.cleaned_data['address']
if not data.strip():
raise forms.ValidationError("Please enter an address")
return data
def clean_phonenum(self):
""" should be of 10 digits """
data = self.cleaned_data['phonenum']
if (not data.strip()) or \
(data.strip("1234567890")) or \
(len(data)!= 10):
raise forms.ValidationError("This is not a valid phone number")
return data
| import os
from django import forms
from registration.forms import RegistrationFormUniqueEmail
from registration.models import RegistrationProfile
from pytask.utils import make_key
from pytask.profile.models import GENDER_CHOICES, Profile
class CustomRegistrationForm(RegistrationFormUniqueEmail):
"""Used instead of RegistrationForm used by default django-registration
backend, this adds aboutme, dob, gender, address, phonenum to the default
django-registration RegistrationForm"""
full_name = forms.CharField(required=True, max_length=50,
label="Name as on your bank account",
help_text="Any DD/Cheque will be issued on \
this name")
aboutme = forms.CharField(required=True, widget=forms.Textarea,
max_length=1000, label=u"About Me",
help_text="A write up about yourself to aid the\
reviewer in judging your eligibility for a task.\
It can have your educational background, CGPA,\
field of interests etc.,"
)
dob = forms.DateField(help_text = "YYYY-MM-DD", required=True, label=u'date of birth')
gender = forms.ChoiceField(choices = GENDER_CHOICES, required=True, label=u'gender')
address = forms.CharField(required=True, max_length=200,
widget=forms.Textarea, help_text="This \
information will be used while sending DD/Cheque")
phonenum = forms.CharField(required=True, max_length=10,
label="Phone Number")
def clean_aboutme(self):
""" Empty not allowed """
data = self.cleaned_data['aboutme']
if not data.strip():
raise forms.ValidationError("Please write something about\
yourself")
return data
def clean_address(self):
""" Empty not allowed """
data = self.cleaned_data['address']
if not data.strip():
raise forms.ValidationError("Please enter an address")
return data
def clean_phonenum(self):
""" should be of 10 digits """
data = self.cleaned_data['phonenum']
if (not data.strip()) or \
(data.strip("1234567890")) or \
(len(data)!= 10):
raise forms.ValidationError("This is not a valid phone number")
return data
def save(self,profile_callback=None):
new_user = RegistrationProfile.objects.create_inactive_user(
username=self.cleaned_data['username'],
password=self.cleaned_data['password1'],
email=self.cleaned_data['email'])
new_profile = Profile(user=new_user,
aboutme=self.cleaned_data['aboutme'],
dob=self.cleaned_data['dob'],
gender=self.cleaned_data['gender'],
address=self.cleaned_data['address'],
phonenum=self.cleaned_data['phonenum'],
uniq_key=make_key(Profile),
)
new_profile.save()
return new_user
class CreateProfileForm(forms.ModelForm):
class Meta:
model = Profile
exclude = ['pynts', 'rights']
class EditProfileForm(forms.ModelForm):
class Meta:
model = Profile
fields = ['full_name', 'aboutme', 'gender', 'dob', 'address', 'phonenum']
def clean_aboutme(self):
""" Empty not allowed """
data = self.cleaned_data['aboutme']
if not data.strip():
raise forms.ValidationError("Please write something about\
yourself")
return data
def clean_address(self):
""" Empty not allowed """
data = self.cleaned_data['address']
if not data.strip():
raise forms.ValidationError("Please enter an address")
return data
def clean_phonenum(self):
""" should be of 10 digits """
data = self.cleaned_data['phonenum']
if (not data.strip()) or \
(data.strip("1234567890")) or \
(len(data)!= 10):
raise forms.ValidationError("This is not a valid phone number")
return data
| Python | 0.000012 |
c45fc8485935c39af869204f9fc6b0dd6bc0deb1 | Move I/O outside of properties for light/tplink platform (#8699) | homeassistant/components/light/tplink.py | homeassistant/components/light/tplink.py | """
Support for TPLink lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.tplink/
"""
import logging
from homeassistant.const import (CONF_HOST, CONF_NAME)
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_KELVIN,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP)
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
from homeassistant.util.color import \
color_temperature_kelvin_to_mired as kelvin_to_mired
REQUIREMENTS = ['pyHS100==0.2.4.2']
_LOGGER = logging.getLogger(__name__)
SUPPORT_TPLINK = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Initialise pyLB100 SmartBulb."""
    # Imported lazily so the pyHS100 dependency is only loaded when the
    # platform is actually configured.
    from pyHS100 import SmartBulb
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    # Second argument True presumably requests an immediate state
    # update after adding the entity (update_before_add) — confirm
    # against the add_devices API.
    add_devices([TPLinkSmartBulb(SmartBulb(host), name)], True)
def brightness_to_percentage(byt):
    """Convert an absolute brightness (0..255) to a percentage (0..100)."""
    return int(byt * 100.0 / 255.0)
def brightness_from_percentage(percent):
    """Convert a percentage (0..100) to an absolute brightness (0..255)."""
    return (255.0 * percent) / 100.0
class TPLinkSmartBulb(Light):
    """Representation of a TPLink Smart Bulb.

    State is read from the device only in update(); the properties
    below return values cached there, so Home Assistant attribute
    reads never trigger device I/O.
    """
    def __init__(self, smartbulb, name):
        """Initialize the bulb."""
        self.smartbulb = smartbulb
        # Use the name set on the device if not set
        if name is None:
            self._name = self.smartbulb.alias
        else:
            self._name = name
        # Cached device state, populated by update().
        self._state = None
        self._color_temp = None
        self._brightness = None
        _LOGGER.debug("Setting up TP-Link Smart Bulb")
    @property
    def name(self):
        """Return the name of the Smart Bulb, if any."""
        return self._name
    def turn_on(self, **kwargs):
        """Turn the light on, applying any requested attributes."""
        if ATTR_COLOR_TEMP in kwargs:
            self.smartbulb.color_temp = \
                mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
        if ATTR_KELVIN in kwargs:
            self.smartbulb.color_temp = kwargs[ATTR_KELVIN]
        if ATTR_BRIGHTNESS in kwargs:
            # Fall back to the cached brightness, or full brightness if
            # nothing has been cached yet.
            brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
            self.smartbulb.brightness = brightness_to_percentage(brightness)
        self.smartbulb.state = self.smartbulb.BULB_STATE_ON
    def turn_off(self):
        """Turn the light off."""
        self.smartbulb.state = self.smartbulb.BULB_STATE_OFF
    @property
    def color_temp(self):
        """Return the color temperature of this light in mireds for HA."""
        return self._color_temp
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return self._brightness
    @property
    def is_on(self):
        """True if device is on."""
        return self._state
    def update(self):
        """Update the TP-Link Bulb's state.

        All device I/O happens here; on failure a warning is logged and
        the previously cached values are kept.
        """
        from pyHS100 import SmartPlugException
        try:
            self._state = (
                self.smartbulb.state == self.smartbulb.BULB_STATE_ON)
            self._brightness = brightness_from_percentage(
                self.smartbulb.brightness)
            if self.smartbulb.is_color:
                # color_temp of 0/None appears to mean "not in white
                # mode"; the cached value is left unchanged then.
                if (self.smartbulb.color_temp is not None and
                        self.smartbulb.color_temp != 0):
                    self._color_temp = kelvin_to_mired(
                        self.smartbulb.color_temp)
        except (SmartPlugException, OSError) as ex:
            _LOGGER.warning('Could not read state for %s: %s', self.name, ex)
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_TPLINK
| """
Support for TPLink lights.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/light.tplink/
"""
import logging
from homeassistant.const import (CONF_HOST, CONF_NAME)
from homeassistant.components.light import (
Light, ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_KELVIN,
SUPPORT_BRIGHTNESS, SUPPORT_COLOR_TEMP)
from homeassistant.util.color import \
color_temperature_mired_to_kelvin as mired_to_kelvin
from homeassistant.util.color import \
color_temperature_kelvin_to_mired as kelvin_to_mired
REQUIREMENTS = ['pyHS100==0.2.4.2']
_LOGGER = logging.getLogger(__name__)
SUPPORT_TPLINK = (SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Initialise pyLB100 SmartBulb."""
    # Lazy import: pyHS100 is only needed when the platform is used.
    from pyHS100 import SmartBulb
    host = config.get(CONF_HOST)
    name = config.get(CONF_NAME)
    add_devices([TPLinkSmartBulb(SmartBulb(host), name)], True)
def brightness_to_percentage(byt):
    """Convert brightness from absolute 0..255 to percentage (0..100)."""
    return int((byt*100.0)/255.0)
def brightness_from_percentage(percent):
    """Convert percentage (0..100) to absolute value 0..255 (float)."""
    return (percent*255.0)/100.0
class TPLinkSmartBulb(Light):
    """Representation of a TPLink Smart Bulb.

    NOTE(review): the property getters below read ``self.smartbulb``
    attributes directly.  update() guards the very same reads with a
    try/except for SmartPlugException/OSError, which strongly suggests
    those attribute reads perform device I/O and can raise — meaning
    every Home Assistant attribute access may hit the network.
    Consider caching all state in update() and returning cached values
    from the properties.
    """
    def __init__(self, smartbulb, name):
        """Initialize the bulb."""
        self.smartbulb = smartbulb
        # Use the name set on the device if not set
        if name is None:
            self._name = self.smartbulb.alias
        else:
            self._name = name
        # Cached on/off state written by update(); NOTE(review): is_on
        # ignores this cache and queries the device again.
        self._state = None
        _LOGGER.debug("Setting up TP-Link Smart Bulb")
    @property
    def name(self):
        """Return the name of the Smart Bulb, if any."""
        return self._name
    def turn_on(self, **kwargs):
        """Turn the light on."""
        if ATTR_COLOR_TEMP in kwargs:
            self.smartbulb.color_temp = \
                mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
        if ATTR_KELVIN in kwargs:
            self.smartbulb.color_temp = kwargs[ATTR_KELVIN]
        if ATTR_BRIGHTNESS in kwargs:
            brightness = kwargs.get(ATTR_BRIGHTNESS, self.brightness or 255)
            self.smartbulb.brightness = brightness_to_percentage(brightness)
        self.smartbulb.state = self.smartbulb.BULB_STATE_ON
    def turn_off(self):
        """Turn the light off."""
        self.smartbulb.state = self.smartbulb.BULB_STATE_OFF
    @property
    def color_temp(self):
        """Return the color temperature of this light in mireds for HA."""
        if self.smartbulb.is_color:
            # color_temp of 0/None appears to mean "not in white mode".
            if (self.smartbulb.color_temp is not None and
                    self.smartbulb.color_temp != 0):
                return kelvin_to_mired(self.smartbulb.color_temp)
            else:
                return None
        else:
            return None
    @property
    def brightness(self):
        """Return the brightness of this light between 0..255."""
        return brightness_from_percentage(self.smartbulb.brightness)
    @property
    def is_on(self):
        """True if device is on."""
        return self.smartbulb.state == \
            self.smartbulb.BULB_STATE_ON
    def update(self):
        """Update the TP-Link Bulb's state."""
        from pyHS100 import SmartPlugException
        try:
            self._state = self.smartbulb.state == \
                self.smartbulb.BULB_STATE_ON
        except (SmartPlugException, OSError) as ex:
            _LOGGER.warning('Could not read state for %s: %s', self.name, ex)
    @property
    def supported_features(self):
        """Flag supported features."""
        return SUPPORT_TPLINK
| Python | 0 |
82a9dc620cc20692e5b5c84381be38084f89ad75 | Add device_class to Shelly cover domain (#46894) | homeassistant/components/shelly/cover.py | homeassistant/components/shelly/cover.py | """Cover for Shelly."""
from aioshelly import Block
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_SHUTTER,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import COAP, DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up cover for device."""
    wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
    # Only "roller" blocks are exposed as covers; bail out early when
    # the device has none.
    blocks = [block for block in wrapper.device.blocks if block.type == "roller"]
    if not blocks:
        return
    async_add_entities(ShellyCover(wrapper, block) for block in blocks)
class ShellyCover(ShellyBlockEntity, CoverEntity):
    """Switch that controls a cover block on Shelly devices.

    ``control_result`` holds the response of the most recent
    ``set_state`` command; while set, the state properties prefer it
    over the (possibly stale) block state.  It is cleared again in
    _update_callback() once the device reports fresh data.
    """
    def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
        """Initialize the cover."""
        super().__init__(wrapper, block)
        self.control_result = None
        self._supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
        # "positioning" in the device settings appears to indicate a
        # calibrated roller — only then offer position control (confirm).
        if self.wrapper.device.settings["rollers"][0]["positioning"]:
            self._supported_features |= SUPPORT_SET_POSITION
    @property
    def is_closed(self):
        """If cover is closed."""
        if self.control_result:
            return self.control_result["current_pos"] == 0
        return self.block.rollerPos == 0
    @property
    def current_cover_position(self):
        """Position of the cover."""
        if self.control_result:
            return self.control_result["current_pos"]
        return self.block.rollerPos
    @property
    def is_closing(self):
        """Return if the cover is closing."""
        if self.control_result:
            return self.control_result["state"] == "close"
        return self.block.roller == "close"
    @property
    def is_opening(self):
        """Return if the cover is opening."""
        if self.control_result:
            return self.control_result["state"] == "open"
        return self.block.roller == "open"
    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_features
    @property
    def device_class(self) -> str:
        """Return the class of the device."""
        return DEVICE_CLASS_SHUTTER
    async def async_close_cover(self, **kwargs):
        """Close cover."""
        self.control_result = await self.block.set_state(go="close")
        self.async_write_ha_state()
    async def async_open_cover(self, **kwargs):
        """Open cover."""
        self.control_result = await self.block.set_state(go="open")
        self.async_write_ha_state()
    async def async_set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        self.control_result = await self.block.set_state(
            go="to_pos", roller_pos=kwargs[ATTR_POSITION]
        )
        self.async_write_ha_state()
    async def async_stop_cover(self, **_kwargs):
        """Stop the cover."""
        self.control_result = await self.block.set_state(go="stop")
        self.async_write_ha_state()
    @callback
    def _update_callback(self):
        """When device updates, clear control result that overrides state."""
        self.control_result = None
        super()._update_callback()
| """Cover for Shelly."""
from aioshelly import Block
from homeassistant.components.cover import (
ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import COAP, DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up cover for device."""
    wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id][COAP]
    # Only "roller" blocks become cover entities.
    blocks = [block for block in wrapper.device.blocks if block.type == "roller"]
    if not blocks:
        return
    async_add_entities(ShellyCover(wrapper, block) for block in blocks)
class ShellyCover(ShellyBlockEntity, CoverEntity):
    """Switch that controls a cover block on Shelly devices.

    ``control_result`` caches the last ``set_state`` response and
    overrides block state in the properties until _update_callback()
    clears it.

    NOTE(review): no ``device_class`` is exposed; covers of this kind
    would normally report DEVICE_CLASS_SHUTTER so the frontend renders
    them correctly — consider adding it.
    """
    def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
        """Initialize the cover."""
        super().__init__(wrapper, block)
        self.control_result = None
        self._supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
        # Position control only when the device reports "positioning".
        if self.wrapper.device.settings["rollers"][0]["positioning"]:
            self._supported_features |= SUPPORT_SET_POSITION
    @property
    def is_closed(self):
        """If cover is closed."""
        if self.control_result:
            return self.control_result["current_pos"] == 0
        return self.block.rollerPos == 0
    @property
    def current_cover_position(self):
        """Position of the cover."""
        if self.control_result:
            return self.control_result["current_pos"]
        return self.block.rollerPos
    @property
    def is_closing(self):
        """Return if the cover is closing."""
        if self.control_result:
            return self.control_result["state"] == "close"
        return self.block.roller == "close"
    @property
    def is_opening(self):
        """Return if the cover is opening."""
        if self.control_result:
            return self.control_result["state"] == "open"
        return self.block.roller == "open"
    @property
    def supported_features(self):
        """Flag supported features."""
        return self._supported_features
    async def async_close_cover(self, **kwargs):
        """Close cover."""
        self.control_result = await self.block.set_state(go="close")
        self.async_write_ha_state()
    async def async_open_cover(self, **kwargs):
        """Open cover."""
        self.control_result = await self.block.set_state(go="open")
        self.async_write_ha_state()
    async def async_set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        self.control_result = await self.block.set_state(
            go="to_pos", roller_pos=kwargs[ATTR_POSITION]
        )
        self.async_write_ha_state()
    async def async_stop_cover(self, **_kwargs):
        """Stop the cover."""
        self.control_result = await self.block.set_state(go="stop")
        self.async_write_ha_state()
    @callback
    def _update_callback(self):
        """When device updates, clear control result that overrides state."""
        self.control_result = None
        super()._update_callback()
| Python | 0 |
ea6f60838ae309e5fb0662b2416d3c4450be7823 | correct straight function | design_of_computer_programs_cs212/lesson01/poker_game.py | design_of_computer_programs_cs212/lesson01/poker_game.py |
def poker(hands):
    """Return the best hand: poker([hand,...]) => hand

    ``hands`` must be non-empty.  Ties keep the first best hand,
    since max() returns the first maximal element.
    """
    return max(hands, key=hand_rank)
def hand_rank(hand):
    """Map a 5-card hand to a comparable (category, tiebreaker...) tuple.

    Tuples compare lexicographically, so a higher tuple is a better
    hand; categories run from 8 (straight flush) down to 0 (high card).
    """
    ranks = card_ranks(hand)
    has_straight = straight(ranks)
    has_flush = flush(hand)
    if has_straight and has_flush:            # straight flush
        return (8, max(ranks))
    if kind(4, ranks):                        # four of a kind
        return (7, kind(4, ranks), kind(1, ranks))
    if kind(3, ranks) and kind(2, ranks):     # full house
        return (6, kind(3, ranks), kind(2, ranks))
    if has_flush:                             # flush
        return (5, ranks)
    if has_straight:                          # straight
        return (4, max(ranks))
    if kind(3, ranks):                        # three of a kind
        return (3, kind(3, ranks), ranks)
    if two_pair(ranks):                       # two pair
        return (2, two_pair(ranks), ranks)
    if kind(2, ranks):                        # one pair
        return (1, kind(2, ranks), ranks)
    return (0, ranks)                         # high card
def card_ranks(cards):
    """Return the card ranks as a list sorted highest-first.

    An ace normally counts as 14, but the ace-low wheel A-2-3-4-5 is
    remapped to [5, 4, 3, 2, 1] so that straight() recognises it.
    """
    order = "--23456789TJQKA"
    ranks = sorted((order.index(rank) for rank, suit in cards), reverse=True)
    if ranks == [14, 5, 4, 3, 2]:
        return [5, 4, 3, 2, 1]
    return ranks
def straight(ranks):
    """Return True if the ranks form a 5-card straight.

    A straight is five *distinct* ranks whose extremes differ by
    exactly four.
    """
    distinct = set(ranks)
    return len(distinct) == 5 and max(distinct) - min(distinct) == 4
def flush(hand):
    """Return True if every card in the hand shares one suit.

    Cards are 2-character strings such as 'AS'; index 1 is the suit.
    """
    return len({suit for rank, suit in hand}) == 1
def kind(n, ranks):
    """Return the first rank occurring exactly *n* times, else None.

    Because ranks are sorted high-to-low, the first match is also the
    highest such rank.
    """
    return next((r for r in ranks if ranks.count(r) == n), None)
def two_pair(ranks):
    """If there are two pair, return the two ranks as a
    (highest, lowest) tuple; otherwise return None."""
    highest = kind(2, ranks)
    # Scanning the reversed (ascending) list yields the lowest pair.
    lowest = kind(2, ranks[::-1])
    if highest and highest != lowest:
        return (highest, lowest)
    return None
|
def poker(hands):
    """Return the best hand: poker([hand,...]) => hand

    Ties keep the first best hand (max() returns the first maximum).
    """
    return max(hands, key=hand_rank)
def hand_rank(hand):
    """define a rank for a specific hand

    Returns a tuple whose first element is the hand category (8 =
    straight flush ... 0 = high card) followed by tiebreakers, so
    tuples compare correctly under Python's lexicographic ordering.
    """
    ranks = card_ranks(hand)
    if straight(ranks) and flush(hand):  # straight flush
        return (8, max(ranks))
    elif kind(4, ranks):  # 4 of a kind
        return (7, kind(4, ranks), kind(1, ranks))
    elif kind(3, ranks) and kind(2, ranks):  # full house
        return (6, kind(3, ranks), kind(2, ranks))
    elif flush(hand):  # flush
        return (5, ranks)
    elif straight(ranks):  # straight
        return (4, max(ranks))
    elif kind(3, ranks):  # 3 of a kind
        return (3, kind(3, ranks), ranks)
    elif two_pair(ranks):  # 2 pair
        return (2, two_pair(ranks), ranks)
    elif kind(2, ranks):  # kind
        return (1, kind(2, ranks), ranks)
    else:  # high card
        return (0, ranks)
def card_ranks(cards):
    """Return a list of the ranks, sorted with higher first.

    Fix: the ace-low wheel A-2-3-4-5 previously sorted as
    [14, 5, 4, 3, 2], so straight() never recognised it; remap it to
    [5, 4, 3, 2, 1].
    """
    ranks = ["--23456789TJQKA".index(r) for r, s in cards]
    ranks.sort(reverse=True)
    return [5, 4, 3, 2, 1] if ranks == [14, 5, 4, 3, 2] else ranks
def straight(ranks):
    """ Return True if the ordered ranks from a 5 card straight"""
    # Five distinct ranks whose extremes differ by exactly 4 form a run.
    return (max(ranks) - min(ranks) == 4) and len(set(ranks)) == 5
def flush(hand):
    """ Return True if all cards have the same suit"""
    # Each card is a 2-char string like 'AS'; index 1 is the suit.
    suits = [s for r, s in hand]
    return len(set(suits)) == 1
def kind(n, ranks):
    """ Return the first rank that this hand has exactly n
    and return None otherwise"""
    # ranks is sorted high-to-low, so the first match is also the
    # highest rank occurring exactly n times.
    for r in ranks:
        if ranks.count(r) == n:
            return r
    return None
def two_pair(ranks):
    """If there are two pair, return the two ranks as a
    tuple: (highest, lowest); otherwise return None."""
    # Scanning the reversed (ascending) list finds the lowest pair; if
    # it differs from the highest pair, two distinct pairs exist.
    pair_highest = kind(2, ranks)
    pair_lowest = kind(2, list(reversed(ranks)))
    if pair_highest and pair_highest != pair_lowest:
        return (pair_highest, pair_lowest)
    return None
| Python | 0.000437 |
56c3c373563a38991da72bc235d4e3e40e711968 | Use extra space. | remove_duplicates_from_sorted_array.py | remove_duplicates_from_sorted_array.py | #! /usr/bin/env python3
"""
http://oj.leetcode.com/problems/remove-duplicates-from-sorted-array/
Given a sorted array, remove the duplicates in place such that each element
appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with
constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
Since Apr-22-2014 18:16
"""
class Solution:
    # @param a list of integers
    # @return an integer
    def removeDuplicates(self, a):
        """Remove duplicates from the sorted list *a* in place.

        Two-pointer compaction: ``write`` is the length of the
        deduplicated prefix.  Runs in O(n) time with O(1) extra space
        and returns the new length.

        The previous implementation built a second list and rebound a
        module-global ``A``, violating the problem's in-place /
        constant-memory requirement and only working for callers that
        happened to read that global; mutating *a* directly keeps the
        same observable result for such callers.
        """
        if not a:
            return 0
        write = 1
        for read in range(1, len(a)):
            if a[read] != a[write - 1]:
                a[write] = a[read]
                write += 1
        # Trim the leftover tail so the caller's list holds exactly
        # the deduplicated elements.
        del a[write:]
        return write
if __name__ == '__main__':
    # Inline smoke tests.  They read the module-global A after each
    # call, so they depend on removeDuplicates leaving the
    # deduplicated elements visible through A.
    s = Solution()
    A = []
    assert s.removeDuplicates(A) == 0
    assert A == []
    A = [1]
    assert s.removeDuplicates(A) == 1
    assert A == [1]
    A = [1, 1, 2]
    assert s.removeDuplicates(A) == 2
    assert A == [1, 2]
    A = [1, 1, 2, 3, 4, 4, 5, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
    A = [1, 2, 3, 4, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
| #! /usr/bin/env python3
"""
http://oj.leetcode.com/problems/remove-duplicates-from-sorted-array/
Given a sorted array, remove the duplicates in place such that each element
appear only once and return the new length.
Do not allocate extra space for another array, you must do this in place with
constant memory.
For example,
Given input array A = [1,1,2],
Your function should return length = 2, and A is now [1,2].
Since Apr-22-2014 18:16
"""
class Solution:
    # @param A a list of integers
    # @return an integer
    def removeDuplicates(self, A):
        """Remove duplicates from the sorted list *A* in place and
        return the new length.

        Two-pointer compaction: ``write`` marks the length of the
        deduplicated prefix.  The previous remove()-in-a-loop version
        was O(n^2) and, because it advanced ``i`` after each removal,
        skipped a comparison and left runs of three or more equal
        values (e.g. [1, 1, 1]) only partially deduplicated.
        """
        if not A:
            return 0
        write = 1
        for read in range(1, len(A)):
            if A[read] != A[write - 1]:
                A[write] = A[read]
                write += 1
        # Drop the stale tail so A holds exactly the unique elements.
        del A[write:]
        return write
if __name__ == '__main__':
    # Inline smoke tests.  NOTE(review): no case exercises a run of
    # three or more equal values (e.g. [1, 1, 1]), which is exactly
    # where a remove()-in-a-loop implementation goes wrong.
    s = Solution()
    A = []
    assert s.removeDuplicates(A) == 0
    assert A == []
    A = [1]
    assert s.removeDuplicates(A) == 1
    assert A == [1]
    A = [1, 1, 2]
    assert s.removeDuplicates(A) == 2
    assert A == [1, 2]
    A = [1, 1, 2, 3, 4, 4, 5, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
    A = [1, 2, 3, 4, 5]
    assert s.removeDuplicates(A) == 5
    assert A == [1, 2, 3, 4, 5]
| Python | 0.000008 |
97dbd18e12094820be5985b9daec4ceab4d86116 | Fix getfolders() | offlineimap/repository/LocalStatus.py | offlineimap/repository/LocalStatus.py | # Local status cache repository support
# Copyright (C) 2002 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Base import BaseRepository
from offlineimap.folder.LocalStatus import LocalStatusFolder, magicline
from offlineimap.folder.LocalStatusSQLite import LocalStatusSQLiteFolder
import os
import re
class LocalStatusRepository(BaseRepository):
    """Repository of LocalStatus folders kept on local disk, backed by
    either plain text files or sqlite databases."""
    def __init__(self, reposname, account):
        BaseRepository.__init__(self, reposname, account)
        self.directory = os.path.join(account.getaccountmeta(), 'LocalStatus')
        #statusbackend can be 'plain' or 'sqlite'
        backend = self.account.getconf('status_backend', 'plain')
        if backend == 'sqlite':
            self._backend = 'sqlite'
            self.LocalStatusFolderClass = LocalStatusSQLiteFolder
            # Keep sqlite state in a separate directory from any
            # previously written plain-text state.
            self.directory += '-sqlite'
        elif backend == 'plain':
            self._backend = 'plain'
            self.LocalStatusFolderClass = LocalStatusFolder
        else:
            raise SyntaxWarning("Unknown status_backend '%s' for account '%s'" \
                                % (backend, account.name))
        if not os.path.exists(self.directory):
            os.mkdir(self.directory, 0700)
        # self._folders is a list of LocalStatusFolders(), filled
        # lazily by getfolders() and invalidated by makefolder() and
        # forgetfolders().
        self._folders = None
    def getsep(self):
        # Hierarchy separator used in status folder names.
        return '.'
    def getfolderfilename(self, foldername):
        """Return the full path of the status file"""
        # replace with 'dot' if final path name is '.'
        foldername = re.sub('(^|\/)\.$','\\1dot', foldername)
        return os.path.join(self.directory, foldername)
    def makefolder(self, foldername):
        """Create a LocalStatus Folder
        Empty Folder for plain backend. NoOp for sqlite backend as those
        are created on demand."""
        # Invalidate the cache.
        self._folders = None
        if self._backend == 'sqlite':
            return
        filename = self.getfolderfilename(foldername)
        # Write to a temp file and rename so an interrupted run never
        # leaves a half-written status file at the final path.
        file = open(filename + ".tmp", "wt")
        file.write(magicline + '\n')
        file.close()
        os.rename(filename + ".tmp", filename)
        # Invalidate the cache.
        self._folders = None
    def getfolder(self, foldername):
        """Return the Folder() object for a foldername"""
        return self.LocalStatusFolderClass(self.directory, foldername,
                                           self, self.accountname,
                                           self.config)
    def getfolders(self):
        """Returns a list of all cached folders."""
        if self._folders != None:
            return self._folders
        self._folders = []
        for folder in os.listdir(self.directory):
            self._folders.append(self.getfolder(folder))
        return self._folders
    def forgetfolders(self):
        """Forgets the cached list of folders, if any. Useful to run
        after a sync run."""
        self._folders = None
| # Local status cache repository support
# Copyright (C) 2002 John Goerzen
# <jgoerzen@complete.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
from Base import BaseRepository
from offlineimap.folder.LocalStatus import LocalStatusFolder, magicline
from offlineimap.folder.LocalStatusSQLite import LocalStatusSQLiteFolder
import os
import re
class LocalStatusRepository(BaseRepository):
def __init__(self, reposname, account):
BaseRepository.__init__(self, reposname, account)
self.directory = os.path.join(account.getaccountmeta(), 'LocalStatus')
#statusbackend can be 'plain' or 'sqlite'
backend = self.account.getconf('status_backend', 'plain')
if backend == 'sqlite':
self._backend = 'sqlite'
self.LocalStatusFolderClass = LocalStatusSQLiteFolder
self.directory += '-sqlite'
elif backend == 'plain':
self._backend = 'plain'
self.LocalStatusFolderClass = LocalStatusFolder
else:
raise SyntaxWarning("Unknown status_backend '%s' for account '%s'" \
% (backend, account.name))
if not os.path.exists(self.directory):
os.mkdir(self.directory, 0700)
# self._folders is a list of LocalStatusFolders()
self._folders = None
def getsep(self):
return '.'
def getfolderfilename(self, foldername):
"""Return the full path of the status file"""
# replace with 'dot' if final path name is '.'
foldername = re.sub('(^|\/)\.$','\\1dot', foldername)
return os.path.join(self.directory, foldername)
def makefolder(self, foldername):
"""Create a LocalStatus Folder
Empty Folder for plain backend. NoOp for sqlite backend as those
are created on demand."""
# Invalidate the cache.
self._folders = None
if self._backend == 'sqlite':
return
filename = self.getfolderfilename(foldername)
file = open(filename + ".tmp", "wt")
file.write(magicline + '\n')
file.close()
os.rename(filename + ".tmp", filename)
# Invalidate the cache.
self._folders = None
def getfolder(self, foldername):
"""Return the Folder() object for a foldername"""
return self.LocalStatusFolderClass(self.directory, foldername,
self, self.accountname,
self.config)
def getfolders(self):
"""Returns a list of ALL folders on this server.
This is currently nowhere used in the code."""
if self._folders != None:
return self._folders
for folder in os.listdir(self.directory):
self._folders = retval.append(self.getfolder(folder))
return self._folders
def forgetfolders(self):
"""Forgets the cached list of folders, if any. Useful to run
after a sync run."""
self._folders = None
| Python | 0 |
c1cbdf20e6c109ff1586f663cab7e24f1716af08 | Make remove-if-exists function public | opwen_email_server/utils/temporary.py | opwen_email_server/utils/temporary.py | from contextlib import contextmanager
from contextlib import suppress
from os import close
from os import remove
from tempfile import mkstemp
from typing import Generator
def create_tempfilename() -> str:
    """Create an empty temporary file on disk and return its path.

    The descriptor returned by mkstemp() is closed immediately; only
    the (already-created) filename is handed back to the caller.
    """
    descriptor, path = mkstemp()
    close(descriptor)
    return path
@contextmanager
def removing(path: str) -> Generator[str, None, None]:
    """Context manager that yields *path* and deletes it on exit.

    Deletion is best-effort: a file that has already disappeared is
    silently ignored.
    """
    try:
        yield path
    finally:
        with suppress(FileNotFoundError):
            remove(path)
def remove_if_exists(path: str):
    """Delete *path*, doing nothing if it does not exist."""
    try:
        remove(path)
    except FileNotFoundError:
        pass
| from contextlib import contextmanager
from contextlib import suppress
from os import close
from os import remove
from tempfile import mkstemp
from typing import Generator
def create_tempfilename() -> str:
    """Create an empty temporary file and return its path."""
    # mkstemp() both creates and opens the file; close the descriptor
    # right away and return only the path.
    file_descriptor, filename = mkstemp()
    close(file_descriptor)
    return filename
@contextmanager
def removing(path: str) -> Generator[str, None, None]:
    """Yield *path* and best-effort delete it on exit."""
    try:
        yield path
    finally:
        _remove_if_exists(path)
def _remove_if_exists(path: str):
    """Delete *path*, ignoring it if the file is already gone."""
    with suppress(FileNotFoundError):
        remove(path)
| Python | 0.000217 |
f238d2f036d79cd9d192b09b05575a71864fb682 | API tests should tearDown in the correct order | moniker/tests/test_api/test_v1/__init__.py | moniker/tests/test_api/test_v1/__init__.py | # Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.api.v1 import factory
from moniker.api.auth import NoAuthContextMiddleware
from moniker.tests.test_api import ApiTestCase
LOG = logging.getLogger(__name__)
class ApiV1Test(ApiTestCase):
    """Base class for v1 API tests: wires up a Flask app with the
    NoAuth middleware and a running central service instance."""
    # Not collected as a test case itself; concrete subclasses enable
    # collection.
    __test__ = False
    def setUp(self):
        super(ApiV1Test, self).setUp()
        # Create a Flask application
        self.app = factory({})
        # Inject the NoAuth middleware
        self.app.wsgi_app = NoAuthContextMiddleware(self.app.wsgi_app)
        # Obtain a test client
        self.client = self.app.test_client()
        # Create and start an instance of the central service
        self.central_service = self.get_central_service()
        self.central_service.start()
    def tearDown(self):
        # Reverse of setUp: stop the service we started before letting
        # the base class clean up.
        self.central_service.stop()
        super(ApiV1Test, self).tearDown()
| # Copyright 2012 Managed I.T.
#
# Author: Kiall Mac Innes <kiall@managedit.ie>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from moniker.openstack.common import log as logging
from moniker.api.v1 import factory
from moniker.api.auth import NoAuthContextMiddleware
from moniker.tests.test_api import ApiTestCase
LOG = logging.getLogger(__name__)
class ApiV1Test(ApiTestCase):
    """Base class for v1 API tests: wires up a Flask app with the
    NoAuth middleware and a running central service instance."""
    # Not collected as a test case itself; concrete subclasses enable
    # collection.
    __test__ = False
    def setUp(self):
        super(ApiV1Test, self).setUp()
        # Create a Flask application
        self.app = factory({})
        # Inject the NoAuth middleware
        self.app.wsgi_app = NoAuthContextMiddleware(self.app.wsgi_app)
        # Obtain a test client
        self.client = self.app.test_client()
        # Create and start an instance of the central service
        self.central_service = self.get_central_service()
        self.central_service.start()
    def tearDown(self):
        # Tear down in reverse order of setUp: stop the central service
        # we started *before* running the base-class teardown, so the
        # service does not outlive the fixtures it depends on.
        self.central_service.stop()
        super(ApiV1Test, self).tearDown()
| Python | 0.999982 |
baf149711302fab8a29f32316cc78d7bd3a0f94f | Enable heartbeats by default for non-clustered agents (#385) | cloudify/broker_config.py | cloudify/broker_config.py | ########
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# AMQP broker configuration for agents and manager
# Primarily used by celery, so provided with variables it understands
from __future__ import absolute_import
import json
import os
import ssl
from cloudify.constants import BROKER_PORT_SSL, BROKER_PORT_NO_SSL
workdir_path = os.getenv('CELERY_WORK_DIR')
if workdir_path is None:
    # We are not in an appropriately configured celery environment
    config = {}
else:
    conf_file_path = os.path.join(workdir_path, 'broker_config.json')
    if os.path.isfile(conf_file_path):
        with open(conf_file_path) as conf_handle:
            conf_file = conf_handle.read()
        config = json.loads(conf_file)
    else:
        # No broker_config.json -> fall back to all defaults below.
        config = {}
# Provided as variables for retrieval by amqp_client and logger as required
broker_cert_path = config.get('broker_cert_path', '')
broker_username = config.get('broker_username', 'guest')
broker_password = config.get('broker_password', 'guest')
broker_hostname = config.get('broker_hostname', 'localhost')
broker_vhost = config.get('broker_vhost', '/')
broker_ssl_enabled = config.get('broker_ssl_enabled', False)
broker_port = BROKER_PORT_SSL if broker_ssl_enabled else BROKER_PORT_NO_SSL
# Heartbeats are enabled by default for all agents, clustered or not
# (the value is the interval in seconds); Windows is excluded below
# because celery does not support broker_heartbeat there.
DEFAULT_HEARTBEAT = 30
if os.name == 'nt':
    # celery doesn't support broker_heartbeat on windows
    broker_heartbeat = None
else:
    broker_heartbeat = config.get('broker_heartbeat', DEFAULT_HEARTBEAT)
if broker_ssl_enabled:
    # NOTE: only defined when SSL is enabled; consumers must treat the
    # name as conditionally present.
    BROKER_USE_SSL = {
        'ca_certs': broker_cert_path,
        'cert_reqs': ssl.CERT_REQUIRED,
    }
if broker_heartbeat:
    options = '?heartbeat={heartbeat}'.format(heartbeat=broker_heartbeat)
else:
    options = ''
# BROKER_URL is held in the config to avoid the password appearing
# in ps listings
URL_TEMPLATE = \
    'amqp://{username}:{password}@{hostname}:{port}/{vhost}{options}'
if config.get('cluster'):
    # One URL per cluster node, joined with ';' so celery can fail over.
    BROKER_URL = ';'.join(URL_TEMPLATE.format(username=broker_username,
                                              password=broker_password,
                                              hostname=node_ip,
                                              port=broker_port,
                                              vhost=broker_vhost,
                                              options=options)
                          for node_ip in config['cluster'])
else:
    BROKER_URL = URL_TEMPLATE.format(
        username=broker_username,
        password=broker_password,
        hostname=broker_hostname,
        port=broker_port,
        vhost=broker_vhost,
        options=options
    )
# celery will not use the failover strategy if there is only one broker url;
# we need it to try and failover even with one initial manager, because
# another node might've been added dynamically, while the worker was already
# running; we add an empty broker url so that celery always sees at least two -
# the failover strategy we're using (defined in cloudify_agent.app) filters out
# the empty one
BROKER_URL += ';'
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_RESULT_EXPIRES = 600
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
# Copyright (c) 2015 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# AMQP broker configuration for agents and manager
# Primarily used by celery, so provided with variables it understands
from __future__ import absolute_import
import json
import os
import ssl
from cloudify.constants import BROKER_PORT_SSL, BROKER_PORT_NO_SSL
# Broker settings come from a JSON file dropped next to the celery workdir;
# every lookup below falls back to a safe default so the module always imports.
workdir_path = os.getenv('CELERY_WORK_DIR')
if workdir_path is None:
    # We are not in an appropriately configured celery environment
    config = {}
else:
    conf_file_path = os.path.join(workdir_path, 'broker_config.json')
    if os.path.isfile(conf_file_path):
        with open(conf_file_path) as conf_handle:
            conf_file = conf_handle.read()
        config = json.loads(conf_file)
    else:
        config = {}
# Provided as variables for retrieval by amqp_client and logger as required
broker_cert_path = config.get('broker_cert_path', '')
broker_username = config.get('broker_username', 'guest')
broker_password = config.get('broker_password', 'guest')
broker_hostname = config.get('broker_hostname', 'localhost')
broker_vhost = config.get('broker_vhost', '/')
broker_ssl_enabled = config.get('broker_ssl_enabled', False)
broker_port = BROKER_PORT_SSL if broker_ssl_enabled else BROKER_PORT_NO_SSL
# only enable heartbeat by default for agents connected to a cluster
DEFAULT_HEARTBEAT = 30 if config.get('cluster') else None
if os.name == 'nt':
    # celery doesn't support broker_heartbeat on windows
    broker_heartbeat = None
else:
    broker_heartbeat = config.get('broker_heartbeat', DEFAULT_HEARTBEAT)
if broker_ssl_enabled:
    # Require a verified server certificate signed by the configured CA.
    BROKER_USE_SSL = {
        'ca_certs': broker_cert_path,
        'cert_reqs': ssl.CERT_REQUIRED,
    }
if broker_heartbeat:
options = '?heartbeat={heartbeat}'.format(heartbeat=broker_heartbeat)
else:
options = ''
# BROKER_URL is held in the config to avoid the password appearing
# in ps listings
URL_TEMPLATE = \
'amqp://{username}:{password}@{hostname}:{port}/{vhost}{options}'
if config.get('cluster'):
BROKER_URL = ';'.join(URL_TEMPLATE.format(username=broker_username,
password=broker_password,
hostname=node_ip,
port=broker_port,
vhost=broker_vhost,
options=options)
for node_ip in config['cluster'])
else:
BROKER_URL = URL_TEMPLATE.format(
username=broker_username,
password=broker_password,
hostname=broker_hostname,
port=broker_port,
vhost=broker_vhost,
options=options
)
# celery will not use the failover strategy if there is only one broker url;
# we need it to try and failover even with one initial manager, because
# another node might've been added dynamically, while the worker was already
# running; we add an empty broker url so that celery always sees at least two -
# the failover strategy we're using (defined in cloudify_agent.app) filters out
# the empty one
BROKER_URL += ';'
CELERY_RESULT_BACKEND = BROKER_URL
CELERY_TASK_RESULT_EXPIRES = 600
CELERYD_PREFETCH_MULTIPLIER = 1
CELERY_ACKS_LATE = False
| Python | 0 |
94763abaf573bfd25cad06da0cffc6b94a7dedc8 | Fix a flaw in the old implementation of checking whether the state has changed. | pervert/management/commands/pervert_migrate.py | pervert/management/commands/pervert_migrate.py | import json
from django.core.management.base import BaseCommand, CommandError
from pervert.models import AbstractPervert, SchemaState, PervertError
class Command(BaseCommand):
    """Snapshot the schema of all Pervert-controlled models (Python 2 code)."""
    help = "Registers new schema for Pervert-controlled models"
    def handle(self, *args, **options):
        """Collect field/FK names per AbstractPervert subclass and persist
        a SchemaState row, unless it matches the most recent saved state."""
        states = []
        print "Reading the schema of Pervert-controlled models..."
        state_text = ""
        for cl in AbstractPervert.__subclasses__():
            state = {
                "app_label": cl._meta.app_label,
                "model": cl._meta.object_name,
                "fields": [],
                "fks": []
            }
            state_text += "%s.models.%s\n" % (state["app_label"], state["model"],)
            for field in cl._meta.fields:
                state_text += " * %s\n" % field.name
                # 'uid' is the implicit key and not part of the schema proper
                if field.name == "uid":
                    continue
                # foreign keys and plain fields are tracked separately
                if field.__class__.__name__ == "ForeignKey":
                    state["fks"].append(field.name)
                else:
                    state["fields"].append(field.name)
            # No sorting needed here: states are compared below as parsed
            # structures, so serialization order is irrelevant.
            states.append(state)
        # Compare against the most recently saved state, parsed back from
        # JSON so the comparison is structural rather than byte-for-byte.
        if SchemaState.objects.count() and \
            json.loads(SchemaState.objects.order_by("-when")[0].state) == states:
            print "The state hasn't changed, nothing to do."
        else:
            # Save new state
            ss = SchemaState(state = json.dumps(states))
            ss.save()
            print state_text + "SchemaState saved on %s" % ss.when
| import json
from django.core.management.base import BaseCommand, CommandError
from pervert.models import AbstractPervert, SchemaState, PervertError
class Command(BaseCommand):
help = "Registers new schema for Pervert-controlled models"
def handle(self, *args, **options):
states = []
print "Reading the schema of Pervert-controlled models..."
state_text = ""
for cl in AbstractPervert.__subclasses__():
state = {
"app_label": cl._meta.app_label,
"model": cl._meta.object_name,
"fields": [],
"fks": []
}
state_text += "%s.models.%s\n" % (state["app_label"], state["model"],)
for field in cl._meta.fields:
state_text += " * %s\n" % field.name
if field.name == "uid":
continue
if field.__class__.__name__ == "ForeignKey":
state["fks"].append(field.name)
else:
state["fields"].append(field.name)
# Sort to make sure there is a unique json representation of each state
state["fields"].sort()
state["fks"].sort()
states.append(state)
jsonstate = json.dumps(states)
# If the json is identical to the last saved state
if SchemaState.objects.count() and \
SchemaState.objects.order_by("-when")[0].state == jsonstate:
print "The state hasn't changed, nothing to do."
else:
# Save new state
ss = SchemaState(state = json.dumps(states))
ss.save()
print state_text + "SchemaState saved on %s" % ss.when
| Python | 0.000002 |
24c83211588ac71492640ce43e3a893e05466a54 | Change old membership migration to null | amy/workshops/migrations/0065_multiple_memberships.py | amy/workshops/migrations/0065_multiple_memberships.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the membership link: drop host.membership, add membership.host FK."""
    dependencies = [
        ('workshops', '0064_membership'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='host',
            name='membership',
        ),
        migrations.AddField(
            model_name='membership',
            name='host',
            # the default value of 1 here doesn't break anything, because
            # migrations 0064-0065 should be applied together
            # null=True keeps the column nullable so existing rows (and
            # databases without a Host pk=1) don't violate NOT NULL.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=1, to='workshops.Host', null=True),
            preserve_default=False,
        ),
    ]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
    """Move the membership link: drop host.membership, add membership.host FK."""
    dependencies = [
        ('workshops', '0064_membership'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='host',
            name='membership',
        ),
        migrations.AddField(
            model_name='membership',
            name='host',
            # the default value of 1 here doesn't break anything, because
            # migrations 0064-0065 should be applied together
            # null=True is required: without it, applying this migration on a
            # database whose existing Membership rows (or missing Host pk=1)
            # cannot satisfy the NOT NULL + PROTECT constraint would fail.
            field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, default=1, to='workshops.Host', null=True),
            preserve_default=False,
        ),
    ]
| Python | 0 |
c267a580ca2179722d31941f4d02e2c67093769b | Add temporary streaming mechanism for output files | dockci/views/build.py | dockci/views/build.py | """
Views related to build management
"""
import logging
import mimetypes
import os.path
import re
import select
from flask import (abort,
flash,
redirect,
render_template,
request,
Response,
url_for,
)
from dockci.models.build import Build
from dockci.models.job import Job
from dockci.server import APP
from dockci.util import is_valid_github
@APP.route('/jobs/<job_slug>/builds/<build_slug>', methods=('GET',))
def build_view(job_slug, build_slug):
    """
    Render the detail page for a single build of a job
    """
    parent_job = Job(slug=job_slug)
    return render_template('build.html',
                           build=Build(job=parent_job, slug=build_slug))
@APP.route('/jobs/<job_slug>/builds/new', methods=('GET', 'POST'))
def build_new_view(job_slug):
    """
    View to create a new build

    GET renders the form; POST either handles a GitHub push webhook
    (authenticated via the job's shared secret) or a manual form submit
    with an explicit commit hash. Both paths save and queue the build.
    """
    job = Job(slug=job_slug)
    if request.method == 'POST':
        build = Build(job=job)
        build.repo = job.repo
        build_url = url_for('build_view',
                            job_slug=job_slug,
                            build_slug=build.slug)
        if 'X-Github-Event' in request.headers:
            # Webhook path: the secret must exist and the payload signature
            # must verify before any payload data is trusted.
            if not job.github_secret:
                logging.warn("GitHub webhook secret not setup")
                abort(403)
            if not is_valid_github(job.github_secret):
                logging.warn("Invalid GitHub payload")
                abort(403)
            if request.headers['X-Github-Event'] == 'push':
                push_data = request.json
                build.commit = push_data['head_commit']['id']
            else:
                # Only 'push' events are implemented
                logging.debug("Unknown GitHub hook '%s'",
                              request.headers['X-Github-Event'])
                abort(501)
            build.save()
            build.queue()
            return build_url, 201
        else:
            # Manual form submission path
            build.commit = request.form['commit']
            # NOTE(review): re.match only anchors at the start, so any string
            # *beginning* with a hex char passes — confirm whether a fully
            # anchored pattern (e.g. \A[a-fA-F0-9]{1,40}\Z) was intended.
            if not re.match(r'[a-fA-F0-9]{1,40}', request.form['commit']):
                flash(u"Invalid git commit hash", 'danger')
                return render_template('build_new.html', build=build)
            build.save()
            build.queue()
            flash(u"Build queued", 'success')
            return redirect(build_url, 303)
    return render_template('build_new.html', build=Build(job=job))
@APP.route('/jobs/<job_slug>/builds/<build_slug>/output/<filename>',
           methods=('GET',))
def build_output_view(job_slug, build_slug, filename):
    """
    View to download some build output

    Streams a file from the build's output directory; the log of the
    latest build stage is tailed (re-read as it grows) instead of being
    served once.
    """
    job = Job(slug=job_slug)
    build = Build(job=job, slug=build_slug)
    # TODO possible security issue opending files from user input like this
    # (filename is user-controlled; a '..' path component could escape the
    # build output directory — needs sanitizing)
    data_file_path = os.path.join(*build.build_output_path() + [filename])
    if not os.path.isfile(data_file_path):
        abort(404)
    def loader():
        """
        Generator to stream the log file, tailing the latest stage's log
        """
        with open(data_file_path, 'rb') as handle:
            while True:
                data = handle.read(1024)
                yield data
                # NOTE(review): the build.state check is deliberately disabled
                # (temporary streaming mechanism), so this branch never stops
                # streaming the last stage's log — the response only ends when
                # the client disconnects. Confirm before relying on it.
                #if build.state == 'running' and filename == "%s.log" % build.build_stage_slugs[-1]:
                if filename == "%s.log" % build.build_stage_slugs[-1]:
                    # NOTE(review): select() on a regular file reports it as
                    # always readable on most platforms, so the 2s timeout
                    # likely never applies — presumably meant as a poll delay.
                    select.select((handle,), (), (), 2)
                    build.load()
                elif len(data) == 0:
                    return
    mimetype, _ = mimetypes.guess_type(filename)
    if mimetype is None:
        mimetype = 'application/octet-stream'
    return Response(loader(), mimetype=mimetype)
| """
Views related to build management
"""
import logging
import mimetypes
import os.path
import re
from flask import (abort,
flash,
redirect,
render_template,
request,
Response,
url_for,
)
from dockci.models.build import Build
from dockci.models.job import Job
from dockci.server import APP
from dockci.util import is_valid_github
@APP.route('/jobs/<job_slug>/builds/<build_slug>', methods=('GET',))
def build_view(job_slug, build_slug):
"""
View to display a build
"""
job = Job(slug=job_slug)
build = Build(job=job, slug=build_slug)
return render_template('build.html', build=build)
@APP.route('/jobs/<job_slug>/builds/new', methods=('GET', 'POST'))
def build_new_view(job_slug):
"""
View to create a new build
"""
job = Job(slug=job_slug)
if request.method == 'POST':
build = Build(job=job)
build.repo = job.repo
build_url = url_for('build_view',
job_slug=job_slug,
build_slug=build.slug)
if 'X-Github-Event' in request.headers:
if not job.github_secret:
logging.warn("GitHub webhook secret not setup")
abort(403)
if not is_valid_github(job.github_secret):
logging.warn("Invalid GitHub payload")
abort(403)
if request.headers['X-Github-Event'] == 'push':
push_data = request.json
build.commit = push_data['head_commit']['id']
else:
logging.debug("Unknown GitHub hook '%s'",
request.headers['X-Github-Event'])
abort(501)
build.save()
build.queue()
return build_url, 201
else:
build.commit = request.form['commit']
if not re.match(r'[a-fA-F0-9]{1,40}', request.form['commit']):
flash(u"Invalid git commit hash", 'danger')
return render_template('build_new.html', build=build)
build.save()
build.queue()
flash(u"Build queued", 'success')
return redirect(build_url, 303)
return render_template('build_new.html', build=Build(job=job))
@APP.route('/jobs/<job_slug>/builds/<build_slug>/output/<filename>',
           methods=('GET',))
def build_output_view(job_slug, build_slug, filename):
    """
    View to download some build output

    Streams the named file from the build's output directory in 1KiB
    chunks until EOF.
    """
    job = Job(slug=job_slug)
    build = Build(job=job, slug=build_slug)
    # TODO possible security issue opending files from user input like this
    # (filename is user-controlled; a '..' path component could escape the
    # build output directory — needs sanitizing)
    data_file_path = os.path.join(*build.build_output_path() + [filename])
    if not os.path.isfile(data_file_path):
        abort(404)
    def loader():
        """
        Generator to stream the log file until EOF
        """
        with open(data_file_path, 'rb') as handle:
            while True:
                data = handle.read(1024)
                yield data
                if len(data) == 0:
                    return
    mimetype, _ = mimetypes.guess_type(filename)
    if mimetype is None:
        mimetype = 'application/octet-stream'
    return Response(loader(), mimetype=mimetype)
| Python | 0 |
9bc8b9967631064821112b5c7ff3b65fb0b176f6 | Fix wrong column name in db migration script of ryu plugin | neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py | neutron/db/migration/alembic_migrations/versions/5a875d0e5c_ryu.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
"""ryu
This retroactively provides migration support for
https://review.openstack.org/#/c/11204/
Revision ID: 5a875d0e5c
Revises: 2c4af419145b
Create Date: 2012-12-18 12:32:04.482477
"""
# revision identifiers, used by Alembic.
revision = '5a875d0e5c'
down_revision = '2c4af419145b'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
    """Create the Ryu tunnel-key tables: tunnelkeys (per-network key) and
    tunnelkeylasts (last allocated key). No-op for other plugins."""
    if not migration.should_run(active_plugin, migration_for_plugins):
        return
    op.create_table(
        'tunnelkeys',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        sa.Column('tunnel_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('tunnel_key')
    )
    op.create_table(
        'tunnelkeylasts',
        sa.Column('last_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.PrimaryKeyConstraint('last_key')
    )
def downgrade(active_plugin=None, options=None):
    """Drop the Ryu tunnel-key tables (reverse of upgrade)."""
    if not migration.should_run(active_plugin, migration_for_plugins):
        return
    op.drop_table('tunnelkeylasts')
    op.drop_table('tunnelkeys')
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# @author: Mark McClain, DreamHost
"""ryu
This retroactively provides migration support for
https://review.openstack.org/#/c/11204/
Revision ID: 5a875d0e5c
Revises: 2c4af419145b
Create Date: 2012-12-18 12:32:04.482477
"""
# revision identifiers, used by Alembic.
revision = '5a875d0e5c'
down_revision = '2c4af419145b'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.ryu.ryu_neutron_plugin.RyuNeutronPluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugin=None, options=None):
    """Create the Ryu tunnel-key tables: tunnelkeys (per-network key) and
    tunnelkeylasts (last allocated key). No-op for other plugins."""
    if not migration.should_run(active_plugin, migration_for_plugins):
        return
    op.create_table(
        'tunnelkeys',
        sa.Column('network_id', sa.String(length=36), nullable=False),
        # Fixed wrong column name: the per-network key column is
        # 'tunnel_key' (matching the plugin's TunnelKey model);
        # 'last_key' belongs only to the tunnelkeylasts table below.
        sa.Column('tunnel_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.ForeignKeyConstraint(['network_id'], ['networks.id'],
                                ondelete='CASCADE'),
        sa.PrimaryKeyConstraint('tunnel_key')
    )
    op.create_table(
        'tunnelkeylasts',
        sa.Column('last_key', sa.Integer(), autoincrement=False,
                  nullable=False),
        sa.PrimaryKeyConstraint('last_key')
    )
def downgrade(active_plugin=None, options=None):
if not migration.should_run(active_plugin, migration_for_plugins):
return
op.drop_table('tunnelkeylasts')
op.drop_table('tunnelkeys')
| Python | 0 |
687681724202949105a476254f7a122171b2cf3e | Update existing objects on reimport when the ID is the same. | open511/management/commands/import_xml_roadevents.py | open511/management/commands/import_xml_roadevents.py | import datetime
import logging
import sys
from django.contrib.gis.geos import fromstr as geos_geom_from_string
from django.core.management.base import BaseCommand, CommandError
from lxml import etree
from open511.models import RoadEvent
from open511.utils.postgis import gml_to_ewkt
from open511.utils.serialization import ELEMENTS
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Import RoadEvents from an Open511 XML document (Python 2 code)."""
    # Map XML tag name -> RoadEvent model field name
    element_lookup = dict(
        (e[1], e[0]) for e in ELEMENTS
    )
    def handle(self, filename = sys.stdin, **options):
        """Parse the XML and create or update one RoadEvent per <RoadEvent>,
        keyed on the source id so reimports update in place."""
        root = etree.parse(filename).getroot()
        assert root.tag == 'Open511'
        created = []
        for event in root.xpath('RoadEvent'):
            try:
                source_id = event.get('id')
                # Update an existing event with the same id, else create one
                try:
                    rdev = RoadEvent.objects.get(source_id=source_id)
                except RoadEvent.DoesNotExist:
                    rdev = RoadEvent(source_id=source_id)
                logger.info("Importing event %s" % rdev.source_id)
                # Convention: jurisdiction is the prefix before ':' in the id
                rdev.jurisdiction = rdev.source_id.split(':')[0]
                for event_el in event:
                    if event_el.tag in self.element_lookup:
                        setattr(rdev, self.element_lookup[event_el.tag], event_el.text)
                    elif event_el.tag == 'Geometry':
                        # GML child element -> EWKT -> GEOS geometry
                        gml = etree.tostring(event_el[0])
                        ewkt = gml_to_ewkt(gml, force_2D=True)
                        rdev.geom = geos_geom_from_string(ewkt)
                    else:
                        logger.warning("Unknown tag: %s" % etree.tostring(event_el))
                if isinstance(rdev.start_date, basestring):
                    rdev.start_date = _str_to_date(rdev.start_date)
                if isinstance(rdev.end_date, basestring):
                    rdev.end_date = _str_to_date(rdev.end_date)
                rdev.save()
                created.append(rdev)
            except ValueError as e:
                # Skip the bad record but keep importing the rest
                logger.error("ValueError importing %s: %s" % (e, rdev.source_id))
        print "%s entries imported." % len(created)
def _str_to_date(s):
"""2012-02-12 to a datetime.date object"""
return datetime.date(*[
int(x) for x in s.split('-')
])
| import datetime
import logging
import sys
from django.contrib.gis.geos import fromstr as geos_geom_from_string
from django.core.management.base import BaseCommand, CommandError
from lxml import etree
from open511.models import RoadEvent
from open511.utils.postgis import gml_to_ewkt
from open511.utils.serialization import ELEMENTS
logger = logging.getLogger(__name__)
class Command(BaseCommand):
element_lookup = dict(
(e[1], e[0]) for e in ELEMENTS
)
def handle(self, filename = sys.stdin, **options):
root = etree.parse(filename).getroot()
assert root.tag == 'Open511'
created = []
for event in root.xpath('RoadEvent'):
try:
rdev = RoadEvent()
rdev.source_id = event.get('id')
logger.info("Importing event %s" % rdev.source_id)
rdev.jurisdiction = rdev.source_id.split(':')[0]
for event_el in event:
if event_el.tag in self.element_lookup:
setattr(rdev, self.element_lookup[event_el.tag], event_el.text)
elif event_el.tag == 'Geometry':
gml = etree.tostring(event_el[0])
ewkt = gml_to_ewkt(gml, force_2D=True)
rdev.geom = geos_geom_from_string(ewkt)
else:
logger.warning("Unknown tag: %s" % etree.tostring(event_el))
if isinstance(rdev.start_date, basestring):
rdev.start_date = _str_to_date(rdev.start_date)
if isinstance(rdev.end_date, basestring):
rdev.end_date = _str_to_date(rdev.end_date)
rdev.save()
created.append(rdev)
except ValueError as e:
logger.error("ValueError importing %s: %s" % (e, rdev.source_id))
print "%s entries imported." % len(created)
def _str_to_date(s):
"""2012-02-12 to a datetime.date object"""
return datetime.date(*[
int(x) for x in s.split('-')
])
| Python | 0 |
06f7f0b5d45a4349ee688aaac86b57c74ad0f76c | FIX geocoder model | partner_compassion/models/base_geocoder.py | partner_compassion/models/base_geocoder.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from odoo import models
_logger = logging.getLogger(__name__)
class Geocoder(models.AbstractModel):
    """Extend Odoo's base.geocoder to log instead of raising on network errors."""
    _inherit = 'base.geocoder'
    def _raise_internet_access_error(self, error):
        # Don't raise error - geolocation failures should not block the
        # calling flow; just record them.
        _logger.error(
            "Cannot contact geolocation servers. Please make sure that your "
            "Internet connection is up and running (%s).", error)
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2019 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __manifest__.py
#
##############################################################################
import logging
from odoo import models
_logger = logging.getLogger(__name__)
# base.geocoder is an AbstractModel in Odoo; extending it through
# models.Model would register a concrete (table-backed) model, so the
# extension must itself be an AbstractModel.
class Geocoder(models.AbstractModel):
    """Extend Odoo's base.geocoder to log instead of raising on network errors."""
    _inherit = 'base.geocoder'
    def _raise_internet_access_error(self, error):
        # Don't raise error - geolocation failures should not block the
        # calling flow; just record them.
        _logger.error(
            "Cannot contact geolocation servers. Please make sure that your "
            "Internet connection is up and running (%s).", error)
| Python | 0.000001 |
f94a85609100b4a77cc8352838cfc110d6033fed | Fix var set/unset help messages, and scale. | dotcloud/ui/parser.py | dotcloud/ui/parser.py | import argparse
from .version import VERSION
def get_parser(name='dotcloud'):
    """Build and return the argparse parser for the dotcloud CLI.

    ``name`` is used as the program name in help/usage output.
    """
    parser = argparse.ArgumentParser(prog=name, description='dotcloud CLI')
    parser.add_argument('--application', '-A', help='specify the application')
    parser.add_argument('--environment', '-E', help='specify the environment')
    parser.add_argument('--version', '-v', action='version', version='dotcloud/{0}'.format(VERSION))
    parser.add_argument('--trace', action='store_true', help='Display trace ID')
    subcmd = parser.add_subparsers(dest='cmd')
    subcmd.add_parser('list', help='list applications')
    subcmd.add_parser('version', help='show version')
    check = subcmd.add_parser('check', help='Check the installation and authentication')
    setup = subcmd.add_parser('setup', help='Setup the client authentication')
    create = subcmd.add_parser('create', help='Create a new application')
    create.add_argument('application', help='specify the application')
    conn = subcmd.add_parser('connect', help='Connect a local directory with an existing app')
    conn.add_argument('application', help='specify the application')
    destroy = subcmd.add_parser('destroy', help='Destroy an existing app')
    destroy.add_argument('service', nargs='?', help='Specify the service')
    disconnect = subcmd.add_parser('disconnect', help='Disconnect the current directory from DotCloud app')
    app = subcmd.add_parser('app', help='Show the application name linked to the current directory')
    info = subcmd.add_parser('info', help='Get information about the application')
    info.add_argument('service', nargs='?', help='Specify the service')
    url = subcmd.add_parser('url', help='Show URL for the application')
    url.add_argument('service', nargs='?', help='Specify the service')
    ssh = subcmd.add_parser('ssh', help='SSH into the service')
    ssh.add_argument('service', help='Specify the service')
    run = subcmd.add_parser('run', help='SSH into the service')
    run.add_argument('service', help='Specify the service')
    run.add_argument('command', nargs='+', help='Run a command on the service')
    env = subcmd.add_parser('env', help='Manipulate application environments') \
        .add_subparsers(dest='subcmd')
    env_show = env.add_parser('show', help='Show the current environment')
    env_list = env.add_parser('list', help='List the environments')
    env_create = env.add_parser('create', help='Create a new environment')
    env_create.add_argument('name', help='Name of the new environment')
    env_destroy = env.add_parser('destroy', help='Destroy an environment')
    env_destroy.add_argument('name', help='Name of the environment to destroy')
    env_switch = env.add_parser('switch', help='Switch to an environment')
    env_switch.add_argument('name', help='Name of the environment')
    push = subcmd.add_parser('push', help='Push the code')
    push.add_argument('--clean', action='store_true', help='clean build')
    var = subcmd.add_parser('var', help='Manipulate application variables') \
        .add_subparsers(dest='subcmd')
    var_list = var.add_parser('list', help='List the application variables')
    var_set = var.add_parser('set', help='Set new application variables')
    var_set.add_argument('values', help='Application variables to set',
                         metavar='key=value', nargs='*')
    var_unset = var.add_parser('unset', help='Unset application variables')
    # Fixed typo in user-facing help text ("ariables" -> "variables")
    var_unset.add_argument('variables', help='Application variables to unset', metavar='var', nargs='*')
    scale = subcmd.add_parser('scale', help='Scale services')
    scale.add_argument('services', nargs='*', metavar='service=count',
                       help='Number of instances to set for each service e.g. www=2')
    restart = subcmd.add_parser('restart', help='Restart the service')
    restart.add_argument('service', help='Specify the service')
    alias = subcmd.add_parser('alias', help='Manage aliases for the service')
    alias.add_argument('commands', nargs='*')
    return parser
| import argparse
from .version import VERSION
def get_parser(name='dotcloud'):
    """Build and return the argparse parser for the dotcloud CLI.

    ``name`` is used as the program name in help/usage output.
    """
    parser = argparse.ArgumentParser(prog=name, description='dotcloud CLI')
    parser.add_argument('--application', '-A', help='specify the application')
    parser.add_argument('--environment', '-E', help='specify the environment')
    parser.add_argument('--version', '-v', action='version', version='dotcloud/{0}'.format(VERSION))
    parser.add_argument('--trace', action='store_true', help='Display trace ID')
    subcmd = parser.add_subparsers(dest='cmd')
    subcmd.add_parser('list', help='list applications')
    subcmd.add_parser('version', help='show version')
    check = subcmd.add_parser('check', help='Check the installation and authentication')
    setup = subcmd.add_parser('setup', help='Setup the client authentication')
    create = subcmd.add_parser('create', help='Create a new application')
    create.add_argument('application', help='specify the application')
    conn = subcmd.add_parser('connect', help='Connect a local directory with an existing app')
    conn.add_argument('application', help='specify the application')
    destroy = subcmd.add_parser('destroy', help='Destroy an existing app')
    destroy.add_argument('service', nargs='?', help='Specify the service')
    disconnect = subcmd.add_parser('disconnect', help='Disconnect the current directory from DotCloud app')
    app = subcmd.add_parser('app', help='Show the application name linked to the current directory')
    info = subcmd.add_parser('info', help='Get information about the application')
    info.add_argument('service', nargs='?', help='Specify the service')
    url = subcmd.add_parser('url', help='Show URL for the application')
    url.add_argument('service', nargs='?', help='Specify the service')
    ssh = subcmd.add_parser('ssh', help='SSH into the service')
    ssh.add_argument('service', help='Specify the service')
    run = subcmd.add_parser('run', help='SSH into the service')
    run.add_argument('service', help='Specify the service')
    run.add_argument('command', nargs='+', help='Run a command on the service')
    env = subcmd.add_parser('env', help='Manipulate application environments') \
        .add_subparsers(dest='subcmd')
    env_show = env.add_parser('show', help='Show the current environment')
    env_list = env.add_parser('list', help='List the environments')
    env_create = env.add_parser('create', help='Create a new environment')
    env_create.add_argument('name', help='Name of the new environment')
    env_destroy = env.add_parser('destroy', help='Destroy an environment')
    env_destroy.add_argument('name', help='Name of the environment to destroy')
    env_switch = env.add_parser('switch', help='Switch to an environment')
    env_switch.add_argument('name', help='Name of the environment')
    push = subcmd.add_parser('push', help='Push the code')
    push.add_argument('--clean', action='store_true', help='clean build')
    var = subcmd.add_parser('var', help='Manipulate application variables') \
        .add_subparsers(dest='subcmd')
    var_list = var.add_parser('list', help='List the application variables')
    var_set = var.add_parser('set', help='Set new application variables')
    # metavar documents the expected key=value form in --help output
    var_set.add_argument('values', help='Application variables to set',
                         metavar='key=value', nargs='*')
    var_unset = var.add_parser('unset', help='Unset application variables')
    # nargs='*' so several variables can be unset at once; help text typo fixed
    var_unset.add_argument('variables', help='Application variables to unset', metavar='var', nargs='*')
    scale = subcmd.add_parser('scale', help='Scale services')
    # metavar/help document the service=count form in --help output
    scale.add_argument('services', nargs='*', metavar='service=count',
                       help='Number of instances to set for each service e.g. www=2')
    restart = subcmd.add_parser('restart', help='Restart the service')
    restart.add_argument('service', help='Specify the service')
    alias = subcmd.add_parser('alias', help='Manage aliases for the service')
    alias.add_argument('commands', nargs='*')
    return parser
| Python | 0 |
bf684f5a2a688739ccc195a125eb376997084f96 | remove leftover code | osspeak/recognition/actions/pyexpr.py | osspeak/recognition/actions/pyexpr.py | import ast
import re
def varrepl(_, num):
    """Translate a ``$N`` token into a ``result.vars.get(...)`` lookup.

    ``num`` is 1-based in the source text, so positive values are shifted
    down to the 0-based index used by ``result.vars``; zero and negative
    values pass through unchanged.
    """
    index = int(num)
    index = index - 1 if index > 0 else index
    return 'result.vars.get({})'.format(index)
# Raw (before, after) regex fragments mapped to their repair callback.
# Each pair describes the text immediately before and after a parse error.
error_handler_strings = {
    (r'\$', r'-?\d+'): varrepl
}
# Compiled form: before-patterns are anchored to the end of the preceding
# text, after-patterns to the start of the following text, each capturing
# (surrounding text, matched error token) for the handler.
error_handlers = {}
for (before_pattern, after_pattern), handler in error_handler_strings.items():
    before_pattern = None if before_pattern is None else re.compile(f'(.*)({before_pattern})$')
    after_pattern = None if after_pattern is None else re.compile(f'({after_pattern})(.*)')
    error_handlers[(before_pattern, after_pattern)] = handler
def compile_python_expressions(input_string, validator=lambda expr: True, raise_on_error=True):
    """Split ``input_string`` into parseable Python expression fragments.

    Repeatedly takes the longest leading parse (greedy_parse); on a parse
    failure, tries to rewrite the text around the error via the registered
    error handlers and continues. If no handler applies, either re-raises
    (when ``raise_on_error``) or stops. Adjacent fragments that together
    form a valid expression are merged before returning.
    """
    expressions = []
    remaining_text = input_string
    while remaining_text:
        try:
            expr_text, remaining_text = greedy_parse(remaining_text, validator)
        except Exception as e:
            # NOTE(review): uses e.offset, which presumably assumes a
            # SyntaxError from greedy_parse — any other exception type
            # would raise AttributeError here. Confirm the intent.
            remaining_text = handle_parse_error(remaining_text[:e.offset], remaining_text[e.offset:])
            if remaining_text is None:
                # No handler could repair the text around the error
                if raise_on_error:
                    raise e
                break
        else:
            expressions.append(expr_text)
    expressions = merge_expressions(expressions)
    return expressions
def merge_expressions(expressions):
    """Greedily glue adjacent fragments back into whole expressions.

    Starting at each fragment, the longest run of following fragments whose
    concatenation parses as a single Python expression is merged into one
    entry; a fragment that cannot be extended is kept as-is.
    """
    merged = []
    start = 0
    total = len(expressions)
    while start < total:
        candidate = expressions[start]
        accepted = candidate
        resume = start + 1
        position = start + 1
        while position < total:
            candidate += expressions[position]
            try:
                ast.parse(candidate, mode='eval')
            except SyntaxError:
                # Not (yet) a valid expression; keep extending anyway,
                # a later fragment may complete it.
                pass
            else:
                accepted = candidate
                resume = position + 1
            position += 1
        merged.append(accepted)
        start = resume
    return merged
def handle_parse_error(before, after):
    """Try to repair the text around a parse error using error_handlers.

    ``before``/``after`` are the text preceding and following the error
    position. Returns the repaired full string, or None (implicitly) when
    no handler's patterns match.
    """
    for (before_pattern, after_pattern), handler in error_handlers.items():
        # NOTE(review): 'end' is assigned but never used — confirm leftover.
        start, end = before, after
        before_error_text, after_error_text = None, None
        if before_pattern:
            bmatch = before_pattern.match(before)
            if not bmatch:
                # NOTE(review): break aborts ALL handlers on the first
                # mismatch — with more than one registered handler this
                # presumably should be 'continue'. Currently harmless as
                # only one handler exists.
                break
            start, before_error_text = bmatch.group(1), bmatch.group(2)
        if after_pattern:
            amatch = after_pattern.match(after)
            if not amatch:
                break
            after_error_text, after = amatch.group(1), amatch.group(2)
        # First matching handler wins; splice its replacement into place
        return start + handler(before_error_text, after_error_text) + after
def greedy_parse(s, validator):
assert s
last_error = None
expr_text = None
remaining_text = None
seen_string = ''
for char in s:
seen_string += char
try:
expr = ast.parse(seen_string, mode='eval')
except SyntaxError as e:
last_error = e
else:
if not validator(expr):
remaining_text = None
break
expr_text = seen_string
remaining_text = s[len(seen_string):]
if expr_text is None:
raise last_error
return expr_text, remaining_text | import ast
import re
def varrepl(_, num):
num = int(num)
if num > 0:
num -= 1
return f'result.vars.get({num})'
error_handler_strings = {
(r'\$', r'-?\d+'): varrepl
}
error_handlers = {}
for (before_pattern, after_pattern), handler in error_handler_strings.items():
before_pattern = None if before_pattern is None else re.compile(f'(.*)({before_pattern})$')
after_pattern = None if after_pattern is None else re.compile(f'({after_pattern})(.*)')
error_handlers[(before_pattern, after_pattern)] = handler
def compile_python_expressions(input_string, validator=lambda expr: True, raise_on_error=True):
expressions = []
remaining_text = input_string
while remaining_text:
try:
expr_text, remaining_text = greedy_parse(remaining_text, validator)
except Exception as e:
remaining_text = handle_parse_error(remaining_text[:e.offset], remaining_text[e.offset:])
if remaining_text is None:
if raise_on_error:
raise e
break
else:
expressions.append(expr_text)
expressions = merge_expressions(expressions)
return expressions
def merge_expressions(expressions):
merged = []
i = 0
while i < len(expressions):
next_i = i + 1
merged_expr = expressions[i]
try_parse_expr = merged_expr
for j, expr in enumerate(expressions[i+1:], start=i+1):
try_parse_expr += expr
try:
ast.parse(try_parse_expr, mode='eval')
merged_expr = try_parse_expr
next_i = j + 1
except:
pass
merged.append(merged_expr)
i = next_i
return merged
for expr in expressions:
if not merged:
merged.append(expr)
else:
merged_expr = merged[-1] + expr
try:
ast.parse(merged_expr, mode='eval')
except:
merged.append(expr)
else:
merged[-1] = merged_expr
return merged
def handle_parse_error(before, after):
for (before_pattern, after_pattern), handler in error_handlers.items():
start, end = before, after
before_error_text, after_error_text = None, None
if before_pattern:
bmatch = before_pattern.match(before)
if not bmatch:
break
start, before_error_text = bmatch.group(1), bmatch.group(2)
if after_pattern:
amatch = after_pattern.match(after)
if not amatch:
break
after_error_text, after = amatch.group(1), amatch.group(2)
return start + handler(before_error_text, after_error_text) + after
def greedy_parse(s, validator):
assert s
last_error = None
expr_text = None
remaining_text = None
seen_string = ''
for char in s:
seen_string += char
try:
expr = ast.parse(seen_string, mode='eval')
except SyntaxError as e:
last_error = e
else:
if not validator(expr):
remaining_text = None
break
expr_text = seen_string
remaining_text = s[len(seen_string):]
if expr_text is None:
raise last_error
return expr_text, remaining_text | Python | 0.001174 |
397f33adb5cafaeda3de624dc9dd1bb24d0b65e5 | remove dup line | MOAL/maths/applied/optimization/strength_reduction.py | MOAL/maths/applied/optimization/strength_reduction.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.trials import test_speed
DEBUG = True if __name__ == '__main__' else False
"""Strength reduction is an optimization technique that involves taking
a more 'advanced' math technique and breaking it down into simple 'dumb' tasks
that can be repeated.
For example, multiplication can be converted to lots of addition."""
@test_speed
def exp(x, power):
# 4^3
# ... 4 * 4 * 4
res = x
for num in [x] * power:
res = num * res
return res
@test_speed
def strengthreduced_exp(x, power):
# Replaces an exponential operation with a multiplication + addition
# 4^3 = 64
# ... 4 * 4 * 4 = 64
# ... 2 + 2 + 2 + 2 + 2 ... (32 times) = 64
res = x
for num in [x] * power:
res = strengthreduced_mult(num, res)
return res
@test_speed
def mult(x, y):
return x * y
@test_speed
def strengthreduced_mult(x, y):
# 2 * 4 = 8
# ... 2 + 2 + 2 + 2 = 8
res = 0
for f in xrange(y):
res += x
return res
if DEBUG:
with Section('Optimization - strength reduction'):
# This is slower since the native multiplication is much much faster
# than looping, but it demonstrates the idea.
max = 200
f, g = exp(4, 2), strengthreduced_exp(4, 2)
assert f == g
print(f, g)
f2, g2 = mult(2, 4), strengthreduced_mult(2, 4)
assert f2 == g2
print(f2, g2)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.trials import test_speed
DEBUG = True if __name__ == '__main__' else False
"""Strength reduction is an optimization technique that involves taking
a more 'advanced' math technique and breaking it down into simple 'dumb' tasks
that can be repeated.
For example, multiplication can be converted to lots of addition."""
@test_speed
def exp(x, power):
# 4^3
# ... 4 * 4 * 4
res = x
for num in [x] * power:
res = num * res
return res
@test_speed
def strengthreduced_exp(x, power):
# Replaces an exponential operation with a multiplication + addition
# 4^3 = 64
# ... 4 * 4 * 4 = 64
# ... 2 + 2 + 2 + 2 + 2 ... (32 times) = 64
res = x
for num in [x] * power:
res = strengthreduced_mult(num, res)
return res
@test_speed
def mult(x, y):
return x * y
@test_speed
def strengthreduced_mult(x, y):
# 2 * 4 = 8
# ... 2 + 2 + 2 + 2 = 8
res = 0
for f in xrange(y):
res += x
return res
if DEBUG:
with Section('Optimization - strength reduction'):
# This is slower since the native multiplication is much much faster
# than looping, but it demonstrates the idea.
max = 200
f, g = exp(4, 2), strengthreduced_exp(4, 2)
assert f == g
print(f, g)
f2, g2 = mult(2, 4), strengthreduced_mult(2, 4)
assert f2 == g2
print(f2, g2)
| Python | 0.000001 |
3f2f069e1c22ee88afb67ef68164046222a009e3 | Create a error class for the API client | drydock_provisioner/error.py | drydock_provisioner/error.py | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class DesignError(Exception):
pass
class StateError(Exception):
pass
class OrchestratorError(Exception):
pass
class TransientOrchestratorError(OrchestratorError):
pass
class PersistentOrchestratorError(OrchestratorError):
pass
class DriverError(Exception):
pass
class TransientDriverError(DriverError):
pass
class PersistentDriverError(DriverError):
pass
class ApiError(Exception):
def __init__(self, msg, code=500):
super().__init__(msg)
self.message = msg
self.status_code = code
def to_json(self):
err_dict = {'error': msg, 'type': self.__class__.__name__}
return json.dumps(err_dict)
class InvalidFormat(ApiError):
def __init__(self, msg, code=400):
super(InvalidFormat, self).__init__(msg, code=code)
class ClientError(Exception):
def __init__(self, msg, code=500):
super().__init__(msg)
self.message = msg
self.status_code = code
| # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
class DesignError(Exception):
pass
class StateError(Exception):
pass
class OrchestratorError(Exception):
pass
class TransientOrchestratorError(OrchestratorError):
pass
class PersistentOrchestratorError(OrchestratorError):
pass
class DriverError(Exception):
pass
class TransientDriverError(DriverError):
pass
class PersistentDriverError(DriverError):
pass
class ApiError(Exception):
def __init__(self, msg, code=500):
super().__init__(msg)
self.message = msg
self.status_code = code
def to_json(self):
err_dict = {'error': msg, 'type': self.__class__.__name__}}
return json.dumps(err_dict)
class InvalidFormat(ApiError):
def __init__(self, msg, code=400):
super(InvalidFormat, self).__init__(msg, code=code)
| Python | 0 |
7c5018ca5d4edd85990aea7889cd767e059f55b2 | modify logging level. | engine/scheduler/__init__.py | engine/scheduler/__init__.py | # -*- coding: utf-8 -*-
#
# dp for Tornado
# YoungYong Park (youngyongpark@gmail.com)
# 2015.03.09
#
import os
import time
import threading
import tornado.options
import engine.scheduler.tornado_subprocess
from ..engine import Engine as dpEngine
try:
from croniter import croniter
except:
croniter = None
class Scheduler(threading.Thread, dpEngine):
def __init__(self, schedules):
self.interrupted = False
self.schedules = []
self.path = os.path.dirname(os.path.realpath(__file__))
self.path = os.path.dirname(self.path)
self.path = os.path.dirname(self.path)
self.path = os.path.join(self.path, 'scheduler.py')
self.python = tornado.options.options.python
self.ts = self.helper.datetime.time()
self.reference_count = 0
for e in schedules:
i = e[2] if len(e) >= 3 and isinstance(e[2], int) else 1
for i in range(i):
s = e[0] if isinstance(e[0], int) else croniter(e[0], self.ts)
self.schedules.append({
'c': e[1],
's': s,
'n': self.ts + 5 if isinstance(e[0], int) else s.get_next()
})
threading.Thread.__init__(self)
def run(self):
if not self.schedules:
return
while not self.interrupted:
ts = self.helper.datetime.time()
for e in self.schedules:
if ts >= e['n']:
try:
e['n'] = ts + e['s'] if isinstance(e['s'], int) else e['s'].get_next()
args = [self.python, self.path, e['c']]
self.reference_count += 1
h = SchedulerHandler()
h.attach(args=args, timeout=0, ref=self.reference_count)
except Exception as e:
self.logging.exception(e)
time.sleep(2)
class SchedulerHandler(dpEngine):
args = None
ref = 0
def on_done(self, status, stdout, stderr, has_timed_out):
if has_timed_out:
self.logging.error('Scheduler done with timed out [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
if stdout:
self.logging.error(stdout)
if stderr:
self.logging.error(stderr)
return
if stdout:
self.logging.info('Scheduler done with stdout [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
self.logging.info(stdout)
return
if stderr:
self.logging.error('Scheduler done with stderr [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
self.logging.error(stderr)
return
self.logging.info('Scheduler done [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
def attach(self, args, timeout=0, ref=None):
self.args = args
self.ref = ref
self.logging.info('Scheduler attach [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
tornado_subprocess.Subprocess(
callback=self.on_done,
timeout=timeout or 3600*24*7,
args=self.args).start()
class Processor(dpEngine):
def run(self):
raise NotImplementedError
| # -*- coding: utf-8 -*-
#
# dp for Tornado
# YoungYong Park (youngyongpark@gmail.com)
# 2015.03.09
#
import os
import time
import threading
import tornado.options
import engine.scheduler.tornado_subprocess
from ..engine import Engine as dpEngine
try:
from croniter import croniter
except:
croniter = None
class Scheduler(threading.Thread, dpEngine):
def __init__(self, schedules):
self.interrupted = False
self.schedules = []
self.path = os.path.dirname(os.path.realpath(__file__))
self.path = os.path.dirname(self.path)
self.path = os.path.dirname(self.path)
self.path = os.path.join(self.path, 'scheduler.py')
self.python = tornado.options.options.python
self.ts = self.helper.datetime.time()
self.reference_count = 0
for e in schedules:
i = e[2] if len(e) >= 3 and isinstance(e[2], int) else 1
for i in range(i):
s = e[0] if isinstance(e[0], int) else croniter(e[0], self.ts)
self.schedules.append({
'c': e[1],
's': s,
'n': self.ts + 5 if isinstance(e[0], int) else s.get_next()
})
threading.Thread.__init__(self)
def run(self):
if not self.schedules:
return
while not self.interrupted:
ts = self.helper.datetime.time()
for e in self.schedules:
if ts >= e['n']:
try:
e['n'] = ts + e['s'] if isinstance(e['s'], int) else e['s'].get_next()
args = [self.python, self.path, e['c']]
self.reference_count += 1
h = SchedulerHandler()
h.attach(args=args, timeout=0, ref=self.reference_count)
except Exception as e:
self.logging.exception(e)
time.sleep(2)
class SchedulerHandler(dpEngine):
args = None
ref = 0
def on_done(self, status, stdout, stderr, has_timed_out):
if has_timed_out:
self.logging.info('Scheduler done with timed out [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
self.logging.info(stdout)
self.logging.info(stderr)
return
if stdout:
self.logging.info('Scheduler done with stdout [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
self.logging.info(stdout)
return
if stderr:
self.logging.info('Scheduler done with stderr [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
self.logging.info(stderr)
return
self.logging.info('Scheduler done [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
def attach(self, args, timeout=0, ref=None):
self.args = args
self.ref = ref
self.logging.info('Scheduler attach [%s] (%s)' % (' '.join(self.args[2:]), self.ref))
tornado_subprocess.Subprocess(
callback=self.on_done,
timeout=timeout or 3600*24*7,
args=self.args).start()
class Processor(dpEngine):
def run(self):
raise NotImplementedError
| Python | 0 |
46cd16ff56ff93b2ee8a38363b37c3287c9cb1cc | Update sal checkin module. | payload/usr/local/sal/checkin_modules/sal_checkin.py | payload/usr/local/sal/checkin_modules/sal_checkin.py | #!/usr/local/sal/Python.framework/Versions/3.8/bin/python3
import sal
__version__ = '1.1.0'
def main():
_, _, bu_key = sal.get_server_prefs()
sal_submission = {
'extra_data': {
'sal_version': sal.__version__,
'key': bu_key,},
'facts': {'checkin_module_version': __version__}}
sal.set_checkin_results('Sal', sal_submission)
if __name__ == "__main__":
main()
| #!/usr/local/sal/Python.framework/Versions/3.8/bin/python3
import sys
import sal
__version__ = '1.0.0'
def main():
_, _, bu_key = sal.get_server_prefs()
sal_submission = {
'extra_data': {
'sal_version': sal.__version__,
'key': bu_key,},
'facts': {'checkin_module_version': __version__}}
sal.set_checkin_results('Sal', sal_submission)
if __name__ == "__main__":
main()
| Python | 0 |
55132ff6740b3c70ddb75dcf7c3615aaea0680ac | Fix typo | main/models.py | main/models.py | from django.db import models
from django.contrib.auth.models import User
class VM(models.Model):
user = models.ForeignKey(User, related_name='user', null=False)
vmid = models.PositiveIntegerField()
template = models.CharField(max_length=100)
hostname = models.CharField(max_length=30)
storage = models.CharField(max_length=50)
memory = models.PositiveIntegerField()
swap = models.PositiveIntegerField()
cores = models.PositiveSmallIntegerField()
disk = models.PositiveIntegerField()
description = models.CharField(max_length=200)
ip = models.CharField(max_length=15)
def __unicode__(self):
return u'%s' % self.hostname
class Limits(models.Model):
memory = models.PositiveIntegerField()
swap = models.PositiveIntegerField()
cores = models.PositiveSmallIntegerField()
disk = models.PositiveSmallIntegerField()
| from django.db import models
from django.contrib.auth.models import User
class VM(models.Model):
user = models.ForeignKey(User, related_name='user', null=False)
vmid = models.PositiveIntegerField()
template = models.CharField(max_length=100)
hostname = models.CharField(max_length=30)
storage = models.CharField(max_length=50)
memory = models.PositiveIntegerField()
swap = models.PositiveIntegerField()
cores = models.PositiveSmallIntegerField()
disk = models.PositiveIntegerField()
description = models.CharField(max_length=200)
ip = models.CharField(max_length=15)
def __unicode__(self):
return u'%s' % self.hostname
def Limits(models.Model):
memory = models.PositiveIntegerField()
swap = models.PositiveIntegerField()
cores = models.PositiveSmallIntegerField()
disk = models.PositiveSmallIntegerField()
| Python | 0.999999 |
bddac740c06a1e399179b2cda16ec8fd9556f2e0 | Fix monitoring new files | monitor.py | monitor.py | #!/usr/bin/env python
import sys, os
from pathlib import Path
import time
from multiprocessing import Pool
from functools import partial
import transfer
def get_new_files(folder, init_filelist=None):
if init_filelist is None:
init_filelist = []
return [f for f in folder.glob('**/*.yml')
if f.with_suffix('.dat').is_file() and f not in init_filelist]
def complete_task(fname, dry_run=False):
print('Completed processing for "%d"' % fname, flush=True)
#dest = transfer.replace_basedir(fname, transfer.temp_basedir,
# transfer.local_archive_basedir)
#transfer.filecopy(fname, dest) # filecopy does not have a dry_run arg
def start_monitoring(folder, dry_run=False):
complete_task_local = partial(complete_task, dry_run=dry_run)
title_msg = 'Monitoring %s' % folder.name
print('\n\n%s' % title_msg)
init_filelist = get_new_files(folder)
print('- The following files are present at startup and will be skipped:')
for f in init_filelist:
print(' %s' % f)
print()
with Pool(processes=1) as pool:
try:
while True:
transfer.timestamp()
for i in range(20):
time.sleep(3)
newfiles = get_new_files(folder, init_filelist)
for newfile_yml in newfiles:
newfile_dat = newfile_yml.with_suffix('.dat')
pool.apply_async(transfer.process_int,
(newfile_dat, dry_run),
callback=complete_task_local)
init_filelist += newfiles
except KeyboardInterrupt:
print('\n>>> Got keyboard interrupt.\n', flush=True)
print('Closing subprocess pool.', flush=True)
def batch_process(folder, dry_run=False):
assert folder.is_dir(), 'Path not found: %s' % folder
title_msg = 'Monitoring %s' % folder.name
print('\n\n%s' % title_msg)
init_filelist = get_new_files(folder)
with Pool(processes=4) as pool:
try:
for newfile in newfiles:
pool.apply_async(transfer.process_int, (newfile, dry_run),
callback=copy_log_local)
except KeyboardInterrupt:
print('\n>>> Got keyboard interrupt.\n', flush=True)
print('Closing subprocess pool.', flush=True)
def help():
msg = """\
monitor.py
This script monitors a folder and converts DAT files to Photon-HDF5
if a metadata YAML file is found in the same folder.
USAGE
-----
python monitor.py <folder> [--batch] [--dry-run]
Arguments:
--batch
Process all the DAT/YML files in the folder (batch-mode). Without
this option only new files created after the monitor started are
processed.
--dry-run
No processing (copy, conversion, analysis) is perfomed.
Used for debugging.
"""
print(msg)
if __name__ == '__main__':
args = sys.argv[1:].copy()
if len(args) == 0 or '-h' in args or '--help' in args:
help()
os.exit(0)
msg = '1 to 3 command-line arguments expected. Received %d instead.'
assert 1 <= len(args) <= 3, msg % len(args)
dry_run = False
if '--dry-run' in args:
dry_run = True
args.pop(args.index('--dry-run'))
batch = False
if '--batch' in in sys.argv[1:]:
batch = True
args.pop(args.index('--batch'))
assert len(args) == 1
folder = Path(arg[0])
assert folder.is_dir(), 'Path not found: %s' % folder
if batch:
batch_process(folder, dry_run)
else:
start_monitoring(folder, dry_run)
print('Monitor execution end.', flush=True)
| #!/usr/bin/env python
import sys, os
from pathlib import Path
import time
from multiprocessing import Pool
from functools import partial
import transfer
def get_new_files(folder, init_filelist=None):
if init_filelist is None:
init_filelist = []
return [f.with_suffix('.dat') for f in folder.glob('**/*.yml')
if f.with_suffix('.dat').is_file() and f not in init_filelist]
def complete_task(fname, dry_run=False):
print('Completed processing for "%d"' % fname, flush=True)
#dest = transfer.replace_basedir(fname, transfer.temp_basedir,
# transfer.local_archive_basedir)
#transfer.filecopy(fname, dest) # filecopy does not have a dry_run arg
def start_monitoring(folder, dry_run=False):
complete_task_local = partial(complete_task, dry_run=dry_run)
title_msg = 'Monitoring %s' % folder.name
print('\n\n%s' % title_msg)
init_filelist = get_new_files(folder)
print('- The following files are present at startup and will be skipped:')
for f in init_filelist:
print(' %s' % f)
print()
with Pool(processes=1) as pool:
try:
while True:
transfer.timestamp()
for i in range(20):
time.sleep(3)
newfiles = get_new_files(folder, init_filelist)
for newfile in newfiles:
pool.apply_async(transfer.process_int, (newfile, dry_run),
callback=complete_task_local)
init_filelist += newfiles
except KeyboardInterrupt:
print('\n>>> Got keyboard interrupt.\n', flush=True)
print('Closing subprocess pool.', flush=True)
def batch_process(folder, dry_run=False):
assert folder.is_dir(), 'Path not found: %s' % folder
title_msg = 'Monitoring %s' % folder.name
print('\n\n%s' % title_msg)
init_filelist = get_new_files(folder)
with Pool(processes=4) as pool:
try:
for newfile in newfiles:
pool.apply_async(transfer.process_int, (newfile, dry_run),
callback=copy_log_local)
except KeyboardInterrupt:
print('\n>>> Got keyboard interrupt.\n', flush=True)
print('Closing subprocess pool.', flush=True)
def help():
msg = """\
monitor.py
This script monitors a folder and converts DAT files to Photon-HDF5
if a metadata YAML file is found in the same folder.
USAGE
-----
python monitor.py <folder> [--batch] [--dry-run]
Arguments:
--batch
Process all the DAT/YML files in the folder (batch-mode). Without
this option only new files created after the monitor started are
processed.
--dry-run
No processing (copy, conversion, analysis) is perfomed.
Used for debugging.
"""
print(msg)
if __name__ == '__main__':
args = sys.argv[1:].copy()
if len(args) == 0 or '-h' in args or '--help' in args:
help()
os.exit(0)
msg = '1 to 3 command-line arguments expected. Received %d instead.'
assert 1 <= len(args) <= 3, msg % len(args)
dry_run = False
if '--dry-run' in args:
dry_run = True
args.pop(args.index('--dry-run'))
batch = False
if '--batch' in in sys.argv[1:]:
batch = True
args.pop(args.index('--batch'))
assert len(args) == 1
folder = Path(arg[0])
assert folder.is_dir(), 'Path not found: %s' % folder
if batch:
batch_process(folder, dry_run)
else:
start_monitoring(folder, dry_run)
print('Monitor execution end.', flush=True)
| Python | 0.000001 |
b6f54a008cfe1c0a6db06d4f9c23d4699c2ab901 | Update harmonizer.py | intelmq/bots/inputs/openbl/harmonizer.py | intelmq/bots/inputs/openbl/harmonizer.py | from intelmq.lib.bot import Bot, sys
class OpenBLHarmonizerBot(Bot):
def process(self):
event = self.receive_message()
if event:
event.add('feed', 'openbl')
event.add('feed_url', 'http://www.openbl.org/lists/date_all.txt')
ip_value = event.value('reported_ip')
event.add('source_ip', ip_value)
event.add('ip', ip_value)
event.add('type', 'malware') # ???
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = OpenBLHarmonizerBot(sys.argv[1])
bot.start()
| from intelmq.lib.bot import Bot, sys
class OpenBLHarmonizerBot(Bot):
def process(self):
event = self.receive_message()
if event:
event.add('feed', 'openbl')
event.add('feed_url', 'http://www.openbl.org/lists/date_all.txt')
ip_value = event.value('reported_ip')
event.add('source_ip', ip_value)
event.add('ip', ip_value)
event.add('type', 'brute-force') # ???
self.send_message(event)
self.acknowledge_message()
if __name__ == "__main__":
bot = OpenBLHarmonizerBot(sys.argv[1])
bot.start()
| Python | 0 |
93904a11a78d5c58d2baaaa71cb962195becae6e | Change test. | event_track_info/tests/test_track_info.py | event_track_info/tests/test_track_info.py | # -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp.addons.sale_order_create_event.tests.\
test_sale_order_create_event import TestSaleOrderCreateEvent
class TestTrackInfo(TestSaleOrderCreateEvent):
def setUp(self):
super(TestTrackInfo, self).setUp()
self.event_model = self.env['event.event']
self.url = 'www.example.com'
self.planification = 'This is the planification'
self.resolution = 'This is the resolution'
self.html_info = 'This is the html_info'
self.track_template = self.env['product.event.track.template'].create({
'product_id': self.service_product.id,
'sequence': 0,
'name': 'Session 1',
'planification': self.planification,
'resolution': self.resolution,
'html_info': self.html_info,
'url': self.url,
})
def test_sale_order_confirm(self):
self.sale_order2.action_button_confirm()
cond = [('sale_order_line', '=', self.sale_order2.order_line[0].id)]
event = self.event_model.search(cond, limit=1)
self.sale_order2.order_line[0].event_id = event.id
self.sale_order2.action_button_confirm()
for track in self.sale_order2.mapped('order_line.event_id.track_ids'):
if track.url:
self.assertEquals(track.url, self.url)
if track.planification:
self.assertEquals(track.planification, self.planification)
if track.resolution:
self.assertEquals(track.resolution, self.resolution)
| # -*- coding: utf-8 -*-
# © 2016 Oihane Crucelaegui - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp.addons.sale_order_create_event.tests.\
test_sale_order_create_event_by_task import TestSaleOrderCreateEvent
class TestTrackInfo(TestSaleOrderCreateEvent):
def setUp(self):
super(TestTrackInfo, self).setUp()
self.url = 'www.example.com'
self.planification = 'This is the planification'
self.resolution = 'This is the resolution'
self.html_info = 'This is the html_info'
self.track_template = self.env['product.event.track.template'].create({
'product_id': self.service_product.id,
'sequence': 0,
'name': 'Session 1',
'planification': self.planification,
'resolution': self.resolution,
'html_info': self.html_info,
'url': self.url,
})
def test_sale_order_confirm(self):
self.sale_order2.action_button_confirm()
for track in self.sale_order2.mapped('order_line.event_id.track_ids'):
self.assertEquals(track.url, self.url)
self.assertEquals(track.planification, self.planification)
self.assertEquals(track.resolution, self.resolution)
self.assertEquals(track.html_info, self.html_info)
| Python | 0 |
d32f6dcfcc7bbf8f4d9a8d84673635b1345450f6 | Simplify library includes | dnsimple/__init__.py | dnsimple/__init__.py | from dnsimple.client import Client
| Python | 0.000001 | |
9f790ebf51c7e05e09a39bd18f2597410ea0287d | bump version to 0.6.2 | djangoql/__init__.py | djangoql/__init__.py | __version__ = '0.6.2'
| __version__ = '0.6.1'
| Python | 0.000001 |
a3802e18e95d2ba85454d9d45881b53452fb1aa2 | fix build in chroots with older glibc | cerbero/bootstrap/build_tools.py | cerbero/bootstrap/build_tools.py | # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Config, DEFAULT_HOME, Platform, DistroVersion
from cerbero.bootstrap import BootstraperBase
from cerbero.build.oven import Oven
from cerbero.build.cookbook import CookBook
class BuildTools (BootstraperBase):
BUILD_TOOLS = ['automake', 'autoconf', 'm4', 'libtool', 'pkg-config',
'orc-tool', 'gettext-m4', 'gettext-tools']
PLAT_BUILD_TOOLS = {
Platform.DARWIN: ['intltool', 'yasm', 'cmake'],
Platform.WINDOWS: ['intltool', 'yasm', 'cmake'],
}
def __init__(self, config):
BootstraperBase.__init__(self, config)
if self.config.platform == Platform.WINDOWS:
self.BUILD_TOOLS.remove('m4')
self.BUILD_TOOLS.append('gperf')
if self.config.platform == Platform.DARWIN:
self.BUILD_TOOLS.append('gperf')
self.BUILD_TOOLS.insert(0, 'tar')
self.BUILD_TOOLS.insert(0, 'xz')
if self.config.target_platform == Platform.IOS:
self.BUILD_TOOLS.append('gas-preprocessor')
if self.config.platform != Platform.LINUX and\
not self.config.prefix_is_executable():
# For glib-mkenums and glib-genmarshal
self.BUILD_TOOLS.append('glib-tools')
def start(self):
# Use a common prefix for the build tools for all the configurations
# so that it can be reused
config = Config()
os.environ.clear()
os.environ.update(self.config._pre_environ)
config.prefix = self.config.build_tools_prefix
config.build_tools_prefix = self.config.build_tools_prefix
config.sources = self.config.build_tools_sources
config.build_tools_sources = self.config.build_tools_sources
config.cache_file = self.config.build_tools_cache
config.build_tools_cache = self.config.build_tools_cache
config.load()
if not os.path.exists(config.prefix):
os.makedirs(config.prefix)
if not os.path.exists(config.sources):
os.makedirs(config.sources)
config.do_setup_env()
cookbook = CookBook(config)
recipes = self.BUILD_TOOLS
recipes += self.PLAT_BUILD_TOOLS.get(self.config.platform, [])
oven = Oven(recipes, cookbook)
oven.start_cooking()
self.config.do_setup_env()
| # cerbero - a multi-platform build system for Open Source software
# Copyright (C) 2012 Andoni Morales Alastruey <ylatuya@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Library General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Library General Public License for more details.
#
# You should have received a copy of the GNU Library General Public
# License along with this library; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
import os
from cerbero.config import Config, DEFAULT_HOME, Platform, DistroVersion
from cerbero.bootstrap import BootstraperBase
from cerbero.build.oven import Oven
from cerbero.build.cookbook import CookBook
class BuildTools (BootstraperBase):
BUILD_TOOLS = ['automake', 'autoconf', 'm4', 'libtool', 'pkg-config',
'orc-tool', 'gettext-m4', 'gettext-tools']
PLAT_BUILD_TOOLS = {
Platform.DARWIN: ['intltool', 'yasm', 'cmake'],
Platform.WINDOWS: ['intltool', 'yasm', 'cmake'],
}
def __init__(self, config):
BootstraperBase.__init__(self, config)
if self.config.platform == Platform.WINDOWS:
self.BUILD_TOOLS.remove('m4')
self.BUILD_TOOLS.append('gperf')
if self.config.platform == Platform.DARWIN:
self.BUILD_TOOLS.append('gperf')
self.BUILD_TOOLS.insert(0, 'tar')
self.BUILD_TOOLS.insert(0, 'xz')
if self.config.target_platform == Platform.IOS:
self.BUILD_TOOLS.append('gas-preprocessor')
if self.config.platform != Platform.LINUX and\
not self.config.prefix_is_executable():
# For glib-mkenums and glib-genmarshal
self.BUILD_TOOLS.append('glib-tools')
def start(self):
# Use a common prefix for the build tools for all the configurations
# so that it can be reused
config = Config()
os.environ.clear()
os.environ.update(self.config._pre_environ)
config.prefix = self.config.build_tools_prefix
config.sources = self.config.build_tools_sources
config.cache_file = self.config.build_tools_cache
config.load()
if not os.path.exists(config.prefix):
os.makedirs(config.prefix)
if not os.path.exists(config.sources):
os.makedirs(config.sources)
config.do_setup_env()
cookbook = CookBook(config)
recipes = self.BUILD_TOOLS
recipes += self.PLAT_BUILD_TOOLS.get(self.config.platform, [])
oven = Oven(recipes, cookbook)
oven.start_cooking()
self.config.do_setup_env()
| Python | 0.000001 |
b9c7d3f76dee20dd1da1e53365b2aeac616bd0a3 | add --run option | doc/examples/plot.py | doc/examples/plot.py | import argparse
import matplotlib.pyplot as plt
import perf
import statistics
def plot_bench(args, bench):
    """Render a perf Benchmark with matplotlib.

    Two modes:
      * default: all runs concatenated into one curve, plus its mean;
      * --split-runs: one blue curve per run (red prefix for warmups when
        --warmups is given), plus a green mean line.
    --skip drops the first N values of every run; --run (1-based) restricts
    the default mode to a single run.
    """
    if not args.split_runs:
        runs = bench.get_runs()
        if args.run:
            # --run is 1-based on the command line.
            run = runs[args.run - 1]
            runs = [run]
        values = []
        for run in runs:
            run_values = run.values
            if args.skip:
                run_values = run_values[args.skip:]
            values.extend(run_values)
        plt.plot(values, label='values')
        mean = statistics.mean(values)
        # Horizontal line at the mean, same x-extent as the values curve.
        plt.plot([mean] * len(values), label='mean')
    else:
        values = []
        width = None
        for run_index, run in enumerate(bench.get_runs()):
            index = 0
            x = []
            y = []
            run_values = run.values
            if args.skip:
                run_values = run_values[args.skip:]
            for value in run_values:
                x.append(index)
                y.append(value)
                index += 1
            plt.plot(x, y, color='blue')
            values.extend(run_values)
            # NOTE: width ends up as the length of the *last* run; the mean
            # line below is drawn over that extent.
            width = len(run_values)
            if args.warmups:
                # Warmups are plotted at negative x so they precede the run.
                run_values = [value for loops, value in run.warmups]
                index = -len(run.warmups) + 1
                x = []
                y = []
                for value in run_values:
                    x.append(index)
                    y.append(value)
                    index += 1
                plt.plot(x, y, color='red')
        mean = statistics.mean(values)
        plt.plot([mean] * width, label='mean', color='green')
    plt.legend(loc='upper right', shadow=True, fontsize='x-large')
    plt.show()
def parse_args():
    """Build the command-line parser for the plotting script and parse argv."""
    parser = argparse.ArgumentParser()
    add = parser.add_argument
    add('-b', '--benchmark')
    add('--split-runs', action='store_true')
    add('--skip', type=int, help='skip first SKIP values')
    add('--warmups', action='store_true')
    add('--run', metavar='INDEX', type=int,
        help='only render run number INDEX')
    add('filename')
    return parser.parse_args()
def main():
    """Entry point: load the benchmark from *filename* and plot it."""
    args = parse_args()
    if args.benchmark:
        # The file is a suite; -b NAME selects one benchmark out of it.
        suite = perf.BenchmarkSuite.load(args.filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = perf.Benchmark.load(args.filename)
    plot_bench(args, bench)
if __name__ == "__main__":
    main()
| import argparse
import matplotlib.pyplot as plt
import perf
import statistics
def plot_bench(args, bench):
    """Render a perf Benchmark with matplotlib.

    Default mode: all values as one curve plus a mean line.
    --split-runs: one blue curve per run; with --warmups, warmup values are
    drawn in red at negative x so they precede the run.
    --skip drops the first N values.
    """
    if not args.split_runs:
        values = bench.get_values()
        if args.skip:
            values = values[args.skip:]
        # NOTE(review): this comprehension is a plain copy of `values`;
        # it looks redundant -- possibly meant to force a list.
        values = [value for value in values]
        plt.plot(values, label='values')
        mean = statistics.mean(values)
        # Horizontal mean line with the same x-extent as the values curve.
        plt.plot([mean] * len(values), label='mean')
        plt.legend(loc='upper right', shadow=True, fontsize='x-large')
    else:
        for run_index, run in enumerate(bench.get_runs()):
            index = 0
            x = []
            y = []
            values = run.values
            if args.skip:
                values = values[args.skip:]
            for value in values:
                x.append(index)
                y.append(value)
                index += 1
            plt.plot(x, y, color='blue')
            if args.warmups:
                values = [value for loops, value in run.warmups]
                # Place warmups at negative x, ending just before index 0.
                index = -len(run.warmups) + 1
                x = []
                y = []
                for value in values:
                    x.append(index)
                    y.append(value)
                    index += 1
                plt.plot(x, y, color='red')
    plt.show()
def parse_args():
    """Parse command-line options for the benchmark plotting script."""
    parser = argparse.ArgumentParser()
    # Declarative option table: (flags, keyword arguments).
    option_specs = (
        (('-b', '--benchmark'), {}),
        (('--split-runs',), {'action': 'store_true'}),
        (('--skip',), {'type': int, 'help': 'skip first SKIP values'}),
        (('--warmups',), {'action': 'store_true'}),
        (('filename',), {}),
    )
    for flags, kwargs in option_specs:
        parser.add_argument(*flags, **kwargs)
    return parser.parse_args()
def main():
    """Entry point: load the benchmark from *filename* and plot it."""
    args = parse_args()
    if args.benchmark:
        # The file is a suite; -b NAME selects one benchmark out of it.
        suite = perf.BenchmarkSuite.load(args.filename)
        bench = suite.get_benchmark(args.benchmark)
    else:
        bench = perf.Benchmark.load(args.filename)
    plot_bench(args, bench)
if __name__ == "__main__":
    main()
| Python | 0.000003 |
4c549414fdac30bdf514f346543760fbe9bf5505 | Revert "Reject dud properly when not validated." | debile/master/incoming_dud.py | debile/master/incoming_dud.py | # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from firewoes.lib.hash import idify, uniquify
from sqlalchemy.orm.exc import NoResultFound
from debile.master.utils import emit
from debile.master.dud import Dud, DudFileException
from debile.master.filerepo import FileRepo, FilesAlreadyRegistered
from debile.master.orm import Builder, Job
def process_dud(config, session, path):
try:
dud = Dud(path)
dud.validate()
except Exception:
print "SKIP: Invavalid dud file {path}".format(tag=path)
return
try:
fingerprint = dud.validate_signature(config['keyrings']['pgp'])
except DudFileException:
return reject_dud(session, dud, "invalid-signature")
try:
builder = session.query(Builder).filter_by(pgp=fingerprint).one()
except NoResultFound:
return reject_dud(session, dud, "invalid-dud-builder")
jid = dud.get("X-Debile-Job", None)
if jid is None:
return reject_dud(session, dud, "missing-dud-job")
job = session.query(Job).get(jid)
if job is None:
return reject_dud(session, dud, "invalid-dud-job")
if dud.get("X-Debile-Failed", None) is None:
return reject_dud(session, dud, "no-failure-notice")
if job.builder != builder:
return reject_dud(session, dud, "invalid-dud-uploader")
accept_dud(config, session, dud, builder)
def reject_dud(session, dud, tag):
    """Roll back the DB session, announce the rejection, delete the upload.

    `tag` is a short machine-readable rejection reason
    (e.g. "invalid-signature").
    """
    session.rollback()
    print "REJECT: {source} because {tag}".format(
        tag=tag, source=dud['Source'])
    emit('reject', 'result', {
        "tag": tag,
        "source": dud['Source'],
    })
    # Remove the .dud and every file it references from the incoming dir.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
    # Note this in the log.
def accept_dud(config, session, dud, builder):
    """Persist a validated dud: store its firehose result and archive files."""
    fire = dud.get_firehose()
    failed = True if dud.get('X-Debile-Failed', None) == "Yes" else False
    job = session.query(Job).get(dud['X-Debile-Job'])
    # Deduplicate the firehose tree against objects already in the DB.
    fire, _ = idify(fire)
    fire = uniquify(session, fire)
    result = job.new_result(fire, failed)
    session.add(result)
    try:
        repo = FileRepo()
        repo.add_dud(result.path, dud, config['filerepo_chmod_mode'])
    except FilesAlreadyRegistered:
        # Rolls back the session and removes the incoming files.
        return reject_dud(session, dud, "dud-files-already-registered")
    emit('receive', 'result', result.debilize())
    # OK. It's safely in the database and repo. Let's cleanup.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
| # Copyright (c) 2012-2013 Paul Tagliamonte <paultag@debian.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import os
from firewoes.lib.hash import idify, uniquify
from sqlalchemy.orm.exc import NoResultFound
from debile.master.utils import emit
from debile.master.dud import Dud, DudFileException
from debile.master.filerepo import FileRepo, FilesAlreadyRegistered
from debile.master.orm import Builder, Job
def process_dud(config, session, path):
try:
dud = Dud(path)
dud.validate()
except Exception:
return reject_dud(session, dud, 'invalid-dud')
try:
fingerprint = dud.validate_signature(config['keyrings']['pgp'])
except DudFileException:
return reject_dud(session, dud, "invalid-signature")
try:
builder = session.query(Builder).filter_by(pgp=fingerprint).one()
except NoResultFound:
return reject_dud(session, dud, "invalid-dud-builder")
jid = dud.get("X-Debile-Job", None)
if jid is None:
return reject_dud(session, dud, "missing-dud-job")
job = session.query(Job).get(jid)
if job is None:
return reject_dud(session, dud, "invalid-dud-job")
if dud.get("X-Debile-Failed", None) is None:
return reject_dud(session, dud, "no-failure-notice")
if job.builder != builder:
return reject_dud(session, dud, "invalid-dud-uploader")
accept_dud(config, session, dud, builder)
def reject_dud(session, dud, tag):
    """Roll back the DB session, announce the rejection, delete the upload.

    `tag` is a short machine-readable rejection reason
    (e.g. "invalid-signature").
    """
    session.rollback()
    print "REJECT: {source} because {tag}".format(
        tag=tag, source=dud['Source'])
    emit('reject', 'result', {
        "tag": tag,
        "source": dud['Source'],
    })
    # Remove the .dud and every file it references from the incoming dir.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
    # Note this in the log.
def accept_dud(config, session, dud, builder):
    """Persist a validated dud: store its firehose result and archive files."""
    fire = dud.get_firehose()
    failed = True if dud.get('X-Debile-Failed', None) == "Yes" else False
    job = session.query(Job).get(dud['X-Debile-Job'])
    # Deduplicate the firehose tree against objects already in the DB.
    fire, _ = idify(fire)
    fire = uniquify(session, fire)
    result = job.new_result(fire, failed)
    session.add(result)
    try:
        repo = FileRepo()
        repo.add_dud(result.path, dud, config['filerepo_chmod_mode'])
    except FilesAlreadyRegistered:
        # Rolls back the session and removes the incoming files.
        return reject_dud(session, dud, "dud-files-already-registered")
    emit('receive', 'result', result.debilize())
    # OK. It's safely in the database and repo. Let's cleanup.
    for fp in [dud.get_dud_file()] + dud.get_files():
        os.unlink(fp)
| Python | 0 |
facd1988bbbcbf64e64ba6d744805045e26f06f7 | 'subprocess' needs a literal string for the sed command | installscripts/wizard/jazz_common.py | installscripts/wizard/jazz_common.py | #!/usr/bin/python
import os
import re
import subprocess
#Global variables
# Terraform variables file edited by the wizard (relative to terraform_folder).
TFVARS_FILE = "terraform.tfvars"
HOME_JAZZ_INSTALLER = os.path.expanduser("~") + "/jazz-installer/"
# Jenkins cookbook files patched with user-supplied credentials / branch.
COGNITO_USER_FILE = HOME_JAZZ_INSTALLER + "/installscripts/cookbooks/jenkins/files/credentials/cognitouser.sh"
DEFAULT_RB = HOME_JAZZ_INSTALLER + "/installscripts/cookbooks/jenkins/attributes/default.rb"
def parse_and_replace_paramter_list(terraform_folder, parameter_list):
    """
    Apply the wizard-collected parameters to terraform.tfvars and to the
    Jenkins cookbook files via sed.

    parameter_list layout:
        [0] git branch
        [1] (cognito_username, cognito_password)
        [2] AWS account id
        [3] [tag_env_prefix, tag_environment, tag_exempt, tag_owner]

    Side effects: chdir()s into terraform_folder and edits files in place.
    """
    jazz_branch = parameter_list[0]
    cognito_details = parameter_list[1]
    jazz_account_id = parameter_list[2]
    jazz_tag_details = parameter_list[3] #[tag_env_prefix, tag_enviornment, tag_exempt, tag_owner]
    os.chdir(terraform_folder)
    # ----------------------------------------------------------
    # Populate variables in terraform variables.tf and cookbooks
    # -----------------------------------------------------------
    #populating BRANCH name
    replace_tfvars('github_branch', jazz_branch, TFVARS_FILE)
    subprocess.call(['sed', '-i', "s|default\['git_branch'\].*.|default\['git_branch'\]='%s'|g" %(jazz_branch), DEFAULT_RB])
    # Populating Jazz Account ID
    replace_tfvars('jazz_accountid', jazz_account_id, TFVARS_FILE)
    # Populating Cognito Details
    replace_tfvars('cognito_pool_username', cognito_details[0], TFVARS_FILE)
    replace_tfvars('cognito_pool_password', cognito_details[1], TFVARS_FILE)
    subprocess.call(['sed', '-i', "s|<username>cognitouser</username>|<username>%s</username>|g" %(cognito_details[0]), COGNITO_USER_FILE])
    subprocess.call(['sed', '-i', "s|<password>cognitopasswd</password>|<password>%s</password>|g" %(cognito_details[1]), COGNITO_USER_FILE])
    # Populating Jazz Tag env
    replace_tfvars('envPrefix', jazz_tag_details[0], TFVARS_FILE)
    replace_tfvars('tagsEnvironment', jazz_tag_details[1], TFVARS_FILE)
    replace_tfvars('tagsExempt', jazz_tag_details[2], TFVARS_FILE)
    replace_tfvars('tagsOwner', jazz_tag_details[3], TFVARS_FILE)
    # Record the stack name in the teardown script as well.
    subprocess.call(['sed', '-i', 's|stack_name=.*.$|stack_name="%s"|g' %(jazz_tag_details[0]), "scripts/destroy.sh"])
# Uses sed to modify the values of key-value pairs within a file that follow the form 'key = value'
# NOTE: The use of "-i'.bak'" and the creation of backup files is required macOS (that is, BSD) 'sed' will fail otherise.
# NOTE: The `r` prefix is needed to force a string literal here.
# TODO: We should replace `sed` executable calls with standard python library calls, would be faster and simpler.
def replace_tfvars(key, value, fileName):
    """Rewrite a `key = value` assignment inside *fileName* in place via sed.

    The sed back-reference pattern must stay a raw string: in a plain string
    Python would turn '\\1' into chr(1).
    """
    expression = r's|\(%s = \)\(.*\)|\1\"%s\"|g' %(key, value)
    command = ['sed', "-i\'.bak\'", expression, fileName]
    subprocess.call(command)
def validate_email_id(email_id):
    """Return True if *email_id* matches a minimal ``user@host.tld`` shape.

    (The previous docstring was copy-pasted from another function and
    described parameter parsing instead of email validation.)
    """
    # Raw string avoids the invalid '\.' escape sequence, which emits a
    # SyntaxWarning on modern Python.
    return re.search(r'[^@]+@[^@]+\.[^@]+', email_id) is not None
| #!/usr/bin/python
import os
import re
import subprocess
#Global variables
# Terraform variables file edited by the wizard (relative to terraform_folder).
TFVARS_FILE = "terraform.tfvars"
HOME_JAZZ_INSTALLER = os.path.expanduser("~") + "/jazz-installer/"
# Jenkins cookbook files patched with user-supplied credentials / branch.
COGNITO_USER_FILE = HOME_JAZZ_INSTALLER + "/installscripts/cookbooks/jenkins/files/credentials/cognitouser.sh"
DEFAULT_RB = HOME_JAZZ_INSTALLER + "/installscripts/cookbooks/jenkins/attributes/default.rb"
def parse_and_replace_paramter_list(terraform_folder, parameter_list):
    """
    Apply the wizard-collected parameters to terraform.tfvars and to the
    Jenkins cookbook files via sed.

    parameter_list layout:
        [0] git branch
        [1] (cognito_username, cognito_password)
        [2] AWS account id
        [3] [tag_env_prefix, tag_environment, tag_exempt, tag_owner]

    Side effects: chdir()s into terraform_folder and edits files in place.
    """
    jazz_branch = parameter_list[0]
    cognito_details = parameter_list[1]
    jazz_account_id = parameter_list[2]
    jazz_tag_details = parameter_list[3] #[tag_env_prefix, tag_enviornment, tag_exempt, tag_owner]
    os.chdir(terraform_folder)
    # ----------------------------------------------------------
    # Populate variables in terraform variables.tf and cookbooks
    # -----------------------------------------------------------
    #populating BRANCH name
    replace_tfvars('github_branch', jazz_branch, TFVARS_FILE)
    subprocess.call(['sed', '-i', "s|default\['git_branch'\].*.|default\['git_branch'\]='%s'|g" %(jazz_branch), DEFAULT_RB])
    # Populating Jazz Account ID
    replace_tfvars('jazz_accountid', jazz_account_id, TFVARS_FILE)
    # Populating Cognito Details
    replace_tfvars('cognito_pool_username', cognito_details[0], TFVARS_FILE)
    replace_tfvars('cognito_pool_password', cognito_details[1], TFVARS_FILE)
    subprocess.call(['sed', '-i', "s|<username>cognitouser</username>|<username>%s</username>|g" %(cognito_details[0]), COGNITO_USER_FILE])
    subprocess.call(['sed', '-i', "s|<password>cognitopasswd</password>|<password>%s</password>|g" %(cognito_details[1]), COGNITO_USER_FILE])
    # Populating Jazz Tag env
    replace_tfvars('envPrefix', jazz_tag_details[0], TFVARS_FILE)
    replace_tfvars('tagsEnvironment', jazz_tag_details[1], TFVARS_FILE)
    replace_tfvars('tagsExempt', jazz_tag_details[2], TFVARS_FILE)
    replace_tfvars('tagsOwner', jazz_tag_details[3], TFVARS_FILE)
    # Record the stack name in the teardown script as well.
    subprocess.call(['sed', '-i', 's|stack_name=.*.$|stack_name="%s"|g' %(jazz_tag_details[0]), "scripts/destroy.sh"])
# Uses sed to modify the values of key-value pairs within a file that follow the form 'key = value'
# NOTE: The use of "-i'.bak'" and the creation of backup files is required macOS (that is, BSD) 'sed' will fail otherise.
def replace_tfvars(key, value, fileName):
    """Rewrite a `key = value` assignment inside *fileName* in place via sed.

    FIX: the sed expression must be a raw string literal.  In a plain string
    Python interprets the back-reference '\\1' as the escape '\\x01', handing
    sed a corrupted replacement pattern so the substitution never works.
    """
    subprocess.call(['sed', '-i\'.bak\'', r"s|\(%s = \)\(.*\)|\1\"%s\"|g" %(key, value), fileName])
def validate_email_id(email_id):
    """Return True when *email_id* matches a minimal user@host.tld shape."""
    match = re.search('[^@]+@[^@]+\.[^@]+', email_id)
    if match is not None:
        return True
    return False
| Python | 0.999999 |
5f726edd25c1d478da02215a36b9a8ac4a41eec3 | Add missing import | ckanext/stadtzhtheme/commands.py | ckanext/stadtzhtheme/commands.py | import sys
import itertools
import traceback
import ckan.lib.cli
import ckan.logic as logic
import ckan.model as model
class StadtzhCommand(ckan.lib.cli.CkanCommand):
    '''Command for stadtzh
    Usage:
        # General usage
        paster --plugin=ckanext-stadtzh-theme <command> -c <path to config>
        # Show this help
        paster stadtzh help
        # Cleanup datastore
        paster stadtzh cleanup_datastore
    '''
    # The class docstring above is user-visible help text (printed by
    # `help` and used by paster) -- do not edit it casually.
    summary = __doc__.split('\n')[0]
    usage = __doc__
    def command(self):
        # Dispatch the first CLI argument to the matching handler; anything
        # unknown prints the usage text and exits non-zero.
        # load pylons config
        self._load_config()
        options = {
            'cleanup_datastore': self.cleanup_datastore,
            'help': self.help,
        }
        try:
            cmd = self.args[0]
            options[cmd](*self.args[1:])
        except KeyError:
            # Raised both for a missing argument and an unknown command.
            self.help()
            sys.exit(1)
    def help(self):
        print self.__doc__
    def cleanup_datastore(self):
        # Delete the content of datastore tables whose backing CKAN
        # resource no longer exists (orphaned tables).
        user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
        context = {
            'model': model,
            'session': model.Session,
            'user': user['name']
        }
        try:
            logic.check_access('datastore_delete', context)
            logic.check_access('resource_show', context)
        except logic.NotAuthorized:
            print("User is not authorized to perform this action.")
            sys.exit(1)
        # query datastore to get all resources from the _table_metadata
        resource_id_list = []
        try:
            # Page through the metadata table 100 records at a time.
            for offset in itertools.count(start=0, step=100):
                print(
                    "Load metadata records from datastore (offset: %s)"
                    % offset
                )
                record_list, has_next_page = self._get_datastore_table_page(context, offset) # noqa
                resource_id_list.extend(record_list)
                if not has_next_page:
                    break
        except Exception, e:
            print(
                "Error while gathering resources: %s / %s"
                % (str(e), traceback.format_exc())
            )
        # delete the rows of the orphaned datastore tables
        delete_count = 0
        for resource_id in resource_id_list:
            try:
                logic.check_access('datastore_delete', context)
                logic.get_action('datastore_delete')(
                    context,
                    {'resource_id': resource_id, 'force': True}
                )
                # datastore_delete without filters truncates the table.
                print("Table '%s' deleted (not dropped)" % resource_id)
                delete_count += 1
            except Exception, e:
                print(
                    "Error while deleting datastore resource %s: %s / %s"
                    % (resource_id, str(e), traceback.format_exc())
                )
                continue
        print("Deleted content of %s tables" % delete_count)
    def _get_datastore_table_page(self, context, offset=0):
        # Return ([orphaned resource ids], more_pages?) for one page of
        # the datastore's _table_metadata listing.
        # query datastore to get all resources from the _table_metadata
        result = logic.get_action('datastore_search')(
            context,
            {
                'resource_id': '_table_metadata',
                'offset': offset
            }
        )
        resource_id_list = []
        for record in result['records']:
            try:
                # ignore 'alias' records
                if record['alias_of']:
                    continue
                logic.check_access('resource_show', context)
                logic.get_action('resource_show')(
                    context,
                    {'id': record['name']}
                )
                print("Resource '%s' found" % record['name'])
            except logic.NotFound:
                # No CKAN resource backs this table -> it is orphaned.
                resource_id_list.append(record['name'])
                print("Resource '%s' *not* found" % record['name'])
            except logic.NotAuthorized:
                print("User is not authorized to perform this action.")
            except (KeyError, AttributeError), e:
                print("Error while handling record %s: %s" % (record, str(e)))
                continue
        # are there more records?
        has_next_page = (len(result['records']) > 0)
        return (resource_id_list, has_next_page)
| import sys
import itertools
import ckan.lib.cli
import ckan.logic as logic
import ckan.model as model
class StadtzhCommand(ckan.lib.cli.CkanCommand):
'''Command for stadtzh
Usage:
# General usage
paster --plugin=ckanext-stadtzh-theme <command> -c <path to config>
# Show this help
paster stadtzh help
# Cleanup datastore
paster stadtzh cleanup_datastore
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
# load pylons config
self._load_config()
options = {
'cleanup_datastore': self.cleanup_datastore,
'help': self.help,
}
try:
cmd = self.args[0]
options[cmd](*self.args[1:])
except KeyError:
self.help()
sys.exit(1)
def help(self):
print self.__doc__
def cleanup_datastore(self):
user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {
'model': model,
'session': model.Session,
'user': user['name']
}
try:
logic.check_access('datastore_delete', context)
logic.check_access('resource_show', context)
except logic.NotAuthorized:
print("User is not authorized to perform this action.")
sys.exit(1)
# query datastore to get all resources from the _table_metadata
resource_id_list = []
try:
for offset in itertools.count(start=0, step=100):
print(
"Load metadata records from datastore (offset: %s)"
% offset
)
record_list, has_next_page = self._get_datastore_table_page(context, offset) # noqa
resource_id_list.extend(record_list)
if not has_next_page:
break
except Exception, e:
print(
"Error while gathering resources: %s / %s"
% (str(e), traceback.format_exc())
)
# delete the rows of the orphaned datastore tables
delete_count = 0
for resource_id in resource_id_list:
try:
logic.check_access('datastore_delete', context)
logic.get_action('datastore_delete')(
context,
{'resource_id': resource_id, 'force': True}
)
print("Table '%s' deleted (not dropped)" % resource_id)
delete_count += 1
except Exception, e:
print(
"Error while deleting datastore resource %s: %s / %s"
% (resource_id, str(e), traceback.format_exc())
)
continue
print("Deleted content of %s tables" % delete_count)
def _get_datastore_table_page(self, context, offset=0):
# query datastore to get all resources from the _table_metadata
result = logic.get_action('datastore_search')(
context,
{
'resource_id': '_table_metadata',
'offset': offset
}
)
resource_id_list = []
for record in result['records']:
try:
# ignore 'alias' records
if record['alias_of']:
continue
logic.check_access('resource_show', context)
logic.get_action('resource_show')(
context,
{'id': record['name']}
)
print("Resource '%s' found" % record['name'])
except logic.NotFound:
resource_id_list.append(record['name'])
print("Resource '%s' *not* found" % record['name'])
except logic.NotAuthorized:
print("User is not authorized to perform this action.")
except (KeyError, AttributeError), e:
print("Error while handling record %s: %s" % (record, str(e)))
continue
# are there more records?
has_next_page = (len(result['records']) > 0)
return (resource_id_list, has_next_page)
| Python | 0.000466 |
842e1bac8edaf6f28772067eaffd83351d28332a | add unicode | fastube/fastube/settings/partials/auth.py | fastube/fastube/settings/partials/auth.py | # -*- coding: utf-8 -*-
import os
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Auth Model
AUTH_USER_MODEL = "users.User"
# Login
LOGIN_URL = "/login/"
SIGNUP_SUCCESS_MESSAGE = "성공적으로 회원가입이 되었습니다."
LOGIN_SUCCESS_MESSAGE = "성공적으로 로그인이 되었습니다."
LOGOUT_SUCCESS_MESSAGE = "성공적으로 로그아웃이 되었습니다."
SOCIAL_AUTH_URL_NAMESPACE = 'social'
AUTHENTICATION_BACKENDS = (
'social.backends.facebook.FacebookOAuth2',
'social.backends.kakao.KakaoOAuth2',
'django.contrib.auth.backends.ModelBackend',
)
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET")
SOCIAL_AUTH_KAKAO_KEY = os.environ.get("SOCIAL_AUTH_KAKAO_KEY")
SOCIAL_AUTH_KAKAO_SECRET = os.environ.get("SOCIAL_AUTH_KAKAO_SECRET")
SOCIAL_AUTH_PIPELINE = (
'social.pipeline.social_auth.social_details',
'social.pipeline.social_auth.social_uid',
'social.pipeline.social_auth.auth_allowed',
'social.pipeline.social_auth.social_user',
'social.pipeline.user.get_username',
'social.pipeline.social_auth.associate_by_email',
'social.pipeline.user.create_user',
'social.pipeline.social_auth.associate_user',
'social.pipeline.social_auth.load_extra_data',
'social.pipeline.user.user_details'
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
| #-*- coding: utf-8 -*-
import os
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Auth Model -- custom user model from the local `users` app.
AUTH_USER_MODEL = "users.User"
# Login
LOGIN_URL = "/login/"
# Flash messages shown after auth actions.  Korean UI strings -- runtime
# values, keep them as-is.
SIGNUP_SUCCESS_MESSAGE = "성공적으로 회원가입이 되었습니다."
LOGIN_SUCCESS_MESSAGE = "성공적으로 로그인이 되었습니다."
LOGOUT_SUCCESS_MESSAGE = "성공적으로 로그아웃이 되었습니다."
# python-social-auth configuration (Facebook + Kakao OAuth2).
SOCIAL_AUTH_URL_NAMESPACE = 'social'
AUTHENTICATION_BACKENDS = (
    'social.backends.facebook.FacebookOAuth2',
    'social.backends.kakao.KakaoOAuth2',
    'django.contrib.auth.backends.ModelBackend',
)
# OAuth credentials come from the environment; missing variables yield None.
SOCIAL_AUTH_FACEBOOK_KEY = os.environ.get("SOCIAL_AUTH_FACEBOOK_KEY")
SOCIAL_AUTH_FACEBOOK_SECRET = os.environ.get("SOCIAL_AUTH_FACEBOOK_SECRET")
SOCIAL_AUTH_KAKAO_KEY = os.environ.get("SOCIAL_AUTH_KAKAO_KEY")
SOCIAL_AUTH_KAKAO_SECRET = os.environ.get("SOCIAL_AUTH_KAKAO_SECRET")
SOCIAL_AUTH_PIPELINE = (
    'social.pipeline.social_auth.social_details',
    'social.pipeline.social_auth.social_uid',
    'social.pipeline.social_auth.auth_allowed',
    'social.pipeline.social_auth.social_user',
    'social.pipeline.user.get_username',
    'social.pipeline.social_auth.associate_by_email',
    'social.pipeline.user.create_user',
    'social.pipeline.social_auth.associate_user',
    'social.pipeline.social_auth.load_extra_data',
    'social.pipeline.user.user_details'
)
SOCIAL_AUTH_LOGIN_REDIRECT_URL = "/"
| Python | 0.999999 |
61731632b04ca1d9a719b6b4b62fa0a97926e3a9 | clean up unused imports | kubernetes/K8sHorizontalPodAutoscaler.py | kubernetes/K8sHorizontalPodAutoscaler.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.K8sObject import K8sObject
from kubernetes.models.v1.HorizontalPodAutoscaler import HorizontalPodAutoscaler
class K8sHorizontalPodAutoscaler(K8sObject):
    """API wrapper for the Kubernetes HorizontalPodAutoscaler resource."""

    def __init__(self, config=None, name=None):
        super(K8sHorizontalPodAutoscaler, self).__init__(
            config=config,
            obj_type='HorizontalPodAutoscaler',
            name=name
        )

    # --- overrides -------------------------------------------------------

    def create(self):
        """Create the HPA server-side, then refresh the local model."""
        super(K8sHorizontalPodAutoscaler, self).create()
        return self.get()

    def update(self):
        """Push local changes server-side, then refresh the local model."""
        super(K8sHorizontalPodAutoscaler, self).update()
        return self.get()

    def list(self, pattern=None):
        """Return wrapped HPA objects, optionally filtered by name substring."""
        models = [HorizontalPodAutoscaler(item)
                  for item in super(K8sHorizontalPodAutoscaler, self).list()]
        if pattern is not None:
            models = [m for m in models if pattern in m.name]
        wrapped = []
        for m in models:
            hpa = K8sHorizontalPodAutoscaler(config=self.config, name=m.name)
            hpa.model = m
            wrapped.append(hpa)
        return wrapped

    # --- fetch -----------------------------------------------------------

    def get(self):
        """Fetch the server-side object and rebuild the local model."""
        self.model = HorizontalPodAutoscaler(self.get_model())
        return self

    # --- spec accessors --------------------------------------------------

    @property
    def cpu_percent(self):
        """Target CPU utilization percentage."""
        return self.model.spec.cpu_utilization

    @cpu_percent.setter
    def cpu_percent(self, percent=None):
        self.model.spec.cpu_utilization = percent

    @property
    def min_replicas(self):
        """Lower bound on the replica count."""
        return self.model.spec.min_replicas

    @min_replicas.setter
    def min_replicas(self, lower=None):
        self.model.spec.min_replicas = lower

    @property
    def max_replicas(self):
        """Upper bound on the replica count."""
        return self.model.spec.max_replicas

    @max_replicas.setter
    def max_replicas(self, upper=None):
        self.model.spec.max_replicas = upper
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.K8sObject import K8sObject
from kubernetes.K8sDeployment import K8sDeployment
from kubernetes.K8sReplicationController import K8sReplicationController
from kubernetes.K8sExceptions import NotFoundException
from kubernetes.models.v1.HorizontalPodAutoscaler import HorizontalPodAutoscaler
import subprocess
class K8sHorizontalPodAutoscaler(K8sObject):
    """API wrapper for the Kubernetes HorizontalPodAutoscaler resource."""
    def __init__(self, config=None, name=None):
        super(K8sHorizontalPodAutoscaler, self).__init__(
            config=config,
            obj_type='HorizontalPodAutoscaler',
            name=name
        )
    # ------------------------------------------------------------------------------------- override
    def create(self):
        """Create the HPA server-side, then refresh the local model."""
        super(K8sHorizontalPodAutoscaler, self).create()
        self.get()
        return self
    def update(self):
        """Push local changes server-side, then refresh the local model."""
        super(K8sHorizontalPodAutoscaler, self).update()
        self.get()
        return self
    def list(self, pattern=None):
        """Return wrapped HPA objects, optionally filtered by name substring."""
        ls = super(K8sHorizontalPodAutoscaler, self).list()
        hpas = list(map(lambda x: HorizontalPodAutoscaler(x), ls))
        if pattern is not None:
            hpas = list(filter(lambda x: pattern in x.name, hpas))
        k8s = []
        for x in hpas:
            z = K8sHorizontalPodAutoscaler(config=self.config, name=x.name)
            z.model = x
            k8s.append(z)
        return k8s
    # ------------------------------------------------------------------------------------- get
    def get(self):
        """Fetch the server-side object and rebuild the local model."""
        self.model = HorizontalPodAutoscaler(self.get_model())
        return self
    # ------------------------------------------------------------------------------------- cpu_percent
    @property
    def cpu_percent(self):
        """Target CPU utilization percentage."""
        return self.model.spec.cpu_utilization
    @cpu_percent.setter
    def cpu_percent(self, pct=None):
        self.model.spec.cpu_utilization = pct
    # ------------------------------------------------------------------------------------- min replicas
    @property
    def min_replicas(self):
        """Lower bound on the replica count."""
        return self.model.spec.min_replicas
    @min_replicas.setter
    def min_replicas(self, min=None):
        self.model.spec.min_replicas = min
    # ------------------------------------------------------------------------------------- max replicas
    @property
    def max_replicas(self):
        """Upper bound on the replica count."""
        return self.model.spec.max_replicas
    @max_replicas.setter
    def max_replicas(self, max=None):
        self.model.spec.max_replicas = max
| Python | 0 |
b6947fa1850c888cd5b3190b2abf315409f01cdc | Add an explicit logfile rollover at the beginning of each Tulsi bazel build. | src/TulsiGenerator/Scripts/tulsi_logging.py | src/TulsiGenerator/Scripts/tulsi_logging.py | # Copyright 2017 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging routines used by Tulsi scripts."""
import logging
import logging.handlers
import os
class Logger(object):
    """Tulsi specific logging."""
    def __init__(self):
        # Log directory lives under the user's Library folder (macOS layout
        # -- assumes ~/Library/Application Support exists).
        logging_dir = os.path.expanduser('~/Library/Application Support/Tulsi')
        if not os.path.exists(logging_dir):
            os.mkdir(logging_dir)
        logfile = os.path.join(logging_dir, 'build_log.txt')
        # Currently only creates a single logger called 'tulsi_logging'. If
        # additional loggers are needed, consider adding a name attribute to the
        # Logger.
        self._logger = logging.getLogger('tulsi_logging')
        self._logger.setLevel(logging.INFO)
        # Keep the 20 most recent build logs around.
        file_handler = logging.handlers.RotatingFileHandler(logfile, backupCount=20)
        file_handler.setLevel(logging.INFO)
        # Create a new log file for each build.
        file_handler.doRollover()
        self._logger.addHandler(file_handler)
        console = logging.StreamHandler()
        console.setLevel(logging.INFO)
        self._logger.addHandler(console)
    def log_action(self, action_name, action_id, seconds):
        """Record that *action_name* took *seconds*; *action_id* is ignored."""
        del action_id # Unused by this logger.
        # Log to file and print to stdout for display in the Xcode log.
        self._logger.info('<*> %s completed in %0.3f ms',
                          action_name, seconds * 1000)
| # Copyright 2017 The Tulsi Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Logging routines used by Tulsi scripts."""
import logging
import logging.handlers
import os
class Logger(object):
  """Tulsi specific logging.

  Builds the singleton 'tulsi_logging' logger with a rotating file handler
  (under ~/Library/Application Support/Tulsi) plus a console handler.
  """

  def __init__(self):
    support_dir = os.path.expanduser('~/Library/Application Support/Tulsi')
    if not os.path.exists(support_dir):
      os.mkdir(support_dir)
    log_path = os.path.join(support_dir, 'build_log.txt')
    # Currently only creates a single logger called 'tulsi_logging'. If
    # additional loggers are needed, consider adding a name attribute to the
    # Logger.
    logger = logging.getLogger('tulsi_logging')
    logger.setLevel(logging.INFO)
    # File handler first, then console, each at INFO level.
    handlers = (
        logging.handlers.RotatingFileHandler(log_path, backupCount=5),
        logging.StreamHandler(),
    )
    for handler in handlers:
      handler.setLevel(logging.INFO)
      logger.addHandler(handler)
    self._logger = logger

  def log_action(self, action_name, action_id, seconds):
    """Emit a '<*> <name> completed in <ms> ms' record."""
    del action_id  # Unused by this logger.
    # Log to file and print to stdout for display in the Xcode log.
    self._logger.info('<*> %s completed in %0.3f ms',
                      action_name, seconds * 1000)
| Python | 0 |
48c3a35deffaca384189c8342a65debf03036dff | Remove semicolons | acstis/Logging.py | acstis/Logging.py | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from colorama import Fore, Back, Style
import datetime
class Logging:
    """Timestamp-prefixed, colorized console output helpers (colorama)."""

    @staticmethod
    def info(message):
        """Plain message on a black background."""
        stamp = str(datetime.datetime.now())
        print(Back.BLACK + stamp + ": " + message)

    @staticmethod
    def red(message):
        """Error-styled (red) message."""
        stamp = str(datetime.datetime.now())
        print(Fore.RED + Back.BLACK + stamp + ": " + message)

    @staticmethod
    def green(message):
        """Success-styled (green) message."""
        stamp = str(datetime.datetime.now())
        print(Fore.GREEN + Back.BLACK + stamp + ": " + message)

    @staticmethod
    def yellow(message):
        """Warning-styled (yellow) message."""
        stamp = str(datetime.datetime.now())
        print(Fore.YELLOW + Back.BLACK + stamp + ": " + message)
| # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2017 Tijme Gommers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from colorama import Fore, Back, Style
import datetime
class Logging:
    """Timestamp-prefixed, colorized console output helpers (colorama).

    Trailing semicolons removed throughout: they are no-ops in Python and
    flagged by every style checker.
    """

    @staticmethod
    def info(message):
        """Print ``message`` on a black background with a timestamp prefix."""
        print(Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def red(message):
        """Print an error-styled (red) message."""
        print(Fore.RED + Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def green(message):
        """Print a success-styled (green) message."""
        print(Fore.GREEN + Back.BLACK + str(datetime.datetime.now()) + ": " + message)

    @staticmethod
    def yellow(message):
        """Print a warning-styled (yellow) message."""
        print(Fore.YELLOW + Back.BLACK + str(datetime.datetime.now()) + ": " + message)
| Python | 0.999999 |
9365a3dce9cc1abe507c36d3dd1d79ca7fcab15c | add admin for Product | eca_catalogue/abstract_admin.py | eca_catalogue/abstract_admin.py | from django.contrib import admin
from treebeard.admin import TreeAdmin
class AbstractProductCategoryAdmin(admin.ModelAdmin):
    """Flat (non-tree) admin base for product categories."""
    # Auto-fill the slug from the name field as the user types.
    prepopulated_fields = {"slug": ("name",)}
class AbstractNestedProductCategoryAdmin(TreeAdmin):
    """Tree-aware admin base (django-treebeard) for nested categories."""
    # Auto-fill the slug from the name field as the user types.
    prepopulated_fields = {"slug": ("name",)}
class AbstractProductAdmin(admin.ModelAdmin):
    """Admin base for products: list by item number and name."""
    list_display = ['item_number', 'name',]
    # Auto-fill the slug from the name field as the user types.
    prepopulated_fields = {"slug": ("name",)}
| from django.contrib import admin
from treebeard.admin import TreeAdmin
class AbstractProductCategoryAdmin(admin.ModelAdmin):
    """Flat (non-tree) admin base for product categories."""
    # Auto-fill the slug from the name field as the user types.
    prepopulated_fields = {"slug": ("name",)}
class AbstractNestedProductCategoryAdmin(TreeAdmin):
    """Tree-aware admin base (django-treebeard) for nested categories."""
    # Auto-fill the slug from the name field as the user types.
    prepopulated_fields = {"slug": ("name",)}
| Python | 0 |
9dca7838d8fb495acc02241b55a30870b7eec0ba | fix flake error in apps.py | actstream/apps.py | actstream/apps.py | from django.core.exceptions import ImproperlyConfigured
from actstream import settings
from actstream.signals import action
from actstream.compat_apps import AppConfig
class ActstreamConfig(AppConfig):
    """Django app config: connects the action signal handler and, when
    USE_JSONFIELD is enabled, attaches a JSON 'data' column to the Action
    model at startup."""

    name = 'actstream'

    def ready(self):
        # Imported here (not at module scope) so the app registry is
        # fully loaded before model-touching code runs.
        from actstream.actions import action_handler
        action.connect(action_handler, dispatch_uid='actstream.models')
        action_class = self.get_model('action')
        if settings.USE_JSONFIELD:
            try:
                from jsonfield.fields import JSONField
            except ImportError:
                raise ImproperlyConfigured(
                    'You must have django-jsonfield installed '
                    'if you wish to use a JSONField on your actions'
                )
            # Dynamically add a nullable JSON field named 'data' to Action.
            JSONField(blank=True, null=True).contribute_to_class(action_class, 'data')
| from django.core.exceptions import ImproperlyConfigured
from actstream import settings
from actstream.signals import action
from actstream.compat_apps import AppConfig
class ActstreamConfig(AppConfig):
    """Django app config: wires the action signal handler and the optional
    JSON 'data' field on the Action model."""

    name = 'actstream'

    def ready(self):
        # Deferred import: the app registry must be ready first.
        from actstream.actions import action_handler
        action.connect(action_handler, dispatch_uid='actstream.models')
        action_class = self.get_model('action')
        if not settings.USE_JSONFIELD:
            return
        try:
            from jsonfield.fields import JSONField
        except ImportError:
            raise ImproperlyConfigured('You must have django-jsonfield installed '
                                       'if you wish to use a JSONField on your actions')
        # Dynamically add a nullable JSON field named 'data' to Action.
        JSONField(blank=True, null=True).contribute_to_class(action_class, 'data')
| Python | 0.000001 |
416d452fdaa37a6506a92adf227038474e818acc | Remove unused imports | edaboweb/blueprints/playlist.py | edaboweb/blueprints/playlist.py | #!/usr/bin/env python
# coding: utf-8
# Copyright © 2015 Wieland Hoffmann
# License: MIT, see LICENSE for details
from flask import abort, Blueprint, redirect, request, render_template, url_for
from json import loads
from mbdata import models
from uuid import UUID
from ..mb_database import db_session
from ..models import db, Playlist
playlist_bp = Blueprint("playlist", __name__)
@playlist_bp.route("/")
def list_playlists():
    """Render an overview of every stored playlist."""
    # Each row is (description, name, gid), consumed positionally by the
    # template.
    rows = db.session.query(
        Playlist.data["description"],
        Playlist.data["name"],
        Playlist.gid,
    )
    return render_template("playlist/list.html", playlists=rows)
@playlist_bp.route("/<uuid:pid>", methods=["GET"])
def view_playlist(pid):
    """Render one playlist, resolving its recordings from the MB database."""
    playlist = Playlist.query.filter(Playlist.gid == str(pid)).first_or_404()

    # Recording MBID -> release MBID, straight from the stored tracklist.
    release_ids = {track["recordingid"]: track["releaseid"]
                   for track in playlist.data["tracklist"]}
    recording_ids = list(release_ids)

    recording_query = db_session().query(models.Recording.name,
                                         models.Recording.gid,
                                         models.ArtistCredit.name,
                                         models.Track.gid).\
        outerjoin(models.Track).\
        join(models.ArtistCredit,
             models.Recording.artist_credit_id == models.ArtistCredit.id).\
        filter(models.Recording.gid.in_(recording_ids))

    # Recording MBID -> (recording name, artist credit, track MBID).
    recordings = {rec_gid: (rec_name, credit_name, track_gid)
                  for rec_name, rec_gid, credit_name, track_gid
                  in recording_query.all()}

    return render_template("playlist/single.html",
                           playlist=playlist,
                           recordings=recordings,
                           release_ids=release_ids)
@playlist_bp.route("/<uuid:pid>", methods=["POST"])
def add_playlist(pid):
    """Create or update the playlist identified by ``pid`` from the posted
    JSON body; 400 when the body's uuid does not match the URL."""
    raw = request.get_data()
    # request.get_data() returns bytes on Python 3; the old
    # ``raw.encode("utf-8")`` raised AttributeError there. Decode bytes,
    # pass str through unchanged. (Renamed from ``json`` to avoid shadowing
    # the stdlib module name.)
    payload = loads(raw.decode("utf-8") if isinstance(raw, bytes) else raw)
    uuid_from_doc = UUID(payload["uuid"])
    if uuid_from_doc != pid:
        abort(400)
    playlist = Playlist.query.filter(Playlist.gid == str(pid)).first()
    if playlist is None:
        playlist = Playlist(gid=str(pid), data=payload)
    else:
        playlist.data = payload
    db.session.add(playlist)
    db.session.commit()
    return redirect(url_for('playlist.list_playlists'))
| #!/usr/bin/env python
# coding: utf-8
# Copyright © 2015 Wieland Hoffmann
# License: MIT, see LICENSE for details
from flask import abort, Blueprint, redirect, request, render_template, url_for
from json import loads
from mbdata import models
from operator import itemgetter
from sqlalchemy.orm.query import Query
from uuid import UUID
from ..mb_database import db_session
from ..models import db, Playlist
playlist_bp = Blueprint("playlist", __name__)
@playlist_bp.route("/")
def list_playlists():
    """Render an overview of every stored playlist."""
    # Rows are (description, name, gid) tuples, consumed positionally by
    # the template.
    playlists = db.session.query(Playlist.data["description"],
                                 Playlist.data["name"],
                                 Playlist.gid)
    return render_template("playlist/list.html", playlists=playlists)
@playlist_bp.route("/<uuid:pid>", methods=["GET"])
def view_playlist(pid):
    """Render one playlist, resolving its recordings from the MB database."""
    playlist = Playlist.query.filter(Playlist.gid == str(pid)).first_or_404()
    # Collect recording MBIDs and map each to its release MBID.
    recording_ids = []
    release_ids = {}
    for track in playlist.data["tracklist"]:
        recording_id = track["recordingid"]
        recording_ids.append(recording_id)
        release_ids[recording_id] = track["releaseid"]
    # One query for all recordings: name, gid, artist credit, track gid.
    recording_query = db_session().query(models.Recording.name,
                                         models.Recording.gid,
                                         models.ArtistCredit.name,
                                         models.Track.gid).\
        outerjoin(models.Track).\
        join(models.ArtistCredit,
             models.Recording.artist_credit_id == models.ArtistCredit.id).\
        filter(models.Recording.gid.in_(recording_ids))
    # Recording MBID -> (recording name, artist credit, track MBID).
    recordings = {}
    for name, recordingid, credit, trackid in recording_query.all():
        recordings[recordingid] = (name, credit, trackid)
    return render_template("playlist/single.html",
                           playlist=playlist,
                           recordings=recordings,
                           release_ids=release_ids)
@playlist_bp.route("/<uuid:pid>", methods=["POST"])
def add_playlist(pid):
    """Create or update the playlist identified by ``pid`` from the posted
    JSON body; 400 when the body's uuid does not match the URL."""
    doc = request.get_data()
    # NOTE(review): get_data() returns bytes on Python 3, and bytes has no
    # .encode() -- this line looks Python-2-only; confirm target runtime.
    json = loads(doc.encode("utf-8"))
    uuid_from_doc = UUID(json["uuid"])
    # The URL uuid must match the uuid embedded in the document.
    if uuid_from_doc != pid:
        abort(400)
    playlist = Playlist.query.filter(Playlist.gid == str(pid)).first()
    if playlist is None:
        playlist = Playlist(gid=str(pid), data=json)
    else:
        playlist.data = json
    db.session.add(playlist)
    db.session.commit()
    return redirect(url_for('playlist.list_playlists'))
| Python | 0.000001 |
d8099cd712279afa1c4e73989c7f03bc9de6dd4c | fix performance problem with historian | flow_workflow/historian/operation_data.py | flow_workflow/historian/operation_data.py | import json
class OperationData(object):
def __init__(self, net_key, operation_id, color):
self.net_key = net_key
self.operation_id = int(operation_id)
self.color = int(color)
def dumps(self):
return json.dumps(self.to_dict, sort_keys=True)
@classmethod
def loads(cls, string):
return cls.from_dict(json.loads(string))
@property
def to_dict(self):
return {
'net_key': str(self.net_key),
'operation_id': self.operation_id,
'color': self.color
}
@classmethod
def from_dict(cls, operation_data_dict):
return cls(**operation_data_dict)
def __repr__(self):
return str(self)
def __str__(self):
return "OperationData(net_key='%s', operation_id=%s, color=%s)" % (
self.net_key, self.operation_id, self.color)
def __eq__(self, other):
return self.to_dict == other.to_dict
| import json
class OperationData(object):
def __init__(self, net_key, operation_id, color):
self.net_key = net_key
self.operation_id = int(operation_id)
self.color = int(color)
def dumps(self):
return json.dumps(self.to_dict, sort_keys=True)
@classmethod
def loads(cls, string):
return cls.from_dict(json.loads(string))
@property
def to_dict(self):
return {
'net_key': self.net_key,
'operation_id': self.operation_id,
'color': self.color
}
@classmethod
def from_dict(cls, operation_data_dict):
return cls(**operation_data_dict)
def __repr__(self):
return str(self)
def __str__(self):
return "OperationData(net_key='%s', operation_id=%s, color=%s)" % (
self.net_key, self.operation_id, self.color)
def __eq__(self, other):
return self.to_dict == other.to_dict
| Python | 0.000044 |
995ff0e9d7189d5b6b7ae01c3440d2ec336d6e53 | Handle failed call smartctl on USB drives. | device_inventory/benchmark.py | device_inventory/benchmark.py | """
Devices benchmark
Set of programs, or other operations, in order to assess the relative
performance of an object, normally by running a number of standard
tests and trials against it.
"""
import logging
import re
import subprocess
from .utils import run
def hard_disk_smart(disk="/dev/sda"):
    """Run ``smartctl -a`` on ``disk`` and summarise the last SMART self-test.

    Returns a dict with at least ``@type`` and ``error``; when the self-test
    table can be parsed it also carries type/status/lifetime/firstError.
    """
    # smartctl -a /dev/sda | grep "# 1"
    # # 1 Short offline Completed without error 00% 10016 -
    # XXX extract data of smartest. Decide which info is relevant.
    assert disk is not None
    error = False
    try:
        smart = subprocess.check_output(["smartctl", "-a", disk],
                                        universal_newlines=True)
    except subprocess.CalledProcessError as e:
        # smartctl still prints useful output on failure; keep it for parsing.
        smart = e.output
        # analyze e.returncode
        # smartctl encodes status as a bitmask in its exit code.
        if e.returncode == pow(2, 0):  # bit 0
            # TODO raise # command line did not parse
            logging.debug("Error calling smartctl: %s", e.output)
            error = True
        elif e.returncode == pow(2, 1):  # bit 1
            pass  # only warning because low-power
        elif e.returncode == pow(2, 2):  # bit 2
            error = True  # TODO cannot perform SMART
        else:  # bit 3, 4, 5, 6, 7 device log with errors
            error = True
    test = {
        "@type": "TestHardDrive",
        #"device": disk,
        "error": error,
    }
    # excepted output
    # Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error
    try:
        # Locate the most recent self-test row ("# 1") and split its columns.
        beg = smart.index('# 1')
        end = smart.index('\n', beg)
        result = re.split(r'\s\s+', smart[beg:end])
    except ValueError:
        # No self-test table found (e.g. USB bridge); return the minimal dict.
        logging.error("Error retrieving SMART info from '%s'", disk)
    else:
        try:
            lifetime = int(result[4])
        except ValueError:
            lifetime = -1
        try:
            lba_first_error = int(result[5], 0)  # accepts hex and decimal value
        except ValueError:
            lba_first_error = None
        test.update({
            "type": result[1],
            "status": result[2],
            "lifetime": lifetime,
            "firstError": lba_first_error,
        })
    return test
def score_cpu():
    """Return the total BogoMIPS summed across all CPUs in /proc/cpuinfo."""
    # https://en.wikipedia.org/wiki/BogoMips
    # score = sum(cpu.bogomips for cpu in device.cpus)
    with open("/proc/cpuinfo") as f:
        return sum(float(line.split(':')[1])
                   for line in f
                   if line.startswith("bogomips"))
def score_ram(speed):
    """
    Score is the relation between memory frequency and memory latency.
    - higher frequency is better
    - lower latency is better

    Returns the string "Unknown" when ``speed`` cannot be parsed.
    """
    # http://www.cyberciti.biz/faq/check-ram-speed-linux/
    # Expected input "800 MHz (1.2 ns)"
    try:
        freq = float(speed.split()[0])
        # Start one character past "(": the old slice included the
        # parenthesis, so float() always raised and every valid input
        # scored "Unknown".
        lat = float(speed[speed.index("(") + 1:speed.index("ns)")])
        return freq / lat
    except (IndexError, ValueError, ZeroDivisionError):
        # Unparseable string or zero latency.
        return "Unknown"
def score_vga(model_name):
    """Placeholder GPU score: always returns None until lookup-table
    matching against etc/vga.txt is implemented."""
    score = None
    # Candidate model tokens are alphanumerics containing a digit,
    # e.g. 'GT218M', '310M'.
    for model in re.findall('\w*\d\w*', model_name):
        # TODO find matching on etc/vga.txt (e.g. ['GT218M', '310M'])
        pass
    return score
| """
Devices benchmark
Set of programs, or other operations, in order to assess the relative
performance of an object, normally by running a number of standard
tests and trials against it.
"""
import logging
import re
import subprocess
from .utils import run
def hard_disk_smart(disk="/dev/sda"):
    """Run ``smartctl -a`` on ``disk`` and summarise the last SMART self-test.

    Returns a dict with at least ``@type`` and ``error``; when the self-test
    table can be parsed it also carries type/status/lifetime/firstError.
    """
    # smartctl -a /dev/sda | grep "# 1"
    # # 1 Short offline Completed without error 00% 10016 -
    # XXX extract data of smartest. Decide which info is relevant.
    assert disk is not None
    error = False
    try:
        smart = subprocess.check_output(["smartctl", "-a", disk],
                                        universal_newlines=True)
    except subprocess.CalledProcessError as e:
        # smartctl still prints useful output on failure; keep it for parsing.
        smart = e.output
        # analyze e.returncode
        # smartctl encodes status as a bitmask in its exit code.
        if e.returncode == pow(2, 0):  # bit 0
            # NOTE(review): re-raises on parse errors; fails hard for drives
            # behind USB bridges where smartctl cannot be invoked cleanly.
            raise  # command line did not parse
        elif e.returncode == pow(2, 1):  # bit 1
            pass  # only warning because low-power
        elif e.returncode == pow(2, 2):  # bit 2
            error = True  # TODO cannot perform SMART
        else:  # bit 3, 4, 5, 6, 7 device log with errors
            error = True
    test = {
        "@type": "TestHardDrive",
        #"device": disk,
        "error": error,
    }
    # excepted output
    # Num Test_Description Status Remaining LifeTime(hours) LBA_of_first_error
    try:
        # Locate the most recent self-test row ("# 1") and split its columns.
        beg = smart.index('# 1')
        end = smart.index('\n', beg)
        result = re.split(r'\s\s+', smart[beg:end])
    except ValueError:
        # No self-test table found; return the minimal dict.
        logging.error("Error retrieving SMART info from '%s'", disk)
    else:
        try:
            lifetime = int(result[4])
        except ValueError:
            lifetime = -1
        try:
            lba_first_error = int(result[5], 0)  # accepts hex and decimal value
        except ValueError:
            lba_first_error = None
        test.update({
            "type": result[1],
            "status": result[2],
            "lifetime": lifetime,
            "firstError": lba_first_error,
        })
    return test
def score_cpu():
    """Return the total BogoMIPS summed across all CPUs in /proc/cpuinfo."""
    # https://en.wikipedia.org/wiki/BogoMips
    # score = sum(cpu.bogomips for cpu in device.cpus)
    mips = []
    with open("/proc/cpuinfo") as f:
        for line in f:
            # One "bogomips : <value>" line per logical CPU.
            if line.startswith("bogomips"):
                mips.append(float(line.split(':')[1]))
    return sum(mips)
def score_ram(speed):
    """
    Score is the relation between memory frequency and memory latency.
    - higher frequency is better
    - lower latency is better

    Returns the string "Unknown" when ``speed`` cannot be parsed.
    """
    # http://www.cyberciti.biz/faq/check-ram-speed-linux/
    # Expected input "800 MHz (1.2 ns)"
    try:
        freq = float(speed.split()[0])
        # Start one character past "(": the old slice included the
        # parenthesis, so float() always raised and every valid input
        # scored "Unknown".
        lat = float(speed[speed.index("(") + 1:speed.index("ns)")])
        return freq / lat
    except (IndexError, ValueError, ZeroDivisionError):
        # Unparseable string or zero latency.
        return "Unknown"
def score_vga(model_name):
    """Placeholder GPU score: always returns None until lookup-table
    matching against etc/vga.txt is implemented."""
    score = None
    # Candidate model tokens are alphanumerics containing a digit,
    # e.g. 'GT218M', '310M'.
    for model in re.findall('\w*\d\w*', model_name):
        # TODO find matching on etc/vga.txt (e.g. ['GT218M', '310M'])
        pass
    return score
| Python | 0 |
71554067936e2355658e6e566e8fcb4a66f24ee7 | Add new keyfile | dexter/config/celeryconfig.py | dexter/config/celeryconfig.py | from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
    'region': 'eu-west-1',
    'polling_interval': 15 * 1,  # seconds between SQS polls
    'queue_name_prefix': 'mma-dexter-',
    'visibility_timeout': 3600*12,  # 12h before an unacked task reappears
}
# all our tasks can by retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
# Cron-style periodic tasks; times are interpreted in CELERY_TIMEZONE.
CELERYBEAT_SCHEDULE = {
    'fetch-yesterdays-feeds': {
        'schedule': crontab(hour=1, minute=0),
        'task': 'dexter.tasks.fetch_yesterdays_feeds',
    },
    'back-process-feeds': {
        'schedule': crontab(hour=11, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # Re-run later in the day to pick up stragglers.
    'fetch_yesterdays_feeds_rerun': {
        'schedule': crontab(hour=12, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # 'backfill-taxonomies': {
    #     'schedule': crontab(hour=21, minute=0),
    #     'task': 'dexter.tasks.backfill_taxonomies',
    # },
}
| from celery.schedules import crontab
# uses AWS creds from the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY env variables
BROKER_URL = 'sqs://'
BROKER_TRANSPORT_OPTIONS = {
    'region': 'eu-west-1',
    'polling_interval': 15 * 1,  # seconds between SQS polls
    'queue_name_prefix': 'mma-dexter-',
    'visibility_timeout': 3600*12,  # 12h before an unacked task reappears
}
# all our tasks can by retried if the worker fails
CELERY_ACKS_LATE = True
CELERY_TASK_SERIALIZER = 'json'
CELERY_RESULT_SERIALIZER = 'json'
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TIMEZONE = 'Africa/Johannesburg'
CELERY_ENABLE_UTC = True
# Cron-style periodic tasks; times are interpreted in CELERY_TIMEZONE.
CELERYBEAT_SCHEDULE = {
    'fetch-yesterdays-feeds': {
        'schedule': crontab(hour=3, minute=0),
        'task': 'dexter.tasks.fetch_yesterdays_feeds',
    },
    'back-process-feeds': {
        'schedule': crontab(hour=11, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # Key quoted: the bare identifier was a NameError at import time.
    'fetch_yesterdays_feeds_rerun': {
        'schedule': crontab(hour=12, minute=0),
        'task': 'dexter.tasks.back_process_feeds',
    },
    # 'backfill-taxonomies': {
    #     'schedule': crontab(hour=21, minute=0),
    #     'task': 'dexter.tasks.backfill_taxonomies',
    # },
}
| Python | 0.000002 |
a0b1948261555b724e9c72558a7ca18d793f4748 | Support Ticket - In response to fix | erpnext/support/doctype/support_ticket/support_ticket.py | erpnext/support/doctype/support_ticket/support_ticket.py | import webnotes
from webnotes.model.doc import make_autoname
from utilities.transaction_base import TransactionBase
from home import update_feed
class DocType(TransactionBase):
    """Controller for the Support Ticket doctype (webnotes/ERPNext)."""

    def __init__(self, doc, doclist=[]):
        # NOTE(review): mutable default argument is shared across calls;
        # this is the conventional webnotes controller signature, left as-is.
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        """Name the ticket from its naming series plus a 5-digit counter."""
        self.doc.name = make_autoname(self.doc.naming_series+'.#####')

    def send_response(self):
        """
        Adds a new response to the ticket and sends an email to the sender
        """
        if not self.doc.new_response:
            webnotes.msgprint("Please write something as a response", raise_exception=1)
        subject = '[' + self.doc.name + '] ' + self.doc.subject
        response = self.doc.new_response + '\n\n[Please do not change the subject while responding.]'
        # add last response to new response
        response += self.last_response()
        signature = webnotes.conn.get_value('Email Settings',None,'support_signature')
        if signature:
            response += '\n\n' + signature
        from webnotes.utils.email_lib import sendmail
        sendmail(\
            recipients = [self.doc.raised_by], \
            sender=webnotes.conn.get_value('Email Settings',None,'support_email'), \
            subject=subject, \
            msg=response)
        # Clear the draft, flip status, and persist the sent response.
        self.doc.new_response = None
        webnotes.conn.set(self.doc,'status','Waiting for Customer')
        self.make_response_record(response)

    def last_response(self):
        """Return the most recent response quoted for inclusion in an email;
        falls back to the original ticket description when no response
        exists yet."""
        tmp = webnotes.conn.sql("""select mail from `tabSupport Ticket Response`
            where parent = %s order by creation desc limit 1
            """, self.doc.name)
        if not tmp:
            # No responses yet: quote the ticket's own description instead.
            tmp = webnotes.conn.sql("""
                SELECT description from `tabSupport Ticket`
                where name = %s
            """, self.doc.name)
        return '\n\n=== In response to ===\n\n' + tmp[0][0]

    def make_response_record(self, response, from_email = None, content_type='text/plain'):
        """
        Creates a new Support Ticket Response record
        """
        # add to Support Ticket Response
        from webnotes.model.doc import Document
        d = Document('Support Ticket Response')
        d.from_email = from_email or webnotes.user.name
        d.parent = self.doc.name
        d.mail = response
        d.content_type = content_type
        d.save(1)

    def close_ticket(self):
        """Mark the ticket Closed and refresh the home-page feed."""
        webnotes.conn.set(self.doc,'status','Closed')
        update_feed(self.doc)

    def reopen_ticket(self):
        """Mark the ticket Open again and refresh the home-page feed."""
        webnotes.conn.set(self.doc,'status','Open')
        update_feed(self.doc)
| import webnotes
from webnotes.model.doc import make_autoname
from utilities.transaction_base import TransactionBase
from home import update_feed
class DocType(TransactionBase):
    """Controller for the Support Ticket doctype (webnotes/ERPNext)."""

    def __init__(self, doc, doclist=[]):
        # NOTE(review): mutable default argument is shared across calls;
        # this is the conventional webnotes controller signature, left as-is.
        self.doc = doc
        self.doclist = doclist

    def autoname(self):
        """Name the ticket from its naming series plus a 5-digit counter."""
        self.doc.name = make_autoname(self.doc.naming_series+'.#####')

    def send_response(self):
        """
        Adds a new response to the ticket and sends an email to the sender
        """
        if not self.doc.new_response:
            webnotes.msgprint("Please write something as a response", raise_exception=1)
        subject = '[' + self.doc.name + '] ' + self.doc.subject
        response = self.doc.new_response + '\n\n[Please do not change the subject while responding.]'
        # add last response to new response
        response += self.last_response()
        signature = webnotes.conn.get_value('Email Settings',None,'support_signature')
        if signature:
            response += '\n\n' + signature
        from webnotes.utils.email_lib import sendmail
        sendmail(\
            recipients = [self.doc.raised_by], \
            sender=webnotes.conn.get_value('Email Settings',None,'support_email'), \
            subject=subject, \
            msg=response)
        # Clear the draft, flip status, and persist the sent response.
        self.doc.new_response = None
        webnotes.conn.set(self.doc,'status','Waiting for Customer')
        self.make_response_record(response)

    def last_response(self):
        """Return the most recent response quoted for inclusion in an email.

        NOTE(review): raises IndexError when the ticket has no responses
        yet (``tmp`` is empty) -- a fallback to the ticket description was
        added in a later revision; confirm expected behavior here.
        """
        tmp = webnotes.conn.sql("""select mail from `tabSupport Ticket Response`
            where parent = %s order by creation desc limit 1
            """, self.doc.name)
        return '\n\n=== In response to ===\n\n' + tmp[0][0]

    def make_response_record(self, response, from_email = None, content_type='text/plain'):
        """
        Creates a new Support Ticket Response record
        """
        # add to Support Ticket Response
        from webnotes.model.doc import Document
        d = Document('Support Ticket Response')
        d.from_email = from_email or webnotes.user.name
        d.parent = self.doc.name
        d.mail = response
        d.content_type = content_type
        d.save(1)

    def close_ticket(self):
        """Mark the ticket Closed and refresh the home-page feed."""
        webnotes.conn.set(self.doc,'status','Closed')
        update_feed(self.doc)

    def reopen_ticket(self):
        """Mark the ticket Open again and refresh the home-page feed."""
        webnotes.conn.set(self.doc,'status','Open')
        update_feed(self.doc)
| Python | 0 |
ab0de2247584f1c86eb15a9c9da254865ebfdfc0 | Create artifact directory when not created. | tubular/scripts/find_and_advance_pipeline.py | tubular/scripts/find_and_advance_pipeline.py | #! /usr/bin/env python3
"""
Command-line script to find the next release pipeline to advance
and then advance it by triggering the manual stage.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os import path
import os
import sys
import logging
import yaml
from dateutil import parser
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.gocd_api import GoCDAPI # pylint: disable=wrong-import-position
from tubular.hipchat import submit_hipchat_message # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
    '--gocd_user',
    help=u"Username to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--gocd_password',
    help=u"Password to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--gocd_url',
    help=u"URL to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--hipchat_token',
    help=u"HipChat token which authorizes message sending. (optional)",
)
@click.option(
    '--hipchat_channel',
    multiple=True,
    help=u"HipChat channel which to send the message. (optional)",
)
@click.option(
    '--pipeline',
    help=u"Name of the pipeline to advance.",
    required=True,
)
@click.option(
    '--stage',
    help=u"Name of the pipeline's stage to advance.",
    required=True,
)
@click.option(
    '--relative_dt',
    help=u"Datetime used when determining current release date in ISO 8601 format, YYYY-MM-DDTHH:MM:SS+HH:MM",
)
@click.option(
    '--out_file',
    help=u"File location in which to write CI test status info.",
    # lazy=True defers opening until first write, so the parent directory
    # can be created below before the file is opened.
    type=click.File(mode='w', lazy=True),
    default=sys.stdout
)
def find_and_advance_pipeline(
        gocd_user, gocd_password, gocd_url, hipchat_token, hipchat_channel, pipeline, stage, relative_dt, out_file
):
    """
    Find the GoCD advancement pipeline that should be advanced/deployed to production - and advance it.
    """
    gocd = GoCDAPI(gocd_user, gocd_password, gocd_url)
    # If a datetime string was passed-in, convert it to a datetime.
    if relative_dt:
        relative_dt = parser.parse(relative_dt)
    pipeline_to_advance = gocd.fetch_pipeline_to_advance(pipeline, stage, relative_dt)
    # Trigger the manual-approval gate, which advances the pipeline run.
    gocd.approve_stage(
        pipeline_to_advance.name,
        pipeline_to_advance.counter,
        stage
    )
    advance_info = {
        'name': pipeline_to_advance.name,
        'counter': pipeline_to_advance.counter,
        'stage': stage,
        'url': pipeline_to_advance.url
    }
    LOG.info('Successfully advanced this pipeline: %s', advance_info)
    # Create the artifact directory when it does not exist yet; dirname is
    # '' when writing to stdout, in which case nothing is created.
    dirname = os.path.dirname(out_file.name)
    if dirname:
        os.makedirs(dirname, exist_ok=True)
    yaml.safe_dump(advance_info, stream=out_file)
    if hipchat_token:
        submit_hipchat_message(
            hipchat_token,
            hipchat_channel,
            'PROD DEPLOY: Pipeline was advanced: {}'.format(pipeline_to_advance.url),
            "green"
        )


if __name__ == "__main__":
    # pylint: disable=no-value-for-parameter
    find_and_advance_pipeline()
| #! /usr/bin/env python3
"""
Command-line script to find the next release pipeline to advance
and then advance it by triggering the manual stage.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
from os import path
import sys
import logging
import yaml
from dateutil import parser
import click
# Add top-level module path to sys.path before importing tubular code.
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from tubular.gocd_api import GoCDAPI # pylint: disable=wrong-import-position
from tubular.hipchat import submit_hipchat_message # pylint: disable=wrong-import-position
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
LOG = logging.getLogger(__name__)
@click.command()
@click.option(
    '--gocd_user',
    help=u"Username to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--gocd_password',
    help=u"Password to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--gocd_url',
    help=u"URL to use in logging into GoCD.",
    required=True,
)
@click.option(
    '--hipchat_token',
    help=u"HipChat token which authorizes message sending. (optional)",
)
@click.option(
    '--hipchat_channel',
    multiple=True,
    help=u"HipChat channel which to send the message. (optional)",
)
@click.option(
    '--pipeline',
    help=u"Name of the pipeline to advance.",
    required=True,
)
@click.option(
    '--stage',
    help=u"Name of the pipeline's stage to advance.",
    required=True,
)
@click.option(
    '--relative_dt',
    help=u"Datetime used when determining current release date in ISO 8601 format, YYYY-MM-DDTHH:MM:SS+HH:MM",
)
@click.option(
    '--out_file',
    help=u"File location to which to write pipeline advancement information.",
    # NOTE(review): click opens this file eagerly at argument-parsing time,
    # so the parent directory must already exist -- confirm callers create it.
    type=click.File(mode='w'),
    default=sys.stdout,
)
def find_and_advance_pipeline(
        gocd_user, gocd_password, gocd_url, hipchat_token, hipchat_channel, pipeline, stage, relative_dt, out_file
):
    """
    Find the GoCD advancement pipeline that should be advanced/deployed to production - and advance it.
    """
    gocd = GoCDAPI(gocd_user, gocd_password, gocd_url)
    # If a datetime string was passed-in, convert it to a datetime.
    if relative_dt:
        relative_dt = parser.parse(relative_dt)
    pipeline_to_advance = gocd.fetch_pipeline_to_advance(pipeline, stage, relative_dt)
    # Trigger the manual-approval gate, which advances the pipeline run.
    gocd.approve_stage(
        pipeline_to_advance.name,
        pipeline_to_advance.counter,
        stage
    )
    advance_info = {
        'name': pipeline_to_advance.name,
        'counter': pipeline_to_advance.counter,
        'stage': stage,
        'url': pipeline_to_advance.url
    }
    LOG.info('Successfully advanced this pipeline: %s', advance_info)
    yaml.safe_dump(advance_info, stream=out_file)
    if hipchat_token:
        submit_hipchat_message(
            hipchat_token,
            hipchat_channel,
            'PROD DEPLOY: Pipeline was advanced: {}'.format(pipeline_to_advance.url),
            "green"
        )


if __name__ == "__main__":
    # pylint: disable=no-value-for-parameter
    find_and_advance_pipeline()
| Python | 0 |
30e44e48bacb1403d4df96df0654bdade324ec3e | Add test for `get_current_shift` | clock/shifts/tests/test_utils.py | clock/shifts/tests/test_utils.py | """Tests for the shift utilities."""
from test_plus import TestCase
from clock.shifts.factories import ShiftFactory, UserFactory
from clock.shifts.models import Shift
from clock.shifts.utils import get_current_shift, get_last_shifts
from clock.contracts.models import Contract
class TestUtils(TestCase):
    """Test the functionality of the shift utilities."""

    def setUp(self):
        # A user with one contract, reused by the quick-action test below.
        self.user = self.make_user()
        self.contract1 = Contract.objects.create(
            employee=self.user, department='Test department', hours='50')

    def test_get_last_shifts(self):
        employee = UserFactory()
        # Function returns `None` when user has no shifts yet
        no_shifts = get_last_shifts(employee)
        self.assertIsNone(no_shifts)
        # Function returns the last 5 shifts per default
        shifts = ShiftFactory.create_batch(10, employee=employee)
        five_shifts = get_last_shifts(employee)
        self.assertEqual(len(five_shifts), 5)
        self.assertIsInstance(five_shifts[0], Shift)
        self.assertEqual(five_shifts[0].employee, employee)
        # Assert we get the correct order, with the latest finished shift first.
        for i, shift in enumerate(five_shifts):
            try:
                self.assertTrue(five_shifts[i].shift_finished >
                                five_shifts[i + 1].shift_finished)
            except IndexError:
                # Last element has no successor; nothing to compare.
                pass
        # Return seven shifts
        seven_shifts = get_last_shifts(employee, count=7)
        self.assertEqual(len(seven_shifts), 7)
        # Return the maximum number of shifts, even if more are requested
        eleven_shifts = get_last_shifts(employee, count=11)
        self.assertEqual(len(eleven_shifts), 10)
        # Make sure we only retrieve finished shifts
        for shift in eleven_shifts:
            self.assertIsNotNone(shift.shift_finished)

    def test_retrieve_current_running_shift(self):
        """Test that we can retrieve the currently running shift."""
        # No shift is running before the quick-action starts one.
        no_shift = get_current_shift(self.user)
        self.assertIsNone(no_shift)
        with self.login(username=self.user.username, password='password'):
            # Starting a shift via the quick-action view creates an
            # unfinished shift for the logged-in user.
            response = self.post(
                'shift:quick_action', data={
                    '_start': True,
                }, follow=True)
            last_shift = get_current_shift(self.user)
            self.assertIsNotNone(last_shift)
            # NOTE(review): the second argument '' is assertIsNone's msg
            # parameter -- probably unintended but harmless.
            self.assertIsNone(last_shift.shift_finished, '')
| """Tests for the shift utilities."""
from test_plus import TestCase
from clock.shifts.factories import UserFactory, ShiftFactory
from clock.shifts.models import Shift
from clock.shifts.utils import get_last_shifts
class TestUtils(TestCase):
"""Test the functionality of the shift utilities."""
def test_get_last_shifts(self):
employee = UserFactory()
# Function returns `None` when user has no shifts yet
no_shifts = get_last_shifts(employee)
self.assertIsNone(no_shifts)
# Function returns the last 5 shifts per default
shifts = ShiftFactory.create_batch(10, employee=employee)
five_shifts = get_last_shifts(employee)
self.assertEqual(len(five_shifts), 5)
self.assertIsInstance(five_shifts[0], Shift)
self.assertEqual(five_shifts[0].employee, employee)
# Assert we get the correct order, with the latest finished shift first.
for i, shift in enumerate(five_shifts):
try:
self.assertTrue(five_shifts[i].shift_finished >
five_shifts[i + 1].shift_finished)
except IndexError:
pass
# Return seven shifts
seven_shifts = get_last_shifts(employee, count=7)
self.assertEqual(len(seven_shifts), 7)
# Return the maximum number of shifts, even if more are requested
eleven_shifts = get_last_shifts(employee, count=11)
self.assertEqual(len(eleven_shifts), 10)
# Make sure we only retrieve finished shifts
for shift in eleven_shifts:
self.assertIsNotNone(shift.shift_finished)
| Python | 0 |
2bbf7bc31b0c7372c143e9d8d062302127ddadd8 | add __version__ package attribute | online_monitor/__init__.py | online_monitor/__init__.py | # http://stackoverflow.com/questions/17583443/what-is-the-correct-way-to-share-package-version-with-setup-py-and-the-package
from pkg_resources import get_distribution
__version__ = get_distribution('online_monitor').version | Python | 0.000077 | |
a3f568a0eaad8209423a2d418ee7e627e614f4ee | Create Gravatar field for API serialization | dockci/api/fields.py | dockci/api/fields.py | """
Flask RESTful fields, and WTForms input validators for validation and
marshaling
"""
import re
from functools import wraps
from flask_restful import fields
from dockci.util import gravatar_url
class RewriteUrl(fields.Url):
"""
Extension of the Flask RESTful Url field that allows you to remap object
fields to different names
"""
def __init__(self,
endpoint=None,
absolute=False,
scheme=None,
rewrites=None):
super(RewriteUrl, self).__init__(endpoint, absolute, scheme)
self.rewrites = rewrites or {}
def output(self, key, obj):
if obj is None:
return None
data = obj.__dict__
for field_set, field_from in self.rewrites.items():
attr_path_data = obj
for attr_path in field_from.split('.'):
if attr_path_data is None:
return None
attr_path_data = getattr(attr_path_data, attr_path)
data[field_set] = attr_path_data
return super(RewriteUrl, self).output(key, data)
class GravatarUrl(fields.String):
"""
Automatically turn an email into a Gravatar URL
>>> from dockci.models.job import Job
>>> field = GravatarUrl()
>>> field.output('git_author_email',
... Job(git_author_email='ricky@spruce.sh'))
'https://s.gravatar.com/avatar/35866d5d838f7aeb9b51a29eda9878e7'
>>> field = GravatarUrl(attr_name='git_author_email')
>>> field.output('different_name',
... Job(git_author_email='ricky@spruce.sh'))
'https://s.gravatar.com/avatar/35866d5d838f7aeb9b51a29eda9878e7'
"""
def __init__(self, attr_name=None):
self.attr_name = attr_name
def output(self, key, obj):
if self.attr_name is None:
email = getattr(obj, key)
else:
email = getattr(obj, self.attr_name)
return gravatar_url(email)
class RegexField(fields.String):
""" Output a Python compiled regex as string """
def output(self, key, obj):
regex = getattr(obj, key, None)
if regex is None:
return None
return regex.pattern
class NonBlankInput(object):
""" Don't allow a field to be blank, or None """
def _raise_error(self, name): # pylint:disable=no-self-use
""" Central place to handle invalid input """
raise ValueError("The '%s' parameter can not be blank" % name)
def __call__(self, value, name):
if value is None:
self._raise_error(name)
try:
if value.strip() == '':
self._raise_error(name)
except AttributeError:
pass
return value
class RegexInput(object):
""" Validate a RegEx """
def __call__(self, value, name): # pylint:disable=no-self-use
try:
return re.compile(value)
except re.error as ex:
raise ValueError(str(ex))
def strip(field_type):
""" Decorator to strip whitespace on input values before parsing """
@wraps(field_type)
def inner(value, name):
""" Strip whitespace, pass to input field type """
try:
value = value.strip()
except AttributeError:
pass
return field_type(value, name)
return inner
| """
Flask RESTful fields, and WTForms input validators for validation and
marshaling
"""
import re
from functools import wraps
from flask_restful import fields
class RewriteUrl(fields.Url):
"""
Extension of the Flask RESTful Url field that allows you to remap object
fields to different names
"""
def __init__(self,
endpoint=None,
absolute=False,
scheme=None,
rewrites=None):
super(RewriteUrl, self).__init__(endpoint, absolute, scheme)
self.rewrites = rewrites or {}
def output(self, key, obj):
if obj is None:
return None
data = obj.__dict__
for field_set, field_from in self.rewrites.items():
attr_path_data = obj
for attr_path in field_from.split('.'):
if attr_path_data is None:
return None
attr_path_data = getattr(attr_path_data, attr_path)
data[field_set] = attr_path_data
return super(RewriteUrl, self).output(key, data)
class RegexField(fields.String):
""" Output a Python compiled regex as string """
def output(self, key, obj):
regex = getattr(obj, key, None)
if regex is None:
return None
return regex.pattern
class NonBlankInput(object):
""" Don't allow a field to be blank, or None """
def _raise_error(self, name): # pylint:disable=no-self-use
""" Central place to handle invalid input """
raise ValueError("The '%s' parameter can not be blank" % name)
def __call__(self, value, name):
if value is None:
self._raise_error(name)
try:
if value.strip() == '':
self._raise_error(name)
except AttributeError:
pass
return value
class RegexInput(object):
""" Validate a RegEx """
def __call__(self, value, name): # pylint:disable=no-self-use
try:
return re.compile(value)
except re.error as ex:
raise ValueError(str(ex))
def strip(field_type):
""" Decorator to strip whitespace on input values before parsing """
@wraps(field_type)
def inner(value, name):
""" Strip whitespace, pass to input field type """
try:
value = value.strip()
except AttributeError:
pass
return field_type(value, name)
return inner
| Python | 0 |
1bc4c7ff0ecd5df9a1874c1f9930e33268c9524d | fix AddonMan | app/py/cuda_addonman/work_cudatext_updates__fosshub.py | app/py/cuda_addonman/work_cudatext_updates__fosshub.py | import sys
import os
import re
import platform
import tempfile
import webbrowser
import cudatext as app
from .work_remote import *
p = sys.platform
X64 = platform.architecture()[0]=='64bit'
DOWNLOAD_PAGE = 'https://www.fosshub.com/CudaText.html'
TEXT_CPU = 'x64' if X64 else 'x32'
REGEX_GROUP_VER = 1
DOWNLOAD_REGEX = ' href="(https://.+?=cudatext-win-'+TEXT_CPU+'-(.+?)\.zip)"'
def versions_ordered(s1, s2):
"""
compare "1.10.0" and "1.9.0" correctly
"""
n1 = list(map(int, s1.split('.')))
n2 = list(map(int, s2.split('.')))
return n1<=n2
def check_cudatext():
if os.name!='nt':
return
fn = os.path.join(tempfile.gettempdir(), 'cudatext_download.html')
app.msg_status('Downloading: '+DOWNLOAD_PAGE, True)
get_url(DOWNLOAD_PAGE, fn, True)
app.msg_status('')
if not os.path.isfile(fn):
app.msg_status('Cannot download: '+DOWNLOAD_PAGE)
return
text = open(fn, encoding='utf8').read()
items = re.findall(DOWNLOAD_REGEX, text)
if not items:
app.msg_status('Cannot find download links')
return
items = sorted(items, reverse=True)
print('Found links:')
for i in items:
print(' '+i[0])
url = items[0][0]
ver_inet = items[0][REGEX_GROUP_VER]
ver_local = app.app_exe_version()
if versions_ordered(ver_inet, ver_local):
app.msg_box('Latest CudaText is already here.\nLocal: %s\nInternet: %s'
%(ver_local, ver_inet), app.MB_OK+app.MB_ICONINFO)
return
if app.msg_box('CudaText update is available.\nLocal: %s\nInternet: %s\n\nOpen download URL in browser?'
%(ver_local, ver_inet), app.MB_YESNO+app.MB_ICONINFO) == app.ID_YES:
webbrowser.open_new_tab(url)
print('Opened download URL')
| import sys
import os
import re
import platform
import tempfile
import webbrowser
import cudatext as app
from .work_remote import *
p = sys.platform
X64 = platform.architecture()[0]=='64bit'
DOWNLOAD_PAGE = 'https://www.fosshub.com/CudaText.html'
TEXT_CPU = 'x64' if X64 else 'x32'
REGEX_GROUP_VER = 1
DOWNLOAD_REGEX = ' href="(https://.+?=cudatext-win-'+TEXT_CPU+'-(.+?)\.zip)"'
def versions_ordered(s1, s2):
"""
compare "1.10.0" and "1.9.0" correctly
"""
n1 = list(map(int, s1.split('.')))
n2 = list(map(int, s2.split('.')))
return n1<=n2
def check_cudatext():
fn = os.path.join(tempfile.gettempdir(), 'cudatext_download.html')
app.msg_status('Downloading: '+DOWNLOAD_PAGE, True)
get_url(DOWNLOAD_PAGE, fn, True)
app.msg_status('')
if not os.path.isfile(fn):
app.msg_status('Cannot download: '+DOWNLOAD_PAGE)
return
text = open(fn, encoding='utf8').read()
items = re.findall(DOWNLOAD_REGEX, text)
if not items:
app.msg_status('Cannot find download links')
return
items = sorted(items, reverse=True)
print('Found links:')
for i in items:
print(' '+i[0])
url = items[0][0]
ver_inet = items[0][REGEX_GROUP_VER]
ver_local = app.app_exe_version()
if versions_ordered(ver_inet, ver_local):
app.msg_box('Latest CudaText is already here.\nLocal: %s\nInternet: %s'
%(ver_local, ver_inet), app.MB_OK+app.MB_ICONINFO)
return
if app.msg_box('CudaText update is available.\nLocal: %s\nInternet: %s\n\nOpen download URL in browser?'
%(ver_local, ver_inet), app.MB_YESNO+app.MB_ICONINFO) == app.ID_YES:
webbrowser.open_new_tab(url)
print('Opened download URL')
| Python | 0.000001 |
34d9375de23384b3a5a777f802e93973ef7c4e60 | Fix the ARC test case. | MDTraj/tests/test_arc.py | MDTraj/tests/test_arc.py | ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import ArcTrajectoryFile, arc
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.testing import get_fn, eq, DocStringFormatTester
TestDocstrings = DocStringFormatTester(arc, error_on_none=True)
fd, temp = tempfile.mkstemp(suffix='.arc')
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.close(fd)
os.unlink(temp)
def test_read_0():
with ArcTrajectoryFile(get_fn('4waters.arc')) as f:
xyz, leng, ang = f.read()
with PDBTrajectoryFile(get_fn('4waters.pdb')) as f:
xyz2 = f.positions
eq(xyz, xyz2, decimal=3)
| ##############################################################################
# MDTraj: A Python Library for Loading, Saving, and Manipulating
# Molecular Dynamics Trajectories.
# Copyright 2012-2013 Stanford University and the Authors
#
# Authors: Lee-Ping Wang
# Contributors: Robert McGibbon
#
# MDTraj is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 2.1
# of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with MDTraj. If not, see <http://www.gnu.org/licenses/>.
##############################################################################
import tempfile, os
import numpy as np
import mdtraj as md
from mdtraj.formats import ArcTrajectoryFile, arc
from mdtraj.formats import PDBTrajectoryFile
from mdtraj.testing import get_fn, eq, DocStringFormatTester
TestDocstrings = DocStringFormatTester(arc, error_on_none=True)
fd, temp = tempfile.mkstemp(suffix='.arc')
def teardown_module(module):
"""remove the temporary file created by tests in this file
this gets automatically called by nose"""
os.close(fd)
os.unlink(temp)
def test_read_0():
with ArcTrajectoryFile(get_fn('4waters.arc')) as f:
xyz = f.read()
with PDBTrajectoryFile(get_fn('4waters.pdb')) as f:
xyz2 = f.positions
eq(xyz, xyz2, decimal=3)
| Python | 0.000058 |
aec8191bb4ae782c9b7570fff2fc4b10b4a68bb6 | Update docstring on top of migration | scripts/migration/migrate_root_and_parent_on_node.py | scripts/migration/migrate_root_and_parent_on_node.py | """
This will add a parent field and a parent_node field to all nodes.
Root will be the primary key of the originating parent node.
Parent_node will be the first primary parent
Done so that you can filter on both root nodes and parent nodes with a DB query
"""
import sys
import logging
from modularodm import Q
from website import models
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def do_migration():
init_app(routes=False)
logger.warn('ultimate_parent field will be added to all nodes.')
all_undeleted_nodes = models.Node.find(Q('is_deleted', 'eq', False))
all_undeleted_nodes_count = all_undeleted_nodes.count()
touched_counter = 0
logger.info('There are {} total nodes'.format(all_undeleted_nodes_count))
for node in all_undeleted_nodes:
if not getattr(node, '_parent_node', None):
touched_counter += 1
node.save()
children = [child for child in node.get_descendants_recursive(include=lambda n: n.primary and not n.is_deleted)]
logger.info(
'{}/{}: touched node {} with children {}'.format(
touched_counter,
all_undeleted_nodes_count,
node._id,
[child._id for child in children]
)
)
assert node.root._id == node._id
assert not getattr(node, 'parent_node', None)
logger.info('Parent Node Saving: Saved Node {} with root {}'.format(node._id, node.root))
for child in children:
touched_counter += 1
logger.info(
'{}/{}: touched node {} with parent {}'.format(
touched_counter,
all_undeleted_nodes_count,
child._id,
child.parent_id
)
)
child.save()
logger.info('The child root id is {} and the node id is {}'.format(child.root._id, node._id))
logger.info('Child Node saved: Verifying that save Node {} with root {}'.format(child._id, child.root))
assert child.parent_node._id == child.parent_id
assert child.root._id == node._id
assert all_undeleted_nodes_count == touched_counter
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
| """
This will add an ultimate_parent field to all nodes.
Ultimate_parent will be the primary key of the originating parent node
"""
import sys
import logging
from modularodm import Q
from website import models
from website.app import init_app
from scripts import utils as script_utils
from framework.transactions.context import TokuTransaction
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def do_migration():
init_app(routes=False)
logger.warn('ultimate_parent field will be added to all nodes.')
all_undeleted_nodes = models.Node.find(Q('is_deleted', 'eq', False))
all_undeleted_nodes_count = all_undeleted_nodes.count()
touched_counter = 0
logger.info('There are {} total nodes'.format(all_undeleted_nodes_count))
for node in all_undeleted_nodes:
if not getattr(node, '_parent_node', None):
touched_counter += 1
node.save()
children = [child for child in node.get_descendants_recursive(include=lambda n: n.primary and not n.is_deleted)]
logger.info(
'{}/{}: touched node {} with children {}'.format(
touched_counter,
all_undeleted_nodes_count,
node._id,
[child._id for child in children]
)
)
assert node.root._id == node._id
assert not getattr(node, 'parent_node', None)
logger.info('Parent Node Saving: Saved Node {} with root {}'.format(node._id, node.root))
for child in children:
touched_counter += 1
logger.info(
'{}/{}: touched node {} with parent {}'.format(
touched_counter,
all_undeleted_nodes_count,
child._id,
child.parent_id
)
)
child.save()
logger.info('The child root id is {} and the node id is {}'.format(child.root._id, node._id))
logger.info('Child Node saved: Verifying that save Node {} with root {}'.format(child._id, child.root))
assert child.parent_node._id == child.parent_id
assert child.root._id == node._id
assert all_undeleted_nodes_count == touched_counter
def main(dry=True):
init_app(set_backends=True, routes=False) # Sets the storage backends on all models
with TokuTransaction():
do_migration()
if dry:
raise Exception('Abort Transaction - Dry Run')
if __name__ == '__main__':
dry = 'dry' in sys.argv
if not dry:
script_utils.add_file_logger(logger, __file__)
main(dry=dry)
| Python | 0 |
d35604f7cdef01f9cf39171bf6c1551e314231ae | remove chunk | openprocurement/tender/openeu/traversal.py | openprocurement/tender/openeu/traversal.py | # -*- coding: utf-8 -*-
from openprocurement.api.traversal import Root, get_item
def qualifications_factory(request):
request.validated['tender_src'] = {}
root = Root(request)
if not request.matchdict or not request.matchdict.get('tender_id'):
return root
request.validated['tender_id'] = request.matchdict['tender_id']
tender = request.tender
tender.__parent__ = root
request.validated['tender'] = tender
request.validated['tender_status'] = tender.status
if request.method != 'GET':
request.validated['tender_src'] = tender.serialize('plain')
if request.matchdict.get('qualification_id'):
qualification = get_item(tender, 'qualification', request)
if request.matchdict.get('document_id'):
return get_item(qualification, 'document', request)
else:
return qualification
request.validated['id'] = request.matchdict['tender_id']
return tender
def get_document(parent, key, request):
request.validated['document_id'] = request.matchdict['document_id']
attr = key.split('_')
attr = attr[0] + attr[1].capitalize() + 's'
items = [i for i in getattr(parent, attr, []) if i.id == request.matchdict['document_id']]
if not items:
from openprocurement.api.utils import error_handler
request.errors.add('url', 'document_id', 'Not Found')
request.errors.status = 404
raise error_handler(request.errors)
else:
if 'document' in key:
request.validated['documents'] = items
item = items[-1]
request.validated['document'] = item
request.validated['id'] = request.matchdict['document_id']
item.__parent__ = parent
return item
def handle_root(request):
request.validated['tender_src'] = {}
root = Root(request)
if not request.matchdict or not request.matchdict.get('tender_id'):
return root
request.validated['tender_id'] = request.matchdict['tender_id']
tender = request.tender
tender.__parent__ = root
request.validated['tender'] = tender
request.validated['tender_status'] = tender.status
if request.method != 'GET':
request.validated['tender_src'] = tender.serialize('plain')
def bid_financial_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'financial_document', request)
else:
return bid
def bid_eligibility_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'eligibility_document', request)
else:
return bid
def bid_qualification_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'qualification_document', request)
else:
return bid
| # -*- coding: utf-8 -*-
from openprocurement.api.traversal import Root, get_item
def qualifications_factory(request):
request.validated['tender_src'] = {}
root = Root(request)
if not request.matchdict or not request.matchdict.get('tender_id'):
return root
request.validated['tender_id'] = request.matchdict['tender_id']
tender = request.tender
tender.__parent__ = root
request.validated['tender'] = tender
request.validated['tender_status'] = tender.status
if request.method != 'GET':
request.validated['tender_src'] = tender.serialize('plain')
if request.matchdict.get('qualification_id'):
qualification = get_item(tender, 'qualification', request)
if request.matchdict.get('document_id'):
return get_item(qualification, 'document', request)
else:
return qualification
request.validated['id'] = request.matchdict['tender_id']
return tender
def get_document(parent, key, request):
request.validated['document_id'] = request.matchdict['document_id']
request.validated['{}_id'.format(key)] = request.matchdict['document_id'] # TODO
attr = key.split('_')
attr = attr[0] + attr[1].capitalize() + 's'
print "parent container ", attr
items = [i for i in getattr(parent, attr, []) if i.id == request.matchdict['document_id']]
if not items:
from openprocurement.api.utils import error_handler
request.errors.add('url', '{}_id'.format(key), 'Not Found')
request.errors.status = 404
raise error_handler(request.errors)
else:
if 'document' in key:
request.validated['{}s'.format(key)] = items # TODO
request.validated['documents'] = items
item = items[-1]
request.validated[key] = item # TODO
request.validated['document'] = item
request.validated['id'] = request.matchdict['document_id']
item.__parent__ = parent
return item
def handle_root(request):
request.validated['tender_src'] = {}
root = Root(request)
if not request.matchdict or not request.matchdict.get('tender_id'):
return root
request.validated['tender_id'] = request.matchdict['tender_id']
tender = request.tender
tender.__parent__ = root
request.validated['tender'] = tender
request.validated['tender_status'] = tender.status
if request.method != 'GET':
request.validated['tender_src'] = tender.serialize('plain')
def bid_financial_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'financial_document', request)
else:
return bid # should never happen for documents resource
def bid_eligibility_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'eligibility_document', request)
else:
return bid
def bid_qualification_documents_factory(request):
response = handle_root(request)
if response:
return response
tender = request.validated['tender']
if request.matchdict.get('bid_id'):
bid = get_item(tender, 'bid', request)
if request.matchdict.get('document_id'):
return get_document(bid, 'qualification_document', request)
else:
return bid
| Python | 0.000075 |
863d0d28fb26007c448610a845caab39b1451326 | Add comparison with TCE output in CCD example | docs/examples/ccd.py | docs/examples/ccd.py | """Automatic derivation of CCD equations.
"""
import urllib.request
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
Rational(1, 4) * t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for order in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, order + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar).simplify()
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar).simplify()
# Check with the result from TCE.
TCE_BASE_URL = 'http://www.scs.illinois.edu/~sohirata/'
tce_res = [
dr.parse_tce(
urllib.request.urlopen(TCE_BASE_URL + i).read().decode(),
{2: t}
).simplify()
for i in ['ccd_e.out', 'ccd_t2.out']
]
print('Checking with TCE result: ')
print('Energy: ', en_eqn == tce_res[0])
print('T2 amplitude: ', t2_eqn == tce_res[1])
| """Automatic derivation of CCD equations.
"""
import pickle
from pyspark import SparkConf, SparkContext
from sympy import IndexedBase, Rational
from drudge import PartHoleDrudge, CR, AN
conf = SparkConf().setAppName('CCSD-derivation')
ctx = SparkContext(conf=conf)
dr = PartHoleDrudge(ctx)
p = dr.names
c_ = dr.op[AN]
c_dag = dr.op[CR]
a, b = p.V_dumms[:2]
i, j = p.O_dumms[:2]
t = IndexedBase('t')
dr.set_dbbar_base(t, 2)
doubles = dr.sum(
(a, p.V), (b, p.V), (i, p.O), (j, p.O),
t[a, b, i, j] * c_dag[a] * c_dag[b] * c_[j] * c_[i]
)
curr = dr.ham
h_bar = dr.ham
for i in range(0, 4):
curr = (curr | doubles).simplify() * Rational(1, i + 1)
h_bar += curr
en_eqn = dr.eval_fermi_vev(h_bar)
proj = c_dag[i] * c_dag[j] * c_[b] * c_[a]
t2_eqn = dr.eval_fermi_vev(proj * h_bar)
with open('ccd_eqns.pickle') as fp:
pickle.dump([en_eqn, t2_eqn], fp)
| Python | 0 |
1fd6fdbdd7c0cf3764fa0707692346675273a764 | allow underscores before quality suffix | mp4mark.py | mp4mark.py | #!/usr/bin/env python2
import os
import sys
import re
import glob
from subprocess import call, Popen, PIPE
files = []
for x in sys.argv[1:]:
files += glob.glob(x) or ([x] if os.path.exists(x) else [])
#import pdb; pdb.set_trace()
base = None
for vid in files:
m = re.match(r'(.*)[_-]\d+p[_-]ame?\.mp4$', vid)
if not m: continue
base = m.group(1)
if base is None:
base = re.sub(r'(.*)-ame\.mp4$', r'\1', files[0])
json = base + "-chapters.json"
ffmeta = base + "-chapters.ffmeta"
vtt = base + "-chapters.vtt"
jumplist = base + "-chapters.txt"
mp4select = Popen(['mp4select.py', 'uuid/+16', vid], stdout=PIPE, shell=True)
xmpmarkers = Popen(['xmpmarkers.py', '-'], stdin=mp4select.stdout, stdout=open(json, 'w'), shell=True)
assert xmpmarkers.wait() == 0
call(['ffmeta.py', 'ffmeta', json, ffmeta], shell=True)
call(['ffmeta.py', 'webvtt', json, vtt], shell=True)
call(['ffmeta.py', 'jumplist', json, jumplist], shell=True)
for invid in files:
outvid = invid.replace('-ame', '')
assert not os.path.exists(outvid)
call(['ffmpeg', '-i', invid, '-i', ffmeta, '-c', 'copy', '-movflags', 'faststart', outvid], shell=True)
| #!/usr/bin/env python2
import os
import sys
import re
import glob
from subprocess import call, Popen, PIPE
files = []
for x in sys.argv[1:]:
files += glob.glob(x) or ([x] if os.path.exists(x) else [])
#import pdb; pdb.set_trace()
base = None
for vid in files:
m = re.match(r'(.*)-\d+p-ame?\.mp4$', vid)
if not m: continue
base = m.group(1)
if base is None:
base = re.sub(r'(.*)-ame\.mp4$', r'\1', files[0])
json = base + "-chapters.json"
ffmeta = base + "-chapters.ffmeta"
vtt = base + "-chapters.vtt"
jumplist = base + "-chapters.txt"
mp4select = Popen(['mp4select.py', 'uuid/+16', vid], stdout=PIPE, shell=True)
xmpmarkers = Popen(['xmpmarkers.py', '-'], stdin=mp4select.stdout, stdout=open(json, 'w'), shell=True)
assert xmpmarkers.wait() == 0
call(['ffmeta.py', 'ffmeta', json, ffmeta], shell=True)
call(['ffmeta.py', 'webvtt', json, vtt], shell=True)
call(['ffmeta.py', 'jumplist', json, jumplist], shell=True)
for invid in files:
outvid = invid.replace('-ame', '')
assert not os.path.exists(outvid)
call(['ffmpeg', '-i', invid, '-i', ffmeta, '-c', 'copy', '-movflags', 'faststart', outvid], shell=True)
| Python | 0.000002 |
ed0b5efb77dd8178d6ec63db205dcf1d4e6a3ee0 | fix bug in category view | elephantblog/views.py | elephantblog/views.py | from datetime import date
from django.http import Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from feincms.translations import short_language_code
# from tagging.models import Tag, TaggedItem
from django.core.exceptions import FieldError
from feincms.views.generic import list_detail
from models import Entry
import settings
def entry(request, year, month, day, slug, language_code=None, **kwargs):
context={}
entry = get_object_or_404(Entry.objects.select_related(),
published_on__year=year,
published_on__month=month,
published_on__day=day,
slug=slug)
if not entry.isactive() and not request.user.is_authenticated():
raise Http404
else:
extra_contest = {'entry':entry,
'date': date(int(year), int(month),int(day)),
'comments' : settings.BLOG_COMMENTS
}
return render_to_response('blog/entry_detail.html', extra_contest,
context_instance=RequestContext(request))
""" Date views use object_list generic view due to pagination """
""" Define the options in the entry_dict of the url file. Copy the url file into your project. """
def entry_list(request, category=None, year=None, month=None, day=None, page=0,
paginate_by=10, template_name='blog/entry_list.html', limit=None,
language_code=None, exclude=None, **kwargs):
extra_context = {}
if language_code:
queryset = Entry.objects.active().filter(language=language_code)
else:
try:
language_code = request._feincms_page.language
queryset = Entry.objects.active().filter(language=language_code)
except (AttributeError, FieldError):
queryset = Entry.objects.active()
""" You can define a dict of fields and values to exclude. """
if exclude:
queryset = queryset.exclude(**exclude)
if limit:
queryset = queryset[:limit]
if category:
queryset = queryset.filter(categories__translations__slug=category)
extra_context.update({'category': category})
if year:
queryset = queryset.filter(published_on__year=int(year))
extra_context.update({'drilldown_mode': 'year', 'title' : _('entries of the year')})
else:
year=1
if month:
queryset = queryset.filter(published_on__month=int(month))
extra_context.update({'drilldown_mode': 'month', 'title' : _('entries of the month')})
else:
month=1
if day:
queryset = queryset.filter(published_on__day=int(day))
extra_context.update({'drilldown_mode': 'day', 'title' : _('entries of the year')})
else:
day=1
extra_context.update({'date':date(int(year), int(month), int(day)),
'comments' : settings.BLOG_COMMENTS})
return list_detail.object_list(
request,
queryset = queryset,
paginate_by = paginate_by,
page = page,
template_name = template_name,
extra_context = extra_context,
**kwargs)
| from datetime import date
from django.http import Http404
from django.shortcuts import render_to_response, get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
from feincms.translations import short_language_code
# from tagging.models import Tag, TaggedItem
from django.core.exceptions import FieldError
from feincms.views.generic import list_detail
from models import Entry
import settings
def entry(request, year, month, day, slug, language_code=None, **kwargs):
context={}
entry = get_object_or_404(Entry.objects.select_related(),
published_on__year=year,
published_on__month=month,
published_on__day=day,
slug=slug)
if not entry.isactive() and not request.user.is_authenticated():
raise Http404
else:
extra_contest = {'entry':entry,
'date': date(int(year), int(month),int(day)),
'comments' : settings.BLOG_COMMENTS
}
return render_to_response('blog/entry_detail.html', extra_contest,
context_instance=RequestContext(request))
""" Date views use object_list generic view due to pagination """
""" Define the options in the entry_dict of the url file. Copy the url file into your project. """
def entry_list(request, category=None, year=None, month=None, day=None, page=0,
paginate_by=10, template_name='blog/entry_list.html', limit=None,
language_code=None, exclude=None, **kwargs):
extra_context = {}
if language_code:
queryset = Entry.objects.active().filter(language=language_code)
else:
try:
language_code = request._feincms_page.language
queryset = Entry.objects.active().filter(language=language_code)
except (AttributeError, FieldError):
queryset = Entry.objects.active()
""" You can define a dict of fields and values to exclude. """
if exclude:
queryset = queryset.exclude(**exclude)
if limit:
queryset = queryset[:limit]
if category:
queryset = queryset.filter(categories__translations__title=category)
extra_context.update({'category': category})
if year:
queryset = queryset.filter(published_on__year=int(year))
extra_context.update({'drilldown_mode': 'year', 'title' : _('entries of the year')})
else:
year=1
if month:
queryset = queryset.filter(published_on__month=int(month))
extra_context.update({'drilldown_mode': 'month', 'title' : _('entries of the month')})
else:
month=1
if day:
queryset = queryset.filter(published_on__day=int(day))
extra_context.update({'drilldown_mode': 'day', 'title' : _('entries of the year')})
else:
day=1
extra_context.update({'date':date(int(year), int(month), int(day)),
'comments' : settings.BLOG_COMMENTS})
return list_detail.object_list(
request,
queryset = queryset,
paginate_by = paginate_by,
page = page,
template_name = template_name,
extra_context = extra_context,
**kwargs)
| Python | 0 |
b487bad4079773d8537cd46f20164af77e7674fb | change TODO on nice-to-have to avoid triggering code climate | callisto/delivery/management/commands/find_matches.py | callisto/delivery/management/commands/find_matches.py | import importlib
from django.core.management.base import BaseCommand
from callisto.delivery.report_delivery import PDFMatchReport
from callisto.delivery.matching import find_matches
class Command(BaseCommand):
help = 'finds matches and sends match reports'
def add_arguments(self, parser):
parser.add_argument('report_class', nargs='?', default=None)
# eventually: add test option that verifies that passed class can be imported & has necessary methods
# https://github.com/SexualHealthInnovations/callisto-core/issues/56
def handle(self, *args, **options):
report_class_name = options['report_class']
if report_class_name:
module_name, class_name = report_class_name.rsplit(".", 1)
ReportClass = getattr(importlib.import_module(module_name), class_name)
else:
ReportClass = PDFMatchReport
find_matches(report_class=ReportClass)
self.stdout.write('Matching run')
| import importlib
from django.core.management.base import BaseCommand
from callisto.delivery.report_delivery import PDFMatchReport
from callisto.delivery.matching import find_matches
class Command(BaseCommand):
help = 'finds matches and sends match reports'
def add_arguments(self, parser):
parser.add_argument('report_class', nargs='?', default=None)
# TODO: add test option that verifies that passed class can be imported & has necessary methods
# https://github.com/SexualHealthInnovations/callisto-core/issues/56
def handle(self, *args, **options):
report_class_name = options['report_class']
if report_class_name:
module_name, class_name = report_class_name.rsplit(".", 1)
ReportClass = getattr(importlib.import_module(module_name), class_name)
else:
ReportClass = PDFMatchReport
find_matches(report_class=ReportClass)
self.stdout.write('Matching run')
| Python | 0 |
b0f4ebf0cd0999debfdec7a6de972666d28eea98 | Update PWM example. | usr/examples/02-Board-Control/pwm_control.py | usr/examples/02-Board-Control/pwm_control.py | # PWM Control Example
#
# This example shows how to do PWM with your OpenMV Cam.
import time
from pyb import Pin, Timer
tim = Timer(4, freq=1000) # Frequency in Hz
# Generate a 1KHz square wave on TIM4 with 50% and 75% duty cycles on channels 1 and 2, respectively.
ch1 = tim.channel(1, Timer.PWM, pin=Pin("P7"), pulse_width_percent=50)
ch2 = tim.channel(2, Timer.PWM, pin=Pin("P8"), pulse_width_percent=75)
while (True):
time.sleep(1000) | # PWM Control Example
#
# This example shows how to do PWM with your OpenMV Cam.
#
# WARNING: PWM control is... not easy with MicroPython. You have to use
# the correct timer with the correct pins and channels. As for what the
# correct values are - who knows. If you need to change the pins from the
# example below please try out different timer/channel/pin configs.
import pyb, time
t2 = pyb.Timer(1, freq=1000)
ch1 = t2.channel(2, pyb.Timer.PWM, pin=pyb.Pin("P0"))
ch2 = t2.channel(3, pyb.Timer.PWM, pin=pyb.Pin("P1"))
while(True):
for i in range(100):
ch1.pulse_width_percent(i)
ch2.pulse_width_percent(100-i)
time.sleep(5)
for i in range(100):
ch1.pulse_width_percent(100-i)
ch2.pulse_width_percent(i)
time.sleep(5)
| Python | 0 |
a52a0fc4589c07439da8194fb6583d46af422bc2 | Fix comment typos in 04-KNN.py (examples/05-vector) | examples/05-vector/04-KNN.py | examples/05-vector/04-KNN.py | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
from builtins import range
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import Twitter
from pattern.en import Sentence, parse
from pattern.search import search
from pattern.vector import Document, Model, KNN
# Classification is a supervised machine learning method,
# where labeled documents are used as training material
# to learn how to label unlabeled documents.
# This example trains a simple classifier with Twitter messages.
# The idea is that, if you have a number of texts with a "type"
# (mail/spam, positive/negative, language, author's age, ...),
# you can predict the type of other "unknown" texts.
# The k-Nearest Neighbor algorithm classifies texts according
# to the k documents that are most similar (cosine similarity) to the given input document.
m = Model()
t = Twitter()
# First, we mine a model of a 1000 tweets.
# We'll use hashtags as type.
for page in range(1, 10):
for tweet in t.search('#win OR #fail', start=page, count=100, cached=True):
# If the tweet contains #win hashtag, we'll set its type to 'WIN':
s = tweet.text.lower() # tweet in lowercase
p = '#win' in s and 'WIN' or 'FAIL' # document labels
s = Sentence(parse(s)) # parse tree with part-of-speech tags
s = search('JJ', s) # adjectives in the tweet
s = [match[0].string for match in s] # adjectives as a list of strings
s = " ".join(s) # adjectives as string
if len(s) > 0:
m.append(Document(s, type=p, stemmer=None))
# Train k-Nearest Neighbor on the model.
# Note that this is only a simple example: to build a robust classifier
# you would need a lot more training data (e.g., tens of thousands of tweets).
# The more training data, the more statistically reliable the classifier becomes.
# The only way to really know if your classifier is working correctly
# is to test it with testing data, see the documentation for Classifier.test().
classifier = KNN(baseline=None) # By default, baseline=MAJORITY
for document in m: # (classify unknown documents with the most frequent type).
classifier.train(document)
# These are the adjectives the classifier has learned:
print(sorted(classifier.features))
print()
# We can now ask it to classify documents containing these words.
# Note that you may get different results than the ones below,
# since you will be mining other (more recent) tweets.
# Again, a robust classifier needs lots and lots of training data.
# If None is returned, the word was not recognized,
# and the classifier returned the default value (see above).
print(classifier.classify('sweet potato burger')) # yields 'WIN'
print(classifier.classify('stupid autocorrect')) # yields 'FAIL'
# "What can I do with it?"
# In the scientific community, classifiers have been used to predict:
# - the opinion (positive/negative) in product reviews on blogs,
# - the age of users posting on social networks,
# - the author of medieval poems,
# - spam in e-mail messages,
# - lies & deception in text,
# - doubt & uncertainty in text,
# and to:
# - improve search engine query results (e.g., where "jeans" queries also yield "denim" results),
# - win at Jeopardy!,
# - win at rock-paper-scissors,
# and so on... | from __future__ import print_function
from __future__ import unicode_literals
from builtins import str, bytes, dict, int
from builtins import range
import os, sys; sys.path.insert(0, os.path.join(os.path.dirname(__file__), "..", ".."))
from pattern.web import Twitter
from pattern.en import Sentence, parse
from pattern.search import search
from pattern.vector import Document, Model, KNN
# Classification is a supervised machine learning method,
# where labeled documents are used as training material
# to learn how to label unlabeled documents.
# This example trains a simple classifier with Twitter messages.
# The idea is that, if you have a number of texts with a "type"
# (mail/spam, positive/negative, language, author's age, ...),
# you can predict the type of other "unknown" texts.
# The k-Nearest Neighbor algorithm classifies texts according
# to the k documents that are most similar (cosine similarity) to the given input document.
m = Model()
t = Twitter()
# First, we mine a model of a 1000 tweets.
# We'll use hashtags as type.
for page in range(1, 10):
for tweet in t.search('#win OR #fail', start=page, count=100, cached=True):
# If the tweet contains #win hashtag, we'll set its type to 'WIN':
s = tweet.text.lower() # tweet in lowercase
p = '#win' in s and 'WIN' or 'FAIL' # document labels
s = Sentence(parse(s)) # parse tree with part-of-speech tags
s = search('JJ', s) # adjectives in the tweet
s = [match[0].string for match in s] # adjectives as a list of strings
s = " ".join(s) # adjectives as string
if len(s) > 0:
m.append(Document(s, type=p, stemmer=None))
# Train k-Nearest Neighbor on the model.
# Note that this is a only simple example: to build a robust classifier
# you would need a lot more training data (e.g., tens of thousands of tweets).
# The more training data, the more statistically reliable the classifier becomes.
# The only way to really know if you're classifier is working correctly
# is to test it with testing data, see the documentation for Classifier.test().
classifier = KNN(baseline=None) # By default, baseline=MAJORITY
for document in m: # (classify unknown documents with the most frequent type).
classifier.train(document)
# These are the adjectives the classifier has learned:
print(sorted(classifier.features))
print()
# We can now ask it to classify documents containing these words.
# Note that you may get different results than the ones below,
# since you will be mining other (more recent) tweets.
# Again, a robust classifier needs lots and lots of training data.
# If None is returned, the word was not recognized,
# and the classifier returned the default value (see above).
print(classifier.classify('sweet potato burger')) # yields 'WIN'
print(classifier.classify('stupid autocorrect')) # yields 'FAIL'
# "What can I do with it?"
# In the scientific community, classifiers have been used to predict:
# - the opinion (positive/negative) in product reviews on blogs,
# - the age of users posting on social networks,
# - the author of medieval poems,
# - spam in e-mail messages,
# - lies & deception in text,
# - doubt & uncertainty in text,
# and to:
# - improve search engine query results (e.g., where "jeans" queries also yield "denim" results),
# - win at Jeopardy!,
# - win at rock-paper-scissors,
# and so on... | Python | 0 |
ada5520cddb065899fca25ec96edb1f2b98bb797 | None -> False | tests/chainer_tests/dataset_tests/tabular_tests/dummy_dataset.py | tests/chainer_tests/dataset_tests/tabular_tests/dummy_dataset.py | import numpy as np
import chainer
from chainer import testing
class DummyDataset(chainer.dataset.TabularDataset):
def __init__(
self, size=10, keys=('a', 'b', 'c'), mode=tuple,
return_array=False, callback=None, convert=False):
if mode is None:
keys = keys[0],
self._keys = keys
self._mode = mode
self._return_array = return_array
self._callback = callback
self._convert = convert
self.data = np.random.uniform(size=(len(keys), size))
def __len__(self):
return self.data.shape[1]
@property
def keys(self):
return self._keys
@property
def mode(self):
return self._mode
def get_examples(self, indices, key_indices):
if self._callback:
self._callback(indices, key_indices)
data = self.data
if indices is not None:
data = data[:, indices]
if key_indices is not None:
data = data[list(key_indices)]
if self._return_array:
return tuple(data)
else:
return tuple(list(d) for d in data)
def convert(self, data):
if self._convert:
return 'converted'
else:
return super(DummyDataset, self).convert(data)
# tests/chainer_tests/test_runnable.py
testing.run_module(__name__, __file__)
| import numpy as np
import chainer
from chainer import testing
class DummyDataset(chainer.dataset.TabularDataset):
def __init__(
self, size=10, keys=('a', 'b', 'c'), mode=tuple,
return_array=False, callback=None, convert=None):
if mode is None:
keys = keys[0],
self._keys = keys
self._mode = mode
self._return_array = return_array
self._callback = callback
self._convert = convert
self.data = np.random.uniform(size=(len(keys), size))
def __len__(self):
return self.data.shape[1]
@property
def keys(self):
return self._keys
@property
def mode(self):
return self._mode
def get_examples(self, indices, key_indices):
if self._callback:
self._callback(indices, key_indices)
data = self.data
if indices is not None:
data = data[:, indices]
if key_indices is not None:
data = data[list(key_indices)]
if self._return_array:
return tuple(data)
else:
return tuple(list(d) for d in data)
def convert(self, data):
if self._convert:
return 'converted'
else:
return super(DummyDataset, self).convert(data)
# tests/chainer_tests/test_runnable.py
testing.run_module(__name__, __file__)
| Python | 0.999988 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.