commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
9358060c648c0ee71498f173dcbf6fc839ba6ff8 | Update expected release date | src/penn_chime/constants.py | src/penn_chime/constants.py | """Constants."""
from datetime import date
# NOTE: the triple-quoted block below is a bare string expression, not a
# module docstring (the import above precedes it); it acts as a comment.
"""
This reflects a date from which previously-run reports will no
longer match current results, indicating when users should
re-run their reports
"""
CHANGE_DATE = date(year=2020, month=4, day=8)  # date of last results-affecting change
VERSION = 'v1.1.3'  # application version string
DATE_FORMAT = "%b, %d"  # see https://strftime.org
DOCS_URL = "https://code-for-philly.gitbook.io/chime"  # user documentation
EPSILON = 1.0e-7  # small numeric tolerance; presumably for float comparisons -- usage not visible here
FLOAT_INPUT_MIN = 0.0001  # presumably lower bound for float UI inputs -- confirm against the UI code
FLOAT_INPUT_STEP = 0.1  # presumably step size for float UI inputs -- confirm against the UI code
| """Constants."""
from datetime import date
# NOTE: bare string expression used as a comment (not a docstring -- the
# import precedes it).
"""
This reflects a date from which previously-run reports will no
longer match current results, indicating when users should
re-run their reports
"""
CHANGE_DATE = date(year=2020, month=4, day=6)  # date of last results-affecting change
VERSION = 'v1.1.3'  # application version string
DATE_FORMAT = "%b, %d"  # see https://strftime.org
DOCS_URL = "https://code-for-philly.gitbook.io/chime"  # user documentation
EPSILON = 1.0e-7  # small numeric tolerance; usage not visible here
FLOAT_INPUT_MIN = 0.0001  # presumably lower bound for float UI inputs
FLOAT_INPUT_STEP = 0.1  # presumably step size for float UI inputs
| Python | 0 |
da6c8157e688c8c721bd66e5779ce6f550a5a7e2 | remove useless code in PathPayment | stellar_sdk/operation/path_payment.py | stellar_sdk/operation/path_payment.py | import warnings
from decimal import Decimal
from typing import List, Union
from .path_payment_strict_receive import PathPaymentStrictReceive
from ..asset import Asset
class PathPayment(PathPaymentStrictReceive):
    """The :class:`PathPayment` object, which represents a PathPayment
    operation on Stellar's network.

    Deprecated thin alias of :class:`PathPaymentStrictReceive`; constructing
    it emits a :class:`DeprecationWarning` and delegates everything to the
    superclass.

    Sends an amount in a specific asset to a destination account through a path
    of offers. This allows the asset sent (e.g. 450 XLM) to be different from
    the asset received (e.g. 6 BTC).

    Threshold: Medium

    :param destination: The destination account to send to.
    :param send_asset: The asset to pay with.
    :param send_max: The maximum amount of send_asset to send.
    :param dest_asset: The asset the destination will receive.
    :param dest_amount: The amount the destination receives.
    :param path: A list of Asset objects to use as the path.
    :param source: The source account for the payment. Defaults to the
        transaction's source account.
    """

    def __init__(self, destination: str, send_asset: Asset, send_max: Union[str, Decimal], dest_asset: Asset,
                 dest_amount: Union[str, Decimal], path: List[Asset], source: str = None) -> None:
        # Warn on every construction: this class is scheduled for removal.
        warnings.warn(
            "Will be removed in version v2.0.0-alpha6, "
            "use stellar_sdk.operation.PathPaymentStrictReceive",
            DeprecationWarning,
        )
        # All validation and state handling lives in PathPaymentStrictReceive.
        super().__init__(destination, send_asset, send_max, dest_asset, dest_amount, path, source)
| import warnings
from decimal import Decimal
from typing import List, Union
from .operation import Operation
from ..asset import Asset
from ..keypair import Keypair
from ..xdr import Xdr
from ..strkey import StrKey
from .utils import check_ed25519_public_key, check_amount
class PathPayment(Operation):
    """The :class:`PathPayment` object, which represents a PathPayment
    operation on Stellar's network.

    Deprecated: constructing it emits a :class:`DeprecationWarning` pointing
    to ``PathPaymentStrictReceive``; the XDR serialization below already
    uses the strict-receive operation body.

    Sends an amount in a specific asset to a destination account through a path
    of offers. This allows the asset sent (e.g., 450 XLM) to be different from
    the asset received (e.g, 6 BTC).

    Threshold: Medium

    :param destination: The destination account to send to.
    :param send_asset: The asset to pay with.
    :param send_max: The maximum amount of send_asset to send.
    :param dest_asset: The asset the destination will receive.
    :param dest_amount: The amount the destination receives.
    :param path: A list of Asset objects to use as the path.
    :param source: The source account for the payment. Defaults to the
        transaction's source account.
    """

    def __init__(
        self,
        destination: str,
        send_asset: Asset,
        send_max: Union[str, Decimal],
        dest_asset: Asset,
        dest_amount: Union[str, Decimal],
        path: List[Asset],
        source: str = None,
    ) -> None:
        warnings.warn(
            "Will be removed in version v2.0.0-alpha6, "
            "use stellar_sdk.operation.PathPaymentStrictReceive",
            DeprecationWarning,
        )
        super().__init__(source)
        # Validate inputs before storing them; these raise on bad values.
        check_ed25519_public_key(destination)
        check_amount(send_max)
        check_amount(dest_amount)
        self.destination = destination
        self.send_asset = send_asset
        self.send_max = send_max
        self.dest_asset = dest_asset
        self.dest_amount = dest_amount
        self.path = path  # a list of paths/assets

    @classmethod
    def _type_code(cls) -> int:
        # NOTE(review): -1 is presumably a sentinel (not a real XDR op type
        # code) since serialization uses PATH_PAYMENT_STRICT_RECEIVE below --
        # confirm against the Operation base class.
        return -1

    def _to_operation_body(self) -> Xdr.nullclass:
        """Build the XDR operation body as a PathPaymentStrictReceiveOp."""
        destination = Keypair.from_public_key(self.destination).xdr_account_id()
        send_asset = self.send_asset.to_xdr_object()
        dest_asset = self.dest_asset.to_xdr_object()
        path = [asset.to_xdr_object() for asset in self.path]
        path_payment_strict_receive_op = Xdr.types.PathPaymentStrictReceiveOp(
            send_asset,
            Operation.to_xdr_amount(self.send_max),
            destination,
            dest_asset,
            Operation.to_xdr_amount(self.dest_amount),
            path,
        )
        body = Xdr.nullclass()
        body.type = Xdr.const.PATH_PAYMENT_STRICT_RECEIVE
        body.pathPaymentStrictReceiveOp = path_payment_strict_receive_op
        return body

    @classmethod
    def from_xdr_object(
        cls, operation_xdr_object: Xdr.types.Operation
    ) -> "PathPayment":
        """Creates a :class:`PathPayment` object from an XDR Operation
        object.
        """
        source = Operation.get_source_from_xdr_obj(operation_xdr_object)
        # Decode the ed25519 destination back to a public-key string.
        destination = StrKey.encode_ed25519_public_key(
            operation_xdr_object.body.pathPaymentStrictReceiveOp.destination.ed25519
        )
        send_asset = Asset.from_xdr_object(
            operation_xdr_object.body.pathPaymentStrictReceiveOp.sendAsset
        )
        dest_asset = Asset.from_xdr_object(
            operation_xdr_object.body.pathPaymentStrictReceiveOp.destAsset
        )
        send_max = Operation.from_xdr_amount(
            operation_xdr_object.body.pathPaymentStrictReceiveOp.sendMax
        )
        dest_amount = Operation.from_xdr_amount(
            operation_xdr_object.body.pathPaymentStrictReceiveOp.destAmount
        )
        # Path may be empty/absent; rebuild Asset objects when present.
        path = []
        if operation_xdr_object.body.pathPaymentStrictReceiveOp.path:
            for x in operation_xdr_object.body.pathPaymentStrictReceiveOp.path:
                path.append(Asset.from_xdr_object(x))
        return cls(
            source=source,
            destination=destination,
            send_asset=send_asset,
            send_max=send_max,
            dest_asset=dest_asset,
            dest_amount=dest_amount,
            path=path,
        )
| Python | 0.000077 |
2de30c0acdbcc2560ee7c9c472df956441cb2bab | use better filterType | nvchecker_source/vsmarketplace.py | nvchecker_source/vsmarketplace.py | # MIT licensed
# Copyright (c) 2013-2021 Th3Whit3Wolf <the.white.wolf.is.1337@gmail.com>, et al.
from nvchecker.api import (
VersionResult, Entry, AsyncCache, KeyManager,
TemporaryError, session, GetVersionError,
)
API_URL = 'https://marketplace.visualstudio.com/_apis/public/gallery/extensionquery'
HEADERS = {
'Accept': 'application/json;api-version=6.1-preview.1',
'Content-Type': 'application/json'
}
async def get_version(name: str, conf: Entry, *, cache: AsyncCache, **kwargs):
    """Return the latest published version of a VS Code extension from the
    Visual Studio Marketplace 'extensionquery' gallery API.

    The entry value ``vsmarketplace`` (falling back to the entry name) is
    the extension identifier searched for.
    """
    name = conf.get('vsmarketplace') or name

    # Gallery query payload. The numeric filterType codes are Marketplace
    # API constants taken as-is: 8 appears to scope results to the VS Code
    # product, 10 to the search text, 12 to a flags filter -- confirm
    # against the gallery API reference before changing.
    q = {
        'filters': [
            {
                'criteria': [
                    {
                        'filterType': 8,
                        'value': 'Microsoft.VisualStudio.Code'
                    },
                    {
                        'filterType': 10,
                        'value': name
                    },
                    {
                        'filterType': 12,
                        'value': '4096'
                    }
                ],
                'pageNumber': 1,
                'pageSize': 2,
                'sortBy': 0,
                'sortOrder': 0
            }
        ],
        'assetTypes': [],
        'flags': 946
    }

    res = await session.post(
        API_URL,
        headers = HEADERS,
        json = q,
    )
    j = res.json()
    # Assumes the first extension of the first result is the exact match and
    # that its versions list is newest-first -- TODO confirm.
    version = j['results'][0]['extensions'][0]['versions'][0]['version']
    return version
| # MIT licensed
# Copyright (c) 2013-2021 Th3Whit3Wolf <the.white.wolf.is.1337@gmail.com>, et al.
from nvchecker.api import (
VersionResult, Entry, AsyncCache, KeyManager,
TemporaryError, session, GetVersionError,
)
API_URL = 'https://marketplace.visualstudio.com/_apis/public/gallery/extensionquery'
HEADERS = {
'Accept': 'application/json;api-version=6.1-preview.1',
'Content-Type': 'application/json'
}
async def get_version(name: str, conf: Entry, *, cache: AsyncCache, **kwargs):
name = conf.get('vsmarketplace') or name
q = {
'filters': [
{
'criteria': [
{
'filterType': 8,
'value': 'Microsoft.VisualStudio.Code'
},
{
'filterType': 10,
'value': name
},
{
'filterType': 12,
'value': '4096'
}
],
'pageNumber': 1,
'pageSize': 2,
'sortBy': 0,
'sortOrder': 0
}
],
'assetTypes': [],
'flags': 946
}
res = await session.post(
API_URL,
headers = HEADERS,
json = q,
)
j = res.json()
version = j['results'][0]['extensions'][0]['versions'][0]['version']
return version | Python | 0 |
355372ff51a84c0a6d7d86c0ef1fb12def341436 | Add the score to Engine.chat return values | invada/engine.py | invada/engine.py | # -*- coding: utf-8 -*-
class Engine:
    """Rule-based chat engine.

    Scores every response pair against a user utterance and generates a
    reply from the best-scoring match.

    :param response_pairs: iterable of objects exposing ``match``, ``score``
        and ``generate``.
    :param knowledge: optional shared knowledge dict passed to every pair.
    """

    def __init__(self,
                 response_pairs,
                 knowledge=None):
        self.response_pairs = response_pairs
        # BUG FIX: avoid the shared mutable-default-argument pitfall; each
        # engine now gets its own dict unless one is supplied.
        self.knowledge = {} if knowledge is None else knowledge

    def chat(self, user_utterance, context):
        """Return ``(response, new_context, best_score)`` for the utterance.

        Pairs whose ``match`` returns None are skipped; of the rest, the
        highest-scoring pair generates the reply.  NOTE: if no pair matches,
        ``best_response_pair`` stays None and an AttributeError is raised --
        callers should guarantee at least one matching pair.
        """
        best_score = 0
        best_response_pair = None
        best_captured = {}
        for response_pair in self.response_pairs:
            captured = response_pair.match(user_utterance, self.knowledge)
            if captured is None:
                continue
            score = response_pair.score(captured, context, self.knowledge)
            if best_score < score:
                best_score, best_response_pair, best_captured = score, response_pair, captured
        response, new_context = best_response_pair.generate(best_captured, context, self.knowledge)
        return response, new_context, best_score
| # -*- coding: utf-8 -*-
class Engine:
    """Rule-based chat engine.

    Scores every response pair against a user utterance and generates a
    reply from the best-scoring match.

    :param response_pairs: iterable of objects exposing ``match``, ``score``
        and ``generate``.
    :param knowledge: optional shared knowledge dict passed to every pair.
    """

    def __init__(self,
                 response_pairs,
                 knowledge=None):
        self.response_pairs = response_pairs
        # BUG FIX: avoid the shared mutable-default-argument pitfall; each
        # engine now gets its own dict unless one is supplied.
        self.knowledge = {} if knowledge is None else knowledge

    def chat(self, user_utterance, context):
        """Return the best-scoring pair's ``generate`` result for the
        utterance.

        Pairs whose ``match`` returns None are skipped.  NOTE: if no pair
        matches, ``best_response_pair`` stays None and an AttributeError is
        raised -- callers should guarantee at least one matching pair.
        """
        best_score = 0
        best_response_pair = None
        best_captured = {}
        for response_pair in self.response_pairs:
            captured = response_pair.match(user_utterance, self.knowledge)
            if captured is None:
                continue
            score = response_pair.score(captured, context, self.knowledge)
            if best_score < score:
                best_score, best_response_pair, best_captured = score, response_pair, captured
        return best_response_pair.generate(best_captured, context, self.knowledge)
| Python | 0.000033 |
ef29e402c58751a938cb11cee480ac4f4e31aef5 | Add warning | invoke/config.py | invoke/config.py | from .vendor.etcaetera.config import Config as EtcConfig
from .vendor.etcaetera.adapter import File
class Config(object):
    """
    Invoke's primary configuration handling class.

    See :doc:`/concepts/configuration` for details on the configuration
    system this class implements, including the :ref:`configuration
    hierarchy <config-hierarchy>`.

    Lightly wraps ``etcaetera.config.Config``, adding control over which
    files are loaded (and in what order) plus convenient value access via
    either dict syntax (``config['foo']``, ``config['foo']['bar']``) or
    attribute syntax (``config.foo``, ``config.foo.bar``); nested dict
    values honor both protocols as well.

    .. warning::
        Any "real" attributes (methods, etc) on `Config` take precedence
        over settings values -- a top-level setting named e.g. ``load`` is
        only reachable via dict syntax.
    """

    def __init__(self):
        """
        Create a new, empty config object; no configuration data is loaded
        yet -- call `~.Config.load` afterwards to do that.

        Keyword arguments other than those documented below are intended to
        be treated as top-level configuration keys, e.g.::

            c = Config(my_setting='my_value')
            print(c['my_setting'])  # => 'my_value'

        :param str global_prefix:
            Path & partial filename (everything but the dot & extension)
            for the global config file; the final result is turned into a
            fully qualified path with system-appropriate expansion (tildes
            and so forth). Default: ``/etc/invoke`` (e.g.
            ``/etc/invoke.yaml`` or ``/etc/invoke.json``).
        :param str user_prefix:
            Like ``global_prefix``, but for the per-user config file.
            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        """
        # NOTE(review): the signature accepts no kwargs yet despite the
        # documented parameters -- implementation is still a stub.
        pass

    def load(self):
        """
        Load and merge all config sources.

        See :ref:`config-hierarchy` for load order and file locations.
        Not yet implemented.
        """
        pass
| from .vendor.etcaetera.config import Config as EtcConfig
from .vendor.etcaetera.adapter import File
class Config(object):
    """
    Invoke's primary configuration handling class.

    See :doc:`/concepts/configuration` for details on the configuration
    system this class implements, including the :ref:`configuration
    hierarchy <config-hierarchy>`.

    Lightly wraps ``etcaetera.config.Config``, adding control over which
    files are loaded (and in what order) plus convenient value access via
    either dict syntax (``config['foo']``, ``config['foo']['bar']``) or
    attribute syntax (``config.foo``, ``config.foo.bar``); nested dict
    values honor both protocols as well.
    """

    def __init__(self):
        """
        Create a new, empty config object; no configuration data is loaded
        yet -- call `~.Config.load` afterwards to do that.

        Keyword arguments other than those documented below are intended to
        be treated as top-level configuration keys, e.g.::

            c = Config(my_setting='my_value')
            print(c['my_setting'])  # => 'my_value'

        :param str global_prefix:
            Path & partial filename (everything but the dot & extension)
            for the global config file; the final result is turned into a
            fully qualified path with system-appropriate expansion (tildes
            and so forth). Default: ``/etc/invoke`` (e.g.
            ``/etc/invoke.yaml`` or ``/etc/invoke.json``).
        :param str user_prefix:
            Like ``global_prefix``, but for the per-user config file.
            Default: ``~/.invoke`` (e.g. ``~/.invoke.yaml``).
        """
        # NOTE(review): the signature accepts no kwargs yet despite the
        # documented parameters -- implementation is still a stub.
        pass

    def load(self):
        """
        Load and merge all config sources.

        See :ref:`config-hierarchy` for load order and file locations.
        Not yet implemented.
        """
        pass
| Python | 0.000002 |
aa459c2db7f1995fda486ef80c30b541ff1895d8 | Remove unnessesaty params | ocds/databridge/contrib/client.py | ocds/databridge/contrib/client.py | import requests
import requests.adapters
from gevent.pool import Pool
import logging
logger = logging.getLogger(__name__)
class APIClient(object):
    """Thin client for the tenders API.

    Keeps an authenticated ``requests.Session`` (basic auth with the API
    key) and a gevent pool for concurrent tender retrieval.  NOTE: the
    constructor performs a network call (HEAD to the 'spore' endpoint).
    """

    def __init__(self, api_key, api_host, api_version, **options):
        self.base_url = "{}/api/{}".format(api_host, api_version)
        self.session = requests.Session()
        self.session.auth = (api_key, '')
        # BUG FIX: the Accept header value was misspelled 'applicaiton/json',
        # an unknown media type servers cannot honor.
        self.session.headers = {"Accept": "application/json",
                                "Content-type": "application/json"}
        # (sic) 'resourse' option key kept as-is for caller compatibility.
        resourse = options.get('resourse', 'tenders')
        self.resourse_url = "{}/{}".format(self.base_url, resourse)
        APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
                                                   pool_connections=50,
                                                   pool_maxsize=30)
        self.session.mount(self.resourse_url, APIAdapter)
        # retrieve cookie
        self.session.head("{}/{}".format(self.base_url, 'spore'))
        self.pool = Pool(10)

    def get_tenders(self, params=None):
        """Fetch one feed page of tenders; defaults to the changes feed.

        NOTE(review): 'chages' looks like a typo for 'changes' but is kept
        as-is -- confirm what the API actually expects before changing it.
        """
        if not params:
            params = {'feed': 'chages'}
        resp = self.session.get(self.resourse_url, params=params)
        if resp.ok:
            return resp.json()
        else:
            resp.raise_for_status()

    def get_tender(self, tender_id):
        """Fetch a single tender and return its 'data' payload.

        Raises ``requests.HTTPError`` on a non-OK response.
        """
        resp = self.session.get(
            "{}/{}".format(self.resourse_url, tender_id)
        )
        if resp.ok:
            return resp.json()['data']
        else:
            resp.raise_for_status()

    def fetch(self, tender_ids):
        """Concurrently fetch tenders for dicts carrying an 'id' key,
        dropping falsy results."""
        resp = self.pool.map(self.get_tender, [t['id'] for t in tender_ids])
        return [r for r in resp if r]
def get_retreive_clients(api_key, api_host, api_version):
    """Build a forward and a backward API client sharing one cookie jar.

    Presumably both directions must present the same server-issued session
    cookie (so paging stays consistent on the server side) -- confirm.
    (Function name 'retreive' is a historical typo kept for compatibility.)
    NOTE: constructing the clients performs network calls.
    """
    forward = APIClient(api_key, api_host, api_version)
    backward = APIClient(api_key, api_host, api_version)
    # Share the forward client's cookies with the backward client.
    origin_cookie = forward.session.cookies
    backward.session.cookies = origin_cookie
    return origin_cookie, forward, backward
| import requests
import requests.adapters
from gevent.pool import Pool
import logging
logger = logging.getLogger(__name__)
class APIClient(object):
    """Thin client for the tenders API (session + gevent pool).

    NOTE(review): the Accept header value 'applicaiton/json' is misspelled;
    the constructor also performs a network call (HEAD to 'spore').
    """

    def __init__(self, api_key, api_host, api_version, **options):
        self.base_url = "{}/api/{}".format(api_host, api_version)
        self.session = requests.Session()
        self.session.auth = (api_key, '')
        self.session.headers = {"Accept": "applicaiton/json",
                                "Content-type": "application/json"}
        # (sic) 'resourse' option key; defaults to the tenders collection.
        resourse = options.get('resourse', 'tenders')
        self.resourse_url = "{}/{}".format(self.base_url, resourse)
        APIAdapter = requests.adapters.HTTPAdapter(max_retries=5,
                                                   pool_connections=50,
                                                   pool_maxsize=30)
        self.session.mount(self.resourse_url, APIAdapter)
        # retrieve cookie
        self.session.head("{}/{}".format(self.base_url, 'spore'))
        self.pool = Pool(10)

    def get_tenders(self, params=None):
        """Fetch one feed page of tenders ('chages' feed by default --
        likely a typo for 'changes'; confirm against the API)."""
        if not params:
            params = {'feed': 'chages'}
        resp = self.session.get(self.resourse_url, params=params)
        if resp.ok:
            return resp.json()
        else:
            resp.raise_for_status()

    def get_tender(self, tender_id, params=None):
        """Fetch a single tender's 'data' payload; raises HTTPError on
        non-OK responses."""
        resp = self.session.get(
            "{}/{}".format(self.resourse_url, tender_id), params=params
        )
        if resp.ok:
            return resp.json()['data']
        else:
            resp.raise_for_status()

    def fetch(self, tender_ids):
        """Concurrently fetch tenders for dicts carrying an 'id' key."""
        resp = self.pool.map(self.get_tender, [t['id'] for t in tender_ids])
        return [r for r in resp]
def get_retreive_clients(api_key, api_host, api_version):
    """Build forward/backward API clients sharing one cookie jar.

    Presumably the shared cookie keeps both directions pinned to the same
    server session -- confirm.  Constructing the clients hits the network.
    """
    forward = APIClient(api_key, api_host, api_version)
    backward = APIClient(api_key, api_host, api_version)
    origin_cookie = forward.session.cookies
    backward.session.cookies = origin_cookie
    return origin_cookie, forward, backward
| Python | 0.000007 |
48cb3e901917c598294c5431c66efe6ed56e465a | set DEBUG to true | wsgi/settings.py | wsgi/settings.py | import os
# MongoDB connection settings sourced from OpenShift environment variables.
MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')
# Server key pair (PEM files) in the OpenShIFT data directory.
# NOTE(review): os.getenv returns None when the variable is unset, making the
# concatenation raise TypeError -- confirm the deployment always sets it.
PRIV_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
# NOTE(review): DEBUG enabled -- confirm this is not a production deployment;
# debug mode can leak internals.
DEBUG = True
| import os
# MongoDB connection settings sourced from OpenShift environment variables.
MONGO_HOST = os.getenv('OPENSHIFT_NOSQL_DB_HOST')
MONGO_PORT = os.getenv('OPENSHIFT_NOSQL_DB_PORT')
MONGO_USERNAME = os.getenv('OPENSHIFT_NOSQL_DB_USERNAME')
MONGO_PASSWORD = os.getenv('OPENSHIFT_NOSQL_DB_PASSWORD')
# Server key pair (PEM files); concatenation raises TypeError if the data-dir
# variable is unset -- confirm the deployment always sets it.
PRIV_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_private.pem'
PUB_KEY = os.getenv('OPENSHIFT_DATA_DIR') + '/server_public.pem'
| Python | 0.999549 |
99f45d201b3513096bf8ebe7c877c836d8e6611a | Add logging to web client | clients/web/rewebclient/rewebclient.py | clients/web/rewebclient/rewebclient.py |
from flask import Flask, request, render_template, flash, redirect, url_for
from reclient.client import ReClient, ReClientException
import os
import logging
DEBUG = False
SECRET_KEY = 'CHANGE ME'  # NOTE(review): placeholder secret -- must be overridden in deployment
app = Flask(__name__)
app.config.from_object(__name__)  # pull DEBUG / SECRET_KEY from this module
app.config.from_envvar('REWEBCLIENT_SETTINGS', silent=True)  # optional settings file
# Resolve the frontend URL: settings first, then the environment; required.
app.config['RE_FRONTEND_URL'] = app.config.get('RE_FRONTEND_URL', None)
if app.config['RE_FRONTEND_URL'] is None:
    app.config['RE_FRONTEND_URL'] = os.getenv('RE_FRONTEND_URL')
if app.config['RE_FRONTEND_URL'] is None:
    raise RuntimeError("RE_FRONTEND_URL environment variable must be set and point to a reliable-email web frontend!")
# Logging
# Optional file logging: enabled only when a LOG path is configured.
if app.config.get('LOG', None) is not None:
    file_handler = logging.FileHandler(app.config['LOG'])
    file_handler.setLevel(logging.DEBUG)
    app.logger.addHandler(file_handler)
    app.logger.setLevel(logging.DEBUG)
client = ReClient(app.config['RE_FRONTEND_URL'])  # module-level client shared by all requests
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the submission form; on POST, submit a job to the frontend.

    Follows post/redirect/get: a POST always answers with a redirect back
    to this view so a browser refresh cannot resubmit the form.
    """
    # We could use something like WTForms here, but I'll just keep it simple.
    # I'm ignoring all kinds of illformed user input, and let the web frontend
    # handle the small amount of validation.
    if request.method == 'POST':
        try:
            client.submit(
                request.form.get('subject', ''),
                request.form.get('body', ''),
                request.form.get('to_email', ''),
                request.form.get('to_name', '')
            )
            flash(u'Frontend returned a OK, job submitted!')
        except ReClientException as ex:
            flash(u'Job failed submission: %s' % ex.message)
        # BUG FIX: the redirect response was previously discarded (bare
        # ``redirect(...)`` call), so POSTs fell through to the template
        # render below; return it so the PRG pattern actually takes effect.
        return redirect(url_for('index'))
    return render_template('index.html')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
|
from flask import Flask, request, render_template, flash, redirect, url_for
from reclient.client import ReClient, ReClientException
import os
DEBUG = False
SECRET_KEY = 'CHANGE ME'  # NOTE(review): placeholder secret -- must be overridden in deployment
app = Flask(__name__)
app.config.from_object(__name__)  # pull DEBUG / SECRET_KEY from this module
app.config.from_envvar('REWEBCLIENT_SETTINGS', silent=True)  # optional settings file
# Frontend URL from settings or environment; NOTE: ``or`` also overrides a
# falsy configured value such as an empty string.
app.config['RE_FRONTEND_URL'] = app.config.get('RE_FRONTEND_URL', None) or os.getenv('RE_FRONTEND_URL')
if app.config['RE_FRONTEND_URL'] is None:
    raise RuntimeError("RE_FRONTEND_URL environment variable must be set and point to a reliable-email web frontend!")
client = ReClient(app.config['RE_FRONTEND_URL'])  # module-level client shared by all requests
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the submission form; on POST, submit a job to the frontend."""
    # We could use something like WTForms here, but I'll just keep it simple.
    # I'm ignoring all kinds of i'llformed user input, and let the web frontend handle the small amount of validation
    if request.method == 'POST':
        try:
            client.submit(
                request.form.get('subject', ''),
                request.form.get('body', ''),
                request.form.get('to_email', ''),
                request.form.get('to_name', '')
            )
            flash(u'Frontend returned a OK, job submitted!')
        except ReClientException, ex:
            flash(u'Job failed submission: %s' % ex.message)
        # NOTE(review): the redirect response is discarded here, so the POST
        # falls through to render_template below -- likely a bug (PRG broken).
        redirect(url_for('index'))
    return render_template('index.html')
if __name__ == '__main__':
app.run() | Python | 0.000001 |
bffb0c7fb099039afb444cfc641ae7b1978c59f8 | Exit main script when no observations found | ircelsos/main.py | ircelsos/main.py | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 07 23:11:39 2015
@author: Joris Van den Bossche
"""
from __future__ import print_function
def main():
    """Command-line entry point: download IRCEL-CELINE air-quality data and
    write one CSV file per station."""
    import argparse
    parser = argparse.ArgumentParser(
        prog='ircelsos',
        description='Download air quality data from the SOS of IRCEL - CELINE.')
    parser.add_argument('pollutant',
                        help='The pollutant')
    parser.add_argument('--station', '-s', nargs=1,
                        help='Station number. If no provided, use all available'
                        ' stations for that pollutant')
    parser.add_argument('--period', '-p', type=str, nargs=2,
                        help='Period of the measurements given as "start stop"')
    args = parser.parse_args()
    # Deferred package imports keep argparse errors fast.
    from .query_ircelsos import query_ircelsos
    from .sosparser import get_observations, parse_observation
    print("SOS of IRCEL - CELINE")
    print("Downloading ...")
    pollutant = args.pollutant
    if args.station:
        station = args.station[0]
    else:
        station = None
    # NOTE(review): --period is optional, so args.period may be None and
    # args.period[0] would raise TypeError -- confirm intended usage.
    response = query_ircelsos(pollutant, station, args.period[0],
                              args.period[1])
    observations = get_observations(response)
    if not observations:
        # Nothing to write: report and terminate the script cleanly.
        print('No observations found')
        import sys
        sys.exit()
    for obs in observations:
        st_info, raw_data = parse_observation(obs)
        filename = '{0}_{1}.csv'.format(pollutant, st_info['name'])
        print("Writing file '{}'".format(filename))
        with open(filename, 'w') as f:
            # Raw data is ';'-separated; one value per CSV line.
            f.writelines(raw_data.replace(';', '\n'))
| # -*- coding: utf-8 -*-
"""
Created on Wed Apr 07 23:11:39 2015
@author: Joris Van den Bossche
"""
from __future__ import print_function
def main():
    """Command-line entry point: download IRCEL-CELINE air-quality data and
    write one CSV file per station."""
    import argparse
    parser = argparse.ArgumentParser(
        prog='ircelsos',
        description='Download air quality data from the SOS of IRCEL - CELINE.')
    parser.add_argument('pollutant',
                        help='The pollutant')
    parser.add_argument('--station', '-s', nargs=1,
                        help='Station number. If no provided, use all available'
                        ' stations for that pollutant')
    parser.add_argument('--period', '-p', type=str, nargs=2,
                        help='Period of the measurements given as "start stop"')
    args = parser.parse_args()
    from .query_ircelsos import query_ircelsos
    from .sosparser import get_observations, parse_observation
    print("SOS of IRCEL - CELINE")
    print("Downloading ...")
    pollutant = args.pollutant
    if args.station:
        station = args.station[0]
    else:
        station = None
    # NOTE(review): --period is optional, so args.period may be None and
    # args.period[0] would raise TypeError -- confirm intended usage.
    response = query_ircelsos(pollutant, station, args.period[0],
                              args.period[1])
    observations = get_observations(response)
    if not observations:
        # Only reports; execution falls through to the (empty) loop below.
        print('No observations found')
    for obs in observations:
        st_info, raw_data = parse_observation(obs)
        filename = '{0}_{1}.csv'.format(pollutant, st_info['name'])
        print("Writing file '{}'".format(filename))
        with open(filename, 'w') as f:
            f.writelines(raw_data.replace(';', '\n'))
| Python | 0 |
641c7da63b2d7255ed3039d5c26574faa060b333 | Stop altering the glance API URL | openstack_dashboard/api/glance.py | openstack_dashboard/api/glance.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
import logging
import thread
from django.conf import settings
import glanceclient as glance_client
from horizon.utils import functions as utils
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
def glanceclient(request):
    """Build a Glance v1 client authenticated with the request's token.

    The endpoint comes from the service catalog ('image' service) and is
    used as-is, including any path component; SSL behavior follows the
    OPENSTACK_SSL_* settings.
    """
    url = base.url_for(request, 'image')
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    LOG.debug('glanceclient connection created using token "%s" and url "%s"'
              % (request.user.token.id, url))
    return glance_client.Client('1', url, token=request.user.token.id,
                                insecure=insecure, cacert=cacert)
def image_delete(request, image_id):
    """Delete the image identified by *image_id* via the Glance API."""
    api = glanceclient(request)
    return api.images.delete(image_id)
def image_get(request, image_id):
    """Return an Image object populated with metadata for the image with
    the supplied identifier.

    Guarantees the result carries a ``name`` attribute (``None`` when the
    service omits it) so callers can rely on it existing.
    """
    api = glanceclient(request)
    image = api.images.get(image_id)
    # Normalize: some images come back without a name attribute at all.
    if not hasattr(image, 'name'):
        image.name = None
    return image
def image_list_detailed(request, marker=None, filters=None, paginate=False):
    """List images, optionally paginated.

    Returns ``(images, has_more_data)``.  When paginating, one row more
    than a page is requested; if it arrives, it is popped off and
    ``has_more_data`` is set so the caller knows another page exists.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)

    if paginate:
        # Fetch one extra row to detect whether a further page exists.
        request_size = page_size + 1
    else:
        request_size = limit

    kwargs = {'filters': filters or {}}
    if marker:
        kwargs['marker'] = marker

    images_iter = glanceclient(request).images.list(page_size=request_size,
                                                    limit=limit,
                                                    **kwargs)
    has_more_data = False
    if paginate:
        images = list(itertools.islice(images_iter, request_size))
        if len(images) > page_size:
            images.pop(-1)
            has_more_data = True
    else:
        images = list(images_iter)
    return (images, has_more_data)
def image_update(request, image_id, **kwargs):
    """Apply the given attribute updates to an image and return the result."""
    api = glanceclient(request)
    return api.images.update(image_id, **kwargs)
def image_create(request, **kwargs):
    """Create an image; a 'copy_from' URL is applied asynchronously.

    NOTE(review): the truthiness check means a present-but-falsy
    'copy_from' value stays in kwargs and is passed to create() -- confirm
    that is intended.  The follow-up update runs in a raw thread with no
    error reporting back to the caller.
    """
    copy_from = None
    if kwargs.get('copy_from'):
        copy_from = kwargs.pop('copy_from')
    image = glanceclient(request).images.create(**kwargs)
    if copy_from:
        thread.start_new_thread(image_update,
                                (request, image.id),
                                {'copy_from': copy_from})
    return image
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import itertools
import logging
import thread
from django.conf import settings
import six.moves.urllib.parse as urlparse
import glanceclient as glance_client
from horizon.utils import functions as utils
from openstack_dashboard.api import base
LOG = logging.getLogger(__name__)
def glanceclient(request):
    """Build a Glance v1 client authenticated with the request's token.

    NOTE(review): only scheme and netloc of the catalog's 'image' endpoint
    are kept -- any path component is discarded when rebuilding the URL.
    """
    o = urlparse.urlparse(base.url_for(request, 'image'))
    url = "://".join((o.scheme, o.netloc))
    insecure = getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
    cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
    LOG.debug('glanceclient connection created using token "%s" and url "%s"'
              % (request.user.token.id, url))
    return glance_client.Client('1', url, token=request.user.token.id,
                                insecure=insecure, cacert=cacert)
def image_delete(request, image_id):
    """Delete the image identified by *image_id* via the Glance API."""
    return glanceclient(request).images.delete(image_id)


def image_get(request, image_id):
    """Returns an Image object populated with metadata for image
    with supplied identifier.

    Guarantees a ``name`` attribute (None when the service omits it).
    """
    image = glanceclient(request).images.get(image_id)
    if not hasattr(image, 'name'):
        image.name = None
    return image
def image_list_detailed(request, marker=None, filters=None, paginate=False):
    """List images; returns ``(images, has_more_data)``.

    When paginating, one extra row is requested; if it arrives it is popped
    and ``has_more_data`` is set.
    """
    limit = getattr(settings, 'API_RESULT_LIMIT', 1000)
    page_size = utils.get_page_size(request)

    if paginate:
        # Fetch one extra row to detect a further page.
        request_size = page_size + 1
    else:
        request_size = limit

    kwargs = {'filters': filters or {}}
    if marker:
        kwargs['marker'] = marker

    images_iter = glanceclient(request).images.list(page_size=request_size,
                                                    limit=limit,
                                                    **kwargs)
    has_more_data = False
    if paginate:
        images = list(itertools.islice(images_iter, request_size))
        if len(images) > page_size:
            images.pop(-1)
            has_more_data = True
    else:
        images = list(images_iter)
    return (images, has_more_data)
def image_update(request, image_id, **kwargs):
    """Apply the given attribute updates to an image and return the result."""
    return glanceclient(request).images.update(image_id, **kwargs)


def image_create(request, **kwargs):
    """Create an image; a 'copy_from' URL is applied asynchronously in a raw
    thread with no error reporting back to the caller."""
    copy_from = None
    if kwargs.get('copy_from'):
        copy_from = kwargs.pop('copy_from')
    image = glanceclient(request).images.create(**kwargs)
    if copy_from:
        thread.start_new_thread(image_update,
                                (request, image.id),
                                {'copy_from': copy_from})
    return image
| Python | 0.000011 |
e85e1021ae20ebecb344c592f60f2ad6607a1db1 | refactor rename variables for clarity | src/main/python/pybuilder/plugins/filter_resources_plugin.py | src/main/python/pybuilder/plugins/filter_resources_plugin.py | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from pybuilder.core import init, after, use_plugin
from pybuilder.utils import apply_on_files, read_file, write_file
use_plugin("core")
@init
def init_filter_resources_plugin(project):
    """Provide plugin defaults: filter under ``$dir_target`` and no globs."""
    project.set_property_if_unset("filter_resources_target", "$dir_target")
    project.set_property_if_unset("filter_resources_glob", [])
@after("package", only_once=True)
def filter_resources(project, logger):
globs = project.get_mandatory_property("filter_resources_glob")
if not globs:
logger.warn("No resources to filter configured. Consider removing plugin.")
return
target = project.expand_path("$filter_resources_target")
logger.info("Filter resources matching %s in %s", " ".join(globs), target)
project_dict_wrapper = ProjectDictWrapper(project)
apply_on_files(target, filter_resource, globs, project_dict_wrapper, logger)
def filter_resource(absolute_file_name, relative_file_name, dictionary, logger):
    """Substitute ``${placeholder}`` tokens in one file, rewriting it in place.

    Unresolvable placeholders are left untouched (safe_substitute).
    """
    logger.debug("Filtering resource %s", absolute_file_name)
    template = string.Template("".join(read_file(absolute_file_name)))
    write_file(absolute_file_name, template.safe_substitute(dictionary))
class ProjectDictWrapper(object):
    """Adapt a pybuilder project to the mapping protocol used by
    ``string.Template``: project attribute first, then project property,
    then the literal ``${key}`` placeholder so unresolved tokens survive
    substitution untouched."""

    def __init__(self, project):
        self.project = project

    def __getitem__(self, key):
        placeholder = "${%s}" % key
        from_properties = self.project.get_property(key, placeholder)
        # Attributes on the project object shadow property values.
        return getattr(self.project, key, from_properties)
| # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2014 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import string
from pybuilder.core import init, after, use_plugin
from pybuilder.utils import apply_on_files, read_file, write_file
use_plugin("core")
@init
def init_filter_resources_plugin(project):
    """Seed the plugin's properties with defaults unless already configured."""
    project.set_property_if_unset("filter_resources_target", "$dir_target")
    project.set_property_if_unset("filter_resources_glob", [])
@after("package", only_once=True)
def filter_resources(project, logger):
    """After packaging, substitute ${...} placeholders in matching files."""
    globs = project.get_mandatory_property("filter_resources_glob")
    if not globs:
        logger.warn("No resources to filter configured. Consider removing plugin.")
        return
    target = project.expand_path("$filter_resources_target")
    logger.info("Filter resources matching %s in %s", " ".join(globs), target)
    # Wrap the project so string.Template can look up attributes/properties.
    project_dict_wrapper = ProjectDictWrapper(project)
    apply_on_files(target, filter_resource, globs, project_dict_wrapper, logger)
def filter_resource(absolute_file_name, relative_file_name, dictionary, logger):
    """Substitute ${...} placeholders in a single file, rewriting it in place."""
    logger.debug("Filtering resource %s", absolute_file_name)
    content = "".join(read_file(absolute_file_name))
    # safe_substitute keeps unknown placeholders instead of raising KeyError.
    filtered = string.Template(content).safe_substitute(dictionary)
    write_file(absolute_file_name, filtered)
class ProjectDictWrapper(object):
    """Expose a PyBuilder project as the mapping string.Template expects.

    Lookup order for a key: a same-named project attribute wins; otherwise
    the project property of that name; otherwise the literal "${key}" so
    unknown placeholders survive substitution unchanged.
    """

    def __init__(self, project):
        self.project = project

    def __getitem__(self, key):
        unresolved_placeholder = "${%s}" % key
        property_or_placeholder = self.project.get_property(key, unresolved_placeholder)
        return getattr(self.project, key, property_or_placeholder)
| Python | 0.000002 |
ab49b0be58975156f96bd5340da8d06f5b8626a5 | Change to batch_size = 64 | tensorflow_examples/models/nmt_with_attention/distributed_test.py | tensorflow_examples/models/nmt_with_attention/distributed_test.py | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed nmt_with_attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf # TF2
from tensorflow_examples.models.nmt_with_attention import distributed_train
from tensorflow_examples.models.nmt_with_attention import utils
assert tf.__version__.startswith('2')
class NmtDistributedBenchmark(tf.test.Benchmark):
  """Wall-time/loss benchmarks for distributed NMT-with-attention training."""

  def __init__(self, output_dir=None, **kwargs):
    # Extra kwargs from the benchmark runner are accepted but unused.
    self.output_dir = output_dir

  def benchmark_one_epoch_1_gpu(self):
    kwargs = utils.get_common_kwargs()
    kwargs.update({'enable_function': False})
    self._run_and_report_benchmark(**kwargs)

  def benchmark_one_epoch_1_gpu_function(self):
    self._run_and_report_benchmark(**utils.get_common_kwargs())

  def benchmark_ten_epochs_2_gpus(self):
    kwargs = utils.get_common_kwargs()
    kwargs.update({'epochs': 10, 'num_gpu': 2, 'batch_size': 64})
    self._run_and_report_benchmark(**kwargs)

  def _run_and_report_benchmark(self, **kwargs):
    # Time the full training run and report both losses alongside wall time.
    start_time_sec = time.time()
    train_loss, test_loss = distributed_train.main(**kwargs)
    wall_time_sec = time.time() - start_time_sec
    self.report_benchmark(
        wall_time=wall_time_sec,
        extras={'train_loss': train_loss, 'test_loss': test_loss})
if __name__ == '__main__':
tf.test.main()
| # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for distributed nmt_with_attention."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import tensorflow as tf # TF2
from tensorflow_examples.models.nmt_with_attention import distributed_train
from tensorflow_examples.models.nmt_with_attention import utils
assert tf.__version__.startswith('2')
class NmtDistributedBenchmark(tf.test.Benchmark):
def __init__(self, output_dir=None, **kwargs):
self.output_dir = output_dir
def benchmark_one_epoch_1_gpu(self):
kwargs = utils.get_common_kwargs()
kwargs.update({'enable_function': False})
self._run_and_report_benchmark(**kwargs)
def benchmark_one_epoch_1_gpu_function(self):
kwargs = utils.get_common_kwargs()
self._run_and_report_benchmark(**kwargs)
def benchmark_ten_epochs_2_gpus(self):
kwargs = utils.get_common_kwargs()
kwargs.update({'epochs': 10, 'num_gpu': 2, 'batch_size': 128})
self._run_and_report_benchmark(**kwargs)
def _run_and_report_benchmark(self, **kwargs):
start_time_sec = time.time()
train_loss, test_loss = distributed_train.main(**kwargs)
wall_time_sec = time.time() - start_time_sec
extras = {'train_loss': train_loss,
'test_loss': test_loss}
self.report_benchmark(
wall_time=wall_time_sec, extras=extras)
if __name__ == '__main__':
tf.test.main()
| Python | 0.000689 |
a59ec3963e0726c291dbde0d26a5f3468e88966c | Add call to audit_orders from cli.py. | 2017-code/cli.py | 2017-code/cli.py | # cli.py
# Ronald L. Rivest (with Karim Husayn Karimi)
# July 22, 2017
# python3
"""
Command-line parser and dispatch
"""
import argparse
import multi
import election_specification
import ids
import audit
import reported
##############################################################################
# Command-line arguments
def parse_args():
parser = argparse.ArgumentParser(description="""multi.py: A Bayesian post-election audit program for an
election with multiple contests and multiple paper ballot
collections.""")
#v1 and v2:
# Mandatory argument is dirname
parser.add_argument("election_dirname", help="""
The name for this election of the subdirectory within the elections root directory.""")
# All others are optional
# First group sets parameters: election_name, elections_root, audit_seed
parser.add_argument("--election_name", help="""
Human-readable name of the election.""",
default="TestElection")
parser.add_argument("--elections_root", help="""The directory where the subdirectory for the
election is to be found. Defaults to "./elections".""",
default="./elections")
parser.add_argument("--audit_seed",
help="""Seed for the random number generator used for
auditing (arbitrary nonnegative integer). (If omitted, uses clock.)""")
## v2:
parser.add_argument("--read_specification", action="store_true", help="""
Read and check election specification.""")
parser.add_argument("--read_reported", action="store_true", help="""
Read and check reported election data and results.""")
parser.add_argument("--read_seed", action="store_true", help="""
Read audit seed.""")
parser.add_argument("--make_orders", action="store_true", help="""
Make audit orders files.""")
parser.add_argument("--read_audited", action="store_true", help="""
Read and check audited votes.""")
parser.add_argument("--stage",
help="""Run stage STAGE of the audit (may specify "ALL").""")
args = parser.parse_args()
# print("Command line arguments:", args)
return args
def process_args(e, args):
    """Copy parsed CLI options onto election *e*, then run the selected phase.

    Exactly one phase runs, chosen by the first matching flag in the
    elif-chain below; --stage drives a full audit run.
    """
    e.election_dirname = ids.filename_safe(args.election_dirname)
    e.election_name = args.election_name
    ELECTIONS_ROOT = args.elections_root  # NOTE(review): local only, never read -- confirm intent
    audit.set_audit_seed(e, args.audit_seed)
    if args.read_specification:
        # print("read_specification")
        election_specification.get_election_specification(e)
    elif args.read_reported:
        print("read_reported")
        election_specification.get_election_specification(e)
        reported.get_election_data(e)
    elif args.read_seed:
        print("read_seed")
        election_specification.get_election_specification(e)
        reported.get_election_data(e)
        audit.get_audit_parameters(e, args)
    elif args.make_orders:
        print("make_orders")
        # Fix: the module never imports audit_orders at the top, so this call
        # previously raised NameError; import it locally where it is needed.
        import audit_orders
        audit_orders.compute_audit_orders(e)
    elif args.read_audited:
        print("read_audited")
    elif args.stage:
        print("stage", args.stage)
        election_specification.get_election_specification(e)
        reported.get_election_data(e)
        audit.get_audit_parameters(e, args)
        audit.audit(e, args)
| # cli.py
# Ronald L. Rivest (with Karim Husayn Karimi)
# July 22, 2017
# python3
"""
Command-line parser and dispatch
"""
import argparse
import multi
import election_specification
import ids
import audit
import reported
##############################################################################
# Command-line arguments
def parse_args():
parser = argparse.ArgumentParser(description="""multi.py: A Bayesian post-election audit program for an
election with multiple contests and multiple paper ballot
collections.""")
#v1 and v2:
# Mandatory argument is dirname
parser.add_argument("election_dirname", help="""
The name for this election of the subdirectory within the elections root directory.""")
# All others are optional
# First group sets parameters: election_name, elections_root, audit_seed
parser.add_argument("--election_name", help="""
Human-readable name of the election.""",
default="TestElection")
parser.add_argument("--elections_root", help="""The directory where the subdirectory for the
election is to be found. Defaults to "./elections".""",
default="./elections")
parser.add_argument("--audit_seed",
help="""Seed for the random number generator used for
auditing (arbitrary nonnegative integer). (If omitted, uses clock.)""")
## v2:
parser.add_argument("--read_specification", action="store_true", help="""
Read and check election specification.""")
parser.add_argument("--read_reported", action="store_true", help="""
Read and check reported election data and results.""")
parser.add_argument("--read_seed", action="store_true", help="""
Read audit seed.""")
parser.add_argument("--make_orders", action="store_true", help="""
Make audit orders files.""")
parser.add_argument("--read_audited", action="store_true", help="""
Read and check audited votes.""")
parser.add_argument("--stage",
help="""Run stage STAGE of the audit (may specify "ALL").""")
args = parser.parse_args()
# print("Command line arguments:", args)
return args
def process_args(e, args):
e.election_dirname = ids.filename_safe(args.election_dirname)
e.election_name = args.election_name
ELECTIONS_ROOT = args.elections_root
audit.set_audit_seed(e, args.audit_seed)
if args.read_specification:
# print("read_specification")
election_specification.get_election_specification(e)
elif args.read_reported:
print("read_reported")
election_specification.get_election_specification(e)
reported.get_election_data(e)
elif args.read_seed:
print("read_seed")
election_specification.get_election_specification(e)
reported.get_election_data(e)
audit.get_audit_parameters(e, args)
elif args.make_orders:
print("make_orders")
elif args.read_audited:
print("read_audited")
elif args.stage:
print("stage", args.stage)
election_specification.get_election_specification(e)
reported.get_election_data(e)
audit.get_audit_parameters(e, args)
audit.audit(e, args)
| Python | 0 |
ab22a41382f739313d8e5484b4f3d54745e0a888 | Removed urllib2 import. should fix #1 | BitcoinTicker.py | BitcoinTicker.py | import sublime
import sublime_plugin
try:
from urllib.request import urlopen
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from urllib import urlopen
import json
import re
class BitcoinTicker(sublime_plugin.EventListener):
    def check_for_calc(self):
        """
        If enabled in settings, searches the view for a bitcoin amount to convert and
        replaces string with converted value.
        Supported formats that will be searched:
        1 BTC
        0.252 btc
        .5 btc
        13.303 BTC
        """
        settings = sublime.load_settings(__name__ + '.sublime-settings')
        convert_strings = settings.get('convert_strings')
        if convert_strings:
            regex = r'([-+]?[0-9]*\.?[0-9]+)\s*btc'
            extractions = []
            regions = self.view.find_all(regex, sublime.IGNORECASE, "$1", extractions)
            # Each insertion shifts later match regions; track the accumulated
            # offset so subsequent inserts land after the correct match.
            added_length = 0
            btc_in_usd, exchange_name = self.get_current_exchange()
            for index, region in enumerate(regions):
                amount = float(extractions[index])
                result = btc_in_usd * amount
                edit = self.view.begin_edit()
                added_length += self.view.insert(edit, region.end() + added_length, " => $%.2f (%s)" % (result, exchange_name))
                self.view.end_edit(edit)
    def update_status(self):
        """
        Updates the view's status bar with the current exchange rate
        """
        # NOTE(review): the HTTP fetch in get_current_exchange happens
        # synchronously inside this call.
        self.view.set_status('btc', "$%.2f (%s)" % self.get_current_exchange())
    def get_current_exchange(self):
        """
        Makes API call to exchange (determined via settings) to retrieve latest
        exchange rate.
        Exchanges:
        1 - Mt.Gox
        2 - Bitfloor
        Returns a tuple consisting of the current exchange rate of 1 bitcoin in USD
        as well as the name of the exchange.
        """
        settings = sublime.load_settings(__name__ + '.sublime-settings')
        exchange = settings.get('exchange')
        # NOTE(review): btc_in_usd/exchange_name are only assigned in the two
        # branches below -- any other 'exchange' value raises UnboundLocalError
        # at the return statement.
        if exchange == 1:
            url = 'http://data.mtgox.com/api/1/BTCUSD/ticker'
            req = urlparse(url)
            resp = json.load(urlopen(req.geturl()))
            exchange_name = 'Mt.Gox'
            btc_in_usd = float(resp['return']['last']['value'])
        elif exchange == 2:
            url = 'https://api.bitfloor.com/ticker/1'
            req = urlparse(url)
            resp = json.load(urlopen(req.geturl()))
            exchange_name = 'Bitfloor'
            btc_in_usd = float(resp['price'])
        return (btc_in_usd, exchange_name)
    def on_load(self, view):
        # Remember the loaded view and refresh the ticker whenever the
        # relevant settings change; the initial refresh is deferred 10 ms.
        self.view = view
        settings = sublime.load_settings(__name__ + '.sublime-settings')
        settings.add_on_change('exchange', self.update_status)
        settings.add_on_change('convert_strings', self.check_for_calc)
        sublime.set_timeout(self.update_status, 10)
def on_post_save(self, view):
self.view = view
sublime.set_timeout(self.update_status, 10)
self.check_for_calc() | import sublime
import sublime_plugin
try:
from urllib.request import urlopen
from urllib.parse import urlparse
import urllib2
except ImportError:
from urlparse import urlparse
from urllib import urlopen
import json
import re
class BitcoinTicker(sublime_plugin.EventListener):
def check_for_calc(self):
"""
If enabled in settings, searches the view for a bitcoin amount to convert and
replaces string with converted value.
Supported formats that will be searched:
1 BTC
0.252 btc
.5 btc
13.303 BTC
"""
settings = sublime.load_settings(__name__ + '.sublime-settings')
convert_strings = settings.get('convert_strings')
if convert_strings:
regex = r'([-+]?[0-9]*\.?[0-9]+)\s*btc'
extractions = []
regions = self.view.find_all(regex, sublime.IGNORECASE, "$1", extractions)
added_length = 0
btc_in_usd, exchange_name = self.get_current_exchange()
for index, region in enumerate(regions):
amount = float(extractions[index])
result = btc_in_usd * amount
edit = self.view.begin_edit()
added_length += self.view.insert(edit, region.end() + added_length, " => $%.2f (%s)" % (result, exchange_name))
self.view.end_edit(edit)
def update_status(self):
"""
Updates the view's status bar with the current exchange rate
"""
self.view.set_status('btc', "$%.2f (%s)" % self.get_current_exchange())
def get_current_exchange(self):
"""
Makes API call to exchange (determined via settings) to retrieve latest
exchange rate.
Exchanges:
1 - Mt.Gox
2 - Bitfloor
Returns a tuple consisting of the current exchange rate of 1 bitcoin in USD
as well as the name of the exchange.
"""
settings = sublime.load_settings(__name__ + '.sublime-settings')
exchange = settings.get('exchange')
if exchange == 1:
url = 'http://data.mtgox.com/api/1/BTCUSD/ticker'
req = urlparse(url)
resp = json.load(urlopen(req.geturl()))
exchange_name = 'Mt.Gox'
btc_in_usd = float(resp['return']['last']['value'])
elif exchange == 2:
url = 'https://api.bitfloor.com/ticker/1'
req = urlparse(url)
resp = json.load(urlopen(req.geturl()))
exchange_name = 'Bitfloor'
btc_in_usd = float(resp['price'])
return (btc_in_usd, exchange_name)
def on_load(self, view):
self.view = view
settings = sublime.load_settings(__name__ + '.sublime-settings')
settings.add_on_change('exchange', self.update_status)
settings.add_on_change('convert_strings', self.check_for_calc)
sublime.set_timeout(self.update_status, 10)
def on_post_save(self, view):
self.view = view
sublime.set_timeout(self.update_status, 10)
self.check_for_calc() | Python | 0.999632 |
93ad5396bb1d574c86a6b3323199e75fe3bb34f4 | implement protection for non existing directories | PyAnalysisTools/base/ShellUtils.py | PyAnalysisTools/base/ShellUtils.py | import shutil
import os
import subprocess
def make_dirs(path):
    """Create *path* (parents included) unless it already exists.

    ``~`` is expanded first. Errors from os.makedirs (e.g. permission
    denied) propagate unchanged; previously they were replaced by a bare
    ``raise OSError`` that discarded the errno and message.
    """
    path = os.path.expanduser(path)
    if os.path.exists(path):
        return
    os.makedirs(path)
def resolve_path_from_symbolic_links(symbolic_link, relative_path):
    """Anchor *relative_path* at *symbolic_link* when any prefix of the
    latter is a symlink; otherwise return *relative_path* unchanged.

    None inputs and absolute relative_path values pass through untouched.
    """
    if symbolic_link is None or relative_path is None:
        return relative_path
    if os.path.isabs(relative_path):
        return relative_path
    base = symbolic_link if symbolic_link.endswith("/") else symbolic_link + "/"
    parts = base.split("/")
    # Walk prefixes from longest to shortest; the first symlink found wins.
    for cut in range(1, len(parts)):
        if os.path.islink("/".join(parts[:-cut])):
            return os.path.abspath(os.path.join(base, relative_path))
    return relative_path
def move(src, dest):
    """Relocate *src* to *dest* via shutil.move; errors propagate unchanged.

    (The original ``except IOError: raise`` only re-raised and is dropped.)
    """
    shutil.move(src, dest)
def copy(src, dest):
    """Copy *src* to *dest* via shutil.copy (data plus permission bits).

    The previous bare ``except: raise`` caught everything (including
    KeyboardInterrupt) only to re-raise it, adding nothing; errors now
    simply propagate.
    """
    shutil.copy(src, dest)
def remove_directory(path, safe=False):
    """Delete *path*, silently ignoring a path that does not exist.

    safe=True removes only empty directory chains via os.removedirs;
    otherwise the whole tree is deleted with shutil.rmtree. OSError from
    either call propagates to the caller.
    """
    if not os.path.exists(path):
        return
    remover = os.removedirs if safe else shutil.rmtree
    remover(path)
def source(script_name):
    """Source a shell script and absorb the environment it exports.

    Runs ``. script; env`` in a shell and copies every NAME=VALUE line
    into os.environ. NOTE(review): under Python 3 communicate() returns
    bytes, so the str splits below would raise TypeError -- this code
    assumes Python 2 (or text-mode pipes); confirm before reuse.
    """
    pipe = subprocess.Popen(". %s; env" % script_name, stdout=subprocess.PIPE, shell=True)
    output = pipe.communicate()[0]
    output = filter(lambda l: len(l.split("=")) == 2, output.splitlines())
    env = dict((line.split("=", 1) for line in output))
    os.environ.update(env)
| import shutil
import os
import subprocess
def make_dirs(path):
    """Create *path* (parents included) unless it already exists.

    ``~`` is expanded first. Errors from os.makedirs (e.g. permission
    denied) propagate unchanged; previously they were replaced by a bare
    ``raise OSError`` that discarded the errno and message.
    """
    path = os.path.expanduser(path)
    if os.path.exists(path):
        return
    os.makedirs(path)
def resolve_path_from_symbolic_links(symbolic_link, relative_path):
    """Anchor *relative_path* at *symbolic_link* when any prefix of the
    latter is a symlink; otherwise return *relative_path* unchanged.

    None inputs and absolute relative_path values pass through untouched.
    """
    def is_symbolic_link(path):
        return os.path.islink(path)
    if symbolic_link is None or relative_path is None:
        return relative_path
    if os.path.isabs(relative_path):
        return relative_path
    if not symbolic_link.endswith("/"):
        symbolic_link += "/"
    top_level_dir = symbolic_link.split("/")
    # Walk prefixes from longest to shortest; the first symlink found wins.
    for n in range(1, len(top_level_dir)):
        if is_symbolic_link("/".join(top_level_dir[:-n])):
            return os.path.abspath(os.path.join(symbolic_link, relative_path))
    return relative_path
def move(src, dest):
    """Move *src* to *dest* via shutil.move; IOError is re-raised as-is."""
    try:
        shutil.move(src, dest)
    except IOError:
        # No handling -- the except exists only to make propagation explicit.
        raise
def copy(src, dest):
    """Copy *src* to *dest* via shutil.copy (data plus permission bits).

    The previous bare ``except: raise`` caught everything (including
    KeyboardInterrupt) only to re-raise it, adding nothing; errors now
    simply propagate.
    """
    shutil.copy(src, dest)
def remove_directory(path, safe=False):
    """Delete *path*; silently do nothing when it does not exist.

    With safe=True only empty directory chains are removed
    (os.removedirs); otherwise the whole tree goes (shutil.rmtree).
    OSError propagates in both modes. The existence guard fixes a crash
    when callers pass an already-removed directory; the previous
    try/except blocks only re-raised and are dropped.
    """
    if not os.path.exists(path):
        return
    if safe:
        os.removedirs(path)
    else:
        shutil.rmtree(path)
def source(script_name):
    """Source a shell script and absorb the environment it exports.

    Runs ``. script; env`` in a shell and copies every NAME=VALUE line
    into os.environ. NOTE(review): under Python 3 communicate() returns
    bytes, so the str splits below would raise TypeError -- this code
    assumes Python 2 (or text-mode pipes); confirm before reuse.
    """
    pipe = subprocess.Popen(". %s; env" % script_name, stdout=subprocess.PIPE, shell=True)
    output = pipe.communicate()[0]
    output = filter(lambda l: len(l.split("=")) == 2, output.splitlines())
    env = dict((line.split("=", 1) for line in output))
    os.environ.update(env)
| Python | 0 |
188affe12f31973741ae9b429d8aed757fff0d85 | Fixing timestamp = sec * 1000 | rockmylight/rockmylight/rml/views.py | rockmylight/rockmylight/rml/views.py | from django.shortcuts import render
from django.http import JsonResponse
import time
# Create your views here.
def main(request):
    """Render the landing ("main") page."""
    return render(request, 'rml/main.html', {})
def jam(request):
    """Render the jam-session page."""
    return render(request, 'rml/jam.html', {})
# API part
INTERVAL = 0.5  # frame spacing in seconds (api_dj multiplies by 1000 for ms)
NUM_OF_FRAMES = 120  # frames returned per schedule request
# Hex colors cycled through by next_color (these resemble the Solarized
# palette -- TODO confirm).
COLORS = ['002b36', '073642', '586e75', '657b83',
          '839496', '93a1a1', 'eee8d5', 'fdf6e3']
def next_color(color):
    """Return the entry after *color* in COLORS, wrapping to the first."""
    following = COLORS.index(color) + 1
    return COLORS[following % len(COLORS)]
def api_dj(request, session_id=1):
    """Serve the frame schedule for a session as JSON.

    Each frame carries a color from COLORS and a millisecond timestamp
    spaced INTERVAL seconds apart, starting at the current time.
    """
    data = {}
    # number of connected clients in the grid (hard-coded placeholder)
    data['num_of_clients'] = 6
    start_time = int(time.time())
    schedule = []
    color = COLORS[0]
    for tick in range(NUM_OF_FRAMES):
        schedule.append({
            'timestamp': (start_time + tick * INTERVAL) * 1000,
            'color': color,
        })
        color = next_color(color)
    data['frames'] = schedule
    return JsonResponse(data)
| from django.shortcuts import render
from django.http import JsonResponse
import time
# Create your views here.
def main(request):
    """Render the landing ("main") page."""
    context = {}
    return render(request, 'rml/main.html', context)
def jam(request):
    """Render the jam-session page."""
    context = {}
    return render(request, 'rml/jam.html', context)
# API part
INTERVAL = 0.5  # frame spacing -- units ambiguous here; TODO confirm seconds
NUM_OF_FRAMES = 120  # frames returned per schedule request
# Hex colors cycled through by next_color (these resemble the Solarized
# palette -- TODO confirm).
COLORS = ['002b36', '073642', '586e75', '657b83',
          '839496', '93a1a1', 'eee8d5', 'fdf6e3']
def next_color(color):
    """Return the entry after *color* in COLORS, wrapping to the first."""
    index = COLORS.index(color)
    if index + 1 == len(COLORS):
        return COLORS[0]
    return COLORS[index + 1]
def api_dj(request, session_id=1):
    """Serve the frame schedule for a session as JSON.

    Each frame carries a color from COLORS and a millisecond timestamp.
    BUG FIX: ``start_time * 1000 + frame_index * INTERVAL`` mixed units,
    adding the seconds-based INTERVAL (0.5) directly to a millisecond
    base, so frames were 0.5 ms apart. Summing in seconds first and
    converting afterwards spaces them INTERVAL seconds (500 ms) apart.
    """
    data = {}
    # number of connected clients in the grid
    data['num_of_clients'] = 6
    data['frames'] = []
    start_time = int(time.time())
    color = COLORS[0]
    for frame_index in range(NUM_OF_FRAMES):
        frame = {}
        frame['timestamp'] = (start_time + frame_index * INTERVAL) * 1000
        frame['color'] = color
        color = next_color(color)
        data['frames'].append(frame)
    response = JsonResponse(data)
    return response
| Python | 0.998776 |
632f70d64bac45365974db834a3a6ddcb16e13ad | Add GuardianModelMixin in users/models.py | feder/users/models.py | feder/users/models.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import python_2_unicode_compatible
from guardian.mixins import GuardianUserMixin
@python_2_unicode_compatible
class User(GuardianUserMixin, AbstractUser):
    """Project user model: Django's AbstractUser plus django-guardian's
    object-permission helpers via GuardianUserMixin."""
    def __str__(self):
        return self.username
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from django.contrib.auth.models import AbstractUser
from django.utils.encoding import python_2_unicode_compatible
# from django.db import models
# from django.utils.translation import ugettext_lazy as _
@python_2_unicode_compatible
class User(AbstractUser):
def __str__(self):
return self.username
| Python | 0 |
81b2519f575d35d2f1b735bcaef1901539ee06fa | refactor mgmt cmd update-toplist to use just CouchDB | mygpo/directory/management/commands/update-toplist.py | mygpo/directory/management/commands/update-toplist.py | from datetime import datetime
from django.core.management.base import BaseCommand
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
    """Append a subscriber-count snapshot to every podcast (idempotent per run)."""
    def handle(self, *args, **options):
        # couchdbkit doesn't preserve microseconds
        started = datetime.utcnow().replace(microsecond=0)
        podcasts = Podcast.all_podcasts()
        total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
        for n, podcast in enumerate(podcasts):
            subscriber_count = self.get_subscriber_count(podcast.get_id())
            self.update(podcast=podcast, started=started, subscriber_count=subscriber_count)
            progress(n, total)
    @repeat_on_conflict(['podcast'])
    def update(self, podcast, started, subscriber_count):
        # We've already updated this podcast
        if started in [e.timestamp for e in podcast.subscribers]:
            return
        data = SubscriberData(
            timestamp = started,
            subscriber_count = max(0, subscriber_count),
            )
        # Keep the history sorted by timestamp after inserting the new sample.
        podcast.subscribers = sorted(podcast.subscribers + [data], key=lambda e: e.timestamp)
        podcast.save()
    @staticmethod
    def get_subscriber_count(podcast_id):
        # Grouped reduce over the (podcast, user) view; presumably one row
        # per subscribing user -- verify against the CouchDB view definition.
        db = PodcastUserState.get_db()
        x = db.view('users/subscriptions_by_podcast',
                startkey    = [podcast_id, None],
                endkey      = [podcast_id, {}],
                reduce      = True,
                group       = True,
                group_level = 2,
            )
        return x.count()
| from datetime import datetime
from django.core.management.base import BaseCommand
from couchdbkit import ResourceConflict
from mygpo.core.models import Podcast, SubscriberData
from mygpo.users.models import PodcastUserState
from mygpo.utils import progress, multi_request_view
from mygpo.decorators import repeat_on_conflict
class Command(BaseCommand):
def handle(self, *args, **options):
started = datetime.now()
entries = multi_request_view(Podcast, 'core/podcasts_by_oldid', include_docs=True)
total = Podcast.view('core/podcasts_by_oldid', limit=0).total_rows
for n, entry in enumerate(entries):
subscriber_count = self.get_subscriber_count(entry.get_id())
self.update(entry=entry, started=started, subscriber_count=subscriber_count)
progress(n, total)
@repeat_on_conflict(['entry'])
def update(self, entry, started, subscriber_count):
data = SubscriberData(
timestamp = started,
subscriber_count = max(0, subscriber_count),
)
entry.subscribers.append(data)
entry.save()
@staticmethod
def get_subscriber_count(podcast_id):
db = PodcastUserState.get_db()
x = db.view('users/subscriptions_by_podcast',
startkey = [podcast_id, None],
endkey = [podcast_id, {}],
)
return x.count()
| Python | 0 |
85761d00814d1835ace72adb13a43b07b1f5536d | Fix issue #18, don't follow symlinks by default | botbot/checker.py | botbot/checker.py | """Base class for checking file trees"""
import stat
import os
import time
from botbot import problist as pl
class Checker:
    """
    Holds a set of checks that can be run on a file to make sure that
    it's suitable for the shared directory. Runs checks recursively on a
    given path.
    """
    # checks is a set of all the checking functions this checker knows of. All
    # checkers return a number signifying a specific problem with the
    # file specified in the path.
    def __init__(self):
        self.checks = set() # All checks to perform
        self.probs = pl.ProblemList() # List of files with their issues
        self.info = {
            'files': 0,
            'problems': 0,
            'time': 0
        } # Information about the previous check
    def register(self, func):
        """
        Add a new checking function to the set, or a list/tuple of
        functions.
        """
        if hasattr(func, '__call__'):
            self.checks.add(func)
        else:
            for f in list(func):
                self.checks.add(f)
    def check_tree(self, path, link=False):
        """
        Run every registered check on every file under *path* using an
        explicit stack (no recursion). Problems accumulate in self.probs
        and counters in self.info; nothing is returned. Symbolic links
        are skipped unless *link* is True.
        """
        path = os.path.abspath(path)
        start = path # Currently unused, could be used to judge depth
        to_check = [path]
        extime = time.time()
        while len(to_check) > 0:
            chk_path = to_check.pop()
            try:
                # Skipping links by default avoids cycles via symlinked dirs.
                if not link and is_link(chk_path):
                    continue
                elif stat.S_ISDIR(os.stat(chk_path).st_mode):
                    new = [os.path.join(chk_path, f) for f in os.listdir(chk_path)]
                    to_check.extend(new)
                else:
                    self.check_file(chk_path)
            except FileNotFoundError:
                self.probs.add_problem(chk_path, 'PROB_BROKEN_LINK')
            except PermissionError:
                self.probs.add_problem(chk_path, 'PROB_DIR_NOT_WRITABLE')
        self.info['time'] = time.time() - extime
    def check_file(self, chk_path):
        """Check a file against all checkers"""
        for check in self.checks:
            prob = check(chk_path)
            if prob is not None:
                self.probs.add_problem(chk_path, prob)
                self.info['problems'] += 1
        self.info['files'] += 1
    def pretty_print_issues(self, verbose):
        """
        Print overall statistics for the previous run. NOTE(review): the
        *verbose* flag is currently unused by this body, and per-file
        issue output is not implemented here.
        TODO: Move into ReportWriter
        """
        # Print general statistics
        infostring = "Found {problems} problems over {files} files in {time:.2f} seconds."
        print(infostring.format(**self.info))
def is_link(path):
    """True if *path* is a symlink, or if its absolute and fully-resolved
    paths disagree (i.e. some ancestor component is a symlink)."""
    if os.path.islink(path):
        return True
    return os.path.abspath(path) != os.path.realpath(path)
| """Base class for checking file trees"""
import stat
import os
import time
from botbot import problist as pl
class Checker:
"""
Holds a set of checks that can be run on a file to make sure that
it's suitable for the shared directory. Runs checks recursively on a
given path.
"""
# checks is a set of all the checking functions this checker knows of. All
# checkers return a number signifying a specific problem with the
# file specified in the path.
def __init__(self):
self.checks = set() # All checks to perform
self.probs = pl.ProblemList() # List of files with their issues
self.info = {
'files': 0,
'problems': 0,
'time': 0
} # Information about the previous check
def register(self, func):
"""
Add a new checking function to the set, or a list/tuple of
functions.
"""
if hasattr(func, '__call__'):
self.checks.add(func)
else:
for f in list(func):
self.checks.add(f)
def check_tree(self, path):
"""
Run all the checks on every file in the specified path,
recursively. Returns a list of tuples. Each tuple contains 2
elements: the first is the path of the file, and the second is
a list of issues with the file at that path. If link is True,
follow symlinks.
"""
path = os.path.abspath(path)
start = path # Currently unused, could be used to judge depth
to_check = [path]
extime = time.time()
while len(to_check) > 0:
chk_path = to_check.pop()
try:
if stat.S_ISDIR(os.stat(chk_path).st_mode):
new = [os.path.join(chk_path, f) for f in os.listdir(chk_path)]
to_check.extend(new)
else:
self.check_file(chk_path)
except FileNotFoundError:
self.probs.add_problem(chk_path, 'PROB_BROKEN_LINK')
except PermissionError:
self.probs.add_problem(chk_path, 'PROB_DIR_NOT_WRITABLE')
self.info['time'] = time.time() - extime
def check_file(self, chk_path):
"""Check a file against all checkers"""
for check in self.checks:
prob = check(chk_path)
if prob is not None:
self.probs.add_problem(chk_path, prob)
self.info['problems'] += 1
self.info['files'] += 1
def pretty_print_issues(self, verbose):
"""
Print a list of issues with their fixes. Only print issues which
are in problist, unless verbose is true, in which case print
all messages.
TODO: Move into ReportWriter
"""
# Print general statistics
infostring = "Found {problems} problems over {files} files in {time:.2f} seconds."
print(infostring.format(**self.info))
def is_link(path):
    """True if *path* is a symlink, or if its absolute and fully-resolved
    paths disagree (i.e. some ancestor component is a symlink)."""
    if os.path.islink(path):
        return True
    return os.path.abspath(path) != os.path.realpath(path)
| Python | 0 |
48e09e446943b695cc7208bc2a7cad7e53437957 | Bump to 0.1.1 since I apparently pushed 0.1.0 at some point =/ | botox/__init__.py | botox/__init__.py | __version__ = "0.1.1"
| __version__ = "0.1.0"
| Python | 0 |
b56eb07e06c41dd46d7adaeb0a9b9863c3e165c6 | Fix mono | executors/mono_executor.py | executors/mono_executor.py | import sys
import os
import re
import errno
from collections import defaultdict
from cptbox import CHROOTSecurity, ALLOW
from cptbox.syscalls import *
from .base_executor import CompiledExecutor
from judgeenv import env
CS_FS = ['.*\.so', '/proc/(?:self/|xen)', '/dev/shm/', '/proc/stat', '/usr/lib/mono',
'/etc/nsswitch.conf$', '/etc/passwd$', '/etc/mono/', '/dev/null$', '.*/.mono/',
'/sys/']
WRITE_FS = ['/proc/self/task/\d+/comm$', '/dev/shm/mono\.\d+$']
UNLINK_FS = re.compile('/dev/shm/mono.\d+$')
class MonoExecutor(CompiledExecutor):
name = 'MONO'
nproc = -1 # If you use Mono on Windows you are doing it wrong.
def get_compiled_file(self):
return self._file('%s.exe' % self.problem)
def get_cmdline(self):
return ['mono', self._executable]
def get_executable(self):
return env['runtime']['mono']
def get_security(self):
fs = CS_FS + [self._dir]
sec = CHROOTSecurity(fs)
sec[sys_sched_getaffinity] = ALLOW
sec[sys_statfs] = ALLOW
sec[sys_ftruncate64] = ALLOW
sec[sys_clock_getres] = ALLOW
sec[sys_socketcall] = ALLOW
sec[sys_sched_yield] = ALLOW
fs = sec.fs_jail
write_fs = re.compile('|'.join(WRITE_FS))
writable = defaultdict(bool)
writable[1] = writable[2] = True
def handle_open(debugger):
file = debugger.readstr(debugger.uarg0)
if fs.match(file) is None:
print>>sys.stderr, 'Not allowed to access:', file
return False
can = write_fs.match(file) is not None
def update():
writable[debugger.result] = can
debugger.on_return(update)
return True
def handle_close(debugger):
writable[debugger.arg0] = False
return True
def handle_dup(debugger):
writable[debugger.arg1] = writable[debugger.arg0]
return True
def handle_write(debugger):
return writable[debugger.arg0]
def handle_ftruncate(debugger):
return writable[debugger.arg0]
def handle_kill(debugger):
# Mono likes to signal other instances of it, but doesn't care if it fails.
def kill_return():
debugger.result = -errno.EPERM
if debugger.arg0 != debugger.pid:
debugger.syscall = debugger.getpid_syscall
debugger.on_return(kill_return)
return True
def unlink(debugger):
path = debugger.readstr(debugger.uarg0)
if UNLINK_FS.match(path) is None:
print 'Not allowed to unlink:', UNLINK_FS
return False
return True
def handle_socket(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
sec[sys_open] = handle_open
sec[sys_close] = handle_close
sec[sys_dup2] = handle_dup
sec[sys_dup3] = handle_dup
sec[sys_write] = handle_write
sec[sys_ftruncate] = handle_ftruncate
sec[sys_kill] = handle_kill
sec[sys_tgkill] = handle_kill
sec[sys_unlink] = unlink
sec[sys_socket] = handle_socket
return sec
@classmethod
def initialize(cls, sandbox=True):
if 'mono' not in env['runtime'] or not os.path.isfile(env['runtime']['mono']):
return False
return super(MonoExecutor, cls).initialize(sandbox=sandbox)
| import sys
import os
import re
import errno
from collections import defaultdict
from cptbox import CHROOTSecurity, ALLOW
from cptbox.syscalls import *
from .base_executor import CompiledExecutor
from judgeenv import env
CS_FS = ['.*\.so', '/proc/(?:self/|xen)', '/dev/shm/', '/proc/stat', '/usr/lib/mono',
'/etc/nsswitch.conf$', '/etc/passwd$', '/etc/mono/', '/dev/null$', '.*/.mono/',
'/sys/']
WRITE_FS = ['/proc/self/task/\d+/comm$', '/dev/shm/mono\.\d+$']
UNLINK_FS = re.compile('/dev/shm/mono.\d+$')
class MonoExecutor(CompiledExecutor):
name = 'MONO'
nproc = -1 # If you use Mono on Windows you are doing it wrong.
def get_compiled_file(self):
return self._file('%s.exe' % self.problem)
def get_cmdline(self):
return ['mono', self._executable]
def get_executable(self):
return env['runtime']['mono']
def get_security(self):
fs = CS_FS + [self._dir]
sec = CHROOTSecurity(fs)
sec[sys_sched_getaffinity] = ALLOW
sec[sys_statfs] = ALLOW
sec[sys_ftruncate64] = ALLOW
sec[sys_clock_getres] = ALLOW
sec[sys_socketcall] = ALLOW
sec[sys_sched_yield] = ALLOW
fs = sec.fs_jail
write_fs = re.compile('|'.join(WRITE_FS))
writable = defaultdict(bool)
writable[1] = writable[2] = True
def handle_open(debugger):
file = debugger.readstr(debugger.uarg0)
if fs.match(file) is None:
print>>sys.stderr, 'Not allowed to access:', file
return False
can = write_fs.match(file) is not None
def update():
writable[debugger.result] = can
debugger.on_return(update)
return True
def handle_close(debugger):
writable[debugger.arg0] = False
return True
def handle_dup(debugger):
writable[debugger.arg1] = writable[debugger.arg0]
return True
def handle_write(debugger):
return writable[debugger.arg0]
def handle_ftruncate(debugger):
return writable[debugger.arg0]
def handle_kill(debugger):
# Mono likes to signal other instances of it, but doesn't care if it fails.
def kill_return():
debugger.result = -errno.EPERM
if debugger.arg0 != debugger.pid:
debugger.syscall = debugger.getpid_syscall
debugger.on_return(kill_return)
return True
def unlink(debugger):
path = debugger.readstr(debugger.uarg0)
if UNLINK_FS.match(path) is None:
print 'Not allowed to unlink:', UNLINK_FS
return False
return True
def handle_socket(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
sec[sys_open] = handle_open
sec[sys_close] = handle_close
sec[sys_dup2] = handle_dup
sec[sys_dup3] = handle_dup
sec[sys_write] = handle_write
sec[sys_ftruncate] = handle_ftruncate
sec[sys_kill] = handle_kill
sec[sys_tgkill] = handle_kill
sec[sys_unlink] = unlink
sec[sys_socket] = handle_socket
return sec
@classmethod
def initialize(cls):
if 'mono' not in env['runtime'] or not os.path.isfile(env['runtime']['mono']):
return False
return super(MonoExecutor, cls).initialize()
| Python | 0.000018 |
68b1b9d824da9225b8b568348a56d5770195d8f8 | Fix method with classmethod | openassessment/xblock/openassesment_template_mixin.py | openassessment/xblock/openassesment_template_mixin.py | class OpenAssessmentTemplatesMixin(object):
"""
This helps to get templates for different type of assessment that is
offered.
"""
@classmethod
def templates(cls):
"""
Returns a list of dictionary field: value objects that describe possible templates.
VALID_ASSESSMENT_TYPES needs to be declared as a class variable to use it.
"""
templates = []
for assesment_type in cls.VALID_ASSESSMENT_TYPES:
template_id = assesment_type
display_name = cls.VALID_ASSESSMENT_TYPES_DISPLAY_NAMES.get(
assesment_type)
template = cls._create_template_dict(template_id, display_name)
templates.append(template)
return templates
@classmethod
def _create_template_dict(cls, template_id, display_name):
"""
Returns a template dictionary which can be used with Studio API
"""
return {
"template_id": template_id,
"metadata": {
"display_name": display_name,
}
}
| class OpenAssessmentTemplatesMixin(object):
"""
This helps to get templates for different type of assessment that is
offered.
"""
@classmethod
def templates(cls):
"""
Returns a list of dictionary field: value objects that describe possible templates.
VALID_ASSESSMENT_TYPES needs to be declared as a class variable to use it.
"""
templates = []
for assesment_type in cls.VALID_ASSESSMENT_TYPES:
template_id = assesment_type
display_name = cls.VALID_ASSESSMENT_TYPES_DISPLAY_NAMES.get(
assesment_type)
template = cls._create_template_dict(template_id, display_name)
templates.append(template)
return templates
def _create_template_dict(cls, template_id, display_name):
"""
Returns a template dictionary which can be used with Studio API
"""
return {
"template_id": template_id,
"metadata": {
"display_name": display_name,
}
}
| Python | 0 |
fa0b16b46fe014be9009bc595bee719cc1fdcc31 | don't divide by zero | apps/amo/management/commands/clean_redis.py | apps/amo/management/commands/clean_redis.py | import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
# It's hard to get Python to clean up the memory from slave.keys(), so
# we'll let the OS do it. argv[0] is a dummy argument, the rest get passed
# like a normal command line.
os.execl(sys.executable, 'argv[0]', sys.argv[0], sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
| import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
# It's hard to get Python to clean up the memory from slave.keys(), so
# we'll let the OS do it. argv[0] is a dummy argument, the rest get passed
# like a normal command line.
os.execl(sys.executable, 'argv[0]', sys.argv[0], sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1)
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
| Python | 0.999407 |
69582dd80518ccc29fc8de9cf5bff54caf62468b | Truncate to exact length | src/sentry/utils/strings.py | src/sentry/utils/strings.py | """
sentry.utils.strings
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import base64
import zlib
def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length - 3] + '...'
return value
def compress(value):
return base64.b64encode(zlib.compress(value))
def decompress(value):
return zlib.decompress(base64.b64decode(value))
def gunzip(value):
return zlib.decompress(value, 16 + zlib.MAX_WBITS)
| """
sentry.utils.strings
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2013 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import base64
import zlib
def truncatechars(value, arg):
"""
Truncates a string after a certain number of chars.
Argument: Number of chars to truncate after.
"""
try:
length = int(arg)
except ValueError: # Invalid literal for int().
return value # Fail silently.
if len(value) > length:
return value[:length] + '...'
return value
def compress(value):
return base64.b64encode(zlib.compress(value))
def decompress(value):
return zlib.decompress(base64.b64decode(value))
def gunzip(value):
return zlib.decompress(value, 16 + zlib.MAX_WBITS)
| Python | 0.000002 |
a8fb92840ff487c61564175efbf637fec538b480 | Add signup view to fix error | features/gestalten/urls.py | features/gestalten/urls.py | from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.providers.facebook import views as facebook_views
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^stadt/gestalten/$',
views.List.as_view(),
name='gestalten'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/$',
views.Update.as_view(),
name='gestalt-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/avatar/$',
views.UpdateAvatar.as_view(),
name='gestalt-avatar-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/background/$',
views.UpdateBackground.as_view(),
name='gestalt-background-update'),
url(
r'^stadt/login/$',
views.Login.as_view(),
name='login'),
url(r'^stadt/login/cancelled/$',
socialaccount_views.login_cancelled,
name='socialaccount_login_cancelled'),
url(r'^stadt/login/error/$',
socialaccount_views.login_error,
name='socialaccount_login_error'),
url(r'^stadt/login/signup/$',
socialaccount_views.signup,
name='socialaccount_signup'),
url(r'^stadt/login/facebook/$',
facebook_views.oauth2_login,
name='facebook_login'),
url(r'^stadt/login/facebook/callback/$',
facebook_views.oauth2_callback,
name='facebook_callback'),
url(r'^stadt/login/facebook/token/$',
facebook_views.login_by_token,
name='facebook_login_by_token'),
]
| from allauth.socialaccount import views as socialaccount_views
from allauth.socialaccount.providers.facebook import views as facebook_views
from django.conf.urls import url
from . import views
urlpatterns = [
url(
r'^stadt/gestalten/$',
views.List.as_view(),
name='gestalten'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/$',
views.Update.as_view(),
name='gestalt-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/avatar/$',
views.UpdateAvatar.as_view(),
name='gestalt-avatar-update'),
url(
r'^stadt/gestalten/(?P<pk>[0-9]+)/edit/background/$',
views.UpdateBackground.as_view(),
name='gestalt-background-update'),
url(
r'^stadt/login/$',
views.Login.as_view(),
name='login'),
url(r'^stadt/login/cancelled/$',
socialaccount_views.login_cancelled,
name='socialaccount_login_cancelled'),
url(r'^stadt/login/error/$',
socialaccount_views.login_error,
name='socialaccount_login_error'),
url(r'^stadt/login/facebook/$',
facebook_views.oauth2_login,
name='facebook_login'),
url(r'^stadt/login/facebook/callback/$',
facebook_views.oauth2_callback,
name='facebook_callback'),
url(r'^stadt/login/facebook/token/$',
facebook_views.login_by_token,
name='facebook_login_by_token'),
]
| Python | 0 |
1d52996a88eb5aed643fe61ee959bd88373401b3 | Throw a linebreak in there upon completion | filebutler_upload/utils.py | filebutler_upload/utils.py | from datetime import datetime, timedelta
import sys
class ProgressBar(object):
def __init__(self, filename, fmt):
self.filename = filename
self.fmt = fmt
self.progress = 0
self.total = 0
self.time_started = datetime.now()
self.time_updated = self.time_started
def __call__(self, current, total):
self.progress = current
self.total = total
final_update = current == total
if datetime.now() - self.time_updated > timedelta(seconds=0.5) or final_update:
output = self.fmt.format(
filename=self.filename,
percent=self.get_percent(),
speed=self.get_mbps()
)
sys.stdout.write('\r' + output)
if final_update:
sys.stdout.write('\n')
sys.stdout.flush()
self.time_updated = datetime.now()
def get_percent(self):
return self.progress / float(self.total)
def get_mbps(self):
time_delta = datetime.now() - self.time_started
if not time_delta.seconds:
return 0
return self.progress * 8 / float(time_delta.seconds) / 1000 / 1000
| from datetime import datetime, timedelta
import sys
class ProgressBar(object):
def __init__(self, filename, fmt):
self.filename = filename
self.fmt = fmt
self.progress = 0
self.total = 0
self.time_started = datetime.now()
self.time_updated = self.time_started
def __call__(self, current, total):
self.progress = current
self.total = total
if datetime.now() - self.time_updated > timedelta(seconds=0.5):
output = self.fmt.format(
filename=self.filename,
percent=self.get_percent(),
speed=self.get_mbps()
)
sys.stdout.write('\r' + output)
sys.stdout.flush()
self.time_updated = datetime.now()
def get_percent(self):
return self.progress / float(self.total)
def get_mbps(self):
time_delta = datetime.now() - self.time_started
if not time_delta.seconds:
return 0
return self.progress * 8 / float(time_delta.seconds) / 1000 / 1000
| Python | 0.000003 |
42eae4634f4bab5649298a65889a4b1a3149d586 | Use new invalidate_many cache invalidation to invalidate the event_push_actions cache appropriately. | synapse/storage/event_push_actions.py | synapse/storage/event_push_actions.py | # -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from twisted.internet import defer
from synapse.util.caches.descriptors import cachedInlineCallbacks
import logging
import ujson as json
logger = logging.getLogger(__name__)
class EventPushActionsStore(SQLBaseStore):
@defer.inlineCallbacks
def set_push_actions_for_event_and_users(self, event, tuples):
"""
:param event: the event set actions for
:param tuples: list of tuples of (user_id, profile_tag, actions)
"""
values = []
for uid, profile_tag, actions in tuples:
values.append({
'room_id': event.room_id,
'event_id': event.event_id,
'user_id': uid,
'profile_tag': profile_tag,
'actions': json.dumps(actions)
})
def f(txn):
for uid, _, __ in tuples:
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(event.room_id, uid)
)
return self._simple_insert_many_txn(txn, "event_push_actions", values)
yield self.runInteraction(
"set_actions_for_event_and_users",
f,
)
@cachedInlineCallbacks(num_args=3, lru=True)
def get_unread_event_push_actions_by_room_for_user(
self, room_id, user_id, last_read_event_id
):
def _get_unread_event_push_actions_by_room(txn):
sql = (
"SELECT stream_ordering, topological_ordering"
" FROM events"
" WHERE room_id = ? AND event_id = ?"
)
txn.execute(
sql, (room_id, last_read_event_id)
)
results = txn.fetchall()
if len(results) == 0:
return []
stream_ordering = results[0][0]
topological_ordering = results[0][1]
sql = (
"SELECT ea.event_id, ea.actions"
" FROM event_push_actions ea, events e"
" WHERE ea.room_id = e.room_id"
" AND ea.event_id = e.event_id"
" AND ea.user_id = ?"
" AND ea.room_id = ?"
" AND ("
" e.topological_ordering > ?"
" OR (e.topological_ordering = ? AND e.stream_ordering > ?)"
")"
)
txn.execute(sql, (
user_id, room_id,
topological_ordering, topological_ordering, stream_ordering
)
)
return [
{"event_id": row[0], "actions": json.loads(row[1])}
for row in txn.fetchall()
]
ret = yield self.runInteraction(
"get_unread_event_push_actions_by_room",
_get_unread_event_push_actions_by_room
)
defer.returnValue(ret)
@defer.inlineCallbacks
def remove_push_actions_for_event_id(self, room_id, event_id):
def f(txn):
# Sad that we have to blow away the cache for the whole room here
txn.call_after(
self.get_unread_event_push_actions_by_room_for_user.invalidate_many,
(room_id,)
)
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id)
)
yield self.runInteraction(
"remove_push_actions_for_event_id",
f
)
| # -*- coding: utf-8 -*-
# Copyright 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ._base import SQLBaseStore
from twisted.internet import defer
from synapse.util.caches.descriptors import cachedInlineCallbacks
import logging
import ujson as json
logger = logging.getLogger(__name__)
class EventPushActionsStore(SQLBaseStore):
@defer.inlineCallbacks
def set_push_actions_for_event_and_users(self, event, tuples):
"""
:param event: the event set actions for
:param tuples: list of tuples of (user_id, profile_tag, actions)
"""
values = []
for uid, profile_tag, actions in tuples:
values.append({
'room_id': event.room_id,
'event_id': event.event_id,
'user_id': uid,
'profile_tag': profile_tag,
'actions': json.dumps(actions)
})
yield self.runInteraction(
"set_actions_for_event_and_users",
self._simple_insert_many_txn,
"event_push_actions",
values
)
@cachedInlineCallbacks(num_args=3)
def get_unread_event_push_actions_by_room_for_user(
self, room_id, user_id, last_read_event_id
):
def _get_unread_event_push_actions_by_room(txn):
sql = (
"SELECT stream_ordering, topological_ordering"
" FROM events"
" WHERE room_id = ? AND event_id = ?"
)
txn.execute(
sql, (room_id, last_read_event_id)
)
results = txn.fetchall()
if len(results) == 0:
return []
stream_ordering = results[0][0]
topological_ordering = results[0][1]
sql = (
"SELECT ea.event_id, ea.actions"
" FROM event_push_actions ea, events e"
" WHERE ea.room_id = e.room_id"
" AND ea.event_id = e.event_id"
" AND ea.user_id = ?"
" AND ea.room_id = ?"
" AND ("
" e.topological_ordering > ?"
" OR (e.topological_ordering = ? AND e.stream_ordering > ?)"
")"
)
txn.execute(sql, (
user_id, room_id,
topological_ordering, topological_ordering, stream_ordering
)
)
return [
{"event_id": row[0], "actions": json.loads(row[1])}
for row in txn.fetchall()
]
ret = yield self.runInteraction(
"get_unread_event_push_actions_by_room",
_get_unread_event_push_actions_by_room
)
defer.returnValue(ret)
@defer.inlineCallbacks
def remove_push_actions_for_event_id(self, room_id, event_id):
def f(txn):
txn.execute(
"DELETE FROM event_push_actions WHERE room_id = ? AND event_id = ?",
(room_id, event_id)
)
yield self.runInteraction(
"remove_push_actions_for_event_id",
f
)
| Python | 0 |
07f96a22afe2d010809d03077d9cdd5ecb43d017 | Update data source unique name migration to support another name of constraint | migrations/0020_change_ds_name_to_non_uniqe.py | migrations/0020_change_ds_name_to_non_uniqe.py | from redash.models import db
import peewee
from playhouse.migrate import PostgresqlMigrator, migrate
if __name__ == '__main__':
migrator = PostgresqlMigrator(db.database)
with db.database.transaction():
# Change the uniqueness constraint on data source name to be (org, name):
success = False
for constraint in ['unique_name', 'data_sources_name']:
try:
db.database.execute_sql("ALTER TABLE data_sources DROP CONSTRAINT {}".format(constraint))
success = True
break
except peewee.ProgrammingError:
db.close_db(None)
if not success:
print "Failed removing uniqueness constraint on data source name."
print "Please verify its name in the schema, update the migration and run again."
exit()
migrate(
migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
)
db.close_db(None)
| from redash.models import db
from playhouse.migrate import PostgresqlMigrator, migrate
if __name__ == '__main__':
migrator = PostgresqlMigrator(db.database)
with db.database.transaction():
# Change the uniqueness constraint on data source name to be (org, name):
db.database.execute_sql("ALTER TABLE data_sources DROP CONSTRAINT unique_name")
migrate(
migrator.add_index('data_sources', ('org_id', 'name'), unique=True)
)
db.close_db(None)
| Python | 0 |
bc5621afa044a486ef7514e1654224102b3cfd54 | Rename chunk list | RecordingApp/app/src/scripts/get_chunks.py | RecordingApp/app/src/scripts/get_chunks.py | """ Script to generate a json file containing book name, number of
chapters, number of chunks """
import json
import urllib.request
import re
RESULT_JSON_NAME = "chunks.json"
with open("catalog.json") as file:
DATA = json.load(file)
OUTPUT = []
#skip obs for now, loop over all books
for x in range(1, 67):
#gives book name and order (the books are stored out of order in the json)
slug = DATA[x]["slug"]
sort = DATA[x]["sort"]
#Get languages.json
url_lang_cat = DATA[x]["lang_catalog"]
response_lang_cat = urllib.request.urlopen(url_lang_cat)
lang_catalog = json.loads(response_lang_cat.read().decode('utf-8'))
name = lang_catalog[0]["project"]["name"]
#Get resources.json
#0 is for udb, are chunks the same for both?
url_res = lang_catalog[0]["res_catalog"]
response_res = urllib.request.urlopen(url_res)
res_cat = json.loads(response_res.read().decode('utf-8'))
#Get the usfm file
url_usfm = res_cat[0]["usfm"]
response_usfm = urllib.request.urlopen(url_usfm)
usfm_data = response_usfm.read().decode('utf-8')
lines = usfm_data.splitlines()
#keep a count of \c and \s5 tags (chapter and chunk respectively)
chapter = 0
num_chunks = 0
chapters_in_book = []
for line in lines:
chunk_match = re.search(r'\\s5', line)
#add to the number of chunks seen so far
if chunk_match:
num_chunks += 1
#on a new chapter, append the number of chunks tallied and reset the count
chapter_match = re.search(r'\\c', line)
if chapter_match:
chapters_in_book.append(num_chunks)
num_chunks = 0
chapter += 1
#append the last chapter
chapters_in_book.append(num_chunks+1)
#Account for the off by one introduced from chunks coming before chapters
chunk_list_fixed = []
length = len(chapters_in_book)-1
#eliminate chapter "0"
for i in range(length):
chunk_list_fixed.append(chapters_in_book[i+1])
#create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['sort'] = sort
book['chapters'] = len(chunk_list_fixed)
book['chunks'] = chunk_list_fixed
#add to the list of books
OUTPUT.append(book)
break # DEBUG -- only process one book for testing
#output all book data to a json file
with open(RESULT_JSON_NAME, 'w') as outfile:
json.dump(OUTPUT, outfile)
| """ Script to generate a json file containing book name, number of
chapters, number of chunks """
import json
import urllib.request
import re
RESULT_JSON_NAME = "chunks.json"
with open("catalog.json") as file:
DATA = json.load(file)
OUTPUT = []
#skip obs for now, loop over all books
for x in range(1, 67):
#gives book name and order (the books are stored out of order in the json)
slug = DATA[x]["slug"]
sort = DATA[x]["sort"]
#Get languages.json
url_lang_cat = DATA[x]["lang_catalog"]
response_lang_cat = urllib.request.urlopen(url_lang_cat)
lang_catalog = json.loads(response_lang_cat.read().decode('utf-8'))
name = lang_catalog[0]["project"]["name"]
#Get resources.json
#0 is for udb, are chunks the same for both?
url_res = lang_catalog[0]["res_catalog"]
response_res = urllib.request.urlopen(url_res)
res_cat = json.loads(response_res.read().decode('utf-8'))
#Get the usfm file
url_usfm = res_cat[0]["usfm"]
response_usfm = urllib.request.urlopen(url_usfm)
usfm_data = response_usfm.read().decode('utf-8')
lines = usfm_data.splitlines()
#keep a count of \c and \s5 tags (chapter and chunk respectively)
chapter = 0
num_chunks = 0
chunk_list = []
for line in lines:
chunk_match = re.search(r'\\s5', line)
#add to the number of chunks seen so far
if chunk_match:
num_chunks += 1
#on a new chapter, append the number of chunks tallied and reset the count
chapter_match = re.search(r'\\c', line)
if chapter_match:
chunk_list.append(num_chunks)
num_chunks = 0
chapter += 1
#append the last chapter
chunk_list.append(num_chunks+1)
#Account for the off by one introduced from chunks coming before chapters
chunk_list_fixed = []
length = len(chunk_list)-1
#eliminate chapter "0"
for i in range(length):
chunk_list_fixed.append(chunk_list[i+1])
#create a dictionary to store the book's data
book = {}
book['slug'] = slug
book['name'] = name
book['sort'] = sort
book['chapters'] = len(chunk_list_fixed)
book['chunks'] = chunk_list_fixed
#add to the list of books
OUTPUT.append(book)
#output all book data to a json file
with open(RESULT_JSON_NAME, 'w') as outfile:
json.dump(OUTPUT, outfile)
| Python | 0.000004 |
d949c21c4b0a54a9a697a07bf12e22a98dc59ff1 | Add `attach` method so we can wrap apps like WSGI middleware | flask_mustache/__init__.py | flask_mustache/__init__.py | # flask-mustache Flask plugin
import os
from jinja2 import Template
from flask import current_app, Blueprint
__all__ = ('FlaskMustache',)
mustache_app = Blueprint('mustache', __name__, static_folder='static')
class FlaskMustache(object):
"Wrapper to inject Mustache stuff into Flask"
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.register_blueprint(mustache_app)
# set up global `mustache` function
app.jinja_env.globals['mustache'] = mustache
# attach context processor with template content
app.context_processor(mustache_templates)
@staticmethod
def attach(app):
"This is written so it can work like WSGI middleware"
# noop
_ = FlaskMustache(app)
return app
# context processor
def mustache_templates():
"Returns the content of all Mustache templates in the Jinja environment"
# short circuit development
if current_app.debug:
return {}
# get all the templates this env knows about
all_templates = current_app.jinja_loader.list_templates()
mustache_templates = {}
for template_name in all_templates:
# TODO: make this configurable
# we only want a specific extension
if template_name.endswith('mustache'):
# throw away everything except the file content
template, _, _ = \
current_app.jinja_loader.get_source(current_app.jinja_env,
template_name)
mustache_templates[template_name] = template
# now we need to render the templates
template_string = """{% if mustache_templates %}
{% for template_name, content in mustache_templates.items() %}
<script type="text/x-mustache-template" id="{{ template_name|replace('/', '-') }}" charset="utf-8">
{{ content|e }}
</script>
{% endfor %}
{% endif %}"""
context = {
'mustache_templates': mustache_templates
}
# returns the full HTML, ready to use in JavaScript
return {'mustache_templates': Template(template_string).render(context)}
# template helper function
def mustache(template, **kwargs):
"""Usage:
{{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}
This uses the regular Jinja2 loader to find the templates, so your *.mustache files
will need to be available in that path.
"""
template, _, _ = current_app.jinja_loader.get_source(current_app.jinja_env, template)
return pystache.render(template, kwargs, encoding='utf-8')
| # flask-mustache Flask plugin
import os
from jinja2 import Template
from flask import current_app, Blueprint
__all__ = ('FlaskMustache',)
mustache_app = Blueprint('mustache', __name__, static_folder='static')
class FlaskMustache(object):
"Wrapper to inject Mustache stuff into Flask"
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.register_blueprint(mustache_app)
# set up global `mustache` function
app.jinja_env.globals['mustache'] = mustache
# attach context processor with template content
app.context_processor(mustache_templates)
# context processor
def mustache_templates():
"Returns the content of all Mustache templates in the Jinja environment"
# short circuit development
if current_app.debug:
return {}
# get all the templates this env knows about
all_templates = current_app.jinja_loader.list_templates()
mustache_templates = {}
for template_name in all_templates:
# TODO: make this configurable
# we only want a specific extension
if template_name.endswith('mustache'):
# throw away everything except the file content
template, _, _ = \
current_app.jinja_loader.get_source(current_app.jinja_env,
template_name)
mustache_templates[template_name] = template
# now we need to render the templates
template_string = """{% if mustache_templates %}
{% for template_name, content in mustache_templates.items() %}
<script type="text/x-mustache-template" id="{{ template_name|replace('/', '-') }}" charset="utf-8">
{{ content|e }}
</script>
{% endfor %}
{% endif %}"""
context = {
'mustache_templates': mustache_templates
}
# returns the full HTML, ready to use in JavaScript
return {'mustache_templates': Template(template_string).render(context)}
# template helper function
def mustache(template, **kwargs):
"""Usage:
{{ mustache('path/to/whatever.mustache', key=value, key1=value1.. keyn=valuen) }}
This uses the regular Jinja2 loader to find the templates, so your *.mustache files
will need to be available in that path.
"""
template, _, _ = current_app.jinja_loader.get_source(current_app.jinja_env, template)
return pystache.render(template, kwargs, encoding='utf-8')
| Python | 0 |
c1c5fbdc2d7cda67668df38d91a2becf546fa852 | Update transform config in development | backdrop/transformers/config/development.py | backdrop/transformers/config/development.py | TRANSFORMER_AMQP_URL = 'amqp://transformer:notarealpw@localhost:5672/%2Ftransformations'
STAGECRAFT_URL = 'http://localhost:3103'
STAGECRAFT_OAUTH_TOKEN = 'development-oauth-access-token'
BACKDROP_READ_URL = 'http://backdrop-read.dev.gov.uk/data'
BACKDROP_WRITE_URL = 'http://backdrop-write.dev.gov.uk/data'
| TRANSFORMER_AMQP_URL = 'amqp://transformer:notarealpw@localhost:5672/%2Ftransformations'
STAGECRAFT_URL = 'http://localhost:3204'
STAGECRAFT_OAUTH_TOKEN = 'development-oauth-access-token'
BACKDROP_READ_URL = 'http://localhost:3038/data'
BACKDROP_WRITE_URL = 'http://localhost:3039/data'
| Python | 0 |
13c74e663dd511f53e6c0b1bb37b5baa12bba016 | add tokens for fco transaction buckets | backdrop/write/config/development_tokens.py | backdrop/write/config/development_tokens.py | TOKENS = {
'_foo_bucket': '_foo_bucket-bearer-token',
'bucket': 'bucket-bearer-token',
'foo': 'foo-bearer-token',
'foo_bucket': 'foo_bucket-bearer-token',
'licensing': 'licensing-bearer-token',
'licensing_journey': 'licensing_journey-bearer-token',
'pay_legalisation_post_journey': 'pay_legalisation_post_journey-bearer-token',
'pay_legalisation_drop_off_journey': 'pay_legalisation_drop_off_journey-bearer-token',
'pay_register_birth_abroad_journey': 'pay_register_birth_abroad_journey-bearer-token',
'pay_register_death_abroad_journey': 'pay_register_death_abroad_journey-bearer-token',
'pay_foreign_marriage_certificates_journey': 'pay_foreign_marriage_certificates_journey-bearer-token',
'deposit_foreign_marriage_journey': 'deposit_foreign_marriage_journey-bearer-token'
}
| TOKENS = {
'_foo_bucket': '_foo_bucket-bearer-token',
'bucket': 'bucket-bearer-token',
'foo': 'foo-bearer-token',
'foo_bucket': 'foo_bucket-bearer-token',
'licensing': 'licensing-bearer-token',
'licensing_journey': 'licensing_journey-bearer-token'
}
| Python | 0 |
700af658169cdb861ff15341c3a03443f207c02e | Update __init__.py | tendrl/node_agent/manager/__init__.py | tendrl/node_agent/manager/__init__.py | import signal
import threading
from tendrl.commons import manager as commons_manager
from tendrl.commons import TendrlNS
from tendrl.commons.utils import log_utils as logger
from tendrl.node_agent.provisioner.gluster.manager import \
ProvisioningManager as GlusterProvisioningManager
from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS
class NodeAgentManager(commons_manager.Manager):
def __init__(self):
# Initialize the state sync thread which gets the underlying
# node details and pushes the same to etcd
super(NodeAgentManager, self).__init__(
NS.state_sync_thread,
message_handler_thread=NS.message_handler_thread
)
def main():
# NS.node_agent contains the config object,
# hence initialize it before any other NS
node_agent.NodeAgentNS()
# Init NS.tendrl
TendrlNS()
# Init NS.provisioning
# TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
# provisioning.ProvisioningNS()
# Init NS.integrations.ceph
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
# ceph.CephIntegrationNS()
# Init NS.integrations.gluster
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
GlusterIntegrationNS()
# Compile all definitions
NS.compiled_definitions = \
NS.node_agent.objects.CompiledDefinitions()
NS.compiled_definitions.merge_definitions([
NS.tendrl.definitions, NS.node_agent.definitions,
NS.integrations.gluster.definitions])
NS.node_agent.compiled_definitions = NS.compiled_definitions
# Every process needs to set a NS.type
# Allowed types are "node", "integration", "monitoring"
NS.type = "node"
NS.first_node_inventory_sync = True
NS.state_sync_thread = node_sync.NodeAgentSyncThread()
NS.compiled_definitions.save()
NS.node_context.save()
NS.tendrl_context.save()
NS.node_agent.definitions.save()
# NS.integrations.ceph.definitions.save()
NS.node_agent.config.save()
NS.publisher_id = "node_agent"
NS.message_handler_thread = MessageHandler()
NS.gluster_provisioner = GlusterProvisioningManager(
NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
'gluster_provisioner']
)
if NS.config.data.get("with_internal_profiling", False):
from tendrl.commons import profiler
profiler.start()
NS.gluster_sds_sync_running = False
m = NodeAgentManager()
m.start()
complete = threading.Event()
def shutdown(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: stopping"}
)
complete.set()
m.stop()
if NS.gluster_sds_sync_running:
NS.gluster_integrations_sync_thread.stop()
def reload_config(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: SIGHUP"}
)
NS.node_agent.ns.setup_common_objects()
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGHUP, reload_config)
while not complete.is_set():
complete.wait(timeout=1)
if __name__ == "__main__":
main()
| import signal
import threading
from tendrl.commons import manager as commons_manager
from tendrl.commons import TendrlNS
from tendrl.commons.utils import log_utils as logger
from tendrl.node_agent.provisioner.gluster.manager import \
ProvisioningManager as GlusterProvisioningManager
from tendrl import node_agent
from tendrl.node_agent.message.handler import MessageHandler
from tendrl.node_agent import node_sync
from tendrl.integrations.gluster import GlusterIntegrationNS
class NodeAgentManager(commons_manager.Manager):
def __init__(self):
# Initialize the state sync thread which gets the underlying
# node details and pushes the same to etcd
super(NodeAgentManager, self).__init__(
NS.state_sync_thread,
message_handler_thread=NS.message_handler_thread
)
node_sync.platform_detect.sync()
node_sync.sds_detect.sync()
def main():
# NS.node_agent contains the config object,
# hence initialize it before any other NS
node_agent.NodeAgentNS()
# Init NS.tendrl
TendrlNS()
# Init NS.provisioning
# TODO(team) remove NS.provisioner and use NS.provisioning.{ceph, gluster}
# provisioning.ProvisioningNS()
# Init NS.integrations.ceph
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
# ceph.CephIntegrationNS()
# Init NS.integrations.gluster
# TODO(team) add all short circuited ceph(import/create) NS.tendrl.flows
# to NS.integrations.ceph
GlusterIntegrationNS()
# Compile all definitions
NS.compiled_definitions = \
NS.node_agent.objects.CompiledDefinitions()
NS.compiled_definitions.merge_definitions([
NS.tendrl.definitions, NS.node_agent.definitions,
NS.integrations.gluster.definitions])
NS.node_agent.compiled_definitions = NS.compiled_definitions
# Every process needs to set a NS.type
# Allowed types are "node", "integration", "monitoring"
NS.type = "node"
NS.first_node_inventory_sync = True
NS.state_sync_thread = node_sync.NodeAgentSyncThread()
NS.compiled_definitions.save()
NS.node_context.save()
NS.tendrl_context.save()
NS.node_agent.definitions.save()
# NS.integrations.ceph.definitions.save()
NS.node_agent.config.save()
NS.publisher_id = "node_agent"
NS.message_handler_thread = MessageHandler()
NS.gluster_provisioner = GlusterProvisioningManager(
NS.tendrl.definitions.get_parsed_defs()["namespace.tendrl"][
'gluster_provisioner']
)
if NS.config.data.get("with_internal_profiling", False):
from tendrl.commons import profiler
profiler.start()
NS.gluster_sds_sync_running = False
m = NodeAgentManager()
m.start()
complete = threading.Event()
def shutdown(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: stopping"}
)
complete.set()
m.stop()
if NS.gluster_sds_sync_running:
NS.gluster_integrations_sync_thread.stop()
def reload_config(signum, frame):
logger.log(
"debug",
NS.publisher_id,
{"message": "Signal handler: SIGHUP"}
)
NS.node_agent.ns.setup_common_objects()
signal.signal(signal.SIGTERM, shutdown)
signal.signal(signal.SIGINT, shutdown)
signal.signal(signal.SIGHUP, reload_config)
while not complete.is_set():
complete.wait(timeout=1)
if __name__ == "__main__":
main()
| Python | 0.000072 |
7b27423bef813befe1bb9dd5cb14843d847bff42 | Fix mailhog settings | backend/project_name/settings/local_base.py | backend/project_name/settings/local_base.py | from .base import * # noqa
DEBUG = True
HOST = "http://localhost:8000"
SECRET_KEY = "secret"
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir_join("db.sqlite3"),}
}
STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"
MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"
DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
AUTH_PASSWORD_VALIDATORS = [] # allow easy passwords only on local
# Celery
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True
# Email settings for mailhog
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = 'mailhog'
EMAIL_PORT = 1025
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"standard": {"format": "%(levelname)-8s [%(asctime)s] %(name)s: %(message)s"},},
"handlers": {
"console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "standard",},
},
"loggers": {
"": {"handlers": ["console"], "level": "INFO"},
"celery": {"handlers": ["console"], "level": "INFO"},
},
}
JS_REVERSE_JS_MINIFY = False
| from .base import * # noqa
DEBUG = True
HOST = "http://localhost:8000"
SECRET_KEY = "secret"
DATABASES = {
"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": base_dir_join("db.sqlite3"),}
}
STATIC_ROOT = base_dir_join("staticfiles")
STATIC_URL = "/static/"
MEDIA_ROOT = base_dir_join("mediafiles")
MEDIA_URL = "/media/"
DEFAULT_FILE_STORAGE = "django.core.files.storage.FileSystemStorage"
STATICFILES_STORAGE = "django.contrib.staticfiles.storage.StaticFilesStorage"
AUTH_PASSWORD_VALIDATORS = [] # allow easy passwords only on local
# Celery
CELERY_TASK_ALWAYS_EAGER = True
CELERY_TASK_EAGER_PROPAGATES = True
# Email
EMAIL_BACKEND = "django.core.mail.backends.smtp.EmailBackend"
EMAIL_HOST = config("EMAIL_HOST")
EMAIL_HOST_USER = config("EMAIL_HOST_USER")
EMAIL_HOST_PASSWORD = config("EMAIL_HOST_PASSWORD")
EMAIL_PORT = 587
EMAIL_USE_TLS = True
# Logging
LOGGING = {
"version": 1,
"disable_existing_loggers": False,
"formatters": {"standard": {"format": "%(levelname)-8s [%(asctime)s] %(name)s: %(message)s"},},
"handlers": {
"console": {"level": "DEBUG", "class": "logging.StreamHandler", "formatter": "standard",},
},
"loggers": {
"": {"handlers": ["console"], "level": "INFO"},
"celery": {"handlers": ["console"], "level": "INFO"},
},
}
JS_REVERSE_JS_MINIFY = False
| Python | 0 |
bce815a12a3ce18d23644c08beda5f97271e559e | update token | forge/tests/test_github.py | forge/tests/test_github.py | # Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, os
from forge.tasks import TaskError
from forge.github import Github
from .common import mktree
from tempfile import mkdtemp
from shutil import rmtree
# github will deactivate this token if it detects it in our source, so
# we obfuscate it slightly
numbers = [48, 49, 51, 99, 99, 101, 52, 51, 48, 53, 54, 100, 57, 56, 97, 50,
55, 97, 54, 53, 55, 55, 49, 48, 49, 55, 48, 54, 55, 102, 100, 48,
102, 57, 49, 51, 97, 48, 102, 51]
token = "".join(chr(c) for c in numbers)
def test_list():
gh = Github(token)
repos = gh.list("forgeorg")
assert repos == [(u'forgeorg/foo', u'https://github.com/forgeorg/foo.git')]
def test_pull():
gh = Github(token)
repos = gh.list("forgeorg")
name, url = repos[0]
output = mkdtemp()
gh.pull(url, os.path.join(output, name))
assert os.path.exists(os.path.join(output, name, "README.md"))
rmtree(output)
def test_exists():
gh = Github(token)
assert gh.exists("https://github.com/forgeorg/foo.git")
assert not gh.exists("https://github.com/forgeorg/nosuchrepo.git")
unauth_gh = Github(None)
try:
unauth_gh.exists("https://github.com/forgeorg/nosuchrepo.git")
assert False
except TaskError, e:
assert "Authentication failed" in str(e)
def test_clone():
gh = Github(token)
output = mkdtemp()
gh.clone("https://github.com/forgeorg/foo.git", os.path.join(output, 'foo'))
assert os.path.exists(os.path.join(output, 'foo', "README.md"))
rmtree(output)
| # Copyright 2017 datawire. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time, os
from forge.tasks import TaskError
from forge.github import Github
from .common import mktree
from tempfile import mkdtemp
from shutil import rmtree
token = "8c91e6c758b16e7b5d7f0676d3475f9fa33693dd"
def test_list():
gh = Github(token)
repos = gh.list("forgeorg")
assert repos == [(u'forgeorg/foo', u'https://github.com/forgeorg/foo.git')]
def test_pull():
gh = Github(token)
repos = gh.list("forgeorg")
name, url = repos[0]
output = mkdtemp()
gh.pull(url, os.path.join(output, name))
assert os.path.exists(os.path.join(output, name, "README.md"))
rmtree(output)
def test_exists():
gh = Github(token)
assert gh.exists("https://github.com/forgeorg/foo.git")
assert not gh.exists("https://github.com/forgeorg/nosuchrepo.git")
unauth_gh = Github(None)
try:
unauth_gh.exists("https://github.com/forgeorg/nosuchrepo.git")
assert False
except TaskError, e:
assert "Authentication failed" in str(e)
def test_clone():
gh = Github(token)
output = mkdtemp()
gh.clone("https://github.com/forgeorg/foo.git", os.path.join(output, 'foo'))
assert os.path.exists(os.path.join(output, 'foo', "README.md"))
rmtree(output)
| Python | 0.000001 |
9722390c4fa1a6bb5b9e8d66a53219bcc2447b39 | Use zoom 13 tiles for station tests, so that the station is more likely to not be the only one in the tile, which provides a better test of the rank. | test/507-routes-via-stop-positions.py | test/507-routes-via-stop-positions.py | stations = [
(13, 2412, 3078, 'Penn Station', 895371274L, 1, [
'2100-2297', # Acela Express
'68-69', # Adirondack
'50-51', # Cardinal
'79-80', # Carolinian
'19-20', # Crescent
'230-296', # Empire Service
'600-674', # Keystone Service
'63', # Maple Leaf (Northbound)
'64', # Maple Leaf (Southbound)
'89-90', # Palmetto
'42-43', # Pennsylvanian
'97-98', # Silver Meteor
'91-92', # Silver Star
'54-57', # Vermonter
]),
(13, 2352, 3122, 'Camden Station', 845910705L, 5, ['Camden Line']),
(13, 1309, 3166, 'Castro MUNI', 297863017L, 1, ['K', 'L', 'M', 'T']),
(13, 2385, 3102, '30th Street', 32272623L, 1, [
'2100-2297', # Acela Express
'79-80', # Carolinian
'19-20', # Crescent
'600-674', # Keystone Service
'82-198', # Northeast Regional (Boston/Springfield & Lynchburg)
'89-90', # Palmetto
'Chestnut Hill West Line', # SEPTA - Chestnut Hill West Line
'Cynwyd Line', # SEPTA - Cynwyd Line
'Media/Elwyn Line', # SEPTA - Media/Elwyn Line
'Trenton Line', # SEPTA - Trenton Line
'Wilmington/Newark Line', # SEPTA - Wilmington/Newark Line
'91-92', # Silver Star
])
]
for z, x, y, name, osm_id, expected_rank, expected_routes in stations:
with features_in_tile_layer(z, x, y, 'pois') as pois:
found = False
for poi in pois:
props = poi['properties']
if props['id'] == osm_id:
found = True
routes = props.get('transit_routes', list())
rank = props['kind_tile_rank']
if rank > expected_rank:
raise Exception("Found %r, and was expecting a rank "
"of %r or less, but got %r."
% (name, expected_rank, rank))
for r in expected_routes:
count = 0
for route in routes:
if r in route:
count = count + 1
if count == 0:
raise Exception("Found %r, and was expecting at "
"least one %r route, but found "
"none. Routes: %r"
% (name, r, routes))
if not found:
raise Exception("Did not find %r (ID=%r) in tile." % (name, osm_id))
| stations = [
(17, 38596, 49262, 'Penn Station', 895371274L, 1, [
'2100-2297', # Acela Express
'68-69', # Adirondack
'50-51', # Cardinal
'79-80', # Carolinian
'19-20', # Crescent
'230-296', # Empire Service
'600-674', # Keystone Service
'63', # Maple Leaf (Northbound)
'64', # Maple Leaf (Southbound)
'89-90', # Palmetto
'42-43', # Pennsylvanian
'97-98', # Silver Meteor
'91-92', # Silver Star
'54-57', # Vermonter
]),
(17, 37639, 49960, 'Camden Station', 845910705L, 2, ['Camden Line']),
(17, 20958, 50667, 'Castro MUNI', 297863017L, 1, ['K', 'L', 'M', 'T']),
(17, 38163, 49642, '30th Street', 32272623L, 1, [
'2100-2297', # Acela Express
'79-80', # Carolinian
'19-20', # Crescent
'600-674', # Keystone Service
'82-198', # Northeast Regional (Boston/Springfield & Lynchburg)
'89-90', # Palmetto
'Chestnut Hill West Line', # SEPTA - Chestnut Hill West Line
'Cynwyd Line', # SEPTA - Cynwyd Line
'Media/Elwyn Line', # SEPTA - Media/Elwyn Line
'Trenton Line', # SEPTA - Trenton Line
'Wilmington/Newark Line', # SEPTA - Wilmington/Newark Line
'91-92', # Silver Star
])
]
for z, x, y, name, osm_id, expected_rank, expected_routes in stations:
with features_in_tile_layer(z, x, y, 'pois') as pois:
found = False
for poi in pois:
props = poi['properties']
if props['id'] == osm_id:
found = True
routes = props.get('transit_routes', list())
rank = props['kind_tile_rank']
if rank > expected_rank:
raise Exception("Found %r, and was expecting a rank "
"of %r or less, but got %r."
% (name, expected_rank, rank))
for r in expected_routes:
count = 0
for route in routes:
if r in route:
count = count + 1
if count == 0:
raise Exception("Found %r, and was expecting at "
"least one %r route, but found "
"none. Routes: %r"
% (name, r, routes))
if not found:
raise Exception("Did not find %r (ID=%r) in tile." % (name, osm_id))
| Python | 0 |
307e0c4bbd7e76c9a8becf39df539413fef20e60 | Add line magic %cpp | bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py | bindings/pyroot/ROOTaaS/iPyROOT/cppmagic.py | import IPython.core.magic as ipym
import ROOT
import utils
@ipym.magics_class
class CppMagics(ipym.Magics):
@ipym.line_cell_magic
def cpp(self, line, cell=None):
"""Inject into root."""
if cell is None: # this is a line magic
utils.processCppCode(line)
else:
utils.processCppCode(cell)
def load_ipython_extension(ipython):
ipython.register_magics(CppMagics)
| import IPython.core.magic as ipym
import ROOT
import utils
@ipym.magics_class
class CppMagics(ipym.Magics):
@ipym.cell_magic
def cpp(self, line, cell=None):
"""Inject into root."""
if cell:
utils.processCppCode(cell)
def load_ipython_extension(ipython):
ipython.register_magics(CppMagics)
| Python | 0.000003 |
0a3164a47854ed17765d567afc7fc6a05aa0fd21 | Fix bug in commonsdownloader with argument names | commonsdownloader/commonsdownloader.py | commonsdownloader/commonsdownloader.py | #!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons."""
import os
import logging
import argparse
from thumbnaildownload import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
def download_with_file_list(file_list, output_path):
"""Download files from a given textfile list."""
for (file_name, width) in get_file_names_from_textfile(file_list):
download_file(file_name, output_path, width=width)
def download_from_files(files, output_path, width):
"""Download files from a given file list."""
for file_name in files:
download_file(file_name, output_path, width=width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
parser.add_argument("-v",
action="count",
dest="verbose",
default=0,
help="Verbosity level. -v for INFO, -vv for DEBUG")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging.basicConfig(level=logging_map[args.verbose])
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help()
if __name__ == "__main__":
main()
| #!/usr/bin/python
# -=- encoding: latin-1 -=-
"""Download files from Wikimedia Commons."""
import os
import logging
import argparse
from thumbnaildownload import download_file
def get_file_names_from_textfile(textfile_handler):
"""Yield the file names and widths by parsing a given text fileahandler."""
for line in textfile_handler:
line = line.rstrip()
try:
(image_name, width) = line.split(',')
except ValueError:
image_name = line
width = None
yield (image_name, width)
def download_with_file_list(file_list, output_path):
"""Download files from a given textfile list."""
for (file_name, width) in get_file_names_from_textfile(args.file_list):
download_file(file_name, args.output_path, width=width)
def download_from_files(files, output_path, width):
"""Download files from a given file list."""
for file_name in files:
download_file(file_name, output_path, width=width)
class Folder(argparse.Action):
"""An argparse action for directories."""
def __call__(self, parser, namespace, values, option_string=None):
prospective_dir = values
if not os.path.isdir(prospective_dir):
msg = "Folder:{0} is not a valid path".format(prospective_dir)
raise argparse.ArgumentTypeError(msg)
else:
setattr(namespace, self.dest, prospective_dir)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Download a bunch of thumbnails from Wikimedia Commons"
parser = ArgumentParser(description=description)
parser.add_argument("files",
nargs='*',
metavar="FILES",
help='A list of filenames')
parser.add_argument("-l", "--list", metavar="LIST",
dest="file_list",
type=argparse.FileType('r'),
help='A list of files <filename,width>')
parser.add_argument("-o", "--output", metavar="FOLDER",
dest="output_path",
action=Folder,
default=os.getcwd(),
help='The directory to download the files to')
parser.add_argument("-w", "--width",
dest="width",
type=int,
default=100,
help='The width of the thumbnail (default: 100)')
parser.add_argument("-v",
action="count",
dest="verbose",
default=0,
help="Verbosity level. -v for INFO, -vv for DEBUG")
args = parser.parse_args()
logging_map = {0: logging.WARNING,
1: logging.INFO,
2: logging.DEBUG}
logging.basicConfig(level=logging_map[args.verbose])
logging.info("Starting")
if args.file_list:
download_from_file_list(args.file_list, args.output_path)
elif args.files:
download_from_files(args.files, args.output_path, args.width)
else:
parser.print_help()
if __name__ == "__main__":
main()
| Python | 0 |
082cc2590f7b263e37fe214e3c4e6fc86039327a | correct pyunit | h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py | h2o-py/tests/testdir_algos/deeplearning/pyunit_tweedie_weightsDeeplearning.py | import sys
sys.path.insert(1, "../../../")
import h2o
def tweedie_weights(ip,port):
data = h2o.import_frame(h2o.locate("smalldata/glm_test/cancar_logIn.csv"))
data["C1M3"] = (data["Class"] == 1 and data["Merit"] == 3).asfactor()
data["C3M3"] = (data["Class"] == 3 and data["Merit"] == 3).asfactor()
data["C4M3"] = (data["Class"] == 4 and data["Merit"] == 3).asfactor()
data["C1M2"] = (data["Class"] == 1 and data["Merit"] == 2).asfactor()
data["Merit"] = data["Merit"].asfactor()
data["Class"] = data["Class"].asfactor()
loss = data["Cost"] / data["Insured"]
loss.setName(0,"Loss")
cancar = loss.cbind(data)
# Without weights
myX = ["Merit","Class","C1M3","C4M3"]
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0)
mean_residual_deviance = dl.mean_residual_deviance()
# With weights
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0,weights_column = "Insured",training_frame = cancar)
if __name__ == "__main__":
h2o.run_test(sys.argv, tweedie_weights)
| import sys
sys.path.insert(1, "../../../")
import h2o
#def tweedie_weights(ip,port):
h2o.init()
data = h2o.import_frame(h2o.locate("smalldata/glm_test/cancar_logIn.csv"))
data["C1M3"] = (data["Class"] == 1 and data["Merit"] == 3).asfactor()
data["C3M3"] = (data["Class"] == 3 and data["Merit"] == 3).asfactor()
data["C4M3"] = (data["Class"] == 4 and data["Merit"] == 3).asfactor()
data["C1M2"] = (data["Class"] == 1 and data["Merit"] == 2).asfactor()
data["Merit"] = data["Merit"].asfactor()
data["Class"] = data["Class"].asfactor()
loss = data["Cost"] / data["Insured"]
loss.setName(0,"Loss")
cancar = loss.cbind(data)
# Without weights
myX = ["Merit","Class","C1M3","C4M3"]
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0)
mean_residual_deviance = dl.mean_residual_deviance()
# With weights
dl = h2o.deeplearning(x = cancar[myX],y = cancar["Loss"],distribution ="tweedie",hidden = [1],epochs = 1000,
train_samples_per_iteration = -1,reproducible = True,activation = "Tanh",balance_classes = False,
force_load_balance = False, seed = 2353123,tweedie_power = 1.5,score_training_samples = 0,
score_validation_samples = 0,weights_column = "Insured",training_frame = cancar)
if __name__ == "__main__":
h2o.run_test(sys.argv, tweedie_weights)
| Python | 0.998797 |
169dda227f85f77ac52a4295e8fb7acd1b3184f5 | Make byte-separator mandatory in MAC addresses | core/observables/mac_address.py | core/observables/mac_address.py | from __future__ import unicode_literals
import re
from core.observables import Observable
class MacAddress(Observable):
regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]){5,7}([0-9A-Fa-f]{1,2})))'
exclude_fields = Observable.exclude_fields
DISPLAY_FIELDS = Observable.DISPLAY_FIELDS
@classmethod
def is_valid(cls, match):
value = match.group('search')
return len(value) > 0
def normalize(self):
value = re.sub(r'[.:\-]', '', self.value).upper()
self.value = ':'.join(
value[i:i + 2] for i in xrange(0, len(value), 2)
)
| from __future__ import unicode_literals
import re
from core.observables import Observable
class MacAddress(Observable):
regex = r'(?P<search>(([0-9A-Fa-f]{1,2}[.:-]?){5,7}([0-9A-Fa-f]{1,2})))'
exclude_fields = Observable.exclude_fields
DISPLAY_FIELDS = Observable.DISPLAY_FIELDS
@classmethod
def is_valid(cls, match):
value = match.group('search')
return len(value) > 0
def normalize(self):
self.value = re.sub(r'[.:\-]', '', self.value)
self.value = self.value.upper()
self.value = \
':'.join([self.value[i:i + 2] for i in range(0, len(self.value), 2)])
| Python | 0 |
a28f8fe4427c12c2523b16903325d0362b53123e | Drop version dependency | acme/setup.py | acme/setup.py | import sys
from setuptools import setup
from setuptools import find_packages
version = '0.2.0.dev0'
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
# Connection.set_tlsext_host_name (>=0.13)
'PyOpenSSL>=0.13',
'pyrfc3339',
'pytz',
'requests',
'setuptools', # pkg_resources
'six',
'werkzeug',
]
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
testing_extras = [
'nose',
'tox',
]
setup(
name='acme',
version=version,
description='ACME protocol implementation in Python',
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
'testing': testing_extras,
},
entry_points={
'console_scripts': [
'jws = acme.jose.jws:CLI.run',
],
},
test_suite='acme',
)
| import sys
from setuptools import setup
from setuptools import find_packages
version = '0.2.0.dev0'
install_requires = [
# load_pem_private/public_key (>=0.6)
# rsa_recover_prime_factors (>=0.8)
'cryptography>=0.8',
'ndg-httpsclient', # urllib3 InsecurePlatformWarning (#304)
'pyasn1', # urllib3 InsecurePlatformWarning (#304)
# Connection.set_tlsext_host_name (>=0.13), X509Req.get_extensions (>=0.15)
'PyOpenSSL>=0.15',
'pyrfc3339',
'pytz',
'requests',
'setuptools', # pkg_resources
'six',
'werkzeug',
]
# env markers in extras_require cause problems with older pip: #517
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
testing_extras = [
'nose',
'tox',
]
setup(
name='acme',
version=version,
description='ACME protocol implementation in Python',
url='https://github.com/letsencrypt/letsencrypt',
author="Let's Encrypt Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'docs': docs_extras,
'testing': testing_extras,
},
entry_points={
'console_scripts': [
'jws = acme.jose.jws:CLI.run',
],
},
test_suite='acme',
)
| Python | 0.000001 |
cf2af12d926370d83e909e0d38d2c774553e0408 | Fix handshake | YamTorrent.py | YamTorrent.py | #!/usr/bin/env python3
import sys
import requests
import hashlib
import bencodepy
import struct
import socket
def DEBUG(*s):
if debugging:
print(*s)
def ERROR(*s):
print(*s)
exit()
def main():
# open file in binary
try:
torrentfile = open(sys.argv[1], "rb").read()
except IOError:
ERROR("BAD FILE NAME: " + sys.argv[1])
DEBUG("BEGINNING")
# dictionary of torrent file
# torrentdict = bencode.bdecode(torrentfile)
torrentdict = bencodepy.decode(torrentfile)
# print(torrentdict)
# print(type(torrentdict))
# re-bencode the info section
info = torrentdict[b"info"]
# print(info)
bencodedinfo = bencodepy.encode(info)
# print(info)
# print(bencodedinfo)
#COMPUTE PARAMETERS FOR ANNOUNCE
# SHA1 hash of info section
sha1 = hashlib.sha1(bencodedinfo)
info_hash = sha1.digest()
# print(type(bencodedinfo))
# for char in info_hash:
# print(hex(char))
# print(char)
peer_id = (hashlib.sha1(b"0")).digest()
port = b'6881'
uploaded = b'0'
downloaded = b'0'
try:
left = 0
for f in info[b'files']:
left += f[b'length']
except KeyError:
left = info[b'length']
compact = b'1'
event = b'started'
url = torrentdict[b'announce']
p = {'info_hash': info_hash, 'peer_id': peer_id, 'port': port, 'uploaded': uploaded, 'downloaded': downloaded, 'left': left, 'compact': compact, 'event': event}
#CONTACT TRACKER
r = requests.get(url.decode(), params=p)
# print(info_hash)
# print(bencodedinfo)
# with open("temp.txt",'wb') as f:
# f.write(r.text.encode())
DEBUG('URL')
DEBUG(r.url)
DEBUG('END URL')
DEBUG('CONTENT')
DEBUG(r.content)
DEBUG('END CONTENT')
try:
response = bencodepy.decode(r.content)
except bencodepy.exceptions.DecodingError:
ERROR("BAD RESPONSE")
#COMPUTE PEERS
peers = response[b'peers']
peers_list = []
for i in range(0,len(peers),6):
peer_dict = {}
#not sure if these are right
peer_dict['ip'] = socket.inet_ntoa(peers[i:i+4])
peer_dict['ip_int'] = struct.unpack("!L",peers[i:i+4])[0]
peer_dict['port'] = struct.unpack("!H",peers[i+4:i+6])[0]
peers_list.append(peer_dict)
DEBUG(peers_list)
first_peer = peers_list[0]
first_connection = socket.create_connection((first_peer['ip'],first_peer['port']))
DEBUG(type(first_connection))
handshake = struct.pack('!B',19) + b"BitTorrent protocol" + bytearray(8) + info_hash + peer_id
DEBUG(handshake)
DEBUG(len(handshake))
DEBUG(len(info_hash))
DEBUG(len(peer_id))
first_connection.sendall(handshake)
peer_response = first_connection.recv(4096)
DEBUG("handshake response", peer_response)
if __name__ == '__main__':
debugging = True
main()
| #!/usr/bin/env python3
import sys
import requests
import hashlib
import bencodepy
import struct
import socket
def DEBUG(s):
if debugging:
print(s)
def ERROR(s):
print(s)
exit()
def main():
# open file in binary
try:
torrentfile = open(sys.argv[1], "rb").read()
except IOError:
ERROR("BAD FILE NAME: " + sys.argv[1])
DEBUG("BEGINNING")
# dictionary of torrent file
# torrentdict = bencode.bdecode(torrentfile)
torrentdict = bencodepy.decode(torrentfile)
# print(torrentdict)
# print(type(torrentdict))
# re-bencode the info section
info = torrentdict[b"info"]
# print(info)
bencodedinfo = bencodepy.encode(info)
# print(info)
# print(bencodedinfo)
#COMPUTE PARAMETERS FOR ANNOUNCE
# SHA1 hash of info section
sha1 = hashlib.sha1(bencodedinfo)
info_hash = sha1.digest()
# print(type(bencodedinfo))
# for char in info_hash:
# print(hex(char))
# print(char)
peer_id = (hashlib.sha1(b"0")).digest()
port = b'6881'
uploaded = b'0'
downloaded = b'0'
try:
left = 0
for f in info[b'files']:
left += f[b'length']
except KeyError:
left = info[b'length']
compact = b'1'
event = b'started'
url = torrentdict[b'announce']
p = {'info_hash': info_hash, 'peer_id': peer_id, 'port': port, 'uploaded': uploaded, 'downloaded': downloaded, 'left': left, 'compact': compact, 'event': event}
#CONTACT TRACKER
r = requests.get(url.decode(), params=p)
# print(info_hash)
# print(bencodedinfo)
# with open("temp.txt",'wb') as f:
# f.write(r.text.encode())
DEBUG('URL')
DEBUG(r.url)
DEBUG('END URL')
DEBUG('CONTENT')
DEBUG(r.content)
DEBUG('END CONTENT')
try:
response = bencodepy.decode(r.content)
except bencodepy.exceptions.DecodingError:
ERROR("BAD RESPONSE")
#COMPUTE PEERS
peers = response[b'peers']
peers_list = []
for i in range(0,len(peers),6):
peer_dict = {}
#not sure if these are right
peer_dict['ip'] = socket.inet_ntoa(peers[i:i+4])
peer_dict['ip_int'] = struct.unpack("!L",peers[i:i+4])[0]
peer_dict['port'] = struct.unpack("!H",peers[i+4:i+6])[0]
peers_list.append(peer_dict)
DEBUG(peers_list)
first_peer = peers_list[0]
first_connection = socket.create_connection((first_peer['ip'],first_peer['port']))
DEBUG(type(first_connection))
handshake = b"handshake: " + struct.pack('!B',19) + b"BitTorrent protocol" + bytearray(8) + info_hash + peer_id
DEBUG(handshake)
DEBUG(len(handshake))
DEBUG(len(info_hash))
DEBUG(len(peer_id))
first_connection.sendall(handshake)
peer_response = first_connection.recv(4096)
DEBUG(peer_response)
if __name__ == '__main__':
debugging = True
main()
| Python | 0.000004 |
877a7ff09056ea7ca03f0b31eb4ef8e30ac9d3fa | Change names we expect in spreadsheet | openprescribing/pipeline/management/commands/import_pcns.py | openprescribing/pipeline/management/commands/import_pcns.py | from django.core.management import BaseCommand
from django.db import transaction
from frontend.models import PCN, Practice
from openpyxl import load_workbook
class Command(BaseCommand):
help = "This command imports PCNs and PCN mappings"
def add_arguments(self, parser):
parser.add_argument("--filename")
def handle(self, *args, **kwargs):
workbook = load_workbook(kwargs["filename"])
details_sheet = workbook.get_sheet_by_name("PCNDetails")
members_sheet = workbook.get_sheet_by_name("PCN Core Partner Details")
pcn_details = {}
for code, name in self.get_pcn_details_from_sheet(details_sheet):
pcn_details[code] = {"name": name, "members": set()}
for practice_code, pcn_code in self.get_pcn_members_from_sheet(members_sheet):
pcn_details[pcn_code]["members"].add(practice_code)
with transaction.atomic():
for code, details in pcn_details.items():
PCN.objects.update_or_create(
code=code, defaults={"name": details["name"]}
)
Practice.objects.filter(code__in=details["members"]).update(pcn=code)
def get_pcn_details_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
CODE_COL = headers.index("PCN Code")
NAME_COL = headers.index("PCN Name")
for n, row in enumerate(rows, start=2):
code = row[CODE_COL]
name = row[NAME_COL]
# Skip blank lines
if not code and not name:
continue
if not code or not name:
raise ValueError("Blank code or name on row {}".format(n))
yield code, name
def get_pcn_members_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
PRACTICE_COL = headers.index("Partner\nOrganisation\nCode")
PCN_COL = headers.index("PCN Code")
for n, row in enumerate(rows, start=2):
practice_code = row[PRACTICE_COL]
pcn_code = row[PCN_COL]
# Skip blank lines
if not practice_code and not pcn_code:
continue
if not practice_code or not pcn_code:
raise ValueError("Blank code on row {}".format(n))
yield practice_code, pcn_code
| from django.core.management import BaseCommand
from django.db import transaction
from frontend.models import PCN, Practice
from openpyxl import load_workbook
class Command(BaseCommand):
help = "This command imports PCNs and PCN mappings"
def add_arguments(self, parser):
parser.add_argument("--filename")
def handle(self, *args, **kwargs):
workbook = load_workbook(kwargs["filename"])
details_sheet = workbook.get_sheet_by_name("PCN Details")
members_sheet = workbook.get_sheet_by_name("PCN Core Partner Details")
pcn_details = {}
for code, name in self.get_pcn_details_from_sheet(details_sheet):
pcn_details[code] = {"name": name, "members": set()}
for practice_code, pcn_code in self.get_pcn_members_from_sheet(members_sheet):
pcn_details[pcn_code]["members"].add(practice_code)
with transaction.atomic():
for code, details in pcn_details.items():
PCN.objects.update_or_create(
code=code, defaults={"name": details["name"]}
)
Practice.objects.filter(code__in=details["members"]).update(pcn=code)
def get_pcn_details_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
CODE_COL = headers.index("PCN Code")
NAME_COL = headers.index("PCN Name")
for n, row in enumerate(rows, start=2):
code = row[CODE_COL]
name = row[NAME_COL]
# Skip blank lines
if not code and not name:
continue
if not code or not name:
raise ValueError("Blank code or name on row {}".format(n))
yield code, name
def get_pcn_members_from_sheet(self, sheet):
rows = ([cell.value for cell in row] for row in sheet.rows)
headers = next(rows)
PRACTICE_COL = headers.index("Partner Organisation Code")
PCN_COL = headers.index("PCN Code")
for n, row in enumerate(rows, start=2):
practice_code = row[PRACTICE_COL]
pcn_code = row[PCN_COL]
# Skip blank lines
if not practice_code and not pcn_code:
continue
if not practice_code or not pcn_code:
raise ValueError("Blank code on row {}".format(n))
yield practice_code, pcn_code
| Python | 0 |
14cee1112f2a506f4ec547b80e897036f601ab6d | Fix tests/utils/http_requests.py | chroma-manager/tests/utils/http_requests.py | chroma-manager/tests/utils/http_requests.py | #!/usr/bin/env python
#
# ========================================================
# Copyright (c) 2012 Whamcloud, Inc. All rights reserved.
# ========================================================
import json
import requests
from urlparse import urljoin
class HttpRequests(object):
def __init__(self, server_http_url = '', *args, **kwargs):
self.server_http_url = server_http_url
self.session = requests.session()
self.session.headers = {"Accept": "application/json",
"Content-type": "application/json"}
self.session.verify = False
def get(self, url, **kwargs):
response = self.session.get(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def post(self, url, body = None, **kwargs):
if body and 'data' not in kwargs:
kwargs['data'] = json.dumps(body)
response = self.session.post(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def put(self, url, body = None, **kwargs):
if body and 'data' not in kwargs:
kwargs['data'] = json.dumps(body)
response = self.session.put(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def delete(self, url, **kwargs):
response = self.session.delete(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def request(self, method, url, **kwargs):
response = self.session.request(
method,
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
class HttpResponse(requests.Response):
def __init__(self, response, *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
self.__dict__.update(response.__dict__.copy())
@property
def json(self):
if self.text == '[]':
return []
else:
try:
return json.loads(self.text)
except ValueError:
print "Bad JSON: %s" % self.text
raise
@property
def successful(self):
# TODO: Make better
return 200 <= self.status_code < 300
class AuthorizedHttpRequests(HttpRequests):
def __init__(self, username, password, *args, **kwargs):
super(AuthorizedHttpRequests, self).__init__(*args, **kwargs)
response = self.get("/api/session/")
if not response.successful:
raise RuntimeError("Failed to open session")
self.session.headers['X-CSRFToken'] = response.cookies['csrftoken']
self.session.cookies['csrftoken'] = response.cookies['csrftoken']
self.session.cookies['sessionid'] = response.cookies['sessionid']
response = self.post("/api/session/", data = json.dumps({'username': username, 'password': password}))
if not response.successful:
raise RuntimeError("Failed to authenticate")
| #!/usr/bin/env python
#
# ========================================================
# Copyright (c) 2012 Whamcloud, Inc. All rights reserved.
# ========================================================
import json
import requests
from urlparse import urljoin
class HttpRequests(object):
def __init__(self, server_http_url = '', *args, **kwargs):
self.server_http_url = server_http_url
self.session = requests.session(headers = {"Accept": "application/json", "Content-type": "application/json"})
def get(self, url, **kwargs):
response = self.session.get(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def post(self, url, body = None, **kwargs):
if body and 'data' not in kwargs:
kwargs['data'] = json.dumps(body)
response = self.session.post(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def put(self, url, body = None, **kwargs):
if body and 'data' not in kwargs:
kwargs['data'] = json.dumps(body)
response = self.session.put(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def delete(self, url, **kwargs):
response = self.session.delete(
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
def request(self, method, url, **kwargs):
response = self.session.request(
method,
urljoin(self.server_http_url, url),
**kwargs
)
return HttpResponse(response)
class HttpResponse(requests.Response):
def __init__(self, response, *args, **kwargs):
super(HttpResponse, self).__init__(*args, **kwargs)
self.__dict__.update(response.__dict__.copy())
@property
def json(self):
if self.text == '[]':
return []
else:
try:
return json.loads(self.text)
except ValueError:
print "Bad JSON: %s" % self.text
raise
@property
def successful(self):
# TODO: Make better
return 200 <= self.status_code < 300
class AuthorizedHttpRequests(HttpRequests):
def __init__(self, username, password, *args, **kwargs):
super(AuthorizedHttpRequests, self).__init__(*args, **kwargs)
response = self.get("/api/session/")
if not response.successful:
raise RuntimeError("Failed to open session")
self.session.headers['X-CSRFToken'] = response.cookies['csrftoken']
self.session.cookies['csrftoken'] = response.cookies['csrftoken']
self.session.cookies['sessionid'] = response.cookies['sessionid']
response = self.post("/api/session/", data = json.dumps({'username': username, 'password': password}))
if not response.successful:
raise RuntimeError("Failed to authenticate")
| Python | 0.000001 |
e7a632718f379fb1ede70d1086f55279e4251e11 | fix geotag access - not an obj | cinder/scheduler/filters/geo_tags_filter.py | cinder/scheduler/filters/geo_tags_filter.py | # Copyright (c) 2014 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class GeoTagsFilter(filters.BaseHostFilter):
"""GeoTags Filter."""
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient capacity."""
#(licostan): Add geotag data to the host_state instead of
#querying it...
#TODO: add scheduler hints to cinder.
metadata_hints = filter_properties.get('metadata') or {}
gt_hints = metadata_hints.get('geo_tags', None)
context = filter_properties['context']
geo_tag = db.geo_tag_get_by_node_name(context, host_state.host)
if not geo_tag:
LOG.info('NO GEO TAG FOUND FOR %s' % host_state.host)
return True
#do other geotags check here based on gt-hints
if geo_tag['valid_invalid'].lower() == 'valid':
LOG.info('GEO TAG FOUND FOR %s' % host_state.host)
return True
LOG.info('GEO TAG INVALID FOR %s' % host_state.host)
return False
| # Copyright (c) 2014 Intel
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from cinder import db
from cinder.openstack.common import log as logging
from cinder.openstack.common.scheduler import filters
LOG = logging.getLogger(__name__)
class GeoTagsFilter(filters.BaseHostFilter):
"""GeoTags Filter."""
def host_passes(self, host_state, filter_properties):
"""Return True if host has sufficient capacity."""
#(licostan): Add geotag data to the host_state instead of
#querying it...
#TODO: add scheduler hints to cinder.
metadata_hints = filter_properties.get('metadata') or {}
gt_hints = metadata_hints.get('geo_tags', None)
context = filter_properties['context']
geo_tag = db.geo_tag_get_by_node_name(context, host_state.host)
if not geo_tag:
LOG.info('NO GEO TAG FOUND FOR %s' % host_state.host)
return True
#do other geotags check here based on gt-hints
if geo_tag.valid_invalid.lower() == 'valid':
LOG.info('GEO TAG FOUND FOR %s' % host_state.host)
return True
LOG.info('GEO TAG INVALID FOR %s' % host_state.host)
return False
| Python | 0.000001 |
16ab5dcf1f6e52f89435adccdfa7021ce24e29a8 | fix formatting via make fix | tests/metal/contrib/test_baselines.py | tests/metal/contrib/test_baselines.py | import numpy as np
import torch
from metal.end_model import SparseLogisticRegression
def test_sparselogreg(self):
"""Confirm sparse logreg can overfit, works on padded data"""
F = 1000 # total number of possible features
N = 50 # number of data points
S = [10, 100] # range of features per data point
X = np.zeros((N, S[1]))
for i in range(N):
Si = np.random.randint(S[0], S[1])
X[i, :Si] = np.random.randint(F, size=(1, Si))
X = torch.from_numpy(X).long()
Y = torch.from_numpy(np.random.randint(1, 3, size=(N,)))
em = SparseLogisticRegression(
seed=1, input_dim=F, padding_idx=0, verbose=False
)
em.train_model((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005)
self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0)
score = em.score((X, Y), verbose=False)
self.assertGreater(score, 0.95)
|
import numpy as np
import torch
from metal.end_model import SparseLogisticRegression
def test_sparselogreg(self):
"""Confirm sparse logreg can overfit, works on padded data"""
F = 1000 # total number of possible features
N = 50 # number of data points
S = [10, 100] # range of features per data point
X = np.zeros((N, S[1]))
for i in range(N):
Si = np.random.randint(S[0], S[1])
X[i, :Si] = np.random.randint(F, size=(1, Si))
X = torch.from_numpy(X).long()
Y = torch.from_numpy(np.random.randint(1, 3, size=(N,)))
em = SparseLogisticRegression(
seed=1, input_dim=F, padding_idx=0, verbose=False
)
em.train_model((X, Y), n_epochs=5, optimizer="sgd", lr=0.0005)
self.assertEqual(float(em.network[-1].W.weight.data[0, :].sum()), 0.0)
score = em.score((X, Y), verbose=False)
self.assertGreater(score, 0.95)
| Python | 0 |
0341c38dff42ae5e86353c6d53c2d30aabca555e | update py-jupyter-client and new setuptools dependency (#13425) | var/spack/repos/builtin/packages/py-jupyter-client/package.py | var/spack/repos/builtin/packages/py-jupyter-client/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJupyterClient(PythonPackage):
"""Jupyter protocol client APIs"""
homepage = "https://github.com/jupyter/jupyter_client"
url = "https://github.com/jupyter/jupyter_client/archive/4.4.0.tar.gz"
version('5.3.4', sha256='2af6f0e0e4d88009b11103490bea0bfb405c1c470e226c2b7b17c10e5dda9734')
version('4.4.0', sha256='2fda7fe1af35f0b4a77c4a2fd4ee38ac3666ed7f4d92a5b6ff8aaf764c38e199')
version('4.3.0', sha256='90b6ea3ced910ed94c5d558373490a81b33c672d877c1ffdc76b281e3216f1f6')
version('4.2.2', sha256='bf3e8ea4c44f07dbe2991e41031f6dab242734be424f4d40b72cc58a12c7d2ca')
version('4.2.1', sha256='547d443fb38ea667b468a6625ac374d476f8ac90fe17c3e35d75cab3cb8d40ba')
version('4.2.0', sha256='00eab54615fb10f1e508d8e7a952fbeeb2a82cd67b17582bd61be51a08a61d89')
version('4.1.1', sha256='ca6f3f66d5dc1e9bca81696ae607a93d652210c3ee9385a7c31c067d5ba88e6e')
version('4.1.0', sha256='ecf76a159381ec9880fd2c31388c6983b1d855f92f0292cf0667a90dd63f51c0')
version('4.0.0', sha256='33b15abb1307d8d3716b0d3b5d07aa22fdfbbf65a9f1aedf478a274a6adc11c0')
depends_on('python@2.7:2.8,3.3:', type=('build', 'run'))
depends_on('python@2.7:2.8,3.5:', type=('build', 'run'), when='@5:')
depends_on('py-traitlets', type=('build', 'run'))
depends_on('py-jupyter-core', type=('build', 'run'))
depends_on('py-pyzmq@13:', type=('build', 'run'))
depends_on('py-python-dateutil@2.1:', type=('build', 'run'), when='@5:')
depends_on('py-tornado@4.1:', type=('build', 'run'), when='@5:')
depends_on('py-setuptools', type='build', when='@5:')
| # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyJupyterClient(PythonPackage):
"""Jupyter protocol client APIs"""
homepage = "https://github.com/jupyter/jupyter_client"
url = "https://github.com/jupyter/jupyter_client/archive/4.4.0.tar.gz"
version('4.4.0', sha256='2fda7fe1af35f0b4a77c4a2fd4ee38ac3666ed7f4d92a5b6ff8aaf764c38e199')
version('4.3.0', sha256='90b6ea3ced910ed94c5d558373490a81b33c672d877c1ffdc76b281e3216f1f6')
version('4.2.2', sha256='bf3e8ea4c44f07dbe2991e41031f6dab242734be424f4d40b72cc58a12c7d2ca')
version('4.2.1', sha256='547d443fb38ea667b468a6625ac374d476f8ac90fe17c3e35d75cab3cb8d40ba')
version('4.2.0', sha256='00eab54615fb10f1e508d8e7a952fbeeb2a82cd67b17582bd61be51a08a61d89')
version('4.1.1', sha256='ca6f3f66d5dc1e9bca81696ae607a93d652210c3ee9385a7c31c067d5ba88e6e')
version('4.1.0', sha256='ecf76a159381ec9880fd2c31388c6983b1d855f92f0292cf0667a90dd63f51c0')
version('4.0.0', sha256='33b15abb1307d8d3716b0d3b5d07aa22fdfbbf65a9f1aedf478a274a6adc11c0')
depends_on('python@2.7:2.8,3.3:')
depends_on('py-traitlets', type=('build', 'run'))
depends_on('py-jupyter-core', type=('build', 'run'))
depends_on('py-pyzmq@13:', type=('build', 'run'))
| Python | 0 |
79bf0829769e456750d7904866e65ae289f1cd46 | add missing logfile write flushes | _log/dslog.py | _log/dslog.py | # -*- coding:utf8 -*-
import io
import os
import sys
import traceback
LOG_STDOUT=sys.stdout
LOG_STDERR=sys.stderr
logging_to_file=True
logfile="devsetup.log"
def init(project_folder, write_to_log=True):
global LOG_STDOUT
global LOG_STDERR
global logging_to_file
global logfile
# special case - we want to log directly to the screen
if not write_to_log:
logging_to_file = False
return
# if we get here, then we are logging
# to a file
# where will the logfile live?
log_filename = os.path.join(project_folder, logfile)
# create it
logfile_handle = io.open(log_filename, "w")
# all done
LOG_STDOUT = logfile_handle
LOG_STDERR = logfile_handle
def convert_command_to_string(cmd):
retval=''
for arg in cmd:
# are we appending to the return value?
if len(retval) > 0:
retval=retval+' '
# does the arg need quoting?
if ' ' in arg:
if "'" in arg:
retval=retval+"'" + arg + "'"
else:
retval=retval+'"' + arg + '"'
else:
retval=retval+arg
# all done
return retval
def log_command_output(output):
global LOG_STDOUT
for line in output:
LOG_STDOUT.write(unicode(line))
LOG_STDOUT.flush()
def log_command_start(cmd):
global LOG_STDOUT
# write the command to the logfile
LOG_STDOUT.write(unicode("$ " + convert_command_to_string(cmd) + "\n"))
LOG_STDOUT.flush()
def log_command_result(retval):
global LOG_STDOUT
LOG_STDOUT.write(unicode('# ... command exited with value ' + str(retval) + "\n\n"))
LOG_STDOUT.flush()
def log_comment(msg):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# " + msg + "\n"))
LOG_STDOUT.flush()
def log_comment_result(msg):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# ... " + msg + "\n\n"))
LOG_STDOUT.flush()
def log_last_exception():
if logging_to_file:
LOG_STDOUT.write(unicode("This resulted in the following exception:\n\n"))
output = traceback.format_exc()
LOG_STDOUT.write(unicode(output))
LOG_STDOUT.flush()
def log_new_operation(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n"))
LOG_STDOUT.write(unicode("# " + operation + "\n\n"))
LOG_STDOUT.flush()
def log_operation_okay(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# OKAY: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
LOG_STDOUT.flush()
def log_operation_failed(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# FAILED: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
LOG_STDOUT.flush()
def log_operation_skipped(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# SKIPPED: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
LOG_STDOUT.flush()
| # -*- coding:utf8 -*-
import io
import os
import sys
import traceback
LOG_STDOUT=sys.stdout
LOG_STDERR=sys.stderr
logging_to_file=True
logfile="devsetup.log"
def init(project_folder, write_to_log=True):
global LOG_STDOUT
global LOG_STDERR
global logging_to_file
global logfile
# special case - we want to log directly to the screen
if not write_to_log:
logging_to_file = False
return
# if we get here, then we are logging
# to a file
# where will the logfile live?
log_filename = os.path.join(project_folder, logfile)
# create it
logfile_handle = io.open(log_filename, "w")
# all done
LOG_STDOUT = logfile_handle
LOG_STDERR = logfile_handle
def convert_command_to_string(cmd):
retval=''
for arg in cmd:
# are we appending to the return value?
if len(retval) > 0:
retval=retval+' '
# does the arg need quoting?
if ' ' in arg:
if "'" in arg:
retval=retval+"'" + arg + "'"
else:
retval=retval+'"' + arg + '"'
else:
retval=retval+arg
# all done
return retval
def log_command_output(output):
global LOG_STDOUT
for line in output:
LOG_STDOUT.write(unicode(line))
def log_command_start(cmd):
global LOG_STDOUT
# write the command to the logfile
LOG_STDOUT.write(unicode("$ " + convert_command_to_string(cmd) + "\n"))
LOG_STDOUT.flush()
def log_command_result(retval):
global LOG_STDOUT
LOG_STDOUT.write(unicode('# ... command exited with value ' + str(retval) + "\n\n"))
LOG_STDOUT.flush()
def log_comment(msg):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# " + msg + "\n"))
def log_comment_result(msg):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# ... " + msg + "\n\n"))
def log_last_exception():
if logging_to_file:
LOG_STDOUT.write(unicode("This resulted in the following exception:\n\n"))
output = traceback.format_exc()
LOG_STDOUT.write(unicode(output))
def log_new_operation(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n"))
LOG_STDOUT.write(unicode("# " + operation + "\n\n"))
def log_operation_okay(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# OKAY: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
def log_operation_failed(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# FAILED: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n"))
def log_operation_skipped(operation):
global LOG_STDOUT
global logging_to_file
if logging_to_file:
LOG_STDOUT.write(unicode("# SKIPPED: " + operation + "\n"))
LOG_STDOUT.write(unicode("# =-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=\n\n")) | Python | 0.000031 |
186c509f14968e9d51a6a7d3a7a23ed07eabc286 | Enable spam bug detection in all products (#1106) | auto_nag/scripts/spambug.py | auto_nag/scripts/spambug.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import people
from auto_nag.bugbug_utils import get_bug_ids_classification
from auto_nag.bzcleaner import BzCleaner
from auto_nag.utils import nice_round
COMMENT = """
The [Bugbug](https://github.com/mozilla/bugbug/) bot thinks this bug is invalid.
If you think the bot is wrong, please reopen the bug and move it back to its prior component.
Be aware this is a production bug database used by the Mozilla community to develop Firefox, and other products.
Filing test bugs here wastes the time of all our contributors, volunteers, as well as paid employees.
If you continue to abuse bugzilla.mozilla.org your account will be disabled.
""".strip()
class SpamBug(BzCleaner):
def __init__(self):
super().__init__()
self.autofix_bugs = {}
self.people = people.People.get_instance()
def description(self):
return "[Using ML] Detect spam bugs"
def has_default_products(self):
return False
def columns(self):
return ["id", "summary", "confidence"]
def sort_columns(self):
return lambda p: (-p[2], -int(p[0]))
def handle_bug(self, bug, data):
reporter = bug["creator"]
if self.people.is_mozilla(reporter):
return None
return bug
def get_bz_params(self, date):
start_date, _ = self.get_dates(date)
return {
"include_fields": ["id", "groups", "summary", "creator"],
# Ignore closed bugs.
"bug_status": "__open__",
"f1": "reporter",
"v1": "%group.editbugs%",
"o1": "notsubstring",
"f2": "creation_ts",
"o2": "greaterthan",
"v2": start_date,
}
def get_bugs(self, date="today", bug_ids=[]):
# Retrieve the bugs with the fields defined in get_bz_params
raw_bugs = super().get_bugs(date=date, bug_ids=bug_ids, chunk_size=7000)
if len(raw_bugs) == 0:
return {}
# Extract the bug ids
bug_ids = list(raw_bugs.keys())
# Classify those bugs
bugs = get_bug_ids_classification("spambug", bug_ids)
for bug_id in sorted(bugs.keys()):
bug_data = bugs[bug_id]
if not bug_data.get("available", True):
# The bug was not available, it was either removed or is a
# security bug
continue
if not {"prob", "index"}.issubset(bug_data.keys()):
raise Exception(f"Invalid bug response {bug_id}: {bug_data!r}")
bug = raw_bugs[bug_id]
prob = bug_data["prob"]
if prob[1] < self.get_config("confidence_threshold"):
continue
self.autofix_bugs[bug_id] = {
"id": bug_id,
"summary": bug["summary"],
"confidence": nice_round(prob[1]),
}
return self.autofix_bugs
def get_autofix_change(self):
result = {}
for bug_id in self.autofix_bugs:
result[bug_id] = {
"comment": {
"body": COMMENT.format(self.autofix_bugs[bug_id]["confidence"])
},
"product": "Invalid Bugs",
"component": "General",
"version": "unspecified",
"milestone": "---",
"status": "RESOLVED",
"resolution": "INVALID",
}
return result
if __name__ == "__main__":
SpamBug().run()
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from auto_nag import people
from auto_nag.bugbug_utils import get_bug_ids_classification
from auto_nag.bzcleaner import BzCleaner
from auto_nag.utils import nice_round
COMMENT = """
The [Bugbug](https://github.com/mozilla/bugbug/) bot thinks this bug is invalid.
If you think the bot is wrong, please reopen the bug and move it back to its prior component.
Be aware this is a production bug database used by the Mozilla community to develop Firefox, and other products.
Filing test bugs here wastes the time of all our contributors, volunteers, as well as paid employees.
If you continue to abuse bugzilla.mozilla.org your account will be disabled.
""".strip()
class SpamBug(BzCleaner):
def __init__(self):
super().__init__()
self.autofix_bugs = {}
self.people = people.People.get_instance()
def description(self):
return "[Using ML] Detect spam bugs"
def columns(self):
return ["id", "summary", "confidence"]
def sort_columns(self):
return lambda p: (-p[2], -int(p[0]))
def handle_bug(self, bug, data):
reporter = bug["creator"]
if self.people.is_mozilla(reporter):
return None
return bug
def get_bz_params(self, date):
start_date, _ = self.get_dates(date)
return {
"include_fields": ["id", "groups", "summary", "creator"],
# Ignore closed bugs.
"bug_status": "__open__",
"f1": "reporter",
"v1": "%group.editbugs%",
"o1": "notsubstring",
"f2": "creation_ts",
"o2": "greaterthan",
"v2": start_date,
}
def get_bugs(self, date="today", bug_ids=[]):
# Retrieve the bugs with the fields defined in get_bz_params
raw_bugs = super().get_bugs(date=date, bug_ids=bug_ids, chunk_size=7000)
if len(raw_bugs) == 0:
return {}
# Extract the bug ids
bug_ids = list(raw_bugs.keys())
# Classify those bugs
bugs = get_bug_ids_classification("spambug", bug_ids)
for bug_id in sorted(bugs.keys()):
bug_data = bugs[bug_id]
if not bug_data.get("available", True):
# The bug was not available, it was either removed or is a
# security bug
continue
if not {"prob", "index"}.issubset(bug_data.keys()):
raise Exception(f"Invalid bug response {bug_id}: {bug_data!r}")
bug = raw_bugs[bug_id]
prob = bug_data["prob"]
if prob[1] < self.get_config("confidence_threshold"):
continue
self.autofix_bugs[bug_id] = {
"id": bug_id,
"summary": bug["summary"],
"confidence": nice_round(prob[1]),
}
return self.autofix_bugs
def get_autofix_change(self):
result = {}
for bug_id in self.autofix_bugs:
result[bug_id] = {
"comment": {
"body": COMMENT.format(self.autofix_bugs[bug_id]["confidence"])
},
"product": "Invalid Bugs",
"component": "General",
"version": "unspecified",
"milestone": "---",
"status": "RESOLVED",
"resolution": "INVALID",
}
return result
if __name__ == "__main__":
SpamBug().run()
| Python | 0 |
bbe835c8aa561d8db58e116f0e55a5b19c4f9ca4 | Fix sitemap memory consumption during generation | firecares/sitemaps.py | firecares/sitemaps.py | from django.contrib import sitemaps
from firecares.firestation.models import FireDepartment
from django.db.models import Max
from django.core.urlresolvers import reverse
class BaseSitemap(sitemaps.Sitemap):
protocol = 'https'
def items(self):
return ['media', 'models_performance_score', 'models_community_risk', 'safe_grades', 'login', 'contact_us',
'firedepartment_list']
def priority(self, item):
return 1
def location(self, item):
return reverse(item)
class DepartmentsSitemap(sitemaps.Sitemap):
protocol = 'https'
max_population = 1
def items(self):
queryset = FireDepartment.objects.filter(archived=False).only('population', 'featured', 'name')
self.max_population = queryset.aggregate(Max('population'))['population__max']
return queryset
def location(self, item):
return item.get_absolute_url()
def priority(self, item):
if item.featured is True:
return 1
if item.population is None:
return 0
# adding a bit to the total so featured items are always above others
priority = item.population / float(self.max_population + 0.1)
return priority
def lastmod(self, item):
return item.modified
| from django.contrib import sitemaps
from firecares.firestation.models import FireDepartment
from django.db.models import Max
from django.core.urlresolvers import reverse
class BaseSitemap(sitemaps.Sitemap):
protocol = 'https'
def items(self):
return ['media', 'models_performance_score', 'models_community_risk', 'safe_grades', 'login', 'contact_us',
'firedepartment_list']
def priority(self, item):
return 1
def location(self, item):
return reverse(item)
class DepartmentsSitemap(sitemaps.Sitemap):
protocol = 'https'
max_population = 1
def items(self):
queryset = FireDepartment.objects.filter(archived=False)
self.max_population = queryset.aggregate(Max('population'))['population__max']
return queryset
def location(self, item):
return item.get_absolute_url()
def priority(self, item):
if item.featured is True:
return 1
if item.population is None:
return 0
# adding a bit to the total so featured items are always above others
priority = item.population / float(self.max_population + 0.1)
return priority
def lastmod(self, item):
return item.modified
| Python | 0 |
9c7d1deba7dbde9285e49cb2966b1d242ac8ddc2 | Use sphinxapi if available | flask_sphinxsearch.py | flask_sphinxsearch.py | try:
import sphinxapi as sphinxsearch
except ImportError:
import sphinxsearch
from flask import current_app
# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Sphinx(object):
"""
Simple wrapper around the `SphinxClient` object.
Usage:
from flask.ext.sphinxsearch import Sphinx
from myapp import app
sphinx = Sphinx(myapp)
print sphinx.client.Query("query")
"""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.config.setdefault('SPHINX_HOST', 'localhost')
app.config.setdefault('SPHINX_PORT', 3312)
def connect(self):
client = sphinxsearch.SphinxClient()
client.SetServer(
current_app.config['SPHINX_HOST'],
current_app.config['SPHINX_PORT'])
return client
@property
def client(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'sphinxclient'):
ctx.sphinxclient = self.connect()
return ctx.sphinxclient
# set constants on the Sphinx object, for ease of use
for key in dir(sphinxsearch):
if key == key.upper():
setattr(Sphinx, key,
getattr(sphinxsearch, key))
| import sphinxsearch
from flask import current_app
# Find the stack on which we want to store the database connection.
# Starting with Flask 0.9, the _app_ctx_stack is the correct one,
# before that we need to use the _request_ctx_stack.
try:
from flask import _app_ctx_stack as stack
except ImportError:
from flask import _request_ctx_stack as stack
class Sphinx(object):
"""
Simple wrapper around the `SphinxClient` object.
Usage:
from flask.ext.sphinxsearch import Sphinx
from myapp import app
sphinx = Sphinx(myapp)
print sphinx.client.Query("query")
"""
def __init__(self, app=None):
self.app = app
if app is not None:
self.init_app(app)
def init_app(self, app):
self.app = app
app.config.setdefault('SPHINX_HOST', 'localhost')
app.config.setdefault('SPHINX_PORT', 3312)
def connect(self):
client = sphinxsearch.SphinxClient()
client.SetServer(
current_app.config['SPHINX_HOST'],
current_app.config['SPHINX_PORT'])
return client
@property
def client(self):
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'sphinxclient'):
ctx.sphinxclient = self.connect()
return ctx.sphinxclient
# set constants on the Sphinx object, for ease of use
for key in dir(sphinxsearch):
if key == key.upper():
setattr(Sphinx, key,
getattr(sphinxsearch, key))
| Python | 0 |
726fa619627f449371f8cdd6df266d4c92aaad5d | Fix flaky NL test | samples/snippets/ocr_nl/main_test.py | samples/snippets/ocr_nl/main_test.py | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for main."""
import re
import zipfile
import main
_TEST_IMAGE_URI = 'gs://{}/language/image8.png'
def test_batch_empty():
for batch_size in range(1, 10):
assert len(
list(main.batch([], batch_size=batch_size))) == 0
def test_batch_single():
for batch_size in range(1, 10):
batched = tuple(main.batch([1], batch_size=batch_size))
assert batched == ((1,),)
def test_single_image_returns_text(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
assert image_path in texts
_, document = main.extract_description(texts[image_path])
assert "daughter" in document
assert "Bennet" in document
assert "hat" in document
def test_single_nonimage_returns_error():
vision_api_client = main.VisionApi()
texts = vision_api_client.detect_text(['README.md'])
assert "README.md" not in texts
def test_text_returns_entities():
text = "Holmes and Watson walked to the cafe."
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(text)
assert entities
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'holmes'
assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes'
def test_entities_list(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
locale, document = main.extract_description(texts[image_path])
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(document)
assert entities
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'bennet'
assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet'
def test_main(remote_resource, tmpdir, capsys):
images_path = str(tmpdir.mkdir('images'))
# First, pull down some test data
zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir)
# Extract it to the image directory
with zipfile.ZipFile(zip_path) as zfile:
zfile.extractall(images_path)
main.main(images_path, str(tmpdir.join('ocr_nl.db')))
stdout, _ = capsys.readouterr()
assert re.search(r'google was found with count', stdout)
| #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for main."""
import re
import zipfile
import main
_TEST_IMAGE_URI = 'gs://{}/language/image8.png'
def test_batch_empty():
for batch_size in range(1, 10):
assert len(
list(main.batch([], batch_size=batch_size))) == 0
def test_batch_single():
for batch_size in range(1, 10):
batched = tuple(main.batch([1], batch_size=batch_size))
assert batched == ((1,),)
def test_single_image_returns_text(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
assert image_path in texts
_, document = main.extract_description(texts[image_path])
assert "daughter" in document
assert "Bennet" in document
assert "hat" in document
def test_single_nonimage_returns_error():
vision_api_client = main.VisionApi()
texts = vision_api_client.detect_text(['README.md'])
assert "README.md" not in texts
def test_text_returns_entities():
text = "Holmes and Watson walked to the cafe."
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(text)
assert len(entities) == 2
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'holmes'
assert wurl == 'http://en.wikipedia.org/wiki/Sherlock_Holmes'
def test_entities_list(cloud_config):
vision_api_client = main.VisionApi()
image_path = _TEST_IMAGE_URI.format(cloud_config.storage_bucket)
texts = vision_api_client.detect_text([image_path])
locale, document = main.extract_description(texts[image_path])
text_analyzer = main.TextAnalyzer()
entities = text_analyzer.nl_detect(document)
assert entities
etype, ename, salience, wurl = text_analyzer.extract_entity_info(
entities[0])
assert ename == 'bennet'
assert wurl == 'http://en.wikipedia.org/wiki/Mr_Bennet'
def test_main(remote_resource, tmpdir, capsys):
images_path = str(tmpdir.mkdir('images'))
# First, pull down some test data
zip_path = remote_resource('language/ocr_nl-images-small.zip', tmpdir)
# Extract it to the image directory
with zipfile.ZipFile(zip_path) as zfile:
zfile.extractall(images_path)
main.main(images_path, str(tmpdir.join('ocr_nl.db')))
stdout, _ = capsys.readouterr()
assert re.search(r'google was found with count', stdout)
| Python | 0.999382 |
e959f849550fe4cfd2f2230c149a9bc0cb01bfe4 | bump version | jose/__init__.py | jose/__init__.py |
__version__ = "2.0.1"
__author__ = 'Michael Davis'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Michael Davis'
from .exceptions import JOSEError
from .exceptions import JWSError
from .exceptions import ExpiredSignatureError
from .exceptions import JWTError
|
__version__ = "2.0.0"
__author__ = 'Michael Davis'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Michael Davis'
from .exceptions import JOSEError
from .exceptions import JWSError
from .exceptions import ExpiredSignatureError
from .exceptions import JWTError
| Python | 0 |
7f38e297dcfc9a664af092f48a9dc596f5f6c27b | Fix PermissionError: [Errno 13] Permission denied on Windows | scipy/sparse/tests/test_matrix_io.py | scipy/sparse/tests/test_matrix_io.py | import os
import numpy as np
import tempfile
from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz
def _save_and_load(matrix):
fd, tmpfile = tempfile.mkstemp(suffix='.npz')
os.close(fd)
try:
save_npz(tmpfile, matrix)
loaded_matrix = load_npz(tmpfile)
finally:
os.remove(tmpfile)
return loaded_matrix
def _check_save_and_load(dense_matrix):
for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
matrix = matrix_class(dense_matrix)
loaded_matrix = _save_and_load(matrix)
assert_(type(loaded_matrix) is matrix_class)
assert_(loaded_matrix.shape == dense_matrix.shape)
assert_(loaded_matrix.dtype == dense_matrix.dtype)
assert_array_almost_equal(loaded_matrix.toarray(), dense_matrix)
def test_save_and_load_random():
N = 10
np.random.seed(0)
dense_matrix = np.random.random((N, N))
dense_matrix[dense_matrix > 0.7] = 0
_check_save_and_load(dense_matrix)
def test_save_and_load_empty():
dense_matrix = np.zeros((4,6))
_check_save_and_load(dense_matrix)
def test_save_and_load_one_entry():
dense_matrix = np.zeros((4,6))
dense_matrix[1,2] = 1
_check_save_and_load(dense_matrix)
if __name__ == "__main__":
run_module_suite()
| import numpy as np
import tempfile
from numpy.testing import assert_array_almost_equal, run_module_suite, assert_
from scipy.sparse import csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix, save_npz, load_npz
def _save_and_load(matrix):
with tempfile.NamedTemporaryFile(suffix='.npz') as file:
file = file.name
save_npz(file, matrix)
loaded_matrix = load_npz(file)
return loaded_matrix
def _check_save_and_load(dense_matrix):
for matrix_class in [csc_matrix, csr_matrix, bsr_matrix, dia_matrix, coo_matrix]:
matrix = matrix_class(dense_matrix)
loaded_matrix = _save_and_load(matrix)
assert_(type(loaded_matrix) is matrix_class)
assert_(loaded_matrix.shape == dense_matrix.shape)
assert_(loaded_matrix.dtype == dense_matrix.dtype)
assert_array_almost_equal(loaded_matrix.toarray(), dense_matrix)
def test_save_and_load_random():
N = 10
np.random.seed(0)
dense_matrix = np.random.random((N, N))
dense_matrix[dense_matrix > 0.7] = 0
_check_save_and_load(dense_matrix)
def test_save_and_load_empty():
dense_matrix = np.zeros((4,6))
_check_save_and_load(dense_matrix)
def test_save_and_load_one_entry():
dense_matrix = np.zeros((4,6))
dense_matrix[1,2] = 1
_check_save_and_load(dense_matrix)
if __name__ == "__main__":
run_module_suite()
| Python | 0 |
81b64f139dba88b744e6067f7a48ce1bdaff785c | Change variable names. | avenue/web.py | avenue/web.py | # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from flask import render_template, make_response, redirect
def url_generator():
'''This function acts on a list of URLs, a text rule for each URL,
and a function that says what to do to that text rule to serve a
page. The action_list associates a subset of URLs with a
particular function to be used as the action for that group.
'''
data = api.read_data('forum')
threads = data['threads']
def forum_set_tags():
'''Turns strings containing tag names into tag objects that
can be used to generate HTML/CSS renderings of the tag.
'''
for thread in threads:
for post in threads[thread]['posts']:
if 'tags' in post:
for i in range(len(post['tags'])):
post['tags'][i] = data['tags'][post['tags'][i]]
def forum_page(name):
'''Makes a forum page of the given thread name.
'''
thread = threads[name]
html = '%s :: %s :: %s' % (thread['title'], data['forum'], data['site'])
main = '%s -- %s' % (data['site'], data['forum'])
title = { 'html' : html,
'main' : main,
'thread' : thread['title'],
'url' : data['forum_url'] }
return render_template('forum.html',
style='night',
sidebar=data['navbar'],
title=title,
posts=thread['posts'],
threaded=thread['threaded'])
def setup_url_rule(urls, action):
'''Sets up URL rules, given a dictionary of urls and a function
that they will act on.
'''
def url_page_function(text):
'''Returns a function that is associated with the URL
page. This function is called when the URL page is
requested. The anonymous (lambda) function does a
particular action given a particular string, text. It's
set up this way because the text fed into the action
function is always the same for a particular web page.
'''
return lambda: action(text)
for url in urls:
app.add_url_rule(url, url, url_page_function(urls[url]))
forum_set_tags()
action_list = [('redirect', redirect),
('forum_urls', forum_page),
('css', lambda theme:
api.make_css(data['style'][theme]))]
for action in action_list:
setup_url_rule(data['urls'][action[0]], action[1])
| # -*- coding: utf-8 -*-
# Copyright (c) 2012 Michael Babich
# See LICENSE.txt or http://opensource.org/licenses/MIT
'''Acts as an interface between what Flask serves and what goes on in
the rest of the application.
'''
from avenue import app, api
from flask import render_template, make_response, redirect
def url_generator():
'''This function acts on a list of URLs, a text rule for each URL,
and a function that says what to do to that text rule to serve a
page. The action_list associates a subset of URLs with a
particular function to be used as the action for that group.
'''
data = api.read_data('forum')
threads = data['threads']
def forum_set_tags():
'''Turns strings containing tag names into tag objects that
can be used to generate HTML/CSS renderings of the tag.
'''
for thread in threads:
for post in threads[thread]['posts']:
if 'tags' in post:
for i in range(len(post['tags'])):
post['tags'][i] = data['tags'][post['tags'][i]]
def forum_page(name):
'''Makes a forum page of the given thread name.
'''
thread = threads[name]
html_title = '%s :: %s :: %s' % (thread['title'], data['forum'],
data['site'])
main_title = '%s -- %s' % (data['site'], data['forum'])
title = { 'html' : html_title,
'main' : main_title,
'thread' : thread['title'],
'url' : data['forum_url'] }
return render_template('forum.html',
style='night',
sidebar=data['navbar'],
title=title,
posts=thread['posts'],
threaded=thread['threaded'])
def setup_url_rule(urls, action):
'''Sets up URL rules, given a dictionary of urls and a function
that they will act on.
'''
def url_page_function(text):
'''Returns a function that is associated with the URL
page. This function is called when the URL page is
requested. The anonymous (lambda) function does a
particular action given a particular string, text. It's
set up this way because the text fed into the action
function is always the same for a particular web page.
'''
return lambda: action(text)
for url in urls:
app.add_url_rule(url, url, url_page_function(urls[url]))
forum_set_tags()
action_list = [('redirect', redirect),
('forum_urls', forum_page),
('css', lambda theme:
api.make_css(data['style'][theme]))]
for action in action_list:
setup_url_rule(data['urls'][action[0]], action[1])
| Python | 0.000001 |
bc62bd28340d27fbfde164ea3c2f184922ddb9e9 | add Spirent like profile | scripts/astf/http_manual_tunables.py | scripts/astf/http_manual_tunables.py | # Example for creating your program by specifying buffers to send, without relaying on pcap file
from trex_astf_lib.api import *
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>'+('*'*11*1024)+'</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def create_profile(self):
# client commands
prog_c = ASTFProgram()
prog_c.connect();
prog_c.send(http_req)
prog_c.recv(len(http_response))
prog_c.delay(10);
prog_s = ASTFProgram()
prog_s.recv(len(http_req))
prog_s.delay(10);
prog_s.send(http_response)
prog_s.wait_for_peer_close()
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
info = ASTFGlobalInfo()
info.tcp.mss = 1100
info.tcp.rxbufsize = 1102 # split the buffer to MSS and ack every buffer, no need the no_delay option
info.tcp.txbufsize = 1100
info.tcp.initwnd = 1
#info.tcp.no_delay = 1
info.tcp.do_rfc1323 =0
# template
temp_c = ASTFTCPClientTemplate(program=prog_c, ip_gen=ip_gen)
temp_s = ASTFTCPServerTemplate(program=prog_s) # using default association
template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
# profile
profile = ASTFProfile(default_ip_gen=ip_gen, templates=template,
default_c_glob_info=info,
default_s_glob_info=info)
return profile
def get_profile(self, **kwargs):
return self.create_profile()
def register():
return Prof1()
| # Example for creating your program by specifying buffers to send, without relaying on pcap file
from trex_astf_lib.api import *
# we can send either Python bytes type as below:
http_req = b'GET /3384 HTTP/1.1\r\nHost: 22.0.0.3\r\nConnection: Keep-Alive\r\nUser-Agent: Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)\r\nAccept: */*\r\nAccept-Language: en-us\r\nAccept-Encoding: gzip, deflate, compress\r\n\r\n'
# or we can send Python string containing ascii chars, as below:
http_response = 'HTTP/1.1 200 OK\r\nServer: Microsoft-IIS/6.0\r\nContent-Type: text/html\r\nContent-Length: 32000\r\n\r\n<html><pre>'+('*'*11*1024)+'</pre></html>'
class Prof1():
def __init__(self):
pass # tunables
def create_profile(self):
# client commands
prog_c = ASTFProgram()
prog_c.connect();
prog_c.send(http_req)
prog_c.recv(len(http_response))
prog_c.delay(10);
prog_s = ASTFProgram()
prog_s.recv(len(http_req))
prog_s.send(http_response)
prog_s.wait_for_peer_close()
# ip generator
ip_gen_c = ASTFIPGenDist(ip_range=["16.0.0.0", "16.0.0.255"], distribution="seq")
ip_gen_s = ASTFIPGenDist(ip_range=["48.0.0.0", "48.0.255.255"], distribution="seq")
ip_gen = ASTFIPGen(glob=ASTFIPGenGlobal(ip_offset="1.0.0.0"),
dist_client=ip_gen_c,
dist_server=ip_gen_s)
info = ASTFGlobalInfo()
info.tcp.mss = 1100
info.tcp.initwnd = 1
info.tcp.no_delay = 1
# template
temp_c = ASTFTCPClientTemplate(program=prog_c, ip_gen=ip_gen)
temp_s = ASTFTCPServerTemplate(program=prog_s) # using default association
template = ASTFTemplate(client_template=temp_c, server_template=temp_s)
# profile
profile = ASTFProfile(default_ip_gen=ip_gen, templates=template,
default_c_glob_info=info,
default_s_glob_info=info)
return profile
def get_profile(self, **kwargs):
return self.create_profile()
def register():
return Prof1()
| Python | 0 |
b1277cd79102a30a894e370ab15773e6d86569ec | fix n/a causing issues for OT0010 ingest, sigh | scripts/ingestors/other/parse0010.py | scripts/ingestors/other/parse0010.py | """ISU Agronomy Hall Vantage Pro 2 OT0010"""
from __future__ import print_function
import datetime
import re
import os
import sys
import pytz
from pyiem.datatypes import speed, temperature, humidity
from pyiem.observation import Observation
from pyiem.meteorology import dewpoint
from pyiem.util import get_dbconn
def main():
"""Go Main Go"""
iemaccess = get_dbconn('iem')
cursor = iemaccess.cursor()
valid = datetime.datetime.utcnow()
valid = valid.replace(tzinfo=pytz.utc)
valid = valid.astimezone(pytz.timezone("America/Chicago"))
fn = valid.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/text/ot/ot0010.dat")
if not os.path.isfile(fn):
sys.exit(0)
lines = open(fn, "r").readlines()
lastline = lines[-1].strip()
tokens = re.split(r"[\s+]+", lastline)
if len(tokens) != 20:
return
tparts = re.split(":", tokens[3])
valid = valid.replace(hour=int(tparts[0]),
minute=int(tparts[1]), second=0, microsecond=0)
iem = Observation("OT0010", "OT", valid)
iem.data['tmpf'] = float(tokens[4])
iem.data['max_tmpf'] = float(tokens[5])
iem.data['min_tmpf'] = float(tokens[6])
iem.data['relh'] = int(tokens[7])
iem.data['dwpf'] = dewpoint(temperature(iem.data['tmpf'], 'F'),
humidity(iem.data['relh'], '%')).value("F")
iem.data['sknt'] = speed(float(tokens[8]), 'mph').value('KT')
iem.data['drct'] = int(tokens[9])
iem.data['max_sknt'] = speed(float(tokens[10]), 'mph').value('KT')
iem.data['alti'] = float(tokens[12])
iem.data['pday'] = float(tokens[13])
iem.data['srad'] = None if tokens[18] == 'n/a' else float(tokens[18])
iem.save(cursor)
cursor.close()
iemaccess.commit()
if __name__ == '__main__':
main()
| """ISU Agronomy Hall Vantage Pro 2 OT0010"""
from __future__ import print_function
import datetime
import re
import os
import sys
import pytz
from pyiem.datatypes import speed, temperature, humidity
from pyiem.observation import Observation
from pyiem.meteorology import dewpoint
from pyiem.util import get_dbconn
def main():
"""Go Main Go"""
iemaccess = get_dbconn('iem')
cursor = iemaccess.cursor()
valid = datetime.datetime.utcnow()
valid = valid.replace(tzinfo=pytz.utc)
valid = valid.astimezone(pytz.timezone("America/Chicago"))
fn = valid.strftime("/mesonet/ARCHIVE/data/%Y/%m/%d/text/ot/ot0010.dat")
if not os.path.isfile(fn):
sys.exit(0)
lines = open(fn, "r").readlines()
lastline = lines[-1].strip()
tokens = re.split(r"[\s+]+", lastline)
if len(tokens) != 20:
return
tparts = re.split(":", tokens[3])
valid = valid.replace(hour=int(tparts[0]),
minute=int(tparts[1]), second=0, microsecond=0)
iem = Observation("OT0010", "OT", valid)
iem.data['tmpf'] = float(tokens[4])
iem.data['max_tmpf'] = float(tokens[5])
iem.data['min_tmpf'] = float(tokens[6])
iem.data['relh'] = int(tokens[7])
iem.data['dwpf'] = dewpoint(temperature(iem.data['tmpf'], 'F'),
humidity(iem.data['relh'], '%')).value("F")
iem.data['sknt'] = speed(float(tokens[8]), 'mph').value('KT')
iem.data['drct'] = int(tokens[9])
iem.data['max_sknt'] = speed(float(tokens[10]), 'mph').value('KT')
iem.data['alti'] = float(tokens[12])
iem.data['pday'] = float(tokens[13])
iem.data['srad'] = float(tokens[18])
iem.save(cursor)
cursor.close()
iemaccess.commit()
if __name__ == '__main__':
main()
| Python | 0 |
c7a79f81734f360a232b2f91630872ad56a1ffa4 | clean up audio init | amen/audio.py | amen/audio.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import librosa
from amen.timing_list import TimingList
class Audio(object):
    """
    Audio object: should wrap the output from libRosa.
    """

    def __init__(self, file_path, convert_to_mono=False, sample_rate=22050):
        """
        Opens a file path, loads it with librosa.

        file_path: audio file to load
        convert_to_mono: downmix to a single channel while loading
        sample_rate: target sample rate handed to librosa.load
        """
        self.file_path = file_path
        y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
        self.sample_rate = float(sr)
        self.raw_samples = y
        # A 1-D sample array means mono, a 2-D (channels, samples) array
        # means multichannel, so ndim doubles as the channel count.
        self.num_channels = y.ndim
        self.duration = librosa.get_duration(y=y, sr=sr)
        self.timings = self.create_timings()

    def create_timings(self):
        """Build the dict of named TimingLists (currently only 'beats')."""
        timings = {}
        timings['beats'] = TimingList('beats', self.get_beats(), self)
        return timings

    def get_beats(self):
        """Track beats and return them as a list of (start, duration) tuples.

        NOTE(review): assumes beat_track finds at least one beat; for silent
        input beat_times would be empty and the trailing [-1] index raises —
        confirm whether callers guard against that.
        """
        y_mono = librosa.to_mono(self.raw_samples)
        tempo, beat_frames = librosa.beat.beat_track(
            y=y_mono, sr=self.sample_rate, trim=False)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.sample_rate)

        # make the list of (start, duration)s that TimingList expects
        starts_durs = []
        for i, start in enumerate(beat_times[:-1]):
            starts_durs.append((start, beat_times[i+1] - start))
        # now get the last one: it extends to the end of the track
        starts_durs.append((beat_times[-1], self.duration - beat_times[-1]))
        return starts_durs
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import librosa
from amen.timing_list import TimingList
class Audio(object):
    """
    Audio object: should wrap the output from libRosa.
    """

    def __init__(self, file_path, convert_to_mono=False, sample_rate=22050):
        """
        Opens a file path, loads it with librosa.

        file_path: audio file to load
        convert_to_mono: downmix to a single channel while loading
        sample_rate: target sample rate handed to librosa.load
        """
        self.file_path = file_path
        y, sr = librosa.load(file_path, mono=convert_to_mono, sr=sample_rate)
        self.sample_rate = float(sr)
        self.raw_samples = y
        # librosa returns a 1-D array for mono and a 2-D (channels, samples)
        # array otherwise, so ndim is the real channel count. Deriving it
        # from the convert_to_mono flag was wrong for mono source files
        # loaded with convert_to_mono=False.
        self.num_channels = y.ndim
        # len(y) is the number of CHANNELS for a 2-D stereo array, so the
        # previous len(raw_samples) / sample_rate drastically under-reported
        # the duration; let librosa compute it from the sample data.
        self.duration = librosa.get_duration(y=y, sr=sr)
        self.timings = self.create_timings()

    def create_timings(self):
        """Build the dict of named TimingLists (currently only 'beats')."""
        timings = {}
        timings['beats'] = TimingList('beats', self.get_beats(), self)
        return timings

    def get_beats(self):
        """Track beats and return them as a list of (start, duration) tuples."""
        y_mono = librosa.to_mono(self.raw_samples)
        tempo, beat_frames = librosa.beat.beat_track(
            y=y_mono, sr=self.sample_rate, trim=False)

        # convert frames to times
        beat_times = librosa.frames_to_time(beat_frames, sr=self.sample_rate)

        # make the list of (start, duration)s that TimingList expects
        starts_durs = []
        for i, start in enumerate(beat_times[:-1]):
            starts_durs.append((start, beat_times[i+1] - start))
        # now get the last one: it extends to the end of the track
        starts_durs.append((beat_times[-1], self.duration - beat_times[-1]))
        return starts_durs
| Python | 0.000025 |
4aca30e376b2310e2436fdb799bf3cae1c9a1d2b | Define global variables to clean up tests | dakota_utils/tests/test_file.py | dakota_utils/tests/test_file.py | #! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
# Shared fixture names: `nondir`/`nonfile` deliberately never exist on disk;
# `bname` is the basename used for files/directories the tests create.
nondir = 'vwbwguv00240cnwuncdsv'
nonfile = nondir + '.pro'
bname = 'delete_me'


def setup_module():
    """nose module fixture: create a scratch directory for the tests."""
    print('File tests:')
    # The scratch path is handed to each test through the process environment.
    os.environ['_test_tmp_dir'] = tempfile.mkdtemp()


def teardown_module():
    """nose module fixture: remove the scratch directory."""
    shutil.rmtree(os.environ['_test_tmp_dir'])


@raises(TypeError)
def test_remove_zero_arguments():
    '''
    Tests for no input parameter to remove().
    '''
    remove()


@raises(TypeError)
def test_remove_file_zero_arguments():
    '''
    Tests for no input parameter to remove_file().
    '''
    remove_file()


@raises(TypeError)
def test_remove_directory_zero_arguments():
    '''
    Tests for no input parameter to remove_directory().
    '''
    remove_directory()


@raises(TypeError)
def test_touch_zero_arguments():
    '''
    Tests for no input parameter to touch().
    '''
    touch()


def test_remove_file_does_not_exist():
    '''
    Tests deleting a nonexistent file with remove_file().
    '''
    remove_file(os.path.join(os.environ['_test_tmp_dir'], nonfile))


def test_remove_directory_does_not_exist():
    '''
    Tests deleting a nonexistent directory with remove_directory().
    '''
    remove_directory(os.path.join(os.environ['_test_tmp_dir'], nondir))


def test_remove_does_not_exist():
    '''
    Tests deleting a nonexistent file or directory with remove().
    '''
    remove(os.path.join(os.environ['_test_tmp_dir'], nonfile))
    remove(os.path.join(os.environ['_test_tmp_dir'], nondir))


def test_remove_file():
    '''
    Tests that remove_file() deletes a file.
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], bname)
    touch(fname)
    remove_file(fname)
    assert_false(os.path.exists(fname))


def test_remove_directory():
    '''
    Tests that remove_directory() deletes a directory.
    '''
    dname = os.path.join(os.environ['_test_tmp_dir'], bname)
    os.mkdir(dname)
    remove_directory(dname)
    assert_false(os.path.exists(dname))


def test_remove_a_file():
    '''
    Tests that remove() deletes a file. (Uses touch)
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], bname)
    touch(fname)
    remove(fname)
    assert_false(os.path.exists(fname))


def test_remove_a_directory():
    '''
    Tests that remove() deletes a directory.
    '''
    dname = os.path.join(os.environ['_test_tmp_dir'], bname)
    os.mkdir(dname)
    remove(dname)
    assert_false(os.path.exists(dname))


def test_touch():
    '''
    Tests that touch() makes a file. (Uses remove)
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], 'a_file')
    touch(fname)
    assert_true(os.path.exists(fname))
    remove(fname)
| #! /usr/bin/env python
#
# Tests for dakota_utils.file.
#
# Call with:
# $ nosetests -sv
#
# Mark Piper (mark.piper@colorado.edu)
from nose.tools import *
import os
import tempfile
import shutil
from dakota_utils.file import *
# Shared fixture names hoisted to module scope so every test uses one
# definition instead of re-declaring the same string literals locally.
nondir = 'vwbwguv00240cnwuncdsv'   # a name that never exists on disk
bname = 'delete_me'                # basename for files/dirs the tests create


def setup_module():
    """nose module fixture: create a scratch directory for the tests."""
    print('File tests:')
    # The scratch path is handed to each test through the process environment.
    os.environ['_test_tmp_dir'] = tempfile.mkdtemp()


def teardown_module():
    """nose module fixture: remove the scratch directory."""
    shutil.rmtree(os.environ['_test_tmp_dir'])


@raises(TypeError)
def test_remove_zero_arguments():
    '''
    Tests for no input parameter to remove().
    '''
    remove()


@raises(TypeError)
def test_remove_file_zero_arguments():
    '''
    Tests for no input parameter to remove_file().
    '''
    remove_file()


@raises(TypeError)
def test_remove_directory_zero_arguments():
    '''
    Tests for no input parameter to remove_directory().
    '''
    remove_directory()


@raises(TypeError)
def test_touch_zero_arguments():
    '''
    Tests for no input parameter to touch().
    '''
    touch()


def test_remove_file_does_not_exist():
    '''
    Tests deleting a nonexistent file with remove_file().
    '''
    remove_file(os.path.join(os.environ['_test_tmp_dir'], nondir))


def test_remove_directory_does_not_exist():
    '''
    Tests deleting a nonexistent directory with remove_directory().
    '''
    remove_directory(os.path.join(os.environ['_test_tmp_dir'], nondir))


def test_remove_does_not_exist():
    '''
    Tests deleting a nonexistent file or directory with remove().
    '''
    remove(os.path.join(os.environ['_test_tmp_dir'], nondir))


def test_remove_file():
    '''
    Tests that remove_file() deletes a file.
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], bname)
    touch(fname)
    remove_file(fname)
    assert_false(os.path.exists(fname))


def test_remove_directory():
    '''
    Tests that remove_directory() deletes a directory.
    '''
    dname = os.path.join(os.environ['_test_tmp_dir'], bname)
    os.mkdir(dname)
    remove_directory(dname)
    assert_false(os.path.exists(dname))


def test_remove_a_file():
    '''
    Tests that remove() deletes a file. (Uses touch)
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], bname)
    touch(fname)
    remove(fname)
    assert_false(os.path.exists(fname))


def test_remove_a_directory():
    '''
    Tests that remove() deletes a directory.
    '''
    dname = os.path.join(os.environ['_test_tmp_dir'], bname)
    os.mkdir(dname)
    remove(dname)
    assert_false(os.path.exists(dname))


def test_touch():
    '''
    Tests that touch() makes a file. (Uses remove)
    '''
    fname = os.path.join(os.environ['_test_tmp_dir'], 'a_file')
    touch(fname)
    assert_true(os.path.exists(fname))
    remove(fname)
| Python | 0.000003 |
1c56aeb3d96dbb26da62203d690b4ff49b4b5c0e | bump version to 0.5.2 | abstar/version.py | abstar/version.py | # Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
__version__ = '0.5.2' | # Store the version here so:
# 1) we don't load dependencies by storing it in __init__.py
# 2) we can import it in setup.py for the same reason
# 3) we can import it into your module module
__version__ = '0.5.1' | Python | 0.000001 |
609cffb674ba0494bbe450d8ce7839168a3d5a0a | remove unnecessary code from forms | accounts/forms.py | accounts/forms.py | # -*- coding: utf-8 -*-
from django.contrib.auth import get_user_model
from django import forms
from django.utils.translation import ugettext_lazy as _
User = get_user_model()
class ProfileEditForm(forms.ModelForm):
    """Profile form letting a user edit their details and, optionally, set a
    new password (leaving both password fields blank keeps the current one).
    """

    email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
    password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
    password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name', 'picture',
                  'occupation', 'city', 'site', 'biography',)

    def clean_username(self):
        # The username is effectively read-only: any submitted value is
        # discarded in favour of the stored one.
        return self.instance.username

    def clean_password2(self):
        """Ensure the two password fields agree when both were filled in."""
        first = self.cleaned_data.get('password1')
        repeat = self.cleaned_data.get('password2')
        if first and repeat and first != repeat:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return repeat

    def save(self, commit=True):
        """Hash and store the new password (when one was given) before saving."""
        new_password = self.cleaned_data['password1']
        if new_password:
            self.instance.set_password(new_password)
        return super(ProfileEditForm, self).save(commit=commit)
| # -*- coding: utf-8 -*-
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
else:
User = get_user_model()
from django import forms
from django.utils.translation import ugettext_lazy as _
class ProfileEditForm(forms.ModelForm):
    """Profile form letting a user edit their details and, optionally, set a
    new password (leaving both password fields blank keeps the current one).
    """

    email = forms.RegexField(label=_("email"), max_length=75, regex=r"^[\w.@+-]+$")
    password1 = forms.CharField(widget=forms.PasswordInput, label=_("Password"), required=False)
    password2 = forms.CharField(widget=forms.PasswordInput, label=_("Password (again)"), required=False)

    class Meta:
        model = User
        fields = ('username', 'email', 'first_name', 'last_name', 'picture',
                  'occupation', 'city', 'site', 'biography',)

    def clean_username(self):
        # Username is read-only: discard any submitted value.
        return self.instance.username

    def clean_password2(self):
        """Ensure both password fields agree when both were filled in."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2:
            if password1 != password2:
                raise forms.ValidationError(_("The two password fields didn't match."))
        return password2

    def save(self, commit=True):
        """Hash and store the new password (when given) before saving."""
        if self.cleaned_data['password1']:
            self.instance.set_password(self.cleaned_data['password1'])
        return super(ProfileEditForm, self).save(commit=commit)
| Python | 0.000007 |
67be76a3d65fa846c8888ef5415ec3df5ef9ab87 | Add test for expired tokens | accounts/tests.py | accounts/tests.py | """accounts app unittests
"""
import base64
from time import sleep
from django.contrib.auth import get_user_model
from django.test import TestCase
from accounts.token import LoginTokenGenerator
TEST_EMAIL = 'newvisitor@example.com'
class WelcomePageTest(TestCase):
    """Exercises the welcome_page view served at the site root."""

    def test_uses_welcome_template(self):
        """A GET of the root URL should render the welcome template."""
        root_response = self.client.get('/')
        self.assertTemplateUsed(root_response, 'accounts/welcome.html')
class UserModelTest(TestCase):
    """Checks on the passwordless user model."""

    def test_user_valid_with_only_email(self):
        """Validation must succeed when only an email address is supplied."""
        UserModel = get_user_model()
        visitor = UserModel(email=TEST_EMAIL)
        visitor.full_clean()

    def test_users_are_authenticated(self):
        """Every user object should count as authenticated in views/templates."""
        UserModel = get_user_model()
        visitor = UserModel()
        self.assertTrue(visitor.is_authenticated())
class TokenGeneratorTest(TestCase):
    """Tests for login token model.
    """

    def setUp(self):
        # A fresh generator per test; tokens are timestamped, hence the
        # sleep(1) calls below (they make this suite deliberately slow).
        self.generator = LoginTokenGenerator()

    def test_unique_tokens_generated(self):
        """Tokens generated one second apart should differ.
        """
        token1 = self.generator.create_token(TEST_EMAIL)
        sleep(1)
        token2 = self.generator.create_token(TEST_EMAIL)
        self.assertNotEqual(token1, token2)

    def test_email_recovered_from_token(self):
        """A consumed token should yield the original email address.
        """
        token = self.generator.create_token(TEST_EMAIL)
        email = self.generator.consume_token(token)
        self.assertEqual(email, TEST_EMAIL)

    def test_modified_token_fails(self):
        """A modified token returns None instead of an email.
        """
        token = self.generator.create_token(TEST_EMAIL)

        # Modify the email address which is 'signed': decode the token,
        # replace the local part of the address, and re-encode it.
        split_token = base64.urlsafe_b64decode(
            token.encode()
        ).decode().split('@')
        split_token[0] = 'maliciousvisitor'
        malicious_token = base64.urlsafe_b64encode(
            '@'.join(split_token).encode()
        ).decode()

        # The tampered payload no longer matches its signature.
        self.assertIsNone(self.generator.consume_token(malicious_token))

    def test_expired_token_fails(self):
        """A token which has expired returns None instead of an email.
        """
        token = self.generator.create_token(TEST_EMAIL)
        sleep(1)  # Ensure the token is more than 0 seconds old.
        # max age of 0 seconds means any elapsed time expires the token
        email = self.generator.consume_token(token, 0)
        self.assertIsNone(email)
| """accounts app unittests
"""
import base64
from time import sleep
from django.contrib.auth import get_user_model
from django.test import TestCase
from accounts.token import LoginTokenGenerator
TEST_EMAIL = 'newvisitor@example.com'
class WelcomePageTest(TestCase):
    """Tests relating to the welcome_page view.
    """

    def test_uses_welcome_template(self):
        """The root url should response with the welcome page template.
        """
        response = self.client.get('/')
        self.assertTemplateUsed(response, 'accounts/welcome.html')


class UserModelTest(TestCase):
    """Tests for passwordless user model.
    """

    def test_user_valid_with_only_email(self):
        """Should not raise if the user model is happy with email only.
        """
        user = get_user_model()(email=TEST_EMAIL)
        user.full_clean()

    def test_users_are_authenticated(self):
        """User objects should be authenticated for views/templates.
        """
        user = get_user_model()()
        self.assertTrue(user.is_authenticated())


class TokenGeneratorTest(TestCase):
    """Tests for login token model.
    """

    def setUp(self):
        # A fresh generator per test; tokens are timestamped, hence the
        # sleep(1) call below (it makes this suite deliberately slow).
        self.generator = LoginTokenGenerator()

    def test_unique_tokens_generated(self):
        """Tokens generated one second apart should differ.
        """
        token1 = self.generator.create_token(TEST_EMAIL)
        sleep(1)
        token2 = self.generator.create_token(TEST_EMAIL)
        self.assertNotEqual(token1, token2)

    def test_email_recovered_from_token(self):
        """A consumed token should yield the original email address.
        """
        token = self.generator.create_token(TEST_EMAIL)
        email = self.generator.consume_token(token)
        self.assertEqual(email, TEST_EMAIL)

    def test_modified_token_fails(self):
        """A modified token returns None instead of an email.
        """
        token = self.generator.create_token(TEST_EMAIL)
        # Decode the token, swap the local part of the signed address,
        # and re-encode; the signature check must then fail.
        split_token = base64.urlsafe_b64decode(
            token.encode()
        ).decode().split('@')
        split_token[0] = 'maliciousvisitor'
        malicious_token = base64.urlsafe_b64encode(
            '@'.join(split_token).encode()
        ).decode()
        self.assertIsNone(self.generator.consume_token(malicious_token))
5a4f05cb0f3a00a2d4faf828bd7850085c302541 | Implement functionality to delete logs created by digital justice users | cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py | cla_backend/apps/cla_eventlog/management/commands/find_and_delete_old_cases.py | import sys
from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta
from legalaid.models import Case
from cla_eventlog.models import Log
from cla_butler.tasks import DeleteOldData
class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
    """DeleteOldData variant whose queries key off case creation time."""

    def get_eligible_cases(self):
        """Cases created 2+ years ago with no log activity in the last 2 years."""
        two_years = self.now - relativedelta(years=2)
        return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)

    def get_digital_justice_user_logs(self):
        """Logs created by users with a digital.justice.gov.uk email address."""
        return Log.objects.filter(created_by__email__endswith="digital.justice.gov.uk")
class Command(BaseCommand):
    help = """
    Use cases:
    1. Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed
    2. Delete logs created by users with a @digital.justice.gov.uk email
    """

    def handle_test_command(self, args, cases):
        """Non-interactive dispatch used when invoked from the test runner."""
        # Querysets are lazy, so building this is free even for 'delete'.
        digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
        if args[0] == "delete":
            self.instance.run()
        elif args[0] == "delete-logs":
            # NOTE(review): reaches into a private member of DeleteOldData.
            self.instance._delete_objects(digital_justice_user_logs)

    def handle_terminal_command(self, args, cases):
        """Interactive dispatch: prompts for confirmation unless 'no-input'."""
        digital_justice_user_logs = self.instance.get_digital_justice_user_logs()
        if args[0] == "delete":
            if len(args) > 1 and args[1] == "no-input":
                self.instance.run()
            else:
                # raw_input: this module targets Python 2.
                answer = raw_input(
                    "Number of cases that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                        cases.count()
                    )
                )
                if answer == "Yes":
                    self.instance.run()
        elif args[0] == "delete-logs":
            answer = raw_input(
                "Number of digital justice user logs that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                    digital_justice_user_logs.count()
                )
            )
            if answer == "Yes":
                self.instance._delete_objects(digital_justice_user_logs)

    def handle(self, *args, **kwargs):
        """Entry point: route to test/terminal handling; with no args, report
        (terminal) or return (test) the eligible cases without deleting."""
        self.instance = FindAndDeleteCasesUsingCreationTime()
        cases = self.instance.get_eligible_cases()
        django_command = sys.argv[1]
        if django_command == "test":  # If command is run in test
            if args:
                self.handle_test_command(args, cases)
            else:
                return cases
        else:  # If command is run in terminal
            if args:
                self.handle_terminal_command(args, cases)
            else:
                print("Number of cases to be deleted: " + str(cases.count()))
| import sys
from django.core.management.base import BaseCommand
from dateutil.relativedelta import relativedelta
from legalaid.models import Case
from cla_butler.tasks import DeleteOldData
class FindAndDeleteCasesUsingCreationTime(DeleteOldData):
    """DeleteOldData variant whose query keys off case creation time."""

    def get_eligible_cases(self):
        """Cases created 2+ years ago with no log activity in the last 2 years."""
        two_years = self.now - relativedelta(years=2)
        return Case.objects.filter(created__lte=two_years).exclude(log__created__gte=two_years)


class Command(BaseCommand):
    help = (
        "Find or delete cases that are 2 years old or over that were not deleted prior to the task command being fixed"
    )

    def handle(self, *args, **kwargs):
        """Entry point: delete eligible cases (with confirmation unless
        'no-input' follows 'delete'); with no args, report (terminal) or
        return (test) the eligible cases without deleting."""
        instance = FindAndDeleteCasesUsingCreationTime()
        cases = instance.get_eligible_cases()
        django_command = sys.argv[1]
        if django_command == "test":  # If command is run in test
            if args and args[0] == "delete":
                instance.run()
            else:
                return cases
        else:  # If command is run in terminal
            if args and args[0] == "delete":
                if len(args) > 1 and args[1] == "no-input":
                    instance.run()
                else:
                    # raw_input: this module targets Python 2.
                    answer = raw_input(
                        "Number of cases that will be deleted: {0}\nAre you sure about this? (Yes/No) ".format(
                            cases.count()
                        )
                    )
                    if answer == "Yes":
                        instance.run()
            else:
                print("Number of cases to be deleted: " + str(cases.count()))
| Python | 0.002474 |
f5c5c7de8af6ae5251ac1d878569c2692e119a04 | Set the login/logout URLs so authentication works. | adapt/settings.py | adapt/settings.py | """
Django settings for adapt project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'changeme')
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'adapt',
'clients',
'reports',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'adapt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adapt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Detroit'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Parse database configuration from $DATABASE_URL
import dj_database_url
db_config = dj_database_url.config()
if db_config:
DATABASES['default'] = db_config
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
if os.getenv('DEV'):
DEBUG = True
else:
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60*60*24*365
LOGIN_URL = '/admin/login/'
LOGOUT_URL = '/admin/logout/' | """
Django settings for adapt project.
Generated by 'django-admin startproject' using Django 1.8.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY', 'changeme')
# SECURITY WARNING: don't run with debug turned on in production!
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'import_export',
'adapt',
'clients',
'reports',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'adapt.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates'),],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'adapt.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Detroit'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# Parse database configuration from $DATABASE_URL
import dj_database_url
db_config = dj_database_url.config()
if db_config:
DATABASES['default'] = db_config
# Honor the 'X-Forwarded-Proto' header for request.is_secure()
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# Allow all host headers
ALLOWED_HOSTS = ['*']
# Static asset configuration
import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
STATIC_ROOT = 'staticfiles'
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, 'static'),
)
if os.getenv('DEV'):
DEBUG = True
else:
SECURE_SSL_REDIRECT = True
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SECURE_HSTS_SECONDS = 60*60*24*365
| Python | 0 |
b9c953cffd0c9961c22c0c671648f5e5a3e4426c | Update server | alchemy_server.py | alchemy_server.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from flask import Flask, jsonify
import os
from models import Charity, Logo, Description
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
@app.route("/gci")
def gci():
    """Return charity rows (joined with logo/description, face-free logos
    only) plus the distinct single-value category list, as JSON."""
    global session
    query = session.query(Charity, Description.description, Logo.logo_url, Logo.has_face)\
        .join(Logo, Charity.name == Logo.name)\
        .join(Description, Charity.name == Description.name)
    charities = pd.read_sql(query.statement, con=session.bind, index_col='name')
    # Keep only charities whose logo has no detected face.
    charities = charities[charities['has_face'] == False]
    # DataFrame.drop is NOT in-place: the result must be reassigned,
    # otherwise the 'has_face' column leaks into the JSON payload.
    charities = charities.drop('has_face', axis=1)
    query = session.query(Charity.category).distinct()
    categories = pd.read_sql(query.statement, con=session.bind)
    # Drop compound (comma-separated) category strings.
    categories = categories[~categories['category'].str.contains(',')]
    payload = {'categories': categories.values.tolist(), 'charities': charities.to_dict('index')}
    return jsonify(payload)
if __name__ == "__main__":
    # Create the engine/session once at startup; gci() reaches this
    # module-level `session` through its `global` statement.
    db = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])
    Session = sessionmaker(bind=db)
    session = Session()
    app.run(host='0.0.0.0')
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 30 01:14:12 2017
@author: colm
"""
from flask import Flask, jsonify
import os
from models import Charity, Logo, Description
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
import pandas as pd
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ['SQLALCHEMY_DATABASE_URI']
@app.route("/gci")
def gci():
    """Return all charities plus the distinct category list as JSON."""
    global session
    # SQLAlchemy's Query API has no `leftjoin`; a LEFT OUTER JOIN is spelled
    # `outerjoin`. The previous `leftjoin` call raised AttributeError.
    query = session.query(Charity)\
        .outerjoin(Logo, Charity.name == Logo.name)\
        .outerjoin(Description, Charity.name == Description.name)
    # `query.statement` (previously misspelled `statment`) is the SELECT
    # handed to pandas.
    charities = pd.read_sql(query.statement, session.bind)
    query = session.query(Charity.category).distinct()
    categories = pd.read_sql(query.statement, session.bind)
    # DataFrame.values is a property yielding an ndarray (calling it raised
    # TypeError); convert to plain lists so jsonify can serialize it.
    payload = {'categories': categories.values.tolist(), 'charities': charities.to_dict('index')}
    return jsonify(payload)
if __name__ == "__main__":
    # Create the engine/session once at startup; gci() reaches this
    # module-level `session` through its `global` statement.
    db = create_engine(os.environ['SQLALCHEMY_DATABASE_URI'])
    Session = sessionmaker(bind=db)
    session = Session()
    app.run(host='0.0.0.0')
    # Stray debug `print('test')` removed: app.run() blocks, so it only
    # ever executed after server shutdown and served no purpose.
| Python | 0.000001 |
434e459059bba2a1e52e953813caae532a3cb16b | Update test_consume_4 | test_wordcount.py | test_wordcount.py | import os.path
import tempfile
import wordcount_lib
def _make_testfile(filename, data):
"Make a temp file containing the given data; return full path to file."
tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
testfile = os.path.join(tempdir, filename)
with open(testfile, 'wt') as fp:
fp.write(data)
return testfile
def test_consume_1():
    # do a basic test of the consume function.
    # 'a b cc\nddd' -> 10 characters, 4 words, 2 lines
    testfile = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10
    assert words == 4
    assert lines == 2


def test_consume_2():
    # do another basic test of the consume function.
    # 'a\nb\ncc\nddd\ne' -> 12 characters, 5 words, 5 lines
    testfile = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 12  # includes whitespace in char count
    assert words == 5
    assert lines == 5


def test_consume_3():
    # check something tricky: whitespace at beginning & end of line
    # ' a b c ' -> 7 characters, 3 words, 1 line
    testfile = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 7  # includes whitespace in char count
    assert words == 3
    assert lines == 1
def test_consume_4():
# check something tricky: whitespace at beginning & end of line
testfile = _make_testfile('sometext.txt', ' a b c d e')
chars, words, lines = wordcount_lib.consume(testfile)
assert chars == 10 # includes whitespace in char count
assert words == 5
assert lines == 1 | import os.path
import tempfile
import wordcount_lib
def _make_testfile(filename, data):
    "Make a temp file containing the given data; return full path to file."
    # Fresh temp dir per call, so parallel tests never collide on the name.
    tempdir = tempfile.mkdtemp(prefix='wordcounttest_')
    testfile = os.path.join(tempdir, filename)

    with open(testfile, 'wt') as fp:
        fp.write(data)

    return testfile
def test_consume_1():
    # do a basic test of the consume function.
    # 'a b cc\nddd' -> 10 characters, 4 words, 2 lines
    testfile = _make_testfile('sometext.txt', 'a b cc\nddd')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 10
    assert words == 4
    assert lines == 2


def test_consume_2():
    # do another basic test of the consume function.
    # 'a\nb\ncc\nddd\ne' -> 12 characters, 5 words, 5 lines
    testfile = _make_testfile('sometext.txt', 'a\nb\ncc\nddd\ne')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 12  # includes whitespace in char count
    assert words == 5
    assert lines == 5


def test_consume_3():
    # check something tricky: whitespace at beginning & end of line
    # ' a b c ' -> 7 characters, 3 words, 1 line
    testfile = _make_testfile('sometext.txt', ' a b c ')
    chars, words, lines = wordcount_lib.consume(testfile)
    assert chars == 7  # includes whitespace in char count
    assert words == 3
    assert lines == 1
def test_consume_4():
    # check something tricky: whitespace at beginning & end of line
    testfile = _make_testfile('sometext.txt', ' a b c d e')
    chars, words, lines = wordcount_lib.consume(testfile)
    # ' a b c d e' is 10 characters (5 letters + 5 spaces); the previous
    # expected value of 9 was simply miscounted -- test_consume_3 shows the
    # char count equals the raw string length, whitespace included.
    assert chars == 10  # includes whitespace in char count
    assert words == 5
    assert lines == 1
2fa092add3508b774c58e880089c18c3275df840 | Set block_align on target population if given | backend/populate_targets.py | backend/populate_targets.py | import django
import os
import yaml
from backend.settings import BASE_DIR
from django.db import IntegrityError
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target
def create_target(target):
method = ''
for m in Target.METHOD_CHOICES:
if target['method'] == m[1]:
method = m[0]
break
if method:
target['method'] = method
else:
print '[!] Invalid method for target "{}".'.format(target['name'])
return
target_args = {
'name': target['name'],
'endpoint': target['endpoint'],
'prefix': target['prefix'],
'alphabet': target['alphabet'],
'secretlength': target['secretlength'],
'alignmentalphabet': target['alignmentalphabet'],
'recordscardinality': target['recordscardinality'],
'method': target['method']
}
if 'maxreflectionlength' in target:
target_args['maxreflectionlength'] = target['maxreflectionlength']
if 'block_align' in target:
target_args['block_align'] = target['block_align']
t = Target(**target_args)
t.save()
print '''Created Target:
\tname: {}
\tendpoint: {}
\tprefix: {}
\talphabet: {}
\tsecretlength: {}
\talignmentalphabet: {}
\trecordscardinality: {}
\tmethod: {}'''.format(
t.name,
t.endpoint,
t.prefix,
t.alphabet,
t.secretlength,
t.alignmentalphabet,
t.recordscardinality,
t.method
)
if __name__ == '__main__':
    try:
        with open(os.path.join(BASE_DIR, 'target_config.yml'), 'r') as ymlconf:
            # NOTE(review): yaml.load without an explicit Loader can execute
            # arbitrary YAML tags; acceptable only if target_config.yml is
            # fully trusted input.
            cfg = yaml.load(ymlconf)
    except IOError, err:
        print 'IOError: %s' % err
        exit(1)

    # The YAML maps target-name -> settings dict; fold the name into each
    # dict before handing it to create_target.
    targets = cfg.items()
    for t in targets:
        target = t[1]
        target['name'] = t[0]
        try:
            create_target(target)
        except (IntegrityError, ValueError), err:
            if isinstance(err, IntegrityError):
                print '[!] Target "{}" already exists.'.format(target['name'])
            elif isinstance(err, ValueError):
                print '[!] Invalid parameters for target "{}".'.format(target['name'])
| import django
import os
import yaml
from backend.settings import BASE_DIR
from django.db import IntegrityError
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'backend.settings')
django.setup()
from breach.models import Target
def create_target(target):
method = ''
for m in Target.METHOD_CHOICES:
if target['method'] == m[1]:
method = m[0]
break
if method:
target['method'] = method
else:
print '[!] Invalid method for target "{}".'.format(target['name'])
return
target_args = {
'name': target['name'],
'endpoint': target['endpoint'],
'prefix': target['prefix'],
'alphabet': target['alphabet'],
'secretlength': target['secretlength'],
'alignmentalphabet': target['alignmentalphabet'],
'recordscardinality': target['recordscardinality'],
'method': target['method']
}
if 'maxreflectionlength' in target:
target_args['maxreflectionlength'] = target['maxreflectionlength']
t = Target(**target_args)
t.save()
print '''Created Target:
\tname: {}
\tendpoint: {}
\tprefix: {}
\talphabet: {}
\tsecretlength: {}
\talignmentalphabet: {}
\trecordscardinality: {}
\tmethod: {}'''.format(
t.name,
t.endpoint,
t.prefix,
t.alphabet,
t.secretlength,
t.alignmentalphabet,
t.recordscardinality,
t.method
)
if __name__ == '__main__':
try:
with open(os.path.join(BASE_DIR, 'target_config.yml'), 'r') as ymlconf:
cfg = yaml.load(ymlconf)
except IOError, err:
print 'IOError: %s' % err
exit(1)
targets = cfg.items()
for t in targets:
target = t[1]
target['name'] = t[0]
try:
create_target(target)
except (IntegrityError, ValueError), err:
if isinstance(err, IntegrityError):
print '[!] Target "{}" already exists.'.format(target['name'])
elif isinstance(err, ValueError):
print '[!] Invalid parameters for target "{}".'.format(target['name'])
| Python | 0.000001 |
829941e9d4675645752fa207c461dd596da6264c | refactor html template selection | satchmo/apps/satchmo_store/mail.py | satchmo/apps/satchmo_store/mail.py | from django.conf import settings
from django.template import loader, Context, TemplateDoesNotExist
from livesettings import config_value
import os.path
from socket import error as SocketError
import logging
log = logging.getLogger('satchmo_store.mail')
if "mailer" in settings.INSTALLED_APPS:
from mailer import send_mail
else:
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
class NoRecipientsException(StandardError):
pass
def send_store_mail(subject, context, template, recipients_list=None,
format_subject=False, send_to_store=False,
fail_silently=False):
"""
:parameter: subject: A string.
:parameter: format_subject: Determines whether the *subject* parameter
is formatted. Only the %(shop_name)s specifier is supported now.
:parameter: context: A dictionary to use when rendering the message body.
This dictionary overwrites an internal dictionary which provides the key
`shop_name`.
:parameter: template: The path of the template to use when rendering the
message body.
If store config is set to enable HTML emails, will attempt to find the HTML
template and send it.
"""
from satchmo_store.shop.models import Config
shop_config = Config.objects.get_current()
shop_email = shop_config.store_email
shop_name = shop_config.store_name
send_html = config_value('SHOP', 'HTML_EMAIL')
if not shop_email:
log.warn('No email address configured for the shop. Using admin settings.')
shop_email = settings.ADMINS[0][1]
c_dict = {'shop_name': shop_name}
if format_subject:
subject = subject % c_dict
c_dict.update(context)
c = Context(c_dict)
if send_html:
base_dir,base_name = os.path.split(template)
file_name, ext = os.path.splitext(base_name)
template_name = file_name + '.html'
if settings.DEBUG:
log.info("Attempting to send html mail.")
try:
t = loader.get_template(os.path.join(base_dir, template_name))
except TemplateDoesNotExist:
log.warn('Unable to find html email template %s. Falling back to text only email.' % os.path.join(base_dir, template_name))
send_html = False
if not send_html:
t = loader.get_template(template)
body = t.render(c)
recipients = recipients_list or []
if send_to_store:
recipients.append(shop_email)
if not recipients:
raise NoRecipientsException
try:
if send_html:
msg = EmailMultiAlternatives(subject, body, shop_email, recipients)
msg.attach_alternative(body, "text/html")
msg.send(fail_silently=fail_silently)
else:
send_mail(subject, body, shop_email, recipients,
fail_silently=fail_silently)
except SocketError, e:
if settings.DEBUG:
log.error('Error sending mail: %s' % e)
log.warn('Ignoring email error, since you are running in DEBUG mode. Email was:\nTo:%s\nSubject: %s\n---\n%s', ",".join(recipients), subject, body)
else:
log.fatal('Error sending mail: %s' % e)
raise IOError('Could not send email. Please make sure your email settings are correct and that you are not being blocked by your ISP.')
| from django.conf import settings
from django.template import loader, Context, TemplateDoesNotExist
from livesettings import config_value
import os.path
from socket import error as SocketError
import logging
log = logging.getLogger('satchmo_store.mail')
if "mailer" in settings.INSTALLED_APPS:
from mailer import send_mail
else:
from django.core.mail import send_mail
from django.core.mail import EmailMultiAlternatives
class NoRecipientsException(StandardError):
pass
def send_store_mail(subject, context, template, recipients_list=None,
format_subject=False, send_to_store=False,
fail_silently=False):
"""
:parameter: subject: A string.
:parameter: format_subject: Determines whether the *subject* parameter
is formatted. Only the %(shop_name)s specifier is supported now.
:parameter: context: A dictionary to use when rendering the message body.
This dictionary overwrites an internal dictionary which provides the key
`shop_name`.
:parameter: template: The path of the template to use when rendering the
message body.
If store config is set to enable HTML emails, will attempt to find the HTML
template and send it.
"""
from satchmo_store.shop.models import Config
shop_config = Config.objects.get_current()
shop_email = shop_config.store_email
shop_name = shop_config.store_name
send_html = config_value('SHOP', 'HTML_EMAIL')
if not shop_email:
log.warn('No email address configured for the shop. Using admin settings.')
shop_email = settings.ADMINS[0][1]
c_dict = {'shop_name': shop_name}
if format_subject:
subject = subject % c_dict
c_dict.update(context)
c = Context(c_dict)
t = loader.get_template(template)
body = t.render(c)
if send_html:
base_dir,base_name = os.path.split(template)
file_name, ext = os.path.splitext(base_name)
template_name = file_name + '.html'
if settings.DEBUG:
log.info("Attempting to send html mail.")
try:
html_t = loader.get_template(os.path.join(base_dir, template_name))
html_body = html_t.render(c)
except TemplateDoesNotExist:
log.warn('Unable to find html email template %s. Falling back to text only email.' % os.path.join(base_dir, template_name))
send_html = False
recipients = recipients_list or []
if send_to_store:
recipients.append(shop_email)
if not recipients:
raise NoRecipientsException
try:
if send_html:
msg = EmailMultiAlternatives(subject, body, shop_email, recipients)
msg.attach_alternative(html_body, "text/html")
msg.send(fail_silently=fail_silently)
else:
send_mail(subject, body, shop_email, recipients,
fail_silently=fail_silently)
except SocketError, e:
if settings.DEBUG:
log.error('Error sending mail: %s' % e)
log.warn('Ignoring email error, since you are running in DEBUG mode. Email was:\nTo:%s\nSubject: %s\n---\n%s', ",".join(recipients), subject, body)
else:
log.fatal('Error sending mail: %s' % e)
raise IOError('Could not send email. Please make sure your email settings are correct and that you are not being blocked by your ISP.')
| Python | 0 |
abd2ad6098cb0bc827a8bebf12f21f1131dc83fa | Change version number | fluxghost/__init__.py | fluxghost/__init__.py | __version__ = "0.8.1"
DEBUG = False
| __version__ = "0.8.0"
DEBUG = False
| Python | 0.000009 |
52eed6f6d771045b2c06a941db17665785e90b23 | return an error exit code if tests failed | tests/__init__.py | tests/__init__.py | import sys
import unittest
import parse
import extent
def load_tests():
return unittest.TestSuite([parse.load_tests(), extent.load_tests()])
if __name__ == "__main__":
result = unittest.TextTestRunner(verbosity=2).run(load_tests())
if not result.wasSuccessful():
sys.exit(1)
| import unittest
import parse
import extent
def load_tests():
return unittest.TestSuite([parse.load_tests(), extent.load_tests()])
if __name__ == "__main__":
unittest.TextTestRunner(verbosity=2).run(load_tests())
| Python | 0.001138 |
a0d8be58248eaa3d314c624cd4150afc1d3dd203 | Fix DereferrablePanelTestCase.tearDownClass | tests/__init__.py | tests/__init__.py | import sublime
from textwrap import dedent
from unittesting import DeferrableTestCase
class DereferrablePanelTestCase(DeferrableTestCase):
@classmethod
def setUpClass(cls):
"""
Set up global test environment once for all tests owned by this class.
"""
cls.window = sublime.active_window()
cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)
settings = cls.view.settings()
settings.set("auto_indent", False)
settings.set("detect_indentation", False)
settings.set("fold_buttons", False)
settings.set("gutter", False)
settings.set("line_numbers", False)
settings.set("scroll_past_end", False)
settings.set("syntax", "Packages/MarkdownEditing/syntaxes/Markdown.sublime-syntax")
settings.set("word_wrap", False)
cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)
@classmethod
def tearDownClass(cls):
"""
Teardown global test environment once all tests finished.
"""
cls.window.destroy_output_panel("MarkdownUnitTests")
@classmethod
def addCaretAt(cls, row, col):
"""
Add caret to given point (row, col)
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
cls.view.sel().add(cls.textPoint(row, col))
@classmethod
def setCaretTo(cls, row, col):
"""
Move caret to given point (row, col)
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
cls.view.sel().clear()
cls.view.sel().add(cls.textPoint(row, col))
@classmethod
def setBlockText(cls, text):
"""
Replace everything with given block text
:param text: The triple quoted block text to put into scratch view.
"""
cls.setText(dedent(text).strip("\n"))
@classmethod
def setText(cls, text):
"""
Replace everything with given text
:param text: The text to put into scratch view.
"""
cls.view.run_command("select_all")
cls.view.run_command("right_delete")
cls.view.run_command("insert", {"characters": text})
@classmethod
def getText(cls):
"""
Return view's text content
"""
return cls.view.substr(sublime.Region(0, cls.view.size()))
@classmethod
def getRow(cls, row):
"""
Return row's text content.
:param row: The natural 1-based row number. 1=first row
"""
return cls.view.substr(cls.view.line(cls.textPoint(row, 0)))
@classmethod
def textPoint(cls, row, col):
"""
Return textpoint for given row,col coordinats.
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
return cls.view.text_point(row - 1, col - 1)
def assertEqualBlockText(self, text):
"""
Assert view containing `text` after detenting and stripping whitespace.
:param text:
Triple quoted text, which is detented and stripped
before being compared with view's content.
"""
self.assertEqual(self.getText(), dedent(text).strip("\n"))
def assertEqualText(self, text):
"""
Assert view containing `text`.
:param text: The text expected to be equal with view's content.
"""
self.assertEqual(self.getText(), text)
| import sublime
from textwrap import dedent
from unittesting import DeferrableTestCase
class DereferrablePanelTestCase(DeferrableTestCase):
@classmethod
def setUpClass(cls):
"""
Set up global test environment once for all tests owned by this class.
"""
cls.window = sublime.active_window()
cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)
settings = cls.view.settings()
settings.set("auto_indent", False)
settings.set("detect_indentation", False)
settings.set("fold_buttons", False)
settings.set("gutter", False)
settings.set("line_numbers", False)
settings.set("scroll_past_end", False)
settings.set("syntax", "Packages/MarkdownEditing/syntaxes/Markdown.sublime-syntax")
settings.set("word_wrap", False)
cls.view = cls.window.create_output_panel("MarkdownUnitTests", unlisted=True)
@classmethod
def tearDownClass(cls):
"""
Teardown global test environment once all tests finished.
"""
cls.view = cls.window.destroy_output_panel("MarkdownUnitTests")
@classmethod
def addCaretAt(cls, row, col):
"""
Add caret to given point (row, col)
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
cls.view.sel().add(cls.textPoint(row, col))
@classmethod
def setCaretTo(cls, row, col):
"""
Move caret to given point (row, col)
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
cls.view.sel().clear()
cls.view.sel().add(cls.textPoint(row, col))
@classmethod
def setBlockText(cls, text):
"""
Replace everything with given block text
:param text: The triple quoted block text to put into scratch view.
"""
cls.setText(dedent(text).strip("\n"))
@classmethod
def setText(cls, text):
"""
Replace everything with given text
:param text: The text to put into scratch view.
"""
cls.view.run_command("select_all")
cls.view.run_command("right_delete")
cls.view.run_command("insert", {"characters": text})
@classmethod
def getText(cls):
"""
Return view's text content
"""
return cls.view.substr(sublime.Region(0, cls.view.size()))
@classmethod
def getRow(cls, row):
"""
Return row's text content.
:param row: The natural 1-based row number. 1=first row
"""
return cls.view.substr(cls.view.line(cls.textPoint(row, 0)))
@classmethod
def textPoint(cls, row, col):
"""
Return textpoint for given row,col coordinats.
:param row: The natural 1-based row number. 1=first row
:param col: The natural 1-based column number. 1=first column
"""
return cls.view.text_point(row - 1, col - 1)
def assertEqualBlockText(self, text):
"""
Assert view containing `text` after detenting and stripping whitespace.
:param text:
Triple quoted text, which is detented and stripped
before being compared with view's content.
"""
self.assertEqual(self.getText(), dedent(text).strip("\n"))
def assertEqualText(self, text):
"""
Assert view containing `text`.
:param text: The text expected to be equal with view's content.
"""
self.assertEqual(self.getText(), text)
| Python | 0.000001 |
7c2f34990dc3bf5b4736541a6e9faf88a07581fa | remove useless import | tests/__init__.py | tests/__init__.py | import asynctest
import logging
import os
from functools import wraps
import shortuuid
from typing import Generator, Any
from yarl import URL
from aio_pika import Connection, connect, Channel, Queue, Exchange
log = logging.getLogger(__name__)
for logger_name in ('pika.channel', 'pika.callback', 'pika.connection'):
logging.getLogger(logger_name).setLevel(logging.INFO)
logging.basicConfig(level=logging.DEBUG)
AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://guest:guest@localhost"))
if not AMQP_URL.path:
AMQP_URL.path = '/'
class AsyncTestCase(asynctest.TestCase):
forbid_get_event_loop = True
def get_random_name(self, *args):
prefix = ['test']
for item in args:
prefix.append(item)
prefix.append(shortuuid.uuid())
return ".".join(prefix)
class BaseTestCase(AsyncTestCase):
async def create_connection(self, cleanup=True) -> Generator[Any, None, Connection]:
client = await connect(AMQP_URL, loop=self.loop)
if cleanup:
self.addCleanup(client.close)
return client
async def create_channel(self, connection=None, cleanup=True, **kwargs) -> Generator[Any, None, Channel]:
if connection is None:
connection = await self.create_connection()
channel = await connection.channel(**kwargs)
if cleanup:
self.addCleanup(channel.close)
return channel
async def declare_queue(self, *args, **kwargs) -> Generator[Any, None, Queue]:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
queue = await channel.declare_queue(*args, **kwargs)
self.addCleanup(queue.delete)
return queue
async def declare_exchange(self, *args, **kwargs) -> Generator[Any, None, Exchange]:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
exchange = await channel.declare_exchange(*args, **kwargs)
self.addCleanup(exchange.delete)
return exchange
def timeout(timeout_sec=5):
def decorator(func):
@wraps(func)
async def wrap(self, *args, **kwargs):
loop = self.loop
task = loop.create_task(func(self, *args, **kwargs))
def on_timeout():
if task.done():
return
task.cancel()
self.loop.call_later(timeout_sec, on_timeout)
return await task
return wrap
return decorator
| import asyncio
import asynctest
import logging
import os
from functools import wraps
import shortuuid
from typing import Generator, Any
from yarl import URL
from aio_pika import Connection, connect, Channel, Queue, Exchange
log = logging.getLogger(__name__)
for logger_name in ('pika.channel', 'pika.callback', 'pika.connection'):
logging.getLogger(logger_name).setLevel(logging.INFO)
logging.basicConfig(level=logging.DEBUG)
AMQP_URL = URL(os.getenv("AMQP_URL", "amqp://guest:guest@localhost"))
if not AMQP_URL.path:
AMQP_URL.path = '/'
class AsyncTestCase(asynctest.TestCase):
forbid_get_event_loop = True
def get_random_name(self, *args):
prefix = ['test']
for item in args:
prefix.append(item)
prefix.append(shortuuid.uuid())
return ".".join(prefix)
class BaseTestCase(AsyncTestCase):
async def create_connection(self, cleanup=True) -> Generator[Any, None, Connection]:
client = await connect(AMQP_URL, loop=self.loop)
if cleanup:
self.addCleanup(client.close)
return client
async def create_channel(self, connection=None, cleanup=True, **kwargs) -> Generator[Any, None, Channel]:
if connection is None:
connection = await self.create_connection()
channel = await connection.channel(**kwargs)
if cleanup:
self.addCleanup(channel.close)
return channel
async def declare_queue(self, *args, **kwargs) -> Generator[Any, None, Queue]:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
queue = await channel.declare_queue(*args, **kwargs)
self.addCleanup(queue.delete)
return queue
async def declare_exchange(self, *args, **kwargs) -> Generator[Any, None, Exchange]:
if 'channel' not in kwargs:
channel = await self.create_channel()
else:
channel = kwargs.pop('channel')
exchange = await channel.declare_exchange(*args, **kwargs)
self.addCleanup(exchange.delete)
return exchange
def timeout(timeout_sec=5):
def decorator(func):
@wraps(func)
async def wrap(self, *args, **kwargs):
loop = self.loop
task = loop.create_task(func(self, *args, **kwargs))
def on_timeout():
if task.done():
return
task.cancel()
self.loop.call_later(timeout_sec, on_timeout)
return await task
return wrap
return decorator
| Python | 0.000004 |
1858b0ae7f70798f3d11ecca1af55719a52def49 | Fix downgrade in migration | neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py | neutron/db/migration/alembic_migrations/versions/2a6d0b51f4bb_cisco_plugin_cleanup.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""cisco plugin cleanup
Revision ID: 2a6d0b51f4bb
Revises: 1d76643bcec4
Create Date: 2013-01-17 22:24:37.730466
"""
# revision identifiers, used by Alembic.
revision = '2a6d0b51f4bb'
down_revision = '1d76643bcec4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table(u'portprofile_bindings')
op.drop_table(u'portprofiles')
op.drop_table(u'port_bindings')
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
u'port_bindings',
sa.Column(u'id', sa.Integer(), autoincrement=True,
nullable=False),
sa.Column(u'port_id', sa.String(255), nullable=False),
sa.Column(u'blade_intf_dn', sa.String(255), nullable=False),
sa.Column(u'portprofile_name', sa.String(255),
nullable=True),
sa.Column(u'vlan_name', sa.String(255), nullable=True),
sa.Column(u'vlan_id', sa.Integer(), nullable=True),
sa.Column(u'qos', sa.String(255), nullable=True),
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'vif_id', sa.String(255), nullable=True),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'portprofiles',
sa.Column(u'uuid', sa.String(255), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'vlan_id', sa.Integer(), nullable=True),
sa.Column(u'qos', sa.String(255), nullable=True),
sa.PrimaryKeyConstraint(u'uuid')
)
op.create_table(
u'portprofile_bindings',
sa.Column(u'id', sa.String(255), nullable=False),
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'port_id', sa.String(255), nullable=True),
sa.Column(u'portprofile_id', sa.String(255), nullable=True),
sa.Column(u'default', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
sa.ForeignKeyConstraint(['port_id'], ['ports.id'], ),
sa.PrimaryKeyConstraint(u'id')
)
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""cisco plugin cleanup
Revision ID: 2a6d0b51f4bb
Revises: 1d76643bcec4
Create Date: 2013-01-17 22:24:37.730466
"""
# revision identifiers, used by Alembic.
revision = '2a6d0b51f4bb'
down_revision = '1d76643bcec4'
# Change to ['*'] if this migration applies to all plugins
migration_for_plugins = [
'neutron.plugins.cisco.network_plugin.PluginV2'
]
from alembic import op
import sqlalchemy as sa
from neutron.db import migration
def upgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.drop_table(u'portprofile_bindings')
op.drop_table(u'portprofiles')
op.drop_table(u'port_bindings')
def downgrade(active_plugins=None, options=None):
if not migration.should_run(active_plugins, migration_for_plugins):
return
op.create_table(
u'port_bindings',
sa.Column(u'id', sa.Integer(), autoincrement=True,
nullable=False),
sa.Column(u'port_id', sa.String(255), nullable=False),
sa.Column(u'blade_intf_dn', sa.String(255), nullable=False),
sa.Column(u'portprofile_name', sa.String(255),
nullable=True),
sa.Column(u'vlan_name', sa.String(255), nullable=True),
sa.Column(u'vlan_id', sa.Integer(), nullable=True),
sa.Column(u'qos', sa.String(255), nullable=True),
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'vif_id', sa.String(255), nullable=True),
sa.PrimaryKeyConstraint(u'id')
)
op.create_table(
u'portprofiles',
sa.Column(u'uuid', sa.String(255), nullable=False),
sa.Column(u'name', sa.String(255), nullable=True),
sa.Column(u'vlan_id', sa.Integer(), nullable=True),
sa.Column(u'qos', sa.String(255), nullable=True),
sa.PrimaryKeyConstraint(u'uuid')
)
op.create_table(
u'portprofile_bindings',
sa.Column(u'id', sa.String(255), nullable=False),
sa.Column(u'tenant_id', sa.String(255), nullable=True),
sa.Column(u'port_id', sa.Integer(), nullable=True),
sa.Column(u'portprofile_id', sa.String(255), nullable=True),
sa.Column(u'portprofile_id', sa.Boolean(), nullable=False),
sa.ForeignKeyConstraint(['portprofile_id'], ['portprofiles.uuid'], ),
sa.ForeignKeyConstraint(['ports'], ['ports.id'], ),
sa.PrimaryKeyConstraint(u'id')
)
| Python | 0.00003 |
bb94d126ae9ff86efc00cfbda5f3fff375490e16 | Add missing import to tests/__init__.py. | tests/__init__.py | tests/__init__.py | # -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
from alchemy_db_utils_unittest import *
from black_box_tests import *
from bulk_insert_unittest import *
from cache_gc_unittest import *
from db_loader_unittest import *
from db_loader_unittest import *
from deterministic_hazard_unittest import *
from deterministic_risk_unittest import *
from geo_unittest import *
from handlers_unittest import *
from hazard_classical_unittest import *
from hazard_nrml_unittest import *
from hazard_unittest import *
from input_risk_unittest import *
from java_unittest import *
from job_unittest import *
from kvs_unittest import *
from logs_unittest import *
from loss_map_output_unittest import *
from loss_output_unittest import *
from output_hazard_unittest import *
from output_risk_unittest import *
from output_unittest import *
from output_writers_unittest import *
from parser_exposure_portfolio_unittest import *
from parser_hazard_curve_unittest import *
from parser_hazard_map_unittest import *
from parser_vulnerability_model_unittest import *
from probabilistic_unittest import *
from producer_unittest import *
from risk_job_unittest import *
from risk_parser_unittest import *
from risk_unittest import *
from schema_unittest import *
from shapes_unittest import *
from tools_dbmaint_unittest import *
from utils_general_unittest import *
from utils_tasks_unittest import *
from utils_version_unittest import *
from validator_unittest import *
import glob
import os
import sys
for path in glob.glob(os.path.join(os.path.dirname(__file__), '*test*.py')):
test = os.path.splitext(os.path.basename(path))[0]
module = 'tests.' + test
if module not in sys.modules:
print >>sys.stderr, "Potential missing import of " + module
| # -*- coding: utf-8 -*-
# Copyright (c) 2010-2011, GEM Foundation.
#
# OpenQuake is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# only, as published by the Free Software Foundation.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License version 3 for more details
# (a copy is included in the LICENSE file that accompanied this code).
#
# You should have received a copy of the GNU Lesser General Public License
# version 3 along with OpenQuake. If not, see
# <http://www.gnu.org/licenses/lgpl-3.0.txt> for a copy of the LGPLv3 License.
from alchemy_db_utils_unittest import *
from black_box_tests import *
from bulk_insert_unittest import *
from cache_gc_unittest import *
from db_loader_unittest import *
from db_loader_unittest import *
from deterministic_hazard_unittest import *
from deterministic_risk_unittest import *
from geo_unittest import *
from handlers_unittest import *
from hazard_classical_unittest import *
from hazard_nrml_unittest import *
from hazard_unittest import *
from java_unittest import *
from job_unittest import *
from kvs_unittest import *
from logs_unittest import *
from loss_map_output_unittest import *
from loss_output_unittest import *
from output_hazard_unittest import *
from output_risk_unittest import *
from output_unittest import *
from output_writers_unittest import *
from parser_exposure_portfolio_unittest import *
from parser_hazard_curve_unittest import *
from parser_hazard_map_unittest import *
from parser_vulnerability_model_unittest import *
from probabilistic_unittest import *
from producer_unittest import *
from risk_job_unittest import *
from risk_parser_unittest import *
from risk_unittest import *
from schema_unittest import *
from shapes_unittest import *
from tools_dbmaint_unittest import *
from utils_general_unittest import *
from utils_tasks_unittest import *
from utils_version_unittest import *
from validator_unittest import *
import glob
import os
import sys
for path in glob.glob(os.path.join(os.path.dirname(__file__), '*test*.py')):
test = os.path.splitext(os.path.basename(path))[0]
module = 'tests.' + test
if module not in sys.modules:
print >>sys.stderr, "Potential missing import of " + module
| Python | 0.999554 |
7bfabc008cf3f580a5d1adfd6ef7bb7142a706d0 | Add checking of space permissions and kernel space unique ID | autotest/client/hardware_TPMCheck/hardware_TPMCheck.py | autotest/client/hardware_TPMCheck/hardware_TPMCheck.py | # Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging, os, re
from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
def old_or_missing_firmware_version():
f = open("/sys/devices/platform/chromeos_acpi/FWID")
if not f:
return True
version = f.readline().strip()
logging.info("firmware version: %s", version)
# Expect a dot-separated list of 6 elements. Discard 1st element.
v = re.split("\.", version)[1:]
w = re.split("\.", "any-nickname.03.60.1118.0036.")[1:]
if len(v) != len(w):
raise error.TestError("malformed firmware version %s" % version)
return v < w
def dict_from_command(command):
dict = {}
out = os.popen(command)
for linecr in out.readlines():
line = linecr.strip()
match = re.match("([^ ]+) (.*)", line)
k = match.group(1)
v = match.group(2)
dict[k] = v
return dict
def expect(d, key, value):
if (d[key] != value):
raise error.TestError("expecting %s = %s, observing %s = %s" %
(key, value, key, d[key]))
def checkp(space, permission):
c = "tpmc getp %s" % space
l = os.popen(c).readline()
if (not re.match(".*%s" % permission, l)):
raise error.TestError("invalid response to %s: %s" % (c, l))
class hardware_TPMCheck(test.test):
version = 1
def run_once(self):
if old_or_missing_firmware_version():
logging.warning("skipping test because firmware " +
"version missing or deemed too old")
return
try:
utils.system("stop tcsd", ignore_status=True)
# Check volatile (ST_CLEAR) flags
d = dict_from_command("tpmc getvf");
expect(d, "deactivated", "0")
expect(d, "physicalPresence", "0")
expect(d, "physicalPresenceLock", "1")
expect(d, "bGlobalLock", "1")
# Check permanent flags
d = dict_from_command("tpmc getpf");
expect(d, "disable", "0")
expect(d, "ownership", "1")
expect(d, "deactivated", "0")
expect(d, "physicalPresenceHWEnable", "0")
expect(d, "physicalPresenceCMDEnable", "1")
expect(d, "physicalPresenceLifetimeLock", "1")
expect(d, "nvLocked", "1")
# Check space permissions
checkp("0x1007", "0x8001")
checkp("0x1008", "0x1")
# Check kernel space UID
l = os.popen("tpmc read 0x1008 0x5").readline()
if (not re.match(".* 4c 57 52 47$", l)):
raise error.TestError("invalid kernel space UID: %s" % l)
finally:
utils.system("start tcsd", ignore_status=True)
| # Copyright (c) 2010 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os, re
from autotest_lib.client.bin import test, utils
from autotest_lib.client.common_lib import error
def dict_from_command(command):
    """Run *command* and map each output line's first token to the rest."""
    parsed = {}
    for raw_line in os.popen(command).readlines():
        match = re.match("([^ ]+) (.*)", raw_line.strip())
        parsed[match.group(1)] = match.group(2)
    return parsed
def expect(d, key, value):
    """Check d[key] == value; on mismatch restart tcsd, then raise TestError."""
    if d[key] == value:
        return
    # Leave the daemon running again before failing the test.
    utils.system("start tcsd", ignore_status=True)
    raise error.TestError("expecting %s = %s, receiving %s = %s" %
                          (key, value, key, d[key]))
class hardware_TPMCheck(test.test):
    """Verify that the TPM volatile and permanent flags have expected values."""
    version = 1

    def run_once(self):
        utils.system("stop tcsd", ignore_status=True)

        volatile = dict_from_command("tpmc getvf")
        expect(volatile, "deactivated", "0")
        expect(volatile, "physicalPresence", "0")
        expect(volatile, "physicalPresenceLock", "1")
        expect(volatile, "bGlobalLock", "1")

        permanent = dict_from_command("tpmc getpf")
        expect(permanent, "disable", "0")
        expect(permanent, "ownership", "1")
        expect(permanent, "deactivated", "0")
        expect(permanent, "physicalPresenceHWEnable", "0")
        expect(permanent, "physicalPresenceCMDEnable", "1")
        expect(permanent, "physicalPresenceLifetimeLock", "1")
        expect(permanent, "nvLocked", "1")

        utils.system("start tcsd", ignore_status=True)
| Python | 0 |
3225abc4006378d0b9f1e861116aac8116d47ec0 | fix wrong indent | monasca_notification/plugins/slack_notifier.py | monasca_notification/plugins/slack_notifier.py | # (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlparse
import requests
from monasca_notification.plugins import abstract_notifier
"""
notification.address = https://slack.com/api/chat.postMessage?token=token&channel=#channel"
Slack documentation about tokens:
1. Login to your slack account via browser and check the following pages
a. https://api.slack.com/docs/oauth-test-tokens
b. https://api.slack.com/tokens
"""
class SlackNotifier(abstract_notifier.AbstractNotifier):
    """Notifier that posts alarm descriptions to a Slack webhook/API URL."""

    def __init__(self, log):
        self._log = log

    def config(self, config_dict):
        # Start from the defaults and overlay the user-supplied settings.
        self._config = {'timeout': 5}
        self._config.update(config_dict)

    @property
    def type(self):
        return "slack"

    @property
    def statsd_name(self):
        return 'sent_slack_count'

    def _build_slack_message(self, notification):
        """Serialize the alarm description as the Slack message body."""
        return json.dumps({'text': notification.alarm_description})

    def send_notification(self, notification):
        """POST the notification to the Slack URL held in notification.address."""
        payload = self._build_slack_message(notification)
        # "#" is reserved in URLs and Slack room names start with it, so
        # substitute the percent-encoded equivalent.
        address = notification.address.replace("#", "%23")
        query_params = urlparse.parse_qs(urlparse.urlsplit(address).query)
        # URL stripped of its query string; requests re-adds it via params=.
        url = urlparse.urljoin(address, urlparse.urlparse(address).path)
        # Certificate verification is the default; ca_certs wins over insecure.
        verify = self._config.get('insecure', False)
        if self._config.get("ca_certs"):
            verify = self._config.get("ca_certs")
        proxies = ({"https": self._config.get("proxy")}
                   if self._config.get("proxy") else None)
        try:
            self._log.debug("Sending to the url {0} , with query_params {1}".format(url, query_params))
            result = requests.post(url=url,
                                   data=payload,
                                   verify=verify,
                                   params=query_params,
                                   proxies=proxies,
                                   timeout=self._config['timeout'])
            result.raise_for_status()
            if result.headers['content-type'] != 'application/json':
                return True
            response = result.json()
            if not response.get('ok'):
                self._log.warning("Received an error message {} when trying to send to slack on URL {}."
                                  .format(response.get("error"), url))
                return False
            self._log.debug("Notification successfully posted.")
            return True
        except Exception as ex:
            self._log.exception("Error trying to send to slack on URL {}: {}".format(url, ex.message))
            return False
| # (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import urlparse
import requests
from monasca_notification.plugins import abstract_notifier
"""
notification.address = https://slack.com/api/chat.postMessage?token=token&channel=#channel"
Slack documentation about tokens:
1. Login to your slack account via browser and check the following pages
a. https://api.slack.com/docs/oauth-test-tokens
b. https://api.slack.com/tokens
"""
class SlackNotifier(abstract_notifier.AbstractNotifier):
    """Notifier that posts alarm descriptions to a Slack channel URL."""

    def __init__(self, log):
        self._log = log

    def config(self, config_dict):
        # Defaults first, then whatever the operator configured.
        self._config = {'timeout': 5}
        self._config.update(config_dict)

    @property
    def type(self):
        return "slack"

    @property
    def statsd_name(self):
        return 'sent_slack_count'

    def _build_slack_message(self, notification):
        """Serialize the alarm description as the Slack message body."""
        return json.dumps({'text': notification.alarm_description})

    def send_notification(self, notification):
        """Post the notification to the Slack URL in notification.address."""
        body = self._build_slack_message(notification)
        # "#" is reserved in URLs; Slack room names begin with it, so
        # replace it with its percent-encoded form.
        address = notification.address.replace("#", "%23")
        query_params = urlparse.parse_qs(urlparse.urlsplit(address).query)
        # Base URL without the query string (re-attached via params=).
        url = urlparse.urljoin(address, urlparse.urlparse(address).path)
        # ca_certs, when given, takes precedence over the insecure flag.
        verify = self._config.get('insecure', False)
        if self._config.get("ca_certs"):
            verify = self._config.get("ca_certs")
        if self._config.get("proxy"):
            proxy_config = {"https": self._config.get("proxy")}
        else:
            proxy_config = None
        try:
            self._log.debug("Sending to the url {0} , with query_params {1}".format(url, query_params))
            result = requests.post(url=url,
                                   data=body,
                                   verify=verify,
                                   params=query_params,
                                   proxies=proxy_config,
                                   timeout=self._config['timeout'])
            result.raise_for_status()
            if result.headers['content-type'] == 'application/json':
                response = result.json()
                if not response.get('ok'):
                    self._log.warning("Received an error message {} when trying to send to slack on URL {}."
                                      .format(response.get("error"), url))
                    return False
                self._log.debug("Notification successfully posted.")
                return True
            # NOTE: a non-JSON 2xx response falls through and returns None,
            # matching the historical behaviour of this method.
        except Exception as ex:
            self._log.exception("Error trying to send to slack on URL {}: {}".format(url, ex.message))
            return False
| Python | 0.984856 |
4bafa90acca39a3d3fa5df0303d885c810244700 | Add URL | lc034_find_first_and_last_position_of_element_in_sorted_array.py | lc034_find_first_and_last_position_of_element_in_sorted_array.py | """Leetcode 34. Find First and Last Position of Element in Sorted Array
Medium
URL: https://leetcode.com/problems/find-first-and-last-position-of-element-in-sorted-array
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
"""
class Solution(object):
    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Two binary searches: a left-biased one for the first occurrence of
        target and a right-biased one for the last; [-1, -1] when absent.

        Time complexity: O(logn), where n is the length of nums.
        Space complexity: O(1).
        """
        result = [-1, -1]
        if not nums:
            return result

        # Left-biased search: converge on the first index holding target.
        lo, hi = 0, len(nums) - 1
        while lo < hi:
            mid = lo + (hi - lo) // 2
            if nums[mid] < target:
                lo = mid + 1
            else:
                hi = mid
        if nums[lo] != target:
            return result
        result[0] = lo

        # Right-biased search (mid rounds up) for the last index.
        hi = len(nums) - 1
        while lo < hi:
            mid = lo + (hi - lo) // 2 + 1
            if nums[mid] > target:
                hi = mid - 1
            else:
                lo = mid
        result[1] = hi
        return result
def main():
    """Run the two sample cases from the problem statement."""
    # Parenthesized print works as a statement on Python 2 and as the
    # builtin function on Python 3, unlike the old bare "print expr" form.
    # Ans: [3,4]
    nums = [5, 7, 7, 8, 8, 10]
    target = 8
    print(Solution().searchRange(nums, target))

    # Ans: [-1,-1]
    nums = [5, 7, 7, 8, 8, 10]
    target = 6
    print(Solution().searchRange(nums, target))


if __name__ == '__main__':
    main()
| """Leetcode 34. Find First and Last Position of Element in Sorted Array
Medium
Given an array of integers nums sorted in ascending order,
find the starting and ending position of a given target value.
Your algorithm's runtime complexity must be in the order of O(log n).
If the target is not found in the array, return [-1, -1].
Example 1:
Input: nums = [5,7,7,8,8,10], target = 8
Output: [3,4]
Example 2:
Input: nums = [5,7,7,8,8,10], target = 6
Output: [-1,-1]
"""
class Solution(object):
    def searchRange(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]

        Locate the first and last index of target with two O(log n)
        binary searches; return [-1, -1] if target is absent.
        """
        if not nums:
            return [-1, -1]

        # Find the leftmost index holding target.
        low, high = 0, len(nums) - 1
        while low < high:
            middle = low + (high - low) // 2
            if nums[middle] < target:
                low = middle + 1
            else:
                high = middle
        if nums[low] != target:
            return [-1, -1]
        start = low

        # Find the rightmost index holding target (middle biased right so
        # the loop always makes progress).
        high = len(nums) - 1
        while low < high:
            middle = low + (high - low) // 2 + 1
            if nums[middle] > target:
                high = middle - 1
            else:
                low = middle
        return [start, high]
def main():
    """Run the two sample cases from the problem statement."""
    # Parenthesized print is valid on both Python 2 (statement form) and
    # Python 3 (function form); the old bare "print expr" was Py2-only.
    # Ans: [3,4]
    nums = [5, 7, 7, 8, 8, 10]
    target = 8
    print(Solution().searchRange(nums, target))

    # Ans: [-1,-1]
    nums = [5, 7, 7, 8, 8, 10]
    target = 6
    print(Solution().searchRange(nums, target))


if __name__ == '__main__':
    main()
| Python | 0.000001 |
f2dcee8364087209b7f160806a023ce2dc198466 | Remove hidden keys | bdp/platform/frontend/src/bdp_fe/jobconf/views_util.py | bdp/platform/frontend/src/bdp_fe/jobconf/views_util.py | """
Utility functions for controllers.
"""
from random import choice
from django.conf import settings
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404
from models import CustomJobModel
from pymongo import Connection
from pymongo.errors import AutoReconnect, ConnectionFailure
from bdp_fe.jobconf.models import Job
from bdp_fe.middleware403 import Http403
HIDDEN_KEYS = []
class NoResultsError(Exception):
    """Raised when a job's Mongo collection contains no documents."""


class NoConnectionError(Exception):
    """Raised when the MongoDB server cannot be reached."""
class MongoRecord(object):
    """
    A MongoRecord is a document from a Mongo database, but with additional
    methods to allow for easier display.
    """

    def __init__(self, raw_mongo_document, primary_key):
        # The raw document (a mapping) and the name of its primary-key field.
        self.document = raw_mongo_document
        self.pk = primary_key

    def get_primary_key(self):
        """Gets the value of the primary key for this record"""
        return self.document[self.pk]

    def get_fields(self):
        """Gets the values of all non-primary keys for this record"""
        # items() (instead of the Python-2-only iteritems()) keeps this
        # working on both Python 2 and 3; dict keys are unique, so a plain
        # build is equivalent to the previous setdefault() calls.
        return dict((k, v) for k, v in self.document.items()
                    if k != self.pk)
def safe_int_param(query_dict, param_name, default_value=None):
    """
    Safe conversion of query parameters to int.

    By default, returns None for absent or non-integer values.
    """
    raw = query_dict.get(param_name, '')
    try:
        return int(raw)
    except ValueError:
        # Missing parameters yield '' above, which also fails int().
        return default_value
def get_owned_job_or_40x(request, job_id):
    """Return the job only if it belongs to request.user; 404/403 otherwise."""
    try:
        job = get_object_or_404(Job, pk=int(job_id))
    except ValueError:
        # job_id was not numeric at all.
        raise HttpResponseNotFound()
    if job.user != request.user:
        raise Http403()
    return job
def retrieve_results(job_id, primary_key):
    """Fetch every Mongo document produced by a job as MongoRecord objects.

    When primary_key is falsy, a random non-hidden key from a sample
    document is used instead. Raises NoResultsError when the job has no
    documents and NoConnectionError when the Mongo server is unreachable.
    """
    jobmodel = CustomJobModel.objects.get(id=job_id)
    mongo_url = jobmodel.mongo_url()
    mongo_db = jobmodel.job.user.username
    mongo_collection = 'job_%s' % jobmodel.job.id
    try:
        connection = Connection(mongo_url)
        job_results = connection[mongo_db][mongo_collection]
        if not primary_key:
            some_result = job_results.find_one()
            if not some_result:
                raise NoResultsError
            primary_key = choice([k for k in some_result.keys()
                                  if k not in HIDDEN_KEYS])
        return [MongoRecord(job_result, primary_key)
                for job_result in job_results.find()]
    # The old "except AutoReconnect, ConnectionFailure:" form only caught
    # AutoReconnect (binding it to the name ConnectionFailure); a tuple is
    # required to actually catch both exception types.
    except (AutoReconnect, ConnectionFailure):
        raise NoConnectionError
| """
Utility functions for controllers.
"""
from random import choice
from django.conf import settings
from django.http import HttpResponseNotFound
from django.shortcuts import get_object_or_404
from models import CustomJobModel
from pymongo import Connection
from pymongo.errors import AutoReconnect, ConnectionFailure
from bdp_fe.jobconf.models import Job
from bdp_fe.middleware403 import Http403
HIDDEN_KEYS = ['_id', 'job_id']
class NoResultsError(Exception):
    """The queried job collection contains no documents."""


class NoConnectionError(Exception):
    """The MongoDB server could not be contacted."""
class MongoRecord(object):
    """
    A MongoRecord is a document from a Mongo database, but with additional
    methods to allow for easier display.
    """

    def __init__(self, raw_mongo_document, primary_key):
        # The raw document (a mapping) plus the primary-key field name.
        self.document = raw_mongo_document
        self.pk = primary_key

    def get_primary_key(self):
        """Gets the value of the primary key for this record"""
        return self.document[self.pk]

    def get_fields(self):
        """Gets the values of all non-primary keys for this record"""
        # items() works on Python 2 and 3, unlike the Py2-only iteritems();
        # keys are unique, so direct assignment matches the old setdefault().
        fields = {}
        for key, value in self.document.items():
            if key != self.pk:
                fields[key] = value
        return fields
def safe_int_param(query_dict, param_name, default_value=None):
    """
    Safe conversion of query parameters to int.

    By default, returns None for absent or non-integer values.
    """
    value = query_dict.get(param_name, '')
    try:
        converted = int(value)
    except ValueError:
        # Absent parameters become '' above and fail int() as well.
        converted = default_value
    return converted
def get_owned_job_or_40x(request, job_id):
    """Look up a job by id, returning it only when owned by the requester.

    A non-numeric or unknown id yields 404 handling; a job owned by a
    different user raises Http403.
    """
    try:
        job = get_object_or_404(Job, pk=int(job_id))
    except ValueError:
        # job_id could not even be parsed as an integer.
        raise HttpResponseNotFound()
    if job.user == request.user:
        return job
    raise Http403()
def retrieve_results(job_id, primary_key):
    """Fetch every Mongo document produced by a job as MongoRecord objects.

    When primary_key is falsy, a random non-hidden key from a sample
    document is chosen. Raises NoResultsError when the collection is empty
    and NoConnectionError when the Mongo server is unreachable.
    """
    jobmodel = CustomJobModel.objects.get(id=job_id)
    mongo_url = jobmodel.mongo_url()
    mongo_db = jobmodel.job.user.username
    mongo_collection = 'job_%s' % jobmodel.job.id
    try:
        connection = Connection(mongo_url)
        job_results = connection[mongo_db][mongo_collection]
        if not primary_key:
            some_result = job_results.find_one()
            if not some_result:
                raise NoResultsError
            primary_key = choice([k for k in some_result.keys()
                                  if k not in HIDDEN_KEYS])
        return [MongoRecord(job_result, primary_key)
                for job_result in job_results.find()]
    # The old "except AutoReconnect, ConnectionFailure:" form only caught
    # AutoReconnect (binding it to the name ConnectionFailure); a tuple is
    # required to actually catch both exception types.
    except (AutoReconnect, ConnectionFailure):
        raise NoConnectionError
| Python | 0.000002 |
49c64731fab1de1fc08b61a70190930b829d70d3 | Remove import for random | src/python/m5/internal/__init__.py | src/python/m5/internal/__init__.py | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import core
import debug
import event
import stats
import trace
| # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
import core
import debug
import event
import random
import stats
import trace
| Python | 0.000003 |
f3c7f0d488bcd41ed3fb19d83e78fa1436315a08 | Add test for improved pretty printing behaviour | bindings/pyroot/pythonizations/test/pretty_printing.py | bindings/pyroot/pythonizations/test/pretty_printing.py | import unittest
import ROOT
class PrettyPrinting(unittest.TestCase):
# Helpers
def _print(self, obj):
print("print({}) -> {}".format(repr(obj), obj))
# Tests
def test_RVec(self):
x = ROOT.ROOT.VecOps.RVec("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLVector(self):
x = ROOT.std.vector("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLMap(self):
x = ROOT.std.map("string", "int")()
for i, s in enumerate(["foo", "bar"]):
x[s] = i
self._print(x)
self.assertIn("foo", x.__str__())
self.assertIn("bar", x.__str__())
def test_STLPair(self):
x = ROOT.std.pair("string", "int")("foo", 42)
self._print(x)
self.assertIn("foo", x.__str__())
def test_STLString(self):
# std::string is not pythonized with the pretty printing, because:
# 1. gInterpreter->ToString("s") returns ""s""
# 2. cppyy already does the right thing
s = ROOT.std.string("x")
self.assertEqual(str(s), "x")
def test_TH1F(self):
x = ROOT.TH1F("name", "title", 10, 0, 1)
self._print(x)
self.assertEqual("Name: name Title: title NbinsX: 10", x.__str__())
def test_user_class(self):
# Test fall-back to __repr__
ROOT.gInterpreter.Declare('class MyClass {};')
x = ROOT.MyClass()
self._print(x)
s = x.__str__()
r = x.__repr__()
self.assertIn("MyClass object at", s)
self.assertEqual(s, r)
def test_null_object(self):
# ROOT-9935: test null proxied cpp object
x = ROOT.MakeNullPointer("TTree")
s = x.__str__()
r = x.__repr__()
self.assertIn("TTree object at", s)
self.assertEqual(s, r)
def test_user_class_with_str(self):
# ROOT-10967: Respect existing __str__ method defined in C++
ROOT.gInterpreter.Declare('struct MyClassWithStr { std::string __str__() { return "foo"; } };')
x = ROOT.MyClassWithStr()
self._print(x)
s = x.__str__()
r = x.__repr__()
self.assertIn("MyClassWithStr object at", r)
self.assertEqual(s, "foo")
# Test inherited class
ROOT.gInterpreter.Declare('struct MyClassWithStr2 : public MyClassWithStr { };')
x2 = ROOT.MyClassWithStr2()
self._print(x2)
s2 = x2.__str__()
r2 = x2.__repr__()
self.assertIn("MyClassWithStr2 object at", r2)
self.assertEqual(s2, "foo")
# TNamed and TObject are not pythonized because these object are touched
# by PyROOT before any pythonizations are added. Following, the classes
# are not piped through the pythonizor functions again.
"""
def test_TNamed(self):
x = ROOT.TNamed("name", "title")
self._print(x)
self.assertEqual("Name: name Title: title", x.__str__())
def test_TObject(self):
x = ROOT.TObject()
self._print(x)
self.assertEqual("Name: TObject Title: Basic ROOT object", x.__str__())
"""
if __name__ == '__main__':
unittest.main()
| import unittest
import ROOT
class PrettyPrinting(unittest.TestCase):
# Helpers
def _print(self, obj):
print("print({}) -> {}".format(repr(obj), obj))
# Tests
def test_RVec(self):
x = ROOT.ROOT.VecOps.RVec("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLVector(self):
x = ROOT.std.vector("float")(4)
for i in range(x.size()):
x[i] = i
self._print(x)
self.assertIn("{ 0", x.__str__())
def test_STLMap(self):
x = ROOT.std.map("string", "int")()
for i, s in enumerate(["foo", "bar"]):
x[s] = i
self._print(x)
self.assertIn("foo", x.__str__())
self.assertIn("bar", x.__str__())
def test_STLPair(self):
x = ROOT.std.pair("string", "int")("foo", 42)
self._print(x)
self.assertIn("foo", x.__str__())
def test_STLString(self):
# std::string is not pythonized with the pretty printing, because:
# 1. gInterpreter->ToString("s") returns ""s""
# 2. cppyy already does the right thing
s = ROOT.std.string("x")
self.assertEqual(str(s), "x")
def test_TH1F(self):
x = ROOT.TH1F("name", "title", 10, 0, 1)
self._print(x)
self.assertEqual("Name: name Title: title NbinsX: 10", x.__str__())
def test_user_class(self):
# Test fall-back to __repr__
ROOT.gInterpreter.Declare('class MyClass {};')
x = ROOT.MyClass()
self._print(x)
s = x.__str__()
r = x.__repr__()
self.assertIn("MyClass object at", s)
self.assertEqual(s, r)
def test_null_object(self):
# ROOT-9935: test null proxied cpp object
x = ROOT.MakeNullPointer("TTree")
s = x.__str__()
r = x.__repr__()
self.assertIn("TTree object at", s)
self.assertEqual(s, r)
# TNamed and TObject are not pythonized because these object are touched
# by PyROOT before any pythonizations are added. Following, the classes
# are not piped through the pythonizor functions again.
"""
def test_TNamed(self):
x = ROOT.TNamed("name", "title")
self._print(x)
self.assertEqual("Name: name Title: title", x.__str__())
def test_TObject(self):
x = ROOT.TObject()
self._print(x)
self.assertEqual("Name: TObject Title: Basic ROOT object", x.__str__())
"""
if __name__ == '__main__':
unittest.main()
| Python | 0 |
01c88b514c64f001fc7824a30b8609a425d646ef | Set defaults for CI and DETERMINISTIC_TESTS. (#653) | tests/conftest.py | tests/conftest.py | # -*- coding: utf-8 -*-
'''
General-purpose fixtures for vdirsyncer's testsuite.
'''
import logging
import os
import click_log
from hypothesis import HealthCheck, Verbosity, settings
import pytest
@pytest.fixture(autouse=True)
def setup_logging():
    """Route vdirsyncer's logger through click_log at DEBUG for every test."""
    logger = click_log.basic_config('vdirsyncer')
    logger.setLevel(logging.DEBUG)
try:
    import pytest_benchmark
    # The import is only an availability probe; drop the name again so it
    # does not linger in the module namespace.
    del pytest_benchmark
except ImportError:
    # pytest-benchmark isn't installed: provide a stub fixture that simply
    # calls the benchmarked function once.
    @pytest.fixture
    def benchmark():
        return lambda fn: fn()
# Don't fail tests just because an example takes a while to generate.
settings.suppress_health_check = [HealthCheck.too_slow]

settings.register_profile("ci", settings(
    max_examples=1000,
    verbosity=Verbosity.verbose,
))
settings.register_profile("deterministic", settings(
    derandomize=True,
))


def _truthy_env(name):
    # Environment flags default to "false" when the variable is unset.
    return os.environ.get(name, 'false').lower() == 'true'


if _truthy_env('DETERMINISTIC_TESTS'):
    settings.load_profile("deterministic")
elif _truthy_env('CI'):
    settings.load_profile("ci")
| # -*- coding: utf-8 -*-
'''
General-purpose fixtures for vdirsyncer's testsuite.
'''
import logging
import os
import click_log
from hypothesis import HealthCheck, Verbosity, settings
import pytest
@pytest.fixture(autouse=True)
def setup_logging():
    """Automatically enable DEBUG logging for vdirsyncer in each test."""
    vdirsyncer_logger = click_log.basic_config('vdirsyncer')
    vdirsyncer_logger.setLevel(logging.DEBUG)
try:
    import pytest_benchmark
    del pytest_benchmark  # probe only; keep the module namespace clean
except ImportError:
    # Provide a no-op replacement for the pytest-benchmark fixture.
    @pytest.fixture
    def benchmark():
        def run_once(func):
            return func()
        return run_once
# Don't fail examples merely for being slow to generate.
settings.suppress_health_check = [HealthCheck.too_slow]

settings.register_profile("ci", settings(
    max_examples=1000,
    verbosity=Verbosity.verbose,
))
settings.register_profile("deterministic", settings(
    derandomize=True,
))

# Use .get() with a 'false' default: a plain os.environ[...] lookup raises
# KeyError whenever the variable is not set, breaking local test runs.
if os.environ.get('DETERMINISTIC_TESTS', 'false').lower() == 'true':
    settings.load_profile("deterministic")
elif os.environ.get('CI', 'false').lower() == 'true':
    settings.load_profile("ci")
| Python | 0 |
88bba8a6145f67fd65e4062123db295601c92000 | Fix lint errors | tests/conftest.py | tests/conftest.py | # -*- encoding: utf-8
import os
from hotchocolate import Site
# TODO: Tidy this up, and don't duplicate code from cli.py
# Build the example site at import time so tests can inspect its output.
curdir = os.path.abspath(os.curdir)
os.chdir('tests/examplesite')
try:
    site = Site.from_folder('content')
    site.build()
finally:
    # Restore the working directory even if the build raises, so a build
    # failure doesn't leave later tests running from tests/examplesite.
    os.chdir(curdir)
| # -*- encoding: utf-8
import os
from hotchocolate import Site
import hotchocolate.cli as hcli
# TODO: Tidy this up, and don't duplicate code from cli.py
# Build the example site at import time so tests can inspect its output.
curdir = os.path.abspath(os.curdir)
os.chdir('tests/examplesite')
try:
    site = Site.from_folder('content')
    site.build()
finally:
    # Restore the working directory even if the build raises, so a build
    # failure doesn't leave later tests running from tests/examplesite.
    os.chdir(curdir)
| Python | 0.000396 |
8dc79a0a1b99d1742ae297db7da26a0404e5ec33 | Fix pep8 | tests/conftest.py | tests/conftest.py | import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base
from config import create_new_sqla
from helpers import get_video_douban_ids
test_database_url = 'sqlite:///test.db'
@pytest.fixture(scope='session')
def session(request):
    """SQLAlchemy session bound to a freshly created test database schema."""
    sqla = create_new_sqla(test_database_url, echo=False)
    engine = sqla['engine']
    Base.metadata.create_all(engine)

    def teardown():
        # Remove every table once the whole test session finishes.
        Base.metadata.drop_all(engine)

    request.addfinalizer(teardown)
    return sqla['session']
@pytest.fixture
def douban_movie_ids():
    """All douban ids known to the video helpers, materialized as a list."""
    ids = get_video_douban_ids()
    return list(ids)
| import pytest
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from models import Base
from config import create_new_sqla
from helpers import get_video_douban_ids
test_database_url = 'sqlite:///test.db'
@pytest.fixture(scope='session')
def session(request):
    """Provide a database session against a schema dropped after the run."""
    sqla = create_new_sqla(test_database_url, echo=False)
    db_session = sqla['session']
    db_engine = sqla['engine']
    Base.metadata.create_all(db_engine)
    # Drop the whole schema when the test session ends.
    request.addfinalizer(lambda: Base.metadata.drop_all(db_engine))
    return db_session
@pytest.fixture
def douban_movie_ids():
    """List of douban ids for every known video."""
    return list(get_video_douban_ids())
| Python | 0.000001 |
ab81767d7504bc3016786780902d8c3997e37f64 | Add option to use proxies in JiraHook | airflow/contrib/hooks/jira_hook.py | airflow/contrib/hooks/jira_hook.py | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from jira import JIRA
from jira.exceptions import JIRAError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class JiraHook(BaseHook, LoggingMixin):
    """
    Jira interaction hook, a Wrapper around JIRA Python SDK.

    :param jira_conn_id: reference to a pre-defined Jira Connection
    :type jira_conn_id: string
    """
    def __init__(self,
                 jira_conn_id='jira_default',
                 proxies=None):
        super(JiraHook, self).__init__(jira_conn_id)
        self.jira_conn_id = jira_conn_id
        self.proxies = proxies
        # The client is built eagerly by the get_conn() call below and
        # cached for the lifetime of the hook.
        self.client = None
        self.get_conn()

    def get_conn(self):
        """Return the cached JIRA client, creating it on first use."""
        if self.client:
            return self.client

        self.log.debug('Creating Jira client for conn_id: %s', self.jira_conn_id)
        get_server_info = True
        validate = True
        extra_options = {}
        conn = None
        if self.jira_conn_id is not None:
            conn = self.get_connection(self.jira_conn_id)
            if conn.extra is not None:
                extra_options = conn.extra_dejson
                # only required attributes are taken for now,
                # more can be added ex: async, logging, max_retries

                # verify
                if extra_options.get('verify', '').lower() == 'false':
                    extra_options['verify'] = False
                # validate
                if extra_options.get('validate', '').lower() == 'false':
                    validate = False
                if extra_options.get('get_server_info', '').lower() == 'false':
                    get_server_info = False

        try:
            self.client = JIRA(conn.host,
                               options=extra_options,
                               basic_auth=(conn.login, conn.password),
                               get_server_info=get_server_info,
                               validate=validate,
                               proxies=self.proxies)
        except JIRAError as jira_error:
            raise AirflowException('Failed to create jira client, jira error: %s'
                                   % str(jira_error))
        except Exception as e:
            raise AirflowException('Failed to create jira client, error: %s'
                                   % str(e))

        return self.client
| # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from jira import JIRA
from jira.exceptions import JIRAError
from airflow.exceptions import AirflowException
from airflow.hooks.base_hook import BaseHook
from airflow.utils.log.logging_mixin import LoggingMixin
class JiraHook(BaseHook, LoggingMixin):
    """
    Jira interaction hook, a Wrapper around JIRA Python SDK.

    :param jira_conn_id: reference to a pre-defined Jira Connection
    :type jira_conn_id: string
    """
    def __init__(self, jira_conn_id='jira_default'):
        super(JiraHook, self).__init__(jira_conn_id)
        self.jira_conn_id = jira_conn_id
        # Eagerly create and cache the JIRA client.
        self.client = None
        self.get_conn()

    def get_conn(self):
        """Return the cached JIRA client, building it on first use."""
        if self.client:
            return self.client

        self.log.debug('Creating Jira client for conn_id: %s', self.jira_conn_id)
        get_server_info = True
        validate = True
        extra_options = {}
        conn = None
        if self.jira_conn_id is not None:
            conn = self.get_connection(self.jira_conn_id)
            if conn.extra is not None:
                extra_options = conn.extra_dejson
                # only required attributes are taken for now,
                # more can be added ex: async, logging, max_retries

                # verify
                if extra_options.get('verify', '').lower() == 'false':
                    extra_options['verify'] = False
                # validate
                if extra_options.get('validate', '').lower() == 'false':
                    validate = False
                if extra_options.get('get_server_info', '').lower() == 'false':
                    get_server_info = False

        try:
            self.client = JIRA(conn.host,
                               options=extra_options,
                               basic_auth=(conn.login, conn.password),
                               get_server_info=get_server_info,
                               validate=validate)
        except JIRAError as err:
            raise AirflowException('Failed to create jira client, jira error: %s'
                                   % str(err))
        except Exception as err:
            raise AirflowException('Failed to create jira client, error: %s'
                                   % str(err))

        return self.client
| Python | 0 |
423ec9d9b38be990ab7dca027877e1c12f3d07fe | add in django-registration update media url | imagr_site/settings.py | imagr_site/settings.py | """
Django settings for imagr_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_0)ionh8p(-xw=uh-3_8un)^xo+=&obsad&lhohn-d93j(p!21'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'imagr_users.ImagrUser'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagr_users',
'imagr_images',
'south',
'registration',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagr_site.urls'
WSGI_APPLICATION = 'imagr_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
MEDIA_ROOT = BASE_DIR + "/media/"
| """
Django settings for imagr_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '_0)ionh8p(-xw=uh-3_8un)^xo+=&obsad&lhohn-d93j(p!21'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
AUTH_USER_MODEL = 'imagr_users.ImagrUser'
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagr_users',
'imagr_images',
'south',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagr_site.urls'
WSGI_APPLICATION = 'imagr_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = 'http://localhost:8000/media/'
MEDIA_ROOT = BASE_DIR + "/media/"
| Python | 0 |
1675252b3442ff4e32881ce1c28f1753c521fb3f | Remove the main from the new file | code/spearmint-configs/dbnmnist/mnistdbn.py | code/spearmint-configs/dbnmnist/mnistdbn.py | """Spearmint for the DBN module in pydeeplearn."""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import argparse
from lib import deepbelief as db
from lib.common import *
from lib.activationfunctions import *
from read import readmnist
parser = argparse.ArgumentParser(description='digit recognition')
parser.add_argument('--path',dest='path', type = str, default="/data/mcr10/project/pydeeplearn/code/MNIST",
help="the path to the MNIST files")
parser.add_argument('--trainSize', type=int, default=100,
help='the number of tranining cases to be considered')
parser.add_argument('--testSize', type=int, default=10,
help='the number of testing cases to be considered')
args = parser.parse_args()
def trainDBN(unsupervisedLearningRate,
supervisedLearningRate,
visibleDropout,
hiddenDropout,
miniBatchSize,
momentumMax,
maxEpochs):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(args.trainSize, args.trainSize + args.testSize,
digits=None, bTrain=True, path=args.path)
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainVectors = np.array(trainVectors, dtype='float')
trainingScaledVectors = scale(trainVectors)
testVectors = np.array(testVectors, dtype='float')
testingScaledVectors = scale(testVectors)
trainVectorLabels = labelsToVectors(trainLabels, 10)
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
nesterovMomentum=True,
rbmNesterovMomentum=True,
activationFunction=Rectified(),
rbmActivationFunctionVisible=Identity(),
rbmActivationFunctionHidden=RectifiedNoisy(),
rmsprop=True,
visibleDropout=0.8,
hiddenDropout=0.5,
weightDecayL1=0,
weightDecayL2=0,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
miniBatchSize=miniBatchSize,
# TODO: make this a learned param
preTrainEpochs=100,
sparsityConstraintRbm=False,
sparsityTragetRbm=0.01,
sparsityRegularizationRbm=None)
net.train(trainingScaledVectors, trainVectorLabels,
maxEpochs=maxEpochs, validation=False)
proabilities, predicted = net.classify(testingScaledVectors)
error = getClassificationError(predicted, testLabels)
print "error", error
return error
# Write a function like this called 'main'
def main(job_id, params):
print 'params', params
return trainDBN(unsupervisedLearningRate=params['unsupervisedLearningRate'][0],
supervisedLearningRate=params['supervisedLearningRate'][0],
visibleDropout=params['visibleDropout'][0],
hiddenDropout=params['hiddenDropout'][0],
miniBatchSize=params['miniBatchSize'][0],
momentumMax=params['momentumMax'][0],
maxEpochs=params['maxEpochs'][0])
| """Spearmint for the DBN module in pydeeplearn."""
__author__ = "Mihaela Rosca"
__contact__ = "mihaela.c.rosca@gmail.com"
import argparse
from lib import deepbelief as db
from lib.common import *
from lib.activationfunctions import *
from read import readmnist
parser = argparse.ArgumentParser(description='digit recognition')
parser.add_argument('--path',dest='path', type = str, default="/data/mcr10/project/pydeeplearn/code/MNIST",
help="the path to the MNIST files")
parser.add_argument('--trainSize', type=int, default=100,
help='the number of tranining cases to be considered')
parser.add_argument('--testSize', type=int, default=10,
help='the number of testing cases to be considered')
args = parser.parse_args()
def trainDBN(unsupervisedLearningRate,
supervisedLearningRate,
visibleDropout,
hiddenDropout,
miniBatchSize,
momentumMax,
maxEpochs):
trainVectors, trainLabels =\
readmnist.read(0, args.trainSize, digits=None, bTrain=True, path=args.path)
testVectors, testLabels =\
readmnist.read(args.trainSize, args.trainSize + args.testSize,
digits=None, bTrain=True, path=args.path)
trainVectors, trainLabels = shuffle(trainVectors, trainLabels)
trainVectors = np.array(trainVectors, dtype='float')
trainingScaledVectors = scale(trainVectors)
testVectors = np.array(testVectors, dtype='float')
testingScaledVectors = scale(testVectors)
trainVectorLabels = labelsToVectors(trainLabels, 10)
net = db.DBN(5, [784, 1000, 1000, 1000, 10],
binary=False,
unsupervisedLearningRate=unsupervisedLearningRate,
supervisedLearningRate=supervisedLearningRate,
momentumMax=momentumMax,
nesterovMomentum=True,
rbmNesterovMomentum=True,
activationFunction=Rectified(),
rbmActivationFunctionVisible=Identity(),
rbmActivationFunctionHidden=RectifiedNoisy(),
rmsprop=True,
visibleDropout=0.8,
hiddenDropout=0.5,
weightDecayL1=0,
weightDecayL2=0,
rbmHiddenDropout=1.0,
rbmVisibleDropout=1.0,
miniBatchSize=miniBatchSize,
# TODO: make this a learned param
preTrainEpochs=100,
sparsityConstraintRbm=False,
sparsityTragetRbm=0.01,
sparsityRegularizationRbm=None)
net.train(trainingScaledVectors, trainVectorLabels,
maxEpochs=maxEpochs, validation=False)
proabilities, predicted = net.classify(testingScaledVectors)
error = getClassificationError(predicted, testLabels)
print "error", error
return error
# Write a function like this called 'main'
def main(job_id, params):
print 'params', params
return trainDBN(unsupervisedLearningRate=params['unsupervisedLearningRate'][0],
supervisedLearningRate=params['supervisedLearningRate'][0],
visibleDropout=params['visibleDropout'][0],
hiddenDropout=params['hiddenDropout'][0],
miniBatchSize=params['miniBatchSize'][0],
momentumMax=params['momentumMax'][0],
maxEpochs=params['maxEpochs'][0])
if __name__ == '__main__':
params = {
'unsupervisedLearningRate': [0],
'supervisedLearningRate': [0],
'visibleDropout': [0],
'hiddenDropout': [0],
'miniBatchSize': [0],
'momentumMax': [0],
'maxEpochs': [0]
}
main(1, params) | Python | 0.000002 |
ac754a6a711edc9b3628499ae18e74892efd7f98 | Add recording interaction print statements | src/tdl/runner/recording_system.py | src/tdl/runner/recording_system.py | import unirest
RECORDING_SYSTEM_ENDPOINT = "http://localhost:41375"
class RecordingEvent:
def __init__(self):
pass
ROUND_START = 'new'
ROUND_SOLUTION_DEPLOY = 'deploy'
ROUND_COMPLETED = 'done'
class RecordingSystem:
def __init__(self, recording_required):
self._recording_required = recording_required
def is_recording_system_ok(self):
return RecordingSystem.is_running() if self._recording_required else True
@staticmethod
def is_running():
try:
response = unirest.get("{}/status".format(RECORDING_SYSTEM_ENDPOINT))
if response.code == 200 and response.body.startswith("OK"):
return True
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
return False
def notify_event(self, round_id, event_name):
print('Notify round "{}", event "{}"'.format(round_id, event_name))
self._send_post("/notify", round_id + "/" + event_name)
def tell_to_stop(self):
print('Stopping recording system')
self._send_post("/stop", "")
def _send_post(self, endpoint, body):
if not self.is_recording_system_ok():
return
try:
response = unirest.post("{}{}".format(RECORDING_SYSTEM_ENDPOINT, endpoint),
params=body)
if response.code != 200:
print("Recording system returned code: {}".format(response.code))
return
if not response.body.startswith("ACK"):
print("Recording system returned body: {}".format(response.body))
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
def on_new_round(self, round_id):
self.notify_event(round_id, RecordingEvent.ROUND_START)
| import unirest
RECORDING_SYSTEM_ENDPOINT = "http://localhost:41375"
class RecordingEvent:
def __init__(self):
pass
ROUND_START = 'new'
ROUND_SOLUTION_DEPLOY = 'deploy'
ROUND_COMPLETED = 'done'
class RecordingSystem:
def __init__(self, recording_required):
self._recording_required = recording_required
def is_recording_system_ok(self):
return RecordingSystem.is_running() if self._recording_required else True
@staticmethod
def is_running():
try:
response = unirest.get("{}/status".format(RECORDING_SYSTEM_ENDPOINT))
if response.code == 200 and response.body.startswith("OK"):
return True
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
return False
def notify_event(self, round_id, event_name):
self._send_post("/notify", round_id + "/" + event_name)
def tell_to_stop(self):
self._send_post("/stop", "")
def _send_post(self, endpoint, body):
if not self.is_recording_system_ok():
return
try:
response = unirest.post("{}{}".format(RECORDING_SYSTEM_ENDPOINT, endpoint),
params=body)
if response.code != 200:
print("Recording system returned code: {}".format(response.code))
return
if not response.body.startswith("ACK"):
print("Recording system returned body: {}".format(response.body))
except Exception as e:
print("Could not reach recording system: {}".format(str(e)))
def on_new_round(self, round_id):
self.notify_event(round_id, RecordingEvent.ROUND_START)
| Python | 0.000004 |
0b3247c23d37c372d3f3984391b976fa904d00c6 | bump to v1.4.0 (#5975) | var/spack/repos/builtin/packages/miniamr/package.py | var/spack/repos/builtin/packages/miniamr/package.py | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Miniamr(MakefilePackage):
"""Proxy Application. 3D stencil calculation with
Adaptive Mesh Refinement (AMR)
"""
homepage = "https://mantevo.org"
url = "https://github.com/Mantevo/miniAMR/archive/v1.4.tar.gz"
tags = ['proxy-app', 'ecp-proxy-app']
version('1.4.0', '3aab0247047a94e343709cf2e51cc46e')
variant('mpi', default=True, description='Build with MPI support')
depends_on('mpi', when="+mpi")
@property
def build_targets(self):
targets = []
if '+mpi' in self.spec:
targets.append('CC={0}'.format(self.spec['mpi'].mpicc))
targets.append('LD={0}'.format(self.spec['mpi'].mpicc))
targets.append('LDLIBS=-lm')
else:
targets.append('CC={0}'.format(self.compiler.cc))
targets.append('LD={0}'.format(self.compiler.cc))
targets.append('--directory=ref')
return targets
def install(self, spec, prefix):
# Manual installation
mkdir(prefix.bin)
mkdir(prefix.doc)
install('ref/ma.x', prefix.bin)
# Install Support Documents
install('ref/README', prefix.doc)
| ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Miniamr(MakefilePackage):
"""Proxy Application. 3D stencil calculation with
Adaptive Mesh Refinement (AMR)
"""
homepage = "https://mantevo.org"
url = "http://mantevo.org/downloads/releaseTarballs/miniapps/MiniAMR/miniAMR_1.0_all.tgz"
tags = ['proxy-app', 'ecp-proxy-app']
version('1.0', '812e5aaaab99689a4e9381a3bbd718a6')
variant('mpi', default=True, description='Build with MPI support')
depends_on('mpi', when="+mpi")
@property
def build_targets(self):
targets = []
if '+mpi' in self.spec:
targets.append('CC={0}'.format(self.spec['mpi'].mpicc))
targets.append('LDLIBS=-lm')
targets.append('--file=Makefile.mpi')
targets.append('--directory=miniAMR_ref')
else:
targets.append('--file=Makefile.serial')
targets.append('--directory=miniAMR_serial')
return targets
def install(self, spec, prefix):
# Manual installation
mkdir(prefix.bin)
mkdir(prefix.doc)
if '+mpi' in spec:
install('miniAMR_ref/miniAMR.x', prefix.bin)
else:
install('miniAMR_serial/miniAMR.x', prefix.bin)
# Install Support Documents
install('miniAMR_ref/README', prefix.doc)
| Python | 0 |
779393e6c18539c97ff3bdaeb471253170645bc2 | Update group.py | web-app/numeter_webapp/configuration/forms/group.py | web-app/numeter_webapp/configuration/forms/group.py | """
Group Form module.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from core.models import Group
class Group_Form(forms.ModelForm):
"""Simple Group Form"""
class Meta:
model = Group
widgets = {
'name': forms.TextInput({'placeholder':_('Name'), 'class':'span', 'ng-model': 'tabIndex.form.name'}),
}
def get_submit_url(self):
"""Return url matching with creation or updating."""
if self.instance.id:
return self.instance.get_rest_detail_url()
else:
return self.instance.get_rest_list_url()
def get_submit_method(self):
"""Return method matching with creation or updating."""
if self.instance.id:
return 'PATCH'
else:
return 'POST'
| """
Group Form module.
"""
from django import forms
from django.utils.translation import ugettext_lazy as _
from djangular.forms.angular_model import NgModelFormMixin
from core.models import Group
class Group_Form(forms.ModelForm):
"""Simple Group Form"""
class Meta:
model = Group
widgets = {
'name': forms.TextInput({'placeholder':_('Name'), 'class':'span', 'ng-model': 'tabIndex.form.name'}),
}
def get_submit_url(self):
"""Return url matching with creation or updating."""
if self.instance.id:
return self.instance.get_rest_detail_url()
else:
return self.instance.get_rest_list_url()
def get_submit_method(self):
"""Return method matching with creation or updating."""
if self.instance.id:
return 'PATCH'
else:
return 'POST'
| Python | 0 |
7ca6dd5cd84222845db331afd97fc2f314999cff | fix yaspin.compat module docstring | yaspin/compat.py | yaspin/compat.py | # -*- coding: utf-8 -*-
"""
yaspin.compat
~~~~~~~~~~~~~
Compatibility layer.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
builtin_str = str
bytes = str
str = unicode # noqa
def iteritems(dct):
return dct.iteritems()
else:
builtin_str = str
bytes = bytes
str = str
def iteritems(dct):
return dct.items()
| # -*- coding: utf-8 -*-
"""
tests.compat
~~~~~~~~~~~~~
Compatibility layer.
"""
import sys
PY2 = sys.version_info[0] == 2
if PY2:
builtin_str = str
bytes = str
str = unicode # noqa
def iteritems(dct):
return dct.iteritems()
else:
builtin_str = str
bytes = bytes
str = str
def iteritems(dct):
return dct.items()
| Python | 0 |
739f72ae0bd873ac8d51789e90988d609b08a803 | Add typos and nonsense to pass distribution plan | inpassing/pass_util.py | inpassing/pass_util.py | # Copyright (c) 2016 Luke San Antonio Bialecki
# All rights reserved.
from sqlalchemy.sql import and_, or_
from .models import Pass, PassRequest, db
def get_user_passes(user_id):
"""Returns all owned, borrowed and requested passes of a user."""
# Find pending and successfull requests
pending_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id == None)
).all()
successful_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id != None)
).all()
# Borrowed passes are ones that are not owned by this user but are currently
# being used / borrowed.
borrowed_passes = db.session.query(Pass).filter(
and_(Pass.owner_id != user_id,
Pass.user_id == user_id)
).all()
# All non-pending passes related to this user
passes = borrowed_passes[:]
# Note that the request state ID and spot num can be different from
# what was actually assigned, so we have to use the values from the assigned
# pass object, itself.
passes.extend([req.assigned_pass for req in successful_requests])
ret = []
ret.extend([{
'pass_id': pas.id,
'org_id': pas.org_id,
'pending': False,
'owned': pas.owner_id == user_id,
'using': ((pas.owner_id == user_id and pas.user_id == None) or
pas.user_id == user_id),
'state_id': pas.state_id,
'spot_num': pas.spot_num,
} for pas in passes])
ret.extend([{
'request_id': req.id,
'org_id': req.org_id,
'pending': True,
'owned': False,
'using': False,
'request_time': req.request_time.isoformat(),
'state_id': req.state_id,
'spot_num': req.spot_num,
} for req in pending_requests])
return ret
def distribute_passes(users):
"""Distribute / lend passes to new users with a fancy magic algorithm.
= Proposed algorithm
1. For each user, weight the time since their last borrow and how many
borrows overall to form their score.
2. Sort users by score.
3. Give pass to the user with the highest score.
4. ???
5. Profit
= Stupid Ideas
1. First come, first serve.
2. Distribute a pass to a random (seeking) individual at a random time after
the pass goes up for grabs.
3. Give Luke the pass. *Always*.
= Smart Ideas
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$ 1. Users pay for each pass where price scales with the score $
$ detailed above. $
$ 2. Have users play that gambling game where you drop a ball on pegs $
$ and it randomly goes left or right until the bottom. The ball in the $
$ center hole gets the pass. Each ball costs the user one ad viewing. $
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
| # Copyright (c) 2016 Luke San Antonio Bialecki
# All rights reserved.
from sqlalchemy.sql import and_, or_
from .models import Pass, PassRequest, db
def get_user_passes(user_id):
"""Returns all owned, borrowed and requested passes of a user."""
# Find pending and successfull requests
pending_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id == None)
).all()
successful_requests = db.session.query(PassRequest).filter(
and_(PassRequest.requestor_id == user_id,
PassRequest.assigned_pass_id != None)
).all()
# Borrowed passes are ones that are not owned by this user but are currently
# being used / borrowed.
borrowed_passes = db.session.query(Pass).filter(
and_(Pass.owner_id != user_id,
Pass.user_id == user_id)
).all()
# All non-pending passes related to this user
passes = borrowed_passes[:]
# Note that the request state ID and spot num can be different from
# what was actually assigned, so we have to use the values from the assigned
# pass object, itself.
passes.extend([req.assigned_pass for req in successful_requests])
ret = []
ret.extend([{
'pass_id': pas.id,
'org_id': pas.org_id,
'pending': False,
'owned': pas.owner_id == user_id,
'using': ((pas.owner_id == user_id and pas.user_id == None) or
pas.user_id == user_id),
'state_id': pas.state_id,
'spot_num': pas.spot_num,
} for pas in passes])
ret.extend([{
'request_id': req.id,
'org_id': req.org_id,
'pending': True,
'owned': False,
'using': False,
'request_time': req.request_time.isoformat(),
'state_id': req.state_id,
'spot_num': req.spot_num,
} for req in pending_requests])
return ret
def distribute_passes(users):
"""Distribute / lend passes to new users with a fancy magic algorithm.
= Proposed algorithm
1. For each user, weight the time since their last borrow and how many
borrows overall to form their score.
2. Sort users by score
3. Give pass to top user.
= Stupid Ideas
1. First come, first serve.
2. Distribute a pass to a random seeking individual at a random time after
the pass goes up for grabs.
3. Give Luke the pass. Always.
= Smart Ideas
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
$ 1. Pay to for each pass. Price scales with the score detailed above. $
$ 2. Have users play that gambling game where you drop a ball on pegs $
$ and it randomly goes left or right until the bottom. The ball in the $
$ center hole gets the pass. Each ball costs the user one ad viewing. $
$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$
"""
| Python | 0 |
b9fc0685b3adb05a5049cfa9b68676e00878d48a | Add .fillna(0) | instagram_collector.py | instagram_collector.py | import sys
from settings import instgram_access_token
from api import InstagramAPI, Alchemy
import pandas as pd
def following_users(api, user_name):
instgram_user_id = api.user_id(user_name=user_name)
following_users = api.follows_list(user_id=instgram_user_id)
return following_users
def userinfo_list(api, following_users):
userinfo_list = []
for user in following_users:
entries = api.media_list(user["user_id"])
for entry in entries:
tag_list = Alchemy.tag_list(image_url=entry['url'])
if tag_list is None:
return userinfo_list
entry.update({'tag_list': tag_list})
tags = [entry['tag_list'] for entry in entries]
df = pd.DataFrame(tags).fillna(0)
user_summery = df.sum()
user_summery = user_summery.to_dict()
user.update(user_summery)
userinfo_list.append(user)
return userinfo_list
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if len(argvs) != 2:
print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0])
quit()
instgram_user_name = argvs[1]
api = InstagramAPI(access_token=instgram_access_token)
following_users = following_users(api, instgram_user_name)
following_users = following_users[0:40]
userinfo_list = userinfo_list(api, following_users)
users_df = pd.DataFrame(userinfo_list).fillna(0)
users_df.to_csv("user_tags.csv")
# for following_user in following_users:
# # entries = api.media_list(user_name=following_user)
# # for entry in entries:
# # image_url = entry["url"]
# # tag_list = Alchemy.tag_list(image_url=image_url)
# # entry.update({"tag_list": tag_list})
# # print(entry)
# # print(entries)
print(userinfo_list)
| import sys
from settings import instgram_access_token
from api import InstagramAPI, Alchemy
import pandas as pd
def following_users(api, user_name):
instgram_user_id = api.user_id(user_name=user_name)
following_users = api.follows_list(user_id=instgram_user_id)
return following_users
def userinfo_list(api, following_users):
userinfo_list = []
for user in following_users:
entries = api.media_list(user["user_id"])
for entry in entries:
tag_list = Alchemy.tag_list(image_url=entry['url'])
if tag_list is None:
return userinfo_list
entry.update({'tag_list': tag_list})
tags = [entry['tag_list'] for entry in entries]
df = pd.DataFrame(tags).fillna(0)
user_summery = df.sum()
user_summery = user_summery.to_dict()
user.update(user_summery)
userinfo_list.append(user)
return userinfo_list
if __name__ == '__main__':
argvs = sys.argv
argc = len(argvs)
if len(argvs) != 2:
print('Usage: # python %s INSTAGRAM_USER_NAME' % argvs[0])
quit()
instgram_user_name = argvs[1]
api = InstagramAPI(access_token=instgram_access_token)
following_users = following_users(api, instgram_user_name)
following_users = following_users[0:40]
userinfo_list = userinfo_list(api, following_users)
users_df = pd.DataFrame(userinfo_list)
users_df.to_csv("user_tags.csv")
# for following_user in following_users:
# # entries = api.media_list(user_name=following_user)
# # for entry in entries:
# # image_url = entry["url"]
# # tag_list = Alchemy.tag_list(image_url=image_url)
# # entry.update({"tag_list": tag_list})
# # print(entry)
# # print(entries)
print(userinfo_list)
| Python | 0.000001 |
1483b7946f929ee6dc8d5a8e972c712af35d4aea | Add capacity to save parsed objects to models in management command "process_xslt" | xml_json_import/management/commands/process_xslt.py | xml_json_import/management/commands/process_xslt.py | from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
import importlib
class Command(BaseCommand):
help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'
def add_arguments(self, parser):
parser.add_argument('url', help='URL to fetch source XML')
parser.add_argument('xslt_file', help='Path to XSLT transformation file')
parser.add_argument('--validate', action='store_true',
help='Validate against Relax NG schema after transformation')
rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
parser.add_argument('--rng_file', default=rng_file,
help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
'Used only if --validate is set')
parser.add_argument('--save', action='store_true',
help='Save data to the model. Successful validation against Relax NG '
'schema is required. Model names and fields in transformed XML '
'must represent existing models and fields. Otherwise import '
'will break with an exception')
def handle(self, *args, **options):
response = urllib2.urlopen(options['url'])
encoding = response.headers.getparam('charset')
content_type = response.info().type
if 'xml' in content_type:
source_etree = etree.parse(response)
elif 'html' in content_type:
source_etree = html.parse(response)
xslt_etree = etree.parse(options['xslt_file'])
transform = etree.XSLT(xslt_etree)
transformed_etree = transform(source_etree)
output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + output
if options['validate'] or options['save']:
rng_file_etree = etree.parse(options['rng_file'])
relaxng = etree.RelaxNG(rng_file_etree)
try:
relaxng.assertValid(transformed_etree)
print 'Document is valid'
if options['save']:
saved_objects_count = 0
for model_element in transformed_etree.xpath('//model'):
application_name, model_name = model_element.attrib['model'].split('.')
models_import_str = application_name + '.models'
models = importlib.import_module(models_import_str)
model = getattr(models, model_name)
for item_element in model_element.xpath('.//item'):
obj = model()
for field_element in item_element.xpath('.//field'):
setattr(obj, field_element.attrib['name'], field_element.text)
obj.save()
saved_objects_count += 1
print 'Saved objects: ' + str(saved_objects_count)
except etree.DocumentInvalid as ex:
print 'Document is not valid: ' + str(ex)
| from django.core.management.base import BaseCommand
from lxml import etree, html
import urllib2
from os import path
class Command(BaseCommand):
help = 'Processes XSLT transformation on a fetched by URL resource and outputs the result'
def add_arguments(self, parser):
parser.add_argument('url', help='URL to fetch source XML')
parser.add_argument('xslt_file', help='Path to XSLT transformation file')
parser.add_argument('--validate', action='store_true',
help='Validate against Relax NG schema after transformation')
rng_file = path.join(path.dirname(path.dirname(path.dirname(path.abspath(__file__)))), 'schema.rng')
parser.add_argument('--rng_file', default=rng_file,
help='Path to RELAX NG file. Defaults to schema.rng in module dir. '
'Used only if --validate is set')
def handle(self, *args, **options):
response = urllib2.urlopen(options['url'])
encoding = response.headers.getparam('charset')
content_type = response.info().type
if 'xml' in content_type:
source_etree = etree.parse(response)
elif 'html' in content_type:
source_etree = html.parse(response)
xslt_etree = etree.parse(options['xslt_file'])
transform = etree.XSLT(xslt_etree)
transformed_etree = transform(source_etree)
output = etree.tostring(transformed_etree, pretty_print=True, encoding=encoding)
print '<?xml version="1.0" encoding="' + encoding + '"?>\n' + output
if options['validate']:
rng_file_etree = etree.parse(options['rng_file'])
relaxng = etree.RelaxNG(rng_file_etree)
try:
relaxng.assertValid(transformed_etree)
print 'Document is valid'
except etree.DocumentInvalid as ex:
print 'Document is not valid: ' + str(ex)
| Python | 0 |
5ac7bba7ba8f411ed8daaf2055fde56eda152b6c | Add missing context processor to test app | tests/runtests.py | tests/runtests.py | #!/usr/bin/env python
import os
import sys
from optparse import OptionParser
AVAILABLE_DATABASES = {
'psql': {'ENGINE': 'django.db.backends.postgresql_psycopg2'},
'mysql': {'ENGINE': 'django.db.backends.mysql'},
'sqlite': {'ENGINE': 'django.db.backends.sqlite3'},
}
def main():
# Parse the command-line options.
parser = OptionParser()
parser.add_option(
"-v", "--verbosity",
action="store",
dest="verbosity",
default="1",
type="choice",
choices=["0", "1", "2", "3"],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_option(
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_option(
"--failfast",
action="store_true",
dest="failfast",
default=False,
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_option(
"-d", "--database",
action="store",
dest="database",
default="sqlite",
type="choice",
choices=list(AVAILABLE_DATABASES.keys()),
help="Select database backend for tests. Available choices: {}".format(
', '.join(AVAILABLE_DATABASES.keys())),
)
options, args = parser.parse_args()
# Configure Django.
from django.conf import settings
# database settings
if options.database:
database_setting = AVAILABLE_DATABASES[options.database]
if options.database == "sqlite":
database_default_name = os.path.join(os.path.dirname(__file__), "db.sqlite3")
else:
database_default_name = "test_project"
database_setting.update(dict(
NAME=os.environ.get("DB_NAME", database_default_name),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", "")))
else:
database_setting = dict(
ENGINE=os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
NAME=os.environ.get("DB_NAME", os.path.join(os.path.dirname(__file__), "db.sqlite3")),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", ""))
settings.configure(
DEBUG=False,
DATABASES={
"default": database_setting
},
ROOT_URLCONF='test_watson.urls',
INSTALLED_APPS=(
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"watson",
"test_watson",
),
MIDDLEWARE_CLASSES=(
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
),
USE_TZ=True,
STATIC_URL="/static/",
TEST_RUNNER="django.test.runner.DiscoverRunner",
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'OPTIONS': {'context_processors': ['django.contrib.auth.context_processors.auth']},
'APP_DIRS': True,
}],
)
# Run Django setup (1.7+).
import django
try:
django.setup()
except AttributeError:
pass # This is Django < 1.7
# Configure the test runner.
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=int(options.verbosity),
interactive=options.interactive,
failfast=options.failfast,
)
# Run the tests.
failures = test_runner.run_tests(["test_watson"])
if failures:
sys.exit(failures)
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import os
import sys
from optparse import OptionParser
AVAILABLE_DATABASES = {
'psql': {'ENGINE': 'django.db.backends.postgresql_psycopg2'},
'mysql': {'ENGINE': 'django.db.backends.mysql'},
'sqlite': {'ENGINE': 'django.db.backends.sqlite3'},
}
def main():
# Parse the command-line options.
parser = OptionParser()
parser.add_option(
"-v", "--verbosity",
action="store",
dest="verbosity",
default="1",
type="choice",
choices=["0", "1", "2", "3"],
help="Verbosity level; 0=minimal output, 1=normal output, 2=all output",
)
parser.add_option(
"--noinput",
action="store_false",
dest="interactive",
default=True,
help="Tells Django to NOT prompt the user for input of any kind.",
)
parser.add_option(
"--failfast",
action="store_true",
dest="failfast",
default=False,
help="Tells Django to stop running the test suite after first failed test.",
)
parser.add_option(
"-d", "--database",
action="store",
dest="database",
default="sqlite",
type="choice",
choices=list(AVAILABLE_DATABASES.keys()),
help="Select database backend for tests. Available choices: {}".format(
', '.join(AVAILABLE_DATABASES.keys())),
)
options, args = parser.parse_args()
# Configure Django.
from django.conf import settings
# database settings
if options.database:
database_setting = AVAILABLE_DATABASES[options.database]
if options.database == "sqlite":
database_default_name = os.path.join(os.path.dirname(__file__), "db.sqlite3")
else:
database_default_name = "test_project"
database_setting.update(dict(
NAME=os.environ.get("DB_NAME", database_default_name),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", "")))
else:
database_setting = dict(
ENGINE=os.environ.get("DB_ENGINE", 'django.db.backends.sqlite3'),
NAME=os.environ.get("DB_NAME", os.path.join(os.path.dirname(__file__), "db.sqlite3")),
USER=os.environ.get("DB_USER", ""),
PASSWORD=os.environ.get("DB_PASSWORD", ""))
settings.configure(
DEBUG=False,
DATABASES={
"default": database_setting
},
ROOT_URLCONF='test_watson.urls',
INSTALLED_APPS=(
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.messages",
"django.contrib.staticfiles",
"django.contrib.admin",
"watson",
"test_watson",
),
MIDDLEWARE_CLASSES=(
"django.middleware.common.CommonMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
),
USE_TZ=True,
STATIC_URL="/static/",
TEST_RUNNER="django.test.runner.DiscoverRunner",
TEMPLATES=[{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['templates'],
'APP_DIRS': True,
}],
)
# Run Django setup (1.7+).
import django
try:
django.setup()
except AttributeError:
pass # This is Django < 1.7
# Configure the test runner.
from django.test.utils import get_runner
TestRunner = get_runner(settings)
test_runner = TestRunner(
verbosity=int(options.verbosity),
interactive=options.interactive,
failfast=options.failfast,
)
# Run the tests.
failures = test_runner.run_tests(["test_watson"])
if failures:
sys.exit(failures)
if __name__ == "__main__":
main()
| Python | 0.000001 |
84cdde09d574d2a52446bd751445747407733b22 | Remove print statement | tests/settings.py | tests/settings.py | import uuid
import os.path
from django.conf import global_settings, settings
from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps
from oscar.defaults import * # noqa
from accounts import TEMPLATE_DIR as ACCOUNTS_TEMPLATE_DIR
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
STATICFILES_FINDERS=(
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
SECRET_KEY = str(uuid.uuid4())
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'accounts',
'compressor',
'widget_tweaks',
] + get_core_apps()
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
)
DEBUG=False
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
}
}
ROOT_URLCONF = 'tests.urls'
TEMPLATE_DIRS = (
OSCAR_MAIN_TEMPLATE_DIR,
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
ACCOUNTS_TEMPLATE_DIR,
# Include sandbox templates as they patch from templates that
# are in Oscar 0.4 but not 0.3
'sandbox/templates',
)
STATIC_URL='/static/'
COMPRESS_ROOT=''
COMPRESS_ENABLED=False
SITE_ID=1
ACCOUNTS_UNIT_NAME='Giftcard'
NOSE_ARGS=['--nocapture']
USE_TZ=True
DDF_FILL_NULLABLE_FIELDS=False
ACCOUNTS_DEFERRED_INCOME_ACCOUNT_TYPES=('Test accounts',)
| import uuid
import os.path
from django.conf import global_settings, settings
from oscar import OSCAR_MAIN_TEMPLATE_DIR, get_core_apps
from oscar.defaults import * # noqa
from accounts import TEMPLATE_DIR as ACCOUNTS_TEMPLATE_DIR
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
}
}
STATICFILES_FINDERS=(
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
SECRET_KEY = str(uuid.uuid4())
INSTALLED_APPS=[
'django.contrib.auth',
'django.contrib.admin',
'django.contrib.contenttypes',
'django.contrib.staticfiles',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.flatpages',
'accounts',
'compressor',
'widget_tweaks',
] + get_core_apps()
MIDDLEWARE_CLASSES=global_settings.MIDDLEWARE_CLASSES + (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'oscar.apps.basket.middleware.BasketMiddleware',
)
TEMPLATE_CONTEXT_PROCESSORS=global_settings.TEMPLATE_CONTEXT_PROCESSORS + (
'django.core.context_processors.request',
'oscar.apps.search.context_processors.search_form',
'oscar.apps.promotions.context_processors.promotions',
'oscar.apps.checkout.context_processors.checkout',
'oscar.core.context_processors.metadata',
)
DEBUG=False
HAYSTACK_CONNECTIONS = {
'default': {
'ENGINE': 'haystack.backends.simple_backend.SimpleEngine'
}
}
ROOT_URLCONF = 'tests.urls'
TEMPLATE_DIRS = (
OSCAR_MAIN_TEMPLATE_DIR,
os.path.join(OSCAR_MAIN_TEMPLATE_DIR, 'templates'),
ACCOUNTS_TEMPLATE_DIR,
# Include sandbox templates as they patch from templates that
# are in Oscar 0.4 but not 0.3
'sandbox/templates',
)
print TEMPLATE_DIRS
STATIC_URL='/static/'
COMPRESS_ROOT=''
COMPRESS_ENABLED=False
SITE_ID=1
ACCOUNTS_UNIT_NAME='Giftcard'
NOSE_ARGS=['--nocapture']
USE_TZ=True
DDF_FILL_NULLABLE_FIELDS=False
ACCOUNTS_DEFERRED_INCOME_ACCOUNT_TYPES=('Test accounts',)
| Python | 0.007015 |
26116bb984f7a970c67bcdc01ff026a3fc5f0905 | create secondary parses | tests/test_ddl.py | tests/test_ddl.py | from pytest import fixture
from cdm.ddl import parse_line, create_vertex, create_vertex_index,\
CreateVertex, \
CreateEdge, CreateProperty, CreateIndex, CreateGraph
def test_create_graph():
s = "CREATE GRAPH jon"
parsed = parse_line(s)
assert isinstance(parsed, CreateGraph)
assert "system.createGraph('jon').build()" in str(parsed)
def test_create_vertex_label():
cmd = "CREATE vertex movie"
result = create_vertex.parseString(cmd)[0]
assert isinstance(result, CreateVertex)
result = parse_line(cmd)
assert isinstance(result, CreateVertex)
assert result.label == "movie"
assert "buildVertexLabel" in str(result)
assert "movie" in str(result)
result2 = parse_line("CREATE vertex label movie")
assert isinstance(result, CreateVertex)
def test_create_edge_label():
result = parse_line("CREATE edge rated")
assert isinstance(result, CreateEdge)
assert result.label == "rated"
result2 = parse_line("CREATE edge label rated")
assert isinstance(result2, CreateEdge)
def test_create_property():
result = parse_line("CREATE PROPERTY name text")
assert isinstance(result, CreateProperty)
result = parse_line("CREATE PROPERTY name TEXT")
assert isinstance(result, CreateProperty)
"""
graph.schema().vertexLabel("ip").buildVertexIndex("ipById").materialized().byPropertyKey("id").add()
Secondary
graph.schema().vertexLabel("ip").buildVertexIndex("ipByCountry").secondary().byPropertyKey("country").add()
Search
graph.schema().vertexLabel("swid").buildVertexIndex("search").search().byPropertyKey("dob").add()
"""
def test_create_index_fulltext():
s = "CREATE materialized INDEX movie_title_idx ON VERTEX movie(title )"
result = create_vertex_index.parseString(s)
s = "CREATE secondary INDEX movie_title_idx ON VERTEX movie(title )"
result = create_vertex_index.parseString(s)
# result = parse_line()
# assert isinstance(result, CreateIndex)
#
# def test_create_index_materialize():
# result = parse_line("CREATE INDEX movie_title_idx ON movie(title) SEARCH");
# result = parse_line("CREATE INDEX user_id_idx ON movie(user_id) MATERIALIZED")
| from pytest import fixture
from cdm.ddl import parse_line, create_vertex, create_vertex_index,\
CreateVertex, \
CreateEdge, CreateProperty, CreateIndex, CreateGraph
def test_create_graph():
s = "CREATE GRAPH jon"
parsed = parse_line(s)
assert isinstance(parsed, CreateGraph)
assert "system.createGraph('jon').build()" in str(parsed)
def test_create_vertex_label():
cmd = "CREATE vertex movie"
result = create_vertex.parseString(cmd)[0]
assert isinstance(result, CreateVertex)
result = parse_line(cmd)
assert isinstance(result, CreateVertex)
assert result.label == "movie"
assert "buildVertexLabel" in str(result)
assert "movie" in str(result)
result2 = parse_line("CREATE vertex label movie")
assert isinstance(result, CreateVertex)
def test_create_edge_label():
result = parse_line("CREATE edge rated")
assert isinstance(result, CreateEdge)
assert result.label == "rated"
result2 = parse_line("CREATE edge label rated")
assert isinstance(result2, CreateEdge)
def test_create_property():
result = parse_line("CREATE PROPERTY name text")
assert isinstance(result, CreateProperty)
result = parse_line("CREATE PROPERTY name TEXT")
assert isinstance(result, CreateProperty)
"""
graph.schema().vertexLabel("ip").buildVertexIndex("ipById").materialized().byPropertyKey("id").add()
Secondary
graph.schema().vertexLabel("ip").buildVertexIndex("ipByCountry").secondary().byPropertyKey("country").add()
Search
graph.schema().vertexLabel("swid").buildVertexIndex("search").search().byPropertyKey("dob").add()
"""
def test_create_index_fulltext():
s = "CREATE materialized INDEX movie_title_idx ON VERTEX movie(title )"
result = create_vertex_index.parseString(s)
# result = parse_line()
# assert isinstance(result, CreateIndex)
#
# def test_create_index_materialize():
# result = parse_line("CREATE INDEX movie_title_idx ON movie(title) SEARCH");
# result = parse_line("CREATE INDEX user_id_idx ON movie(user_id) MATERIALIZED")
| Python | 0.000281 |
8802611f515df7b123f907efb6f7ffac9f11a42f | create mock ami and add test for ami list. | tests/test_ec2.py | tests/test_ec2.py | from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output.ec2 import (output_ec2_list, output_ec2_info)
from acli.services.ec2 import (ec2_list, ec2_info, ec2_summary, ami_list)
from acli.config import Config
from moto import mock_ec2
import pytest
from boto3.session import Session
session = Session(region_name="eu-west-1")
@pytest.yield_fixture(scope='function')
def ec2_instances():
"""EC2 mock service"""
mock = mock_ec2()
mock.start()
client = session.client('ec2')
client.create_security_group(GroupName='group1', Description='my first sec group')
reservations = client.run_instances(ImageId='ami-12345', MinCount=2, MaxCount=2, SecurityGroups=['group1'])
for i, s in enumerate(reservations.get('Instances')):
client.create_tags(
Resources=[s.get('InstanceId')],
Tags=[{'Key': 'Name', 'Value': 'Bob'}])
ec2_resource = session.resource('ec2')
all_instances = ec2_resource.instances.all()
yield all_instances
mock.stop()
@pytest.yield_fixture(scope='function')
def amis():
"""AMI mock service"""
mock = mock_ec2()
mock.start()
client = session.client('ec2')
reservation = client.run_instances(ImageId='ami-1234abcd', MinCount=1, MaxCount=1)
instance = reservation.get('Instances')[0]
image_id = client.create_image(InstanceId=instance.get('InstanceId'),
Name="test-ami",
Description="this is a test ami")
yield client.describe_images()
mock.stop()
config = Config(cli_args={'--region': 'eu-west-1',
'--access_key_id': 'AKIAIOSFODNN7EXAMPLE',
'--secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'})
def test_ec2_list_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
@mock_ec2
def test_ec2_list_service_no_instances():
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
def test_ec2_info_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_info(aws_config=config, instance_id=list(ec2_instances)[0].id)
def test_ec2_list_output(ec2_instances):
with pytest.raises(SystemExit):
assert output_ec2_list(output_media='console', instances=ec2_instances)
def test_ec2_output(ec2_instances):
with pytest.raises(SystemExit):
instance = list(ec2_instances)[0]
assert output_ec2_info(output_media='console', instance=instance)
def test_ami_list_service(amis):
with pytest.raises(SystemExit):
assert ami_list(aws_config=config)
# def test_ec2_summary(ec2_instances):
# with pytest.raises(SystemExit):
# instance = list(ec2_instances)[0]
# assert ec2_summary(aws_config=config)
| from __future__ import (absolute_import, print_function, unicode_literals)
from acli.output.ec2 import (output_ec2_list, output_ec2_info)
from acli.services.ec2 import (ec2_list, ec2_info, ec2_summary)
from acli.config import Config
from moto import mock_ec2
import pytest
from boto3.session import Session
session = Session(region_name="eu-west-1")
@pytest.yield_fixture(scope='function')
def ec2_instances():
"""EC2 mock service"""
mock = mock_ec2()
mock.start()
client = session.client('ec2')
client.create_security_group(GroupName='group1', Description='my first sec group')
reservations = client.run_instances(ImageId='ami-12345', MinCount=2, MaxCount=2, SecurityGroups=['group1'])
for i, s in enumerate(reservations.get('Instances')):
client.create_tags(
Resources=[s.get('InstanceId')],
Tags=[{'Key': 'Name', 'Value': 'Bob'}])
ec2_resource = session.resource('ec2')
all_instances = ec2_resource.instances.all()
yield all_instances
mock.stop()
config = Config(cli_args={'--region': 'eu-west-1',
'--access_key_id': 'AKIAIOSFODNN7EXAMPLE',
'--secret_access_key': 'wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY'})
def test_ec2_list_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
@mock_ec2
def test_ec2_list_service_no_instances():
with pytest.raises(SystemExit):
assert ec2_list(aws_config=config)
def test_ec2_info_service(ec2_instances):
with pytest.raises(SystemExit):
assert ec2_info(aws_config=config, instance_id=list(ec2_instances)[0].id)
def test_ec2_list_output(ec2_instances):
with pytest.raises(SystemExit):
assert output_ec2_list(output_media='console', instances=ec2_instances)
def test_ec2_output(ec2_instances):
with pytest.raises(SystemExit):
instance = list(ec2_instances)[0]
assert output_ec2_info(output_media='console', instance=instance)
# def test_ec2_summary(ec2_instances):
# with pytest.raises(SystemExit):
# instance = list(ec2_instances)[0]
# assert ec2_summary(aws_config=config)
| Python | 0 |
581eb398360cff5de1488fa06890195c808f8d10 | fix make requests test | tests/test_run.py | tests/test_run.py | # coding=utf-8
from os.path import join
import pytest
from xpaw.spider import Spider
from xpaw.cmdline import main
from xpaw.run import run_crawler, run_spider, make_requests
from xpaw.http import HttpRequest, HttpResponse
from xpaw.errors import ClientError, HttpError
def test_run_crawler(tmpdir):
proj_name = 'test_run_crawler'
proj_dir = join(str(tmpdir), proj_name)
main(argv=['xpaw', 'init', proj_dir])
run_crawler(proj_dir, log_level='DEBUG')
def test_run_crawler_bad_config(tmpdir, capsys):
proj_dir = join(str(tmpdir))
config_file = join(proj_dir, 'config.py')
with open(config_file, 'w') as f:
f.write('bad config')
with pytest.raises(SyntaxError):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
def test_failed_to_create_cluster(tmpdir, capsys):
proj_dir = join(str(tmpdir))
with pytest.raises(Exception):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
class DummySpider(Spider):
def start_requests(self):
pass
def parse(self, response):
pass
def test_run_spider():
run_spider(DummySpider, log_level='DEBUG')
def test_make_requests():
requests = [None, 'http://unknonw',
'http://python.org/', HttpRequest('http://python.org'),
'http://httpbin.org/status/404']
results = make_requests(requests, log_level='DEBUG')
assert len(results) == len(requests)
assert results[0] is None
assert isinstance(results[1], ClientError)
assert isinstance(results[2], HttpResponse) and results[2].status == 200
assert isinstance(results[3], HttpResponse) and results[3].status == 200
assert isinstance(results[4], HttpError) and results[4].response.status == 404
| # coding=utf-8
from os.path import join
import pytest
from xpaw.spider import Spider
from xpaw.cmdline import main
from xpaw.run import run_crawler, run_spider, make_requests
from xpaw.http import HttpRequest, HttpResponse
from xpaw.errors import ClientError, HttpError
def test_run_crawler(tmpdir):
proj_name = 'test_run_crawler'
proj_dir = join(str(tmpdir), proj_name)
main(argv=['xpaw', 'init', proj_dir])
run_crawler(proj_dir, log_level='DEBUG')
def test_run_crawler_bad_config(tmpdir, capsys):
proj_dir = join(str(tmpdir))
config_file = join(proj_dir, 'config.py')
with open(config_file, 'w') as f:
f.write('bad config')
with pytest.raises(SyntaxError):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
def test_failed_to_create_cluster(tmpdir, capsys):
proj_dir = join(str(tmpdir))
with pytest.raises(Exception):
run_crawler(proj_dir, log_level='DEBUG')
_, _ = capsys.readouterr()
class DummySpider(Spider):
def start_requests(self):
pass
def parse(self, response):
pass
def test_run_spider():
run_spider(DummySpider, log_level='DEBUG')
def test_make_requests():
requests = [None, 'http://localhost:8080',
'http://python.org/', HttpRequest('http://python.org'),
'http://httpbin.org/status/404']
results = make_requests(requests, log_level='DEBUG')
assert len(results) == len(requests)
assert results[0] is None
assert isinstance(results[1], ClientError)
assert isinstance(results[2], HttpResponse) and results[2].status == 200
assert isinstance(results[3], HttpResponse) and results[3].status == 200
assert isinstance(results[4], HttpError) and results[4].response.status == 404
| Python | 0.000002 |
4ba0a99a626e54cd7ca68692c5135bcd6b2f8d3a | Add test for STL vertex order | tests/test_stl.py | tests/test_stl.py | """
Check things related to STL files
"""
try:
from . import generic as g
except BaseException:
import generic as g
class STLTests(g.unittest.TestCase):
def test_header(self):
m = g.get_mesh('featuretype.STL')
# make sure we have the right mesh
assert g.np.isclose(m.volume, 11.627733431196749, atol=1e-6)
# should have saved the header from the STL file
assert len(m.metadata['header']) > 0
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
def test_attrib(self):
m = g.get_mesh('featuretype.STL')
len_vertices = len(m.vertices)
# assign some random vertex attributes
random = g.np.random.random(len(m.vertices))
m.vertex_attributes['random'] = random
m.vertex_attributes['nah'] = 20
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
# check all vertices are still in place
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(random, m.vertex_attributes['random'])
assert len(m.vertices) == len_vertices
# remove all vertices except four
v_mask = [0, 1, 2, 3]
m.update_vertices(v_mask)
# make sure things are still correct
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(m.vertex_attributes['random'], random[v_mask])
assert len(m.vertices) == len(v_mask)
def test_ascii_multibody(self):
s = g.get_mesh('multibody.stl')
assert len(s.geometry) == 2
def test_empty(self):
# demo files to check
empty_files = ['stl_empty_ascii.stl',
'stl_empty_bin.stl']
for empty_file in empty_files:
e = g.get_mesh('emptyIO/' + empty_file)
# result should be an empty scene without vertices
assert isinstance(e, g.trimesh.Scene)
assert not hasattr(e, 'vertices')
# create export
try:
e.export(file_type='ply')
except BaseException:
return
raise ValueError("Shouldn't export empty scenes!")
def test_vertex_order(self):
# removing doubles should respect the vertex order
m_raw = g.get_mesh('featuretype.STL', process=False)
m_proc = g.get_mesh('featuretype.STL', process=True)
verts_raw = g.trimesh.grouping.hashable_rows(m_raw.vertices)
verts_proc = g.trimesh.grouping.hashable_rows(m_proc.vertices)
# go through all processed verts
# find index in unprocessed mesh
idxs = []
for vert in verts_proc:
idxs.append(g.np.where(verts_raw == vert)[0][0])
# indices should be increasing
assert (g.np.diff(idxs) >= 0).all()
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| """
Check things related to STL files
"""
try:
from . import generic as g
except BaseException:
import generic as g
class STLTests(g.unittest.TestCase):
def test_header(self):
m = g.get_mesh('featuretype.STL')
# make sure we have the right mesh
assert g.np.isclose(m.volume, 11.627733431196749, atol=1e-6)
# should have saved the header from the STL file
assert len(m.metadata['header']) > 0
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
def test_attrib(self):
m = g.get_mesh('featuretype.STL')
len_vertices = len(m.vertices)
# assign some random vertex attributes
random = g.np.random.random(len(m.vertices))
m.vertex_attributes['random'] = random
m.vertex_attributes['nah'] = 20
# should have saved the STL face attributes
assert len(m.face_attributes['stl']) == len(m.faces)
assert len(m.faces) > 1000
# add a non-correlated face attribute, which should be ignored
m.face_attributes['nah'] = 10
# remove all faces except three random ones
m.update_faces([1, 3, 4])
# faces and face attributes should be untouched
assert len(m.faces) == 3
assert len(m.face_attributes['stl']) == 3
# attribute that wasn't len(m.faces) shouldn't have been touched
assert m.face_attributes['nah'] == 10
# check all vertices are still in place
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(random, m.vertex_attributes['random'])
assert len(m.vertices) == len_vertices
# remove all vertices except four
v_mask = [0, 1, 2, 3]
m.update_vertices(v_mask)
# make sure things are still correct
assert m.vertex_attributes['nah'] == 20
assert g.np.allclose(m.vertex_attributes['random'], random[v_mask])
assert len(m.vertices) == len(v_mask)
def test_ascii_multibody(self):
s = g.get_mesh('multibody.stl')
assert len(s.geometry) == 2
def test_empty(self):
# demo files to check
empty_files = ['stl_empty_ascii.stl',
'stl_empty_bin.stl']
for empty_file in empty_files:
e = g.get_mesh('emptyIO/' + empty_file)
# result should be an empty scene without vertices
assert isinstance(e, g.trimesh.Scene)
assert not hasattr(e, 'vertices')
# create export
try:
e.export(file_type='ply')
except BaseException:
return
raise ValueError("Shouldn't export empty scenes!")
if __name__ == '__main__':
g.trimesh.util.attach_to_log()
g.unittest.main()
| Python | 0 |
f30b658275a62294593d31175e1e13118140abb7 | Fix flake8 in test_vcs.py | tests/test_vcs.py | tests/test_vcs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs
------------
Tests for `cookiecutter.vcs` module.
"""
import locale
import os
import pytest
import subprocess
import unittest
from cookiecutter import exceptions, utils, vcs
from tests.skipif_markers import skipif_no_network
try:
no_network = os.environ[u'DISABLE_NETWORK_TESTS']
except KeyError:
no_network = False
encoding = locale.getdefaultlocale()[1]
@skipif_no_network
def test_git_clone():
repo_dir = vcs.clone(
'https://github.com/audreyr/cookiecutter-pypackage.git'
)
assert repo_dir == 'cookiecutter-pypackage'
assert os.path.isfile('cookiecutter-pypackage/README.rst')
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
@skipif_no_network
def test_git_clone_checkout():
repo_dir = vcs.clone(
'https://github.com/audreyr/cookiecutter-pypackage.git',
'console-script'
)
git_dir = 'cookiecutter-pypackage'
assert repo_dir == git_dir
assert os.path.isfile(os.path.join('cookiecutter-pypackage', 'README.rst'))
proc = subprocess.Popen(
['git', 'symbolic-ref', 'HEAD'],
cwd=git_dir,
stdout=subprocess.PIPE
)
symbolic_ref = proc.communicate()[0]
branch = symbolic_ref.decode(encoding).strip().split('/')[-1]
assert 'console-script' == branch
if os.path.isdir(git_dir):
utils.rmtree(git_dir)
@skipif_no_network
def test_git_clone_custom_dir():
os.makedirs("tests/custom_dir1/custom_dir2/")
repo_dir = vcs.clone(
repo_url='https://github.com/audreyr/cookiecutter-pypackage.git',
checkout=None,
clone_to_dir="tests/custom_dir1/custom_dir2/"
)
with utils.work_in("tests/custom_dir1/custom_dir2/"):
test_dir = 'tests/custom_dir1/custom_dir2/cookiecutter-pypackage'
assert repo_dir == test_dir.replace("/", os.sep)
assert os.path.isfile('cookiecutter-pypackage/README.rst')
if os.path.isdir('cookiecutter-pypackage'):
utils.rmtree('cookiecutter-pypackage')
if os.path.isdir('tests/custom_dir1'):
utils.rmtree('tests/custom_dir1')
@skipif_no_network
def test_hg_clone():
    """Clone a public Mercurial repo and verify the working copy."""
    hg_dir = 'cookiecutter-trytonmodule'
    result = vcs.clone(
        'https://bitbucket.org/pokoli/cookiecutter-trytonmodule'
    )
    assert result == hg_dir
    assert os.path.isfile('cookiecutter-trytonmodule/README.rst')
    # Clean up so repeated runs start from scratch.
    if os.path.isdir(hg_dir):
        utils.rmtree(hg_dir)
@skipif_no_network
def test_vcs_not_installed(monkeypatch):
    """clone() must raise VCSNotInstalled for an unrecognized repo type."""
    # Force identify_repo to report a bogus VCS command name so that the
    # "is this VCS installed?" check inside clone() must fail.
    monkeypatch.setattr(
        'cookiecutter.vcs.identify_repo',
        lambda repo_url: u'stringthatisntashellcommand'
    )
    with pytest.raises(exceptions.VCSNotInstalled):
        vcs.clone("http://norepotypespecified.com")
# Allow running this module directly (the suite normally runs under pytest).
if __name__ == '__main__':
    unittest.main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_vcs
------------
Tests for `cookiecutter.vcs` module.
"""
import locale
import os
import pytest
import subprocess
import unittest
from cookiecutter.compat import patch
from cookiecutter import exceptions, utils, vcs
from tests.skipif_markers import skipif_no_network
# Allow network-dependent tests to be disabled via the environment.
# NOTE(review): `no_network` is never referenced in this module -- the
# actual skip logic comes from tests.skipif_markers.skipif_no_network;
# confirm this flag is still needed before removing it.
try:
    no_network = os.environ[u'DISABLE_NETWORK_TESTS']
except KeyError:
    no_network = False
# Locale encoding, used below to decode raw `git` subprocess output.
encoding = locale.getdefaultlocale()[1]
@skipif_no_network
def test_git_clone():
    """Clone a public git repo and verify a working copy appears on disk."""
    repo_dir = vcs.clone(
        'https://github.com/audreyr/cookiecutter-pypackage.git'
    )
    # clone() reports the directory it created, relative to the CWD.
    assert repo_dir == 'cookiecutter-pypackage'
    assert os.path.isfile('cookiecutter-pypackage/README.rst')
    # Remove the checkout so repeated test runs start clean.
    if os.path.isdir('cookiecutter-pypackage'):
        utils.rmtree('cookiecutter-pypackage')
@skipif_no_network
def test_git_clone_checkout():
    """Clone a repo at a specific branch and verify HEAD points at it."""
    repo_dir = vcs.clone(
        'https://github.com/audreyr/cookiecutter-pypackage.git',
        'console-script'
    )
    git_dir = 'cookiecutter-pypackage'
    assert repo_dir == git_dir
    assert os.path.isfile(os.path.join('cookiecutter-pypackage', 'README.rst'))
    # Ask git itself which branch HEAD currently refers to.
    proc = subprocess.Popen(
        ['git', 'symbolic-ref', 'HEAD'],
        cwd=git_dir,
        stdout=subprocess.PIPE
    )
    symbolic_ref = proc.communicate()[0]
    # The ref looks like "refs/heads/<branch>"; keep only the last component.
    branch = symbolic_ref.decode(encoding).strip().split('/')[-1]
    assert 'console-script' == branch
    # Clean up the working copy afterwards.
    if os.path.isdir(git_dir):
        utils.rmtree(git_dir)
@skipif_no_network
def test_git_clone_custom_dir():
    """Clone into a pre-created nested target directory."""
    os.makedirs("tests/custom_dir1/custom_dir2/")
    repo_dir = vcs.clone(
        repo_url='https://github.com/audreyr/cookiecutter-pypackage.git',
        checkout=None,
        clone_to_dir="tests/custom_dir1/custom_dir2/"
    )
    with utils.work_in("tests/custom_dir1/custom_dir2/"):
        # clone() returns an OS-native path; normalize the expectation to match.
        test_dir = 'tests/custom_dir1/custom_dir2/cookiecutter-pypackage'
        assert repo_dir == test_dir.replace("/", os.sep)
        assert os.path.isfile('cookiecutter-pypackage/README.rst')
        if os.path.isdir('cookiecutter-pypackage'):
            utils.rmtree('cookiecutter-pypackage')
    # Remove the scratch directory tree created above.
    if os.path.isdir('tests/custom_dir1'):
        utils.rmtree('tests/custom_dir1')
@skipif_no_network
def test_hg_clone():
    """Clone a public Mercurial repo and verify the working copy."""
    repo_dir = vcs.clone(
        'https://bitbucket.org/pokoli/cookiecutter-trytonmodule'
    )
    assert repo_dir == 'cookiecutter-trytonmodule'
    assert os.path.isfile('cookiecutter-trytonmodule/README.rst')
    # Clean up so repeated runs start from scratch.
    if os.path.isdir('cookiecutter-trytonmodule'):
        utils.rmtree('cookiecutter-trytonmodule')
@skipif_no_network
def test_vcs_not_installed(monkeypatch):
    """clone() must raise VCSNotInstalled for an unrecognized repo type."""
    # Force identify_repo to report a bogus VCS command name so that the
    # "is this VCS installed?" check inside clone() must fail.
    monkeypatch.setattr(
        'cookiecutter.vcs.identify_repo',
        lambda x: u'stringthatisntashellcommand'
    )
    with pytest.raises(exceptions.VCSNotInstalled):
        vcs.clone("http://norepotypespecified.com")
# Allow running this module directly (the suite normally runs under pytest).
if __name__ == '__main__':
    unittest.main()
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.