commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
e72da8231e7a5b05f098db1f78b66b8cb57f27ba | remove checking in autots import (#5489) | python/chronos/src/bigdl/chronos/autots/__init__.py | python/chronos/src/bigdl/chronos/autots/__init__.py | #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
import os
if os.getenv("LD_PRELOAD", "null") != "null":
warnings.warn("Users of `bigdl.chronos.autots` should "
"unset bigdl-nano environment variables!"
"Please run `source bigdl-nano-unset-env` "
"in your bash terminal")
try:
# TODO: make this a LazyImport
from .autotsestimator import AutoTSEstimator
from .tspipeline import TSPipeline
except ImportError:
pass
| #
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import warnings
from bigdl.nano.utils.log4Error import invalidInputError
import os
if os.getenv("LD_PRELOAD", "null") != "null":
invalidInputError(False,
errMsg="Users of `bigdl.chronos.autots` should "
"unset bigdl-nano environment variables!",
fixMsg="Please run `source bigdl-nano-unset-env` "
"in your bash terminal")
try:
# TODO: make this a LazyImport
from .autotsestimator import AutoTSEstimator
from .tspipeline import TSPipeline
except ImportError:
pass
| Python | 0 |
b52a937356f2112ecd5adcdf79ac6430169a735f | fix file close bug causing errors in pypy | new_pymtl/translation_tools/verilator_sim.py | new_pymtl/translation_tools/verilator_sim.py | #===============================================================================
# verilator_sim.py
#===============================================================================
#from verilator_cython import verilog_to_pymtl
from verilator_cffi import verilog_to_pymtl
import verilog
import os
import sys
import filecmp
#------------------------------------------------------------------------------
# get_verilated
#------------------------------------------------------------------------------
def get_verilated( model_inst ):
model_inst.elaborate()
model_name = model_inst.class_name
# Translate the PyMTL module to Verilog, if we've already done
# translation check if there's been any changes to the source
verilog_file = model_name + '.v'
temp_file = verilog_file + '.tmp'
#verilog.translate( model_inst, open( verilog_file, 'w+' ) )
#cached = False
# Write the output to a temporary file
fd = open( temp_file, 'w+' )
verilog.translate( model_inst, fd )
fd.close()
# Check if the temporary file matches an existing file (caching)
cached = False
if os.path.exists( verilog_file ):
cached = filecmp.cmp( temp_file, verilog_file )
if not cached:
os.system( ' diff %s %s'%( temp_file, verilog_file ))
# Rename temp to actual output
os.rename( temp_file, verilog_file )
# Verilate the module only if we've updated the verilog source
if not cached:
print "NOT CACHED", verilog_file
verilog_to_pymtl( model_inst, verilog_file )
# Use some trickery to import the verilated version of the model
sys.path.append( os.getcwd() )
__import__( 'W' + model_name )
imported_module = sys.modules[ 'W'+model_name ]
# Get the model class from the module, instantiate and elaborate it
model_class = imported_module.__dict__[ model_name ]
model_inst = model_class()
return model_inst
| #===============================================================================
# verilator_sim.py
#===============================================================================
#from verilator_cython import verilog_to_pymtl
from verilator_cffi import verilog_to_pymtl
import verilog
import os
import sys
import filecmp
#------------------------------------------------------------------------------
# get_verilated
#------------------------------------------------------------------------------
def get_verilated( model_inst ):
model_inst.elaborate()
model_name = model_inst.class_name
# Translate the PyMTL module to Verilog, if we've already done
# translation check if there's been any changes to the source
verilog_file = model_name + '.v'
temp_file = verilog_file + '.tmp'
#verilog.translate( model_inst, open( verilog_file, 'w+' ) )
#cached = False
# Caching avoids regeneration/recompilation
if os.path.exists( verilog_file ):
verilog.translate( model_inst, open( temp_file, 'w+' ) )
cached = filecmp.cmp( temp_file, verilog_file )
if not cached:
os.system( ' diff %s %s'%( temp_file, verilog_file ))
os.rename( temp_file, verilog_file )
else:
verilog.translate( model_inst, open( verilog_file, 'w+' ) )
cached = False
# Verilate the module only if we've updated the verilog source
if not cached:
print "NOT CACHED", verilog_file
verilog_to_pymtl( model_inst, verilog_file )
# Use some trickery to import the verilated version of the model
sys.path.append( os.getcwd() )
__import__( 'W' + model_name )
imported_module = sys.modules[ 'W'+model_name ]
# Get the model class from the module, instantiate and elaborate it
model_class = imported_module.__dict__[ model_name ]
model_inst = model_class()
return model_inst
| Python | 0 |
b0806c0b8b950a3007107cc58fb21e504cf09427 | Move serial device path to settings | homedisplay/control_milight/management/commands/listen_433.py | homedisplay/control_milight/management/commands/listen_433.py | from control_milight.utils import process_automatic_trigger
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
import serial
import time
class Command(BaseCommand):
args = ''
help = 'Listen for 433MHz radio messages'
ITEM_MAP = {
"5236713": "kitchen",
"7697747": "hall",
"1328959": "front-door",
"247615": "unused-magnetic-switch",
"8981913": "table",
}
def handle(self, *args, **options):
s = serial.Serial(settings.ARDUINO_433, 9600)
sent_event_map = {}
while True:
line = s.readline()
print "- %s" % line
if line.startswith("Received "):
id = line.split(" ")[1]
if id in self.ITEM_MAP:
item_name = self.ITEM_MAP[id]
if item_name in sent_event_map:
if sent_event_map[item_name] > time.time() - 5:
print "Too recent event: %s" % item_name
continue
process_automatic_trigger(item_name)
sent_event_map[item_name] = time.time()
else:
print "Unknown id: %s" % id
| from django.core.management.base import BaseCommand, CommandError
from control_milight.utils import process_automatic_trigger
import serial
import time
class Command(BaseCommand):
args = ''
help = 'Listen for 433MHz radio messages'
ITEM_MAP = {
"5236713": "kitchen",
"7697747": "hall",
"1328959": "front-door",
"247615": "unused-magnetic-switch",
"8981913": "table",
}
def handle(self, *args, **options):
s = serial.Serial("/dev/tty.usbserial-A9007LzM", 9600)
sent_event_map = {}
while True:
line = s.readline()
print "- %s" % line
if line.startswith("Received "):
id = line.split(" ")[1]
if id in self.ITEM_MAP:
item_name = self.ITEM_MAP[id]
if item_name in sent_event_map:
if sent_event_map[item_name] > time.time() - 5:
print "Too recent event: %s" % item_name
continue
process_automatic_trigger(item_name)
sent_event_map[item_name] = time.time()
else:
print "Unknown id: %s" % id
| Python | 0.000001 |
f041cd9623ef06777189ecd538f5bdb30cf33722 | Fix export_v8_tarball.py to work with python2.7 | tools/export_tarball/export_v8_tarball.py | tools/export_tarball/export_v8_tarball.py | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a tarball with V8 sources, but without .svn directories.
This allows easy packaging of V8, synchronized with browser releases.
Example usage:
export_v8_tarball.py /foo/bar
The above will create file /foo/bar/v8-VERSION.tar.bz2 if it doesn't exist.
"""
import optparse
import os
import re
import subprocess
import sys
import tarfile
_V8_MAJOR_VERSION_PATTERN = re.compile(r'#define\s+MAJOR_VERSION\s+(.*)')
_V8_MINOR_VERSION_PATTERN = re.compile(r'#define\s+MINOR_VERSION\s+(.*)')
_V8_BUILD_NUMBER_PATTERN = re.compile(r'#define\s+BUILD_NUMBER\s+(.*)')
_V8_PATCH_LEVEL_PATTERN = re.compile(r'#define\s+PATCH_LEVEL\s+(.*)')
_V8_PATTERNS = [
_V8_MAJOR_VERSION_PATTERN,
_V8_MINOR_VERSION_PATTERN,
_V8_BUILD_NUMBER_PATTERN,
_V8_PATCH_LEVEL_PATTERN]
def GetV8Version(v8_directory):
"""
Returns version number as string based on the string
contents of version.cc file.
"""
with open(os.path.join(v8_directory, 'src', 'version.cc')) as version_file:
version_contents = version_file.read()
version_components = []
for pattern in _V8_PATTERNS:
version_components.append(pattern.search(version_contents).group(1).strip())
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSourceDirectory():
return os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
def GetV8Directory():
return os.path.join(GetSourceDirectory(), 'v8')
# Workaround lack of the exclude parameter in add method in python-2.4.
# TODO(phajdan.jr): remove the workaround when it's not needed on the bot.
class MyTarFile(tarfile.TarFile):
def add(self, name, arcname=None, recursive=True, exclude=None, filter=None):
head, tail = os.path.split(name)
if tail in ('.svn', '.git'):
return
tarfile.TarFile.add(self, name, arcname=arcname, recursive=recursive)
def main(argv):
parser = optparse.OptionParser()
options, args = parser.parse_args(argv)
if len(args) != 1:
print 'You must provide only one argument: output file directory'
return 1
v8_directory = GetV8Directory()
if not os.path.exists(v8_directory):
print 'Cannot find the v8 directory.'
return 1
v8_version = GetV8Version(v8_directory)
print 'Packaging V8 version %s...' % v8_version
output_basename = 'v8-%s' % v8_version
output_fullname = os.path.join(args[0], output_basename + '.tar.bz2')
if os.path.exists(output_fullname):
print 'Already packaged, exiting.'
return 0
subprocess.check_call(["make", "dependencies"], cwd=v8_directory)
archive = MyTarFile.open(output_fullname, 'w:bz2')
try:
archive.add(v8_directory, arcname=output_basename)
finally:
archive.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Creates a tarball with V8 sources, but without .svn directories.
This allows easy packaging of V8, synchronized with browser releases.
Example usage:
export_v8_tarball.py /foo/bar
The above will create file /foo/bar/v8-VERSION.tar.bz2 if it doesn't exist.
"""
import optparse
import os
import re
import subprocess
import sys
import tarfile
_V8_MAJOR_VERSION_PATTERN = re.compile(r'#define\s+MAJOR_VERSION\s+(.*)')
_V8_MINOR_VERSION_PATTERN = re.compile(r'#define\s+MINOR_VERSION\s+(.*)')
_V8_BUILD_NUMBER_PATTERN = re.compile(r'#define\s+BUILD_NUMBER\s+(.*)')
_V8_PATCH_LEVEL_PATTERN = re.compile(r'#define\s+PATCH_LEVEL\s+(.*)')
_V8_PATTERNS = [
_V8_MAJOR_VERSION_PATTERN,
_V8_MINOR_VERSION_PATTERN,
_V8_BUILD_NUMBER_PATTERN,
_V8_PATCH_LEVEL_PATTERN]
def GetV8Version(v8_directory):
"""
Returns version number as string based on the string
contents of version.cc file.
"""
with open(os.path.join(v8_directory, 'src', 'version.cc')) as version_file:
version_contents = version_file.read()
version_components = []
for pattern in _V8_PATTERNS:
version_components.append(pattern.search(version_contents).group(1).strip())
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSourceDirectory():
return os.path.realpath(
os.path.join(os.path.dirname(__file__), '..', '..', '..', 'src'))
def GetV8Directory():
return os.path.join(GetSourceDirectory(), 'v8')
# Workaround lack of the exclude parameter in add method in python-2.4.
# TODO(phajdan.jr): remove the workaround when it's not needed on the bot.
class MyTarFile(tarfile.TarFile):
def add(self, name, arcname=None, recursive=True, exclude=None):
head, tail = os.path.split(name)
if tail in ('.svn', '.git'):
return
tarfile.TarFile.add(self, name, arcname=arcname, recursive=recursive)
def main(argv):
parser = optparse.OptionParser()
options, args = parser.parse_args(argv)
if len(args) != 1:
print 'You must provide only one argument: output file directory'
return 1
v8_directory = GetV8Directory()
if not os.path.exists(v8_directory):
print 'Cannot find the v8 directory.'
return 1
v8_version = GetV8Version(v8_directory)
print 'Packaging V8 version %s...' % v8_version
output_basename = 'v8-%s' % v8_version
output_fullname = os.path.join(args[0], output_basename + '.tar.bz2')
if os.path.exists(output_fullname):
print 'Already packaged, exiting.'
return 0
subprocess.check_call(["make", "dependencies"], cwd=v8_directory)
archive = MyTarFile.open(output_fullname, 'w:bz2')
try:
archive.add(v8_directory, arcname=output_basename)
finally:
archive.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| Python | 0.000272 |
430799c753bf637acac84a37bae90dd4e2193bd9 | Update account_invoice_refund_reason/tests/test_account_invoice_refund_reason.py | account_invoice_refund_reason/tests/test_account_invoice_refund_reason.py | account_invoice_refund_reason/tests/test_account_invoice_refund_reason.py | # Copyright (C) 2019 Open Source Integrators
# Copyright (C) 2019 Serpent Consulting Services Pvt. Ltd.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
import datetime
class TestAccountInvoiceRefundReason(TransactionCase):
def setUp(self):
super(TestAccountInvoiceRefundReason, self).setUp()
self.account_invoice_obj = self.env['account.invoice']
self.account_obj = self.env['account.account']
self.journal_obj = self.env['account.journal']
self.invoice_refund_obj = self.env['account.invoice.refund']
self.reason_obj = self.env['account.invoice.refund.reason']
self.payment_term = self.env.ref('account.account_payment_term_advance')
self.partner3 = self.env.ref('base.res_partner_3')
self.account_user_type =\
self.env.ref('account.data_account_type_receivable')
self.product_id = self.env.ref('product.product_product_5')
self.account_revenue = self.env.ref('account.data_account_type_revenue')
self.journalrec = self.journal_obj.search([('type', '=', 'sale')])[0]
self.account_id = self.account_obj.search([
('user_type_id', '=', self.account_revenue.id)], limit=1)
self.reason_id = self.env.ref('account_invoice_refund_reason.'
'refund_reason_cancellation')
self.account_rec1_id = self.account_obj.create(dict(
code="cust_acc",
name="customer account",
user_type_id=self.account_user_type.id,
reconcile=True,
))
invoice_line_data = [
(0, 0,
{
'product_id': self.product_id.id,
'quantity': 10.0,
'account_id': self.account_id.id,
'name': 'product test 5',
'price_unit': 100.00,
}
)
]
self.account_invoice_customer0 = self.account_invoice_obj.create(dict(
name="Test Customer Invoice",
payment_term_id=self.payment_term.id,
journal_id=self.journalrec.id,
partner_id=self.partner3.id,
account_id=self.account_rec1_id.id,
invoice_line_ids=invoice_line_data
))
def test_onchange_reason_id(self):
self.account_invoice_customer0.action_invoice_open()
self.account_invoice_refund_0 = self.invoice_refund_obj.create(dict(
description='Credit Note',
date=datetime.date.today(),
filter_refund='refund',
reason_id=self.reson_id.id
))
self.account_invoice_refund_0._onchange_reason_id()
self.assertEqual(self.account_invoice_refund_0.description,
self.account_invoice_refund_0.reason_id.name)
self.account_invoice_refund_0.invoice_refund()
| # Copyright (C) 2019 Open Source Integrators
# Copyright (C) 2019 Serpent Consulting Services Pvt. Ltd.
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo.tests.common import TransactionCase
import datetime
class TestAccountInvoiceRefundReason(TransactionCase):
def setUp(self):
super(TestAccountInvoiceRefundReason, self).setUp()
self.account_invoice_obj = self.env['account.invoice']
self.account_obj = self.env['account.account']
self.journal_obj = self.env['account.journal']
self.invoice_refund_obj = self.env['account.invoice.refund']
self.reason_obj = self.env['account.invoice.refund.reason']
self.payment_term = self.env.ref('account.account_payment_term_advance')
self.partner3 = self.env.ref('base.res_partner_3')
self.account_user_type =\
self.env.ref('account.data_account_type_receivable')
self.product_id = self.env.ref('product.product_product_5')
self.account_revenue = self.env.ref('account.data_account_type_revenue')
self.journalrec = self.journal_obj.search([('type', '=', 'sale')])[0]
self.account_id = self.account_obj.search([
('user_type_id', '=', self.account_revenue.id)], limit=1)
self.reson_id = self.env.ref('account_invoice_refund_reason.'
'refund_reason_cancellation')
self.account_rec1_id = self.account_obj.create(dict(
code="cust_acc",
name="customer account",
user_type_id=self.account_user_type.id,
reconcile=True,
))
invoice_line_data = [
(0, 0,
{
'product_id': self.product_id.id,
'quantity': 10.0,
'account_id': self.account_id.id,
'name': 'product test 5',
'price_unit': 100.00,
}
)
]
self.account_invoice_customer0 = self.account_invoice_obj.create(dict(
name="Test Customer Invoice",
payment_term_id=self.payment_term.id,
journal_id=self.journalrec.id,
partner_id=self.partner3.id,
account_id=self.account_rec1_id.id,
invoice_line_ids=invoice_line_data
))
def test_onchange_reason_id(self):
self.account_invoice_customer0.action_invoice_open()
self.account_invoice_refund_0 = self.invoice_refund_obj.create(dict(
description='Credit Note',
date=datetime.date.today(),
filter_refund='refund',
reason_id=self.reson_id.id
))
self.account_invoice_refund_0._onchange_reason_id()
self.assertEqual(self.account_invoice_refund_0.description,
self.account_invoice_refund_0.reason_id.name)
self.account_invoice_refund_0.invoice_refund()
| Python | 0 |
270825e739e0662c04181a68d393249acc6e59c5 | FIX reg ganancias no requerido en pagos de clientes | l10n_ar_account_withholding/models/account_payment_group.py | l10n_ar_account_withholding/models/account_payment_group.py | # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, fields
class AccountPaymentGroup(models.Model):
_inherit = "account.payment.group"
# @api.model
# def _get_regimen_ganancias(self):
# result = []
# for line in self.
# return
retencion_ganancias = fields.Selection([
# _get_regimen_ganancias,
('imposibilidad_retencion', 'Imposibilidad de Retención'),
('no_aplica', 'No Aplica'),
('nro_regimen', 'Nro Regimen'),
],
'Retención Ganancias',
readonly=True,
states={'draft': [('readonly', False)],
'confirmed': [('readonly', False)]}
)
regimen_ganancias_id = fields.Many2one(
'afip.tabla_ganancias.alicuotasymontos',
'Regimen Ganancias',
readonly=True,
ondelete='restrict',
states={'draft': [('readonly', False)],
'confirmed': [('readonly', False)]}
)
company_regimenes_ganancias_ids = fields.Many2many(
'afip.tabla_ganancias.alicuotasymontos',
compute='_company_regimenes_ganancias',
)
@api.multi
@api.depends('company_id.regimenes_ganancias_ids')
def _company_regimenes_ganancias(self):
"""
Lo hacemos con campo computado y no related para que solo se setee
y se exija si es pago de o a proveedor
"""
for rec in self.filtered(lambda x: x.partner_type == 'supplier'):
rec.company_regimenes_ganancias_ids = (
rec.company_id.regimenes_ganancias_ids)
@api.onchange('retencion_ganancias', 'commercial_partner_id')
def change_retencion_ganancias(self):
def_regimen = False
if self.retencion_ganancias == 'nro_regimen':
cia_regs = self.company_regimenes_ganancias_ids
partner_regimen = (
self.commercial_partner_id.default_regimen_ganancias_id)
if partner_regimen and partner_regimen in cia_regs:
def_regimen = partner_regimen
elif cia_regs:
def_regimen = cia_regs[0]
self.regimen_ganancias_id = def_regimen
@api.onchange('company_regimenes_ganancias_ids')
def change_company_regimenes_ganancias(self):
# partner_type == 'supplier' ya lo filtra el company_regimenes_ga...
if self.company_regimenes_ganancias_ids:
self.retencion_ganancias = 'nro_regimen'
# sacamos esto por ahora ya que no es muy prolijo y nos se esta usando, si
# lo llegamos a activar entonces tener en cuenta que en sipreco no queremos
# que en borrador se setee ninguna regimen de ganancias
# @api.model
# def create(self, vals):
# """
# para casos donde se paga desde algun otro lugar (por ej. liquidador
# de impuestos), seteamos no aplica si no hay nada seteado
# """
# payment_group = super(AccountPaymentGroup, self).create(vals)
# if (
# payment_group.company_regimenes_ganancias_ids and
# payment_group.partner_type == 'supplier' and
# not payment_group.retencion_ganancias and
# not payment_group.regimen_ganancias_id):
# payment_group.retencion_ganancias = 'no_aplica'
# return payment_group
| # -*- coding: utf-8 -*-
##############################################################################
# For copyright and license notices, see __openerp__.py file in module root
# directory
##############################################################################
from openerp import models, api, fields
class AccountPaymentGroup(models.Model):
_inherit = "account.payment.group"
# @api.model
# def _get_regimen_ganancias(self):
# result = []
# for line in self.
# return
retencion_ganancias = fields.Selection([
# _get_regimen_ganancias,
('imposibilidad_retencion', 'Imposibilidad de Retención'),
('no_aplica', 'No Aplica'),
('nro_regimen', 'Nro Regimen'),
],
'Retención Ganancias',
readonly=True,
states={'draft': [('readonly', False)],
'confirmed': [('readonly', False)]}
)
regimen_ganancias_id = fields.Many2one(
'afip.tabla_ganancias.alicuotasymontos',
'Regimen Ganancias',
readonly=True,
ondelete='restrict',
states={'draft': [('readonly', False)],
'confirmed': [('readonly', False)]}
)
company_regimenes_ganancias_ids = fields.Many2many(
related='company_id.regimenes_ganancias_ids',
readonly=True,
)
@api.onchange('retencion_ganancias', 'commercial_partner_id')
def change_retencion_ganancias(self):
def_regimen = False
if self.retencion_ganancias == 'nro_regimen':
cia_regs = self.company_regimenes_ganancias_ids
partner_regimen = (
self.commercial_partner_id.default_regimen_ganancias_id)
if partner_regimen and partner_regimen in cia_regs:
def_regimen = partner_regimen
elif cia_regs:
def_regimen = cia_regs[0]
self.regimen_ganancias_id = def_regimen
@api.onchange('company_regimenes_ganancias_ids')
def change_company_regimenes_ganancias(self):
if (
self.company_regimenes_ganancias_ids and
self.partner_type == 'supplier'):
self.retencion_ganancias = 'nro_regimen'
# sacamos esto por ahora ya que no es muy prolijo y nos se esta usando, si
# lo llegamos a activar entonces tener en cuenta que en sipreco no queremos
# que en borrador se setee ninguna regimen de ganancias
# @api.model
# def create(self, vals):
# """
# para casos donde se paga desde algun otro lugar (por ej. liquidador
# de impuestos), seteamos no aplica si no hay nada seteado
# """
# payment_group = super(AccountPaymentGroup, self).create(vals)
# if (
# payment_group.company_regimenes_ganancias_ids and
# payment_group.partner_type == 'supplier' and
# not payment_group.retencion_ganancias and
# not payment_group.regimen_ganancias_id):
# payment_group.retencion_ganancias = 'no_aplica'
# return payment_group
| Python | 0 |
a3cd9df5c807026ac78de56614d09411991d9573 | Add message limit to subscription lifecycle filtering | subscriptions/management/commands/fix_subscription_lifecycle.py | subscriptions/management/commands/fix_subscription_lifecycle.py | from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
import json
from subscriptions.models import Subscription
from subscriptions.tasks import send_next_message
class Command(BaseCommand):
help = ("This command is used when the subscription has fallen behind "
"schedule. Leave the action argument blank to see how many "
"subscriptions are behind. Running the command with `--action "
"send` will send a message to each subscription that is behind. "
"Running the command with `--action fast_forward` will fast "
"forward the subscriptions that are behind to the end_date. "
"Running the command with `--action diff` will print out the "
"differences that running the command would make.")
def add_arguments(self, parser):
parser.add_argument(
"--end_date", dest="end_date", default=datetime.now(),
type=lambda today: datetime.strptime(today, '%Y%m%d'),
help='''Fast forward subscription to end_date
By default it will use datetime.now() (format YYYYMMDD)'''
)
parser.add_argument(
"--action", dest="action", default=False,
help=("Set to `send` to send next message or `fast_forward` to "
"fast forward the subscription or `diff` to print out the "
"changes that the command would make."))
parser.add_argument(
"--verbose", dest="verbose", default=False,
help=("Print out some details on the relevant subscriptions."))
parser.add_argument(
"--message-set", dest="message_set", default=None, type=int,
help=("Only apply the action to the subscriptions that are for "
"the specified message set, defaults to all message sets."))
parser.add_argument(
"--messages-limit", dest="messages_limit", default=None, type=int,
help=("Only apply the action to subscriptions that are behind by"
"the limit or less than the limit. Defaults to no limit.")
)
def handle(self, *args, **options):
action = options['action']
verbose = options['verbose']
end_date = options['end_date']
end_date = end_date.replace(tzinfo=timezone.utc)
message_set = options['message_set']
messages_limit = options['messages_limit']
behind = 0
forwards = 0
sends = 0
subscriptions = Subscription.objects.filter(active=True,
process_status=0)
if message_set is not None:
subscriptions = subscriptions.filter(messageset__pk=message_set)
for sub in subscriptions.iterator():
number, complete = sub.get_expected_next_sequence_number(end_date)
if (
messages_limit is not None and
number - sub.next_sequence_number > messages_limit):
continue
if number > sub.next_sequence_number:
if verbose:
self.stdout.write("{}: {}".format(sub.id, number -
sub.next_sequence_number))
if action == 'fast_forward':
Subscription.fast_forward_lifecycle(sub, end_date)
forwards += 1
elif action == 'send':
send_next_message.apply_async(args=[str(sub.id)])
sends += 1
elif action == 'diff':
start_ms = sub.messageset.pk
start_nseq = sub.next_sequence_number
subs = Subscription.fast_forward_lifecycle(
sub, end_date, save=False)
end_sub = subs[-1]
self.stdout.write(json.dumps({
"language": sub.lang,
"identity": sub.identity,
"current_messageset_id": start_ms,
"current_sequence_number": start_nseq,
"expected_messageset_id": end_sub.messageset.pk,
"expected_sequence_number":
end_sub.next_sequence_number,
}))
behind += 1
self.stdout.write("%s subscription%s behind schedule."
% (behind, '' if behind == 1 else 's'))
self.stdout.write("%s subscription%s fast forwarded to end date."
% (forwards, '' if forwards == 1 else 's'))
self.stdout.write("Message sent to %s subscription%s."
% (sends, '' if sends == 1 else 's'))
| from datetime import datetime
from django.core.management.base import BaseCommand
from django.utils import timezone
import json
from subscriptions.models import Subscription
from subscriptions.tasks import send_next_message
class Command(BaseCommand):
help = ("This command is used when the subscription has fallen behind "
"schedule. Leave the action argument blank to see how many "
"subscriptions are behind. Running the command with `--action "
"send` will send a message to each subscription that is behind. "
"Running the command with `--action fast_forward` will fast "
"forward the subscriptions that are behind to the end_date. "
"Running the command with `--action diff` will print out the "
"differences that running the command would make.")
def add_arguments(self, parser):
parser.add_argument(
"--end_date", dest="end_date", default=datetime.now(),
type=lambda today: datetime.strptime(today, '%Y%m%d'),
help='''Fast forward subscription to end_date
By default it will use datetime.now() (format YYYYMMDD)'''
)
parser.add_argument(
"--action", dest="action", default=False,
help=("Set to `send` to send next message or `fast_forward` to "
"fast forward the subscription or `diff` to print out the "
"changes that the command would make."))
parser.add_argument(
"--verbose", dest="verbose", default=False,
help=("Print out some details on the relevant subscriptions."))
parser.add_argument(
"--message-set", dest="message_set", default=None, type=int,
help=("Only apply the action to the subscriptions that are for "
"the specified message set, defaults to all message sets."))
def handle(self, *args, **options):
action = options['action']
verbose = options['verbose']
end_date = options['end_date']
end_date = end_date.replace(tzinfo=timezone.utc)
message_set = options['message_set']
behind = 0
forwards = 0
sends = 0
subscriptions = Subscription.objects.filter(active=True,
process_status=0)
if message_set is not None:
subscriptions = subscriptions.filter(messageset__pk=message_set)
for sub in subscriptions.iterator():
number, complete = sub.get_expected_next_sequence_number(end_date)
if number > sub.next_sequence_number:
if verbose:
self.stdout.write("{}: {}".format(sub.id, number -
sub.next_sequence_number))
if action == 'fast_forward':
Subscription.fast_forward_lifecycle(sub, end_date)
forwards += 1
elif action == 'send':
send_next_message.apply_async(args=[str(sub.id)])
sends += 1
elif action == 'diff':
start_ms = sub.messageset.pk
start_nseq = sub.next_sequence_number
subs = Subscription.fast_forward_lifecycle(
sub, end_date, save=False)
end_sub = subs[-1]
self.stdout.write(json.dumps({
"language": sub.lang,
"identity": sub.identity,
"current_messageset_id": start_ms,
"current_sequence_number": start_nseq,
"expected_messageset_id": end_sub.messageset.pk,
"expected_sequence_number":
end_sub.next_sequence_number,
}))
behind += 1
self.stdout.write("%s subscription%s behind schedule."
% (behind, '' if behind == 1 else 's'))
self.stdout.write("%s subscription%s fast forwarded to end date."
% (forwards, '' if forwards == 1 else 's'))
self.stdout.write("Message sent to %s subscription%s."
% (sends, '' if sends == 1 else 's'))
| Python | 0 |
5d083a15a71aac24c3c4d29dd753067a93c62495 | Fix id builtin being overwritten | EasyEuler/data.py | EasyEuler/data.py | import collections
import json
import os
from jinja2 import Environment, FileSystemLoader
from EasyEuler import paths
class ProblemList(collections.Sequence):
def __init__(self, problems):
self._problems = problems
def get(self, problem_id):
if problem_id < 1 or len(self) < problem_id:
# We don't want a negative index,
# because it'll wrap back around.
return None
return self[problem_id]
def __getitem__(self, problem_id):
return self._problems[problem_id - 1]
def __len__(self):
return len(self._problems)
class ConfigurationDictionary(collections.Mapping):
def __init__(self, configs):
self._config = {}
for config in configs:
self._config = self._update(self._config, config)
def _update(self, config, updates):
for key, value in updates.items():
if isinstance(value, collections.Mapping):
updated = self._update(config.get(key, {}), value)
config[key] = updated
else:
config[key] = value
return config
def get_language(self, key, value):
for name, options in self._config['languages'].items():
if options[key] == value:
return {'name': name, **options}
return None
def __getitem__(self, key):
return self._config[key]
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
config_list = []
for CONFIG_PATH in paths.CONFIGS:
if os.path.exists(CONFIG_PATH):
with open(CONFIG_PATH) as conf:
config_list.append(json.load(conf))
with open(paths.PROBLEMS) as f:
problem_list = json.load(f)
config = ConfigurationDictionary(config_list)
problems = ProblemList(problem_list)
templates = Environment(loader=FileSystemLoader(paths.TEMPLATES))
| import collections
import json
import os
from jinja2 import Environment, FileSystemLoader
from EasyEuler import paths
class ProblemList(collections.Sequence):
def __init__(self, problems):
self._problems = problems
def get(self, id):
if id < 1 or len(self) < id:
# We don't want a negative index, because it'll wrap back around.
return None
return self[id]
def __getitem__(self, id):
return self._problems[id - 1]
def __len__(self):
return len(self._problems)
class ConfigurationDictionary(collections.Mapping):
def __init__(self, configs):
self._config = {}
for config in configs:
self._config = self._update(self._config, config)
def _update(self, config, updates):
for key, value in updates.items():
if isinstance(value, collections.Mapping):
updated = self._update(config.get(key, {}), value)
config[key] = updated
else:
config[key] = value
return config
def get_language(self, key, value):
for name, options in self._config['languages'].items():
if options[key] == value:
return {'name': name, **options}
return None
def __getitem__(self, key):
return self._config[key]
def __iter__(self):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
config_list = []
for CONFIG_PATH in paths.CONFIGS:
if os.path.exists(CONFIG_PATH):
with open(CONFIG_PATH) as conf:
config_list.append(json.load(conf))
with open(paths.PROBLEMS) as f:
problem_list = json.load(f)
config = ConfigurationDictionary(config_list)
problems = ProblemList(problem_list)
templates = Environment(loader=FileSystemLoader(paths.TEMPLATES))
| Python | 0.000001 |
d095e0179befa80c03c83314c6a095d3b0d0bd70 | and , should be optional | IRCMessage.py | IRCMessage.py | # -*- coding: utf-8 -*-
import ServerInfo
from enum import Enum
import re
class TargetTypes(Enum):
CHANNEL = 1
USER = 2
class IRCChannel(object):
def __init__(self, name):
"""
@type name: str
"""
self.Name = name
self.Topic = ''
self.TopicSetBy = ''
self.Users = {}
self.Ranks = {}
self.Modes = {}
def __str__(self):
return self.Name
def getHighestStatusOfUser(self, nickname):
if not self.Ranks[nickname]:
return None
for mode in ServerInfo.StatusOrder:
if mode in self.Ranks[nickname]:
return mode
return None
class IRCUser(object):
def __init__(self, user):
"""
@type user: str
"""
self.User = None
self.Hostmask = None
if '!' in user:
userArray = user.split('!')
self.Name = userArray[0]
if len(userArray) > 1:
userArray = userArray[1].split('@')
self.User = userArray[0]
self.Hostmask = userArray[1]
self.String = "{}!{}@{}".format(self.Name, self.User, self.Hostmask)
else:
self.Name = user
self.String = "{}!{}@{}".format(self.Name, None, None)
class IRCMessage(object):
def __init__(self, msgType, user, channel, message, bot):
"""
@type msgType: str
@type user: str
@type channel: IRCChannel
@type message: unicode
@type bot: MoronBot
"""
try:
unicodeMessage = message.decode('utf-8', 'ignore')
except UnicodeEncodeError: # Already utf-8?
unicodeMessage = message
self.Type = msgType
self.MessageList = unicodeMessage.strip().split(' ')
self.MessageString = unicodeMessage
self.User = IRCUser(user)
self.Channel = None
if channel is None:
self.ReplyTo = self.User.Name
self.TargetType = TargetTypes.USER
else:
self.Channel = channel
# I would like to set this to the channel object but I would probably break functionality if I did :I
self.ReplyTo = channel.Name
self.TargetType = TargetTypes.CHANNEL
self.Command = ''
self.Parameters = ''
self.ParameterList = []
if self.MessageList[0].startswith(bot.commandChar):
self.Command = self.MessageList[0][len(bot.commandChar):]
if self.Command == '':
self.Command = self.MessageList[1]
self.Parameters = u' '.join(self.MessageList[2:])
else:
self.Parameters = u' '.join(self.MessageList[1:])
elif re.match('{}[:,]?'.format(re.escape(bot.nickname)), self.MessageList[0], re.IGNORECASE):
if len(self.MessageList) > 1:
self.Command = self.MessageList[1]
self.Parameters = u' '.join(self.MessageList[2:])
if self.Parameters.strip():
self.ParameterList = self.Parameters.split(' ')
self.ParameterList = [param for param in self.ParameterList if param != '']
if len(self.ParameterList) == 1 and not self.ParameterList[0]:
self.ParameterList = []
| # -*- coding: utf-8 -*-
import ServerInfo
from enum import Enum
import re
class TargetTypes(Enum):
CHANNEL = 1
USER = 2
class IRCChannel(object):
def __init__(self, name):
"""
@type name: str
"""
self.Name = name
self.Topic = ''
self.TopicSetBy = ''
self.Users = {}
self.Ranks = {}
self.Modes = {}
def __str__(self):
return self.Name
def getHighestStatusOfUser(self, nickname):
if not self.Ranks[nickname]:
return None
for mode in ServerInfo.StatusOrder:
if mode in self.Ranks[nickname]:
return mode
return None
class IRCUser(object):
def __init__(self, user):
"""
@type user: str
"""
self.User = None
self.Hostmask = None
if '!' in user:
userArray = user.split('!')
self.Name = userArray[0]
if len(userArray) > 1:
userArray = userArray[1].split('@')
self.User = userArray[0]
self.Hostmask = userArray[1]
self.String = "{}!{}@{}".format(self.Name, self.User, self.Hostmask)
else:
self.Name = user
self.String = "{}!{}@{}".format(self.Name, None, None)
class IRCMessage(object):
def __init__(self, msgType, user, channel, message, bot):
"""
@type msgType: str
@type user: str
@type channel: IRCChannel
@type message: unicode
@type bot: MoronBot
"""
try:
unicodeMessage = message.decode('utf-8', 'ignore')
except UnicodeEncodeError: # Already utf-8?
unicodeMessage = message
self.Type = msgType
self.MessageList = unicodeMessage.strip().split(' ')
self.MessageString = unicodeMessage
self.User = IRCUser(user)
self.Channel = None
if channel is None:
self.ReplyTo = self.User.Name
self.TargetType = TargetTypes.USER
else:
self.Channel = channel
# I would like to set this to the channel object but I would probably break functionality if I did :I
self.ReplyTo = channel.Name
self.TargetType = TargetTypes.CHANNEL
self.Command = ''
self.Parameters = ''
self.ParameterList = []
if self.MessageList[0].startswith(bot.commandChar):
self.Command = self.MessageList[0][len(bot.commandChar):]
if self.Command == '':
self.Command = self.MessageList[1]
self.Parameters = u' '.join(self.MessageList[2:])
else:
self.Parameters = u' '.join(self.MessageList[1:])
elif re.match('{}[:,]'.format(re.escape(bot.nickname)), self.MessageList[0], re.IGNORECASE):
if len(self.MessageList) > 1:
self.Command = self.MessageList[1]
self.Parameters = u' '.join(self.MessageList[2:])
if self.Parameters.strip():
self.ParameterList = self.Parameters.split(' ')
self.ParameterList = [param for param in self.ParameterList if param != '']
if len(self.ParameterList) == 1 and not self.ParameterList[0]:
self.ParameterList = []
| Python | 0.999967 |
4899188e8eb0676ccf13525ed6562b85878cb0ea | Return attributes in a dictionary rather than in a list | scripts/parse_header.py | scripts/parse_header.py | #!/usr/bin/python
# parse_header.py finds all exported functions in a GDK header and writes them
# to a nice XML file. That file can be used to e.g. generate code for SA-MP natives.
import os
import re
import sys
import xml.dom.minidom
def parse_argument_list(string):
""" For each entry of the arg_list returns a tuple made of
argument type and name. """
for string in re.split(r"\s*,\s*", string):
if len(string) == 0:
continue
match = re.match(r"([\w ]+ |[\w ]+\*)(\w+)$", string)
if len(match.groups()) < 2:
continue
yield (match.group(1).strip(), match.group(2).strip())
def parse_attributes(string):
""" Parse generator attributes. Each attribute is a key=value pair
separated by commas. """
if string != None:
items = re.split(r"\s*,\s*", string)
for item in items:
attr = re.split(r"\s*=\s*", item, maxsplit=1)
if len(attr) != 2:
yield (attr[0], None)
else:
yield tuple(attr)
def get_comment_text(comment):
""" Extracts text in /* ... */ comments (C-style comments). """
text = comment
text = re.sub("^\s*/\*\s*", "", text)
text = re.sub("\s*\*/\s*$", "", text)
return text
def parse_function_decl(string, pattern):
""" Returns a tuple of the form: (type, name, args, attributes)
where "args" is a again a list of tuples (type, name) that represents
function arguments and "attributes" is a dictionary of attributes. """
match = re.match(pattern, string)
if match == None:
return None
type = match.group(1)
name = match.group(2)
args = parse_argument_list(match.group(3))
comment = match.group(4)
if comment is not None:
comment = get_comment_text(comment)
attrs = {}
for name, value in parse_attributes(comment):
attrs[name] = value
return (type, name, args, attrs)
def parse_header(text):
pattern = r"SAMPGDK_EXPORT (int|bool|float) SAMPGDK_CALL (\w+)\((.*)\);\s*(/\*.*$)?"
for line in text.splitlines():
decl = parse_function_decl(line, pattern)
if decl is not None:
yield decl
def main(argv):
document = xml.dom.minidom.Document()
exports = document.createElement("exports")
for type, name, args, attrs in parse_header(sys.stdin.read()):
function = document.createElement("function")
function.setAttribute("type", type)
function.setAttribute("name", name)
for type, name in args:
argument = document.createElement("argument")
argument.setAttribute("type", type)
argument.setAttribute("name", name)
function.appendChild(argument)
for name, value in attrs.items():
attribute = document.createElement("attribute")
attribute.setAttribute("name", name)
if value is not None:
attribute.setAttribute("value", value)
function.appendChild(attribute)
exports.appendChild(function)
pass
document.appendChild(exports)
print document.toprettyxml(indent="\t")
if __name__ == "__main__":
main(sys.argv)
| #!/usr/bin/python
# parse_header.py finds all exported functions in a GDK header and writes them
# to a nice XML file. That file can be used to e.g. generate code for SA-MP natives.
import os
import re
import sys
import xml.dom.minidom
def parse_argument_list(string):
""" For each entry of the arg_list returns a tuple made of
argument type and name. """
for string in re.split(r"\s*,\s*", string):
if len(string) == 0:
continue
match = re.match(r"([\w ]+ |[\w ]+\*)(\w+)$", string)
if len(match.groups()) < 2:
continue
yield (match.group(1).strip(), match.group(2).strip())
def parse_attributes(string):
""" Parse generator attributes. Each attribute is a key=value pair
separated by commas. """
if string != None:
items = re.split(r"\s*,\s*", string)
for item in items:
attr = re.split(r"\s*=\s*", item, maxsplit=1)
if len(attr) != 2:
yield (attr[0], None)
else:
yield tuple(attr)
def get_comment_text(comment):
""" Extracts text in /* ... */ comments (C-style comments). """
text = comment
text = re.sub("^\s*/\*\s*", "", text)
text = re.sub("\s*\*/\s*$", "", text)
return text
def parse_function_decl(string, pattern):
""" Returns a tuple of the form: (type, name, args, attributes)
where "args" is a again a list of tuples (type, name) that represents
function arguments and "attributes" is a dictionary of attributes. """
match = re.match(pattern, string)
if match == None:
return None
type = match.group(1)
name = match.group(2)
args = parse_argument_list(match.group(3))
comment = match.group(4)
if comment is not None:
comment = get_comment_text(comment)
attrs = parse_attributes(comment)
return (type, name, args, attrs)
def parse_header(text):
pattern = r"SAMPGDK_EXPORT (int|bool|float) SAMPGDK_CALL (\w+)\((.*)\);\s*(/\*.*$)?"
for line in text.splitlines():
decl = parse_function_decl(line, pattern)
if decl is not None:
yield decl
def main(argv):
document = xml.dom.minidom.Document()
exports = document.createElement("exports")
for type, name, args, attrs in parse_header(sys.stdin.read()):
function = document.createElement("function")
function.setAttribute("type", type)
function.setAttribute("name", name)
for type, name in args:
argument = document.createElement("argument")
argument.setAttribute("type", type)
argument.setAttribute("name", name)
function.appendChild(argument)
for name, value in attrs:
attribute = document.createElement("attribute")
attribute.setAttribute("name", name)
if value is not None:
attribute.setAttribute("value", value)
function.appendChild(attribute)
exports.appendChild(function)
pass
document.appendChild(exports)
print document.toprettyxml(indent="\t")
if __name__ == "__main__":
main(sys.argv)
| Python | 0.000099 |
483a66a693fd119192c12ee63c56a1da406fa3ca | fix templates path | accounts/views.py | accounts/views.py | from django.shortcuts import render
from django.urls import reverse
def profile(response, profile):
return render(response, 'account/profile.html')
| from django.shortcuts import render
from django.urls import reverse
def profile(response, profile):
return render(response, 'accounts/profile.html')
| Python | 0.000001 |
da9bab1d15d3f54d2ac65701e533b9bc34ebfea5 | remove test skip | tests/cupy_tests/array_api_tests/test_sorting_functions.py | tests/cupy_tests/array_api_tests/test_sorting_functions.py | import pytest
from cupy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
| import pytest
from cupy import array_api as xp
@pytest.mark.parametrize(
"obj, axis, expected",
[
([0, 0], -1, [0, 1]),
([0, 1, 0], -1, [1, 0, 2]),
([[0, 1], [1, 1]], 0, [[1, 0], [0, 1]]),
([[0, 1], [1, 1]], 1, [[1, 0], [0, 1]]),
],
)
@pytest.mark.skipif(
# https://github.com/cupy/cupy/issues/5701
True, reason="Sorting functions miss arguments kind and order")
def test_stable_desc_argsort(obj, axis, expected):
"""
Indices respect relative order of a descending stable-sort
See https://github.com/numpy/numpy/issues/20778
"""
x = xp.asarray(obj)
out = xp.argsort(x, axis=axis, stable=True, descending=True)
assert xp.all(out == xp.asarray(expected))
| Python | 0.000001 |
6231afb51f5653e210f41d47c66797c4bd4d738d | Make it possible for the user to change username | accounts/views.py | accounts/views.py | # coding: utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import UpdateView
from django.core.urlresolvers import reverse_lazy
from volunteer_planner.utils import LoginRequiredMixin
@login_required()
def user_account_detail(request):
user = request.user
return render(request, 'user_detail.html', {'user': user})
class AccountUpdateView(LoginRequiredMixin, UpdateView):
"""
Allows a user to update their profile.
"""
fields = ['first_name', 'last_name', 'username']
template_name = "user_account_edit.html"
success_url = reverse_lazy('account_detail')
def get_object(self, queryset=None):
return self.request.user
| # coding: utf-8
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.views.generic.edit import UpdateView
from django.core.urlresolvers import reverse_lazy
from volunteer_planner.utils import LoginRequiredMixin
@login_required()
def user_account_detail(request):
user = request.user
return render(request, 'user_detail.html', {'user': user})
class AccountUpdateView(LoginRequiredMixin, UpdateView):
"""
Allows a user to update their profile.
"""
fields = ['first_name', 'last_name']
template_name = "user_account_edit.html"
success_url = reverse_lazy('account_detail')
def get_object(self, queryset=None):
return self.request.user
| Python | 0.998407 |
0bb558351a58caaca61eb381cc9a3a4ee4b881bb | format code | accounts/views.py | accounts/views.py | from django.shortcuts import render, redirect
from accounts.models import UserProfile
def index(request):
users = UserProfile.objects.all()
message = request.session.get('message', None)
info = request.session.get('info', None)
warning = request.session.get('warning', None)
alert = request.session.get('alert', None)
request.session['message'] = None
request.session['info'] = None
request.session['warning'] = None
request.session['alert'] = None
return render(request, 'accounts/index.html', {
'users': users,
'message': message,
'info': info,
'warning': warning,
'alert': alert
})
def add(request):
if request.method == 'POST':
user = UserProfile(username=request.POST.get('username'),
birthday=request.POST.get('birthday'))
user.save()
return redirect('/')
return render(request, 'accounts/add.html')
def edit(request, pk):
user = UserProfile.objects.get(pk=pk)
if request.method == 'POST':
user.username = request.POST.get('username', user.username)
user.birthday = request.POST.get('birthday', user.birthday)
user.save()
return redirect('/')
return render(request, 'accounts/edit.html', {
'user': user
})
def view(request, pk):
user = UserProfile.objects.get(pk=pk)
return render(request, 'accounts/view.html', {
'user': user
})
def delete(request, pk):
try:
user = UserProfile.objects.get(pk=pk)
user.delete()
request.session['message'] = 'User has been deleted'
except UserProfile.DoesNotExist:
request.session['alert'] = 'User does not exist'
return redirect('/')
| from django.shortcuts import render, redirect
from accounts.models import UserProfile
def index(request):
users = UserProfile.objects.all()
message = request.session.get('message', None)
info = request.session.get('info', None)
warning = request.session.get('warning', None)
alert = request.session.get('alert', None)
request.session['message'] = None
request.session['info'] = None
request.session['warning'] = None
request.session['alert'] = None
return render(request, 'accounts/index.html', {
'users': users,
'message': message,
'info': info,
'warning': warning,
'alert': alert
})
def add(request):
if request.method == 'POST':
user = UserProfile(username=request.POST.get('username'),
birthday=request.POST.get('birthday'))
user.save()
return redirect('/')
return render(request, 'accounts/add.html')
def edit(request, pk):
user = UserProfile.objects.get(pk=pk)
if request.method == 'POST':
user.username = request.POST.get('username', user.username)
user.birthday = request.POST.get('birthday', user.birthday)
user.save()
return redirect('/')
return render(request, 'accounts/edit.html', {
'user': user
})
def view(request, pk):
user = UserProfile.objects.get(pk=pk)
return render(request, 'accounts/view.html', {
'user': user
})
def delete(request, pk):
try:
user = UserProfile.objects.get(pk=pk)
user.delete()
request.session['message'] = 'User has been deleted'
except UserProfile.DoesNotExist:
request.session['alert'] = 'User does not exist'
return redirect('/')
| Python | 0.000061 |
7c75da48d6746fc148a79051338c3cd554d75615 | Change variable name to next for logout function | accounts/views.py | accounts/views.py | from django.shortcuts import redirect
from django.contrib.auth import logout as auth_logout
from django.conf import settings
def logout(request):
"""Logs out user redirects if in request"""
next = request.GET.get('next', '')
auth_logout(request)
if next:
return redirect('{}/?next={}'.format(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL, next))
else:
return redirect(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL)
| from django.shortcuts import redirect
from django.contrib.auth import logout as auth_logout
from django.conf import settings
def logout(request):
"""Logs out user redirects if in request"""
r = request.GET.get('r', '')
auth_logout(request)
if r:
return redirect('{}/?r={}'.format(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL, r))
else:
return redirect(settings.OPENSTAX_ACCOUNTS_LOGOUT_URL)
| Python | 0.000001 |
a1aa922643ddbe6bd2beb497d087f5ff9e6233df | Add edx needed dependencies | test-settings.py | test-settings.py | from settings import *
from path import path
from openedx.core.lib.tempdir import mkdtemp_clean
from django.conf import settings
from uuid import uuid4
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Add edx required
'instructor_task',
'enrollment',
# Student Identity Verification
'lms.djangoapps.verify_student',
# ADD EDX-platform dependencies before test
'xmodule_django',
'track',
'social.apps.django_app.default',
'xblock_django',
'student',
# Real app of this package
'ecoapi',
'oai',
'xapi'
)
# Copy from edx-platform
FEATURES = {}
ALL_LANGUAGES = (
[u"en", u"English"],
[u"it", u"Italian"],
)
XQUEUE_INTERFACE = {
"basic_auth": [
"edx",
"edx"
],
"django_auth": {
"password": "password",
"username": "lms"
},
"url": "http://localhost:18040"
}
TRACK_MAX_EVENT = 50000
COMMON_ROOT = os.environ.get("PYTHONENV", "") + "/edx-platform/common"
COMMON_TEST_DATA_ROOT = COMMON_ROOT + "/test/data"
TEST_ROOT = path("test_root")
print TEST_ROOT
MONGO_PORT_NUM = int(os.environ.get('MONGO_PORT_27017_TCP_PORT', '27017'))
MONGO_HOST = os.environ.get('MONGO_PORT_27017_TCP_ADDR', 'localhost')
THIS_UUID = uuid4().hex[:5]
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'db': 'test_xmodule',
'collection': 'test_modulestore{0}'.format(THIS_UUID),
'port': MONGO_PORT_NUM
# If 'asset_collection' defined, it'll be used
# as the collection name for asset metadata.
# Otherwise, a default collection name will be used.
}
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = {}
MODULESTORE_BRANCH = 'draft-preferred'
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': mkdtemp_clean(dir=TEST_ROOT), # never inadvertently load all the XML courses
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
]
}
}
}
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'xcontent'
}
}
| from settings import *
from path import path
from openedx.core.lib.tempdir import mkdtemp_clean
from django.conf import settings
from uuid import uuid4
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Add edx required
'instructor_task',
'enrollment',
# ADD EDX-platform dependencies before test
'xmodule_django',
'track',
'social.apps.django_app.default',
'xblock_django',
'student',
# Real app of this package
'ecoapi',
'oai',
'xapi'
)
# Copy from edx-platform
FEATURES = {}
ALL_LANGUAGES = (
[u"en", u"English"],
[u"it", u"Italian"],
)
XQUEUE_INTERFACE = {
"basic_auth": [
"edx",
"edx"
],
"django_auth": {
"password": "password",
"username": "lms"
},
"url": "http://localhost:18040"
}
TRACK_MAX_EVENT = 50000
COMMON_ROOT = os.environ.get("PYTHONENV", "") + "/edx-platform/common"
COMMON_TEST_DATA_ROOT = COMMON_ROOT + "/test/data"
TEST_ROOT = path("test_root")
print TEST_ROOT
MONGO_PORT_NUM = int(os.environ.get('MONGO_PORT_27017_TCP_PORT', '27017'))
MONGO_HOST = os.environ.get('MONGO_PORT_27017_TCP_ADDR', 'localhost')
THIS_UUID = uuid4().hex[:5]
DOC_STORE_CONFIG = {
'host': MONGO_HOST,
'db': 'test_xmodule',
'collection': 'test_modulestore{0}'.format(THIS_UUID),
'port': MONGO_PORT_NUM
# If 'asset_collection' defined, it'll be used
# as the collection name for asset metadata.
# Otherwise, a default collection name will be used.
}
HOSTNAME_MODULESTORE_DEFAULT_MAPPINGS = {}
MODULESTORE_BRANCH = 'draft-preferred'
MODULESTORE = {
'default': {
'ENGINE': 'xmodule.modulestore.mixed.MixedModuleStore',
'OPTIONS': {
'mappings': {},
'stores': [
{
'NAME': 'split',
'ENGINE': 'xmodule.modulestore.split_mongo.split_draft.DraftVersioningModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'draft',
'ENGINE': 'xmodule.modulestore.mongo.DraftMongoModuleStore',
'DOC_STORE_CONFIG': DOC_STORE_CONFIG,
'OPTIONS': {
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
'fs_root': TEST_ROOT / "data",
'render_template': 'edxmako.shortcuts.render_to_string',
}
},
{
'NAME': 'xml',
'ENGINE': 'xmodule.modulestore.xml.XMLModuleStore',
'OPTIONS': {
'data_dir': mkdtemp_clean(dir=TEST_ROOT), # never inadvertently load all the XML courses
'default_class': 'xmodule.hidden_module.HiddenDescriptor',
}
}
]
}
}
}
CONTENTSTORE = {
'ENGINE': 'xmodule.contentstore.mongo.MongoContentStore',
'DOC_STORE_CONFIG': {
'host': MONGO_HOST,
'port': MONGO_PORT_NUM,
'db': 'xcontent'
}
}
| Python | 0 |
f57326e5f5c7d64d6f7d5f204bcf388de897d5b0 | Revise palindrome function names | alg_palindrome.py | alg_palindrome.py | """Palindrome: a string that read the same forward and backward.
For example: radar, madam.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def palindrome(a_str):
"""Check palindrom by front & rear match by Deque."""
from ds_deque import Deque
str_deque = Deque()
for s in a_str:
str_deque.add_rear(s)
still_match = True
while str_deque.size() > 1 and still_match:
first = str_deque.remove_front()
last = str_deque.remove_rear()
if first != last:
still_match = False
return still_match
def palindrom_recur(a_str):
"""Check palindrome by recursion."""
if len(a_str) <= 1:
return True
else:
return a_str[0] == a_str[-1] and palindrom_recur(a_str[1:-1])
def main():
import time
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, palindrome(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, palindrome(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, palindrome(a_str)))
print('Time for palindrome(): {}'
.format(time.time() - start_time))
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, palindrom_recur(a_str)))
print('Time for palindrom_recur(): {}'
.format(time.time() - start_time))
if __name__ == '__main__':
main()
| """Palindrome: a string that read the same forward and backward.
For example: radar, madam.
"""
from __future__ import print_function
def match_palindrome(a_str):
"""Check palindrom by front & rear match by Deque."""
from ds_deque import Deque
str_deque = Deque()
for s in a_str:
str_deque.add_rear(s)
still_match = True
while str_deque.size() > 1 and still_match:
first = str_deque.remove_front()
last = str_deque.remove_rear()
if first != last:
still_match = False
return still_match
def match_palindrom_recur(a_str):
"""Check palindrome by recursion."""
if len(a_str) <= 1:
return True
else:
return a_str[0] == a_str[-1] and match_palindrom_recur(a_str[1:-1])
def main():
import time
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, match_palindrome(a_str)))
print('Time for match_palindrome(): {}'
.format(time.time() - start_time))
start_time = time.time()
a_str = 'madam'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
a_str = 'Bowen'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
a_str = 'toot'
print('{0}: {1}'.format(a_str, match_palindrom_recur(a_str)))
print('Time for match_palindrom_recur(): {}'
.format(time.time() - start_time))
if __name__ == '__main__':
main()
| Python | 0.998495 |
4f2fb3ac84216096411a5b6583e4fbb22c8e5196 | bump dev version | allel/__init__.py | allel/__init__.py | # -*- coding: utf-8 -*-
# flake8: noqa
from allel import model
from allel import stats
from allel import plot
from allel import io
from allel import chunked
from allel import constants
from allel import util
# convenient shortcuts
from allel.model.ndarray import *
from allel.model.chunked import *
# experimental
try:
import dask.array as _da
from allel.model.dask import *
except ImportError:
pass
# deprecated
try:
import bcolz as _bcolz
from allel.model.bcolz import *
except ImportError:
pass
__version__ = '0.21.0.dev3'
| # -*- coding: utf-8 -*-
# flake8: noqa
from allel import model
from allel import stats
from allel import plot
from allel import io
from allel import chunked
from allel import constants
from allel import util
# convenient shortcuts
from allel.model.ndarray import *
from allel.model.chunked import *
# experimental
try:
import dask.array as _da
from allel.model.dask import *
except ImportError:
pass
# deprecated
try:
import bcolz as _bcolz
from allel.model.bcolz import *
except ImportError:
pass
__version__ = '0.21.0.dev2'
| Python | 0 |
2cc8a2cace6c4a44e7280b0d2312c5aed6d68640 | check matrix condition when generating variances | mosfit/modules/outputs/lightcurve.py | mosfit/modules/outputs/lightcurve.py | """Definitions for the `LightCurve` class."""
from collections import OrderedDict
import numpy as np
from mosfit.modules.outputs.output import Output
# Important: Only define one ``Module`` class per file.
class LightCurve(Output):
    """Output a light curve to disk."""

    # Output keys that hold per-observation ("dense") arrays. These are
    # copied into ``self._dense_keys`` in ``__init__`` and, when building the
    # final output dictionary in ``process``, are written back with any
    # ``all_`` prefix stripped (e.g. ``all_bands`` -> ``bands``).
    _lc_keys = [
        'magnitudes', 'e_magnitudes', 'model_observations', 'countrates',
        'e_countrates', 'all_telescopes', 'all_bands', 'all_systems',
        'all_instruments', 'all_bandsets', 'all_modes', 'all_times',
        'all_frequencies', 'observed', 'all_band_indices', 'observation_types'
    ]
def __init__(self, **kwargs):
"""Initialize module."""
super(LightCurve, self).__init__(**kwargs)
self._dense_keys = self._lc_keys
self._limiting_magnitude = self._model._fitter._limiting_magnitude
def process(self, **kwargs):
"""Process module."""
# First, rename some keys.
output = OrderedDict()
for key in sorted(kwargs.keys()):
if key in self._dense_keys:
continue
output[key] = kwargs[key]
for key in self._dense_keys:
output[key.replace('all_', '')] = kwargs[key]
if self._limiting_magnitude is not None:
ls = 0.0
if isinstance(self._limiting_magnitude, list):
lm = float(self._limiting_magnitude[0])
if len(self._limiting_magnitude) > 1:
ls = float(self._limiting_magnitude[1])
else:
lm = self._limiting_magnitude
lmo = len(output['model_observations'])
omags = output['observation_types'] == 'magnitude'
output['model_variances'] = np.zeros_like(output[
'model_observations'])
output['model_upper_limits'] = np.full(lmo, False)
lms = lm + ls * np.random.randn(lmo)
varias = 10.0 ** (-lms / 2.5)
mods = 10.0 ** (
-np.array(output['model_observations'][omags]) / 2.5)
output['model_observations'][omags] = -2.5 * np.log10(
varias[omags] * np.random.randn(len(omags)) + mods)
obsas = 10.0 ** (
-np.array(output['model_observations']) / 2.5)
output['model_variances'][omags] = np.abs(-output[
'model_observations'][omags] - 2.5 * (
np.log10(varias[omags] + obsas)))
ul_mask = omags & (obsas < 3.0 * varias)
output['model_upper_limits'] = ul_mask
output['model_observations'][ul_mask] = lms[ul_mask]
output['model_variances'][ul_mask] = 2.5 * (
np.log10(2.0 * varias[ul_mask]) - np.log10(varias[ul_mask]))
return output
# Then, apply GP predictions, if available.
if (all([x in kwargs
for x in ['kmat', 'kfmat', 'koamat', 'kaomat']]) and not
any([kwargs[x] is None
for x in ['kmat', 'kfmat', 'koamat', 'kaomat']])):
kmat = kwargs['kmat'] + np.diag(kwargs['kdiagonal'])
if np.linalg.cond(kmat) > 1e10:
output['model_variances'] = np.full(
len(output['model_observations']), kwargs['variance'])
else:
ikmat = np.linalg.inv(kmat)
kfmatd = np.diagonal(kwargs['kfmat'])
koamat = kwargs['koamat']
kaomat = kwargs['kaomat']
output['model_variances'] = np.sqrt(
kfmatd - np.diagonal(np.matmul(np.matmul(
kaomat, ikmat), koamat)))
else:
output['model_variances'] = np.full(
len(output['model_observations']), kwargs['abandvs'])
return output
| """Definitions for the `LightCurve` class."""
from collections import OrderedDict
import numpy as np
from mosfit.modules.outputs.output import Output
# Important: Only define one ``Module`` class per file.
class LightCurve(Output):
"""Output a light curve to disk."""
_lc_keys = [
'magnitudes', 'e_magnitudes', 'model_observations', 'countrates',
'e_countrates', 'all_telescopes', 'all_bands', 'all_systems',
'all_instruments', 'all_bandsets', 'all_modes', 'all_times',
'all_frequencies', 'observed', 'all_band_indices', 'observation_types'
]
def __init__(self, **kwargs):
"""Initialize module."""
super(LightCurve, self).__init__(**kwargs)
self._dense_keys = self._lc_keys
self._limiting_magnitude = self._model._fitter._limiting_magnitude
def process(self, **kwargs):
"""Process module."""
# First, rename some keys.
output = OrderedDict()
for key in sorted(kwargs.keys()):
if key in self._dense_keys:
continue
output[key] = kwargs[key]
for key in self._dense_keys:
output[key.replace('all_', '')] = kwargs[key]
if self._limiting_magnitude is not None:
ls = 0.0
if isinstance(self._limiting_magnitude, list):
lm = float(self._limiting_magnitude[0])
if len(self._limiting_magnitude) > 1:
ls = float(self._limiting_magnitude[1])
else:
lm = self._limiting_magnitude
lmo = len(output['model_observations'])
omags = output['observation_types'] == 'magnitude'
output['model_variances'] = np.zeros_like(output[
'model_observations'])
output['model_upper_limits'] = np.full(lmo, False)
lms = lm + ls * np.random.randn(lmo)
varias = 10.0 ** (-lms / 2.5)
mods = 10.0 ** (
-np.array(output['model_observations'][omags]) / 2.5)
output['model_observations'][omags] = -2.5 * np.log10(
varias[omags] * np.random.randn(len(omags)) + mods)
obsas = 10.0 ** (
-np.array(output['model_observations']) / 2.5)
output['model_variances'][omags] = np.abs(-output[
'model_observations'][omags] - 2.5 * (
np.log10(varias[omags] + obsas)))
ul_mask = omags & (obsas < 3.0 * varias)
output['model_upper_limits'] = ul_mask
output['model_observations'][ul_mask] = lms[ul_mask]
output['model_variances'][ul_mask] = 2.5 * (
np.log10(2.0 * varias[ul_mask]) - np.log10(varias[ul_mask]))
return output
# Then, apply GP predictions, if available.
if (all([x in kwargs
for x in ['kmat', 'kfmat', 'koamat', 'kaomat']]) and not
any([kwargs[x] is None
for x in ['kmat', 'kfmat', 'koamat', 'kaomat']])):
ikmat = np.linalg.inv(
kwargs['kmat'] + np.diag(kwargs['kdiagonal']))
kfmatd = np.diagonal(kwargs['kfmat'])
koamat = kwargs['koamat']
kaomat = kwargs['kaomat']
output['model_variances'] = np.sqrt(kfmatd - np.diagonal(np.matmul(
np.matmul(kaomat, ikmat), koamat)))
else:
output['model_variances'] = np.full(
len(output['model_observations']), kwargs['abandvs'])
return output
| Python | 0 |
709f807368ea7915bc5c2f7d6236b3a24df92c8c | Simplify script for recorded ctrl message injection | scripts/sc-test-cmsg.py | scripts/sc-test-cmsg.py | #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Stany MARCEL <stanypub@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Steam Controller Control Message tester"""
import sys
import struct
import time
from steamcontroller import SteamController
def dump(sc, sci):
print(sci)
def _main():
try:
sc = SteamController(callback=dump)
for line in sys.stdin:
sc.handleEvents()
words = [int('0x'+x,16) for x in line.split()]
sc._sendControl(struct.pack('>' + 'I' * len(words), *words))
sc.run()
except KeyboardInterrupt:
pass
except Exception as e:
sys.stderr.write(str(e) + '\n')
print("Bye")
if __name__ == '__main__':
_main()
| #!/usr/bin/env python
# The MIT License (MIT)
#
# Copyright (c) 2015 Stany MARCEL <stanypub@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Steam Controller USB Dumper"""
import sys
import struct
import time
from steamcontroller import SteamController
def dump(sc, sci):
print(sci)
def _main():
try:
sc = SteamController(callback=dump)
sc.handleEvents()
sc._sendControl(struct.pack('>' + 'I' * 1, 0x81000000))
sc._sendControl(struct.pack('>' + 'I' * 6, 0x87153284, 0x03180000, 0x31020008, 0x07000707, 0x00301400, 0x2f010000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xad020000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0x8e000000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0x85000000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xb4000000))
#sc._sendControl(struct.pack('>' + 'I' * 5, 0x9610730b, 0xc7191248, 0x074eff14, 0x464e82d6, 0xaa960000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
#sc._sendControl(struct.pack('>' + 'I' * 5, 0x9610e0b5, 0xda3a1e90, 0x5b325088, 0x0a6224d2, 0x67690000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
#sc._sendControl(struct.pack('>' + 'I' * 5, 0x96107ef6, 0x0e193e8c, 0xe61d2eda, 0xb80906eb, 0x9fe90000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0xa1000000))
#sc._sendControl(struct.pack('>' + 'I' * 5, 0x96106e4a, 0xa4753ef0, 0x017ab50a, 0x24390f1f, 0x71fa0000))
#sc._sendControl(struct.pack('>' + 'I' * 1, 0x83000000))
#sc._sendControl(struct.pack('>' + 'I' * 6, 0xae150100, 0x00000001, 0x02110000, 0x02030000, 0x000a6d92, 0xd2550400))
sc.run()
except KeyboardInterrupt:
pass
except Exception as e:
sys.stderr.write(str(e) + '\n')
print("Bye")
if __name__ == '__main__':
_main()
| Python | 0.000001 |
51c597d6ea93b27a1e2879cb1d9d250da9ecc799 | Use EventSettingProperty in CFA | indico/modules/events/abstracts/models/call_for_abstracts.py | indico/modules/events/abstracts/models/call_for_abstracts.py | # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.abstracts.settings import abstracts_settings, abstracts_reviewing_settings
from indico.modules.events.settings import EventSettingProperty
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
class CallForAbstracts(object):
"""Proxy class to facilitate access to the call for abstracts settings"""
def __init__(self, event):
self.event = event
@return_ascii
def __repr__(self):
return '<CallForAbstracts({}, start_dt={}, end_dt={})>'.format(self.event.id, self.start_dt, self.end_dt)
start_dt = EventSettingProperty(abstracts_settings, 'start_dt')
end_dt = EventSettingProperty(abstracts_settings, 'end_dt')
modification_end_dt = EventSettingProperty(abstracts_settings, 'modification_end_dt')
allow_contributors_in_comments = EventSettingProperty(abstracts_reviewing_settings,
'allow_contributors_in_comments')
allow_convener_judgment = EventSettingProperty(abstracts_reviewing_settings, 'allow_convener_judgment')
@property
def has_started(self):
return self.start_dt is not None and self.start_dt <= now_utc()
@property
def has_ended(self):
return self.end_dt is not None and self.end_dt <= now_utc()
@property
def is_open(self):
return self.has_started and not self.has_ended
@property
def modification_ended(self):
return self.modification_end_dt is not None and self.modification_end_dt <= now_utc()
@property
def rating_range(self):
return tuple(abstracts_reviewing_settings.get(self.event, key) for key in ('scale_lower', 'scale_upper'))
def can_submit_abstracts(self, user):
return self.is_open or abstracts_settings.acls.contains_user(self.event, 'authorized_submitters', user)
def can_edit_abstracts(self, user):
modification_end = self.modification_end_dt
return self.can_submit_abstracts(user) or (modification_end is not None and modification_end > now_utc())
def schedule(self, start_dt, end_dt, modification_end_dt):
abstracts_settings.set_multi(self.event, {
'start_dt': start_dt,
'end_dt': end_dt,
'modification_end_dt': modification_end_dt
})
def open(self):
if self.has_ended:
abstracts_settings.set_multi(self.event, {
'end_dt': None,
'modification_end_dt': None
})
else:
abstracts_settings.set(self.event, 'start_dt', now_utc(False))
def close(self):
now = now_utc(False)
abstracts_settings.set(self.event, 'end_dt', now)
if not self.has_started:
abstracts_settings.set(self.event_new, 'start_dt', now)
| # This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from indico.modules.events.abstracts.settings import abstracts_settings, abstracts_reviewing_settings
from indico.util.date_time import now_utc
from indico.util.string import return_ascii
class CallForAbstracts(object):
"""Proxy class to facilitate access to the call for abstracts settings"""
def __init__(self, event):
self.event = event
@return_ascii
def __repr__(self):
return '<CallForAbstracts({}, start_dt={}, end_dt={})>'.format(self.event.id, self.start_dt, self.end_dt)
@property
def allow_contributors_in_comments(self):
return abstracts_reviewing_settings.get(self.event, 'allow_contributors_in_comments')
@property
def allow_convener_judgment(self):
return abstracts_reviewing_settings.get(self.event, 'allow_convener_judgment')
@property
def has_started(self):
return self.start_dt is not None and self.start_dt <= now_utc()
@property
def has_ended(self):
return self.end_dt is not None and self.end_dt <= now_utc()
@property
def is_open(self):
return self.has_started and not self.has_ended
@property
def modification_ended(self):
return self.modification_end_dt is not None and self.modification_end_dt <= now_utc()
@property
def start_dt(self):
return abstracts_settings.get(self.event, 'start_dt')
@property
def end_dt(self):
return abstracts_settings.get(self.event, 'end_dt')
@property
def modification_end_dt(self):
return abstracts_settings.get(self.event, 'modification_end_dt') or self.end_dt
@property
def rating_range(self):
return tuple(abstracts_reviewing_settings.get(self.event, key) for key in ('scale_lower', 'scale_upper'))
def can_submit_abstracts(self, user):
return self.is_open or abstracts_settings.acls.contains_user(self.event, 'authorized_submitters', user)
def can_edit_abstracts(self, user):
modification_end = self.modification_end_dt
return self.can_submit_abstracts(user) or (modification_end is not None and modification_end > now_utc())
def schedule(self, start_dt, end_dt, modification_end_dt):
abstracts_settings.set_multi(self.event, {
'start_dt': start_dt,
'end_dt': end_dt,
'modification_end_dt': modification_end_dt
})
def open(self):
if self.has_ended:
abstracts_settings.set_multi(self.event, {
'end_dt': None,
'modification_end_dt': None
})
else:
abstracts_settings.set(self.event, 'start_dt', now_utc(False))
def close(self):
now = now_utc(False)
abstracts_settings.set(self.event, 'end_dt', now)
if not self.has_started:
abstracts_settings.set(self.event_new, 'start_dt', now)
| Python | 0 |
172de448cc0a0903014029eca0a7eee9bbe8b0dc | Set use_middlewares to False in tasks request handlers. | source/tipfy/ext/tasks/__init__.py | source/tipfy/ext/tasks/__init__.py | # -*- coding: utf-8 -*-
"""
tipfy.ext.tasks
~~~~~~~~~~~~~~~
Task queue utilities extension.
:copyright: 2009 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
import logging
from google.appengine.ext import db
from google.appengine.api.labs import taskqueue
from google.appengine.ext.deferred import run, PermanentTaskFailure
from tipfy import local, url_for, RequestHandler
class DeferredHandler(RequestHandler):
"""A handler class that processes deferred tasks invocations, mirrored from
`google.appengine.ext.deferred`. Map to this handler if you want to use the
deferred package running on the same WSGI application as other handlers.
Tipfy utilities will then be available to be used in the deferred function.
The setup for app.yaml is:
- url: /_ah/queue/deferred
script: main.py
login: admin
The URL rule for urls.py is:
Rule('/_ah/queue/deferred', endpoint='tasks/deferred',
handler='tipfy.ext.tasks:DeferredHandler')
"""
use_middlewares = False
def post(self):
headers = ['%s:%s' % (k, v) for k, v in local.request.headers.items()
if k.lower().startswith('x-appengine-')]
logging.info(', '.join(headers))
try:
run(local.request.data)
except PermanentTaskFailure, e:
logging.exception('Permanent failure attempting to execute task')
return local.response
class EntityTaskHandler(RequestHandler):
"""A base class to process all entities in single datastore kind, using
the task queue. On each request, an entity is processed and a new task is
added to process the next entity.
For example, to process all 'MyModel' records:
class MyModelTasks(EntityTaskHandler):
model = MyModel
endpoint = 'tasks/mymodel'
def process_entity(self, entity, retry_count):
# do something with current entity...
# ...
# Return True to process next, using 0 as countdown.
return (True, 0)
A couple of URL rules with a 'key' argument are required:
Rule('/_tasks/process-mymodel/', endpoint='tasks/mymodel',
handler='somemodule.MyModelTasks')
Rule('/_tasks/process-mymodel/<string:key>', endpoint='tasks/mymodel',
handler='somemodule.MyModelTasks')
"""
use_middlewares = False
model = None
endpoint = None
def get(self, **kwargs):
return self.post(**kwargs)
def post(self, **kwargs):
if self.model is None or self.endpoint is None:
raise ValueError('Model or endpoint is not defined.')
entity = self.get_entity(kwargs.get('key', None))
if not entity:
logging.info('Finished all %s entities!' % self.model.__class__)
return local.response
# Process current entity.
logging.info('Processing %s from %s' % (str(entity.key()),
self.model.__class__))
retry_count = int(local.request.headers.get(
'X-AppEngine-TaskRetryCount', 0))
current_key = str(entity.key())
process_next, countdown = self.process_entity(entity, retry_count)
if process_next is True:
# Process next entity.
taskqueue.add(url=url_for(self.endpoint, key=current_key),
countdown=countdown)
return local.response
def get_entity(self, key):
query = self.model.all().order('__key__')
if key:
query.filter('__key__ >', db.Key(key))
return query.get()
def process_entity(self, entity, retry_count):
"""Process an entity and returns a tuple (process_next, countdown). If
process_next is True, a new task is added to process the next entity.
"""
return (False, 0)
| # -*- coding: utf-8 -*-
"""
tipfy.ext.tasks
~~~~~~~~~~~~~~~
Task queue utilities extension.
:copyright: 2009 by tipfy.org.
:license: BSD, see LICENSE.txt for more details.
"""
import logging
from google.appengine.ext import db
from google.appengine.api.labs import taskqueue
from google.appengine.ext.deferred import run, PermanentTaskFailure
from tipfy import local, url_for, RequestHandler
class DeferredHandler(RequestHandler):
"""A handler class that processes deferred tasks invocations, mirrored from
`google.appengine.ext.deferred`. Map to this handler if you want to use the
deferred package running on the same WSGI application as other handlers.
Tipfy utilities will then be available to be used in the deferred function.
The setup for app.yaml is:
- url: /_ah/queue/deferred
script: main.py
login: admin
The URL rule for urls.py is:
Rule('/_ah/queue/deferred', endpoint='tasks/deferred',
handler='tipfy.ext.tasks:DeferredHandler')
"""
def post(self):
headers = ['%s:%s' % (k, v) for k, v in local.request.headers.items()
if k.lower().startswith('x-appengine-')]
logging.info(', '.join(headers))
try:
run(local.request.data)
except PermanentTaskFailure, e:
logging.exception('Permanent failure attempting to execute task')
return local.response
class EntityTaskHandler(RequestHandler):
"""A base class to process all entities in single datastore kind, using
the task queue. On each request, an entity is processed and a new task is
added to process the next entity.
For example, to process all 'MyModel' records:
class MyModelTasks(EntityTaskHandler):
model = MyModel
endpoint = 'tasks/mymodel'
def process_entity(self, entity, retry_count):
# do something with current entity...
# ...
# Return True to process next, using 0 as countdown.
return (True, 0)
A couple of URL rules with a 'key' argument are required:
Rule('/_tasks/process-mymodel/', endpoint='tasks/mymodel',
handler='somemodule.MyModelTasks')
Rule('/_tasks/process-mymodel/<string:key>', endpoint='tasks/mymodel',
handler='somemodule.MyModelTasks')
"""
model = None
endpoint = None
def get(self, **kwargs):
return self.post(**kwargs)
def post(self, **kwargs):
if self.model is None or self.endpoint is None:
raise ValueError('Model or endpoint is not defined.')
entity = self.get_entity(kwargs.get('key', None))
if not entity:
logging.info('Finished all %s entities!' % self.model.__class__)
return local.response
# Process current entity.
logging.info('Processing %s from %s' % (str(entity.key()),
self.model.__class__))
retry_count = int(local.request.headers.get(
'X-AppEngine-TaskRetryCount', 0))
current_key = str(entity.key())
process_next, countdown = self.process_entity(entity, retry_count)
if process_next is True:
# Process next entity.
taskqueue.add(url=url_for(self.endpoint, key=current_key),
countdown=countdown)
return local.response
def get_entity(self, key):
query = self.model.all().order('__key__')
if key:
query.filter('__key__ >', db.Key(key))
return query.get()
def process_entity(self, entity, retry_count):
"""Process an entity and returns a tuple (process_next, countdown). If
process_next is True, a new task is added to process the next entity.
"""
return (False, 0)
| Python | 0 |
4b2b59bb3676afd262596425372a4ce0053ba416 | Improve formatting in replication script. | couchdb/tools/replicate.py | couchdb/tools/replicate.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Maximillian Dornseif <md@hudora.de>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""
This script replicates databases from one CouchDB server to an other.
This is mainly for backup purposes or "priming" a new server before
setting up trigger based replication. But you can also use the
'--continuous' option to set up automatic replication on newer
CouchDB versions.
Use 'python replicate.py --help' to get more detailed usage
instructions.
Be careful when using 127.0.0.1 as the source-server or target-server.
With pull replication you can use 127.0.0.1 on the target-server.
With push replication you can use 127.0.0.1 on the source-server.
But I suggest you always use Fully Qualified domain names.
"""
import couchdb.client
import optparse
import sys
import time
def compact(server, dbnames):
for dbname in dbnames:
sys.stdout.flush()
db = server[dbname]
db.resource.post('_compact')
def main():
usage = '%prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--source-server',
action='store',
dest='source_url',
help='the url of the server to replicate from')
parser.add_option('--target-server',
action='store',
dest='target_url',
default="http://127.0.0.1:5984",
help='the url of the server to replicate to [%default]')
parser.add_option('--database',
action='append',
dest='dbnames',
help='Database to replicate. Can be given more than once. [all databases]')
parser.add_option('--no-target-compaction',
action='store_false',
dest='compact_target',
help='do not start compaction of target after replications')
parser.add_option('--continuous',
action='store_true',
dest='continuous',
help='trigger continuous replication in cochdb')
parser.add_option('--push',
action='store_true',
help='use push instead of pull replication')
options, args = parser.parse_args()
if not options.target_url or (not options.source_url):
parser.error("Need at least --source-server and --target-server")
sys.exit(1)
if not options.source_url.endswith('/'):
options.source_url = options.source_url + '/'
if not options.target_url.endswith('/'):
options.target_url = options.target_url + '/'
source_server = couchdb.client.Server(options.source_url)
target_server = couchdb.client.Server(options.target_url)
if not options.dbnames:
dbnames = sorted(i for i in source_server)
else:
dbnames = options.dbnames
targetdbs = sorted(i for i in target_server)
for dbname in sorted(dbnames, reverse=True):
start = time.time()
print dbname,
sys.stdout.flush()
if dbname not in targetdbs:
target_server.create(dbname)
print "created",
sys.stdout.flush()
body = {}
if options.continuous:
body['continuous'] = True
if options.push:
body.update({'source': dbname, 'target': '%s%s' % (options.target_url, dbname)})
source_server.resource.post('_replicate', body)
else:
# pull seems to be more reliable than push
body.update({'source': '%s%s' % (options.source_url, dbname), 'target': dbname})
target_server.resource.post('_replicate', body)
print '%.1fs' % (time.time() - start)
if options.compact_target:
compact(target_server, dbnames)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Maximillian Dornseif <md@hudora.de>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
"""
This script replicates databases from one CouchDB server to an other.
This is mainly for backup purposes or "priming" a new server before
setting up trigger based replication. But you can also use the
'--continuous' option to set up automatic replication on newer
CouchDB versions.
Use 'python replicate.py --help' to get more detailed usage
instructions.
Be careful when using 127.0.0.1 as the source-server or target-server.
With pull replication you can use 127.0.0.1 on the target-server.
With push replication you can use 127.0.0.1 on the source-server.
But I suggest you always use Fully Qualified domain names.
"""
import couchdb.client
import optparse
import sys
import time
def compact(server, dbnames):
for dbname in dbnames:
sys.stdout.flush()
db = server[dbname]
db.resource.post('_compact')
def main():
usage = '%prog [options]'
parser = optparse.OptionParser(usage=usage)
parser.add_option('--source-server',
action='store',
dest='source_url',
help='the url of the server to replicate from')
parser.add_option('--target-server',
action='store',
dest='target_url',
default="http://127.0.0.1:5984",
help='the url of the server to replicate to [%default]')
parser.add_option('--database',
action='append',
dest='dbnames',
help='Database to replicate. Can be given more than once. [all databases]')
parser.add_option('--no-target-compaction',
action='store_false',
dest='compact_target',
help='do not start compaction of target after replications')
parser.add_option('--continuous',
action='store_true',
dest='continuous',
help='trigger continuous replication in cochdb')
parser.add_option('--push',
action='store_true',
help='use push instead of pull replication')
options, args = parser.parse_args()
if not options.target_url or (not options.source_url):
parser.error("Need at least --source-server and --target-server")
sys.exit(1)
if not options.source_url.endswith('/'):
options.source_url = options.source_url + '/'
if not options.target_url.endswith('/'):
options.target_url = options.target_url + '/'
source_server = couchdb.client.Server(options.source_url)
target_server = couchdb.client.Server(options.target_url)
if not options.dbnames:
dbnames = sorted(i for i in source_server)
else:
dbnames = options.dbnames
targetdbs = sorted(i for i in target_server)
for dbname in sorted(dbnames, reverse=True):
start = time.time()
print dbname,
sys.stdout.flush()
if dbname not in targetdbs:
target_server.create(dbname)
print "created",
sys.stdout.flush()
body = {}
if options.continuous:
body['continuous'] = True
if options.push:
body.update({'source': dbname, 'target': '%s%s' % (options.target_url, dbname)})
source_server.resource.post('_replicate', body)
else:
# pull seems to be more reliable than push
body.update({'source': '%s%s' % (options.source_url, dbname), 'target': dbname})
target_server.resource.post('_replicate', body)
print "%.1f s" % (time.time() - start)
if options.compact_target:
compact(target_server, dbnames)
if __name__ == '__main__':
main()
| Python | 0 |
6cbec939130ba8e17969e8d13b35765f9683b692 | add exception 2017/06/06 | crawler/tools/MysqlBase.py | crawler/tools/MysqlBase.py | #-*- encoding:UTF-8 -*-
import urllib2
import re
import StringIO
import gzip
import logging
import sqlite3
import logutils
import urllib
import sys
import MySQLdb
reload(sys)
sys.setdefaultencoding('utf8')
class MysqlBase:
def __init__(self,dbname):
self.conn=None
self.reconn=False
self.dbname=dbname
self.connect()
def connect(self):
if self.conn is not None:
self.conn.close()
self.conn=MySQLdb.connect(host='localhost',user='root',passwd='',port=3306)
self.conn.select_db(self.dbname)
self.conn.set_character_set('utf8')
def reconnect(self):
if self.reconn:
self.connect()
def execute(self,sql):
try:
self.reconnect()
cur=self.conn.cursor()
cur.execute(sql)
self.conn.commit()
cur.close()
except Exception,e:
print 'db failed, reason:%s' % str(e)
return None
def query(self,sql,value=None):
self.reconnect()
cur=self.conn.cursor()
if value is None:
cur.execute(sql)
else:
cur.execute(sql,value)
alldata = cur.fetchall()
cur.close()
return alldata
def insert(self,sql,value):
values=[]
values.append(value)
self.multi_insert(sql,values)
def multi_insert(self,sql,values):
try:
self.reconnect()
cur=self.conn.cursor()
cur.executemany(sql,values)
self.conn.commit()
cur.close()
except Exception,e:
print 'db failed, reason:%s' % str(e)
return None
def multi_insert_test(self):
values=[]
for i in range(20):
values.append((i,'hi rollen'+str(i)))
self.multi_insert('insert into test values(%s,%s)',values)
| #-*- encoding:UTF-8 -*-
import urllib2
import re
import StringIO
import gzip
import logging
import sqlite3
import logutils
import urllib
import sys
import MySQLdb
reload(sys)
sys.setdefaultencoding('utf8')
class MysqlBase:
def __init__(self,dbname):
self.conn=None
self.reconn=False
self.dbname=dbname
self.connect()
def connect(self):
if self.conn is not None:
self.conn.close()
self.conn=MySQLdb.connect(host='localhost',user='root',passwd='',port=3306)
self.conn.select_db(self.dbname)
self.conn.set_character_set('utf8')
def reconnect(self):
if self.reconn:
self.connect()
def execute(self,sql):
self.reconnect()
cur=self.conn.cursor()
cur.execute(sql)
self.conn.commit()
cur.close()
def query(self,sql,value=None):
self.reconnect()
cur=self.conn.cursor()
if value is None:
cur.execute(sql)
else:
cur.execute(sql,value)
alldata = cur.fetchall()
cur.close()
return alldata
def insert(self,sql,value):
values=[]
values.append(value)
self.multi_insert(sql,values)
def multi_insert(self,sql,values):
self.reconnect()
cur=self.conn.cursor()
cur.executemany(sql,values)
self.conn.commit()
cur.close()
def multi_insert_test(self):
values=[]
for i in range(20):
values.append((i,'hi rollen'+str(i)))
self.multi_insert('insert into test values(%s,%s)',values)
| Python | 0 |
1ceef7205121141cf3c01826a1bb5d01013e74db | clean cruft | ymir/data.py | ymir/data.py | # -*- coding: utf-8 -*-
""" ymir.data
"""
from fabric.colors import green
STATUS_DEAD = ['terminated', 'shutting-down']
OK = green(' ok')
| # -*- coding: utf-8 -*-
""" ymir.data
"""
from fabric.colors import green
DEFAULT_SUPERVISOR_PORT = 9001 # supervisor WUI port
STATUS_DEAD = ['terminated', 'shutting-down']
OK = green(' ok')
| Python | 0 |
20c61a39b0f2bc35eabc41f519732e2706c6f59c | test domain is uuid | corehq/apps/data_dictionary/tests/test_util.py | corehq/apps/data_dictionary/tests/test_util.py | import uuid
from django.test import TestCase
from mock import patch
from corehq.apps.data_dictionary.models import CaseType, CaseProperty
from corehq.apps.data_dictionary.util import generate_data_dictionary
class GenerateDictionaryTest(TestCase):
domain = uuid.uuid4()
def tearDown(self):
CaseType.objects.filter(domain=self.domain).delete()
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_no_types(self, mock):
mock.return_value = {}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_empty_type(self, mock):
mock.return_value = {'': ['prop']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_no_properties(self, mock):
mock.return_value = {'type': []}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_one_type(self, mock):
mock.return_value = {'type': ['property']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 1)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_two_types(self, mock):
mock.return_value = {'type': ['property'], 'type2': ['property']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 2)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_two_properties(self, mock):
mock.return_value = {'type': ['property', 'property2']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
| from django.test import TestCase
from mock import patch
from corehq.apps.data_dictionary.models import CaseType, CaseProperty
from corehq.apps.data_dictionary.util import generate_data_dictionary
class GenerateDictionaryTest(TestCase):
domain = 'data-dictionary'
def tearDown(self):
CaseType.objects.filter(domain=self.domain).delete()
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_no_types(self, mock):
mock.return_value = {}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_empty_type(self, mock):
mock.return_value = {'': ['prop']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 0)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_no_properties(self, mock):
mock.return_value = {'type': []}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 0)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_one_type(self, mock):
mock.return_value = {'type': ['property']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 1)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_two_types(self, mock):
mock.return_value = {'type': ['property'], 'type2': ['property']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 2)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
@patch('corehq.apps.data_dictionary.util._get_all_case_properties')
def test_two_properties(self, mock):
mock.return_value = {'type': ['property', 'property2']}
generate_data_dictionary(self.domain)
self.assertEqual(CaseType.objects.filter(domain=self.domain).count(), 1)
self.assertEqual(CaseProperty.objects.filter(case_type__domain=self.domain).count(), 2)
| Python | 0.999975 |
863a0c00c6fd00d06e8d62d35506d9faae42e8d8 | use templates manipulation to add the link to open the pad form, rather than using JS | addons/piratepad/editors.py | addons/piratepad/editors.py | # -*- coding: utf-8 -*-
from openobject.widgets import JSLink
import openobject.templating
class SidebarTemplateEditor(openobject.templating.TemplateEditor):
templates = ['/openerp/widgets/templates/sidebar.mako']
ADD_ATTACHMENT_BUTTON = u'id="add-attachment"'
BINARY_ATTACHMENTS_FORM = u'<form id="attachment-box"'
def insert_pad_link(self, output):
# Insert the link on the line right after the link to open the
# attachment form
form_opener_insertion = output.index(
'\n',
output.index(self.ADD_ATTACHMENT_BUTTON)) + 1
output = output[:form_opener_insertion] + \
'<a href="#" id="add-pad" class="button-a">${_("Pad")}</a>\n' + \
output[form_opener_insertion:]
return output
def edit(self, template, template_text):
output = super(SidebarTemplateEditor, self).edit(template, template_text)
output = self.insert_pad_link(output)
form_insertion_point = output.index(self.BINARY_ATTACHMENTS_FORM)
return output[:form_insertion_point] + '''
<form id="pad-box" action="/piratepad/link" method="post">
<label for="sidebar_pad_datas">${_("Name")}:</label>
<table width="100%">
<tr>
<td width=60% style="padding-right:8px;">
<input type="text" id="sidebar_pad_datas" class="binary"
name="pad_name" kind="url" size="5" />
</td>
<td>
<a href="#" id="sidebar_pad_ok" class="button-a">${_("Ok")}</a>
</td>
</tr>
</table>
</form>
<script type="text/javascript">
jQuery(document).ready(function() {
var padForm = jQuery('#pad-box').hide();
jQuery('#sidebar_pad_ok').bind('click', function(){
padForm.submit();
});
jQuery('#add-pad').click(function(e){
padForm.show();
e.preventDefault();
});
padForm.bind({
submit: createAttachment
});
});
</script>
''' + output[form_insertion_point:]
| # -*- coding: utf-8 -*-
from openobject.widgets import JSLink
import openobject.templating
class SidebarTemplateEditor(openobject.templating.TemplateEditor):
templates = ['/openerp/widgets/templates/sidebar.mako']
BINARY_ATTACHMENTS_FORM = u'<form id="attachment-box"'
def edit(self, template, template_text):
output = super(SidebarTemplateEditor, self).edit(template, template_text)
insertion_point = output.index(self.BINARY_ATTACHMENTS_FORM)
return output[:insertion_point] + '''
<form id="pad-box" action="/piratepad/link" method="post">
<label for="sidebar_pad_datas">${_("Name")}:</label>
<table width="100%">
<tr>
<td width=60% style="padding-right:8px;">
<input type="text" id="sidebar_pad_datas" class="binary"
name="pad_name" kind="url" size="5" />
</td>
<td>
<a href="#" id="sidebar_pad_ok" class="button-a">${_("Ok")}</a>
</td>
</tr>
</table>
</form>
<script type="text/javascript">
jQuery(document).ready(function() {
var padForm = jQuery('#pad-box').hide();
jQuery('#attachments').prev().append(
jQuery('<a>',{
'href': '#',
'id': 'add-pad',
'class': 'button-a',
}).text('${_("Pad")}')
);
jQuery('#sidebar_pad_ok').bind('click', function(){
padForm.submit();
});
jQuery('#add-pad').click(function(e){
padForm.show();
e.preventDefault();
});
padForm.bind({
submit: createAttachment
});
});
</script>
''' + output[insertion_point:]
| Python | 0 |
1ade506f5408cbbe099bb83bd701472137470618 | Add extra version of py-contextlib2 (#15322) | var/spack/repos/builtin/packages/py-contextlib2/package.py | var/spack/repos/builtin/packages/py-contextlib2/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyContextlib2(PythonPackage):
"""contextlib2 is a backport of the standard library's contextlib module to
earlier Python versions."""
homepage = "https://contextlib2.readthedocs.io/en/stable/"
url = "https://github.com/jazzband/contextlib2/archive/v0.6.0.tar.gz"
version('0.6.0', sha256='4f18e2f28bb642aae9447aacec93b1319c8ee838711553c0a2bd906753f2ad33')
version('0.5.5', sha256='613569263db0271f34c8484792360272a731f2185567c31c8118e9c994412170')
depends_on('py-setuptools', type='build')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyContextlib2(PythonPackage):
"""contextlib2 is a backport of the standard library's contextlib module to
earlier Python versions."""
homepage = "https://contextlib2.readthedocs.io/en/stable/"
url = "https://github.com/jazzband/contextlib2/archive/v0.6.0.tar.gz"
version('0.6.0', sha256='4f18e2f28bb642aae9447aacec93b1319c8ee838711553c0a2bd906753f2ad33')
depends_on('py-setuptools', type='build')
depends_on('python@2.7:2.8,3.4:', type=('build', 'run'))
| Python | 0 |
71615632defe37681d1257912ea03f6e1cdeffde | add v1.1-3 (#20923) | var/spack/repos/builtin/packages/r-fitdistrplus/package.py | var/spack/repos/builtin/packages/r-fitdistrplus/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFitdistrplus(RPackage):
"""Help to Fit of a Parametric Distribution to Non-Censored or Censored Data
Extends the fitdistr() function (of the MASS package) with several
functions to help the fit of a parametric distribution to non-censored or
censored data. Censored data may contain left censored, right censored and
interval censored values, with several lower and upper bounds. In addition
to maximum likelihood estimation (MLE), the package provides moment
matching (MME), quantile matching (QME) and maximum goodness-of-fit
estimation (MGE) methods (available only for non-censored data). Weighted
versions of MLE, MME and QME are available. See e.g. Casella & Berger
(2002). Statistical inference. Pacific Grove."""
homepage = "https://lbbe.univ-lyon1.fr/fitdistrplus.html"
url = "https://cloud.r-project.org/src/contrib/fitdistrplus_1.0-14.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/fitdistrplus"
version('1.1-3', sha256='776d5456e14398e44b78b3d7db526559bb7a3537e546a29c88aa192141c756de')
version('1.0-14', sha256='85082590f62aa08d99048ea3414c5cc1e5b780d97b3779d2397c6cb435470083')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-npsurv', when='@:1.0-14', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RFitdistrplus(RPackage):
"""Extends the fitdistr() function (of the MASS package) with several
functions to help the fit of a parametric distribution to non-censored or
censored data. Censored data may contain left censored, right censored and
interval censored values, with several lower and upper bounds. In addition
to maximum likelihood estimation (MLE), the package provides moment
matching (MME), quantile matching (QME) and maximum goodness-of-fit
estimation (MGE) methods (available only for non-censored data). Weighted
versions of MLE, MME and QME are available. See e.g. Casella & Berger
(2002). Statistical inference. Pacific Grove."""
homepage = "https://lbbe.univ-lyon1.fr/fitdistrplus.html"
url = "https://cloud.r-project.org/src/contrib/fitdistrplus_1.0-14.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/fitdistrplus"
version('1.0-14', sha256='85082590f62aa08d99048ea3414c5cc1e5b780d97b3779d2397c6cb435470083')
depends_on('r@3.2.0:', type=('build', 'run'))
depends_on('r-mass', type=('build', 'run'))
depends_on('r-survival', type=('build', 'run'))
depends_on('r-npsurv', type=('build', 'run'))
| Python | 0 |
a7a10cf40c781a8c0cb4e96bdd7077d6cbe8afce | fix batching of changes in doctypemigration continuous | corehq/doctypemigrations/continuous_migrate.py | corehq/doctypemigrations/continuous_migrate.py | import datetime
from corehq.util.couch import IterDB
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
import logging
def filter_doc_ids_by_doc_type(db, doc_ids, doc_types):
for doc_ids_chunk in chunked(doc_ids, 100):
keys = [[doc_type, doc_id]
for doc_id in doc_ids_chunk
for doc_type in doc_types]
results = db.view('all_docs/by_doc_type', keys=keys, reduce=False)
for result in results:
yield result['id']
def copy_docs(source_db, target_db, doc_ids):
"""
copy docs from source_db to target_db
by doc_id
"""
with IterDB(target_db, new_edits=False) as iter_db:
for doc in iter_docs(source_db, doc_ids, attachments=True):
iter_db.save(doc)
if iter_db.errors_by_type:
logging.error('errors bulk saving in copy_docs: {!r}'
.format(iter_db.errors_by_type))
def delete_docs(target_db, doc_id_rev_pairs):
"""
delete docs from database by doc _id and _rev
"""
with IterDB(target_db, new_edits=False) as iter_db:
for doc_id, doc_rev in doc_id_rev_pairs:
iter_db.delete({'_id': doc_id, '_rev': doc_rev})
if iter_db.errors_by_type:
logging.error('errors bulk saving in delete_docs: {!r}'
.format(iter_db.errors_by_type))
class ContinuousReplicator(object):
def __init__(self, source_db, target_db, doc_types,
max_changes_before_commit=100,
max_time_before_commit=datetime.timedelta(seconds=5)):
self.source_db = source_db
self.target_db = target_db
self.doc_types = doc_types
self.max_changes_before_commit = max_changes_before_commit
self.max_time_before_commit = max_time_before_commit
self._ids_to_save = None
self._id_rev_pairs_to_delete = None
self._reset()
def _reset(self):
self._last_commit_time = datetime.datetime.utcnow()
self._uncommitted_changes_count = 0
self._ids_to_save = set()
self._id_rev_pairs_to_delete = set()
def replicate_change(self, change):
if change.deleted:
self._id_rev_pairs_to_delete.add((change.id, change.rev))
else:
self._ids_to_save.add(change.id)
self._uncommitted_changes_count += 1
def commit(self):
ids_to_save = filter_doc_ids_by_doc_type(
self.source_db, self._ids_to_save, self.doc_types)
copy_docs(self.source_db, self.target_db, ids_to_save)
delete_docs(self.target_db, self._id_rev_pairs_to_delete)
self._reset()
def _get_time_since_last_commit(self):
return datetime.datetime.utcnow() - self._last_commit_time
def should_commit(self):
return (self._uncommitted_changes_count > self.max_changes_before_commit or
self._get_time_since_last_commit > self.max_time_before_commit)
| import datetime
from corehq.util.couch import IterDB
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.chunked import chunked
import logging
def filter_doc_ids_by_doc_type(db, doc_ids, doc_types):
for doc_ids_chunk in chunked(doc_ids, 100):
keys = [[doc_type, doc_id]
for doc_id in doc_ids_chunk
for doc_type in doc_types]
results = db.view('all_docs/by_doc_type', keys=keys, reduce=False)
for result in results:
yield result['id']
def copy_docs(source_db, target_db, doc_ids):
"""
copy docs from source_db to target_db
by doc_id
"""
with IterDB(target_db, new_edits=False) as iter_db:
for doc in iter_docs(source_db, doc_ids, attachments=True):
iter_db.save(doc)
if iter_db.errors_by_type:
logging.error('errors bulk saving in copy_docs: {!r}'
.format(iter_db.errors_by_type))
def delete_docs(target_db, doc_id_rev_pairs):
"""
delete docs from database by doc _id and _rev
"""
with IterDB(target_db, new_edits=False) as iter_db:
for doc_id, doc_rev in doc_id_rev_pairs:
iter_db.delete({'_id': doc_id, '_rev': doc_rev})
if iter_db.errors_by_type:
logging.error('errors bulk saving in delete_docs: {!r}'
.format(iter_db.errors_by_type))
class ContinuousReplicator(object):
def __init__(self, source_db, target_db, doc_types,
max_changes_before_commit=10000,
max_time_before_commit=datetime.timedelta(hours=1)):
self.source_db = source_db
self.target_db = target_db
self.doc_types = doc_types
self.max_changes_before_commit = max_changes_before_commit
self.max_time_before_commit = max_time_before_commit
self._ids_to_save = None
self._id_rev_pairs_to_delete = None
self._reset()
def _reset(self):
self._last_commit_time = datetime.datetime.utcnow()
self._uncommitted_changes_count = 0
self._ids_to_save = set()
self._id_rev_pairs_to_delete = set()
def replicate_change(self, change):
if change.deleted:
self._id_rev_pairs_to_delete.add((change.id, change.rev))
else:
self._ids_to_save.add(change.id)
self._uncommitted_changes_count += 1
def commit(self):
ids_to_save = filter_doc_ids_by_doc_type(
self.source_db, self._ids_to_save, self.doc_types)
copy_docs(self.source_db, self.target_db, ids_to_save)
delete_docs(self.target_db, self._id_rev_pairs_to_delete)
self._reset()
def _get_time_since_last_commit(self):
return datetime.datetime.utcnow() - self._last_commit_time
def should_commit(self):
return (self._uncommitted_changes_count < self.max_changes_before_commit or
self._get_time_since_last_commit < self.max_time_before_commit)
| Python | 0 |
e71161e8a42bdb4643995dec186d945c49d292eb | fix pylint issues | unit_tests/lib/events_utils.py | unit_tests/lib/events_utils.py | # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
import time
import shutil
import tempfile
import unittest.mock
from contextlib import contextmanager
from sdcm.sct_events.setup import EVENTS_DEVICE_START_DELAY, start_events_device, stop_events_device
from sdcm.sct_events.events_device import start_events_main_device, get_events_main_device
from sdcm.sct_events.file_logger import get_events_logger
from sdcm.sct_events.events_processes import EventsProcessesRegistry
class EventsUtilsMixin:
temp_dir = None
events_processes_registry = None
events_processes_registry_patcher = None
events_main_device = None
@classmethod
def setup_events_processes(cls, events_device: bool, events_main_device: bool, registry_patcher: bool):
"""TestConfig own copy of Events Device machinery."""
cls.temp_dir = tempfile.mkdtemp()
cls.events_processes_registry = EventsProcessesRegistry(log_dir=cls.temp_dir)
if registry_patcher:
cls.events_processes_registry_patcher = \
unittest.mock.patch("sdcm.sct_events.base.SctEvent._events_processes_registry",
cls.events_processes_registry)
cls.events_processes_registry_patcher.start()
if events_device:
start_events_device(_registry=cls.events_processes_registry)
elif events_main_device:
start_events_main_device(_registry=cls.events_processes_registry)
time.sleep(EVENTS_DEVICE_START_DELAY)
cls.events_main_device = get_events_main_device(_registry=cls.events_processes_registry)
@classmethod
def teardown_events_processes(cls):
stop_events_device(_registry=cls.events_processes_registry)
if cls.events_processes_registry_patcher:
cls.events_processes_registry_patcher.stop()
shutil.rmtree(cls.temp_dir)
@contextmanager
def wait_for_n_events(self, subscriber, count: int, timeout: float = 1, # pylint: disable=no-self-use
last_event_processing_delay: float = 1):
last_event_n = subscriber.events_counter + count
end_time = time.perf_counter() + timeout
yield
while time.perf_counter() < end_time and subscriber.events_counter < last_event_n:
time.sleep(0.1)
assert last_event_n <= subscriber.events_counter, \
f"Subscriber {subscriber} didn't receive {count} events in {timeout} seconds"
# Give a chance to the subscriber to handle last event received.
time.sleep(last_event_processing_delay)
@classmethod
def get_events_logger(cls):
return get_events_logger(_registry=cls.events_processes_registry)
@classmethod
def get_raw_events_log(cls):
return get_events_main_device(_registry=cls.events_processes_registry).raw_events_log
| # This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright (c) 2020 ScyllaDB
import time
import shutil
import tempfile
import unittest.mock
from contextlib import contextmanager
from sdcm.sct_events.setup import EVENTS_DEVICE_START_DELAY, start_events_device, stop_events_device
from sdcm.sct_events.events_device import start_events_main_device, get_events_main_device
from sdcm.sct_events.file_logger import get_events_logger
from sdcm.sct_events.events_processes import EventsProcessesRegistry
class EventsUtilsMixin:
temp_dir = None
events_processes_registry = None
events_processes_registry_patcher = None
events_main_device = None
@classmethod
def setup_events_processes(cls, events_device: bool, events_main_device: bool, registry_patcher: bool):
"""TestConfig own copy of Events Device machinery."""
cls.temp_dir = tempfile.mkdtemp()
cls.events_processes_registry = EventsProcessesRegistry(log_dir=cls.temp_dir)
if registry_patcher:
cls.events_processes_registry_patcher = \
unittest.mock.patch("sdcm.sct_events.base.SctEvent._events_processes_registry",
cls.events_processes_registry)
cls.events_processes_registry_patcher.start()
if events_device:
start_events_device(_registry=cls.events_processes_registry)
elif events_main_device:
start_events_main_device(_registry=cls.events_processes_registry)
time.sleep(EVENTS_DEVICE_START_DELAY)
cls.events_main_device = get_events_main_device(_registry=cls.events_processes_registry)
@classmethod
def teardown_events_processes(cls):
stop_events_device(_registry=cls.events_processes_registry)
if cls.events_processes_registry_patcher:
cls.events_processes_registry_patcher.stop()
shutil.rmtree(cls.temp_dir)
@contextmanager
def wait_for_n_events(self, subscriber, count: int, timeout: float = 1, last_event_processing_delay: float = 1):
last_event_n = subscriber.events_counter + count
end_time = time.perf_counter() + timeout
yield
while time.perf_counter() < end_time and subscriber.events_counter < last_event_n:
time.sleep(0.1)
assert last_event_n <= subscriber.events_counter, \
f"Subscriber {subscriber} didn't receive {count} events in {timeout} seconds"
# Give a chance to the subscriber to handle last event received.
time.sleep(last_event_processing_delay)
@classmethod
def get_events_logger(cls):
return get_events_logger(_registry=cls.events_processes_registry)
@classmethod
def get_raw_events_log(cls):
return get_events_main_device(_registry=cls.events_processes_registry).raw_events_log
| Python | 0.000002 |
8c29892ab43faebc8ee18ece4beedabc13ca5a72 | Use optional return value for linenumber | lib/ansiblelint/rules/__init__.py | lib/ansiblelint/rules/__init__.py | """All internal ansible-lint rules."""
import re
from ansiblelint.skip_utils import get_rule_skips_from_line
from ansiblelint.skip_utils import append_skipped_rules
from ansiblelint.errors import Match
import ansiblelint.utils
class AnsibleLintRule(object):
def __repr__(self):
"""Return a AnsibleLintRule instance representation."""
return self.id + ": " + self.shortdesc
def verbose(self):
return self.id + ": " + self.shortdesc + "\n " + self.description
match = None
matchtask = None
matchplay = None
@staticmethod
def unjinja(text):
return re.sub(r"{{[^}]*}}", "JINJA_VAR", text)
def matchlines(self, file, text):
matches = []
if not self.match:
return matches
# arrays are 0-based, line numbers are 1-based
# so use prev_line_no as the counter
for (prev_line_no, line) in enumerate(text.split("\n")):
if line.lstrip().startswith('#'):
continue
rule_id_list = get_rule_skips_from_line(line)
if self.id in rule_id_list:
continue
result = self.match(file, line)
if not result:
continue
message = None
if isinstance(result, str):
message = result
matches.append(Match(prev_line_no + 1, line,
file['path'], self, message))
return matches
def matchtasks(self, file, text):
matches = []
if not self.matchtask:
return matches
if file['type'] == 'meta':
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path'])
if not yaml:
return matches
yaml = append_skipped_rules(yaml, text, file['type'])
for task in ansiblelint.utils.get_normalized_tasks(yaml, file):
if self.id in task.get('skipped_rules', ()):
continue
if 'action' not in task:
continue
result = self.matchtask(file, task)
if not result:
continue
message = None
if isinstance(result, str):
message = result
task_msg = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
matches.append(Match(task[ansiblelint.utils.LINE_NUMBER_KEY], task_msg,
file['path'], self, message))
return matches
def _unpack_result(self, play, result):
linenumber = play[ansiblelint.utils.LINE_NUMBER_KEY]
if len(result) == 2:
section, message = result
else:
section, linenumber, message = result
return section, linenumber, message
@staticmethod
def _matchplay_linenumber(play, optional_linenumber):
try:
linenumber, = optional_linenumber
except:
linenumber = play[ansiblelint.utils.LINE_NUMBER_KEY]
return linenumber
def matchyaml(self, file, text):
matches = []
if not self.matchplay:
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path'])
if not yaml:
return matches
if isinstance(yaml, dict):
yaml = [yaml]
yaml = ansiblelint.skip_utils.append_skipped_rules(yaml, text, file['type'])
for play in yaml:
if self.id in play.get('skipped_rules', ()):
continue
result = self.matchplay(file, play)
if not result:
continue
if isinstance(result, tuple):
result = [result]
if not isinstance(result, list):
raise TypeError("{} is not a list".format(result))
for section, message, *optional_linenumber in result:
linenumber = self._matchplay_linenumber(play, optional_linenumber)
matches.append(Match(linenumber,
section, file['path'], self, message))
return matches
| """All internal ansible-lint rules."""
import re
from ansiblelint.skip_utils import get_rule_skips_from_line
from ansiblelint.skip_utils import append_skipped_rules
from ansiblelint.errors import Match
import ansiblelint.utils
class AnsibleLintRule(object):
def __repr__(self):
"""Return a AnsibleLintRule instance representation."""
return self.id + ": " + self.shortdesc
def verbose(self):
return self.id + ": " + self.shortdesc + "\n " + self.description
match = None
matchtask = None
matchplay = None
@staticmethod
def unjinja(text):
return re.sub(r"{{[^}]*}}", "JINJA_VAR", text)
def matchlines(self, file, text):
matches = []
if not self.match:
return matches
# arrays are 0-based, line numbers are 1-based
# so use prev_line_no as the counter
for (prev_line_no, line) in enumerate(text.split("\n")):
if line.lstrip().startswith('#'):
continue
rule_id_list = get_rule_skips_from_line(line)
if self.id in rule_id_list:
continue
result = self.match(file, line)
if not result:
continue
message = None
if isinstance(result, str):
message = result
matches.append(Match(prev_line_no + 1, line,
file['path'], self, message))
return matches
def matchtasks(self, file, text):
matches = []
if not self.matchtask:
return matches
if file['type'] == 'meta':
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path'])
if not yaml:
return matches
yaml = append_skipped_rules(yaml, text, file['type'])
for task in ansiblelint.utils.get_normalized_tasks(yaml, file):
if self.id in task.get('skipped_rules', ()):
continue
if 'action' not in task:
continue
result = self.matchtask(file, task)
if not result:
continue
message = None
if isinstance(result, str):
message = result
task_msg = "Task/Handler: " + ansiblelint.utils.task_to_str(task)
matches.append(Match(task[ansiblelint.utils.LINE_NUMBER_KEY], task_msg,
file['path'], self, message))
return matches
def _unpack_result(self, play, result):
linenumber = play[ansiblelint.utils.LINE_NUMBER_KEY]
if len(result) == 2:
section, message = result
else:
section, linenumber, message = result
return section, linenumber, message
def matchyaml(self, file, text):
matches = []
if not self.matchplay:
return matches
yaml = ansiblelint.utils.parse_yaml_linenumbers(text, file['path'])
if not yaml:
return matches
if isinstance(yaml, dict):
yaml = [yaml]
yaml = ansiblelint.skip_utils.append_skipped_rules(yaml, text, file['type'])
for play in yaml:
if self.id in play.get('skipped_rules', ()):
continue
result = self.matchplay(file, play)
if not result:
continue
if isinstance(result, tuple):
result = [result]
if not isinstance(result, list):
raise TypeError("{} is not a list".format(result))
for match in result:
section, linenumber, message = self._unpack_result(play, match)
matches.append(Match(linenumber,
section, file['path'], self, message))
return matches
| Python | 0.000003 |
7ac27aa4d365d02d998c3f4c82bc740791a1b515 | Update script.py | HexChat/script.py | HexChat/script.py | from __future__ import print_function
import os
import sys
if sys.version_info[0] < 3:
import urllib as request
else:
import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
EDIT script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ('py', 'pl', 'lua', 'js') # tcl has no way to unload a single script?
addon_sites = ('http://raw.github.com/TingPing/plugins/master/HexChat/',
'http://raw.github.com/Arnavion/random/master/hexchat/',
'http://raw.github.com/Farow/hexchat-scripts/master/',
'http://orvp.net/xchat/')
def expand_script(script):
return os.path.join(addon_dir, script)
def download(script):
if script.partition('.')[2] not in addon_types:
print('Script: Not a valid script file type.')
return False
for site in addon_sites:
if request.urlopen(site + script).getcode() == 200:
print('Script: Downloading {}...'.format(script))
request.urlretrieve(site + script, expand_script(script))
return True
print('Script: Could not find {}'.format(script))
def script_cb(word, word_eol, userdata):
    """Handle the /script command: install, update, edit or remove an addon.

    word[1] is the sub-command, word[2] the script file name.
    """
    if len(word) > 2:
        cmd = word[1].lower()
        arg = word[2]
    else:
        hexchat.command('help script')
        return hexchat.EAT_ALL
    if cmd == 'install':
        if os.path.exists(expand_script(arg)):
            print('Script: {} is already installed.'.format(arg))
            return hexchat.EAT_ALL
        if download(arg):
            # Load shortly after the download has finished writing the file.
            hexchat.command('timer .5 load ' + expand_script(arg))
    elif cmd == 'update':
        if os.path.exists(expand_script(arg)) and download(arg):
            # NOTE(review): unload/load use the bare name here while install
            # uses the full path -- confirm both forms work in HexChat.
            hexchat.command('timer .5 unload ' + arg)
            hexchat.command('timer 1 load ' + arg)
    elif cmd == 'edit':
        # Delegate to the system URL handler to open the file.
        hexchat.command('url ' + expand_script(arg))
    elif cmd == 'remove':
        if arg == 'script.py':
            # Refuse to delete this manager script itself.
            print('Script: I refuse.')
            return hexchat.EAT_ALL
        if os.path.exists(expand_script(arg)):
            hexchat.command('unload ' + expand_script(arg))
            os.remove(expand_script(arg))
        else:
            print('Script: {} is not installed.'.format(arg))
    else:
        hexchat.command('help script')
    return hexchat.EAT_ALL
def unload_callback(userdata):
    """Announce that the plugin was unloaded (called by HexChat)."""
    print('{0} version {1} unloaded.'.format(__module_name__, __module_version__))
# Register the /script command with HexChat and announce load/unload.
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print(__module_name__, 'version', __module_version__, 'loaded.')
| from __future__ import print_function
import os
import sys
if sys.version_info[0] < 3:
import urllib as request
else:
import urllib.request as request
import hexchat
__module_name__ = 'Script'
__module_author__ = 'TingPing'
__module_version__ = '3'
__module_description__ = 'Download scripts'
script_help = 'Script: Valid commands are:\n \
INSTALL script\n \
EDIT script\n \
UPDATE script\n \
REMOVE script'
addon_dir = os.path.join(hexchat.get_info('configdir'), 'addons')
# Store as preference?
addon_types = ('py', 'pl', 'lua', 'js') # tcl has no way to unload a single script?
addon_sites = ('http://raw.github.com/TingPing/plugins/master/HexChat/',
'http://raw.github.com/Arnavion/random/master/hexchat/',
'http://orvp.net/xchat/')
def expand_script(script):
    """Map a bare script file name to its full path in the addon directory."""
    full_path = os.path.join(addon_dir, script)
    return full_path
def download(script):
    """Download *script* from the first addon site that serves it.

    Returns True on success, False when the file type is unsupported or no
    site hosts the script.
    """
    # NOTE: partition('.') keeps everything after the FIRST dot, so
    # "foo.user.js" yields "user.js" and is rejected -- presumably intended
    # for simple names; verify if multi-dot names should be supported.
    if script.partition('.')[2] not in addon_types:
        print('Script: Not a valid script file type.')
        return False

    for site in addon_sites:
        try:
            # urlopen raises (HTTPError/URLError, both IOError subclasses)
            # on 404s and connection failures; treat that as "not hosted
            # here" and try the next site instead of crashing.
            if request.urlopen(site + script).getcode() == 200:
                print('Script: Downloading {}...'.format(script))
                request.urlretrieve(site + script, expand_script(script))
                return True
        except IOError:
            continue

    print('Script: Could not find {}'.format(script))
    return False
def script_cb(word, word_eol, userdata):
    """Handle the /script command: install, update, edit or remove an addon.

    word[1] is the sub-command, word[2] the script file name.
    """
    if len(word) > 2:
        cmd = word[1].lower()
        arg = word[2]
    else:
        hexchat.command('help script')
        return hexchat.EAT_ALL
    if cmd == 'install':
        if os.path.exists(expand_script(arg)):
            print('Script: {} is already installed.'.format(arg))
            return hexchat.EAT_ALL
        if download(arg):
            # Load shortly after the download has finished writing the file.
            hexchat.command('timer .5 load ' + expand_script(arg))
    elif cmd == 'update':
        if os.path.exists(expand_script(arg)) and download(arg):
            # NOTE(review): unload/load use the bare name here while install
            # uses the full path -- confirm both forms work in HexChat.
            hexchat.command('timer .5 unload ' + arg)
            hexchat.command('timer 1 load ' + arg)
    elif cmd == 'edit':
        # Delegate to the system URL handler to open the file.
        hexchat.command('url ' + expand_script(arg))
    elif cmd == 'remove':
        if arg == 'script.py':
            # Refuse to delete this manager script itself.
            print('Script: I refuse.')
            return hexchat.EAT_ALL
        if os.path.exists(expand_script(arg)):
            hexchat.command('unload ' + expand_script(arg))
            os.remove(expand_script(arg))
        else:
            print('Script: {} is not installed.'.format(arg))
    else:
        hexchat.command('help script')
    return hexchat.EAT_ALL
def unload_callback(userdata):
    """Announce that the plugin was unloaded (called by HexChat)."""
    print('{0} version {1} unloaded.'.format(__module_name__, __module_version__))
# Register the /script command with HexChat and announce load/unload.
hexchat.hook_command('script', script_cb, help=script_help)
hexchat.hook_unload(unload_callback)
print(__module_name__, 'version', __module_version__, 'loaded.')
| Python | 0.000001 |
4a10ca9895b8cfd9996e9563494dfc8d5008a263 | Support reading a program from a file | bfinterpreter.py | bfinterpreter.py | #!/usr/bin/python3
class Tape:
    """A Brainfuck tape: integer cells, unbounded to the right."""

    def __init__(self):
        self.cells = [0]      # cell storage; grows on demand
        self.pointer = 0      # index of the current cell

    def inc_val(self):
        """Increment the current cell."""
        self.cells[self.pointer] += 1

    def dec_val(self):
        """Decrement the current cell."""
        self.cells[self.pointer] -= 1

    def move_right(self):
        """Move the head right, growing the tape when needed."""
        self.pointer += 1
        if self.pointer == len(self.cells):
            self.cells.append(0)

    def move_left(self):
        """Move the head left; the tape has a hard left edge."""
        if self.pointer == 0:
            # Fixed: 'Error' was undefined (NameError at runtime);
            # raise a real exception instead.
            raise IndexError("Cannot move past the start of the tape")
        self.pointer -= 1

    def get_val(self):
        """Return the current cell's value."""
        return self.cells[self.pointer]

    def print_val(self):
        """Write the current cell to stdout as a character."""
        print(chr(self.cells[self.pointer]), end="")
class Brainfuck:
    """Interpreter for Brainfuck programs operating on a Tape-like object."""

    def __init__(self, tape, program, allow_nested_loops = True, debug = False):
        self.tape = tape
        self.program = program
        self.pointer = 0                    # instruction pointer
        self.allow_nested_loops = allow_nested_loops
        self.debug = debug
        # Single-character ops that map directly onto tape methods.
        self.basic_ops = {
            "+" : self.tape.inc_val,
            "-" : self.tape.dec_val,
            ">" : self.tape.move_right,
            "<" : self.tape.move_left,
            "." : self.tape.print_val
        }

    def end_loop(self):
        """Skip the current loop: advance the pointer past its matching ']'."""
        nested_loop_count = 0
        while True:
            self.pointer += 1
            if self.program[self.pointer] == "]":
                if nested_loop_count == 0:
                    break
                else:
                    nested_loop_count -= 1
            elif self.program[self.pointer] == "[":
                nested_loop_count += 1
        # Fixed: step past the ']' itself.  Leaving the pointer on it made
        # run_program pop a loop that was never pushed, crashing (or jumping
        # to the wrong enclosing loop) whenever a loop was skipped.
        self.pointer += 1

    def run_program(self):
        """Execute the program until the instruction pointer runs off the end."""
        if self.debug == True:
            import time
        loop_pointers = []                  # stack of '[' positions
        program_length = len(self.program)
        while self.pointer < program_length:
            char = self.program[self.pointer]
            if char in self.basic_ops.keys():
                self.basic_ops[char]()
                self.pointer += 1
            elif char == "[":
                if self.tape.get_val() == 0:
                    if self.allow_nested_loops == True:
                        self.end_loop()
                    else:
                        # Fixed: jump past the ']' (was: onto it), see end_loop.
                        self.pointer = self.program.index("]", self.pointer) + 1
                else:
                    loop_pointers.append(self.pointer)
                    self.pointer += 1
            elif char == "]":
                loop_start = loop_pointers.pop()
                if self.tape.get_val() == 0:
                    self.pointer += 1
                else:
                    self.pointer = loop_start
            else:
                # Any other character is a comment / no-op.
                self.pointer += 1
            if self.debug == True and self.pointer < program_length:
                print(self.pointer, "\t", self.program[self.pointer], "\t", self.tape.pointer, "\t", self.tape.get_val())
                time.sleep(0.1)
if __name__ == "__main__":
    import sys
    def read_program_file(filename):
        # Read the whole program source as UTF-8 text.
        with open(filename, encoding="utf-8") as program_file:
            return program_file.read()
    def parse_bool(string):
        # Accept a few human-friendly spellings; None signals "unrecognised".
        if string == "true" or string == "y" or string == "yes":
            return True
        elif string == "false" or string == "n" or string == "no":
            return False
        else:
            return None
    program = ""
    allow_nested_loops = True
    debug = False
    # Minimal hand-rolled flag parsing: each option consumes the next argv
    # item (a trailing flag with no value raises IndexError).
    args = sys.argv[1:]
    for x, arg in enumerate(args):
        if arg == "--program":
            program = args[x + 1]
        elif arg == "--program-file":
            program = read_program_file(args[x + 1])
        elif arg == "--nested-loops":
            allow_nested_loops = parse_bool(args[x + 1])
        elif arg == "--debug":
            debug = parse_bool(args[x + 1])
    tape = Tape()
    brainfuck = Brainfuck(tape, program, allow_nested_loops, debug)
    brainfuck.run_program()
| #!/usr/bin/python3
class Tape:
    """A Brainfuck tape: integer cells, unbounded to the right."""

    def __init__(self):
        self.cells = [0]      # cell storage; grows on demand
        self.pointer = 0      # index of the current cell

    def inc_val(self):
        """Increment the current cell."""
        self.cells[self.pointer] += 1

    def dec_val(self):
        """Decrement the current cell."""
        self.cells[self.pointer] -= 1

    def move_right(self):
        """Move the head right, growing the tape when needed."""
        self.pointer += 1
        if self.pointer == len(self.cells):
            self.cells.append(0)

    def move_left(self):
        """Move the head left; the tape has a hard left edge."""
        if self.pointer == 0:
            # Fixed: 'Error' was undefined (NameError at runtime);
            # raise a real exception instead.
            raise IndexError("Cannot move past the start of the tape")
        self.pointer -= 1

    def get_val(self):
        """Return the current cell's value."""
        return self.cells[self.pointer]

    def print_val(self):
        """Write the current cell to stdout as a character."""
        print(chr(self.cells[self.pointer]), end="")
class Brainfuck:
    """Interpreter for Brainfuck programs operating on a Tape-like object."""

    def __init__(self, tape, program, allow_nested_loops = True, debug = False):
        self.tape = tape
        self.program = program
        self.pointer = 0                    # instruction pointer
        self.allow_nested_loops = allow_nested_loops
        self.debug = debug
        # Single-character ops that map directly onto tape methods.
        self.basic_ops = {
            "+" : self.tape.inc_val,
            "-" : self.tape.dec_val,
            ">" : self.tape.move_right,
            "<" : self.tape.move_left,
            "." : self.tape.print_val
        }

    def end_loop(self):
        """Skip the current loop: advance the pointer past its matching ']'."""
        nested_loop_count = 0
        while True:
            self.pointer += 1
            if self.program[self.pointer] == "]":
                if nested_loop_count == 0:
                    break
                else:
                    nested_loop_count -= 1
            elif self.program[self.pointer] == "[":
                nested_loop_count += 1
        # Fixed: step past the ']' itself.  Leaving the pointer on it made
        # run_program pop a loop that was never pushed, crashing (or jumping
        # to the wrong enclosing loop) whenever a loop was skipped.
        self.pointer += 1

    def run_program(self):
        """Execute the program until the instruction pointer runs off the end."""
        if self.debug == True:
            import time
        loop_pointers = []                  # stack of '[' positions
        program_length = len(self.program)
        while self.pointer < program_length:
            char = self.program[self.pointer]
            if char in self.basic_ops.keys():
                self.basic_ops[char]()
                self.pointer += 1
            elif char == "[":
                if self.tape.get_val() == 0:
                    if self.allow_nested_loops == True:
                        self.end_loop()
                    else:
                        # Fixed: jump past the ']' (was: onto it), see end_loop.
                        self.pointer = self.program.index("]", self.pointer) + 1
                else:
                    loop_pointers.append(self.pointer)
                    self.pointer += 1
            elif char == "]":
                loop_start = loop_pointers.pop()
                if self.tape.get_val() == 0:
                    self.pointer += 1
                else:
                    self.pointer = loop_start
            else:
                # Any other character is a comment / no-op.
                self.pointer += 1
            if self.debug == True and self.pointer < program_length:
                print(self.pointer, "\t", self.program[self.pointer], "\t", self.tape.pointer, "\t", self.tape.get_val())
                time.sleep(0.1)
if __name__ == "__main__":
    import sys
    def parse_bool(string):
        # Accept a few human-friendly spellings; None signals "unrecognised".
        if string == "true" or string == "y" or string == "yes":
            return True
        elif string == "false" or string == "n" or string == "no":
            return False
        else:
            return None
    program = ""
    allow_nested_loops = True
    debug = False
    # Minimal hand-rolled flag parsing: each option consumes the next argv
    # item (a trailing flag with no value raises IndexError).
    args = sys.argv[1:]
    for x, arg in enumerate(args):
        if arg == "--program":
            program = args[x + 1]
        elif arg == "--nested-loops":
            allow_nested_loops = parse_bool(args[x + 1])
        elif arg == "--debug":
            debug = parse_bool(args[x + 1])
    tape = Tape()
    brainfuck = Brainfuck(tape, program, allow_nested_loops, debug)
    brainfuck.run_program()
| Python | 0 |
6a899eeb5be7a8b49b45ff0fc0f490a5cad151bd | Add SourceGroup model | entity_event/models.py | entity_event/models.py | from django.db import models
class Medium(models.Model):
    """A delivery channel through which event notifications are consumed."""
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Source(models.Model):
    """A named producer of events, belonging to a SourceGroup."""
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    group = models.ForeignKey('SourceGroup')
    def __unicode__(self):
        return self.display_name
class SourceGroup(models.Model):
    """A grouping of related Sources (referenced by Source.group)."""
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Subscription(models.Model):
    """An entity's subscription to a source's events over a medium."""
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    def __unicode__(self):
        return '{entity} to {source} by {medium}'.format(
            entity=self.entity.__unicode__(),
            source=self.source.__unicode__(),
            medium=self.medium.__unicode__(),
        )
class Unsubscription(models.Model):
    """An explicit opt-out from a source's events on a medium."""
    entity = models.ForeignKey(Entity)
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    def __unicode__(self):
        s = '{entity} from {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Event(models.Model):
    """An occurrence emitted by a Source, targeted at an entity."""
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    source = models.ForeignKey(Source)
    context = jsonfield.JSONField()
    # Fixed: the Django keyword is auto_now_add, not auto_add_now (the
    # misspelled keyword raises TypeError when the model class is built).
    time = models.DateTimeField(auto_now_add=True)
    time_expires = models.DateTimeField(null=True, default=None)
    uuid = models.CharField(max_length=128, unique=True)
    def __unicode__(self):
        s = '{source} event at {time}'
        source = self.source.__unicode__()
        time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(source=source, time=time)
class EventSeen(models.Model):
    """Records that an event was seen on a given medium (time_seen may be null)."""
    event = models.ForeignKey('Event')
    medium = models.ForeignKey(Medium)
    time_seen = models.DateTimeField(null=True, default=None)
    def __unicode__(self):
        s = 'seen by {medium} at {time}'
        medium = self.medium.__unicode__()
        time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(medium=medium, time=time)
| from django.db import models
class Medium(models.Model):
    """A delivery channel through which event notifications are consumed."""
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Source(models.Model):
    """A named producer of events."""
    name = models.CharField(max_length=64, unique=True)
    display_name = models.CharField(max_length=64)
    description = models.TextField()
    def __unicode__(self):
        return self.display_name
class Subscription(models.Model):
    """An entity's subscription to a source's events over a medium."""
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    # NOTE(review): Entity and ContentType are not imported in this chunk --
    # presumably imported elsewhere in the file; verify.
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    def __unicode__(self):
        s = '{entity} to {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Unsubscription(models.Model):
    """An explicit opt-out from a source's events on a medium."""
    entity = models.ForeignKey(Entity)
    medium = models.ForeignKey('Medium')
    source = models.ForeignKey('Source')
    def __unicode__(self):
        s = '{entity} from {source} by {medium}'
        entity = self.entity.__unicode__()
        source = self.source.__unicode__()
        medium = self.medium.__unicode__()
        return s.format(entity=entity, source=source, medium=medium)
class Event(models.Model):
    """An occurrence emitted by a Source, targeted at an entity."""
    entity = models.ForeignKey(Entity)
    subentity_type = models.ForeignKey(ContentType, null=True)
    source = models.ForeignKey(Source)
    context = jsonfield.JSONField()
    # Fixed: the Django keyword is auto_now_add, not auto_add_now (the
    # misspelled keyword raises TypeError when the model class is built).
    time = models.DateTimeField(auto_now_add=True)
    time_expires = models.DateTimeField(null=True, default=None)
    uuid = models.CharField(max_length=128, unique=True)
    def __unicode__(self):
        s = '{source} event at {time}'
        source = self.source.__unicode__()
        time = self.time.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(source=source, time=time)
class EventSeen(models.Model):
    """Records that an event was seen on a given medium (time_seen may be null)."""
    event = models.ForeignKey('Event')
    medium = models.ForeignKey(Medium)
    time_seen = models.DateTimeField(null=True, default=None)
    def __unicode__(self):
        s = 'seen by {medium} at {time}'
        medium = self.medium.__unicode__()
        time = self.time_seen.strftime('%Y-%m-%d::%H:%M:%S')
        return s.format(medium=medium, time=time)
| Python | 0 |
bcf4c6be490b96230877d9388c27783abdbc487e | Fix aibrake | lib/ansible/modules/extras/monitoring/airbrake_deployment.py | lib/ansible/modules/extras/monitoring/airbrake_deployment.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: "Bruce Pennypacker (@bpennypacker)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
required: false
repo:
description:
- URL of the project repository
required: false
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- airbrake_deployment: token=AAAAAA
environment='staging'
user='ansible'
revision=4.2
'''
import urllib
# ===========================================
# Module execution.
#
def main():
    """Entry point: read module params and POST a deploy notification."""
    module = AnsibleModule(
        argument_spec=dict(
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Map module options onto the Airbrake deploy-tracking form fields,
    # skipping any option the user left empty.
    option_to_field = (
        ('environment', 'deploy[rails_env]'),
        ('user', 'deploy[local_username]'),
        ('repo', 'deploy[scm_repository]'),
        ('revision', 'deploy[scm_revision]'),
    )
    params = {field: module.params[option]
              for option, field in option_to_field
              if module.params[option]}
    params['api_key'] = module.params['token']

    url = module.params.get('url')

    # In check mode, report success without contacting the service.
    if module.check_mode:
        module.exit_json(changed=True)

    # Send the data to airbrake
    data = urllib.urlencode(params)
    response, info = fetch_url(module, url, data=data)

    if info['status'] == 200:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <bruce@pennypacker.org>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: "Bruce Pennypacker (@bpennypacker)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
required: false
repo:
description:
- URL of the project repository
required: false
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- airbrake_deployment: token=AAAAAA
environment='staging'
user='ansible'
revision=4.2
'''
import urllib
# ===========================================
# Module execution.
#
def main():
    """Entry point: read module params and POST a deploy notification."""
    module = AnsibleModule(
        argument_spec=dict(
            # no_log keeps the API token out of Ansible's logs and output
            # (consistent with the newer revision of this module).
            token=dict(required=True, no_log=True),
            environment=dict(required=True),
            user=dict(required=False),
            repo=dict(required=False),
            revision=dict(required=False),
            url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
            validate_certs=dict(default='yes', type='bool'),
        ),
        supports_check_mode=True
    )

    # Build the deploy-tracking form fields, skipping unset options.
    params = {}
    if module.params["environment"]:
        params["deploy[rails_env]"] = module.params["environment"]
    if module.params["user"]:
        params["deploy[local_username]"] = module.params["user"]
    if module.params["repo"]:
        params["deploy[scm_repository]"] = module.params["repo"]
    if module.params["revision"]:
        params["deploy[scm_revision]"] = module.params["revision"]
    params["api_key"] = module.params["token"]

    url = module.params.get('url')

    # If we're in check mode, just exit pretending like we succeeded
    if module.check_mode:
        module.exit_json(changed=True)

    # Send the data to airbrake
    data = urllib.urlencode(params)
    response, info = fetch_url(module, url, data=data)

    if info['status'] == 200:
        module.exit_json(changed=True)
    else:
        module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
| Python | 0.00005 |
8cbce567c8dc6c78a48972f7918897056a99a854 | fix to_3d | gdsfactory/export/to_3d.py | gdsfactory/export/to_3d.py | from typing import Optional, Tuple
import shapely
from gdsfactory.component import Component
from gdsfactory.layers import LayerColors
from gdsfactory.pdk import get_layer_colors, get_layer_stack
from gdsfactory.tech import LayerStack
from gdsfactory.types import Layer
def to_3d(
    component: Component,
    layer_colors: Optional[LayerColors] = None,
    layer_stack: Optional[LayerStack] = None,
    exclude_layers: Optional[Tuple[Layer, ...]] = None,
):
    """Return Component 3D trimesh Scene.

    Args:
        component: to extrude in 3D.
        layer_colors: layer colors from Klayout Layer Properties file.
            Defaults to active PDK.layer_colors.
        layer_stack: contains thickness and zmin for each layer.
            Defaults to active PDK.layer_stack.
        exclude_layers: layers to exclude.

    Raises:
        ValueError: if no polygon lands on a layer present in both
            layer_stack and layer_colors.
    """
    try:
        import matplotlib.colors
        from trimesh.creation import extrude_polygon
        from trimesh.scene import Scene
    except ImportError as e:
        print("you need to `pip install trimesh`")
        raise e

    layer_colors = layer_colors or get_layer_colors()
    layer_stack = layer_stack or get_layer_stack()

    scene = Scene()
    layer_to_thickness = layer_stack.get_layer_to_thickness()
    layer_to_zmin = layer_stack.get_layer_to_zmin()
    exclude_layers = exclude_layers or ()

    has_polygons = False

    for layer, polygons in component.get_polygons(by_spec=True, as_array=False).items():
        if (
            layer not in exclude_layers
            and layer in layer_to_thickness
            and layer in layer_to_zmin
        ):
            height = layer_to_thickness[layer]
            zmin = layer_to_zmin[layer]
            layer_color = layer_colors.get_from_tuple(layer)
            color_hex = layer_color.color
            color_rgb = matplotlib.colors.to_rgb(color_hex)
            for polygon in polygons:
                p = shapely.geometry.Polygon(polygon.points)
                mesh = extrude_polygon(p, height=height)
                mesh.apply_translation((0, 0, zmin))
                mesh.visual.face_colors = (*color_rgb, 0.5)
                scene.add_geometry(mesh)
                has_polygons = True

    if not has_polygons:
        # Fixed: the second half of this message was a plain (non-f) string,
        # so "{get_active_pdk().name!r}" was emitted literally; the function
        # is not imported here either, so the placeholder is dropped.
        raise ValueError(
            f"{component.name!r} does not have polygons defined in the "
            "layer_stack or layer_colors for the active PDK"
        )
    return scene
if __name__ == "__main__":
    # Ad-hoc demo: extrude a sample component and open the trimesh viewer.
    import gdsfactory as gf
    c = gf.components.taper_strip_to_ridge()
    # c = gf.components.straight()
    s = to_3d(c)
    s.show()
| from typing import Optional, Tuple
import shapely
from gdsfactory.component import Component
from gdsfactory.layers import LayerColors
from gdsfactory.pdk import get_layer_colors, get_layer_stack
from gdsfactory.tech import LayerStack
from gdsfactory.types import Layer
def to_3d(
    component: Component,
    layer_colors: Optional[LayerColors] = None,
    layer_stack: Optional[LayerStack] = None,
    exclude_layers: Optional[Tuple[Layer, ...]] = None,
):
    """Return Component 3D trimesh Scene.

    Args:
        component: to extrude in 3D.
        layer_colors: layer colors from Klayout Layer Properties file.
            Defaults to active PDK.layer_colors.
        layer_stack: contains thickness and zmin for each layer.
            Defaults to active PDK.layer_stack.
        exclude_layers: layers to exclude.

    Raises:
        ValueError: if no polygon lands on a layer present in both
            layer_stack and layer_colors.
    """
    try:
        import matplotlib.colors
        from trimesh.creation import extrude_polygon
        from trimesh.scene import Scene
    except ImportError as e:
        print("you need to `pip install trimesh`")
        raise e

    layer_colors = layer_colors or get_layer_colors()
    layer_stack = layer_stack or get_layer_stack()

    scene = Scene()
    layer_to_thickness = layer_stack.get_layer_to_thickness()
    layer_to_zmin = layer_stack.get_layer_to_zmin()
    exclude_layers = exclude_layers or ()

    has_polygons = False

    for layer, polygons in component.get_polygons(by_spec=True).items():
        if (
            layer not in exclude_layers
            and layer in layer_to_thickness
            and layer in layer_to_zmin
        ):
            height = layer_to_thickness[layer]
            zmin = layer_to_zmin[layer]
            layer_color = layer_colors.get_from_tuple(layer)
            color_hex = layer_color.color
            color_rgb = matplotlib.colors.to_rgb(color_hex)
            for polygon in polygons:
                p = shapely.geometry.Polygon(polygon.points)
                mesh = extrude_polygon(p, height=height)
                mesh.apply_translation((0, 0, zmin))
                mesh.visual.face_colors = (*color_rgb, 0.5)
                scene.add_geometry(mesh)
                has_polygons = True

    if not has_polygons:
        # Fixed: the second half of this message was a plain (non-f) string,
        # so "{get_active_pdk().name!r}" was emitted literally; the function
        # is not imported here either, so the placeholder is dropped.
        raise ValueError(
            f"{component.name!r} does not have polygons defined in the "
            "layer_stack or layer_colors for the active PDK"
        )
    return scene
if __name__ == "__main__":
    # Ad-hoc demo: extrude a sample component and open the trimesh viewer.
    import gdsfactory as gf
    c = gf.components.taper_strip_to_ridge()
    # c = gf.components.straight()
    s = to_3d(c)
    s.show()
| Python | 0.00156 |
9621de820ccbdd12a42bb4e4ff2f228ed245ee2e | Set velocity to maximum Maxon EC45 velocity. | epos_control_server.py | epos_control_server.py | #!/usr/bin/python
import logging.config
import signal
import threading
from flask import Flask, send_from_directory
from flask.ext.socketio import SocketIO
from epos_lib_wrapper import EposLibWrapper
from position_fetcher import PositionFetcher
POSITION_MAX_DELTA_TO_END = 0
EPOS_RELATIVE_POSITION = 20000000
EPOS_VELOCITY = 4840
# Instanciate Flask (Static files and REST API)
app = Flask(__name__)
# Instanciate SocketIO (Websockets, used for events) on top of it
socketio = SocketIO(app)
# EPOS2 control library
epos = None
# Position fetcher
position_fetch = None
# Watch position
watch_position = True
# Target position
target_position = 512
@app.route('/')
def index():
    # Serve the single-page UI.
    return send_from_directory('static', 'index.html')
@app.route('/js/<path:path>')
def static_js_proxy(path):
    # Serve bundled JavaScript assets.
    return send_from_directory('static/js/', path)
@socketio.on('moveTo', namespace='/servo')
def on_move_to(position):
    """Websocket handler: record the new target the watcher thread chases."""
    global target_position
    # Fixed: this is a routine event, not a failure -- log at info level,
    # consistent with the other handlers in this module.
    logging.info("Got move to %s", position)
    target_position = position
@socketio.on('stop', namespace='/servo')
def on_stop():
    # Websocket handler: immediately halt the motor.
    stop()
def truncate_position(input_position):
    """Coerce *input_position* to an int clamped to the servo range [0, 1023].

    Falls back to the midpoint (512) when the value cannot be parsed.
    """
    try:
        ret = int(input_position)
    except (TypeError, ValueError):
        # Non-numeric input (e.g. garbage from the websocket): center.
        # Narrowed from a bare `except Exception` so real bugs still surface.
        return 512
    return max(0, min(ret, 1023))
def move_to(target_position):
    """Drive the motor toward *target_position* (0-1023), or stop when there.

    NOTE(review): with POSITION_MAX_DELTA_TO_END == 0 the `abs(...) < 0`
    end-stop guard can never be true, so is_end never suppresses movement --
    confirm whether that is intended.
    """
    position = truncate_position(target_position)
    current_position, is_end = position_fetch.get_current_position()
    if position < current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
        move_to_low()
    elif position > current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
        move_to_high()
    else:
        logging.info("You asked me to move to %s, but position is %s, is_end: %s",
                     position, current_position, is_end)
        stop()
def move_to_low():
    """Drive toward the low end of travel."""
    logging.debug("Moving to lower")
    epos.moveToPositionWithVelocity(-EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def move_to_high():
    """Drive toward the high end of travel."""
    logging.debug("Moving to higher")
    epos.moveToPositionWithVelocity(EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def stop():
    """Halt motor motion."""
    logging.info("Stopping")
    epos.stop()
def init_epos():
    """Create and open the EPOS2 motor controller library wrapper."""
    global epos
    # Instanciate EPOS2 control library
    epos = EposLibWrapper()
    epos.openDevice()
def init_position_fetcher():
    """Start the background thread that reads the current position."""
    global position_fetch
    position_fetch = PositionFetcher()
    position_fetch.start()
def position_watcher():
    """Chase target_position until watch_position is cleared.

    NOTE(review): no sleep in this loop, so it busy-spins and hammers the
    controller -- confirm whether a short delay is acceptable.
    """
    while watch_position:
        move_to(target_position)
    logging.error("Position watcher stopped")
def sig_term_handler(signum, frame):
    """Translate SIGTERM into KeyboardInterrupt so cleanup in main() runs."""
    # Typo fix in the message: "receivied" -> "received".
    raise KeyboardInterrupt('Signal %i received!' % signum)
def main():
    """Wire everything up and run the web server until interrupted."""
    global watch_position
    # Initialize logger
    logging.config.fileConfig('log.ini')
    try:
        # Set signal handler for Shutdown
        signal.signal(signal.SIGTERM, sig_term_handler)
        init_position_fetcher()
        init_epos()
        # Background thread keeps the motor tracking target_position.
        watcher_thread = threading.Thread(target=position_watcher)
        watcher_thread.start()
        # Blocking! - Start Flask server
        socketio.run(app, host='0.0.0.0')
    except KeyboardInterrupt:
        pass
    finally:
        # Shut down the position thread and the watcher loop cleanly.
        if position_fetch:
            position_fetch.stop()
        watch_position = False
if __name__ == '__main__':
    main()
| #!/usr/bin/python
import logging.config
import signal
import threading
from flask import Flask, send_from_directory
from flask.ext.socketio import SocketIO
from epos_lib_wrapper import EposLibWrapper
from position_fetcher import PositionFetcher
POSITION_MAX_DELTA_TO_END = 0
EPOS_RELATIVE_POSITION = 20000000
EPOS_VELOCITY = 3000
# Instanciate Flask (Static files and REST API)
app = Flask(__name__)
# Instanciate SocketIO (Websockets, used for events) on top of it
socketio = SocketIO(app)
# EPOS2 control library
epos = None
# Position fetcher
position_fetch = None
# Watch position
watch_position = True
# Target position
target_position = 512
@app.route('/')
def index():
    # Serve the single-page UI.
    return send_from_directory('static', 'index.html')
@app.route('/js/<path:path>')
def static_js_proxy(path):
    # Serve bundled JavaScript assets.
    return send_from_directory('static/js/', path)
@socketio.on('moveTo', namespace='/servo')
def on_move_to(position):
    """Websocket handler: record the new target the watcher thread chases."""
    global target_position
    # NOTE(review): this is a routine event -- logging at error level looks
    # unintentional; consider info.
    logging.error("Got move to %s", position)
    target_position = position
@socketio.on('stop', namespace='/servo')
def on_stop():
    # Websocket handler: immediately halt the motor.
    stop()
def truncate_position(input_position):
    """Coerce *input_position* to an int clamped to the servo range [0, 1023].

    Falls back to the midpoint (512) when the value cannot be parsed.
    """
    try:
        ret = int(input_position)
    except (TypeError, ValueError):
        # Non-numeric input (e.g. garbage from the websocket): center.
        # Narrowed from a bare `except Exception` so real bugs still surface.
        return 512
    return max(0, min(ret, 1023))
def move_to(target_position):
    """Drive the motor toward *target_position* (0-1023), or stop when there.

    NOTE(review): with POSITION_MAX_DELTA_TO_END == 0 the `abs(...) < 0`
    end-stop guard can never be true, so is_end never suppresses movement --
    confirm whether that is intended.
    """
    position = truncate_position(target_position)
    current_position, is_end = position_fetch.get_current_position()
    if position < current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
        move_to_low()
    elif position > current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
        move_to_high()
    else:
        logging.info("You asked me to move to %s, but position is %s, is_end: %s",
                     position, current_position, is_end)
        stop()
def move_to_low():
    """Drive toward the low end of travel."""
    logging.debug("Moving to lower")
    epos.moveToPositionWithVelocity(-EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def move_to_high():
    """Drive toward the high end of travel."""
    logging.debug("Moving to higher")
    epos.moveToPositionWithVelocity(EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def stop():
    """Halt motor motion."""
    logging.info("Stopping")
    epos.stop()
def init_epos():
    """Create and open the EPOS2 motor controller library wrapper."""
    global epos
    # Instanciate EPOS2 control library
    epos = EposLibWrapper()
    epos.openDevice()
def init_position_fetcher():
    """Start the background thread that reads the current position."""
    global position_fetch
    position_fetch = PositionFetcher()
    position_fetch.start()
def position_watcher():
    """Chase target_position until watch_position is cleared.

    NOTE(review): no sleep in this loop, so it busy-spins and hammers the
    controller -- confirm whether a short delay is acceptable.
    """
    while watch_position:
        move_to(target_position)
    logging.error("Position watcher stopped")
def sig_term_handler(signum, frame):
    """Translate SIGTERM into KeyboardInterrupt so cleanup in main() runs."""
    # Typo fix in the message: "receivied" -> "received".
    raise KeyboardInterrupt('Signal %i received!' % signum)
def main():
    """Wire everything up and run the web server until interrupted."""
    global watch_position
    # Initialize logger
    logging.config.fileConfig('log.ini')
    try:
        # Set signal handler for Shutdown
        signal.signal(signal.SIGTERM, sig_term_handler)
        init_position_fetcher()
        init_epos()
        # Background thread keeps the motor tracking target_position.
        watcher_thread = threading.Thread(target=position_watcher)
        watcher_thread.start()
        # Blocking! - Start Flask server
        socketio.run(app, host='0.0.0.0')
    except KeyboardInterrupt:
        pass
    finally:
        # Shut down the position thread and the watcher loop cleanly.
        if position_fetch:
            position_fetch.stop()
        watch_position = False
if __name__ == '__main__':
    main()
| Python | 0 |
fe4c426fe6384b570bcc2a105bdf04f2f412a31f | Use Query.executQuery for filterCasts.py | InformationScripting/scripts/filterCasts.py | InformationScripting/scripts/filterCasts.py | # filterCasts
classUses = Query.executeQuery('ast -t=CastExpression|attribute -at=castType -input|uses -input -t=Class', [])
def hasTypeIdMethod( cl ):
    """Return True if *cl* declares a method named 'typeIdStatic'."""
    return any(method.name == "typeIdStatic" for method in cl.methods)
# Keep only the users of classes that declare a 'typeIdStatic' method,
# then map the surviving results back onto their CastExpression parents.
for tuple in classUses[0].tuples("uses"):
    if hasTypeIdMethod(tuple.used):
        values = [("ast", tuple.user)]
        Query.result.add(Tuple(values))
Query.result = Query.toParent(["-t=CastExpression", "-addAs=node"], [Query.result])[0]
| # filterCasts
# Legacy variant: chains the query steps through explicit Python calls
# instead of a single piped query expression.
casts = Query.ast(["-t=CastExpression"] + Query.args, [])
castTypeAttributes = Query.attribute(["-at=castType", "-s=of"], casts)
classUses = Query.uses(["-s=of", "-t=Class"], castTypeAttributes)
def hasTypeIdMethod( cl ):
    # True when the class declares a 'typeIdStatic' method.
    for method in cl.methods:
        if method.name == "typeIdStatic":
            return True
    return False
# Keep only the users of such classes and map them back onto their
# CastExpression parents.
for tuple in classUses[0].tuples("uses"):
    if hasTypeIdMethod(tuple.used):
        values = [("ast", tuple.user)]
        Query.result.add(Tuple(values))
Query.result = Query.toParent(["-t=CastExpression", "-addAs=node"], [Query.result])[0]
| Python | 0 |
866f9cbe01e360872e0b7f55b00f2683adffaabc | Fix typo | ckanext/mapactiontheme/controllers/admin_controller.py | ckanext/mapactiontheme/controllers/admin_controller.py | from ckan.controllers.admin import AdminController
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
from ckan.lib.base import BaseController
from ckan.plugins.toolkit import c, request, _
from ckan.authz import has_user_permission_for_group_or_org
class CustomAdminController(BaseController):
    """Admin controller that widens access to the /ckan-admin/trash page.

    Sysadmins keep full access.  In addition, members of the 'mapaction'
    organisation holding the 'delete_dataset' permission (org editors and
    admins) may view the trash page and purge deleted datasets.  Every
    other action falls back to the stock AdminController authorisation.
    """

    def __before__(self, action, **params):
        """Authorise the request before dispatching to an action.

        For the 'trash' action, allow either sysadmins or 'mapaction'
        members with the 'delete_dataset' permission; otherwise abort with
        403.  Non-trash actions are delegated to the standard
        AdminController checks.
        """
        super(CustomAdminController, self).__before__(action, **params)
        context = {'model': model,
                   'user': c.user, 'auth_user_obj': c.userobj}
        if action == u"trash" and c.user:
            # 'delete_dataset' is a permission that only an org `editor`
            # or `admin` has, so this effectively whitelists those roles.
            if has_user_permission_for_group_or_org('mapaction',
                                                    c.user,
                                                    'delete_dataset'):
                context['ignore_auth'] = True
            try:
                logic.check_access('sysadmin', context, {})
            except logic.NotAuthorized:
                base.abort(403, _(
                    'Need to be system administrator to administer'))
        else:
            fallback = AdminController()
            fallback.__before__(action, **params)

    def trash(self):
        """Render the trash listing, or purge deleted packages/revisions.

        A request without form parameters renders the page; a request with
        'purge-packages' or 'purge-revisions' purges the corresponding
        revisions and redirects back to the trash page, flashing any
        per-item error messages collected along the way.
        """
        c.deleted_revisions = model.Session.query(
            model.Revision).filter_by(state=model.State.DELETED)
        c.deleted_packages = model.Session.query(
            model.Package).filter_by(state=model.State.DELETED)
        if not request.params or (len(request.params) == 1 and '__no_cache__'
                                  in request.params):
            return base.render('admin/trash.html')
        else:
            # NB: we repeat retrieval of revisions
            # this is obviously inefficient (but probably not *that* bad)
            # but has to be done to avoid (odd) sqlalchemy errors (when doing
            # purge packages) of form: "this object already exists in the
            # session"
            msgs = []
            if ('purge-packages' in request.params) or ('purge-revisions' in
                                                        request.params):
                if 'purge-packages' in request.params:
                    revs_to_purge = []
                    for pkg in c.deleted_packages:
                        revisions = [x[0] for x in pkg.all_related_revisions]
                        # ensure no accidental purging of other(non-deleted)
                        # packages initially just avoided purging revisions
                        # where non-deleted packages were affected
                        # however this lead to confusing outcomes e.g.
                        # we succesfully deleted revision in which package
                        # was deleted (so package now active again) but no
                        # other revisions
                        problem = False
                        for r in revisions:
                            affected_pkgs = set(r.packages).\
                                difference(set(c.deleted_packages))
                            if affected_pkgs:
                                msg = _('Cannot purge package %s as '
                                        'associated revision %s includes '
                                        'non-deleted packages %s')
                                # Report the ids of the *other* affected
                                # packages.  The original comprehension
                                # repeated pkg.id and (on Python 2) also
                                # rebound the loop variable `r`.
                                msg = msg % (pkg.id, r.id,
                                             [p.id for p in affected_pkgs])
                                msgs.append(msg)
                                problem = True
                                break
                        if not problem:
                            revs_to_purge += [r.id for r in revisions]
                    model.Session.remove()
                else:
                    revs_to_purge = [rev.id for rev in c.deleted_revisions]
                # De-duplicate revision ids before purging.
                revs_to_purge = list(set(revs_to_purge))
                for rev_id in revs_to_purge:
                    revision = model.Session.query(model.Revision).get(rev_id)
                    try:
                        # TODO deleting the head revision corrupts the edit
                        # page Ensure that whatever 'head' pointer is used
                        # gets moved down to the next revision
                        model.repo.purge_revision(revision, leave_record=False)
                    except Exception as inst:
                        msg = _('Problem purging revision %s: %s') % (rev_id,
                                                                      inst)
                        msgs.append(msg)
                h.flash_success(_('Purge complete'))
            else:
                msgs.append(_('Action not implemented.'))
            for msg in msgs:
                h.flash_error(msg)
            h.redirect_to(controller='admin', action='trash')
| from ckan.controllers.admin import AdminController
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
from ckan.lib.base import BaseController
from ckan.plugins.toolkit import c, request, _
from ckan.authz import has_user_permission_for_group_or_org
class CustomAdminController(BaseController):
    """Admin controller that widens access to the /ckan-admin/trash page
    to 'mapaction' members holding the 'delete_dataset' permission, while
    sysadmins keep full access to every admin action.
    """
    def __before__(self, action, **params):
        """Authorise the request: for 'trash', allow sysadmins or
        permitted 'mapaction' members; delegate other actions to the
        stock AdminController authorisation."""
        super(CustomAdminController, self).__before__(action, **params)
        context = {'model': model,
                   'user': c.user, 'auth_user_obj': c.userobj}
        if action == u"trash" and c.user:
            # 'delete_dataset' is a permision that only
            # org `editor` or `admin` has
            if has_user_permission_for_group_or_org('mapaction',
                                                    c.user,
                                                    'delete_dataset'):
                context['ignore_auth'] = True
            try:
                logic.check_access('sysadmin', context, {})
            except logic.NotAuthorized:
                base.abort(403, _(
                    'Need to be system administrator to administer'))
        else:
            x = AdminController()
            x.__before__(action, **params)
    def trash(self):
        """Render the trash listing, or purge deleted packages/revisions
        when 'purge-packages'/'purge-revisions' form params are present."""
        c.deleted_revisions = model.Session.query(
            model.Revision).filter_by(state=model.State.DELETED)
        c.deleted_packages = model.Session.query(
            model.Package).filter_by(state=model.State.DELETED)
        if not request.params or (len(request.params) == 1 and '__no_cache__'
                                  in request.params):
            return base.render('admin/trash.html')
        else:
            # NB: we repeat retrieval of revisions
            # this is obviously inefficient (but probably not *that* bad)
            # but has to be done to avoid (odd) sqlalchemy errors (when doing
            # purge packages) of form: "this object already exists in the
            # session"
            msgs = []
            if ('purge-packages' in request.params) or ('purge-revisions' in
                                                        request.params):
                if 'purge-packages' in request.params:
                    revs_to_purge = []
                    for pkg in c.deleted_packages:
                        revisions = [x[0] for x in pkg.all_related_revisions]
                        # ensure no accidental purging of other(non-deleted)
                        # packages initially just avoided purging revisions
                        # where non-deleted packages were affected
                        # however this lead to confusing outcomes e.g.
                        # we succesfully deleted revision in which package
                        # was deleted (so package now active again) but no
                        # other revisions
                        problem = False
                        for r in revisions:
                            affected_pkgs = set(r.packages).\
                                difference(set(c.deleted_packages))
                            if affected_pkgs:
                                msg = _('Cannot purge package %s as '
                                        'associated revision %s includes '
                                        'non-deleted packages %s')
                                # NOTE(review): this comprehension repeats
                                # pkg.id instead of the affected packages'
                                # ids, and rebinds `r` on Python 2 — it
                                # likely should be [p.id for p in
                                # affected_pkgs].
                                msg = msg % (pkg.id, r.id, [pkg.id for r
                                                            in affected_pkgs])
                                msgs.append(msg)
                                problem = True
                                break
                        if not problem:
                            revs_to_purge += [r.id for r in revisions]
                    model.Session.remove()
                else:
                    revs_to_purge = [rev.id for rev in c.deleted_revisions]
                revs_to_purge = list(set(revs_to_purge))
                # NOTE(review): `id` shadows the builtin here.
                for id in revs_to_purge:
                    revision = model.Session.query(model.Revision).get(id)
                    try:
                        # TODO deleting the head revision corrupts the edit
                        # page Ensure that whatever 'head' pointer is used
                        # gets moved down to the next revision
                        model.repo.purge_revision(revision, leave_record=False)
                    # NOTE(review): Python-2-only except syntax; use
                    # `except Exception as inst` for 2.6+/3 compatibility.
                    except Exception, inst:
                        msg = _('Problem purging revision %s: %s') % (id, inst)
                        msgs.append(msg)
                h.flash_success(_('Purge complete'))
            else:
                msgs.append(_('Action not implemented.'))
            for msg in msgs:
                h.flash_error(msg)
            h.redirect_to(controller='admin', action='trash')
| Python | 0.999999 |
f5c94105f6652186e05ebe201f127a1c8b7bd94c | add script to download and save articles | newsplease/tests/downloadarticles.py | newsplease/tests/downloadarticles.py | import json
import os
# NewsPlease was referenced below but never imported, so the original
# script failed with a NameError at runtime.
from newsplease import NewsPlease
# Input list of articles and a per-run output directory next to it.
name = 'trump-in-saudi-arabia.txt'
basepath = '/Users/felix/Downloads/'
download_dir = basepath + 'dir' + name + '/'
os.makedirs(download_dir)
# Download every article referenced by the input file; `articles` maps
# url -> article dict (see NewsPlease.download_from_file).
articles = NewsPlease.download_from_file(basepath + name)
# Dump each article to its own JSON file, named by its 'filename' field.
for article in articles.values():
    with open(download_dir + article['filename'], 'w') as outfile:
        json.dump(article, outfile)
| import json
import os
# NOTE(review): NewsPlease is used below but never imported in this
# script; as written it fails with a NameError at runtime.
name = 'trump-in-saudi-arabia.txt'
basepath = '/Users/felix/Downloads/'
# Per-run output directory created next to the input file.
download_dir = basepath + 'dir' + name + '/'
os.makedirs(download_dir)
articles = NewsPlease.download_from_file(basepath + name)
# Dump each downloaded article to its own JSON file.
for url in articles:
    article = articles[url]
    with open(download_dir + article['filename'], 'w') as outfile:
        json.dump(article, outfile)
| Python | 0 |
f6686169cf7344e0c75c6d060332d3692fc7df1c | Update curation table format | bin/trait_mapping/create_table_for_manual_curation.py | bin/trait_mapping/create_table_for_manual_curation.py | #!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
    """Build the curation string for a trait that EVA has mapped before.

    Returns '' when *trait_name* has no previous mapping.  Otherwise
    returns a pipe-separated record of the form
    uri|label|NOT_SPECIFIED|previously-used|status, where status says
    whether the URI is current in EFO, obsolete in EFO, or not contained.
    """
    if trait_name not in previous_mappings:
        return ''
    uri = previous_mappings[trait_name]
    # Query OLS in the same order as before: label, currency, containment.
    label = get_ontology_label_from_ols(uri)
    current_in_efo = is_current_and_in_efo(uri)
    in_efo = is_in_efo(uri)
    if not in_efo:
        status = 'NOT_CONTAINED'
    elif current_in_efo:
        status = 'EFO_CURRENT'
    else:
        status = 'EFO_OBSOLETE'
    return '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', status])
def find_exact_mapping(trait_name, mappings):
    """Return the first mapping whose label matches *trait_name*, or ''.

    Each mapping is a pipe-separated string whose second field is the
    ontology label; the comparison lower-cases the mapping, so
    *trait_name* is expected to be lower case already.
    """
    return next(
        (candidate for candidate in mappings
         if candidate.lower().split('|')[1] == trait_name),
        '')
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t', '--traits-for-curation',
        help='Table with traits for which the pipeline failed to make a confident prediction')
    parser.add_argument(
        '-m', '--previous-mappings',
        help='Table with all mappings previously issued by EVA')
    parser.add_argument(
        '-o', '--output',
        help='Output TSV to be loaded in Google Sheets for manual curation')
    args = parser.parse_args()
    # NOTE(review): file handles below are never closed on error paths;
    # consider `with open(...)` context managers.
    outfile = open(args.output, 'w')
    # Load all previous mappings: one tab-separated "trait<TAB>uri" per line.
    previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
    # Process all mappings which require manual curation
    for line in open(args.traits_for_curation):
        fields = line.rstrip().split('\t')
        # Input row layout: trait name, frequency, then candidate mappings.
        trait_name, trait_freq = fields[:2]
        mappings = fields[2:]
        previous_mapping = find_previous_mapping(trait_name, previous_mappings)
        exact_mapping = find_exact_mapping(trait_name, mappings)
        out_line = '\t'.join(
            [trait_name, trait_freq, previous_mapping, exact_mapping] + mappings
        ) + '\n'
        outfile.write(out_line)
    outfile.close()
| #!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
    """Return '' if the trait has no previous EVA mapping; otherwise a
    pipe-separated record uri|label|NOT_SPECIFIED|previously-used|status,
    with status derived from the URI's EFO currency/containment."""
    if trait_name not in previous_mappings:
        return ''
    uri = previous_mappings[trait_name]
    label = get_ontology_label_from_ols(uri)
    uri_is_current_and_in_efo = is_current_and_in_efo(uri)
    uri_in_efo = is_in_efo(uri)
    if uri_in_efo:
        trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
    else:
        trait_status = 'NOT_CONTAINED'
    trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
    return trait_string
def find_exact_mapping(trait_name, mappings):
    """Return the first mapping whose (lower-cased) second pipe-field
    equals trait_name, or '' when none matches."""
    for mapping in mappings:
        if mapping.lower().split('|')[1] == trait_name:
            return mapping
    return ''
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-t', '--traits-for-curation',
        help='Table with traits for which the pipeline failed to make a confident prediction')
    parser.add_argument(
        '-m', '--previous-mappings',
        help='Table with all mappings previously issued by EVA')
    parser.add_argument(
        '-o', '--output',
        help='Output TSV to be loaded in Google Sheets for manual curation')
    args = parser.parse_args()
    outfile = open(args.output, 'w')
    # Load all previous mappings
    previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
    # Process all mappings which require manual curation
    for line in open(args.traits_for_curation):
        fields = line.rstrip().split('\t')
        trait_name, trait_freq = fields[:2]
        mappings = fields[2:]
        previous_mapping = find_previous_mapping(trait_name, previous_mappings)
        exact_mapping = find_exact_mapping(trait_name, mappings)
        # Seven empty columns are reserved for the curators to fill in.
        out_line = '\t'.join(
            [trait_name, trait_freq,
             # Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO
             '', '', '', '', '', '', '',
             previous_mapping, exact_mapping] + mappings
        ) + '\n'
        outfile.write(out_line)
    outfile.close()
| Python | 0 |
620210707477e6496ab665ec7df8afaf2ba408aa | consolidate test version conditional boilerplate into assertion method | tests/test_codegen.py | tests/test_codegen.py | """
Part of the astor library for Python AST manipulation
License: 3-clause BSD
Copyright 2014 (c) Berker Peksag
"""
import ast
import sys
import textwrap
try:
import unittest2 as unittest
except ImportError:
import unittest
import astor
class CodegenTestCase(unittest.TestCase):
    """Round-trip tests: parse source with ast, regenerate it with
    astor.to_source, and assert the regenerated text equals the input."""
    def assertAstSourceEqual(self, source):
        # parse -> regenerate -> compare against the original text.
        self.assertEqual(astor.to_source(ast.parse(source)), source)
    def assertAstSourceEqualIfAtLeastVersion(self, source, version_tuple):
        # On older interpreters the construct must not even parse.
        if sys.version_info >= version_tuple:
            self.assertAstSourceEqual(source)
        else:
            self.assertRaises(SyntaxError, ast.parse, source)
    def test_imports(self):
        source = "import ast"
        self.assertAstSourceEqual(source)
        source = "import operator as op"
        self.assertAstSourceEqual(source)
        source = "from math import floor"
        self.assertAstSourceEqual(source)
    def test_dictionary_literals(self):
        source = "{'a': 1, 'b': 2}"
        self.assertAstSourceEqual(source)
        another_source = "{'nested': ['structures', {'are': 'important'}]}"
        self.assertAstSourceEqual(another_source)
    def test_try_expect(self):
        source = textwrap.dedent("""\
        try:
            'spam'[10]
        except IndexError:
            pass""")
        self.assertAstSourceEqual(source)
        source = textwrap.dedent("""\
        try:
            'spam'[10]
        except IndexError as exc:
            sys.stdout.write(exc)""")
        self.assertAstSourceEqual(source)
    def test_del_statement(self):
        source = "del l[0]"
        self.assertAstSourceEqual(source)
        source = "del obj.x"
        self.assertAstSourceEqual(source)
    def test_arguments(self):
        source = textwrap.dedent("""\
        j = [1, 2, 3]
        def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
            pass""")
        self.assertAstSourceEqual(source)
    def test_pass_arguments_node(self):
        # Regenerating an isolated ast.arguments node should also work.
        source = textwrap.dedent("""\
        j = [1, 2, 3]
        def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
            pass""")
        root_node = ast.parse(source)
        arguments_node = [n for n in ast.walk(root_node)
                          if isinstance(n, ast.arguments)][0]
        self.assertEqual(astor.to_source(arguments_node),
                         "a1, a2, b1=j, b2='123', b3={}, b4=[]")
    def test_matrix_multiplication(self):
        # The @ operator requires Python 3.5+.
        for source in ("(a @ b)", "a @= b"):
            self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5))
    def test_multiple_unpackings(self):
        # Multiple */** unpackings in a call require Python 3.5+ (PEP 448).
        source = textwrap.dedent("""\
            my_function(*[1], *[2], **{'three': 3}, **{'four': 'four'})""")
        self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5))
    def test_async_def_with_for(self):
        # async/await syntax requires Python 3.5+ (PEP 492).
        source = textwrap.dedent("""\
        async def read_data(db):
            async with connect(db) as db_cxn:
                data = await db_cxn.fetch('SELECT foo FROM bar;')
                async for datum in data:
                    if quux(datum):
                        return datum""")
        self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5))
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| """
Part of the astor library for Python AST manipulation
License: 3-clause BSD
Copyright 2014 (c) Berker Peksag
"""
import ast
import sys
import textwrap
try:
import unittest2 as unittest
except ImportError:
import unittest
import astor
class CodegenTestCase(unittest.TestCase):
    """Round-trip tests: parse source with ast, regenerate it with
    astor.to_source, and assert it equals the input.  (Earlier revision:
    the per-version check is inlined in each test instead of the later
    assertAstSourceEqualIfAtLeastVersion helper.)"""
    def assertAstSourceEqual(self, source):
        self.assertEqual(astor.to_source(ast.parse(source)), source)
    def test_imports(self):
        source = "import ast"
        self.assertAstSourceEqual(source)
        source = "import operator as op"
        self.assertAstSourceEqual(source)
        source = "from math import floor"
        self.assertAstSourceEqual(source)
    def test_dictionary_literals(self):
        source = "{'a': 1, 'b': 2}"
        self.assertAstSourceEqual(source)
        another_source = "{'nested': ['structures', {'are': 'important'}]}"
        self.assertAstSourceEqual(another_source)
    def test_try_expect(self):
        source = textwrap.dedent("""\
        try:
            'spam'[10]
        except IndexError:
            pass""")
        self.assertAstSourceEqual(source)
        source = textwrap.dedent("""\
        try:
            'spam'[10]
        except IndexError as exc:
            sys.stdout.write(exc)""")
        self.assertAstSourceEqual(source)
    def test_del_statement(self):
        source = "del l[0]"
        self.assertAstSourceEqual(source)
        source = "del obj.x"
        self.assertAstSourceEqual(source)
    def test_arguments(self):
        source = textwrap.dedent("""\
        j = [1, 2, 3]
        def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
            pass""")
        self.assertAstSourceEqual(source)
    def test_pass_arguments_node(self):
        # Regenerating an isolated ast.arguments node should also work.
        source = textwrap.dedent("""\
        j = [1, 2, 3]
        def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
            pass""")
        root_node = ast.parse(source)
        arguments_node = [n for n in ast.walk(root_node)
                          if isinstance(n, ast.arguments)][0]
        self.assertEqual(astor.to_source(arguments_node),
                         "a1, a2, b1=j, b2='123', b3={}, b4=[]")
    def test_matrix_multiplication(self):
        for source in ("(a @ b)", "a @= b"):
            if sys.version_info >= (3, 5):
                self.assertAstSourceEqual(source)
            else:
                # matrix multiplication operator introduced in Python 3.5
                self.assertRaises(SyntaxError, ast.parse, source)
    def test_multiple_unpackings(self):
        # Multiple */** unpackings require Python 3.5+ (PEP 448).
        source = textwrap.dedent("""\
            my_function(*[1], *[2], **{'three': 3}, **{'four': 'four'})""")
        if sys.version_info >= (3, 5):
            self.assertAstSourceEqual(source)
        else:
            self.assertRaises(SyntaxError, ast.parse, source)
    def test_async_def_with_for(self):
        # async/await syntax requires Python 3.5+ (PEP 492).
        source = textwrap.dedent("""\
        async def read_data(db):
            async with connect(db) as db_cxn:
                data = await db_cxn.fetch('SELECT foo FROM bar;')
                async for datum in data:
                    if quux(datum):
                        return datum""")
        if sys.version_info >= (3, 5):
            self.assertAstSourceEqual(source)
        else:
            self.assertRaises(SyntaxError, ast.parse, source)
# Allow running this module directly as a test script.
if __name__ == '__main__':
    unittest.main()
| Python | 0.000024 |
c5902af643d639ecefa756a0caaeeb58a7c6d151 | Update P4_textToExcel working solution | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py | # Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
import openpyxl
import os
FOLDER = "./p4files/"
# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active
# Get the list of input files, sorted so column order is deterministic.
filelist = os.listdir(FOLDER)
filelist.sort()
# One column per file (A, B, ...), one row per line of that file.
# enumerate() replaces the original filelist.index() lookup, which was
# O(n) on every single line, and avoids shadowing the builtin `file`.
for col, filename in enumerate(filelist, start=1):
    with open(FOLDER + filename) as fileobj:
        for row, line in enumerate(fileobj, start=1):
            # Transpose line into relevant workbook column
            sheet.cell(row=row, column=col).value = line.strip()
# Save workbook
wb.save("textToExcel.xlsx")
| # Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
import openpyxl
# NOTE(review): this revision is only a skeleton — the commented steps
# below were not implemented yet, so the saved workbook is empty.
# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active
# Get list of files
# Open file
# Scan lines into list
# Transpose list into relevant workbook column
# Close file
# Save workbook
wb.save("textToExcel.xlsx")
| Python | 0 |
7597497017053356cdfbebc38aa1468240df2e45 | fix the install to ./install requirements | fabfile/build.py | fabfile/build.py | from fabric.api import task, local, execute
import clean
# Fabric tasks exposed by this build module.
__all__ = ['sdist', 'install', 'sphinx']
@task
def sdist():
    """create the sdist"""
    # Clean first so stale artefacts don't end up in the archives.
    execute(clean.all)
    local("python setup.py sdist --format=bztar,zip")
@task
def install():
    """install cloudmesh"""
    # Install requirements via the project's ./install helper script.
    local("./install requirements.txt")
    local("python setup.py install")
@task
def sphinx():
    """install the sphinx-contrib autorun extension from bitbucket"""
    local("rm -rf /tmp/sphinx-contrib")
    local("cd /tmp; hg clone http://bitbucket.org/birkenfeld/sphinx-contrib/")
    local("cd /tmp/sphinx-contrib/autorun/; python setup.py install")
| from fabric.api import task, local, execute
import clean
# Fabric tasks exposed by this build module.
__all__ = ['req', 'sdist', 'install', 'sphinx']
@task
def req():
    """install the requirements"""
    local("pip install -r requirements.txt")
@task
def sdist():
    """create the sdist"""
    # Clean first so stale artefacts don't end up in the archives.
    execute(clean.all)
    local("python setup.py sdist --format=bztar,zip")
@task
def install():
    """install cloudmesh"""
    local("pip install -r requirements.txt")
    local("python setup.py install")
@task
def sphinx():
    """install the sphinx-contrib autorun extension from bitbucket"""
    local("rm -rf /tmp/sphinx-contrib")
    local("cd /tmp; hg clone http://bitbucket.org/birkenfeld/sphinx-contrib/")
    local("cd /tmp/sphinx-contrib/autorun/; python setup.py install")
| Python | 0.000005 |
9646fb2b7f7f441c6630e04fa1e1af358f9c7d10 | Set version to 0.20 final | eulexistdb/__init__.py | eulexistdb/__init__.py | # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
# (major, minor, micro, pre-release tag); a None tag marks a final
# release, producing plain '0.20.0'.
__version_info__ = (0, 20, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
    __version__ += ('-%s' % (__version_info__[-1],))
| # file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
# (major, minor, micro, pre-release tag); the 'dev' tag yields
# '0.20.0-dev'.
__version_info__ = (0, 20, 0, 'dev')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
    __version__ += ('-%s' % (__version_info__[-1],))
| Python | 0.000037 |
14cb7c49d5b2e155e92c07ecd1e58dc386b0ddb3 | stop failing on exceptions--skip instead | parsing/parsinglib/jobcontainer.py | parsing/parsinglib/jobcontainer.py | from ..models import Job
import datetime
class JobContainer():
    """Mutable accumulator for one scraped job posting.

    Parsers fill in the public attributes, then call save(), which
    deduplicates (by detail URL), normalises, validates and finally
    persists the record as a Job model instance.
    """
    def __init__(self):
        # Every field defaults to None; validate() treats a leftover None
        # as "the parser forgot to set this field".
        self.organization = None
        self.title = None
        self.division = None
        self.date_posted = None
        self.date_closing = None
        self.date_collected = None
        self.url_detail = None
        self.salary_waged = None
        self.salary_amount = None
        self.region = None
    def is_unique(self):
        """ Checks whether job (denoted by URL) already exists in DB.
        Remember to use this function before doing any intense parsing operations.
        """
        if not self.url_detail:
            raise KeyError(
                "Queried record uniqueness before detail URL set: {}".format(self))
        # NOTE(review): if Job.objects is a Django manager, .exists() would
        # avoid materialising the whole queryset here — confirm.
        return len(Job.objects.filter(url_detail=self.url_detail)) == 0
    def cleanup(self):
        """Normalise fields in place before validation/saving."""
        # De-shout ALL-CAPS titles; leave mixed-case titles untouched.
        self.title = self.title.title() if self.title.isupper() else self.title
        self.salary_amount = 0 if self.salary_amount is None else self.salary_amount
        # totally arbitrary threshold: amounts under 5000 are treated as wages
        self.salary_waged = self.salary_amount < 5000
        self.date_collected = datetime.date.today()
    def validate(self):
        """Raise KeyError naming the first public attribute still None."""
        attributes = {
            k: v for k, v in self.__dict__.items() if not k.startswith("_")}
        for k, v in attributes.items():
            if v is None:
                raise KeyError(
                    "Job {} was missing {}".format(self.url_detail, k))
    def save(self):
        """ Save job to DB, after final checks.
        """
        if not self.is_unique():  # failsafe in case we forgot to check this earlier.
            print(
                "{} tried to save a job that is not unique!".format(self.organization))
            return
        self.cleanup()
        try:
            self.validate()
        except KeyError as err:
            # Skip (rather than crash on) records with missing fields.
            print("|| EXCEPTION")
            print("|| ", err)
            return
        print("Saved job to DB: {}".format(self))
        j = Job(organization=self.organization, title=self.title,
                division=self.division, date_posted=self.date_posted,
                date_closing=self.date_closing, url_detail=self.url_detail,
                salary_waged=self.salary_waged,
                salary_amount=self.salary_amount, region=self.region,
                date_collected=self.date_collected)
        j.save()
    def __str__(self):
        return "{} at {}".format(self.title, self.organization)
| from ..models import Job
import datetime
class JobContainer():
    """Mutable accumulator for one scraped job posting; save() dedupes by
    URL, normalises, validates and persists the record as a Job model."""
    def __init__(self):
        # Every field defaults to None; validate() treats a leftover None
        # as "the parser forgot to set this field".
        self.organization = None
        self.title = None
        self.division = None
        self.date_posted = None
        self.date_closing = None
        self.date_collected = None
        self.url_detail = None
        self.salary_waged = None
        self.salary_amount = None
        self.region = None
    def is_unique(self):
        """ Checks whether job (denoted by URL) already exists in DB.
        Remember to use this function before doing any intense parsing operations.
        """
        if not self.url_detail:
            raise KeyError("Queried record uniqueness before detail URL set: {}".format(self))
        else:
            if len(Job.objects.filter(url_detail=self.url_detail)) == 0:
                return True
            else:
                # print("Job already exists in DB: {}".format(self.url_detail))
                return False
    def cleanup(self):
        """Normalise fields in place before validation/saving."""
        # NOTE(review): prefer `is None` over `== None` below.
        self.title = self.title.title() if self.title.isupper() else self.title
        self.salary_amount = 0 if self.salary_amount == None else self.salary_amount
        self.salary_waged = True if self.salary_amount < 5000 else False # totally arbitray amount
        self.date_collected = datetime.date.today()
    def validate(self):
        """Raise KeyError naming the first public attribute still None."""
        field_dict = self.__dict__
        attributes = {k:v for k, v in field_dict.items() if not k.startswith("_")}
        for k, v in attributes.items():
            if v == None:
                raise KeyError("Job {} was missing {}".format(self.url_detail, k))
    def save(self):
        """ Save job to DB, after final checks.
        """
        if not self.is_unique(): # failsafe in case we forgot to check this earlier.
            print("{} tried to save a job that is not unique!".format(self.organization))
            return
        self.cleanup()
        # NOTE(review): validate() can raise KeyError here, aborting the
        # whole run (the later revision catches it and skips the record).
        self.validate()
        print("Saved job to DB: {}".format(self))
        j = Job(organization=self.organization
                , title=self.title
                , division=self.division
                , date_posted=self.date_posted
                , date_closing=self.date_closing
                , url_detail=self.url_detail
                , salary_waged=self.salary_waged
                , salary_amount=self.salary_amount
                , region=self.region
                , date_collected = self.date_collected
                )
        j.save()
    def __str__(self):
        return "{} at {}".format(self.title, self.organization)
| Python | 0 |
f633df6bb8e0e84699db2f47178f4b402ccc07a8 | Fix `OverflowError`. | eventkit/utils/time.py | eventkit/utils/time.py | from datetime import datetime, timedelta
from timezone import timezone
# Rounding modes accepted by round_datetime().
ROUND_DOWN = 'ROUND_DOWN'
ROUND_NEAREST = 'ROUND_NEAREST'
ROUND_UP = 'ROUND_UP'
# Offset (in days) of each weekday from Monday; used when precision is a
# weekday name.
WEEKDAYS = {
    'MON': 0,
    'TUE': 1,
    'WED': 2,
    'THU': 3,
    'FRI': 4,
    'SAT': 5,
    'SUN': 6,
}
# Weekday precision constants, e.g. round_datetime(precision=MON).
MON = 'MON'
TUE = 'TUE'
WED = 'WED'
THU = 'THU'
FRI = 'FRI'
SAT = 'SAT'
SUN = 'SUN'
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST):
    """
    Round a datetime object to a time that matches the given precision.

    when (datetime), default now
        The datetime object to be rounded.

    precision (int, timedelta, str), default 60
        The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta
        object to which the datetime object should be rounded.

    rounding (str), default ROUND_NEAREST
        The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
    """
    when = when or timezone.now()
    # Weekday offset from Monday; 0 when precision is not a weekday name.
    weekday = WEEKDAYS.get(precision, WEEKDAYS['MON'])
    # Normalise precision to an integer number of seconds.
    if precision in WEEKDAYS:
        precision = int(timedelta(days=7).total_seconds())
    elif isinstance(precision, timedelta):
        precision = int(precision.total_seconds())

    # Get delta between the beginning of time and the given datetime object.
    # If precision is a weekday, the beginning of time must be that same day.
    when_min = when.min + timedelta(days=weekday)
    if timezone.is_aware(when):
        # It doesn't seem to be possible to localise the `min` datetime without
        # raising `OverflowError`, so create a timezone aware object manually.
        when_min = datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
    delta = when - when_min
    remainder = int(delta.total_seconds()) % precision

    # First round down and strip microseconds.
    when -= timedelta(seconds=remainder, microseconds=when.microsecond)

    # Then add precision to round up.
    # NOTE(review): `precision / 2` floor-divides on Python 2 ints —
    # confirm which division is intended for odd precisions.
    if rounding == ROUND_UP or (
            rounding == ROUND_NEAREST and remainder >= precision / 2):
        when += timedelta(seconds=precision)
    return when
| from datetime import timedelta
from timezone import timezone
# Rounding modes accepted by round_datetime().
ROUND_DOWN = 'ROUND_DOWN'
ROUND_NEAREST = 'ROUND_NEAREST'
ROUND_UP = 'ROUND_UP'
# Offset (in days) of each weekday from Monday.
WEEKDAYS = {
    'MON': 0,
    'TUE': 1,
    'WED': 2,
    'THU': 3,
    'FRI': 4,
    'SAT': 5,
    'SUN': 6,
}
# Weekday precision constants, e.g. round_datetime(precision=MON).
MON = 'MON'
TUE = 'TUE'
WED = 'WED'
THU = 'THU'
FRI = 'FRI'
SAT = 'SAT'
SUN = 'SUN'
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST):
    """
    Round a datetime object to a time that matches the given precision.

    when (datetime), default now
        The datetime object to be rounded.

    precision (int, timedelta, str), default 60
        The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta
        object to which the datetime object should be rounded.

    rounding (str), default ROUND_NEAREST
        The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
    """
    when = when or timezone.now()
    # Weekday offset from Monday; 0 when precision is not a weekday name.
    weekday = WEEKDAYS.get(precision, WEEKDAYS['MON'])
    if precision in WEEKDAYS:
        precision = int(timedelta(days=7).total_seconds())
    elif isinstance(precision, timedelta):
        precision = int(precision.total_seconds())

    # Get delta between the beginning of time and the given datetime object.
    # If precision is a weekday, the beginning of time must be that same day.
    when_min = when.min + timedelta(days=weekday)
    if timezone.is_aware(when):
        # NOTE(review): constructing via timezone.datetime here was later
        # replaced because localising `min` can raise OverflowError.
        when_min = \
            timezone.datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
    delta = when - when_min
    remainder = int(delta.total_seconds()) % precision

    # First round down and strip microseconds.
    when -= timedelta(seconds=remainder, microseconds=when.microsecond)

    # Then add precision to round up.
    if rounding == ROUND_UP or (
            rounding == ROUND_NEAREST and remainder >= precision / 2):
        when += timedelta(seconds=precision)
    return when
| Python | 0 |
09a6c3b5d860f8bbfafec9f5cdb4cef00cdae9c9 | Implement an additional test for handling exceptions in bake | tests/test_cookies.py | tests/test_cookies.py | # -*- coding: utf-8 -*-
import json
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'cookies:',
'*--template=TEMPLATE*',
])
def test_cookies_fixture(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_valid_fixture(cookies):
assert hasattr(cookies, 'bake')
assert callable(cookies.bake)
""")
# run pytest with the following cmd args
result = testdir.runpytest('-v')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_valid_fixture PASSED',
])
# make sure that that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_cookies_bake(testdir):
    """Programmatically create a **Cookiecutter** template and use `bake` to
    create a project from it.
    """
    template = testdir.tmpdir.ensure('cookiecutter-template', dir=True)
    template_config = {
        'repo_name': 'foobar',
        'short_description': 'Test Project'
    }
    template.join('cookiecutter.json').write(json.dumps(template_config))

    template_readme = '\n'.join([
        '{{cookiecutter.repo_name}}',
        '{% for _ in cookiecutter.repo_name %}={% endfor %}',
        '{{cookiecutter.short_description}}',
    ])

    repo = template.ensure('{{cookiecutter.repo_name}}', dir=True)
    repo.join('README.rst').write(template_readme)

    testdir.makepyfile("""
        def test_bake_project(cookies):
            result = cookies.bake(extra_context={'repo_name': 'helloworld'})

            assert result.exit_code == 0
            assert result.exception is None
            assert result.project.basename == 'helloworld'
            assert result.project.isdir()

        def test_bake_should_create_new_output(cookies):
            first_result = cookies.bake()
            assert first_result.project.dirname.endswith('bake00')

            second_result = cookies.bake()
            assert second_result.project.dirname.endswith('bake01')
    """)

    result = testdir.runpytest('-v', '--template={}'.format(template))

    # Assert that *both* inner tests passed; previously only the first one
    # was matched, so a failure in the second could go unnoticed.
    result.stdout.fnmatch_lines([
        '*::test_bake_project PASSED',
        '*::test_bake_should_create_new_output PASSED',
    ])
    assert result.ret == 0
def test_cookies_bake_should_handle_exception(testdir):
    """Programmatically create a **Cookiecutter** template and make sure that
    cookies.bake() handles exceptions that happen during project generation.

    We expect **Cookiecutter** to raise a `NonTemplatedInputDirException`.
    """
    template = testdir.tmpdir.ensure('cookiecutter-fail', dir=True)
    template_config = {
        'repo_name': 'foobar',
        'short_description': 'Test Project'
    }
    template.join('cookiecutter.json').write(json.dumps(template_config))

    # The input directory name is not templated, which makes Cookiecutter
    # raise during generation.
    template.ensure('cookiecutter.repo_name', dir=True)

    testdir.makepyfile("""
        def test_bake_should_fail(cookies):
            result = cookies.bake()

            assert result.exit_code == -1
            assert result.exception is not None
            assert result.project is None
    """)

    result = testdir.runpytest('-v', '--template={}'.format(template))
    result.stdout.fnmatch_lines([
        '*::test_bake_should_fail PASSED',
    ])
    # Also verify the inner suite's overall exit status, consistent with the
    # other tests in this module.
    assert result.ret == 0
| # -*- coding: utf-8 -*-
import json
def test_help_message(testdir):
    """The plugin's options appear in the `pytest --help` output."""
    result = testdir.runpytest('--help')
    # fnmatch_lines performs the assertion internally.
    result.stdout.fnmatch_lines([
        'cookies:',
        '*--template=TEMPLATE*',
    ])
def test_cookies_fixture(testdir):
    """Make sure that pytest accepts the `cookies` fixture."""
    # Write a temporary inner test module exercising the fixture.
    testdir.makepyfile("""
        def test_valid_fixture(cookies):
            assert hasattr(cookies, 'bake')
            assert callable(cookies.bake)
    """)

    run_result = testdir.runpytest('-v')
    # fnmatch_lines performs the assertion internally.
    run_result.stdout.fnmatch_lines(['*::test_valid_fixture PASSED'])
    # Exit code 0 means every inner test passed.
    assert run_result.ret == 0
def test_cookies_bake(testdir):
    """Programmatically create a **Cookiecutter** template and use `bake` to
    create a project from it.
    """
    template = testdir.tmpdir.ensure('cookiecutter-template', dir=True)
    template_config = {
        'repo_name': 'foobar',
        'short_description': 'Test Project'
    }
    template.join('cookiecutter.json').write(json.dumps(template_config))

    template_readme = '\n'.join([
        '{{cookiecutter.repo_name}}',
        '{% for _ in cookiecutter.repo_name %}={% endfor %}',
        '{{cookiecutter.short_description}}',
    ])

    repo = template.ensure('{{cookiecutter.repo_name}}', dir=True)
    repo.join('README.rst').write(template_readme)

    testdir.makepyfile("""
        def test_bake_project(cookies):
            result = cookies.bake(extra_context={'repo_name': 'helloworld'})

            assert result.exit_code == 0
            assert result.exception is None
            assert result.project.basename == 'helloworld'
            assert result.project.isdir()

        def test_bake_should_create_new_output(cookies):
            first_result = cookies.bake()
            assert first_result.project.dirname.endswith('bake00')

            second_result = cookies.bake()
            assert second_result.project.dirname.endswith('bake01')
    """)

    result = testdir.runpytest('-v', '--template={}'.format(template))

    # Assert that *both* inner tests passed; matching only the first pattern
    # lets a failure in test_bake_should_create_new_output go unnoticed.
    result.stdout.fnmatch_lines([
        '*::test_bake_project PASSED',
        '*::test_bake_should_create_new_output PASSED',
    ])
    assert result.ret == 0
| Python | 0.000001 |
3b4de1be81c7951ca064ff46e1f3e1ed95436ae3 | fix XSS vulnerability | django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py | django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py | # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""
from inspect import ismethod
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.html import escape
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def breadcrumb(context, label, viewname, *args):
    """
    Add link to list of breadcrumbs, usage:

    {% load bubbles_breadcrumbs %}
    {% breadcrumb "Home" "index" %}

    Remember to use it inside {% block %} with {{ block.super }} to get all
    parent breadcrumbs.

    :param label: Breadcrumb link label.
    :param viewname: Name of the view to link this breadcrumb to, or Model
                     instance with implemented get_absolute_url().
    :param args: Any arguments to view function.
    """
    request_meta = context['request'].META
    existing_crumbs = request_meta.get(CONTEXT_KEY, [])
    # Labels are escaped on the way in because render_breadcrumbs() marks
    # its final markup safe.
    request_meta[CONTEXT_KEY] = existing_crumbs + [(escape(label), viewname, args)]
    return ''
def render_breadcrumbs(context):
    """
    Render breadcrumbs html using twitter bootstrap css classes.

    Reads the per-request crumb list stored under CONTEXT_KEY by
    breadcrumb() and returns safe HTML (an empty string when no crumbs
    were registered).
    """
    links = []
    for (label, viewname, args) in context['request'].META.get(
            CONTEXT_KEY, []):
        if isinstance(viewname, Model) and hasattr(
                viewname, 'get_absolute_url') and ismethod(
                viewname.get_absolute_url):
            url = viewname.get_absolute_url()
        else:
            try:
                url = reverse(viewname=viewname, args=args)
            except NoReverseMatch:
                # Fall back to treating the viewname as a literal URL.
                url = viewname
        links.append((url, _(unicode(label)) if label else label))

    if not links:
        return ''

    # Build the markup in a list and join once instead of repeated
    # string concatenation.
    parts = ['<ul class="breadcrumb">']
    total = len(links)
    for (i, (url, label)) in enumerate(links, 1):
        parts.append('<li>')
        if total > 1 and i < total:
            # Escape the URL: when reverse() fails above, `url` is the raw
            # viewname string and must not be able to break out of the
            # href attribute in the mark_safe'd output.
            parts.append('<a href="%s">%s</a>' % (escape(url), label))
            parts.append(' <span class="divider">/</span>')
        else:
            parts.append(label)
        parts.append('</li>')
    parts.append('</ul>')
    return mark_safe(''.join(parts))
# Register both functions as simple template tags; takes_context=True gives
# them access to the current request via the template context.
register.simple_tag(takes_context=True)(breadcrumb)
register.simple_tag(takes_context=True)(render_breadcrumbs)
| # -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: l.mierzwa@gmail.com
"""
from inspect import ismethod
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def breadcrumb(context, label, viewname, *args):
    """
    Add link to list of breadcrumbs, usage:

    {% load bubbles_breadcrumbs %}
    {% breadcrumb "Home" "index" %}

    Remember to use it inside {% block %} with {{ block.super }} to get all
    parent breadcrumbs.

    :param label: Breadcrumb link label.
    :param viewname: Name of the view to link this breadcrumb to, or Model
                     instance with implemented get_absolute_url().
    :param args: Any arguments to view function.
    """
    # XSS fix: render_breadcrumbs() marks the final markup safe, so the
    # user-supplied label must be HTML-escaped when it is stored.
    from django.utils.html import escape

    context['request'].META[CONTEXT_KEY] = context['request'].META.get(
        CONTEXT_KEY, []) + [(escape(label), viewname, args)]
    return ''
def render_breadcrumbs(context):
    """
    Render breadcrumbs html using twitter bootstrap css classes.
    """
    # Resolve each stored (label, viewname, args) crumb to a URL.
    links = []
    for (label, viewname, args) in context['request'].META.get(
            CONTEXT_KEY, []):
        if isinstance(viewname, Model) and hasattr(
                viewname, 'get_absolute_url') and ismethod(
                viewname.get_absolute_url):
            # Model instances link to their own absolute URL.
            url = viewname.get_absolute_url()
        else:
            try:
                url = reverse(viewname=viewname, args=args)
            except NoReverseMatch:
                # Fall back to treating the viewname as a literal URL.
                url = viewname
        links.append((url, _(unicode(label)) if label else label))
    if not links:
        return ''
    # NOTE(review): `label` and `url` are interpolated unescaped into markup
    # that is marked safe below — this relies on callers escaping labels
    # before storing them; verify (potential XSS otherwise).
    ret = '<ul class="breadcrumb">'
    total = len(links)
    for (i, (url, label)) in enumerate(links, 1):
        ret += '<li>'
        if total > 1 and i < total:
            # Every crumb except the last is a link followed by a divider.
            ret += '<a href="%s">%s</a>' % (url, label)
            ret += ' <span class="divider">/</span>'
        else:
            ret += label
        ret += '</li>'
    ret += '</ul>'
    return mark_safe(ret)
# Register both functions as simple template tags; takes_context=True gives
# them access to the current request via the template context.
register.simple_tag(takes_context=True)(breadcrumb)
register.simple_tag(takes_context=True)(render_breadcrumbs)
| Python | 0 |
41ea0dd8c48ef8a336422482e9bbd1911bb7e168 | Make that it works in 90% of the cases. 3:30. | Commitment.py | Commitment.py | import sublime
import sublime_plugin
import HTMLParser
from commit import Commitment
whatthecommit = 'http://whatthecommit.com/'
randomMessages = Commitment()
class CommitmentToClipboardCommand(sublime_plugin.WindowCommand):
    """Copy a random commit message to the system clipboard."""

    def run(self):
        # `randomMessages` is the module-level Commitment() client;
        # presumably it fetches from whatthecommit.com (see the permalink
        # built below) — TODO confirm against the `commit` module.
        commit = randomMessages.get()
        # Strip raw newlines, turn <br/> markers into newlines, then decode
        # HTML entities from the fetched message.
        message = HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('\n','').replace('<br/>', '\n'))
        message_hash = commit.get('message_hash', '')
        if message:
            # Python 2 print statement: log the message and its permalink.
            print 'Commitment: ' + '\n' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
            sublime.set_clipboard(message)
class CommitmentToStatusBarCommand(sublime_plugin.WindowCommand):
    """Show a random commit message in the Sublime status bar."""

    def run(self):
        commit = randomMessages.get()
        # Strip raw newlines, turn <br/> markers into newlines, then decode
        # HTML entities from the fetched message.
        message = HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('\n','').replace('<br/>', '\n'))
        message_hash = commit.get('message_hash', '')
        if message:
            # Python 2 print statement: log the message and its permalink.
            print 'Commitment: ' + '\n' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
            # NOTE(review): the trailing "| import sublime" below is foreign
            # residue fused onto this line in the source; preserved as-is.
            sublime.status_message(message) | import sublime
import sublime_plugin
from commit import Commitment
whatthecommit = 'http://whatthecommit.com/'
randomMessages = Commitment()
class CommitmentToClipboardCommand(sublime_plugin.WindowCommand):
    """Copy a random commit message to the system clipboard."""

    def run(self):
        # `randomMessages` is the module-level Commitment() client;
        # presumably it fetches from whatthecommit.com (see the permalink
        # built below) — TODO confirm against the `commit` module.
        commit = randomMessages.get()
        message = commit.get('message', '')
        message_hash = commit.get('message_hash', '')
        if message:
            # Python 2 print statement: log the message and its permalink.
            print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
            sublime.set_clipboard(message)
class CommitmentToStatusBarCommand(sublime_plugin.WindowCommand):
    """Show a random commit message in the Sublime status bar."""

    def run(self):
        commit = randomMessages.get()
        message = commit.get('message', '')
        message_hash = commit.get('message_hash', '')
        if message:
            # Python 2 print statement: log the message and its permalink.
            print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
            sublime.status_message(message)
81c32c9bc0868f7ccd764d8432fd46ccb7e6a8ef | Use get instead | paystackapi/tests/test_transfer.py | paystackapi/tests/test_transfer.py | import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
    """Tests for the Transfer API wrapper with HTTP traffic mocked out."""

    @httpretty.activate
    def test_initiate(self):
        """Method defined to test transfer initiation."""
        mock_body = ('{"status": true, '
                     '"message": "Transfer requires OTP to continue"}')
        httpretty.register_uri(
            httpretty.POST,
            self.endpoint_url("/transfer"),
            content_type='text/json',
            body=mock_body,
            status=201,
        )

        api_response = Transfer.initiate(
            source="balance",
            reason="Calm down",
            amount="3794800",
            recipient="RCP_gx2wn530m0i3w3m",
        )
        self.assertTrue(api_response['status'])

    @httpretty.activate
    def test_list(self):
        """Method defined to test transfer list."""
        mock_body = '{"status": true, "message": "Transfers retrieved"}'
        httpretty.register_uri(
            httpretty.GET,
            self.endpoint_url("/transfer"),
            content_type='text/json',
            body=mock_body,
            status=201,
        )

        api_response = Transfer.list(
            perPage=3,
            page=1
        )
        self.assertTrue(api_response['status'])
| import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
    """Tests for the Transfer API wrapper with HTTP traffic mocked out."""

    @httpretty.activate
    def test_initiate(self):
        """Method defined to test transfer initiation."""
        httpretty.register_uri(
            httpretty.POST,
            self.endpoint_url("/transfer"),
            content_type='text/json',
            body='{"status": true, "message": "Transfer requires OTP to continue"}',
            status=201,
        )

        response = Transfer.initiate(
            source="balance",
            reason="Calm down",
            amount="3794800",
            recipient="RCP_gx2wn530m0i3w3m",
        )
        self.assertTrue(response['status'])

    @httpretty.activate
    def test_list(self):
        """Method defined to test transfer list."""
        # Listing transfers is a retrieval done with HTTP GET; registering
        # the mock for POST leaves the client's GET request unmatched.
        httpretty.register_uri(
            httpretty.GET,
            self.endpoint_url("/transfer"),
            content_type='text/json',
            body='{"status": true, "message": "Transfers retrieved"}',
            status=201,
        )

        response = Transfer.list(
            perPage=3,
            page=1
        )
        self.assertTrue(response['status'])
| Python | 0 |
9a425bae3af8cca7ad8be938d7f698ef65f42f3a | Update load_groups_pipeline.py (#210) | google/cloud/security/inventory/pipelines/load_groups_pipeline.py | google/cloud/security/inventory/pipelines/load_groups_pipeline.py | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load GSuite Groups into Inventory."""
import json
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadGroupsPipeline(base_pipeline.BasePipeline):
    """Pipeline to load groups data into Inventory."""

    RESOURCE_NAME = 'groups'

    def __init__(self, cycle_timestamp, configs, admin_client, dao):
        """Constructor for the data pipeline.

        Args:
            cycle_timestamp: String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
            configs: Dictionary of configurations.
            admin_client: Admin API client.
            dao: Data access object.
        """
        super(LoadGroupsPipeline, self).__init__(
            cycle_timestamp, configs, admin_client, dao)

    def _transform(self, groups_map):
        """Yield an iterator of loadable groups.

        Args:
            groups_map: A list of group objects from the Admin SDK.

        Yields:
            Per-group dictionaries ready to be loaded into Inventory.
        """
        for raw in groups_map:
            yield {
                'group_id': raw.get('id'),
                'group_email': raw.get('email'),
                'group_kind': raw.get('kind'),
                'direct_member_count': raw.get('directMembersCount'),
                'raw_group': json.dumps(raw),
            }

    def _retrieve(self):
        """Retrieve the groups from GSuite.

        Returns:
            A list of group list objects from the Admin SDK.

        Raises:
            LoadDataPipelineError: An error with loading data has occurred.
        """
        try:
            return self.api_client.get_groups()
        except api_errors.ApiExecutionError as e:
            raise inventory_errors.LoadDataPipelineError(e)

    def run(self):
        """Runs the load GSuite account groups pipeline."""
        groups_map = self._retrieve()
        if not isinstance(groups_map, list):
            LOGGER.warn('No groups retrieved.')
            return
        self._load(self.RESOURCE_NAME, self._transform(groups_map))
        self._get_loaded_count()
| # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load GSuite Groups into Inventory."""
import json
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadGroupsPipeline(base_pipeline.BasePipeline):
    """Pipeline to load groups data into Inventory."""

    RESOURCE_NAME = 'groups'

    def __init__(self, cycle_timestamp, configs, admin_client, dao):
        """Constructor for the data pipeline.

        Args:
            cycle_timestamp: String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
            configs: Dictionary of configurations.
            admin_client: Admin API client.
            dao: Data access object.

        Returns:
            None
        """
        super(LoadGroupsPipeline, self).__init__(
            cycle_timestamp, configs, admin_client, dao)

    def _transform(self, groups_map):
        """Yield an iterator of loadable groups.

        Args:
            groups_map: A list of group objects from the Admin SDK.

        Yields:
            An iterable of loadable groups as a per-group dictionary.
        """
        for group in groups_map:
            # Use .get() instead of direct key access: not every field
            # (e.g. directMembersCount) is present on every group returned
            # by the Admin SDK, and a missing key must not raise KeyError
            # and abort the whole pipeline.
            yield {'group_id': group.get('id'),
                   'group_email': group.get('email'),
                   'group_kind': group.get('kind'),
                   'direct_member_count': group.get('directMembersCount'),
                   'raw_group': json.dumps(group)}

    def _retrieve(self):
        """Retrieve the groups from GSuite.

        Returns:
            A list of group list objects from the Admin SDK.

        Raises:
            LoadDataPipelineError: An error with loading data has occurred.
        """
        try:
            return self.api_client.get_groups()
        except api_errors.ApiExecutionError as e:
            raise inventory_errors.LoadDataPipelineError(e)

    def run(self):
        """Runs the load GSuite account groups pipeline."""
        groups_map = self._retrieve()
        if isinstance(groups_map, list):
            loadable_groups = self._transform(groups_map)
            self._load(self.RESOURCE_NAME, loadable_groups)
            self._get_loaded_count()
        else:
            LOGGER.warn('No groups retrieved.')
| Python | 0 |
cc3ca68df357572767280bdddf332cfd430e9203 | Enhance the test to avoid celery internal queues in rabbitmq status. | oneflow/base/utils/stats/rabbitmq.py | oneflow/base/utils/stats/rabbitmq.py | # -*- coding: utf-8 -*-
u"""
Copyright 2012-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
import pyrabbit
from django.conf import settings
# from sparks.fabric import is_localhost
from oneflow.base.utils.http import split_url
LOGGER = logging.getLogger(__name__)
# AMQP_RE = re.compile(ur'amqp://(?P<username>[^:]+):(?P<password>\w+)@(?P<hostname_and_port>[^/]+)/(?P<vhost>[^/]+)', re.I) # NOQA
def get_rabbitmq_client_args_from_broker_url():
    """ Decompose BROKER_URL into a tuple suitable for rabbitmq.Client(). """
    proto, host_and_port, vhost = split_url(settings.BROKER_URL)

    credentials_and_host = host_and_port.rsplit('@', 1)
    if len(credentials_and_host) > 1:
        # "user:pass@host:port" form.
        username, password = credentials_and_host[0].split(':', 1)
        host_and_port = credentials_and_host[1]
    else:
        # No credentials in the URL: use RabbitMQ's defaults.
        username, password = 'guest', 'guest'

    if not vhost:
        vhost = '/'

    # Target the management API port rather than the AMQP port.
    host_and_port = host_and_port.replace(':5672', ':55672')
    return [host_and_port, username, password, vhost]
# Module-level init: build a management-API client when the broker is AMQP.
if settings.BROKER_URL.lower().startswith('amqp://'):
    rabbitmq_params = get_rabbitmq_client_args_from_broker_url()
    rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])

    try:
        rabbitmq_client.is_alive()

    # Narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt raised during module import.
    except Exception:
        # RabbitMQ 3.x moved the management API from :55672 to :15672;
        # retry on the newer port when the first probe fails.
        rabbitmq_params[0] = rabbitmq_params[0].replace(':55672', ':15672')
        rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])

    rabbitmq_vhost = rabbitmq_params[-1]

else:
    rabbitmq_client = None
def rabbitmq_queues():
    """ Return rabbitMQ client get_queues() result, or {}.

    ``{}`` is when RabbitMQ is not available, eg. ``BROKER_URL`` doesn't
    start with ``amqp://``.
    """
    if rabbitmq_client is None:
        return {}

    try:
        queues = rabbitmq_client.get_queues(rabbitmq_vhost)

    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # not silently converted into an empty result.
    except Exception:
        LOGGER.exception(u'Could not connect to RabbitMQ API. '
                         u'Is the web interface plugin enabled?')
        return {}

    # Hide anonymous (amq.gen*) queues and celery's internal queues.
    return [
        q for q in sorted(queues, key=lambda q: q['name'])
        if not (
            q['name'].startswith('amq.gen')
            or 'celery' in q['name']
        )
    ]
| # -*- coding: utf-8 -*-
u"""
Copyright 2012-2014 Olivier Cortès <oc@1flow.io>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
import pyrabbit
from django.conf import settings
# from sparks.fabric import is_localhost
from oneflow.base.utils.http import split_url
LOGGER = logging.getLogger(__name__)
# AMQP_RE = re.compile(ur'amqp://(?P<username>[^:]+):(?P<password>\w+)@(?P<hostname_and_port>[^/]+)/(?P<vhost>[^/]+)', re.I) # NOQA
def get_rabbitmq_client_args_from_broker_url():
    """ Decompose BROKER_URL into a tuple suitable for rabbitmq.Client(). """
    proto, host_and_port, vhost = split_url(settings.BROKER_URL)

    parts = host_and_port.rsplit('@', 1)
    if len(parts) > 1:
        # Credentials were embedded as "user:pass@host:port".
        username, password = parts[0].split(':', 1)
        host_and_port = parts[1]
    else:
        username, password = 'guest', 'guest'

    if not vhost:
        vhost = '/'

    # Swap the AMQP port for the management API port.
    host_and_port = host_and_port.replace(':5672', ':55672')

    return [host_and_port, username, password, vhost]
# Module-level init: build a management-API client when the broker is AMQP.
if settings.BROKER_URL.lower().startswith('amqp://'):
    rabbitmq_params = get_rabbitmq_client_args_from_broker_url()
    rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])

    try:
        rabbitmq_client.is_alive()

    # Narrowed from a bare ``except:``, which would also swallow
    # SystemExit/KeyboardInterrupt raised during module import.
    except Exception:
        # RabbitMQ 3.x moved the management API from :55672 to :15672;
        # retry on the newer port when the first probe fails.
        rabbitmq_params[0] = rabbitmq_params[0].replace(':55672', ':15672')
        rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])

    rabbitmq_vhost = rabbitmq_params[-1]

else:
    rabbitmq_client = None
def rabbitmq_queues():
    """ Return rabbitMQ client get_queues() result, or {}.

    ``{}`` is when RabbitMQ is not available, eg. ``BROKER_URL`` doesn't
    start with ``amqp://``.
    """
    if rabbitmq_client is None:
        return {}

    try:
        queues = rabbitmq_client.get_queues(rabbitmq_vhost)

    # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt are
    # not silently converted into an empty result.
    except Exception:
        LOGGER.exception(u'Could not connect to RabbitMQ API. '
                         u'Is the web interface plugin enabled?')
        return {}

    # Hide anonymous (amq.gen*) queues and celery-prefixed queues.
    return [q for q in sorted(queues, key=lambda q: q['name'])
            if not (q['name'].startswith('amq.gen')
                    or q['name'].startswith('celery'))]
| Python | 0 |
d9001013a070176756f49166552b7dbb5fb6aeb0 | Fix plugin rendering in tests | tests/test_plugins.py | tests/test_plugins.py | # -*- coding: utf-8 -*-
import pytest
from cms.api import add_plugin
from cms.models import Placeholder
from cms.plugin_rendering import ContentRenderer
from cmsplugin_articles_ai.cms_plugins import ArticleList, TagFilterArticleList, TagList
from cmsplugin_articles_ai.factories import PublicArticleFactory, TagFactory
def create_articles(amount):
    """Create *amount* published articles via the factory."""
    for _unused in range(amount):
        PublicArticleFactory()
def init_content_renderer(request=None):
    """Return a `ContentRenderer` bound to *request* (which may be None)."""
    return ContentRenderer(request)
def init_plugin(plugin_type, lang="en", **plugin_data):
    """Attach a new plugin of *plugin_type* to a fresh test placeholder.

    Returns the created plugin instance.
    """
    test_placeholder = Placeholder.objects.create(slot="test")
    return add_plugin(test_placeholder, plugin_type, lang, **plugin_data)
@pytest.mark.django_db
def test_article_list_plugin_article_count():
    """
    Test article list plugin inserts correct amount of articles into
    the context. Amount should be the same as defined in plugin settings.
    """
    create_articles(10)
    plugin = init_plugin(ArticleList, article_amount=3)
    instance = plugin.get_plugin_class_instance()
    rendered_context = instance.render({}, plugin, None)
    assert len(rendered_context["articles"]) == 3
@pytest.mark.django_db
@pytest.mark.parametrize("language_filter", ["", "en", "fi"])
def test_article_list_plugin_language_filter(language_filter):
    """
    Test article list plugin filters articles according to language filter.
    """
    article_fi = PublicArticleFactory(language="fi")
    article_en = PublicArticleFactory(language="en")
    plugin = init_plugin(ArticleList, language_filter=language_filter)
    plugin_instance = plugin.get_plugin_class_instance()
    context = plugin_instance.render({}, plugin, None)
    shown = context["articles"]
    # An empty filter keeps every language; otherwise only the match stays.
    assert (article_en in shown) == (language_filter in ("", "en"))
    assert (article_fi in shown) == (language_filter in ("", "fi"))
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_article_list_plugin_html():
    """Rendered article list plugin HTML must contain the article title."""
    plugin = init_plugin(ArticleList)
    article = PublicArticleFactory()
    content_renderer = init_content_renderer()
    rendered = content_renderer.render_plugin(
        instance=plugin, context={}, placeholder=plugin.placeholder)
    assert article.title in rendered
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_article_list_plugin_html():
    """Rendered tag-filtered list HTML must contain the tagged article."""
    tag = TagFactory()
    tagged_article = PublicArticleFactory(tags=[tag])
    plugin = init_plugin(TagFilterArticleList)
    plugin.tags.add(tag)
    rendered = init_content_renderer().render_plugin(
        instance=plugin, context={}, placeholder=plugin.placeholder)
    assert tagged_article.title in rendered
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_list_plugin_html():
    """Rendered tag list plugin HTML must contain the tag name."""
    plugin = init_plugin(TagList)
    tag = TagFactory()
    rendered = init_content_renderer().render_plugin(
        instance=plugin, context={}, placeholder=plugin.placeholder)
    assert tag.name in rendered
| # -*- coding: utf-8 -*-
import pytest
from cms.api import add_plugin
from cms.models import Placeholder
from cmsplugin_articles_ai.cms_plugins import ArticleList, TagFilterArticleList, TagList
from cmsplugin_articles_ai.factories import PublicArticleFactory, TagFactory
def create_articles(amount):
    """Generate *amount* published articles using the factory."""
    for _article_index in range(amount):
        PublicArticleFactory()
def init_plugin(plugin_type, lang="en", **plugin_data):
    """Create a plugin of *plugin_type* attached to a fresh placeholder.

    Returns the created plugin instance.
    """
    placeholder = Placeholder.objects.create(slot="test")
    return add_plugin(placeholder, plugin_type, lang, **plugin_data)
@pytest.mark.django_db
def test_article_list_plugin_article_count():
    """
    Test article list plugin inserts correct amount of articles into
    the context. Amount should be the same as defined in plugin settings.
    """
    create_articles(10)
    plugin = init_plugin(ArticleList, article_amount=3)
    rendered_context = plugin.get_plugin_class_instance().render({}, plugin, None)
    assert len(rendered_context["articles"]) == 3
@pytest.mark.django_db
@pytest.mark.parametrize("language_filter", ["", "en", "fi"])
def test_article_list_plugin_language_filter(language_filter):
    """
    Test article list plugin filters articles according to language filter.
    """
    article_fi = PublicArticleFactory(language="fi")
    article_en = PublicArticleFactory(language="en")
    plugin = init_plugin(ArticleList, language_filter=language_filter)
    rendered_context = plugin.get_plugin_class_instance().render({}, plugin, None)
    shown = rendered_context["articles"]
    # Empty filter shows everything; otherwise only the matching language.
    assert (article_fi in shown) == (language_filter in ("", "fi"))
    assert (article_en in shown) == (language_filter in ("", "en"))
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_article_list_plugin_html():
    """Rendered article list plugin HTML must contain the article title."""
    plugin = init_plugin(ArticleList)
    article = PublicArticleFactory()
    rendered = plugin.render_plugin({})
    assert article.title in rendered
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_article_list_plugin_html():
    """Rendered tag-filtered list HTML must contain the tagged article."""
    tag = TagFactory()
    tagged_article = PublicArticleFactory(tags=[tag])
    plugin = init_plugin(TagFilterArticleList)
    plugin.tags.add(tag)
    rendered = plugin.render_plugin({})
    assert tagged_article.title in rendered
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_list_plugin_html():
    """Rendered tag list plugin HTML must contain the tag name."""
    plugin = init_plugin(TagList)
    tag = TagFactory()
    rendered = plugin.render_plugin({})
    assert tag.name in rendered
| Python | 0 |
dba62be0d8b87a66b415460a07f453536209b08e | change user api prefix to user/ from users/ | dubdubdub/api_urls.py | dubdubdub/api_urls.py | from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from schools.api_views import SchoolsList, SchoolsInfo, SchoolInfo, Districts, \
SchoolsDiseInfo, SchoolDemographics, SchoolProgrammes, SchoolFinance, \
Blocks, Clusters, BlocksInsideDistrict, ClustersInsideDistrict, ClustersInsideBlock, \
DistrictOfSchool, BlockOfSchool, ClusterOfSchool, PincodeOfSchool, AssemblyOfSchool, \
ParliamentOfSchool
from users.api_views import TestAuthenticatedView
# URL routes for the REST API. Pattern order matters: Django resolves the
# first matching regex.
urlpatterns = patterns('',
    # Caches the results of the url for 60 seconds
    #url(r'^schools/list', cache_page(60)(SchoolsList.as_view()), name='api_schools_list'),
    url(r'^$', 'schools.api_views.api_root', name='api_root'),
    # School listing and per-school detail endpoints.
    url(r'^schools/list$', SchoolsList.as_view(), name='api_schools_list'),
    url(r'^schools/info$', SchoolsInfo.as_view(), name='api_schools_info'),
    url(r'^schools/dise/(?P<year>[0-9\-]*)$', SchoolsDiseInfo.as_view(), name='api_schools_dise'),
    url(r'^schools/school/(?P<pk>[0-9]+)$', SchoolInfo.as_view(), name='api_school_info'),
    url(r'^schools/school/(?P<pk>[0-9]+)/demographics$', SchoolDemographics.as_view(), name='api_school_demo'),
    url(r'^schools/school/(?P<pk>[0-9]+)/programmes$', SchoolProgrammes.as_view(), name='api_school_prog'),
    url(r'^schools/school/(?P<pk>[0-9]+)/finance$', SchoolFinance.as_view(), name='api_school_finance'),
    # Administrative boundary hierarchy: district -> block -> cluster.
    url(r'^boundary/districts$', Districts.as_view(), name="api_districts"),
    url(r'^boundary/districts/(?P<id>[0-9]+)/blocks$', BlocksInsideDistrict.as_view(), name="api_districts_block"),
    url(r'^boundary/districts/(?P<id>[0-9]+)/clusters$', ClustersInsideDistrict.as_view(), name="api_districts_cluster"),
    url(r'^boundary/blocks$', Blocks.as_view(), name="api_blocks"),
    url(r'^boundary/blocks/(?P<id>[0-9]+)/clusters$', ClustersInsideBlock.as_view(), name="api_blocks_clusters"),
    url(r'^boundary/clusters$', Clusters.as_view(), name="api_clusters"),
    # Reverse geo lookups for a given school primary key.
    url(r'^geo/district/(?P<pk>[0-9]+)$', DistrictOfSchool.as_view(), name="api_school_district"),
    url(r'^geo/block/(?P<pk>[0-9]+)$', BlockOfSchool.as_view(), name="api_school_block"),
    url(r'^geo/cluster/(?P<pk>[0-9]+)$', ClusterOfSchool.as_view(), name="api_school_cluster"),
    url(r'^geo/pincode/(?P<pk>[0-9]+)$', PincodeOfSchool.as_view(), name="api_school_pincode"),
    url(r'^geo/assembly/(?P<pk>[0-9]+)$', AssemblyOfSchool.as_view(), name="api_school_assembly"),
    url(r'^geo/parliament/(?P<pk>[0-9]+)$', ParliamentOfSchool.as_view(), name="api_school_parliament"),
    # User account endpoints, mounted under the singular "user/" prefix.
    url('^user/signup$', 'users.api_views.signup', name='api_signup'),
    url('^user/signin$', 'users.api_views.signin', name='api_signin'),
    url('^user/signout$', 'users.api_views.signout', name='api_signout'),
    url('^user/test_authenticated', TestAuthenticatedView.as_view(), name='api_test_authenticated'),
)
| from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from schools.api_views import SchoolsList, SchoolsInfo, SchoolInfo, Districts, \
SchoolsDiseInfo, SchoolDemographics, SchoolProgrammes, SchoolFinance, \
Blocks, Clusters, BlocksInsideDistrict, ClustersInsideDistrict, ClustersInsideBlock, \
DistrictOfSchool, BlockOfSchool, ClusterOfSchool, PincodeOfSchool, AssemblyOfSchool, \
ParliamentOfSchool
from users.api_views import TestAuthenticatedView
urlpatterns = patterns('',
# Caches the results of the url for 60 seconds
#url(r'^schools/list', cache_page(60)(SchoolsList.as_view()), name='api_schools_list'),
url(r'^$', 'schools.api_views.api_root', name='api_root'),
url(r'^schools/list$', SchoolsList.as_view(), name='api_schools_list'),
url(r'^schools/info$', SchoolsInfo.as_view(), name='api_schools_info'),
url(r'^schools/dise/(?P<year>[0-9\-]*)$', SchoolsDiseInfo.as_view(), name='api_schools_dise'),
url(r'^schools/school/(?P<pk>[0-9]+)$', SchoolInfo.as_view(), name='api_school_info'),
url(r'^schools/school/(?P<pk>[0-9]+)/demographics$', SchoolDemographics.as_view(), name='api_school_demo'),
url(r'^schools/school/(?P<pk>[0-9]+)/programmes$', SchoolProgrammes.as_view(), name='api_school_prog'),
url(r'^schools/school/(?P<pk>[0-9]+)/finance$', SchoolFinance.as_view(), name='api_school_finance'),
url(r'^boundary/districts$', Districts.as_view(), name="api_districts"),
url(r'^boundary/districts/(?P<id>[0-9]+)/blocks$', BlocksInsideDistrict.as_view(), name="api_districts_block"),
url(r'^boundary/districts/(?P<id>[0-9]+)/clusters$', ClustersInsideDistrict.as_view(), name="api_districts_cluster"),
url(r'^boundary/blocks$', Blocks.as_view(), name="api_blocks"),
url(r'^boundary/blocks/(?P<id>[0-9]+)/clusters$', ClustersInsideBlock.as_view(), name="api_blocks_clusters"),
url(r'^boundary/clusters$', Clusters.as_view(), name="api_clusters"),
url(r'^geo/district/(?P<pk>[0-9]+)$', DistrictOfSchool.as_view(), name="api_school_district"),
url(r'^geo/block/(?P<pk>[0-9]+)$', BlockOfSchool.as_view(), name="api_school_block"),
url(r'^geo/cluster/(?P<pk>[0-9]+)$', ClusterOfSchool.as_view(), name="api_school_cluster"),
url(r'^geo/pincode/(?P<pk>[0-9]+)$', PincodeOfSchool.as_view(), name="api_school_pincode"),
url(r'^geo/assembly/(?P<pk>[0-9]+)$', AssemblyOfSchool.as_view(), name="api_school_assembly"),
url(r'^geo/parliament/(?P<pk>[0-9]+)$', ParliamentOfSchool.as_view(), name="api_school_parliament"),
url('^users/signup$', 'users.api_views.signup', name='api_signup'),
url('^users/signin$', 'users.api_views.signin', name='api_signin'),
url('^users/signout$', 'users.api_views.signout', name='api_signout'),
url('^users/test_authenticated', TestAuthenticatedView.as_view(), name='api_test_authenticated'),
)
| Python | 0 |
88d56e2857f09223175e9f845aebb496c143d08b | check for gl errors in sampler tests | tests/test_sampler.py | tests/test_sampler.py | import unittest
import moderngl
from common import get_context
def checkerror(func):
def wrapper(*args, **kwargs):
_ = get_context().error
func(*args, **kwargs)
err = get_context().error
assert err == 'GL_NO_ERROR', "Error: %s" % err
return wrapper
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ctx = get_context()
@checkerror
def test_create(self):
sampler = self.ctx.sampler()
sampler.use(location=0)
sampler.clear(location=0)
@checkerror
def test_defaults(self):
sampler = self.ctx.sampler()
self.assertEqual(sampler.anisotropy, 1.0)
self.assertTrue(sampler.repeat_x)
self.assertTrue(sampler.repeat_y)
self.assertTrue(sampler.repeat_z)
self.assertEqual(sampler.filter, (moderngl.LINEAR, moderngl.LINEAR))
self.assertEqual(sampler.compare_func, '?')
self.assertEqual(sampler.border_color, (0.0, 0.0, 0.0, 0.0))
self.assertEqual(sampler.min_lod, -1000.0)
self.assertEqual(sampler.max_lod, 1000.0)
@checkerror
def test_prop_changes(self):
sampler = self.ctx.sampler()
# Change values
sampler.anisotropy = self.ctx.max_anisotropy
sampler.filter = (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST)
sampler.compare_func = "<="
self.assertEqual(sampler.anisotropy, self.ctx.max_anisotropy)
self.assertEqual(sampler.filter, (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST))
self.assertEqual(sampler.compare_func, "<=")
# Ensure repeat parameters are set correctly
sampler.repeat_x = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, True, True))
sampler.repeat_y = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, True))
sampler.repeat_z = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, False))
@checkerror
def test_border_color(self):
sampler = self.ctx.sampler()
# Ensure border color values are set correctly
colors = [
(1.0, 0.0, 0.0, 0.0),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
]
for color in colors:
sampler.border_color = color
self.assertEqual(sampler.border_color, color)
@checkerror
def test_lod(self):
sampler = self.ctx.sampler()
sampler.min_lod = 0.0
self.assertEqual(sampler.min_lod, 0.0)
sampler.max_lod = 500.0
self.assertEqual(sampler.max_lod, 500.0)
@checkerror
def test_clear_samplers(self):
self.ctx.clear_samplers(start=0, end=5)
self.ctx.clear_samplers(start=5, end=10)
self.ctx.clear_samplers(start=10, end=100)
| import unittest
import moderngl
from common import get_context
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ctx = get_context()
def test_attributes(self):
sampler = self.ctx.sampler()
# Default values
self.assertEqual(sampler.anisotropy, 1.0)
self.assertTrue(sampler.repeat_x)
self.assertTrue(sampler.repeat_y)
self.assertTrue(sampler.repeat_z)
self.assertEqual(sampler.filter, (moderngl.LINEAR, moderngl.LINEAR))
self.assertEqual(sampler.compare_func, '?')
self.assertEqual(sampler.border_color, (0.0, 0.0, 0.0, 0.0))
# Change values
sampler.anisotropy = self.ctx.max_anisotropy
sampler.filter = (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST)
sampler.compare_func = "<="
self.assertEqual(sampler.anisotropy, self.ctx.max_anisotropy)
self.assertEqual(sampler.filter, (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST))
self.assertEqual(sampler.compare_func, "<=")
# Ensure repeat parameters are set correctly
sampler.repeat_x = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, True, True))
sampler.repeat_y = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, True))
sampler.repeat_z = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, False))
# Ensure border color values are set correctly
colors = [
(1.0, 0.0, 0.0, 0.0),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
]
for color in colors:
sampler.border_color = color
self.assertEqual(sampler.border_color, color)
# LOD
self.assertEqual(sampler.min_lod, -1000.0)
self.assertEqual(sampler.max_lod, 1000.0)
sampler.min_lod = 0.0
self.assertEqual(sampler.min_lod, 0.0)
sampler.max_lod = 500.0
self.assertEqual(sampler.max_lod, 500.0)
def test_clear_samplers(self):
self.ctx.clear_samplers(start=0, end=5)
self.ctx.clear_samplers(start=5, end=10)
self.ctx.clear_samplers(start=10, end=100)
| Python | 0 |
aeb2aaa106d7b37e0c9a3fc8a71364d79b00346d | Remove some debugging output from a migration | uk_results/migrations/0030_populate_postresult_post_election.py | uk_results/migrations/0030_populate_postresult_post_election.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations, models
def set_post_election_from_post(apps, schema_editor):
"""
This is far from ideal. Try to guess the PostExtraElection
that this PostResult relates to. This will have to be done by looking
and the related memberships and assuming they're correct (sometimes they
won't be, and that will have to be fixed manually later).
"""
PostResult = apps.get_model('uk_results', 'PostResult')
PostExtraElection = apps.get_model('candidates', 'PostExtraElection')
qs = PostResult.objects.all().select_related('post__extra')
for post_result in qs:
pee = None
elections = post_result.post.extra.elections.all()
if not elections.exists():
raise ValueError("Post with no elections found.")
if elections.count() == 1:
# This is an easy case – this post only has one known election
pee = PostExtraElection.objects.get(
election=elections.first(),
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
if not post_result.result_sets.exists():
# There are no results sets for this post_result
# so we can just delete it
post_result.delete()
continue
result_sets_by_election = defaultdict(list)
# Work out how many elections we have results for.
# If it's only 1, then use that one
for result_set in post_result.result_sets.all():
for candidate_result in result_set.candidate_results.all():
this_election = candidate_result.membership.extra.election
result_sets_by_election[this_election].append(result_set)
if len(set(result_sets_by_election.keys())) == 1:
election = result_sets_by_election.keys()[0]
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
# We have results for more than one election, but only
# a single PostResult object.
# Split the result_sets up in to a new PostResult per election
for election, result_sets in result_sets_by_election.items():
result_sets = set(result_sets)
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
pr = PostResult.objects.create(
post_election=pee,
post=post_result.post,
confirmed=post_result.confirmed,
confirmed_resultset=post_result.confirmed_resultset
)
for result_set in result_sets:
result_set.post_result = pr
result_set.save()
post_result.delete()
class Migration(migrations.Migration):
dependencies = [
('uk_results', '0029_add_postresult_post_election'),
]
operations = [
migrations.RunPython(set_post_election_from_post),
]
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations, models
def set_post_election_from_post(apps, schema_editor):
"""
This is far from ideal. Try to guess the PostExtraElection
that this PostResult relates to. This will have to be done by looking
and the related memberships and assuming they're correct (sometimes they
won't be, and that will have to be fixed manually later).
"""
PostResult = apps.get_model('uk_results', 'PostResult')
PostExtraElection = apps.get_model('candidates', 'PostExtraElection')
print(PostResult.objects.all().count())
qs = PostResult.objects.all().select_related('post__extra')
for post_result in qs:
pee = None
elections = post_result.post.extra.elections.all()
if not elections.exists():
raise ValueError("Post with no elections found.")
if elections.count() == 1:
# This is an easy case – this post only has one known election
pee = PostExtraElection.objects.get(
election=elections.first(),
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
if not post_result.result_sets.exists():
# There are no results sets for this post_result
# so we can just delete it
post_result.delete()
continue
result_sets_by_election = defaultdict(list)
# Work out how many elections we have results for.
# If it's only 1, then use that one
for result_set in post_result.result_sets.all():
for candidate_result in result_set.candidate_results.all():
this_election = candidate_result.membership.extra.election
result_sets_by_election[this_election].append(result_set)
if len(set(result_sets_by_election.keys())) == 1:
election = result_sets_by_election.keys()[0]
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
# We have results for more than one election, but only
# a single PostResult object.
# Split the result_sets up in to a new PostResult per election
for election, result_sets in result_sets_by_election.items():
result_sets = set(result_sets)
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
pr = PostResult.objects.create(
post_election=pee,
post=post_result.post,
confirmed=post_result.confirmed,
confirmed_resultset=post_result.confirmed_resultset
)
for result_set in result_sets:
result_set.post_result = pr
result_set.save()
post_result.delete()
class Migration(migrations.Migration):
dependencies = [
('uk_results', '0029_add_postresult_post_election'),
]
operations = [
migrations.RunPython(set_post_election_from_post),
]
| Python | 0.000004 |
8fd65190a2a68a7afeab91b0a02c83309f72ccd6 | Add tests to gen_test for generator, seems to work | tests/test_testing.py | tests/test_testing.py |
import greenado
from greenado.testing import gen_test
from tornado.testing import AsyncTestCase
from tornado import gen
@gen.coroutine
def coroutine():
raise gen.Return(1234)
class GreenadoTests(AsyncTestCase):
@gen_test
def test_without_timeout1(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test
@greenado.generator
def test_without_timeout2(self):
assert (yield coroutine()) == 1234
@gen_test(timeout=5)
def test_with_timeout1(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test(timeout=5)
@greenado.generator
def test_with_timeout2(self):
assert (yield coroutine()) == 1234
|
import greenado
from greenado.testing import gen_test
from tornado.testing import AsyncTestCase
from tornado import gen
@gen.coroutine
def coroutine():
raise gen.Return(1234)
class GreenadoTests(AsyncTestCase):
@gen_test
def test_without_timeout(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test(timeout=5)
def test_with_timeout(self):
assert greenado.gyield(coroutine()) == 1234
| Python | 0 |
0d313502b8b5d850109b48cde8d3dea2dae0d802 | Clean up __init__.py . | vcr/__init__.py | vcr/__init__.py | import logging
from .config import VCR
# Set default logging handler to avoid "No handler found" warnings.
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
default_vcr = VCR()
use_cassette = default_vcr.use_cassette
| import logging
from .config import VCR
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
default_vcr = VCR()
def use_cassette(path, **kwargs):
return default_vcr.use_cassette(path, **kwargs)
| Python | 0.000021 |
0a2937311d4c319d5233b0a92073a7a3d57d9452 | remove commented out code. Also fix up indents and other violations | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | common/lib/xmodule/xmodule/modulestore/xml_exporter.py | import logging
from xmodule.modulestore import Location
from xmodule.modulestore.inheritance import own_metadata
from fs.osfs import OSFS
from json import dumps
def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore=None):
course = modulestore.get_item(course_location)
fs = OSFS(root_dir)
export_fs = fs.makeopendir(course_dir)
xml = course.export_to_xml(export_fs)
with export_fs.open('course.xml', 'w') as course_xml:
course_xml.write(xml)
# export the static assets
contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/')
# export the static tabs
export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html')
# export the custom tags
export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags')
# export the course updates
export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html')
# export the grading policy
policies_dir = export_fs.makeopendir('policies')
course_run_policy_dir = policies_dir.makeopendir(course.location.name)
with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
grading_policy.write(dumps(course.grading_policy))
# export all of the course metadata in policy.json
with course_run_policy_dir.open('policy.json', 'w') as course_policy:
policy = {'course/' + course.location.name: own_metadata(course)}
course_policy.write(dumps(policy))
# export draft content
# NOTE: this code assumes that verticals are the top most draftable container
# should we change the application, then this assumption will no longer
# be valid
if draft_modulestore is not None:
draft_verticals = draft_modulestore.get_items([None, course_location.org, course_location.course,
'vertical', None, 'draft'])
if len(draft_verticals) > 0:
draft_course_dir = export_fs.makeopendir('drafts')
for draft_vertical in draft_verticals:
parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location, course.location.course_id)
logging.debug('parent_locs = {0}'.format(parent_locs))
draft_vertical.xml_attributes['parent_sequential_url'] = Location(parent_locs[0]).url()
sequential = modulestore.get_item(Location(parent_locs[0]))
index = sequential.children.index(draft_vertical.location.url())
draft_vertical.xml_attributes['index_in_children_list'] = str(index)
draft_vertical.export_to_xml(draft_course_dir)
def export_extra_content(export_fs, modulestore, course_location, category_type, dirname, file_suffix=''):
query_loc = Location('i4x', course_location.org, course_location.course, category_type, None)
items = modulestore.get_items(query_loc)
if len(items) > 0:
item_dir = export_fs.makeopendir(dirname)
for item in items:
with item_dir.open(item.location.name + file_suffix, 'w') as item_file:
item_file.write(item.data.encode('utf8'))
| import logging
from xmodule.modulestore import Location
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.inheritance import own_metadata
from fs.osfs import OSFS
from json import dumps
def export_to_xml(modulestore, contentstore, course_location, root_dir, course_dir, draft_modulestore = None):
course = modulestore.get_item(course_location)
fs = OSFS(root_dir)
export_fs = fs.makeopendir(course_dir)
xml = course.export_to_xml(export_fs)
with export_fs.open('course.xml', 'w') as course_xml:
course_xml.write(xml)
# export the static assets
contentstore.export_all_for_course(course_location, root_dir + '/' + course_dir + '/static/')
# export the static tabs
export_extra_content(export_fs, modulestore, course_location, 'static_tab', 'tabs', '.html')
# export the custom tags
export_extra_content(export_fs, modulestore, course_location, 'custom_tag_template', 'custom_tags')
# export the course updates
export_extra_content(export_fs, modulestore, course_location, 'course_info', 'info', '.html')
# export the grading policy
policies_dir = export_fs.makeopendir('policies')
course_run_policy_dir = policies_dir.makeopendir(course.location.name)
with course_run_policy_dir.open('grading_policy.json', 'w') as grading_policy:
grading_policy.write(dumps(course.grading_policy))
# export all of the course metadata in policy.json
with course_run_policy_dir.open('policy.json', 'w') as course_policy:
policy = {'course/' + course.location.name: own_metadata(course)}
course_policy.write(dumps(policy))
# export everything from the draft store, unfortunately this will create lots of duplicates
'''
if draft_modulestore is not None:
draft_course = draft_modulestore.get_item(course_location)
xml = draft_course.export_to_xml(draft_course_dir)
with draft_course_dir.open('course.xml', 'w') as course_xml:
course_xml.write(xml)
'''
# export draft content
# NOTE: this code assumes that verticals are the top most draftable container
# should we change the application, then this assumption will no longer
# be valid
if draft_modulestore is not None:
draft_verticals = draft_modulestore.get_items([None, course_location.org, course_location.course,
'vertical', None, 'draft'])
if len(draft_verticals)>0:
draft_course_dir = export_fs.makeopendir('drafts')
for draft_vertical in draft_verticals:
parent_locs = draft_modulestore.get_parent_locations(draft_vertical.location, course.location.course_id)
logging.debug('parent_locs = {0}'.format(parent_locs))
draft_vertical.xml_attributes['parent_sequential_url'] = Location(parent_locs[0]).url()
sequential = modulestore.get_item(Location(parent_locs[0]))
index = sequential.children.index(draft_vertical.location.url())
draft_vertical.xml_attributes['index_in_children_list'] = str(index)
draft_vertical.export_to_xml(draft_course_dir)
def export_extra_content(export_fs, modulestore, course_location, category_type, dirname, file_suffix=''):
query_loc = Location('i4x', course_location.org, course_location.course, category_type, None)
items = modulestore.get_items(query_loc)
if len(items) > 0:
item_dir = export_fs.makeopendir(dirname)
for item in items:
with item_dir.open(item.location.name + file_suffix, 'w') as item_file:
item_file.write(item.data.encode('utf8'))
| Python | 0 |
e353bae122c6e55da022d73c42d7eee09a558b44 | clean code | bin/visual_dl.py | bin/visual_dl.py | """ entry point of visual_dl
"""
import json
import os
import sys
from optparse import OptionParser
from flask import Flask, redirect
from flask import send_from_directory
from visualdl.log import logger
app = Flask(__name__, static_url_path="")
def option_parser():
"""
:return:
"""
parser = OptionParser(usage="usage: visual_dl visual_dl.py "\
"-p port [options]")
parser.add_option(
"-p",
"--port",
default=8040,
action="store",
dest="port",
help="rest api service port")
return parser.parse_args()
# return data
# status, msg, data
def gen_result(status, msg):
"""
:param status:
:param msg:
:return:
"""
result = dict()
result['status'] = status
result['msg'] = msg
result['data'] = {}
return result
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
static_file_path = "../visualdl/frontend/dist/"
@app.route('/static/<path:filename>')
def serve_static(filename):
return send_from_directory(os.path.join(server_path, static_file_path), filename)
@app.route("/")
def index():
return redirect('/static/index.html', code=302)
@app.route('/hello')
def hello():
result = gen_result(0, "Hello, this is VisualDL!")
return json.dumps(result)
if __name__ == '__main__':
options, args = option_parser()
logger.info(" port=" + str(options.port))
app.run(debug=False, host="0.0.0.0", port=options.port)
| """ entry point of visual_dl
"""
import json
import os
import sys
from optparse import OptionParser
from flask import Flask, redirect
from flask import send_from_directory
from visualdl.log import logger
app = Flask(__name__, static_url_path="")
def option_parser():
"""
:return:
"""
parser = OptionParser(usage="usage: visual_dl visual_dl.py "\
"-p port [options]")
parser.add_option(
"-p",
"--port",
default=8040,
action="store",
dest="port",
help="rest api service port")
return parser.parse_args()
# return data
# status, msg, data
def gen_result(status, msg):
"""
:param status:
:param msg:
:return:
"""
result = dict()
result['status'] = status
result['msg'] = msg
result['data'] = {}
return result
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
static_file_path = "../visualdl/frontend/dist/"
@app.route('/static/<path:filename>')
def serve_static(filename):
print("aaa")
return send_from_directory(os.path.join(server_path, static_file_path), filename)
@app.route("/")
def index():
return redirect('/static/index.html', code=302)
@app.route('/hello')
def hello():
result = gen_result(0, "Hello, this is VisualDL!")
return json.dumps(result)
if __name__ == '__main__':
options, args = option_parser()
logger.info(" port=" + str(options.port))
app.run(debug=False, host="0.0.0.0", port=options.port)
| Python | 0.000008 |
3c72aa1266f1008552a3979ac057251bf2f93053 | Bump tensorflow in /training/xgboost/structured/base (#212) | training/xgboost/structured/base/setup.py | training/xgboost/structured/base/setup.py | #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.4',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| #!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helpfer functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.2',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
| Python | 0.000545 |
0ec3bfbd91e6e967bb2baae0307e76aafbb5aa91 | Simplify the base types | blackjax/base.py | blackjax/base.py | from typing import NamedTuple, Tuple
from typing_extensions import Protocol
from .types import PRNGKey, PyTree
Position = PyTree
State = NamedTuple
Info = NamedTuple
class InitFn(Protocol):
"""A `Callable` used to initialize the kernel state.
Sampling algorithms often need to carry over some informations between
steps, often to avoid computing the same quantity twice. Therefore the
kernels do not operate on the chain positions themselves, but on states that
contain this position and other information.
The `InitFn` returns the state corresponding to a chain position. This state
can then be passed to the `update` function of the `SamplingAlgorithm`.
"""
def __call__(self, position: Position) -> State:
"""The initialization function.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
class Kernel:
"""A transition kernel used as the `update` of a `SamplingAlgorithms`.
Kernels are pure functions and are idempotent. They necessarily take a
random state `rng_key` and the current kernel state (which contains the
current position) as parameters, return a new state and some information
about the transtion.
"""
def __call__(self, rng_key: PRNGKey, state: State) -> Tuple[State, Info]:
"""The transition kernel.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
class SamplingAlgorithm(NamedTuple):
"""A pair of functions that implement a sampling algorithm.
Blackjax sampling algorithms are implemented as a pair of pure functions: a
kernel, that takes a new samples starting from the current state, and an
initialization function that creates a kernel state from a chain position.
As they represent Markov kernels, the kernel functions are pure functions
and do not have internal state. To save computation time they also operate
on states which contain the chain state and additional information that
needs to be carried over for the next step.
Attributes
---------
init:
A pure function which when called with the initial position and the
target density probability function will return the kernel's initial
state.
step:
A pure function that takes a rng key, a state and possibly some
parameters and returns a new state and some information about the
transition.
"""
init: InitFn
step: Kernel
| from typing import Callable, NamedTuple, Tuple
from typing_extensions import Protocol
from .types import PRNGKey, PyTree
Position = PyTree
State = NamedTuple
Info = NamedTuple
class InitFn(Protocol):
"""A `Callable` used to initialize the kernel state.
Sampling algorithms often need to carry over some informations between
steps, often to avoid computing the same quantity twice. Therefore the
kernels do not operate on the chain positions themselves, but on states that
contain this position and other information.
The `InitFn` returns the state corresponding to a chain position. This state
can then be passed to the `update` function of the `SamplingAlgorithm`.
"""
def __call__(self, position: Position) -> State:
"""The initialization function.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
class Kernel:
"""A transition kernel used as the `update` of a `SamplingAlgorithms`.
Kernels are pure functions and are idempotent. They necessarily take a
random state `rng_key` and the current kernel state (which contains the
current position) as parameters, return a new state and some information
about the transtion.
"""
def __call__(self, rng_key: PRNGKey, state: State) -> Tuple[State, Info]:
"""The transition kernel.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
class SamplingAlgorithm(NamedTuple):
"""A pair of functions that implement a sampling algorithm.
Blackjax sampling algorithms are implemented as a pair of pure functions: a
kernel, that takes a new samples starting from the current state, and an
initialization function that creates a kernel state from a chain position.
As they represent Markov kernels, the kernel functions are pure functions
and do not have internal state. To save computation time they also operate
on states which contain the chain state and additional information that
needs to be carried over for the next step.
Attributes
---------
init:
A pure function which when called with the initial position and the
target density probability function will return the kernel's initial
state.
step:
A pure function that takes a rng key, a state and possibly some
parameters and returns a new state and some information about the
transition.
"""
init: InitFn
step: Kernel
class SamplingAlgorithmGenerator(NamedTuple):
"""A pair of functions that implement a kenel generator.
This is meant to be a quick fix until we can pass the values of parameters
directly to the step function.
"""
init: InitFn
kernel: Callable
| Python | 0.002563 |
f1b22cfcca8470a59a7bab261bbd2a46a7c2a2ed | Fix unicode issues at url translation | socib_cms/cmsutils/utils.py | socib_cms/cmsutils/utils.py | # coding: utf-8
import re
from django.core.urlresolvers import reverse
from django.conf import settings
def reverse_no_i18n(viewname, *args, **kwargs):
    """Resolve ``viewname`` with Django's :func:`reverse` and return the URL
    with its first path segment (the i18n language prefix) stripped,
    e.g. ``/en/page/`` -> ``/page/``.

    Falls back to the unmodified URL when it has no second path segment
    (e.g. ``/``), instead of crashing on the failed regex match.
    """
    url = reverse(viewname, *args, **kwargs)
    m = re.match(r'(/[^/]*)(/.*$)', url)
    if m is None:
        # No language-prefix segment to strip -- return the URL as-is.
        # (The original code called m.groups() here and raised
        # AttributeError for such URLs.)
        return url
    return m.group(2)
def change_url_language(url, language):
    """Return ``url`` prefixed with ``language``.

    If the URL already starts with one of the language codes declared in
    ``settings.LANGUAGES`` that prefix is swapped for ``language``;
    otherwise ``language`` is simply prepended.  When the project defines
    no ``LANGUAGES`` setting the URL is returned untouched.
    """
    if not hasattr(settings, 'LANGUAGES'):
        return url
    # First element of each LANGUAGES tuple is the language code.
    known_codes = {lang[0] for lang in settings.LANGUAGES}
    match = re.match(r'/([^/]*)(/.*$)', url)
    if match and match.group(1) in known_codes:
        # Swap the existing language prefix for the requested one.
        return u"/{lang}{url}".format(lang=language, url=match.group(2))
    return u"/{lang}{url}".format(lang=language, url=url)
| # coding: utf-8
import re
from django.core.urlresolvers import reverse
from django.conf import settings
def reverse_no_i18n(viewname, *args, **kwargs):
result = reverse(viewname, *args, **kwargs)
m = re.match(r'(/[^/]*)(/.*$)', result)
return m.groups()[1]
def change_url_language(url, language):
if hasattr(settings, 'LANGUAGES'):
languages = [lang[0] for lang in settings.LANGUAGES]
m = re.match(r'/([^/]*)(/.*$)', url)
if m and m.groups()[0] in languages:
return "/{lang}{url}".format(
lang=language,
url=m.groups()[1])
return "/{lang}{url}".format(
lang=language,
url=url)
return url
| Python | 0.00022 |
d15c2107f4132b53fb77622748753bb9f3c2916f | Update messenger.py | bot/messenger.py | bot/messenger.py | # -*- coding: utf-8 -*-
import logging
import random
import sys
# Python 2 hack: reload() re-exposes sys.setdefaultencoding (hidden by
# site.py at startup) so the process-wide default encoding can be forced
# to UTF-8 for the non-ASCII message strings below.
reload(sys)
sys.setdefaultencoding('utf8')
# Module-level logger for this bot module.
logger = logging.getLogger(__name__)
class Messenger(object):
    """Formats and posts the bot's Slack messages via the shared clients."""
    def __init__(self, slack_clients):
        # `slack_clients` wraps the RTM/web API connections used below.
        self.clients = slack_clients
    def send_message(self, channel_id, msg):
        """Send ``msg`` to the channel identified by ``channel_id``."""
        # in the case of Group and Private channels, RTM channel payload is a complex dictionary
        if isinstance(channel_id, dict):
            channel_id = channel_id['id']
        logger.debug('Sending msg: %s to channel: %s' % (msg, channel_id))
        channel = self.clients.rtm.server.channels.find(channel_id)
        channel.send_message(msg)
    def write_help_message(self, channel_id):
        """Post the help text describing the commands the bot understands."""
        bot_uid = self.clients.bot_user_id()
        txt = '{}\n{}\n{}\n'.format(
            "Hi :wave:, who doesnt like a good quote ?",
            "> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning you. :wave:",
            "> `<@" + bot_uid + "> Quote` - I'll tell you one of my finest quotes"
        )
        self.send_message(channel_id, txt)
    def write_greeting(self, channel_id, user_id):
        """Greet ``user_id`` with a randomly chosen greeting."""
        greetings = ["Do you feel lucky ....", "Greetings ....","Winter is coming...", "Valar Morghulis...","Say hello to my little friend...","You talkin to me .."]
        txt = '{} <@{}>!'.format(random.choice(greetings), user_id)
        self.send_message(channel_id, txt)
    def write_prompt(self, channel_id):
        """Ask the user to spell out a full command when input is unclear."""
        bot_uid = self.clients.bot_user_id()
        txt = "Whoa ... spell it out for me.. please ? (e.g. `<@" + bot_uid + "> help`)"
        self.send_message(channel_id, txt)
    def write_quote(self, channel_id):
        """Post a random quote, preceded by a short "typing" pause."""
        quotes=["There can be only @juantwothree",":spock-hand::skin-tone-2: Live long and prosper :spock-hand::skin-tone-2:"]
        txt = random.choice(quotes)
        self.clients.send_user_typing_pause(channel_id)
        self.send_message(channel_id, txt)
    def write_quoteBB(self, channel_id):
        """Post a random Breaking Bad quote, preceded by a "typing" pause."""
        quotesBB=["A guy opens his door and gets shot and you think that of me? No...I AM THE ONE WHO KNOCKS","Whats the point of being an outlaw when you got responsibilities?","Stay out of my territory","This is my own private domicile and I will not be harassed…bitch!"]
        txt = random.choice(quotesBB)
        self.clients.send_user_typing_pause(channel_id)
        self.send_message(channel_id, txt)
    def write_error(self, channel_id, err_msg):
        """Report an internal error to the channel, quoting ``err_msg``."""
        txt = ":face_with_head_bandage: Houston, we have a problem :\n>```{}```".format(err_msg)
        self.send_message(channel_id, txt)
| # -*- coding: utf-8 -*-
import logging
import random
import sys
# Python 2 hack: reload() re-exposes sys.setdefaultencoding (hidden by
# site.py at startup) so the process-wide default encoding can be forced
# to UTF-8 for the non-ASCII quote strings below.
reload(sys)
sys.setdefaultencoding('utf8')
# Module-level logger for this bot module.
logger = logging.getLogger(__name__)
class Messenger(object):
    """Formats and posts the bot's Slack messages via the shared clients."""
    def __init__(self, slack_clients):
        # `slack_clients` wraps the RTM/web API connections used below.
        self.clients = slack_clients
    def send_message(self, channel_id, msg):
        """Send ``msg`` to the channel identified by ``channel_id``."""
        # in the case of Group and Private channels, RTM channel payload is a complex dictionary
        if isinstance(channel_id, dict):
            channel_id = channel_id['id']
        logger.debug('Sending msg: %s to channel: %s' % (msg, channel_id))
        channel = self.clients.rtm.server.channels.find(channel_id)
        channel.send_message(msg)
    def write_help_message(self, channel_id):
        """Post the help text describing the commands the bot understands."""
        bot_uid = self.clients.bot_user_id()
        txt = '{}\n{}\n{}\n'.format(
            "Hi :wave:, who doesnt like a good quote ?",
            "> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning you. :wave:",
            "> `<@" + bot_uid + "> Quote` - I'll tell you one of my finest quotes"
        )
        self.send_message(channel_id, txt)
    def write_greeting(self, channel_id, user_id):
        """Greet ``user_id`` with a randomly chosen greeting."""
        greetings = ["Do you feel lucky ....", "Greetings ....","Winter is coming...", "Valar Morghulis...","Say hello to my little friend...","You talkin to me .."]
        txt = '{} <@{}>!'.format(random.choice(greetings), user_id)
        self.send_message(channel_id, txt)
    def write_prompt(self, channel_id):
        """Ask the user to spell out a full command when input is unclear."""
        bot_uid = self.clients.bot_user_id()
        txt = "Whoa ... spell it out for me.. please ? (e.g. `<@" + bot_uid + "> help`)"
        self.send_message(channel_id, txt)
    def write_quote(self, channel_id):
        """Post a random movie quote, preceded by a short "typing" pause."""
        quotes=["To infinity…and beyond!","I have come here to chew bubblegum and kick ass, and Im all out of bubblegum.","Surely, you cant be serious – I am serious, and dont call me Shirley.","I pity the fool","There can be only juan","THIS IS SPARTA!!!!","Shit just got real","Its clobberin time!","Go ahead, make my day....","Run, Forrest, run!!!","Im too old for this shit..","Ill be back","SHOW ME THE MONEY!!!","Greed, for lack of a better word, is good..","You cant handle the truth!","Snap out of it!","I feel the need…the need for speed","Youre gonna need a bigger boat","I see dead people","Great scott!","Life is like a box of chocolates: you never know what youre gonna get","Im gonna make him an offer he cant refuse","They may take our lives, but theyll never take…OUR FREEDOM!","Oh, behave!","You had me at hello","Im not bad. Im just drawn that way","Ssssssssssssmokin","Ill have what shes having","Wax on, wax off. Wax on, wax off","Hakuna Matata","Im sorry,Sharpy...Im afraid I cant do that",":spock-hand::skin-tone-2: Live long and prosper :spock-hand::skin-tone-2:"]
        txt = random.choice(quotes)
        self.clients.send_user_typing_pause(channel_id)
        self.send_message(channel_id, txt)
    def write_quoteBB(self, channel_id):
        """Post a random Breaking Bad quote, preceded by a "typing" pause."""
        quotesBB=["A guy opens his door and gets shot and you think that of me? No...I AM THE ONE WHO KNOCKS","Whats the point of being an outlaw when you got responsibilities?","Stay out of my territory","This is my own private domicile and I will not be harassed…bitch!"]
        txt = random.choice(quotesBB)
        self.clients.send_user_typing_pause(channel_id)
        self.send_message(channel_id, txt)
    def write_error(self, channel_id, err_msg):
        """Report an internal error to the channel, quoting ``err_msg``."""
        txt = ":face_with_head_bandage: Houston, we have a problem :\n>```{}```".format(err_msg)
        self.send_message(channel_id, txt)
| Python | 0.000001 |
dd9843c97c9e15c2522034a6f5333f68714cd031 | copy with original content type | filer/storage.py | filer/storage.py | #-*- coding: utf-8 -*-
import urllib.request, urllib.parse, urllib.error
from django.core.files.storage import FileSystemStorage
from django.utils.encoding import smart_str
try:
from storages.backends.s3boto import S3BotoStorage
except ImportError:
from storages.backends.s3boto3 import S3Boto3Storage as S3BotoStorage
class PublicFileSystemStorage(FileSystemStorage):
    """
    File system storage that saves its files in the filer public directory
    See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
    """
    # Public storage: files here may be served directly by the web server.
    is_secure = False
class PrivateFileSystemStorage(FileSystemStorage):
    """
    File system storage that saves its files in the filer private directory.
    This directory should NOT be served directly by the web server.
    See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
    """
    # Private storage: access must be mediated by the application.
    is_secure = True
def filepath_to_url(path):
    """Convert a filesystem path into a URL-quoted path.

    Backslashes are normalised to forward slashes and the result is
    percent-encoded, keeping ``/~!*()`` literal.  ``None`` passes
    through unchanged.
    """
    if path is None:
        return None
    normalized = smart_str(path).replace("\\", "/")
    return urllib.parse.quote(normalized, safe="/~!*()")
class PatchedS3BotoStorage(S3BotoStorage):
    """S3 storage with a custom ``url()`` and a ``copy()`` helper that keeps
    public-read ACLs and the source Content-Type on copies."""
    def url(self, name):
        """Return the URL for ``name``: the custom-domain URL when one is
        configured, otherwise a (possibly signed) URL generated by the
        S3 connection."""
        if self.custom_domain:
            name = filepath_to_url(self._normalize_name(self._clean_name(name)))
            return "%s://%s/%s" % ('https' if self.secure_urls else 'http',
                self.custom_domain, name)
        return self.connection.generate_url(
            self.querystring_expire,
            method='GET', bucket=self.bucket.name, key=self._encode_name(name),
            query_auth=self.querystring_auth, force_http=not self.secure_urls)
    def has_public_read(self, object_key):
        """Return True if ``object_key`` (a boto3 Object) grants READ to the
        AllUsers group, i.e. the object is publicly readable."""
        old_acl = object_key.Acl().grants
        if not old_acl:
            return False
        for right in old_acl:
            # Grantee URI for the public group contains 'AllUsers'.
            if (
                'AllUsers' in right.get('Grantee', {}).get('URI', '') and
                right.get('Permission', '').upper() == 'READ'
            ):
                return True
        return False
    def copy(self, src_name, dst_name):
        """Copy ``src_name`` to ``dst_name`` inside the bucket.

        boto3 cannot copy the full source ACL, so a public-read source is
        re-marked public-read on the destination, and the source object's
        Content-Type is forwarded so the copy keeps it.
        """
        src_path = self._normalize_name(self._clean_name(src_name))
        dst_path = self._normalize_name(self._clean_name(dst_name))
        copy_source = {
            'Bucket': self.bucket.name,
            'Key': src_path
        }
        extra_args = {}
        # we cannot preserve acl in boto3, but we can give public read
        source_obj = self.bucket.Object(src_path)
        if self.has_public_read(source_obj):
            extra_args = {
                'ACL': 'public-read',
                'ContentType': source_obj.content_type
            }
        self.bucket.copy(copy_source, dst_path, extra_args) | #-*- coding: utf-8 -*-
import urllib.request, urllib.parse, urllib.error
from django.core.files.storage import FileSystemStorage
from django.utils.encoding import smart_str
try:
from storages.backends.s3boto import S3BotoStorage
except ImportError:
from storages.backends.s3boto3 import S3Boto3Storage as S3BotoStorage
class PublicFileSystemStorage(FileSystemStorage):
    """
    File system storage that saves its files in the filer public directory
    See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
    """
    # Public storage: files here may be served directly by the web server.
    is_secure = False
class PrivateFileSystemStorage(FileSystemStorage):
    """
    File system storage that saves its files in the filer private directory.
    This directory should NOT be served directly by the web server.
    See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
    """
    # Private storage: access must be mediated by the application.
    is_secure = True
def filepath_to_url(path):
    """Convert a filesystem path into a URL-quoted path.

    Backslashes are normalised to forward slashes and the result is
    percent-encoded, keeping ``/~!*()`` literal.  ``None`` passes
    through unchanged.
    """
    if path is None:
        return None
    normalized = smart_str(path).replace("\\", "/")
    return urllib.parse.quote(normalized, safe="/~!*()")
class PatchedS3BotoStorage(S3BotoStorage):
    """S3 storage with a custom ``url()`` and a ``copy()`` helper that keeps
    public-read ACLs on copies."""
    def url(self, name):
        """Return the URL for ``name``: the custom-domain URL when one is
        configured, otherwise a (possibly signed) URL generated by the
        S3 connection."""
        if self.custom_domain:
            name = filepath_to_url(self._normalize_name(self._clean_name(name)))
            return "%s://%s/%s" % ('https' if self.secure_urls else 'http',
                self.custom_domain, name)
        return self.connection.generate_url(
            self.querystring_expire,
            method='GET', bucket=self.bucket.name, key=self._encode_name(name),
            query_auth=self.querystring_auth, force_http=not self.secure_urls)
    def has_public_read(self, path):
        """Return True if the object at ``path`` grants READ to the AllUsers
        group, i.e. it is publicly readable."""
        old_acl = self.bucket.Object(path).Acl().grants
        if not old_acl:
            return False
        for right in old_acl:
            # Grantee URI for the public group contains 'AllUsers'.
            if (
                'AllUsers' in right.get('Grantee', {}).get('URI', '') and
                right.get('Permission', '').upper() == 'READ'
            ):
                return True
        return False
    def copy(self, src_name, dst_name):
        """Copy ``src_name`` to ``dst_name`` inside the bucket, re-applying
        a public-read ACL when the source had one.

        NOTE(review): the source Content-Type is not forwarded here, so the
        destination object may lose it on copy -- verify.
        """
        src_path = self._normalize_name(self._clean_name(src_name))
        dst_path = self._normalize_name(self._clean_name(dst_name))
        copy_source = {
            'Bucket': self.bucket.name,
            'Key': src_path
        }
        extra_args = {}
        # we cannot preserve acl in boto3, but we can give public read
        if self.has_public_read(src_path):
            extra_args = {
                'ACL': 'public-read'
            }
        self.bucket.copy(copy_source, dst_path, extra_args) | Python | 0 |
19b77442ee3cc80d8c7eaee6bde6c87d6a9e9277 | Test a fix for the wheel test | tests/integration/modules/saltutil.py | tests/integration/modules/saltutil.py | # -*- coding: utf-8 -*-
'''
Integration tests for the saltutil module.
'''
# Import Python libs
from __future__ import absolute_import
import time
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import integration
class SaltUtilModuleTest(integration.ModuleCase):
    '''
    Testcase for the saltutil execution module
    '''
    def setUp(self):
        # Make sure fresh pillar data is available before each test runs.
        self.run_function('saltutil.refresh_pillar')

    # Tests for the wheel function
    def test_wheel_just_function(self):
        '''
        Tests using the saltutil.wheel function when passing only a function.
        '''
        # Wait for the pillar refresh to kick in, so that grains are ready to go
        time.sleep(3)
        ret = self.run_function('saltutil.wheel', ['minions.connected'])
        self.assertIn('minion', ret['return'])
        self.assertIn('sub_minion', ret['return'])

    def test_wheel_with_arg(self):
        '''
        Tests using the saltutil.wheel function when passing a function and an arg.
        '''
        ret = self.run_function('saltutil.wheel', ['key.list', 'minion'])
        self.assertEqual(ret['return'], {})

    def test_wheel_no_arg_raise_error(self):
        '''
        Tests using the saltutil.wheel function when passing a function that requires
        an arg, but one isn't supplied.
        '''
        # BUG FIX: assertRaises must be given the callable followed by its
        # arguments.  Passing the string 'saltutil.wheel' as the "callable"
        # made the assertion pass vacuously (calling a str raises TypeError)
        # without ever exercising the wheel function.
        self.assertRaises(TypeError, self.run_function,
                          'saltutil.wheel', ['key.list'])

    def test_wheel_with_kwarg(self):
        '''
        Tests using the saltutil.wheel function when passing a function and a kwarg.
        This function just generates a key pair, but doesn't do anything with it. We
        just need this for testing purposes.
        '''
        ret = self.run_function('saltutil.wheel', ['key.gen'], keysize=1024)
        self.assertIn('pub', ret['return'])
        self.assertIn('priv', ret['return'])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    from integration import run_tests
    run_tests(SaltUtilModuleTest)
| # -*- coding: utf-8 -*-
'''
Integration tests for the saltutil module.
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import integration
class SaltUtilModuleTest(integration.ModuleCase):
    '''
    Testcase for the saltutil execution module
    '''
    # Tests for the wheel function
    def test_wheel_just_function(self):
        '''
        Tests using the saltutil.wheel function when passing only a function.
        '''
        ret = self.run_function('saltutil.wheel', ['minions.connected'])
        self.assertIn('minion', ret['return'])
        self.assertIn('sub_minion', ret['return'])

    def test_wheel_with_arg(self):
        '''
        Tests using the saltutil.wheel function when passing a function and an arg.
        '''
        ret = self.run_function('saltutil.wheel', ['key.list', 'minion'])
        self.assertEqual(ret['return'], {})

    def test_wheel_no_arg_raise_error(self):
        '''
        Tests using the saltutil.wheel function when passing a function that requires
        an arg, but one isn't supplied.
        '''
        # BUG FIX: assertRaises must be given the callable followed by its
        # arguments.  Passing the string 'saltutil.wheel' as the "callable"
        # made the assertion pass vacuously (calling a str raises TypeError)
        # without ever exercising the wheel function.
        self.assertRaises(TypeError, self.run_function,
                          'saltutil.wheel', ['key.list'])

    def test_wheel_with_kwarg(self):
        '''
        Tests using the saltutil.wheel function when passing a function and a kwarg.
        This function just generates a key pair, but doesn't do anything with it. We
        just need this for testing purposes.
        '''
        ret = self.run_function('saltutil.wheel', ['key.gen'], keysize=1024)
        self.assertIn('pub', ret['return'])
        self.assertIn('priv', ret['return'])
if __name__ == '__main__':
    # Allow running this test module directly from the command line.
    from integration import run_tests
    run_tests(SaltUtilModuleTest)
| Python | 0 |
40d2de6f25a4081dac3d809c9d0b8d20478cf92c | Tidy test settings and introduce django-nose | wagtailmenus/tests/settings.py | wagtailmenus/tests/settings.py | import os
import hashlib
from django.conf.global_settings import * # NOQA
# Minimal Django settings used only to run the wagtailmenus test suite.
DEBUG = True
SITE_ID = 1
# SQLite keeps the test databases self-contained (no external DB server).
DATABASES = {
    'default': {
        'NAME': 'wagtailmenus_test.sqlite',
        'TEST_NAME': 'wagtailmenus_test_test.sqlite',
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
TIME_ZONE = 'Europe/London'
USE_TZ = True
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = (
    'wagtailmenus.tests',
    'wagtailmenus',
    'wagtail.wagtailforms',
    'wagtail.wagtailsearch',
    'wagtail.wagtailembeds',
    'wagtail.wagtailimages',
    'wagtail.wagtailsites',
    'wagtail.wagtailusers',
    'wagtail.wagtailsnippets',
    'wagtail.wagtaildocs',
    'wagtail.wagtailredirects',
    'wagtail.wagtailadmin',
    'wagtail.api',
    'wagtail.wagtailcore',
    'wagtailmodeladmin',
    'django_nose',
    'taggit',
    'modelcluster',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'test-static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'test-media')
MEDIA_URL = '/media/'
# Tests only -- never use a hard-coded secret key in production settings.
SECRET_KEY = 'fake-key'
ROOT_URLCONF = 'wagtailmenus.tests.urls'
WAGTAIL_SITE_NAME = 'Test site'
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
# =============================================================================
# django-nose config
# =============================================================================
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
# Collect coverage for the package under test on every run.
NOSE_ARGS = [
    '--with-coverage',
    '--cover-package=wagtailmenus',
]
# =============================================================================
# Templates
# =============================================================================
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_ROOT, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.request',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'wagtail.contrib.settings.context_processors.settings',
            ],
        },
    },
]
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'wagtail.wagtailcore.middleware.SiteMiddleware',
    'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
| import os
import hashlib
from django.conf.global_settings import * # NOQA
# Minimal Django settings used only to run the wagtailmenus test suite.
DEBUG = True
SITE_ID = 1
# SQLite keeps the test databases self-contained (no external DB server).
DATABASES = {
    'default': {
        'NAME': 'wagtailmenus.sqlite',
        'TEST_NAME': 'wagtailmenus_test.sqlite',
        'ENGINE': 'django.db.backends.sqlite3',
    }
}
TIME_ZONE = 'Europe/London'
USE_TZ = True
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = (
    'wagtailmenus.tests',
    'wagtailmenus',
    'wagtail.wagtailforms',
    'wagtail.wagtailsearch',
    'wagtail.wagtailembeds',
    'wagtail.wagtailimages',
    'wagtail.wagtailsites',
    'wagtail.wagtailusers',
    'wagtail.wagtailsnippets',
    'wagtail.wagtaildocs',
    'wagtail.wagtailredirects',
    'wagtail.wagtailadmin',
    'wagtail.api',
    'wagtail.wagtailcore',
    'wagtailmodeladmin',
    'taggit',
    'modelcluster',
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'django.contrib.sites'
)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'test-static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'test-media')
MEDIA_URL = '/media/'
# Tests only -- never use a hard-coded secret key in production settings.
SECRET_KEY = 'fake-key'
ROOT_URLCONF = 'wagtailmenus.tests.urls'
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
# =============================================================================
# Templates
# =============================================================================
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(PROJECT_ROOT, 'templates'),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.contrib.auth.context_processors.auth',
                'django.template.context_processors.debug',
                'django.template.context_processors.i18n',
                'django.template.context_processors.media',
                'django.template.context_processors.request',
                'django.template.context_processors.static',
                'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
                'wagtail.contrib.settings.context_processors.settings',
            ],
        },
    },
]
MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'wagtail.wagtailcore.middleware.SiteMiddleware',
    'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
STATICFILES_FINDERS = (
    'django.contrib.staticfiles.finders.FileSystemFinder',
    'django.contrib.staticfiles.finders.AppDirectoriesFinder',
    'compressor.finders.CompressorFinder',
)
| Python | 0 |
b359d71a3c13720257167c57959a18b7a5ce9d07 | Fix Python package version checking | gravity/tilt/tilt_tests.py | gravity/tilt/tilt_tests.py | import os, subprocess, sys
import pkg_resources
from packaging import version
# This function is used to check if an apt package is installed on Raspbian, Ubuntu, Debian, etc.
def apt_package_installed(package_name: str) -> bool:
    """Return True if the Debian/apt package ``package_name`` is installed.

    Uses ``dpkg -s``, which exits non-zero for unknown or uninstalled
    packages.  Raises ``FileNotFoundError`` on systems without dpkg --
    callers should check ``has_apt()`` first.
    """
    # subprocess.DEVNULL avoids manually opening/closing an os.devnull handle.
    retval = subprocess.call(
        ["dpkg", "-s", package_name],
        stdout=subprocess.DEVNULL,
        stderr=subprocess.STDOUT,
    )
    return retval == 0
# This is just a means to check if apt (dpkg) is installed at all
def has_apt() -> bool:
    """Return True if the dpkg binary is available and working."""
    try:
        # subprocess.DEVNULL avoids manually managing an os.devnull handle.
        retval = subprocess.call(
            ["dpkg", "--version"],
            stdout=subprocess.DEVNULL,
            stderr=subprocess.STDOUT,
        )
    except OSError:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit): subprocess raises OSError /
        # FileNotFoundError when the dpkg binary does not exist.
        return False
    return retval == 0
def check_apt_packages() -> (bool, list):
    """Check the fixed list of apt packages this project requires.

    Returns a tuple ``(all_ok, results)`` where ``results`` is a list of
    ``{'package': name, 'result': installed}`` dicts, one per package.
    """
    required = ["bluez", "libcap2-bin", "libbluetooth3", "libbluetooth-dev",
                "redis-server", "python3-dev"]
    results = []
    all_ok = True
    for name in required:
        installed = apt_package_installed(name)
        if not installed:
            all_ok = False
        results.append({'package': name, 'result': installed})
    return all_ok, results
def check_python_packages() -> (bool, list):
    """Verify that the required python packages are installed at the
    expected (pinned) versions.

    Returns a tuple ``(all_ok, results)``; each result records the package
    name, the required version, the installed version (or ``None``) and an
    ``ok`` flag.  Versions are compared via their ``public`` string form.
    """
    if sys.platform == "darwin":
        # The MacOS support uses different packages from the support for Linux
        required = [
            {'name': 'pyobjc', 'version': version.parse("6.2")},
            {'name': 'redis', 'version': version.parse("3.4.1")},
        ]
    else:
        required = [
            {'name': 'PyBluez', 'version': version.parse("0.23")},
            {'name': 'aioblescan', 'version': version.parse("0.2.6")},
            {'name': 'redis', 'version': version.parse("3.4.1")},
        ]
    results = []
    all_ok = True
    for spec in required:
        found_version = None
        ok = False
        for dist in pkg_resources.working_set:
            if dist.project_name == spec['name']:
                found_version = dist.parsed_version
                if found_version.public == spec['version'].public:
                    ok = True
        if not ok:
            all_ok = False
        results.append({
            'package': spec['name'],
            'required_version': spec['version'],
            'installed_version': found_version,
            'ok': ok,
        })
    return all_ok, results
# The following was used for testing during development
if __name__ == "__main__":
    # Only inspect apt packages when dpkg is actually present on this host.
    if has_apt():
        apt_ok, apt_test_results = check_apt_packages()
        if apt_ok:
            print("All apt packages found. Package status:")
        else:
            print("Missing apt packages. Package status:")
        for this_test in apt_test_results:
            print("Package {}: {}".format(this_test['package'],
                                          ("Installed" if this_test['result'] else "Not Installed")))
    else:
        print("dpkg not installed - not checking to see if system packages are installed")
    print("")
    # Next, check the python packages
    python_ok, python_test_results = check_python_packages()
    if python_ok:
        print("All required python packages found. Package status:")
    else:
        print("Missing/incorrect python packages. Package status:")
    for this_test in python_test_results:
        print("Package {} - Required Version {} - Installed Version {} - OK? {}".format(
            this_test['package'], this_test['required_version'], this_test['installed_version'], this_test['ok']))
    print("")
| import os, subprocess, sys
import pkg_resources
from packaging import version
# This function is used to check if an apt package is installed on Raspbian, Ubuntu, Debian, etc.
def apt_package_installed(package_name: str) -> bool:
    """Return True if the Debian/apt package ``package_name`` is installed
    (``dpkg -s`` exits non-zero for unknown/uninstalled packages)."""
    # Discard dpkg's output; only the exit status matters.
    devnull = open(os.devnull,"w")
    retval = subprocess.call(["dpkg", "-s", package_name],stdout=devnull,stderr=subprocess.STDOUT)
    devnull.close()
    if retval != 0:
        return False
    return True
# This is just a means to check if apt (dpkg) is installed at all
def has_apt() -> bool:
    """Return True if the dpkg binary is available and working."""
    try:
        devnull = open(os.devnull,"w")
        retval = subprocess.call(["dpkg", "--version"],stdout=devnull,stderr=subprocess.STDOUT)
        devnull.close()
        if retval != 0:
            return False
        return True
    # NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
    # `except OSError:` would cover the missing-binary case precisely.
    except:
        # dpkg doesn't exist
        return False
def check_apt_packages() -> (bool, list):
    """Check the fixed list of required apt packages.

    Returns ``(all_ok, results)`` where ``results`` is a list of
    ``{'package': name, 'result': installed}`` dicts, one per package.
    """
    package_list = ["bluez", "libcap2-bin", "libbluetooth3", "libbluetooth-dev", "redis-server", "python3-dev"]
    test_results = []
    all_packages_ok = True
    for package in package_list:
        result = {'package': package, 'result': True}
        if apt_package_installed(package):
            result['result'] = True
        else:
            result ['result'] = False
            all_packages_ok = False
        test_results.append(result)
    return all_packages_ok, test_results
def check_python_packages() -> (bool, list):
    """Verify that the required python packages are installed at the
    expected (pinned) versions.

    Returns ``(all_ok, results)``; each result records the package name,
    the required version, the installed version (or ``None``) and an
    ``ok`` flag.
    """
    if sys.platform == "darwin":
        # The MacOS support uses different packages from the support for Linux
        package_list = [
            {'name': 'PyObjc', 'version': version.parse("6.2")},
            {'name': 'redis', 'version': version.parse("3.4.1")},
        ]
    else:
        package_list = [
            {'name': 'PyBluez', 'version': version.parse("0.23")},
            {'name': 'aioblescan', 'version': version.parse("0.2.6")},
            {'name': 'redis', 'version': version.parse("3.4.1")},
        ]
    test_results = []
    all_packages_ok = True
    for package_to_find in package_list:
        result_stub = {
            'package': package_to_find['name'],
            'required_version': package_to_find['version'],
            'installed_version': None,
            'ok': False,
        }
        for package in pkg_resources.working_set:
            if package.project_name == package_to_find['name']:
                result_stub['installed_version'] = package.parsed_version
                # NOTE(review): comparing pkg_resources' parsed_version
                # directly with a packaging.version object may never be
                # equal on some setuptools versions -- verify (comparing
                # the `.public` strings is more robust).
                if result_stub['installed_version'] == result_stub['required_version']:
                    result_stub['ok'] = True
        if result_stub['ok'] is False:
            all_packages_ok = False
        test_results.append(result_stub)
    return all_packages_ok, test_results
# The following was used for testing during development
if __name__ == "__main__":
    # Only inspect apt packages when dpkg is actually present on this host.
    if has_apt():
        apt_ok, apt_test_results = check_apt_packages()
        if apt_ok:
            print("All apt packages found. Package status:")
        else:
            print("Missing apt packages. Package status:")
        for this_test in apt_test_results:
            print("Package {}: {}".format(this_test['package'],
                                          ("Installed" if this_test['result'] else "Not Installed")))
    else:
        print("dpkg not installed - not checking to see if system packages are installed")
    print("")
    # Next, check the python packages
    python_ok, python_test_results = check_python_packages()
    if python_ok:
        print("All required python packages found. Package status:")
    else:
        print("Missing/incorrect python packages. Package status:")
    for this_test in python_test_results:
        print("Package {} - Required Version {} - Installed Version {} - OK? {}".format(
            this_test['package'], this_test['required_version'], this_test['installed_version'], this_test['ok']))
    print("")
| Python | 0.000026 |
4338b097f97bb03be27c81a810a5fc652f842c8a | change cnab processor selection to method" | l10n_br_account_payment_brcobranca/models/account_payment_mode.py | l10n_br_account_payment_brcobranca/models/account_payment_mode.py | # Copyright (C) 2012-Today - KMEE (<http://kmee.com.br>).
# @author Luis Felipe Miléo - mileo@kmee.com.br
# @author Renato Lima - renato.lima@akretion.com.br
# Copyright (C) 2021-Today - Akretion (<http://www.akretion.com>).
# @author Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import api, models
class AccountPaymentMode(models.Model):
    """
    Override Account Payment Mode
    """
    _inherit = "account.payment.mode"
    @api.model
    def _selection_cnab_processor(self):
        """Extend the CNAB processor selection with the BRCobrança backend.

        Appends to the list returned by the parent implementation, so
        options contributed by other modules are preserved.
        """
        selection = super()._selection_cnab_processor()
        selection.append(("brcobranca", "BRCobrança"))
        return selection
| # Copyright (C) 2012-Today - KMEE (<http://kmee.com.br>).
# @author Luis Felipe Miléo - mileo@kmee.com.br
# @author Renato Lima - renato.lima@akretion.com.br
# Copyright (C) 2021-Today - Akretion (<http://www.akretion.com>).
# @author Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountPaymentMode(models.Model):
    """
    Override Account Payment Mode
    """
    _inherit = "account.payment.mode"
    # Adds the BRCobrança backend to the inherited `cnab_processor`
    # selection field via Odoo's selection_add extension mechanism.
    cnab_processor = fields.Selection(
        selection_add=[("brcobranca", "BRCobrança")],
    )
| Python | 0 |
0bbd10058ff58ca5160e74374c0b34f99c429ad8 | Update docstrings | openpathsampling/high_level/part_in_b_tps.py | openpathsampling/high_level/part_in_b_tps.py | from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
    """Fixed length TPS transition accepting any frame in the final state.

    Transition that builds an ensemble used to facilitate the rate
    calculation in fixed-length TPS. [1]_ Details in
    :class:`.PartInBFixedLengthTPSNetwork`.

    See also
    --------
    PartInBFixedLengthTPSNetwork

    References
    ----------
    .. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
       6617 (1999). http://dx.doi.org/10.1063/1.478569
    """
    def _tps_ensemble(self, stateA, stateB):
        """Build the sampled ensemble: one frame in ``stateA`` followed by
        ``self.length - 1`` frames of which at least one is in ``stateB``."""
        return paths.SequentialEnsemble([
            paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
            paths.LengthEnsemble(self.length - 1) \
                & paths.PartInXEnsemble(stateB)
        ])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
    """Network for fixed-length TPS accepting any frame in the final state

    This network samples a single path ensemble where the paths must begin
    in an initial state, run for a fixed total number of frames, and must
    have at least one frame in a final state. This was used to assist in
    the flux part of the TPS rate calculation. [1]_ This version is
    generalized to multiple states.

    Parameters
    ----------
    initial_states : (list of) :class:`.Volume`
        acceptable initial states
    final_states : (list of) :class:`.Volume`
        acceptable final states
    length : int
        length of paths in the path ensemble, in frames
    allow_self_transitions : bool
        whether self-transitions (A->A) are allowed; default is False. For
        this network, A->B->A transitions are *always* allowed.

    References
    ----------
    .. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
       6617 (1999). http://dx.doi.org/10.1063/1.478569
    """
    # Transition class used to build this network's single path ensemble.
    TransitionType = PartInBFixedLengthTPSTransition
| from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
    """Fixed length TPS transition accepting any frame in the final state.

    Implements the ensemble in [1]_. Details in :class:`.PartInBNetwork`.

    See also
    --------
    PartInBNetwork

    References
    ----------
    .. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
       6617 (1999). http://dx.doi.org/10.1063/1.478569
    """
    def _tps_ensemble(self, stateA, stateB):
        """Build the sampled ensemble: one frame in ``stateA`` followed by
        ``self.length - 1`` frames of which at least one is in ``stateB``."""
        return paths.SequentialEnsemble([
            paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
            paths.LengthEnsemble(self.length - 1) \
                & paths.PartInXEnsemble(stateB)
        ])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
"""Network for fixed-length TPS accepting any frame in the final state
This network samples a single path ensemble where the paths must begin
in an initial state, run for a fixed total number of frames, and must
have at least one frame in a final state. This was used to assist in
the flux part of the rate calculation in Ref. [1]_. This version is
generalized to multiple states.
Parameters
----------
intial_states : (list of) :class:`.Volume`
acceptable initial states
final_states : (list of) :class:`.Volume`
acceptable final states
length : int
length of paths in the path ensemble, in frames
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False. For
this network, A->B->A transitions are *always* allowed.
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
TransitionType = PartInBFixedLengthTPSTransition
| Python | 0.000001 |
03fe3aad7358ee4593b9e8909d5374bae9e58b34 | revert changes | denumerator/denumerator.py | denumerator/denumerator.py | #!/usr/bin/python
#pylint: disable=invalid-name
"""
--- dENUMerator ---
by bl4de | bloorq@gmail.com | Twitter: @_bl4de | HackerOne: bl4de
Enumerates list of subdomains (output from tools like Sublist3r or subbrute)
and creates output file with servers responding on port 80/HTTP
This indicates (in most caes) working webserver
usage:
$ ./denumerator.py [domain_list_file]
"""
import sys
import requests
welcome = """
--- dENUMerator ---
usage:
$ ./denumerator.py [domain_list_file]
"""
requests.packages.urllib3.disable_warnings()
allowed_http_responses = [200, 302, 304, 401, 404, 403, 500]
def usage():
"""
prints welcome message
"""
print welcome
def send_request(proto, domain):
"""
sends request to check if server is alive
"""
protocols = {
'http': 'http://',
'https': 'https://'
}
resp = requests.get(protocols.get(proto.lower()) + domain,
timeout=5,
allow_redirects=False,
verify=False,
headers={'Host': domain})
if resp.status_code in allowed_http_responses:
print '[+] domain {}:\t\t HTTP {}'.format(domain, resp.status_code)
output_file.write('{}\n'.format(domain))
return resp.status_code
def enumerate_domains(domains):
"""
enumerates domain from domains
"""
for d in domains:
try:
d = d.strip('\n').strip('\r')
return_code = send_request('http', d)
# if http not working, try https
if return_code not in allowed_http_responses:
send_request('https', d)
except requests.exceptions.InvalidURL:
print '[-] {} is not a valid URL :/'.format(d)
except requests.exceptions.ConnectTimeout:
print '[-] {} :('.format(d)
continue
except requests.exceptions.ConnectionError:
print '[-] connection to {} aborted :/'.format(d)
except requests.exceptions.ReadTimeout:
print '[-] {} read timeout :/'.format(d)
except requests.exceptions.TooManyRedirects:
print '[-] {} probably went into redirects loop :('.format(d)
else:
pass
if len(sys.argv) < 2:
print welcome
exit(0)
domains = open(sys.argv[1].strip(), 'rw').readlines()
output_file = open('denumerator-{}-output.txt'.format(domains[0].strip()), 'w')
enumerate_domains(domains)
output_file.close()
| #!/usr/bin/python
# pylint: disable=invalid-name
"""
--- dENUMerator ---
by bl4de | bloorq@gmail.com | Twitter: @_bl4de | HackerOne: bl4de
Enumerates list of subdomains (output from tools like Sublist3r or subbrute)
and creates output file with servers responding on port 80/HTTP
This indicates (in most caes) working webserver
usage:
$ ./denumerator.py [domain_list_file]
"""
import sys
import requests
welcome = """
--- dENUMerator ---
usage:
$ ./denumerator.py [domain_list_file]
"""
requests.packages.urllib3.disable_warnings()
allowed_http_responses = [200, 302, 304, 401, 404, 403, 500]
http_ports_short_list = [80, 443, 8000, 8008, 8080, 9080]
http_ports_long_list = [80, 443, 591, 981, 1311, 4444,
4445, 7001, 7002, 8000, 8008, 8080, 8088, 8222, 8530, 8531, 8887, 8888, 9080, 16080, 18091]
def usage():
"""
prints welcome message
"""
print welcome
def send_request(proto, domain, port=80):
"""
sends request to check if server is alive
"""
protocols = {
'http': 'http://',
'https': 'https://'
}
full_url = protocols.get(proto.lower()) + domain + ":" + str(port)
resp = requests.get(full_url,
timeout=5,
allow_redirects=False,
verify=False,
headers={'Host': domain})
if resp.status_code in allowed_http_responses:
print '[+] domain {}:\t\t HTTP {}'.format(domain, resp.status_code)
output_file.write('{}\n'.format(domain))
return resp.status_code
def enumerate_domains(domains):
"""
enumerates domain from domains
"""
for d in domains:
# TODO: make selection of port(s) list or pass as option:
for port in http_ports_short_list:
try:
d = d.strip('\n').strip('\r')
return_code = send_request('http', d, port)
# if http not working on this port, try https
if return_code not in allowed_http_responses:
send_request('https', d, port)
except requests.exceptions.InvalidURL:
print '[-] {} is not a valid URL :/'.format(d)
except requests.exceptions.ConnectTimeout:
print '[-] {} :('.format(d)
continue
except requests.exceptions.ConnectionError:
print '[-] connection to {} aborted :/'.format(d)
except requests.exceptions.ReadTimeout:
print '[-] {} read timeout :/'.format(d)
except requests.exceptions.TooManyRedirects:
print '[-] {} probably went into redirects loop :('.format(d)
else:
pass
if len(sys.argv) < 2:
print welcome
exit(0)
domains = open(sys.argv[1].strip(), 'rw').readlines()
output_file = open('denumerator-{}-output.txt'.format(domains[0].strip()), 'w')
enumerate_domains(domains)
output_file.close()
| Python | 0 |
5c0a19386894e36898a48e7f10f01008e284e0c9 | Update dependency bazelbuild/bazel to latest version | third_party/bazel.bzl | third_party/bazel.bzl | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "f259b8abfd575f544635f57f3bb6678d566ef309"
bazel_sha256 = "7e262ca5f5595a74d75953dfdcb75b271c2561a292972da7f3be449a3e8b28f6"
| # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "03719362d021a241ef9af04f33db6efcfd18590a"
bazel_sha256 = "eff6cd1c44a7c3ec63163b415383a4fb7db6c99dfcda1288a586df9671346512"
| Python | 0.000066 |
05a2189224589ac84b14240bf96b110d7c531dfb | add missing parent class inherit | vimball/base.py | vimball/base.py | import bz2
import errno
import gzip
import lzma
import os
import re
import tempfile
def mkdir_p(path):
"""Create potentially nested directories as required.
Does nothing if the path already exists and is a directory.
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_vimball(fd):
"""Test for vimball archive format compliance.
Simple check to see if the first line of the file starts with standard
vimball archive header.
"""
fd.seek(0)
try:
header = fd.readline()
except UnicodeDecodeError:
# binary files will raise exceptions when trying to decode raw bytes to
# str objects in our readline() wrapper
return False
if re.match('^" Vimball Archiver', header) is not None:
return True
return False
class ArchiveError(Exception):
"""Catch-all archive error exception class."""
pass
class Vimball(object):
"""Vimball archive format."""
def __init__(self, path):
if not os.path.exists(path):
raise ArchiveError("path doesn't exist: '{}'".format(path))
self.path = path
_filebase, ext = os.path.splitext(path)
if ext == ".gz":
self.fd = gzip.open(path)
elif ext == ".bz2":
self.fd = bz2.BZ2File(path)
elif ext == ".xz":
self.fd = lzma.open(path)
else:
self.fd = open(path)
if not is_vimball(self.fd):
raise ArchiveError('invalid archive format')
def __del__(self):
try:
self.fd.close()
except AttributeError:
return
def readline(self):
"""Readline wrapper to force readline() to return str objects."""
line = self.fd.__class__.readline(self.fd)
if isinstance(line, bytes):
line = line.decode()
return line
@property
def files(self):
"""Yields archive file information."""
# try new file header format first, then fallback on old
for header in (r"(.*)\t\[\[\[1\n", r"^(\d+)\n$"):
header = re.compile(header)
filename = None
self.fd.seek(0)
line = self.readline()
while line:
m = header.match(line)
if m is not None:
filename = m.group(1)
try:
filelines = int(self.readline().rstrip())
except ValueError:
raise ArchiveError('invalid archive format')
filestart = self.fd.tell()
yield (filename, filelines, filestart)
line = self.readline()
if filename is not None:
break
def extract(self, extractdir=None, verbose=False):
"""Extract archive files to a directory."""
if extractdir is None:
filebase, ext = os.path.splitext(self.path)
if ext in ('.gz', '.bz2', '.xz'):
filebase, _ext = os.path.splitext(filebase)
extractdir = os.path.basename(filebase)
if os.path.exists(extractdir):
tempdir = tempfile.mkdtemp(prefix='vimball-', dir=os.getcwd())
extractdir = os.path.join(tempdir.split('/')[-1], extractdir)
self.fd.seek(0)
for filename, lines, offset in self.files:
filepath = os.path.join(extractdir, filename)
try:
directory = os.path.dirname(filepath)
mkdir_p(directory)
except OSError as e:
raise ArchiveError("failed creating directory '{}': {}".format(
directory, os.strerror(e.errno)))
with open(filepath, 'w') as f:
if verbose:
print(filepath)
self.fd.seek(offset)
for i in range(lines):
f.write(self.readline())
| import bz2
import errno
import gzip
import lzma
import os
import re
import tempfile
def mkdir_p(path):
"""Create potentially nested directories as required.
Does nothing if the path already exists and is a directory.
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_vimball(fd):
"""Test for vimball archive format compliance.
Simple check to see if the first line of the file starts with standard
vimball archive header.
"""
fd.seek(0)
try:
header = fd.readline()
except UnicodeDecodeError:
# binary files will raise exceptions when trying to decode raw bytes to
# str objects in our readline() wrapper
return False
if re.match('^" Vimball Archiver', header) is not None:
return True
return False
class ArchiveError(Exception):
"""Catch-all archive error exception class."""
pass
class Vimball:
"""Vimball archive format."""
def __init__(self, path):
if not os.path.exists(path):
raise ArchiveError("path doesn't exist: '{}'".format(path))
self.path = path
_filebase, ext = os.path.splitext(path)
if ext == ".gz":
self.fd = gzip.open(path)
elif ext == ".bz2":
self.fd = bz2.BZ2File(path)
elif ext == ".xz":
self.fd = lzma.open(path)
else:
self.fd = open(path)
if not is_vimball(self.fd):
raise ArchiveError('invalid archive format')
def __del__(self):
try:
self.fd.close()
except AttributeError:
return
def readline(self):
"""Readline wrapper to force readline() to return str objects."""
line = self.fd.__class__.readline(self.fd)
if isinstance(line, bytes):
line = line.decode()
return line
@property
def files(self):
"""Yields archive file information."""
# try new file header format first, then fallback on old
for header in (r"(.*)\t\[\[\[1\n", r"^(\d+)\n$"):
header = re.compile(header)
filename = None
self.fd.seek(0)
line = self.readline()
while line:
m = header.match(line)
if m is not None:
filename = m.group(1)
try:
filelines = int(self.readline().rstrip())
except ValueError:
raise ArchiveError('invalid archive format')
filestart = self.fd.tell()
yield (filename, filelines, filestart)
line = self.readline()
if filename is not None:
break
def extract(self, extractdir=None, verbose=False):
"""Extract archive files to a directory."""
if extractdir is None:
filebase, ext = os.path.splitext(self.path)
if ext in ('.gz', '.bz2', '.xz'):
filebase, _ext = os.path.splitext(filebase)
extractdir = os.path.basename(filebase)
if os.path.exists(extractdir):
tempdir = tempfile.mkdtemp(prefix='vimball-', dir=os.getcwd())
extractdir = os.path.join(tempdir.split('/')[-1], extractdir)
self.fd.seek(0)
for filename, lines, offset in self.files:
filepath = os.path.join(extractdir, filename)
try:
directory = os.path.dirname(filepath)
mkdir_p(directory)
except OSError as e:
raise ArchiveError("failed creating directory '{}': {}".format(
directory, os.strerror(e.errno)))
with open(filepath, 'w') as f:
if verbose:
print(filepath)
self.fd.seek(offset)
for i in range(lines):
f.write(self.readline())
| Python | 0.000029 |
24f5afff6b8e65c633521189f4ac6bf4fbacbdb7 | Fix datapusher.wsgi to work with ckan-service-provider 0.0.2 | deployment/datapusher.wsgi | deployment/datapusher.wsgi | import os
import sys
import hashlib
activate_this = os.path.join('/usr/lib/ckan/datapusher/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import ckanserviceprovider.web as web
import datapusher.jobs as jobs
os.environ['JOB_CONFIG'] = '/etc/ckan/datapusher_settings.py'
web.init()
application = web.app
| import os
import sys
import hashlib
activate_this = os.path.join('/usr/lib/ckan/datapusher/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import ckanserviceprovider.web as web
import datapusher.jobs as jobs
os.environ['JOB_CONFIG'] = '/etc/ckan/datapusher_settings.py'
web.configure()
application = web.app
| Python | 0.000002 |
efb420ddc6aa0052ecea6da84613da6e4cf1afc8 | Update Bazel to latest version | third_party/bazel.bzl | third_party/bazel.bzl | # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bazel_version = "b017468d07da1e45282b9d153a4308fdace11eeb"
bazel_sha256 = "ce8dc5936238b6b7e27cdcdc13d481c94f20526fabfe20cbbceff17da83503e7"
| # Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bazel_version = "6fe70c2fef70b8a3da3aa3cbea26c6bf60f17e13"
bazel_sha256 = "ad525027ecc7056feb23fe96cfe8b28257a6c47a9d908e0bc4e0e0988bf61d28"
| Python | 0 |
0bfa8373f82f801b34e7609f4ff6f28ab280a635 | Change mnist.py to follow the latest spec | example/mnist/mnist.py | example/mnist/mnist.py | #!/usr/bin/env python
"""Chainer example: train a multi-layer perceptron on MNIST
This is a minimal example to write a feed-forward net. It requires scikit-learn
to load MNIST dataset.
"""
import numpy as np
import six
import chainer
from chainer import cuda, FunctionSet
import chainer.functions as F
from chainer import optimizers
class MNIST(object):
@staticmethod
def create(*args, **kwargs):
self = MNIST()
if 'model_file_path' in kwargs:
with open(kwargs['model_file_path']) as model_pickle:
self.model = six.moves.cPickle.load(model_pickle)
else:
n_units = 1000
self.model = FunctionSet(
l1=F.Linear(784, n_units),
l2=F.Linear(n_units, n_units),
l3=F.Linear(n_units, 10))
if 'gpu' in kwargs:
self.gpu = kwargs['gpu']
else:
self.gpu = -1
self.prepare_gpu_and_optimizer()
return self
@staticmethod
def load(filepath, *args, **kwargs):
with open(filepath, 'r') as f:
return six.moves.cPickle.load(f)
def prepare_gpu_and_optimizer(self):
if self.gpu >= 0:
cuda.init(self.gpu)
self.model.to_gpu()
# Setup optimizer
self.optimizer = optimizers.Adam()
self.optimizer.setup(self.model.collect_parameters())
def forward(self, x_data, train=True):
x = chainer.Variable(x_data)
h1 = F.dropout(F.relu(self.model.l1(x)), train=train)
h2 = F.dropout(F.relu(self.model.l2(h1)), train=train)
return self.model.l3(h2)
def fit(self, xys):
x = []
y = []
for d in xys:
x.append(d['data'])
y.append(d['label'])
x_batch = np.array(x, dtype=np.float32)
y_batch = np.array(y, dtype=np.int32)
if self.gpu >= 0:
x_batch = cuda.to_gpu(x_batch)
y_batch = cuda.to_gpu(y_batch)
self.optimizer.zero_grads()
y = self.forward(x_batch)
t = chainer.Variable(y_batch)
loss = F.softmax_cross_entropy(y, t)
acc = F.accuracy(y, t)
loss.backward()
self.optimizer.update()
nloss = float(cuda.to_cpu(loss.data)) * len(y_batch)
naccuracy = float(cuda.to_cpu(acc.data)) * len(y_batch)
retmap = {
'loss': nloss,
'accuracy': naccuracy,
}
return retmap
def predict(self, x):
# non batch
xx = []
xx.append(x)
x_data = np.array(xx, dtype=np.float32)
if self.gpu >= 0:
x_data = cuda.to_gpu(x_data)
y = self.forward(x_data, train=False)
y = y.data.reshape(y.data.shape[0], y.data.size / y.data.shape[0])
pred = y.argmax(axis=1)
return int(pred[0])
def get_model(self):
return self.model
def save(self, filepath, *args, **kwargs):
with open(filepath, 'w') as f:
six.moves.cPickle.dump(self, f)
def load_model(self, model_data):
self.model = six.moves.cPickle.loads(str(model_data))
| #!/usr/bin/env python
"""Chainer example: train a multi-layer perceptron on MNIST
This is a minimal example to write a feed-forward net. It requires scikit-learn
to load MNIST dataset.
"""
import numpy as np
import six
import chainer
from chainer import cuda, FunctionSet
import chainer.functions as F
from chainer import optimizers
class MNIST(object):
@staticmethod
def create(params):
self = MNIST()
if 'model_file_path' in params:
with open(params['model_file_path']) as model_pickle:
self.model = six.moves.cPickle.load(model_pickle)
else:
n_units = 1000
self.model = FunctionSet(
l1=F.Linear(784, n_units),
l2=F.Linear(n_units, n_units),
l3=F.Linear(n_units, 10))
if 'gpu' in params:
self.gpu = params['gpu']
else:
self.gpu = -1
self.prepare_gpu_and_optimizer()
return self
@staticmethod
def load(filepath, params):
with open(filepath, 'r') as f:
return six.moves.cPickle.load(f)
def prepare_gpu_and_optimizer(self):
if self.gpu >= 0:
cuda.init(self.gpu)
self.model.to_gpu()
# Setup optimizer
self.optimizer = optimizers.Adam()
self.optimizer.setup(self.model.collect_parameters())
def forward(self, x_data, train=True):
x = chainer.Variable(x_data)
h1 = F.dropout(F.relu(self.model.l1(x)), train=train)
h2 = F.dropout(F.relu(self.model.l2(h1)), train=train)
return self.model.l3(h2)
def fit(self, xys):
x = []
y = []
for d in xys:
x.append(d['data'])
y.append(d['label'])
x_batch = np.array(x, dtype=np.float32)
y_batch = np.array(y, dtype=np.int32)
if self.gpu >= 0:
x_batch = cuda.to_gpu(x_batch)
y_batch = cuda.to_gpu(y_batch)
self.optimizer.zero_grads()
y = self.forward(x_batch)
t = chainer.Variable(y_batch)
loss = F.softmax_cross_entropy(y, t)
acc = F.accuracy(y, t)
loss.backward()
self.optimizer.update()
nloss = float(cuda.to_cpu(loss.data)) * len(y_batch)
naccuracy = float(cuda.to_cpu(acc.data)) * len(y_batch)
retmap = {
'loss': nloss,
'accuracy': naccuracy,
}
return retmap
def predict(self, x):
# non batch
xx = []
xx.append(x)
x_data = np.array(xx, dtype=np.float32)
if self.gpu >= 0:
x_data = cuda.to_gpu(x_data)
y = self.forward(x_data, train=False)
y = y.data.reshape(y.data.shape[0], y.data.size / y.data.shape[0])
pred = y.argmax(axis=1)
return int(pred[0])
def get_model(self):
return self.model
def save(self, filepath, params):
with open(filepath, 'w') as f:
six.moves.cPickle.dump(self, f)
def load_model(self, model_data):
self.model = six.moves.cPickle.loads(str(model_data))
| Python | 0 |
8959d982ddc810f9c226ce36884521cf979a61f1 | add destroy cb | gui/tests/testicontheme.py | gui/tests/testicontheme.py | #!/usr/bin/env python
# doesnt work. segfault.
# TODO: other screens?
import pygtk
pygtk.require("2.0")
import gtk
import xfce4
widget = xfce4.gui.IconTheme(gtk.gdk.screen_get_default())
ic = widget.load("folder", 24)
print ic
icname = widget.lookup("folder", 24)
print icname
image = gtk.Image()
image.set_from_pixbuf(ic)
image.show()
w = gtk.Window()
w.connect("destroy", lambda x: gtk.main_quit())
w.add(image)
w.show()
gtk.main()
| #!/usr/bin/env python
# doesnt work. segfault.
# TODO: other screens?
import pygtk
pygtk.require("2.0")
import gtk
import xfce4
widget = xfce4.gui.IconTheme(gtk.gdk.screen_get_default())
ic = widget.load("folder", 24)
print ic
icname = widget.lookup("folder", 24)
print icname
image = gtk.Image()
image.set_from_pixbuf(ic)
image.show()
w = gtk.Window()
w.add(image)
w.show()
gtk.main()
| Python | 0 |
23c8044b84557dea940d527213022bfa19d28293 | test that Human is in Ensembl species | tests/test_ensembl_species_service.py | tests/test_ensembl_species_service.py | #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 04-07-2017 09:14
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
Unit Tests for Ensembl Species Service
"""
import unittest
# App modules
import ensembl.service
class TestEnsemblSpeciesService(unittest.TestCase):
__NCB_TAXONOMY_HUMAN = '9606'
def setUp(self):
self.ensembl_service = ensembl.service.get_service()
def test_get_species_data(self):
species_data_service = self.ensembl_service.get_species_data_service()
self.assertIsNotNone(species_data_service.get_species_data(),
"Requested RAW species data from Ensembl IS NOT None")
def test_count_of_species(self):
self.assertNotEqual(self.ensembl_service.get_species_data_service().count_ensembl_species(),
0,
"Ensembl has a non-zero number of species")
def test_human_species_is_present(self):
"""
Test that Human taxonomy is present, this unit test is also testing the indexing mechanism
:return: no returned value
"""
self.assertIsNotNone(
self.ensembl_service.get_species_data_service().get_species_entry_for_taxonomy_id(
self.__NCB_TAXONOMY_HUMAN), "Human NCBI taxonomy is in species data from Ensembl")
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| #
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 04-07-2017 09:14
# ---
# © 2017 Manuel Bernal Llinares <mbdebian@gmail.com>
# All rights reserved.
#
"""
Unit Tests for Ensembl Species Service
"""
import unittest
# App modules
import ensembl.service
class TestEnsemblSpeciesService(unittest.TestCase):
__NCB_TAXONOMY_HUMAN = 9606
def setUp(self):
self.ensembl_service = ensembl.service.get_service()
def test_get_species_data(self):
species_data_service = self.ensembl_service.get_species_data_service()
self.assertIsNotNone(species_data_service.get_species_data(),
"Requested RAW species data from Ensembl IS NOT None")
def test_count_of_species(self):
self.assertNotEqual(self.ensembl_service.get_species_data_service().count_ensembl_species(),
0,
"Ensembl has a non-zero number of species")
def test_human_species_is_present(self):
"""
Test that Human taxonomy is present, this unit test is also testing the indexing mechanism
:return: no returned value
"""
#TODO
pass
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
| Python | 0.999977 |
d977a9ee9814264bd1d3080cadcd7e43b7c1d27e | Revert changes | examples/News/news2.py | examples/News/news2.py | #!/usr/bin/env python
from Kiwi2 import Delegates
from Kiwi2.Widgets.List import List, Column
from Kiwi2.initgtk import gtk
class NewsItem:
"""An instance that holds information about a news article."""
def __init__(self, title, author, url):
self.title, self.author, self.url = title, author, url
# Assemble friendly Pigdog.org news into NewsItem instances so they can
# be used in the CListDelegate
news = [
NewsItem("Smallpox Vaccinations for EVERYONE", "JRoyale",
"http://www.pigdog.org/auto/Power_Corrupts/link/2700.html"),
NewsItem("Is that uranium in your pocket or are you just happy to see me?",
"Baron Earl",
"http://www.pigdog.org/auto/bad_people/link/2699.html"),
NewsItem("Cut 'n Paste", "Baron Earl",
"http://www.pigdog.org/auto/ArtFux/link/2690.html"),
NewsItem("A Slippery Exit", "Reverend CyberSatan",
"http://www.pigdog.org/auto/TheCorporateFuck/link/2683.html"),
NewsItem("Those Crazy Dutch Have Resurrected Elvis", "Miss Conduct",
"http://www.pigdog.org/auto/viva_la_musica/link/2678.html")
]
# Specify the columns: one for each attribute of NewsItem, the URL
# column invisible by default
my_columns = [ Column("title", sorted=True),
Column("author"),
Column("url", title="URL", visible=False) ]
kiwilist = List(my_columns, news)
slave = Delegates.SlaveDelegate(toplevel=kiwilist)
slave.show_all()
gtk.main()
| #!/usr/bin/env python
from Kiwi2 import Delegates
from Kiwi2.Widgets.List import List, Column
from Kiwi2.initgtk import gtk
class NewsItem:
"""An instance that holds information about a news article."""
def __init__(self, title, author, url):
self.title, self.author, self.url = title, author, url
# Assemble friendly Pigdog.org news into NewsItem instances so they can
# be used in the CListDelegate
news = [
NewsItem("Smallpox Vaccinations for EVERYONE", "JRoyale",
"http://www.pigdog.org/auto/Power_Corrupts/link/2700.html"),
NewsItem("Is that uranium in your pocket or are you just happy to see me?",
"Baron Earl",
"http://www.pigdog.org/auto/bad_people/link/2699.html"),
NewsItem("Cut 'n Paste", "Baron Earl",
"http://www.pigdog.org/auto/ArtFux/link/2690.html"),
NewsItem("A Slippery Exit", "Reverend CyberSatan",
"http://www.pigdog.org/auto/TheCorporateFuck/link/2683.html"),
NewsItem("Those Crazy Dutch Have Resurrected Elvis", "Miss Conduct",
"http://www.pigdog.org/auto/viva_la_musica/link/2678.html")
]
# Specify the columns: one for each attribute of NewsItem, the URL
# column invisible by default
my_columns = [ Column("title", sorted=True),
Column("author", justify=gtk.JUSTIFY_RIGHT),
Column("url", title="URL", visible=False) ]
kiwilist = List(my_columns, news)
w = gtk.Window()
w.set_size_request(600, 250)
w.add(kiwilist)
w.show_all()
gtk.main()
| Python | 0.000001 |
7c91d556220088ea5286611f3674aaa88f3a6340 | Add failing test for "Crash if session was flushed before commit (with validity strategy)" | tests/test_exotic_operation_combos.py | tests/test_exotic_operation_combos.py | from six import PY3
from tests import TestCase
class TestExoticOperationCombos(TestCase):
def test_insert_deleted_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 0
def test_insert_deleted_and_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
def test_insert_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article.versions[0].operation_type == 0
def test_replace_deleted_object_with_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
article2 = self.Article()
article2.name = u'Another article'
article2.content = u'Some other content'
self.session.add(article)
self.session.add(article2)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2.id = article.id
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
class TestExoticOperationCombosWithValidityStrategy(TestExoticOperationCombos):
versioning_strategy = 'validity'
| from six import PY3
from tests import TestCase
class TestExoticOperationCombos(TestCase):
def test_insert_deleted_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 0
def test_insert_deleted_and_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
def test_insert_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article.versions[0].operation_type == 0
def test_replace_deleted_object_with_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
article2 = self.Article()
article2.name = u'Another article'
article2.content = u'Some other content'
self.session.add(article)
self.session.add(article2)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2.id = article.id
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
| Python | 0 |
e816b1f63c299141c6ad907c860d2c5411829405 | Simplify aggregator code | aleph/analysis/aggregate.py | aleph/analysis/aggregate.py | import logging
from collections import defaultdict
from followthemoney.types import registry
from aleph.analysis.util import tag_key
from aleph.analysis.util import TAG_COUNTRY, TAG_PHONE
from aleph.analysis.util import TAG_PERSON, TAG_COMPANY
log = logging.getLogger(__name__)
class TagAggregator(object):
MAX_TAGS = 10000
CUTOFFS = {
TAG_COUNTRY: .3,
TAG_PERSON: .003,
TAG_COMPANY: .003,
TAG_PHONE: .05,
}
def __init__(self):
self.values = defaultdict(list)
self.types = defaultdict(int)
def add(self, prop, value):
key = tag_key(value)
if key is None:
return
if (key, prop) not in self.values:
if len(self.values) > self.MAX_TAGS:
return
self.values[(key, prop)].append(value)
self.types[prop] += 1
def prop_cutoff(self, prop):
freq = self.CUTOFFS.get(prop, 0)
return self.types.get(prop, 0) * freq
@property
def entities(self):
for (_, prop), tags in self.values.items():
# skip entities that do not meet a threshold of relevance:
cutoff = self.prop_cutoff(prop)
if len(tags) < cutoff:
continue
label = tags[0]
if prop in (TAG_COMPANY, TAG_PERSON):
label = registry.name.pick(tags)
yield label, prop
def __len__(self):
return len(self.values)
| import logging
from Levenshtein import setmedian
from aleph.analysis.util import tag_key
from aleph.analysis.util import TAG_COUNTRY, TAG_LANGUAGE, TAG_PHONE
from aleph.analysis.util import TAG_PERSON, TAG_COMPANY
log = logging.getLogger(__name__)
class TagAggregator(object):
MAX_TAGS = 10000
CUTOFFS = {
TAG_COUNTRY: .2,
TAG_LANGUAGE: .3,
TAG_PERSON: .003,
TAG_COMPANY: .003,
TAG_PHONE: .05,
}
def __init__(self):
self.tags = {}
self.types = {}
def add(self, type_, tag):
key = tag_key(tag)
if key is None:
return
if (key, type_) not in self.tags:
self.tags[(key, type_)] = []
self.tags[(key, type_)].append(tag)
if type_ not in self.types:
if len(self.types) > self.MAX_TAGS:
return
self.types[type_] = 0
self.types[type_] += 1
def type_cutoff(self, type_):
freq = self.CUTOFFS.get(type_, 0)
return self.types.get(type_, 0) * freq
@property
def entities(self):
for (key, type_), tags in self.tags.items():
# skip entities that do not meet a threshold of relevance:
cutoff = self.type_cutoff(type_)
if len(tags) < cutoff:
continue
label = tags[0]
if type_ in (TAG_COMPANY, TAG_PERSON) and len(set(tags)) > 0:
label = setmedian(tags)
yield label, type_
def __len__(self):
return len(self.tags)
| Python | 0.000073 |
30f55607990a356f49c03f04a707aa7d59a8eedf | Remove unncessary extra parens | examples/chatserver.py | examples/chatserver.py | #!/usr/bin/env python
"""Chat Server Example
This example demonstrates how to create a very simple telnet-style chat
server that supports many connecting clients.
"""
from optparse import OptionParser
from circuits import Component, Debugger
from circuits.net.sockets import TCPServer, Write
__version__ = "0.0.1"
USAGE = "%prog [options]"
VERSION = "%prog v" + __version__
def parse_options():
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option(
"-b", "--bind",
action="store", type="string",
default="0.0.0.0:8000", dest="bind",
help="Bind to address:[port]"
)
parser.add_option(
"-d", "--debug",
action="store_true",
default=False, dest="debug",
help="Enable debug mode"
)
opts, args = parser.parse_args()
return opts, args
class ChatServer(Component):
def init(self, args, opts):
"""Initialize our ``ChatServer`` Component.
This uses the convenience ``init`` method which is called after the
component is proeprly constructed and initialized and passed the
same args and kwargs that were passed during construction.
"""
self.args = args
self.opts = opts
self.clients = {}
if opts.debug:
Debugger().register(self)
if ":" in opts.bind:
address, port = opts.bind.split(":")
port = int(port)
else:
address, port = opts.bind, 8000
bind = (address, port)
TCPServer(bind).register(self)
def broadcast(self, data, exclude=None):
exclude = exclude or []
targets = (sock for sock in self.clients.keys() if sock not in exclude)
for target in targets:
self.fire(Write(target, data))
def connect(self, sock, host, port):
"""Connect Event -- Triggered for new connecting clients"""
self.clients[sock] = {
"host": sock,
"port": port,
"state": {
"nickname": None,
"registered": False
}
}
self.fire(Write(sock, "Welcome to the circuits Chat Server!\n"))
self.fire(Write(sock, "Please enter a desired nickname: "))
def disconnect(self, sock):
"""Disconnect Event -- Triggered for disconnecting clients"""
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("!!! {0:s} has left !!!\n".format(nickname),
exclude=[sock])
del self.clients[sock]
def read(self, sock, data):
"""Read Event -- Triggered for when client conenctions have data"""
if not self.clients[sock]["state"]["registered"]:
nickname = data.strip()
self.clients[sock]["state"]["registered"] = True
self.clients[sock]["state"]["nickname"] = nickname
self.broadcast("!!! {0:s} has joined !!!\n".format(nickname),
exclude=[sock])
else:
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("<{0:s}> {1:s}\n".format(nickname, data.strip()),
exclude=[sock])
def main():
opts, args = parse_options()
# Configure and "run" the System.
ChatServer(args, opts).run()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
"""Chat Server Example
This example demonstrates how to create a very simple telnet-style chat
server that supports many connecting clients.
"""
from optparse import OptionParser
from circuits import Component, Debugger
from circuits.net.sockets import TCPServer, Write
__version__ = "0.0.1"
USAGE = "%prog [options]"
VERSION = "%prog v" + __version__
def parse_options():
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option(
"-b", "--bind",
action="store", type="string",
default="0.0.0.0:8000", dest="bind",
help="Bind to address:[port]"
)
parser.add_option(
"-d", "--debug",
action="store_true",
default=False, dest="debug",
help="Enable debug mode"
)
opts, args = parser.parse_args()
return opts, args
class ChatServer(Component):
def init(self, args, opts):
"""Initialize our ``ChatServer`` Component.
This uses the convenience ``init`` method which is called after the
component is proeprly constructed and initialized and passed the
same args and kwargs that were passed during construction.
"""
self.args = args
self.opts = opts
self.clients = {}
if opts.debug:
Debugger().register(self)
if ":" in opts.bind:
address, port = opts.bind.split(":")
port = int(port)
else:
address, port = opts.bind, 8000
bind = (address, port)
TCPServer(bind).register(self)
def broadcast(self, data, exclude=None):
exclude = exclude or []
targets = (sock for sock in self.clients.keys() if sock not in exclude)
for target in targets:
self.fire(Write(target, data))
def connect(self, sock, host, port):
"""Connect Event -- Triggered for new connecting clients"""
self.clients[sock] = {
"host": sock,
"port": port,
"state": {
"nickname": None,
"registered": False
}
}
self.fire(Write(sock, "Welcome to the circuits Chat Server!\n"))
self.fire(Write(sock, "Please enter a desired nickname: "))
def disconnect(self, sock):
"""Disconnect Event -- Triggered for disconnecting clients"""
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("!!! {0:s} has left !!!\n".format(nickname),
exclude=[sock])
del self.clients[sock]
def read(self, sock, data):
"""Read Event -- Triggered for when client conenctions have data"""
if not self.clients[sock]["state"]["registered"]:
nickname = data.strip()
self.clients[sock]["state"]["registered"] = True
self.clients[sock]["state"]["nickname"] = nickname
self.broadcast("!!! {0:s} has joined !!!\n".format(nickname),
exclude=[sock])
else:
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("<{0:s}> {1:s}\n".format(nickname, data.strip()),
exclude=[sock])
def main():
opts, args = parse_options()
# Configure and "run" the System.
(ChatServer(args, opts)).run()
if __name__ == "__main__":
main()
| Python | 0.000004 |
8c6ebf17541e48e6d4fdd9d521a4391ce621f301 | Use getNextSchedule in ScheduleDao | broadcast_api.py | broadcast_api.py | from mysql import mysql
from mysql import DB_Exception
from datetime import date
from datetime import datetime
from datetime import timedelta
import os.path
import json
#The API load schedule.txt and find out the first image which has not print and the time limit still allow
def load_schedule():
try:
return_msg = {}
return_msg["result"] = "fail"
schedule_dir = ""
sche_target_id = ""
type_id = ""
system_file_name = ""
#connect to mysql
db = mysql()
db.connect()
#find next schedule
with ScheduleDao() as scheduleDao:
next_schedule = scheduleDao.getNextSchedule()
if next_schedule is None:
return_msg["error"] = "no schedule"
return return_msg
return_msg["schedule_id"] = next_schedule['schedule_id']
sche_target_id = next_schedule['sche_target_id']
return_msg["display_time"] = int(next_schedule['display_time'])
#find the file
if sche_target_id[0:4]=="imge":
sql = ("SELECT type_id, img_system_name, img_like_count FROM image_data WHERE img_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "image"
elif sche_target_id[0:4]=="text":
sql = ("SELECT type_id, text_system_name, text_like_count FROM text_data WHERE text_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "text"
else :
db.close()
return_msg["error"] = "target id type error"
return return_msg
pure_result = db.query(sql)
try:
type_id = int(pure_result[0][0])
system_file_name = pure_result[0][1]
return_msg["like_count"] = int(pure_result[0][2])
except:
db.close()
return_msg["error"] = "no file record"
return return_msg
#find type dir
sql = ("SELECT type_dir, type_name FROM data_type WHERE type_id=" + str(type_id))
pure_result = db.query(sql)
try:
schedule_dir = os.path.join(schedule_dir, "static/")
schedule_dir = os.path.join(schedule_dir, pure_result[0][0])
schedule_dir = os.path.join(schedule_dir, system_file_name)
return_msg["file"] = os.path.join(pure_result[0][0], system_file_name)
return_msg["type_name"] = str(pure_result[0][1])
except:
db.close()
return_msg["error"] = "no type record"
return return_msg
#if text read file
if return_msg["file_type"] == "text":
if not os.path.isfile(schedule_dir) :
db.close()
return_msg["error"] = "no file"
return return_msg
else :
with open(schedule_dir,"r") as fp:
file_content = json.load(fp)
return_msg["file_text"] = file_content
#update display count
if return_msg["file_type"] == "image":
sql = "UPDATE image_data SET img_display_count=img_display_count+1 WHERE img_id='"+sche_target_id+"'"
elif return_msg["file_type"] == "text":
sql = "UPDATE text_data SET text_display_count=text_display_count+1 WHERE text_id='"+sche_target_id+"'"
db.cmd(sql)
return_msg["result"] = "success"
return return_msg
except DB_Exception as e:
db.close()
return_msg["error"] = e.args[1]
return return_msg
| from mysql import mysql
from mysql import DB_Exception
from datetime import date
from datetime import datetime
from datetime import timedelta
import os.path
import json
#The API load schedule.txt and find out the first image which has not print and the time limit still allow
def load_schedule():
try:
return_msg = {}
return_msg["result"] = "fail"
schedule_dir = ""
sche_target_id = ""
type_id = ""
system_file_name = ""
#connect to mysql
db = mysql()
db.connect()
#find schedule
sql = ("SELECT sche_id, sche_target_id, sche_display_time FROM schedule WHERE sche_is_used=0 ORDER BY sche_sn ASC LIMIT 1")
pure_result = db.query(sql)
try:
return_msg["schedule_id"] = pure_result[0][0]
sche_target_id = pure_result[0][1]
return_msg["display_time"] = int(pure_result[0][2])
except:
db.close()
return_msg["error"] = "no schedule"
return return_msg
#find the file
if sche_target_id[0:4]=="imge":
sql = ("SELECT type_id, img_system_name, img_like_count FROM image_data WHERE img_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "image"
elif sche_target_id[0:4]=="text":
sql = ("SELECT type_id, text_system_name, text_like_count FROM text_data WHERE text_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "text"
else :
db.close()
return_msg["error"] = "target id type error"
return return_msg
pure_result = db.query(sql)
try:
type_id = int(pure_result[0][0])
system_file_name = pure_result[0][1]
return_msg["like_count"] = int(pure_result[0][2])
except:
db.close()
return_msg["error"] = "no file record"
return return_msg
#find type dir
sql = ("SELECT type_dir, type_name FROM data_type WHERE type_id=" + str(type_id))
pure_result = db.query(sql)
try:
schedule_dir = os.path.join(schedule_dir, "static/")
schedule_dir = os.path.join(schedule_dir, pure_result[0][0])
schedule_dir = os.path.join(schedule_dir, system_file_name)
return_msg["file"] = os.path.join(pure_result[0][0], system_file_name)
return_msg["type_name"] = str(pure_result[0][1])
except:
db.close()
return_msg["error"] = "no type record"
return return_msg
#if text read file
if return_msg["file_type"] == "text":
if not os.path.isfile(schedule_dir) :
db.close()
return_msg["error"] = "no file"
return return_msg
else :
with open(schedule_dir,"r") as fp:
file_content = json.load(fp)
return_msg["file_text"] = file_content
#update display count
if return_msg["file_type"] == "image":
sql = "UPDATE image_data SET img_display_count=img_display_count+1 WHERE img_id='"+sche_target_id+"'"
elif return_msg["file_type"] == "text":
sql = "UPDATE text_data SET text_display_count=text_display_count+1 WHERE text_id='"+sche_target_id+"'"
db.cmd(sql)
return_msg["result"] = "success"
return return_msg
except DB_Exception as e:
db.close()
return_msg["error"] = e.args[1]
return return_msg
| Python | 0 |
4d75e9e17830ab5ca03c4cc73eefc7b17a43e810 | Make CNN tagger example work with GPU | examples/cnn_tagger.py | examples/cnn_tagger.py | from __future__ import print_function
from timeit import default_timer as timer
import plac
import numpy
from thinc.neural.id2vec import Embed
from thinc.neural.vec2vec import Model, ReLu, Softmax
from thinc.neural._classes.convolution import ExtractWindow
from thinc.neural._classes.maxout import Maxout
from thinc.neural._classes.batchnorm import BatchNorm
from thinc.loss import categorical_crossentropy
from thinc.api import layerize, chain, clone
from thinc.neural.util import flatten_sequences, remap_ids
from thinc.neural.ops import NumpyOps, CupyOps
from thinc.extra.datasets import ancora_pos_tags
def to_categorical(y, nb_classes=None):
# From keras
y = numpy.array(y, dtype='int').ravel()
if not nb_classes:
nb_classes = numpy.max(y) + 1
n = y.shape[0]
categorical = numpy.zeros((n, nb_classes), dtype='float32')
categorical[numpy.arange(n), y] = 1
return categorical
def main(width=64, vector_length=64):
train_data, check_data, nr_tag = ancora_pos_tags(numpy)
#Model.ops = CupyOps()
with Model.define_operators({'**': clone, '>>': chain}):
model = (
Embed(width, vector_length, nV=5000)
>> ExtractWindow(nW=1)
>> Maxout(300)
>> ExtractWindow(nW=1)
>> Maxout(300)
>> ExtractWindow(nW=1)
>> Maxout(300)
>> Softmax(nr_tag))
train_X, train_y = zip(*train_data)
print("NR vector", max(max(seq) for seq in train_X))
dev_X, dev_y = zip(*check_data)
n_train = sum(len(x) for x in train_X)
remapping = remap_ids(NumpyOps())
train_X = remapping(flatten_sequences(train_X)[0])[0]
dev_X = remapping(flatten_sequences(dev_X)[0])[0]
train_y = flatten_sequences(train_y)[0]
train_y = to_categorical(train_y, nb_classes=nr_tag)
dev_y = flatten_sequences(dev_y)[0]
dev_y = to_categorical(dev_y, nb_classes=nr_tag)
train_X = model.ops.asarray(train_X)
train_y = model.ops.asarray(train_y)
dev_X = model.ops.asarray(dev_X)
dev_y = model.ops.asarray(dev_y)
with model.begin_training(train_X, train_y) as (trainer, optimizer):
trainer.batch_size = 128
trainer.nb_epoch = 20
trainer.dropout = 0.0
trainer.dropout_decay = 1e-4
epoch_times = [timer()]
def track_progress():
start = timer()
acc = model.evaluate(dev_X, dev_y)
end = timer()
with model.use_params(optimizer.averages):
avg_acc = model.evaluate(dev_X, dev_y)
stats = (
acc,
avg_acc,
float(n_train) / (end-epoch_times[-1]),
float(dev_y.shape[0]) / (end-start))
print("%.3f (%.3f) acc, %d wps train, %d wps run" % stats)
epoch_times.append(end)
trainer.each_epoch.append(track_progress)
for X, y in trainer.iterate(train_X, train_y):
yh, backprop = model.begin_update(X, drop=trainer.dropout)
#d_loss, loss = categorical_crossentropy(yh, y)
#optimizer.set_loss(loss)
backprop(yh-y, optimizer)
with model.use_params(optimizer.averages):
print(model.evaluate(dev_X, dev_y))
if __name__ == '__main__':
if 1:
plac.call(main)
else:
import cProfile
import pstats
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
| from __future__ import print_function
from timeit import default_timer as timer
import plac
from thinc.neural.id2vec import Embed
from thinc.neural.vec2vec import Model, ReLu, Softmax
from thinc.neural._classes.convolution import ExtractWindow
from thinc.neural._classes.maxout import Maxout
from thinc.loss import categorical_crossentropy
from thinc.api import layerize, chain, clone
from thinc.neural.util import flatten_sequences
from thinc.extra.datasets import ancora_pos_tags
def main(width=64, vector_length=64):
train_data, check_data, nr_tag = ancora_pos_tags()
with Model.define_operators({'**': clone, '>>': chain}):
model = (
layerize(flatten_sequences)
>> Embed(width, vector_length)
>> ExtractWindow(nW=1)
>> Maxout(128)
>> ExtractWindow(nW=1)
>> Maxout(128)
>> ExtractWindow(nW=1)
>> Maxout(128)
>> Softmax(nr_tag))
train_X, train_y = zip(*train_data)
print("NR vector", max(max(seq) for seq in train_X))
dev_X, dev_y = zip(*check_data)
dev_y = model.ops.flatten(dev_y)
n_train = sum(len(x) for x in train_X)
with model.begin_training(train_X, train_y) as (trainer, optimizer):
trainer.batch_size = 4
trainer.nb_epoch = 20
trainer.dropout = 0.9
trainer.dropout_decay = 1e-4
epoch_times = [timer()]
def track_progress():
start = timer()
acc = model.evaluate(dev_X, dev_y)
end = timer()
with model.use_params(optimizer.averages):
avg_acc = model.evaluate(dev_X, dev_y)
stats = (
acc,
avg_acc,
float(n_train) / (end-epoch_times[-1]),
float(dev_y.shape[0]) / (end-start))
print("%.3f (%.3f) acc, %d wps train, %d wps run" % stats)
epoch_times.append(end)
trainer.each_epoch.append(track_progress)
for X, y in trainer.iterate(train_X, train_y):
y = model.ops.flatten(y)
yh, backprop = model.begin_update(X, drop=trainer.dropout)
d_loss, loss = categorical_crossentropy(yh, y)
optimizer.set_loss(loss)
backprop(d_loss, optimizer)
with model.use_params(optimizer.averages):
print(model.evaluate(dev_X, dev_y))
if __name__ == '__main__':
if 1:
plac.call(main)
else:
import cProfile
import pstats
cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
s = pstats.Stats("Profile.prof")
s.strip_dirs().sort_stats("time").print_stats()
| Python | 0.000002 |
ca06a55d096eb4c67bf70c479107128b73087ab9 | integrate update | w1_integrate.py | w1_integrate.py | from sympy import integrate, symbols, log
# if 0 <= x < 0.25:
# return float(0)
# elif 0.25 <= x < 0.5:
# return 16.0 * (x - 0.25)
# elif 0.5 <= x < 0.75:
# return -16.0 * (x - 0.75)
# elif 0.75 < x <= 1:
# return float(0)
# h(f) = integrate(-f(x)lnf(x), (x, 0, 1))
x = symbols('x')
left = integrate(-16.0 * (x - 0.25) * log(16.0 * (x - 0.25)), (x, 0.25, 0.5))
right = integrate(16.0 * (x - 0.75) * log(-16.0 * (x - 0.75)), (x, 0.5, 0.75))
with open('w1_integrate_result.txt', 'w') as f:
f.write('left:{0} bit\n'.format(left * 1.44))
f.flush()
f.write('right:{0} bit\n'.format(right * 1.44))
f.flush()
f.write('all:{0} bit\n'.format((left + right) * 1.44))
f.flush()
f.close()
| from sympy import integrate, symbols, log
# if 0 <= x < 0.25:
# return float(0)
# elif 0.25 <= x < 0.5:
# return 16.0 * (x - 0.25)
# elif 0.5 <= x < 0.75:
# return -16.0 * (x - 0.75)
# elif 0.75 < x <= 1:
# return float(0)
# h(f) = integrate(-f(x)lnf(x), (x, 0, 1))
x = symbols('x')
left = integrate(-16.0 * (x - 0.25) * log(16.0 * (x - 0.25)), (x, 0.25, 0.5))
right = integrate(16.0 * (x - 0.75) * log(-16.0 * (x - 0.75)), (x, 0.5, 0.75))
print 'left {0}'.format(left)
print 'right {0}'.format(right)
print 'all {0}'.format(left + right)
| Python | 0 |
f2bcbddab48eff06df78faff1ebb47c28adb4e0d | fix schema test | altair/tests/test_schema.py | altair/tests/test_schema.py | from altair.schema import load_schema
def test_schema():
schema = load_schema()
assert schema["$schema"]=="http://json-schema.org/draft-04/schema#"
| from altair.schema import SCHEMA
def test_schema():
assert SCHEMA["$schema"]=="http://json-schema.org/draft-04/schema#"
| Python | 0.000001 |
48f4c8dba40cb2fe03a74a7a4d7d979892601ddc | use __file__ to determine library path | tests/context.py | tests/context.py | # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import sample
| # -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import sample | Python | 0.000002 |
24b2dd2f84a2a9ece9a9a4f7898c6f29233c19bc | Add message to welcome accepted students. | app/soc/modules/gsoc/models/program.py | app/soc/modules/gsoc/models/program.py | # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC specific Program Model.
"""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import program
class GSoCProgramMessages(program.ProgramMessages):
"""The GSoCProgramMessages model.
"""
#: Message sent to the students that are accepted for the program.
accepted_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Accepted Students Message'))
#: Message sent to welcome accepted students to the program. This does
#: not include any personalized text from the organization they got
#: accepted for.
accepted_students_welcome_msg = db.TextProperty(required=False,
verbose_name=ugettext('Accepted Students Welcome Message'))
#: Message sent to the students that are rejected for the program.
rejected_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Rejected Students Message'))
class GSoCProgram(program.Program):
"""GSoC Program model extends the basic Program model.
"""
_messages_model = GSoCProgramMessages
homepage_url_name = "gsoc_homepage"
#: Required field storing application limit of the program.
apps_tasks_limit = db.IntegerProperty(required=True,
verbose_name=ugettext('Application/Tasks Limit'))
apps_tasks_limit.group = program.GENERAL_INFO_GROUP
apps_tasks_limit.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>20</b> is the student applications limit for <i>Google Summer '
'of Code</i>.</tt>')
#: Optional field storing minimum slots per organization
min_slots = db.IntegerProperty(required=False, default=1,
verbose_name=ugettext('Min slots per org'))
min_slots.group = program.GENERAL_INFO_GROUP
min_slots.help_text = ugettext(
'The amount of slots each org should get at the very least.')
#: Optional field storing maximum slots per organization
max_slots = db.IntegerProperty(required=False, default=50,
verbose_name=ugettext('Max slots per org'))
max_slots.group = program.GENERAL_INFO_GROUP
max_slots.help_text = ugettext(
'The amount of slots each organization should get at most.')
#: Required field storing slots limit of the program.
slots = db.IntegerProperty(required=True,
verbose_name=ugettext('Slots'))
slots.group = program.GENERAL_INFO_GROUP
slots.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>500</b> might be an amount of slots for <i>Google Summer '
'of Code</i>, which indicates how many students can be accepted.</tt>')
#: Optional field storing the allocation of slots for this program
slots_allocation = db.TextProperty(required=False,
verbose_name=ugettext('the allocation of slots.'))
#: Whether the slots allocations are visible
allocations_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Slot allocations visible'))
allocations_visible.group = program.GENERAL_INFO_GROUP
allocations_visible.help_text = ugettext(
'Field used to indicate if the slot allocations should be visible.')
#: Whether the duplicates are visible
duplicates_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Duplicate proposals visible'))
duplicates_visible.group = program.GENERAL_INFO_GROUP
duplicates_visible.help_text = ugettext(
'Field used to indicate if duplicate proposals should be made visible '
'to org admins.')
| # Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC specific Program Model.
"""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import program
class GSoCProgramMessages(program.ProgramMessages):
"""The GSoCProgramMessages model.
"""
#: Message sent to the students that are accepted for the program.
accepted_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Accepted Students Message'))
#: Message sent to the students that are rejected for the program.
rejected_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Rejected Students Message'))
class GSoCProgram(program.Program):
"""GSoC Program model extends the basic Program model.
"""
_messages_model = GSoCProgramMessages
homepage_url_name = "gsoc_homepage"
#: Required field storing application limit of the program.
apps_tasks_limit = db.IntegerProperty(required=True,
verbose_name=ugettext('Application/Tasks Limit'))
apps_tasks_limit.group = program.GENERAL_INFO_GROUP
apps_tasks_limit.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>20</b> is the student applications limit for <i>Google Summer '
'of Code</i>.</tt>')
#: Optional field storing minimum slots per organization
min_slots = db.IntegerProperty(required=False, default=1,
verbose_name=ugettext('Min slots per org'))
min_slots.group = program.GENERAL_INFO_GROUP
min_slots.help_text = ugettext(
'The amount of slots each org should get at the very least.')
#: Optional field storing maximum slots per organization
max_slots = db.IntegerProperty(required=False, default=50,
verbose_name=ugettext('Max slots per org'))
max_slots.group = program.GENERAL_INFO_GROUP
max_slots.help_text = ugettext(
'The amount of slots each organization should get at most.')
#: Required field storing slots limit of the program.
slots = db.IntegerProperty(required=True,
verbose_name=ugettext('Slots'))
slots.group = program.GENERAL_INFO_GROUP
slots.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>500</b> might be an amount of slots for <i>Google Summer '
'of Code</i>, which indicates how many students can be accepted.</tt>')
#: Optional field storing the allocation of slots for this program
slots_allocation = db.TextProperty(required=False,
verbose_name=ugettext('the allocation of slots.'))
#: Whether the slots allocations are visible
allocations_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Slot allocations visible'))
allocations_visible.group = program.GENERAL_INFO_GROUP
allocations_visible.help_text = ugettext(
'Field used to indicate if the slot allocations should be visible.')
#: Whether the duplicates are visible
duplicates_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Duplicate proposals visible'))
duplicates_visible.group = program.GENERAL_INFO_GROUP
duplicates_visible.help_text = ugettext(
'Field used to indicate if duplicate proposals should be made visible '
'to org admins.')
| Python | 0 |
3c3013b8e7de5e1f8ae57e1d4a8b672cab8f6c47 | Test helpers : Message box, click yes vs enter | tests/helpers.py | tests/helpers.py | from PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QFileDialog
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
def click_on_top_message_box():
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QMessageBox):
QTest.mouseClick(w.button(QMessageBox.Yes), Qt.LeftButton)
elif isinstance(w, QDialog) and w.windowTitle() == "Registration":
QTest.keyClick(w, Qt.Key_Enter)
def select_file_dialog(filename):
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QFileDialog) and w.isVisible():
w.hide()
w.selectFile(filename)
w.show()
w.accept()
| from PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QFileDialog
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
def click_on_top_message_box():
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QMessageBox):
QTest.keyClick(w, Qt.Key_Enter)
elif isinstance(w, QDialog) and w.windowTitle() == "Registration":
QTest.keyClick(w, Qt.Key_Enter)
def select_file_dialog(filename):
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QFileDialog) and w.isVisible():
w.hide()
w.selectFile(filename)
w.show()
w.accept()
| Python | 0 |
1ab939ed7da45e7f6ff113b7e71017b28ee877a2 | Use 'with' keyword while opening file in tests/helpers.py | tests/helpers.py | tests/helpers.py | import razorpay
import os
import unittest
def mock_file(filename):
if not filename:
return ''
file_dir = os.path.dirname(__file__)
file_path = "{}/mocks/{}.json".format(file_dir, filename)
with open(file_path) as f:
mock_file_data = f.read()
return mock_file_data
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.razorpay.com/v1'
self.secondary_url = 'https://test-api.razorpay.com/v1'
self.payment_id = 'fake_payment_id'
self.refund_id = 'fake_refund_id'
self.card_id = 'fake_card_id'
self.customer_id = 'fake_customer_id'
self.token_id = 'fake_token_id'
self.addon_id = 'fake_addon_id'
self.subscription_id = 'fake_subscription_id'
self.plan_id = 'fake_plan_id'
self.settlement_id = 'fake_settlement_id'
self.client = razorpay.Client(auth=('key_id', 'key_secret'))
self.secondary_client = razorpay.Client(auth=('key_id', 'key_secret'),
base_url=self.secondary_url)
| import razorpay
import os
import unittest
def mock_file(filename):
if not filename:
return ''
file_dir = os.path.dirname(__file__)
file_path = "{}/mocks/{}.json".format(file_dir, filename)
return open(file_path).read()
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.razorpay.com/v1'
self.secondary_url = 'https://test-api.razorpay.com/v1'
self.payment_id = 'fake_payment_id'
self.refund_id = 'fake_refund_id'
self.card_id = 'fake_card_id'
self.customer_id = 'fake_customer_id'
self.token_id = 'fake_token_id'
self.addon_id = 'fake_addon_id'
self.subscription_id = 'fake_subscription_id'
self.plan_id = 'fake_plan_id'
self.settlement_id = 'fake_settlement_id'
self.client = razorpay.Client(auth=('key_id', 'key_secret'))
self.secondary_client = razorpay.Client(auth=('key_id', 'key_secret'),
base_url=self.secondary_url)
| Python | 0.000002 |
9f069cf4fe634f34ccda29c18c03c63db04fe199 | Update Funcaptcha example | examples/funcaptcha.py | examples/funcaptcha.py | from urllib.parse import urlparse
import requests
from os import environ
import re
from random import choice
from python_anticaptcha import AnticaptchaClient, FunCaptchaTask
api_key = environ['KEY']
site_key_pattern = 'data-pkey="(.+?)"'
url = 'https://www.funcaptcha.com/demo/'
client = AnticaptchaClient(api_key)
session = requests.Session()
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
session.headers = {'User-Agent': UA}
proxy_urls = environ['PROXY_URL'].split(',')
def parse_url(url):
parsed = urlparse(url)
return dict(
proxy_type=parsed.scheme,
proxy_address=parsed.hostname,
proxy_port=parsed.port,
proxy_login=parsed.username,
proxy_password=parsed.password
)
def get_form_html():
return session.get(url).text
def get_token(form_html):
proxy_url = choice(proxy_urls)
proxy = parse_url(proxy_url)
site_key = re.search(site_key_pattern, form_html).group(1)
task = FunCaptchaTask(url, site_key, proxy=proxy, user_agent=UA)
job = client.createTask(task)
job.join(maximum_time=10**4)
return job.get_token_response()
def process():
html = get_form_html()
return get_token(html)
if __name__ == '__main__':
print(process())
| import requests
from os import environ
import re
from random import choice
from python_anticaptcha import AnticaptchaClient, FunCaptchaTask, Proxy
api_key = environ['KEY']
site_key_pattern = 'data-pkey="(.+?)"'
url = 'https://www.funcaptcha.com/demo/'
client = AnticaptchaClient(api_key)
session = requests.Session()
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
session.headers = {'User-Agent': UA}
proxy_urls = environ['PROXY_URL'].split(',')
def get_form_html():
return session.get(url).text
def get_token(form_html):
proxy_url = choice(proxy_urls)
proxy = Proxy.parse_url(proxy_url)
site_key = re.search(site_key_pattern, form_html).group(1)
task = FunCaptchaTask(url, site_key, proxy=proxy, user_agent=UA)
job = client.createTask(task)
job.join(maximum_time=10**4)
return job.get_token_response()
def process():
html = get_form_html()
return get_token(html)
if __name__ == '__main__':
print(process())
| Python | 0 |
d3583108eca98f72b9b4898a5cc5e9cf1cacf251 | Fix log_invocation test on python2 with hash randomization | test/units/module_utils/basic/test__log_invocation.py | test/units/module_utils/basic/test__log_invocation.py | # -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import sys
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
class TestModuleUtilsBasic(unittest.TestCase):
@unittest.skipIf(sys.version_info[0] >= 3, "Python 3 is not supported on targets (yet)")
def test_module_utils_basic__log_invocation(self):
from ansible.module_utils import basic
# test basic log invocation
basic.MODULE_COMPLEX_ARGS = json.dumps(dict(foo=False, bar=[1,2,3], bam="bam", baz=u'baz'))
am = basic.AnsibleModule(
argument_spec=dict(
foo = dict(default=True, type='bool'),
bar = dict(default=[], type='list'),
bam = dict(default="bam"),
baz = dict(default=u"baz"),
password = dict(default=True),
no_log = dict(default="you shouldn't see me", no_log=True),
),
)
am.log = MagicMock()
am._log_invocation()
# Message is generated from a dict so it will be in an unknown order.
# have to check this manually rather than with assert_called_with()
args = am.log.call_args[0]
self.assertEqual(len(args), 1)
message = args[0]
self.assertEqual(len(message), len('Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD'))
self.assertTrue(message.startswith('Invoked with '))
self.assertIn(' bam=bam', message)
self.assertIn(' bar=[1, 2, 3]', message)
self.assertIn(' foo=False', message)
self.assertIn(' baz=baz', message)
self.assertIn(' no_log=NOT_LOGGING_PARAMETER', message)
self.assertIn(' password=NOT_LOGGING_PASSWORD', message)
kwargs = am.log.call_args[1]
self.assertEqual(kwargs,
dict(log_args={
'foo': 'False',
'bar': '[1, 2, 3]',
'bam': 'bam',
'baz': 'baz',
'password': 'NOT_LOGGING_PASSWORD',
'no_log': 'NOT_LOGGING_PARAMETER',
})
)
| # -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <jimi@sngx.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
class TestModuleUtilsBasic(unittest.TestCase):
@unittest.skip("Skipping due to unknown reason. See #15105")
def test_module_utils_basic__log_invocation(self):
from ansible.module_utils import basic
# test basic log invocation
basic.MODULE_COMPLEX_ARGS = json.dumps(dict(foo=False, bar=[1,2,3], bam="bam", baz=u'baz'))
am = basic.AnsibleModule(
argument_spec=dict(
foo = dict(default=True, type='bool'),
bar = dict(default=[], type='list'),
bam = dict(default="bam"),
baz = dict(default=u"baz"),
password = dict(default=True),
no_log = dict(default="you shouldn't see me", no_log=True),
),
)
am.log = MagicMock()
am._log_invocation()
am.log.assert_called_with(
'Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD ',
log_args={
'foo': 'False',
'bar': '[1, 2, 3]',
'bam': 'bam',
'baz': 'baz',
'password': 'NOT_LOGGING_PASSWORD',
'no_log': 'NOT_LOGGING_PARAMETER',
},
)
| Python | 0.000002 |
d2fb1f22be6c6434873f2bcafb6b8a9b714acde9 | Use fail signal in fail_archive_on_error decorator | website/archiver/decorators.py | website/archiver/decorators.py | import functools
from framework.exceptions import HTTPError
from website.project.decorators import _inject_nodes
from website.archiver import ARCHIVER_UNCAUGHT_ERROR
from website.archiver import signals
def fail_archive_on_error(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
_inject_nodes(kwargs)
registration = kwargs['node']
signals.send.archive_fail(
registration,
ARCHIVER_UNCAUGHT_ERROR,
[str(e)]
)
return wrapped
| import functools
from framework.exceptions import HTTPError
from website.project.decorators import _inject_nodes
from website.archiver import ARCHIVER_UNCAUGHT_ERROR
from website.archiver import utils
def fail_archive_on_error(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
_inject_nodes(kwargs)
registration = kwargs['node']
utils.handle_archive_fail(
ARCHIVER_UNCAUGHT_ERROR,
registration.registered_from,
registration,
registration.registered_user,
str(e)
)
return wrapped
| Python | 0.000001 |
3caa77b0f4b43e274eba21a8d759335f7833b99d | Change OSF_COOKIE_DOMAIN to None in local-dist.py | website/settings/local-dist.py | website/settings/local-dist.py | # -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Comment out to use SHARE in development
USE_SHARE = False
# Comment out to use celery in development
USE_CELERY = False
# Comment out to use GnuPG in development
USE_GNUPG = False # Changing this may require you to re-enter encrypted fields
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
OSF_COOKIE_DOMAIN = None
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
# Uncomment if GPG was installed with homebrew
# GNUPG_BINARY = '/usr/local/bin/gpg'
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
| # -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Comment out to use SHARE in development
USE_SHARE = False
# Comment out to use celery in development
USE_CELERY = False
# Comment out to use GnuPG in development
USE_GNUPG = False # Changing this may require you to re-enter encrypted fields
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
OSF_COOKIE_DOMAIN = '.localhost'
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
# Uncomment if GPG was installed with homebrew
# GNUPG_BINARY = '/usr/local/bin/gpg'
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
| Python | 0 |
22ae3a2e9a236de61c078d234d920a3e6bc62d7b | Add a bit of docs | pylisp/application/lispd/address_tree/ddt_container_node.py | pylisp/application/lispd/address_tree/ddt_container_node.py | '''
Created on 1 jun. 2013
@author: sander
'''
from .container_node import ContainerNode
class DDTContainerNode(ContainerNode):
'''
A ContainerNode that indicates that we are responsible for this part of
the DDT tree.
'''
| '''
Created on 1 jun. 2013
@author: sander
'''
from .container_node import ContainerNode
class DDTContainerNode(ContainerNode):
pass
| Python | 0.000004 |
8acaec546de0311f5f33c2e8fb9e1828a1cbc44b | Fix memory leak caused by using rabbit as the result backend for celery | worker_manager/celeryconfig.py | worker_manager/celeryconfig.py | """
Configuration file for celerybeat/worker.
Dynamically adds consumers from all manifest files in worker_manager/manifests/
to the celerybeat schedule. Also adds a heartbeat function to the schedule,
which adds every 30 seconds, and a monthly task to normalize all non-normalized
documents.
"""
from celery.schedules import crontab
from datetime import timedelta
import os
import yaml
BROKER_URL = 'amqp://guest@localhost'
# CELERY_RESULT_BACKEND = 'amqp://guest@localhost'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'UTC'
CELERY_IMPORTS = ('worker_manager.celerytasks',)
# Programmatically generate celery beat schedule
SCHED = {}
for manifest in os.listdir('worker_manager/manifests/'):
filepath = 'worker_manager/manifests/' + manifest
with open(filepath) as f:
info = yaml.load(f)
SCHED['run ' + manifest] = {
'task': 'worker_manager.celerytasks.run_consumer',
'schedule': crontab(day_of_week=info['days'], hour=info['hour'], minute=info['minute']),
'args': [filepath],
}
# Deprecated
SCHED['request normalization of recent documents'] = {
'task': 'worker_manager.celerytasks.request_normalized',
'schedule': crontab(minute='*/1')
}
SCHED['check_archive'] = {
'task': 'worker_manager.celerytasks.check_archive',
'schedule': crontab(day_of_month='1', hour='23', minute='59'),
}
SCHED['heartbeat'] = {
'task': 'worker_manager.celerytasks.heartbeat',
'schedule': timedelta(seconds=30),
'args': (16, 16)
}
CELERYBEAT_SCHEDULE = SCHED
| """
Configuration file for celerybeat/worker.
Dynamically adds consumers from all manifest files in worker_manager/manifests/
to the celerybeat schedule. Also adds a heartbeat function to the schedule,
which adds every 30 seconds, and a monthly task to normalize all non-normalized
documents.
"""
from celery.schedules import crontab
from datetime import timedelta
import os
import yaml
BROKER_URL = 'amqp://guest@localhost'
CELERY_RESULT_BACKEND = 'amqp://guest@localhost'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'UTC'
CELERY_IMPORTS = ('worker_manager.celerytasks',)
# Programmatically generate celery beat schedule
SCHED = {}
for manifest in os.listdir('worker_manager/manifests/'):
filepath = 'worker_manager/manifests/' + manifest
with open(filepath) as f:
info = yaml.load(f)
SCHED['run ' + manifest] = {
'task': 'worker_manager.celerytasks.run_consumer',
'schedule': crontab(day_of_week=info['days'], hour=info['hour'], minute=info['minute']),
'args': [filepath],
}
# Deprecated
SCHED['request normalization of recent documents'] = {
'task': 'worker_manager.celerytasks.request_normalized',
'schedule': crontab(minute='*/1')
}
SCHED['check_archive'] = {
'task': 'worker_manager.celerytasks.check_archive',
'schedule': crontab(day_of_month='1', hour='23', minute='59'),
}
SCHED['add'] = {
'task': 'worker_manager.celerytasks.heartbeat',
'schedule': timedelta(seconds=30),
'args': (16, 16)
}
CELERYBEAT_SCHEDULE = SCHED
| Python | 0.000004 |
16806f7a620ddaba727fc6c7d6387eaa1c17f103 | Update p4-test-tool.py | benchexec/tools/p4-test-tool.py | benchexec/tools/p4-test-tool.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
#Needed for benchexec to run, but irrelevant for p4 extension
def executable(self, tool):
return "/"
def name(self):
return "P4 Test"
def determine_result(self, run):
for line in run.output:
if run.cmdline[3] + " ... ok" in line:
return benchexec.result.RESULT_CLASS_TRUE
else:
return benchexec.result.RESULT_CLASS_FALSE
| # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
#Needed for benchexec to run, but irrelevant for p4 extension
def executable(self, tool):
return "/"
def name(self):
return "P4 Test"
def determine_result(self, run):
for line in run.output:
if run.cmdline[3] + " ... ok" in line:
return benchexec.result.RESULT_CLASS_TRUE
else:
return benchexec.result.RESULT_CLASS_FALSE
| Python | 0.000001 |
b9d30a39f31862af607af44e97878a287f9361c5 | bump to v0.5.3 | steam/__init__.py | steam/__init__.py | __version__ = "0.5.3"
__author__ = "Rossen Georgiev"
from steam.steamid import SteamID
from steam.webapi import WebAPI
| __version__ = "0.5.2"
__author__ = "Rossen Georgiev"
from steam.steamid import SteamID
from steam.webapi import WebAPI
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.