commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
12d3f87dc06edfbb39ad378242f3c253ae256108 | Convert racket to new style executor, even if it doesn't work | DMOJ/judge,DMOJ/judge,DMOJ/judge | dmoj/executors/RKT.py | dmoj/executors/RKT.py | import errno
from dmoj.cptbox.syscalls import *
from dmoj.executors.base_executor import ScriptExecutor
from dmoj.judgeenv import env
class Executor(ScriptExecutor):
ext = '.rkt'
name = 'RKT'
fs = ['.*\.(?:so|rkt?$)', '/dev/tty$', '/proc/meminfo$', '.*racket.*', '/proc/stat$',
'/proc/self/maps$', '/usr/lib/i386-linux-gnu', '/etc/nsswitch.conf$',
'/etc/passwd$', '/dev/null$', '/sys/devices/system/cpu/online$']
command = env['runtime'].get('racket')
syscalls = ['timer_create', 'timer_settime',
'timer_delete', 'newselect', 'select']
address_grace = 131072
test_program = '''\
#lang racket
(displayln "Hello, World!")
'''
def get_security(self):
security = super(Executor, self).get_security()
def handle_socketcall(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
security[sys_socketcall] = handle_socketcall
security[sys_epoll_create] = True
security[sys_sigprocmask] = True
security[sys_prctl] = lambda debugger: debugger.arg0 in (15,)
return security
def get_cmdline(self):
return ['racket', self._code]
initialize = Executor.initialize
| from subprocess import Popen
import errno
from dmoj.executors.resource_proxy import ResourceProxy
from .utils import test_executor
from dmoj.cptbox import SecurePopen, CHROOTSecurity, PIPE
from dmoj.cptbox.syscalls import *
from dmoj.judgeenv import env
RACKET_FS = ['.*\.(?:so|rkt?$)', '/dev/tty$', '/proc/meminfo$', '.*racket.*', '/proc/stat$',
'/proc/self/maps$', '/usr/lib/i386-linux-gnu', '/etc/nsswitch.conf$',
'/etc/passwd$', '/dev/null$']
class Executor(ResourceProxy):
def __init__(self, problem_id, source_code):
super(Executor, self).__init__()
self._script = source_code_file = self._file('%s.rkt' % problem_id)
with open(source_code_file, 'wb') as fo:
fo.write(source_code)
def _security(self):
security = CHROOTSecurity(RACKET_FS)
def handle_socketcall(debugger):
def socket_return():
debugger.result = -errno.EACCES
debugger.syscall = debugger.getpid_syscall
debugger.on_return(socket_return)
return True
security[sys_socketcall] = handle_socketcall
security[sys_epoll_create] = True
security[sys_sigprocmask] = True
security[sys_prctl] = lambda debugger: debugger.arg0 in (15,)
return security
def launch(self, *args, **kwargs):
return SecurePopen([env['runtime']['racket'], self._script] + list(args),
security=self._security(),
time=kwargs.get('time'),
memory=kwargs.get('memory'),
address_grace=131072,
stderr=(PIPE if kwargs.get('pipe_stderr', False) else None),
env={'LANG': 'C'}, cwd=self._dir)
def launch_unsafe(self, *args, **kwargs):
return Popen([env['runtime']['racket'], self._script] + list(args),
env={'LANG': 'C'},
cwd=self._dir,
**kwargs)
def initialize(sandbox=True):
if 'racket' not in env['runtime']:
return False
return test_executor('RKT', Executor, '''\
#lang racket
(displayln "Hello, World!")
''', sandbox=sandbox)
| agpl-3.0 | Python |
41c718c6cda0c85a68addcf9308d1ecd52bc8e34 | remove unnecessary wrapping of stateful, stateless function | paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,Intel-tensorflow/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_stati
c_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,yongtang/tensorflow,Intel-Corporation/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,gautam1858/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow | tensorflow/python/ops/script_ops_test.py | tensorflow/python/ops/script_ops_test.py | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for script operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.platform import test
class NumpyFunctionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless_flag(self):
call_count = 0
def plus(a, b):
nonlocal call_count
call_count += 1
return a + b
@def_function.function(autograph=False)
def tensor_double_plus_stateless(a, b):
sum1 = numpy_function(plus, [a, b], dtypes.int32, stateful=False)
sum2 = numpy_function(plus, [a, b], dtypes.int32, stateful=False)
return sum1 + sum2
# different argument
tensor_double_plus_stateless(
constant_op.constant(1),
constant_op.constant(2),
)
self.assertEqual(call_count, 1) # +1 as only the first encounter was executed
@def_function.function(autograph=False)
def tensor_double_plus_stateful(a, b):
sum1 = numpy_function(plus, [a, b], dtypes.int32, stateful=True)
sum2 = numpy_function(plus, [a, b], dtypes.int32, stateful=True)
return sum1 + sum2
tensor_double_plus_stateful(
constant_op.constant(3),
constant_op.constant(4),
)
self.assertEqual(call_count, 3) # +2 as it is stateful, func was both times executed
if __name__ == "__main__":
test.main()
| # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for script operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.eager import def_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.framework import constant_op
from tensorflow.python.ops import script_ops
from tensorflow.python.ops.script_ops import numpy_function
from tensorflow.python.platform import test
class NumpyFunctionTest(test.TestCase):
@test_util.run_in_graph_and_eager_modes
def test_numpy_arguments(self):
def plus(a, b):
return a + b
actual_result = script_ops.numpy_function(plus, [1, 2], dtypes.int32)
expect_result = constant_op.constant(3, dtypes.int32)
self.assertAllEqual(actual_result, expect_result)
def test_stateless_flag(self):
call_count = 0
def plus(a, b):
global call_count
call_count += 1
return a + b
@def_function.function
def tensor_plus_stateful(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=True)
@def_function.function
def tensor_plus_stateless(a, b):
return numpy_function(plus, [a, b], dtypes.int32, stateful=False)
@def_function.function(autograph=False)
def tensor_double_plus_stateless(a, b):
sum1 = tensor_plus_stateless(a, b)
sum2 = tensor_plus_stateless(a, b)
return sum1 + sum2
# different argument
tensor_double_plus_stateless(
constant_op.constant(1),
constant_op.constant(2),
)
self.assertEqual(call_count, 1) # +1 as only the first encounter was executed
@def_function.function(autograph=False)
def tensor_double_plus_stateful(a, b):
sum1 = tensor_plus_stateful(a, b)
sum2 = tensor_plus_stateful(a, b)
return sum1 + sum2
tensor_double_plus_stateful(
constant_op.constant(3),
constant_op.constant(4),
)
self.assertEqual(call_count, 3) # +2 as it is stateful, func was both times executed
if __name__ == "__main__":
test.main()
| apache-2.0 | Python |
4da8d8f227a85dfa89c52cfb5c496b5b40d5f07d | Update config.py | wildfishyu/BuildBot | BuildBot/config.py | BuildBot/config.py | # coding=utf-8
#
WORKSPACE = ''
SVN_PATH = '%WORKSPACE%' + '\\KBSS_WLFS\\'
SOLUTION_PATH = SVN_PATH + '\\solution\\'
SOLUTION = SOLUTION_PATH + '\\KBSS.sln'
VERSION_TAG = 'KBSS_WLFS_V1.0_SP4'
PUBLISH_PATH = '%WORKSPACE%' + '\\issue\\KBSS_WLFS\\'
KCBP_CONTROL_SERVICE = 'http://127.0.0.1:8001/restartkcbp'
# BUILDBOT_FILE`
BUILDBOT_INFO_FILE = 'buildbot.info'
TASKS = (
{'id': 'LoadBuildbotInfoFile'},
{'id': 'UpdateSvn'},
{'id': 'GetCurRev'},
{'id': 'MakeBuildbotEnv'},
#{'id': 'ShellTask', 'name': 'Build Solution', 'cmd': '%BUILDBOT_PATH%\\build_task\\build.bat'},
#{'id': 'ShellTask', 'name': 'Make KBSSSPD.XML', 'cmd': '%BUILDBOT_PATH%\\build_task\\bpxml.bat'},
#{'id': 'ShellTask', 'name': 'Publish Pack', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_packet.bat'},
#{'id': 'ShellTask', 'name': 'Publish To Kcbp', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_to_kcbp.bat'},
#{'id': 'ShellTask', 'name': 'Publish To Kcbp', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_to_ftp.bat'},
#{'id': 'UpdBuildbotInfoFile'},
#{'id': 'StartKcbp'}
{'id': 'ShellTask', 'name': 'UpdateProject', 'cmd': '%BUILDBOT_PATH%\\build_task\\upd_solution.bat'},
)
| # coding=utf-8
#
WORKSPACE = ''
SVN_PATH = '%WORKSPACE%' + '\\KBSS_WLFS\\'
SOLUTION_PATH = SVN_PATH + '\\solution\\'
SOLUTION = SOLUTION_PATH + '\\KBSS.sln'
VERSION_TAG = 'KBSS_WLFS_V1.0_SP4'
PUBLISH_PATH = '%WORKSPACE%' + '\\issue\\KBSS_WLFS\\'
KCBP_CONTROL_SERVICE = 'http://172.16.41.114:8001/restartkcbp'
# BUILDBOT_FILE`
BUILDBOT_INFO_FILE = 'buildbot.info'
TASKS = (
{'id': 'LoadBuildbotInfoFile'},
{'id': 'UpdateSvn'},
{'id': 'GetCurRev'},
{'id': 'MakeBuildbotEnv'},
#{'id': 'ShellTask', 'name': 'Build Solution', 'cmd': '%BUILDBOT_PATH%\\build_task\\build.bat'},
#{'id': 'ShellTask', 'name': 'Make KBSSSPD.XML', 'cmd': '%BUILDBOT_PATH%\\build_task\\bpxml.bat'},
#{'id': 'ShellTask', 'name': 'Publish Pack', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_packet.bat'},
#{'id': 'ShellTask', 'name': 'Publish To Kcbp', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_to_kcbp.bat'},
#{'id': 'ShellTask', 'name': 'Publish To Kcbp', 'cmd': '%BUILDBOT_PATH%\\build_task\\publish_to_ftp.bat'},
#{'id': 'UpdBuildbotInfoFile'},
#{'id': 'StartKcbp'}
{'id': 'ShellTask', 'name': 'UpdateProject', 'cmd': '%BUILDBOT_PATH%\\build_task\\upd_solution.bat'},
)
| apache-2.0 | Python |
f13e232e451254dc150c80fd625354907a9a4713 | update unicel urls | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/messaging/smsbackends/unicel/urls.py | corehq/messaging/smsbackends/unicel/urls.py | from django.conf.urls import patterns, url
from corehq.messaging.smsbackends.unicel.views import incoming
urlpatterns = patterns('',
url(r'^in/$', incoming, name='incoming'),
)
| from django.conf.urls import *
urlpatterns = patterns('',
url(r'^in/$', 'corehq.messaging.smsbackends.unicel.views.incoming'),
)
| bsd-3-clause | Python |
066aefc6256d40087df0a1f30b07466e3701ba2a | remove one final bit of hstore | crateio/crate.io | crate_project/settings/production/gondor.py | crate_project/settings/production/gondor.py | from .base import *
from local_settings import *
from secret_settings import *
# Fix Email Settings
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "donald@crate.io"
DATABASES["default"]["ENGINE"] = "django.db.backends.postgresql_psycopg2"
# Configure Celery
BROKER_TRANSPORT = "redis"
BROKER_HOST = GONDOR_REDIS_HOST
BROKER_PORT = GONDOR_REDIS_PORT
BROKER_VHOST = "0"
BROKER_PASSWORD = GONDOR_REDIS_PASSWORD
BROKER_POOL_LIMIT = 10
CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = GONDOR_REDIS_HOST
CELERY_REDIS_PORT = GONDOR_REDIS_PORT
CELERY_REDIS_PASSWORD = GONDOR_REDIS_PASSWORD
| from .base import *
from local_settings import *
from secret_settings import *
# Fix Email Settings
SERVER_EMAIL = "server@crate.io"
DEFAULT_FROM_EMAIL = "donald@crate.io"
# Fix Database Engine for hstore
DATABASES["default"]["ENGINE"] = "django_hstore.postgresql_psycopg2"
# Configure Celery
BROKER_TRANSPORT = "redis"
BROKER_HOST = GONDOR_REDIS_HOST
BROKER_PORT = GONDOR_REDIS_PORT
BROKER_VHOST = "0"
BROKER_PASSWORD = GONDOR_REDIS_PASSWORD
BROKER_POOL_LIMIT = 10
CELERY_RESULT_BACKEND = "redis"
CELERY_REDIS_HOST = GONDOR_REDIS_HOST
CELERY_REDIS_PORT = GONDOR_REDIS_PORT
CELERY_REDIS_PASSWORD = GONDOR_REDIS_PASSWORD
| bsd-2-clause | Python |
f6759ae7b089bce8d10b8455d5157c0bf3cb8561 | Change the name of file | tarzan0820/odoo,NL66278/OCB,jiangzhixiao/odoo,ClearCorp-dev/odoo,dalegregory/odoo,nagyistoce/odoo-dev-odoo,juanalfonsopr/odoo,takis/odoo,funkring/fdoo,erkrishna9/odoo,avoinsystems/odoo,aviciimaxwell/odoo,alqfahad/odoo,wangjun/odoo,apanju/odoo,bakhtout/odoo-educ,thanhacun/odoo,osvalr/odoo,havt/odoo,Endika/odoo,feroda/odoo,colinnewell/odoo,spadae22/odoo,markeTIC/OCB,Drooids/odoo,goliveirab/odoo,PongPi/isl-odoo,xzYue/odoo,takis/odoo,credativUK/OCB,apocalypsebg/odoo,Adel-Magebinary/odoo,grap/OpenUpgrade,erkrishna9/odoo,Bachaco-ve/odoo,tvibliani/odoo,alexteodor/odoo,BT-ojossen/odoo,tinkerthaler/odoo,slevenhagen/odoo,aviciimaxwell/odoo,nuuuboo/odoo,mkieszek/odoo,ramitalat/odoo,guewen/OpenUpgrade,ccomb/OpenUpgrade,x111ong/odoo,ingadhoc/odoo,slevenhagen/odoo,hoatle/odoo,PongPi/isl-odoo,feroda/odoo,nuncjo/odoo,apocalypsebg/odoo,ojengwa/odoo,papouso/odoo,addition-it-solutions/project-all,ShineFan/odoo,fgesora/odoo,xzYue/odoo,hoatle/odoo,Endika/OpenUpgrade,srimai/odoo,fgesora/odoo,BT-ojossen/odoo,FlorianLudwig/odoo,bguillot/OpenUpgrade,BT-fgarbely/odoo,damdam-s/OpenUpgrade,n0m4dz/odoo,juanalfonsopr/odoo,microcom/odoo,mszewczy/odoo,gavin-feng/odoo,cloud9UG/odoo,wangjun/odoo,juanalfonsopr/odoo,agrista/odoo-saas,rgeleta/odoo,srimai/odoo,jpshort/odoo,sebalix/OpenUpgrade,juanalfonsopr/odoo,hmen89/odoo,gavin-feng/odoo,diagramsoftware/odoo,rschnapka/odoo,sinbazhou/odoo,x111ong/odoo,tvtsoft/odoo8,CopeX/odoo,Elico-Corp/odoo_OCB,florian-dacosta/OpenUpgrade,dfang/odoo,guewen/OpenUpgrade,cdrooom/odoo,apanju/odoo,poljeff/odoo,ClearCorp-dev/odoo,ThinkOpen-Solutions/odoo,waytai/odoo,minhtuancn/odoo,OpenUpgrade-dev/OpenUpgrade,sergio-incaser/odoo,ramadhane/odoo,nhomar/odoo-mirror,patmcb/odoo,nuncjo/odoo,ecosoft-odoo/odoo,hbrunn/OpenUpgrade,hip-odoo/odoo,hopeall/odoo,fjbatresv/odoo,virgree/odoo,elmerdpadilla/iv,alqfahad/odoo,Grirrane/odoo,CatsAndDogsbvba/odoo,tvibliani/odoo,ojengwa/odoo,sinbazhou/odoo,ccomb/OpenUpgrade,Danisa
n/odoo-1,codekaki/odoo,fossoult/odoo,feroda/odoo,OSSESAC/odoopubarquiluz,gsmartway/odoo,pplatek/odoo,SerpentCS/odoo,glovebx/odoo,hoatle/odoo,xzYue/odoo,fuselock/odoo,abstract-open-solutions/OCB,syci/OCB,jeasoft/odoo,CatsAndDogsbvba/odoo,shivam1111/odoo,JonathanStein/odoo,shaufi10/odoo,Kilhog/odoo,mustafat/odoo-1,papouso/odoo,dkubiak789/odoo,ApuliaSoftware/odoo,minhtuancn/odoo,realsaiko/odoo,mlaitinen/odoo,Gitlab11/odoo,luiseduardohdbackup/odoo,fossoult/odoo,omprakasha/odoo,lsinfo/odoo,joshuajan/odoo,shaufi/odoo,Endika/odoo,Maspear/odoo,dllsf/odootest,tinkerthaler/odoo,JGarcia-Panach/odoo,codekaki/odoo,synconics/odoo,ApuliaSoftware/odoo,fevxie/odoo,oihane/odoo,OpenPymeMx/OCB,kybriainfotech/iSocioCRM,acshan/odoo,frouty/odoo_oph,OpenPymeMx/OCB,xzYue/odoo,sv-dev1/odoo,srsman/odoo,omprakasha/odoo,jolevq/odoopub,lombritz/odoo,srimai/odoo,spadae22/odoo,ihsanudin/odoo,ujjwalwahi/odoo,kybriainfotech/iSocioCRM,abdellatifkarroum/odoo,abenzbiria/clients_odoo,OSSESAC/odoopubarquiluz,mvaled/OpenUpgrade,datenbetrieb/odoo,lsinfo/odoo,sysadminmatmoz/OCB,Grirrane/odoo,alexteodor/odoo,dsfsdgsbngfggb/odoo,shingonoide/odoo,matrixise/odoo,agrista/odoo-saas,shaufi10/odoo,sergio-incaser/odoo,jolevq/odoopub,optima-ict/odoo,javierTerry/odoo,leoliujie/odoo,thanhacun/odoo,ujjwalwahi/odoo,naousse/odoo,goliveirab/odoo,highco-groupe/odoo,KontorConsulting/odoo,prospwro/odoo,KontorConsulting/odoo,x111ong/odoo,savoirfairelinux/OpenUpgrade,datenbetrieb/odoo,rschnapka/odoo,mszewczy/odoo,cedk/odoo,abstract-open-solutions/OCB,alhashash/odoo,ClearCorp-dev/odoo,joariasl/odoo,rowemoore/odoo,mvaled/OpenUpgrade,Danisan/odoo-1,jusdng/odoo,ingadhoc/odoo,rowemoore/odoo,Antiun/odoo,alhashash/odoo,apanju/GMIO_Odoo,agrista/odoo-saas,jolevq/odoopub,jeasoft/odoo,takis/odoo,NL66278/OCB,kittiu/odoo,grap/OpenUpgrade,ovnicraft/odoo,Endika/OpenUpgrade,TRESCLOUD/odoopub,bplancher/odoo,ThinkOpen-Solutions/odoo,sysadminmatmoz/OCB,oliverhr/odoo,gavin-feng/odoo,tangyiyong/odoo,BT-ojossen/odoo,tangyiyong/odoo,sv-dev1/odoo,Icha
g/odoo,ehirt/odoo,hanicker/odoo,christophlsa/odoo,GauravSahu/odoo,jolevq/odoopub,microcom/odoo,VitalPet/odoo,mvaled/OpenUpgrade,Ichag/odoo,mlaitinen/odoo,wangjun/odoo,kirca/OpenUpgrade,hifly/OpenUpgrade,cedk/odoo,makinacorpus/odoo,OpenUpgrade/OpenUpgrade,luiseduardohdbackup/odoo,jiangzhixiao/odoo,poljeff/odoo,optima-ict/odoo,ApuliaSoftware/odoo,AuyaJackie/odoo,ygol/odoo,osvalr/odoo,kirca/OpenUpgrade,jesramirez/odoo,patmcb/odoo,shaufi/odoo,guerrerocarlos/odoo,cloud9UG/odoo,funkring/fdoo,savoirfairelinux/odoo,christophlsa/odoo,klunwebale/odoo,jesramirez/odoo,simongoffin/website_version,shivam1111/odoo,Nick-OpusVL/odoo,nuncjo/odoo,pplatek/odoo,gdgellatly/OCB1,bguillot/OpenUpgrade,jaxkodex/odoo,naousse/odoo,tvtsoft/odoo8,frouty/odoogoeen,MarcosCommunity/odoo,windedge/odoo,JonathanStein/odoo,ihsanudin/odoo,rahuldhote/odoo,ApuliaSoftware/odoo,OpenUpgrade-dev/OpenUpgrade,virgree/odoo,ApuliaSoftware/odoo,Maspear/odoo,leorochael/odoo,massot/odoo,incaser/odoo-odoo,joshuajan/odoo,numerigraphe/odoo,ihsanudin/odoo,ramadhane/odoo,cedk/odoo,jfpla/odoo,datenbetrieb/odoo,ingadhoc/odoo,papouso/odoo,VitalPet/odoo,RafaelTorrealba/odoo,dllsf/odootest,rdeheele/odoo,sysadminmatmoz/OCB,camptocamp/ngo-addons-backport,deKupini/erp,fdvarela/odoo8,CubicERP/odoo,QianBIG/odoo,provaleks/o8,aviciimaxwell/odoo,florentx/OpenUpgrade,vnsofthe/odoo,havt/odoo,NeovaHealth/odoo,VitalPet/odoo,ehirt/odoo,jolevq/odoopub,mustafat/odoo-1,Endika/OpenUpgrade,fdvarela/odoo8,lsinfo/odoo,Drooids/odoo,fjbatresv/odoo,dsfsdgsbngfggb/odoo,cedk/odoo,kirca/OpenUpgrade,bwrsandman/OpenUpgrade,matrixise/odoo,rgeleta/odoo,podemos-info/odoo,jiangzhixiao/odoo,jesramirez/odoo,luiseduardohdbackup/odoo,apanju/GMIO_Odoo,camptocamp/ngo-addons-backport,camptocamp/ngo-addons-backport,inspyration/odoo,bplancher/odoo,osvalr/odoo,elmerdpadilla/iv,hoatle/odoo,Danisan/odoo-1,alexcuellar/odoo,frouty/odoo_oph,janocat/odoo,gorjuce/odoo,odootr/odoo,0k/OpenUpgrade,poljeff/odoo,credativUK/OCB,apanju/GMIO_Odoo,acshan/odoo,Daniel-CA/odoo,fossoult
/odoo,patmcb/odoo,bakhtout/odoo-educ,tarzan0820/odoo,dsfsdgsbngfggb/odoo,oihane/odoo,nagyistoce/odoo-dev-odoo,blaggacao/OpenUpgrade,OpenUpgrade/OpenUpgrade,sv-dev1/odoo,joshuajan/odoo,numerigraphe/odoo,rahuldhote/odoo,juanalfonsopr/odoo,xzYue/odoo,Ichag/odoo,Drooids/odoo,numerigraphe/odoo,VielSoft/odoo,wangjun/odoo,CopeX/odoo,oasiswork/odoo,Nick-OpusVL/odoo,camptocamp/ngo-addons-backport,diagramsoftware/odoo,bakhtout/odoo-educ,alqfahad/odoo,ecosoft-odoo/odoo,jeasoft/odoo,nuncjo/odoo,rahuldhote/odoo,savoirfairelinux/OpenUpgrade,dsfsdgsbngfggb/odoo,alhashash/odoo,laslabs/odoo,fuhongliang/odoo,salaria/odoo,glovebx/odoo,kittiu/odoo,rubencabrera/odoo,funkring/fdoo,jiangzhixiao/odoo,minhtuancn/odoo,nitinitprof/odoo,Elico-Corp/odoo_OCB,JonathanStein/odoo,vnsofthe/odoo,jeasoft/odoo,credativUK/OCB,florentx/OpenUpgrade,BT-rmartin/odoo,addition-it-solutions/project-all,rgeleta/odoo,dalegregory/odoo,kirca/OpenUpgrade,slevenhagen/odoo-npg,codekaki/odoo,BT-astauder/odoo,odoousers2014/odoo,Noviat/odoo,gorjuce/odoo,avoinsystems/odoo,bguillot/OpenUpgrade,agrista/odoo-saas,tinkhaven-organization/odoo,jeasoft/odoo,Noviat/odoo,Adel-Magebinary/odoo,florian-dacosta/OpenUpgrade,incaser/odoo-odoo,luiseduardohdbackup/odoo,demon-ru/iml-crm,syci/OCB,vnsofthe/odoo,odoo-turkiye/odoo,BT-rmartin/odoo,ihsanudin/odoo,collex100/odoo,apocalypsebg/odoo,codekaki/odoo,damdam-s/OpenUpgrade,ojengwa/odoo,microcom/odoo,NeovaHealth/odoo,papouso/odoo,odoo-turkiye/odoo,Maspear/odoo,Nowheresly/odoo,salaria/odoo,abstract-open-solutions/OCB,leorochael/odoo,minhtuancn/odoo,ubic135/odoo-design,mvaled/OpenUpgrade,draugiskisprendimai/odoo,factorlibre/OCB,florian-dacosta/OpenUpgrade,erkrishna9/odoo,demon-ru/iml-crm,prospwro/odoo,grap/OCB,joariasl/odoo,charbeljc/OCB,nhomar/odoo-mirror,fuselock/odoo,gsmartway/odoo,idncom/odoo,nitinitprof/odoo,salaria/odoo,vrenaville/ngo-addons-backport,aviciimaxwell/odoo,windedge/odoo,SerpentCS/odoo,BT-astauder/odoo,Antiun/odoo,gavin-feng/odoo,SAM-IT-SA/odoo,demon-ru/iml-crm,eino-makita
lo/odoo,slevenhagen/odoo,inspyration/odoo,odoo-turkiye/odoo,hifly/OpenUpgrade,poljeff/odoo,luistorresm/odoo,Gitlab11/odoo,rschnapka/odoo,QianBIG/odoo,aviciimaxwell/odoo,fdvarela/odoo8,alexcuellar/odoo,makinacorpus/odoo,fuhongliang/odoo,OpusVL/odoo,abstract-open-solutions/OCB,mkieszek/odoo,nagyistoce/odoo-dev-odoo,slevenhagen/odoo-npg,ramitalat/odoo,AuyaJackie/odoo,oasiswork/odoo,odoo-turkiye/odoo,incaser/odoo-odoo,ramadhane/odoo,apocalypsebg/odoo,ramadhane/odoo,ehirt/odoo,markeTIC/OCB,hip-odoo/odoo,luistorresm/odoo,spadae22/odoo,tinkhaven-organization/odoo,QianBIG/odoo,dkubiak789/odoo,florentx/OpenUpgrade,dllsf/odootest,simongoffin/website_version,mustafat/odoo-1,CatsAndDogsbvba/odoo,idncom/odoo,VielSoft/odoo,optima-ict/odoo,funkring/fdoo,massot/odoo,FlorianLudwig/odoo,naousse/odoo,gorjuce/odoo,datenbetrieb/odoo,shingonoide/odoo,storm-computers/odoo,codekaki/odoo,JGarcia-Panach/odoo,tvibliani/odoo,nuuuboo/odoo,colinnewell/odoo,MarcosCommunity/odoo,demon-ru/iml-crm,virgree/odoo,sebalix/OpenUpgrade,mmbtba/odoo,VielSoft/odoo,lombritz/odoo,camptocamp/ngo-addons-backport,ihsanudin/odoo,VitalPet/odoo,ApuliaSoftware/odoo,pplatek/odoo,dalegregory/odoo,ApuliaSoftware/odoo,hoatle/odoo,havt/odoo,inspyration/odoo,dgzurita/odoo,pplatek/odoo,sadleader/odoo,steedos/odoo,tinkhaven-organization/odoo,lsinfo/odoo,acshan/odoo,jeasoft/odoo,rubencabrera/odoo,cloud9UG/odoo,spadae22/odoo,Nick-OpusVL/odoo,sebalix/OpenUpgrade,tinkhaven-organization/odoo,grap/OCB,shaufi/odoo,joariasl/odoo,0k/OpenUpgrade,vrenaville/ngo-addons-backport,Ichag/odoo,jaxkodex/odoo,ShineFan/odoo,ShineFan/odoo,stonegithubs/odoo,OpenUpgrade-dev/OpenUpgrade,fuselock/odoo,hanicker/odoo,cpyou/odoo,ehirt/odoo,papouso/odoo,fossoult/odoo,stonegithubs/odoo,PongPi/isl-odoo,waytai/odoo,windedge/odoo,QianBIG/odoo,shingonoide/odoo,inspyration/odoo,shaufi/odoo,highco-groupe/odoo,eino-makitalo/odoo,datenbetrieb/odoo,rgeleta/odoo,Danisan/odoo-1,incaser/odoo-odoo,colinnewell/odoo,GauravSahu/odoo,gvb/odoo,TRESCLOUD/odoopub,lightcn/od
oo,rdeheele/odoo,bplancher/odoo,QianBIG/odoo,salaria/odoo,tarzan0820/odoo,laslabs/odoo,OpusVL/odoo,Kilhog/odoo,n0m4dz/odoo,gsmartway/odoo,csrocha/OpenUpgrade,simongoffin/website_version,vrenaville/ngo-addons-backport,tinkerthaler/odoo,aviciimaxwell/odoo,lsinfo/odoo,rahuldhote/odoo,steedos/odoo,bobisme/odoo,RafaelTorrealba/odoo,nhomar/odoo-mirror,thanhacun/odoo,kirca/OpenUpgrade,apanju/GMIO_Odoo,luistorresm/odoo,osvalr/odoo,jiachenning/odoo,dezynetechnologies/odoo,abenzbiria/clients_odoo,srsman/odoo,pedrobaeza/odoo,zchking/odoo,x111ong/odoo,grap/OCB,nhomar/odoo,laslabs/odoo,eino-makitalo/odoo,NeovaHealth/odoo,kybriainfotech/iSocioCRM,ramadhane/odoo,cysnake4713/odoo,hmen89/odoo,odoousers2014/odoo,doomsterinc/odoo,tvibliani/odoo,dkubiak789/odoo,bakhtout/odoo-educ,cpyou/odoo,sergio-incaser/odoo,laslabs/odoo,matrixise/odoo,ovnicraft/odoo,florian-dacosta/OpenUpgrade,dkubiak789/odoo,savoirfairelinux/OpenUpgrade,Ernesto99/odoo,ecosoft-odoo/odoo,salaria/odoo,luiseduardohdbackup/odoo,idncom/odoo,apanju/odoo,eino-makitalo/odoo,ChanduERP/odoo,hbrunn/OpenUpgrade,omprakasha/odoo,alexcuellar/odoo,brijeshkesariya/odoo,hubsaysnuaa/odoo,jaxkodex/odoo,hopeall/odoo,abstract-open-solutions/OCB,sebalix/OpenUpgrade,ccomb/OpenUpgrade,OpenUpgrade/OpenUpgrade,CatsAndDogsbvba/odoo,fuselock/odoo,JGarcia-Panach/odoo,nhomar/odoo,cysnake4713/odoo,bwrsandman/OpenUpgrade,codekaki/odoo,Eric-Zhong/odoo,deKupini/erp,fgesora/odoo,hifly/OpenUpgrade,dfang/odoo,ubic135/odoo-design,Endika/odoo,leorochael/odoo,OpusVL/odoo,diagramsoftware/odoo,frouty/odoo_oph,CubicERP/odoo,Adel-Magebinary/odoo,Kilhog/odoo,janocat/odoo,slevenhagen/odoo-npg,wangjun/odoo,Ichag/odoo,osvalr/odoo,RafaelTorrealba/odoo,takis/odoo,podemos-info/odoo,omprakasha/odoo,podemos-info/odoo,sv-dev1/odoo,patmcb/odoo,mlaitinen/odoo,fuhongliang/odoo,CatsAndDogsbvba/odoo,gsmartway/odoo,alexcuellar/odoo,lombritz/odoo,joariasl/odoo,guewen/OpenUpgrade,nuuuboo/odoo,arthru/OpenUpgrade,BT-fgarbely/odoo,Noviat/odoo,feroda/odoo,rubencabrera/odoo,srsman/o
doo,storm-computers/odoo,dsfsdgsbngfggb/odoo,Antiun/odoo,steedos/odoo,funkring/fdoo,bwrsandman/OpenUpgrade,abenzbiria/clients_odoo,alqfahad/odoo,BT-fgarbely/odoo,highco-groupe/odoo,CatsAndDogsbvba/odoo,tvibliani/odoo,Bachaco-ve/odoo,brijeshkesariya/odoo,ehirt/odoo,avoinsystems/odoo,leoliujie/odoo,acshan/odoo,dgzurita/odoo,Ichag/odoo,salaria/odoo,ThinkOpen-Solutions/odoo,guerrerocarlos/odoo,vnsofthe/odoo,storm-computers/odoo,VitalPet/odoo,Daniel-CA/odoo,gorjuce/odoo,leoliujie/odoo,storm-computers/odoo,papouso/odoo,cpyou/odoo,n0m4dz/odoo,deKupini/erp,rowemoore/odoo,JonathanStein/odoo,acshan/odoo,Endika/odoo,OpenPymeMx/OCB,ovnicraft/odoo,alqfahad/odoo,fjbatresv/odoo,andreparames/odoo,Ernesto99/odoo,GauravSahu/odoo,hoatle/odoo,odoousers2014/odoo,factorlibre/OCB,ShineFan/odoo,janocat/odoo,bealdav/OpenUpgrade,oihane/odoo,zchking/odoo,tinkhaven-organization/odoo,lombritz/odoo,0k/OpenUpgrade,demon-ru/iml-crm,PongPi/isl-odoo,SAM-IT-SA/odoo,vrenaville/ngo-addons-backport,shivam1111/odoo,windedge/odoo,windedge/odoo,codekaki/odoo,syci/OCB,shaufi10/odoo,AuyaJackie/odoo,shaufi10/odoo,Kilhog/odoo,GauravSahu/odoo,bealdav/OpenUpgrade,spadae22/odoo,JCA-Developpement/Odoo,apanju/odoo,jaxkodex/odoo,chiragjogi/odoo,dariemp/odoo,AuyaJackie/odoo,jiangzhixiao/odoo,Elico-Corp/odoo_OCB,lightcn/odoo,hubsaysnuaa/odoo,odooindia/odoo,apocalypsebg/odoo,provaleks/o8,ChanduERP/odoo,javierTerry/odoo,OpenPymeMx/OCB,oihane/odoo,rubencabrera/odoo,bguillot/OpenUpgrade,apanju/GMIO_Odoo,grap/OpenUpgrade,nhomar/odoo-mirror,kybriainfotech/iSocioCRM,Eric-Zhong/odoo,christophlsa/odoo,jiangzhixiao/odoo,odootr/odoo,guerrerocarlos/odoo,lombritz/odoo,alexteodor/odoo,avoinsystems/odoo,n0m4dz/odoo,charbeljc/OCB,bealdav/OpenUpgrade,leorochael/odoo,vnsofthe/odoo,csrocha/OpenUpgrade,CopeX/odoo,CubicERP/odoo,ChanduERP/odoo,abdellatifkarroum/odoo,stonegithubs/odoo,sysadminmatmoz/OCB,sebalix/OpenUpgrade,gavin-feng/odoo,Maspear/odoo,nitinitprof/odoo,thanhacun/odoo,sv-dev1/odoo,laslabs/odoo,charbeljc/OCB,grap/OpenUpgrade,f
lorentx/OpenUpgrade,lgscofield/odoo,stephen144/odoo,lgscofield/odoo,mszewczy/odoo,stephen144/odoo,ThinkOpen-Solutions/odoo,bkirui/odoo,gdgellatly/OCB1,lsinfo/odoo,podemos-info/odoo,TRESCLOUD/odoopub,lgscofield/odoo,Nick-OpusVL/odoo,hopeall/odoo,hubsaysnuaa/odoo,klunwebale/odoo,havt/odoo,frouty/odoogoeen,Antiun/odoo,shingonoide/odoo,leorochael/odoo,BT-fgarbely/odoo,gsmartway/odoo,MarcosCommunity/odoo,virgree/odoo,shivam1111/odoo,pedrobaeza/OpenUpgrade,kifcaliph/odoo,storm-computers/odoo,ShineFan/odoo,bwrsandman/OpenUpgrade,leoliujie/odoo,Endika/odoo,factorlibre/OCB,slevenhagen/odoo-npg,hassoon3/odoo,gsmartway/odoo,elmerdpadilla/iv,cpyou/odoo,vrenaville/ngo-addons-backport,factorlibre/OCB,stephen144/odoo,nitinitprof/odoo,srimai/odoo,Maspear/odoo,grap/OpenUpgrade,bobisme/odoo,mmbtba/odoo,rdeheele/odoo,mszewczy/odoo,ubic135/odoo-design,Maspear/odoo,abenzbiria/clients_odoo,oasiswork/odoo,charbeljc/OCB,odootr/odoo,dariemp/odoo,funkring/fdoo,dsfsdgsbngfggb/odoo,Daniel-CA/odoo,idncom/odoo,draugiskisprendimai/odoo,oliverhr/odoo,fossoult/odoo,odooindia/odoo,Adel-Magebinary/odoo,NeovaHealth/odoo,frouty/odoo_oph,makinacorpus/odoo,bwrsandman/OpenUpgrade,Grirrane/odoo,oihane/odoo,nexiles/odoo,hanicker/odoo,sinbazhou/odoo,draugiskisprendimai/odoo,OSSESAC/odoopubarquiluz,thanhacun/odoo,odootr/odoo,frouty/odoogoeen,cdrooom/odoo,Daniel-CA/odoo,Endika/odoo,mustafat/odoo-1,glovebx/odoo,blaggacao/OpenUpgrade,fgesora/odoo,gvb/odoo,bguillot/OpenUpgrade,bakhtout/odoo-educ,odoo-turkiye/odoo,Nowheresly/odoo,joariasl/odoo,cloud9UG/odoo,bkirui/odoo,slevenhagen/odoo-npg,jusdng/odoo,nexiles/odoo,dgzurita/odoo,csrocha/OpenUpgrade,Endika/odoo,stephen144/odoo,Codefans-fan/odoo,ujjwalwahi/odoo,simongoffin/website_version,cloud9UG/odoo,nuncjo/odoo,Antiun/odoo,fevxie/odoo,MarcosCommunity/odoo,srsman/odoo,virgree/odoo,Kilhog/odoo,janocat/odoo,KontorConsulting/odoo,provaleks/o8,CopeX/odoo,csrocha/OpenUpgrade,ygol/odoo,JonathanStein/odoo,SerpentCS/odoo,damdam-s/OpenUpgrade,lgscofield/odoo,cedk/odoo,olive
rhr/odoo,odoousers2014/odoo,SerpentCS/odoo,BT-ojossen/odoo,bplancher/odoo,MarcosCommunity/odoo,jaxkodex/odoo,havt/odoo,deKupini/erp,RafaelTorrealba/odoo,Daniel-CA/odoo,Adel-Magebinary/odoo,guerrerocarlos/odoo,lgscofield/odoo,nuuuboo/odoo,gdgellatly/OCB1,VielSoft/odoo,SAM-IT-SA/odoo,Endika/OpenUpgrade,ehirt/odoo,abdellatifkarroum/odoo,kybriainfotech/iSocioCRM,BT-fgarbely/odoo,sve-odoo/odoo,oasiswork/odoo,pedrobaeza/odoo,steedos/odoo,abdellatifkarroum/odoo,CubicERP/odoo,rahuldhote/odoo,xujb/odoo,omprakasha/odoo,lightcn/odoo,srimai/odoo,bwrsandman/OpenUpgrade,tinkhaven-organization/odoo,apocalypsebg/odoo,sebalix/OpenUpgrade,odoo-turkiye/odoo,Maspear/odoo,odooindia/odoo,poljeff/odoo,brijeshkesariya/odoo,SAM-IT-SA/odoo,FlorianLudwig/odoo,diagramsoftware/odoo,NeovaHealth/odoo,mustafat/odoo-1,MarcosCommunity/odoo,bobisme/odoo,BT-ojossen/odoo,ClearCorp-dev/odoo,damdam-s/OpenUpgrade,leorochael/odoo,luistorresm/odoo,alexteodor/odoo,cpyou/odoo,fossoult/odoo,OpenPymeMx/OCB,MarcosCommunity/odoo,ramadhane/odoo,andreparames/odoo,bkirui/odoo,tvtsoft/odoo8,oliverhr/odoo,dezynetechnologies/odoo,ojengwa/odoo,tinkerthaler/odoo,csrocha/OpenUpgrade,fgesora/odoo,mkieszek/odoo,savoirfairelinux/odoo,mustafat/odoo-1,alexcuellar/odoo,omprakasha/odoo,addition-it-solutions/project-all,doomsterinc/odoo,fjbatresv/odoo,MarcosCommunity/odoo,microcom/odoo,guewen/OpenUpgrade,JonathanStein/odoo,Nick-OpusVL/odoo,ChanduERP/odoo,Codefans-fan/odoo,xujb/odoo,NL66278/OCB,hopeall/odoo,zchking/odoo,nagyistoce/odoo-dev-odoo,patmcb/odoo,chiragjogi/odoo,sergio-incaser/odoo,andreparames/odoo,stephen144/odoo,pedrobaeza/odoo,dfang/odoo,brijeshkesariya/odoo,pedrobaeza/OpenUpgrade,factorlibre/OCB,waytai/odoo,Nowheresly/odoo,ygol/odoo,luistorresm/odoo,fossoult/odoo,jaxkodex/odoo,nuncjo/odoo,blaggacao/OpenUpgrade,gdgellatly/OCB1,dgzurita/odoo,savoirfairelinux/OpenUpgrade,salaria/odoo,spadae22/odoo,Eric-Zhong/odoo,microcom/odoo,Codefans-fan/odoo,ramitalat/odoo,markeTIC/OCB,storm-computers/odoo,rahuldhote/odoo,bkirui/odo
o,Noviat/odoo,slevenhagen/odoo-npg,collex100/odoo,frouty/odoo_oph,nexiles/odoo,syci/OCB,bplancher/odoo,BT-fgarbely/odoo,realsaiko/odoo,Danisan/odoo-1,minhtuancn/odoo,TRESCLOUD/odoopub,fuhongliang/odoo,joshuajan/odoo,jusdng/odoo,rdeheele/odoo,BT-astauder/odoo,fjbatresv/odoo,Daniel-CA/odoo,provaleks/o8,fgesora/odoo,fdvarela/odoo8,aviciimaxwell/odoo,collex100/odoo,ujjwalwahi/odoo,rdeheele/odoo,bkirui/odoo,mszewczy/odoo,prospwro/odoo,Ichag/odoo,tvtsoft/odoo8,mkieszek/odoo,goliveirab/odoo,chiragjogi/odoo,avoinsystems/odoo,ujjwalwahi/odoo,arthru/OpenUpgrade,Elico-Corp/odoo_OCB,VielSoft/odoo,rowemoore/odoo,sve-odoo/odoo,pedrobaeza/OpenUpgrade,tinkerthaler/odoo,Kilhog/odoo,Gitlab11/odoo,slevenhagen/odoo,janocat/odoo,florentx/OpenUpgrade,rowemoore/odoo,kybriainfotech/iSocioCRM,Drooids/odoo,apanju/odoo,jiachenning/odoo,fevxie/odoo,doomsterinc/odoo,dezynetechnologies/odoo,chiragjogi/odoo,nitinitprof/odoo,tvtsoft/odoo8,ingadhoc/odoo,gdgellatly/OCB1,chiragjogi/odoo,hbrunn/OpenUpgrade,poljeff/odoo,csrocha/OpenUpgrade,doomsterinc/odoo,ccomb/OpenUpgrade,hopeall/odoo,markeTIC/OCB,ojengwa/odoo,shaufi10/odoo,hoatle/odoo,glovebx/odoo,fuselock/odoo,blaggacao/OpenUpgrade,naousse/odoo,Gitlab11/odoo,goliveirab/odoo,OpenPymeMx/OCB,xujb/odoo,tinkhaven-organization/odoo,synconics/odoo,ChanduERP/odoo,dariemp/odoo,kifcaliph/odoo,joariasl/odoo,lightcn/odoo,guewen/OpenUpgrade,shivam1111/odoo,OpenUpgrade/OpenUpgrade,tangyiyong/odoo,chiragjogi/odoo,fevxie/odoo,CopeX/odoo,gvb/odoo,Grirrane/odoo,cloud9UG/odoo,joariasl/odoo,odootr/odoo,sysadminmatmoz/OCB,florian-dacosta/OpenUpgrade,odoousers2014/odoo,frouty/odoogoeen,brijeshkesariya/odoo,slevenhagen/odoo,shaufi/odoo,addition-it-solutions/project-all,JGarcia-Panach/odoo,jaxkodex/odoo,rgeleta/odoo,tangyiyong/odoo,OpenUpgrade-dev/OpenUpgrade,cdrooom/odoo,hifly/OpenUpgrade,ecosoft-odoo/odoo,dalegregory/odoo,frouty/odoogoeen,Daniel-CA/odoo,alhashash/odoo,collex100/odoo,dezynetechnologies/odoo,luistorresm/odoo,prospwro/odoo,fdvarela/odoo8,ingadhoc/odoo,nhom
ar/odoo,tinkerthaler/odoo,hip-odoo/odoo,dkubiak789/odoo,klunwebale/odoo,ingadhoc/odoo,gorjuce/odoo,dalegregory/odoo,fuhongliang/odoo,incaser/odoo-odoo,zchking/odoo,Antiun/odoo,matrixise/odoo,rgeleta/odoo,minhtuancn/odoo,kittiu/odoo,charbeljc/OCB,stephen144/odoo,Codefans-fan/odoo,optima-ict/odoo,pedrobaeza/OpenUpgrade,arthru/OpenUpgrade,vnsofthe/odoo,oliverhr/odoo,omprakasha/odoo,hubsaysnuaa/odoo,prospwro/odoo,mmbtba/odoo,jusdng/odoo,rowemoore/odoo,fgesora/odoo,bobisme/odoo,idncom/odoo,mmbtba/odoo,steedos/odoo,feroda/odoo,shaufi10/odoo,nhomar/odoo,RafaelTorrealba/odoo,rgeleta/odoo,ecosoft-odoo/odoo,simongoffin/website_version,eino-makitalo/odoo,klunwebale/odoo,FlorianLudwig/odoo,jpshort/odoo,ovnicraft/odoo,stonegithubs/odoo,gavin-feng/odoo,n0m4dz/odoo,mvaled/OpenUpgrade,odoousers2014/odoo,Nick-OpusVL/odoo,avoinsystems/odoo,patmcb/odoo,SAM-IT-SA/odoo,gorjuce/odoo,srsman/odoo,mlaitinen/odoo,nitinitprof/odoo,jiachenning/odoo,Codefans-fan/odoo,provaleks/o8,fuhongliang/odoo,takis/odoo,savoirfairelinux/odoo,xzYue/odoo,shaufi/odoo,collex100/odoo,rschnapka/odoo,Elico-Corp/odoo_OCB,javierTerry/odoo,SerpentCS/odoo,stonegithubs/odoo,NeovaHealth/odoo,oasiswork/odoo,tangyiyong/odoo,tvibliani/odoo,datenbetrieb/odoo,grap/OCB,ThinkOpen-Solutions/odoo,erkrishna9/odoo,ubic135/odoo-design,waytai/odoo,janocat/odoo,ChanduERP/odoo,jusdng/odoo,dalegregory/odoo,frouty/odoogoeen,ShineFan/odoo,dgzurita/odoo,gvb/odoo,apanju/odoo,GauravSahu/odoo,mlaitinen/odoo,luistorresm/odoo,synconics/odoo,GauravSahu/odoo,nuuuboo/odoo,rschnapka/odoo,sysadminmatmoz/OCB,Grirrane/odoo,Ernesto99/odoo,hifly/OpenUpgrade,jpshort/odoo,leoliujie/odoo,JCA-Developpement/Odoo,VitalPet/odoo,BT-rmartin/odoo,JGarcia-Panach/odoo,PongPi/isl-odoo,tarzan0820/odoo,pplatek/odoo,joshuajan/odoo,savoirfairelinux/odoo,srimai/odoo,0k/odoo,gdgellatly/OCB1,bkirui/odoo,gsmartway/odoo,poljeff/odoo,OSSESAC/odoopubarquiluz,shingonoide/odoo,lombritz/odoo,Endika/OpenUpgrade,osvalr/odoo,jfpla/odoo,sv-dev1/odoo,xujb/odoo,hubsaysnuaa/odoo,janoca
t/odoo,prospwro/odoo,codekaki/odoo,rubencabrera/odoo,xujb/odoo,dezynetechnologies/odoo,bakhtout/odoo-educ,Drooids/odoo,ojengwa/odoo,dariemp/odoo,pedrobaeza/odoo,christophlsa/odoo,takis/odoo,BT-ojossen/odoo,rschnapka/odoo,grap/OpenUpgrade,collex100/odoo,shivam1111/odoo,glovebx/odoo,klunwebale/odoo,colinnewell/odoo,n0m4dz/odoo,fuhongliang/odoo,dariemp/odoo,jpshort/odoo,sergio-incaser/odoo,abdellatifkarroum/odoo,matrixise/odoo,jusdng/odoo,apanju/odoo,xujb/odoo,dsfsdgsbngfggb/odoo,0k/OpenUpgrade,CopeX/odoo,OpenUpgrade-dev/OpenUpgrade,zchking/odoo,hbrunn/OpenUpgrade,hmen89/odoo,brijeshkesariya/odoo,mszewczy/odoo,Nowheresly/odoo,alexteodor/odoo,realsaiko/odoo,jeasoft/odoo,Noviat/odoo,odoo-turkiye/odoo,christophlsa/odoo,massot/odoo,spadae22/odoo,OpenUpgrade/OpenUpgrade,damdam-s/OpenUpgrade,bealdav/OpenUpgrade,slevenhagen/odoo-npg,PongPi/isl-odoo,glovebx/odoo,sadleader/odoo,lsinfo/odoo,Nick-OpusVL/odoo,BT-rmartin/odoo,bguillot/OpenUpgrade,OpenPymeMx/OCB,sinbazhou/odoo,numerigraphe/odoo,leoliujie/odoo,0k/OpenUpgrade,laslabs/odoo,cysnake4713/odoo,tangyiyong/odoo,hmen89/odoo,nagyistoce/odoo-dev-odoo,Bachaco-ve/odoo,optima-ict/odoo,odootr/odoo,KontorConsulting/odoo,CatsAndDogsbvba/odoo,tinkerthaler/odoo,BT-fgarbely/odoo,feroda/odoo,vrenaville/ngo-addons-backport,ecosoft-odoo/odoo,markeTIC/OCB,oihane/odoo,Bachaco-ve/odoo,Eric-Zhong/odoo,nagyistoce/odoo-dev-odoo,cedk/odoo,mmbtba/odoo,thanhacun/odoo,virgree/odoo,mszewczy/odoo,Danisan/odoo-1,lgscofield/odoo,alqfahad/odoo,KontorConsulting/odoo,waytai/odoo,doomsterinc/odoo,Ernesto99/odoo,draugiskisprendimai/odoo,ujjwalwahi/odoo,goliveirab/odoo,wangjun/odoo,klunwebale/odoo,ingadhoc/odoo,feroda/odoo,chiragjogi/odoo,kybriainfotech/iSocioCRM,credativUK/OCB,nexiles/odoo,kittiu/odoo,guerrerocarlos/odoo,nuuuboo/odoo,kittiu/odoo,JCA-Developpement/Odoo,pedrobaeza/OpenUpgrade,hassoon3/odoo,vrenaville/ngo-addons-backport,Adel-Magebinary/odoo,hbrunn/OpenUpgrade,andreparames/odoo,jfpla/odoo,zchking/odoo,sadleader/odoo,hopeall/odoo,AuyaJackie/odoo
,oihane/odoo,0k/odoo,Codefans-fan/odoo,OpenUpgrade/OpenUpgrade,KontorConsulting/odoo,SAM-IT-SA/odoo,mlaitinen/odoo,makinacorpus/odoo,fevxie/odoo,bobisme/odoo,jiachenning/odoo,Gitlab11/odoo,OSSESAC/odoopubarquiluz,sve-odoo/odoo,nuncjo/odoo,sadleader/odoo,ramitalat/odoo,fuselock/odoo,fuselock/odoo,highco-groupe/odoo,ccomb/OpenUpgrade,pplatek/odoo,shivam1111/odoo,nexiles/odoo,hip-odoo/odoo,xujb/odoo,hanicker/odoo,tarzan0820/odoo,blaggacao/OpenUpgrade,synconics/odoo,gavin-feng/odoo,Noviat/odoo,odooindia/odoo,eino-makitalo/odoo,savoirfairelinux/OpenUpgrade,abenzbiria/clients_odoo,realsaiko/odoo,jfpla/odoo,podemos-info/odoo,nhomar/odoo,SAM-IT-SA/odoo,dgzurita/odoo,juanalfonsopr/odoo,ClearCorp-dev/odoo,Bachaco-ve/odoo,shaufi10/odoo,ramitalat/odoo,tvibliani/odoo,windedge/odoo,makinacorpus/odoo,frouty/odoogoeen,rubencabrera/odoo,blaggacao/OpenUpgrade,incaser/odoo-odoo,OSSESAC/odoopubarquiluz,Nowheresly/odoo,ehirt/odoo,ygol/odoo,waytai/odoo,massot/odoo,JGarcia-Panach/odoo,Codefans-fan/odoo,savoirfairelinux/odoo,christophlsa/odoo,abdellatifkarroum/odoo,odooindia/odoo,draugiskisprendimai/odoo,gvb/odoo,naousse/odoo,vrenaville/ngo-addons-backport,addition-it-solutions/project-all,diagramsoftware/odoo,colinnewell/odoo,oliverhr/odoo,fjbatresv/odoo,Eric-Zhong/odoo,jeasoft/odoo,cloud9UG/odoo,jfpla/odoo,hassoon3/odoo,bealdav/OpenUpgrade,collex100/odoo,Nowheresly/odoo,acshan/odoo,OpenUpgrade/OpenUpgrade,alexcuellar/odoo,apanju/GMIO_Odoo,stonegithubs/odoo,prospwro/odoo,oasiswork/odoo,FlorianLudwig/odoo,Noviat/odoo,JGarcia-Panach/odoo,sinbazhou/odoo,javierTerry/odoo,OpenUpgrade-dev/OpenUpgrade,mmbtba/odoo,klunwebale/odoo,diagramsoftware/odoo,hmen89/odoo,pedrobaeza/OpenUpgrade,elmerdpadilla/iv,jesramirez/odoo,microcom/odoo,massot/odoo,CubicERP/odoo,sv-dev1/odoo,rschnapka/odoo,frouty/odoogoeen,rowemoore/odoo,credativUK/OCB,Drooids/odoo,jusdng/odoo,mlaitinen/odoo,diagramsoftware/odoo,Nowheresly/odoo,nagyistoce/odoo-dev-odoo,kittiu/odoo,0k/OpenUpgrade,VitalPet/odoo,realsaiko/odoo,vnsofthe/od
oo,Adel-Magebinary/odoo,x111ong/odoo,javierTerry/odoo,jiangzhixiao/odoo,lightcn/odoo,damdam-s/OpenUpgrade,idncom/odoo,Eric-Zhong/odoo,alhashash/odoo,steedos/odoo,kirca/OpenUpgrade,andreparames/odoo,pedrobaeza/odoo,PongPi/isl-odoo,datenbetrieb/odoo,hifly/OpenUpgrade,elmerdpadilla/iv,kifcaliph/odoo,apanju/GMIO_Odoo,fevxie/odoo,dgzurita/odoo,kirca/OpenUpgrade,tvtsoft/odoo8,nexiles/odoo,NL66278/OCB,tangyiyong/odoo,hubsaysnuaa/odoo,dalegregory/odoo,syci/OCB,markeTIC/OCB,hanicker/odoo,havt/odoo,x111ong/odoo,leorochael/odoo,lightcn/odoo,guewen/OpenUpgrade,ThinkOpen-Solutions/odoo,synconics/odoo,RafaelTorrealba/odoo,alqfahad/odoo,hip-odoo/odoo,BT-rmartin/odoo,colinnewell/odoo,ecosoft-odoo/odoo,rubencabrera/odoo,andreparames/odoo,Eric-Zhong/odoo,Gitlab11/odoo,slevenhagen/odoo,deKupini/erp,AuyaJackie/odoo,jfpla/odoo,avoinsystems/odoo,BT-astauder/odoo,glovebx/odoo,jpshort/odoo,incaser/odoo-odoo,BT-rmartin/odoo,podemos-info/odoo,colinnewell/odoo,dfang/odoo,hassoon3/odoo,factorlibre/OCB,CopeX/odoo,podemos-info/odoo,Bachaco-ve/odoo,srsman/odoo,nuuuboo/odoo,Drooids/odoo,sysadminmatmoz/OCB,agrista/odoo-saas,takis/odoo,funkring/fdoo,virgree/odoo,charbeljc/OCB,guerrerocarlos/odoo,Ernesto99/odoo,ihsanudin/odoo,guerrerocarlos/odoo,x111ong/odoo,abstract-open-solutions/OCB,christophlsa/odoo,naousse/odoo,Bachaco-ve/odoo,hanicker/odoo,Antiun/odoo,sve-odoo/odoo,bplancher/odoo,ShineFan/odoo,savoirfairelinux/odoo,lightcn/odoo,papouso/odoo,bealdav/OpenUpgrade,naousse/odoo,shingonoide/odoo,minhtuancn/odoo,KontorConsulting/odoo,thanhacun/odoo,n0m4dz/odoo,mustafat/odoo-1,steedos/odoo,hassoon3/odoo,shaufi/odoo,alexcuellar/odoo,grap/OCB,ubic135/odoo-design,OpenPymeMx/OCB,QianBIG/odoo,nitinitprof/odoo,oliverhr/odoo,grap/OCB,Endika/OpenUpgrade,JCA-Developpement/Odoo,GauravSahu/odoo,arthru/OpenUpgrade,andreparames/odoo,windedge/odoo,bwrsandman/OpenUpgrade,SerpentCS/odoo,CubicERP/odoo,Grirrane/odoo,AuyaJackie/odoo,cysnake4713/odoo,bakhtout/odoo-educ,joshuajan/odoo,gvb/odoo,FlorianLudwig/odoo,hbrunn/Ope
nUpgrade,frouty/odoo_oph,VielSoft/odoo,makinacorpus/odoo,apocalypsebg/odoo,optima-ict/odoo,erkrishna9/odoo,0k/odoo,sadleader/odoo,luiseduardohdbackup/odoo,lgscofield/odoo,alhashash/odoo,ovnicraft/odoo,ChanduERP/odoo,blaggacao/OpenUpgrade,gorjuce/odoo,NL66278/OCB,cysnake4713/odoo,guewen/OpenUpgrade,camptocamp/ngo-addons-backport,nexiles/odoo,provaleks/o8,leoliujie/odoo,florentx/OpenUpgrade,JCA-Developpement/Odoo,sinbazhou/odoo,bobisme/odoo,tarzan0820/odoo,kittiu/odoo,gvb/odoo,luiseduardohdbackup/odoo,RafaelTorrealba/odoo,abdellatifkarroum/odoo,stonegithubs/odoo,grap/OCB,addition-it-solutions/project-all,mmbtba/odoo,doomsterinc/odoo,makinacorpus/odoo,dllsf/odootest,goliveirab/odoo,ygol/odoo,rschnapka/odoo,gdgellatly/OCB1,havt/odoo,javierTerry/odoo,credativUK/OCB,0k/odoo,abstract-open-solutions/OCB,nhomar/odoo-mirror,ramadhane/odoo,xzYue/odoo,savoirfairelinux/OpenUpgrade,acshan/odoo,patmcb/odoo,CubicERP/odoo,kifcaliph/odoo,VitalPet/odoo,credativUK/OCB,ihsanudin/odoo,ovnicraft/odoo,ThinkOpen-Solutions/odoo,ccomb/OpenUpgrade,waytai/odoo,Endika/OpenUpgrade,numerigraphe/odoo,Danisan/odoo-1,cdrooom/odoo,provaleks/o8,ccomb/OpenUpgrade,grap/OpenUpgrade,sinbazhou/odoo,dariemp/odoo,camptocamp/ngo-addons-backport,fjbatresv/odoo,Kilhog/odoo,gdgellatly/OCB1,bguillot/OpenUpgrade,bobisme/odoo,mkieszek/odoo,lombritz/odoo,javierTerry/odoo,draugiskisprendimai/odoo,Elico-Corp/odoo_OCB,BT-astauder/odoo,FlorianLudwig/odoo,dezynetechnologies/odoo,arthru/OpenUpgrade,damdam-s/OpenUpgrade,nhomar/odoo,ojengwa/odoo,camptocamp/ngo-addons-backport,srsman/odoo,hip-odoo/odoo,brijeshkesariya/odoo,oasiswork/odoo,pedrobaeza/OpenUpgrade,TRESCLOUD/odoopub,eino-makitalo/odoo,VielSoft/odoo,NeovaHealth/odoo,jpshort/odoo,goliveirab/odoo,draugiskisprendimai/odoo,highco-groupe/odoo,hanicker/odoo,Gitlab11/odoo,numerigraphe/odoo,JonathanStein/odoo,BT-rmartin/odoo,slevenhagen/odoo,dfang/odoo,ygol/odoo,florian-dacosta/OpenUpgrade,jiachenning/odoo,credativUK/OCB,jpshort/odoo,odootr/odoo,srimai/odoo,hopeall/odoo,pp
latek/odoo,grap/OCB,sve-odoo/odoo,syci/OCB,charbeljc/OCB,dezynetechnologies/odoo,mkieszek/odoo,Ernesto99/odoo,ujjwalwahi/odoo,factorlibre/OCB,arthru/OpenUpgrade,hubsaysnuaa/odoo,numerigraphe/odoo,dfang/odoo,fevxie/odoo,synconics/odoo,BT-ojossen/odoo,ramitalat/odoo,cedk/odoo,bkirui/odoo,csrocha/OpenUpgrade,dllsf/odootest,tarzan0820/odoo,dariemp/odoo,mvaled/OpenUpgrade,jiachenning/odoo,juanalfonsopr/odoo,shingonoide/odoo,ovnicraft/odoo,mvaled/OpenUpgrade,wangjun/odoo,rahuldhote/odoo,idncom/odoo,jesramirez/odoo,sebalix/OpenUpgrade,dkubiak789/odoo,0k/odoo,doomsterinc/odoo,sergio-incaser/odoo,hifly/OpenUpgrade,zchking/odoo,pedrobaeza/odoo,synconics/odoo,OpusVL/odoo,dkubiak789/odoo,osvalr/odoo,ygol/odoo,markeTIC/OCB,Ernesto99/odoo,hassoon3/odoo,kifcaliph/odoo,jfpla/odoo,SerpentCS/odoo | addons/base_report_designer/__terp__.py | addons/base_report_designer/__terp__.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Report designer interface module",
"description" : """
This module adds wizards to import/export documents to be edited in
OpenOffice.
""",
"version" : "0.1",
"depends" : ["base"],
"author" : "Tiny",
"website" : "http://tinyerp.com",
"category" : "Generic Modules/Base",
"init_xml" : [ ],
"demo_xml" : ["base_report_data.xml"],
"update_xml" : [ "base_report_designer_wizard.xml" ],
"active": False,
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "Report designer interface module",
"description" : """
This module adds wizards to import/export documents to be edited in
OpenOffice.
""",
"version" : "0.1",
"depends" : ["base"],
"author" : "Tiny",
"website" : "http://tinyerp.com",
"category" : "Generic Modules/Base",
"init_xml" : [ ],
"demo_xml" : ["base_report_user_data.xml"],
"update_xml" : [ "base_report_designer_wizard.xml" ],
"active": False,
"installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
4aa349715c3cca9ef7ed8463a484dc130d367c8e | Update test.py | inkenbrandt/WellApplication | test/test.py | test/test.py | # -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 13:03:00 2016
@author: p
"""
import wellapplication as wa
def test_getelev(self):
x = [-111.21,41.4]
g = wa.getelev(x)
assert g > 100.0
| # -*- coding: utf-8 -*-
"""
Created on Sat Jan 23 13:03:00 2016
@author: p
"""
import unittest
import wellapplication as wa
class TestMethods(unittest.TestCase):
def setUp(self):
pass
def test_getelev(self):
x = [-111.21,41.4]
g = wa.getelev(x)
self.assertTrue(g > 100.0)
if __name__ == '__main__':
unittest.main()
| mit | Python |
dd84635c1c990c23cb0c2d8f48c1bc226687f0c3 | Remove item | SG345/autopep8,vauxoo-dev/autopep8,Vauxoo/autopep8,Vauxoo/autopep8,vauxoo-dev/autopep8,hhatto/autopep8,MeteorAdminz/autopep8,SG345/autopep8,MeteorAdminz/autopep8,hhatto/autopep8 | test/todo.py | test/todo.py | """Incomplete fixes."""
| """Incomplete fixes."""
# E501: This should be wrapped similar to how pprint does it
{'2323k2323': 24232323, '2323323232323': 3434343434343434, '34434343434535535': 3434343434343434, '4334343434343': 3434343434}
# See below
{'2323323232323': 3434343434343434,
'2323k2323': 24232323,
'34434343434535535': 3434343434343434,
'4334343434343': 3434343434}
| mit | Python |
1fd96248bbfd74a1938520b37cf136762559a23b | Update pylsy_test.py | bcho/Pylsy,bcho/Pylsy,gnithin/Pylsy,huiyi1990/Pylsy,muteness/Pylsy,gnithin/Pylsy,muteness/Pylsy,huiyi1990/Pylsy | pylsy/tests/pylsy_test.py | pylsy/tests/pylsy_test.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import sys
sys.path.append('.')
from pylsy import pylsytable
class PylsyTableTests(unittest.TestCase):
def setUp(self):
attributes = ["name", "age"]
self.table = pylsytable(attributes)
def tearDown(self):
self.table = None
def testCreateTable(self):
name = ["a", "b"]
self.table.add_data("name", name)
age = [1, 2]
self.table.add_data("age", age)
correct_file = open('correct.out', 'r')
correctPrint = correct_file.read()
try:
# import io
# from contextlib import redirect_stdout
# with io.StringIO() as buf, redirect_stdout(buf):
# print(self.table,end='')
output = self.table.__str__()
self.assertEqual(output, correctPrint)
except ImportError:
import sys
f_handler = open('test.out', 'w')
sys.stdout = f_handler
self.table.create_table()
f_handler.close()
f_handler = open('test.out', 'r')
self.assertEqual(f_handler.read(), correctPrint)
if __name__ == '__main__':
unittest.main()
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import sys
sys.path.append('..')
from pylsy import pylsytable
class PylsyTableTests(unittest.TestCase):
def setUp(self):
attributes = ["name", "age"]
self.table = pylsytable(attributes)
def tearDown(self):
self.table = None
def testCreateTable(self):
name = ["a", "b"]
self.table.add_data("name", name)
age = [1, 2]
self.table.add_data("age", age)
correct_file = open('correct.out', 'r')
correctPrint = correct_file.read()
try:
# import io
# from contextlib import redirect_stdout
# with io.StringIO() as buf, redirect_stdout(buf):
# print(self.table,end='')
output = self.table.__str__()
self.assertEqual(output, correctPrint)
except ImportError:
import sys
f_handler = open('test.out', 'w')
sys.stdout = f_handler
self.table.create_table()
f_handler.close()
f_handler = open('test.out', 'r')
self.assertEqual(f_handler.read(), correctPrint)
if __name__ == '__main__':
unittest.main()
| mit | Python |
8eb5da4a5d2757ade473894d624303692a80bc67 | check Python version | cyberbeast/pympler,aigeano/pympler,tharunkalwa/pympler | pympler/sizer/__init__.py | pympler/sizer/__init__.py |
# check supported Python version
import sys
if getattr(sys, 'hexversion', 0) < 0x2020000:
raise NotImplementedError('sizer requires Python 2.2 or newer')
from asizeof import *
| from asizeof import *
| apache-2.0 | Python |
8c3fc2bafd0722e23c53b4edc054703cef167b89 | Remove ipdb.set_trace() debug statement. | AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud,AchyuthIIIT/mediacloud,AchyuthIIIT/mediacloud,berkmancenter/mediacloud | python_scripts/mc_solr.py | python_scripts/mc_solr.py | import requests
import ipdb
import mc_config
import psycopg2
import psycopg2.extras
import time
def get_solr_location():
##TODO -- get this from the yaml file
return 'http://localhost:8983'
def get_solr_collection_url_prefix():
return get_solr_location() + '/solr/collection1'
def solr_request( path, params):
url = get_solr_collection_url_prefix() + '/' + path
print 'url: {}'.format( url )
params['wt'] = 'json'
r = requests.get( url, params=params, headers = { 'Accept': 'application/json'})
print 'request url '
print r.url
data = r.json()
return data
def dataimport_command( command, params={}):
params['command'] = command
return solr_request( 'dataimport', params )
def dataimport_status():
return dataimport_command( 'status' )
def dataimport_delta_import():
params = {
'commit': 'true',
'clean': 'false',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full-import', params )
def dataimport_full_import():
params = {
'commit': 'true',
'clean': 'true',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full-import', params )
def dataimport_reload_config():
return dataimport_command( 'reload' )
| import requests
import ipdb
import mc_config
import psycopg2
import psycopg2.extras
import time
def get_solr_location():
##TODO -- get this from the yaml file
return 'http://localhost:8983'
def get_solr_collection_url_prefix():
return get_solr_location() + '/solr/collection1'
def solr_request( path, params):
ipdb.set_trace()
url = get_solr_collection_url_prefix() + '/' + path
print 'url: {}'.format( url )
params['wt'] = 'json'
r = requests.get( url, params=params, headers = { 'Accept': 'application/json'})
print 'request url '
print r.url
data = r.json()
return data
def dataimport_command( command, params={}):
params['command'] = command
return solr_request( 'dataimport', params )
def dataimport_status():
return dataimport_command( 'status' )
def dataimport_delta_import():
params = {
'commit': 'true',
'clean': 'false',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full-import', params )
def dataimport_full_import():
params = {
'commit': 'true',
'clean': 'true',
}
##Note: We're using the delta import through full import approach
return dataimport_command( 'full-import', params )
def dataimport_reload_config():
return dataimport_command( 'reload' )
| agpl-3.0 | Python |
006b645315190eb532ede9c36c77a7fbc4c27237 | Allow retrieval of a random quote | jessamynsmith/socialjusticebingo,jessamynsmith/underquoted,jessamynsmith/socialjusticebingo,jessamynsmith/underquoted,jessamynsmith/underquoted,jessamynsmith/socialjusticebingo,jessamynsmith/underquoted | quotations/apps/api/v1.py | quotations/apps/api/v1.py | from tastypie.authorization import DjangoAuthorization
from tastypie import fields
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from quotations.apps.quotations import models as quotations_models
from quotations.libs.auth import MethodAuthentication
from quotations.libs.serializers import Serializer
class BaseMeta(object):
serializer = Serializer()
authentication = MethodAuthentication()
authorization = DjangoAuthorization()
class AuthorResource(ModelResource):
class Meta(BaseMeta):
queryset = quotations_models.Author.objects.all()
resource_name = 'authors'
filtering = {
'name': ['exact', 'contains']
}
class QuotationResource(ModelResource):
author = fields.ForeignKey(AuthorResource, 'author', full=True)
class Meta(BaseMeta):
queryset = quotations_models.Quotation.objects.all()
resource_name = 'quotations'
filtering = {
'text': ['contains'],
'author': ALL_WITH_RELATIONS
}
def get_object_list(self, request):
object_list = super(QuotationResource, self).get_object_list(request)
if request.GET.get('random', False):
object_list = object_list.order_by('?')
return object_list
| from tastypie.authorization import DjangoAuthorization
from tastypie import fields
from tastypie.resources import ModelResource, ALL_WITH_RELATIONS
from quotations.apps.quotations import models as quotations_models
from quotations.libs.auth import MethodAuthentication
from quotations.libs.serializers import Serializer
class BaseMeta(object):
serializer = Serializer()
authentication = MethodAuthentication()
authorization = DjangoAuthorization()
class AuthorResource(ModelResource):
class Meta(BaseMeta):
queryset = quotations_models.Author.objects.all()
resource_name = 'authors'
filtering = {
'name': ['exact', 'contains']
}
class QuotationResource(ModelResource):
author = fields.ForeignKey(AuthorResource, 'author', full=True)
class Meta(BaseMeta):
queryset = quotations_models.Quotation.objects.all()
resource_name = 'quotations'
filtering = {
'text': ['contains'],
'author': ALL_WITH_RELATIONS
}
| mit | Python |
00dbe74fdea0bdb651131b7071f1c4f235db837c | Add properties for collections | McGillX/edx_data_research,McGillX/edx_data_research,McGillX/edx_data_research | reporting_scripts/base.py | reporting_scripts/base.py | import csv
from pymongo import MongoClient
class BaseEdX(object):
def __init__(self, args):
self.url = args.url
client = MongoClient(self.url)
self.db = client[args.db_name]
self._collections = None
self.output_directory = args.output_directory
self.row_limit = args.row_limit
self.csv_data = None
self.list_of_headers = None
def generate_csv(self, csv_data, list_of_headers, output_file):
"""
Genersate csv report from generated data and given list of headers
"""
self.csv_data = csv_data
self.list_of_headers = list_of_headers
number_of_rows = len(csv_data) + 1
if number_of_rows <= self.row_limit:
self._write_to_csv(self.output_file)
else:
if number_of_rows % self.row_limit:
number_of_splits = number_of_rows // self.row_limit + 1
else:
number_of_splits = number_of_rows // self.row_limit
for index in xrange(number_of_splits):
self._write_to_csv(output_file.split('.')[0] + '_' + str(index) + '.csv', index)
def _write_to_csv(self, output_file, number_of_splits=0):
"""
Helper method to write rows to csv files
"""
with open(output_file, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(self.list_of_headers)
for row in self.csv_data[number_of_splits * self.row_limit : (number_of_splits + 1) * self.row_limit]:
# This loop looks for unicode objects and encodes them to ASCII to avoif Unicode errors,
# for e.g. UnicodeEncodeError: 'ascii' codec can't encode character u'\xf1'
for index,item in enumerate(row[:]):
if type(item) is unicode:
row[index] = item.encode('ascii', 'ignore')
writer.writerow(row)
@property
def collections(self):
return self._collections
@collections.setter
def collections(self, *collections):
self._collections = {collection : self.db[collection] for collection in *collections}
| import csv
from pymongo import MongoClient
class BaseEdX(object):
def __init__(self, args):
self.url = args.url
client = MongoClient(self.url)
self.db = client[args.db_name]
self.collections = None
self.output_directory = args.output_directory
self.row_limit = args.row_limit
self.csv_data = None
self.list_of_headers = None
def generate_csv(self, csv_data, list_of_headers, output_file):
"""
Genersate csv report from generated data and given list of headers
"""
self.csv_data = csv_data
self.list_of_headers = list_of_headers
number_of_rows = len(csv_data) + 1
if number_of_rows <= self.row_limit:
self._write_to_csv(self.output_file)
else:
if number_of_rows % self.row_limit:
number_of_splits = number_of_rows // self.row_limit + 1
else:
number_of_splits = number_of_rows // self.row_limit
for index in xrange(number_of_splits):
self._write_to_csv(output_file.split('.')[0] + '_' + str(index) + '.csv', index)
def _write_to_csv(self, output_file, number_of_splits=0):
"""
Helper method to write rows to csv files
"""
with open(output_file, 'w') as csv_file:
writer = csv.writer(csv_file)
writer.writerow(self.list_of_headers)
for row in self.csv_data[number_of_splits * self.row_limit : (number_of_splits + 1) * self.row_limit]:
# This loop looks for unicode objects and encodes them to ASCII to avoif Unicode errors,
# for e.g. UnicodeEncodeError: 'ascii' codec can't encode character u'\xf1'
for index,item in enumerate(row[:]):
if type(item) is unicode:
row[index] = item.encode('ascii', 'ignore')
writer.writerow(row)
| mit | Python |
602689fb7e5b348c83f18067aa5dbbcfd2d51262 | Correct import order | rickmak/chat,SkygearIO/chat,SkygearIO/chat | chat/pubsub.py | chat/pubsub.py | from skygear.models import Record
from skygear.options import options as skyoptions
from skygear.pubsub import Hub
from .encoding import serialize_record
from .utils import _get_channel_by_user_id
def _publish_event(user_id: str, event: str, data: dict = None) -> None:
channel_name = _get_channel_by_user_id(user_id)
if channel_name:
hub = Hub(api_key=skyoptions.apikey)
hub.publish(channel_name, {
'event': event,
'data': data
})
def _publish_record_event(user_id: str,
record_type: str,
event: str,
record: Record) -> None:
_publish_event(user_id, event, {
'type': 'record',
'record_type': record_type,
'record': serialize_record(record)
})
| from skygear.models import Record
from skygear.pubsub import Hub
from skygear.options import options as skyoptions
from .encoding import serialize_record
from .utils import _get_channel_by_user_id
def _publish_event(user_id: str, event: str, data: dict = None) -> None:
channel_name = _get_channel_by_user_id(user_id)
if channel_name:
hub = Hub(api_key=skyoptions.apikey)
hub.publish(channel_name, {
'event': event,
'data': data
})
def _publish_record_event(user_id: str,
record_type: str,
event: str,
record: Record) -> None:
_publish_event(user_id, event, {
'type': 'record',
'record_type': record_type,
'record': serialize_record(record)
})
| apache-2.0 | Python |
ac9352ca3ca7d18128fde3727738dddb25b8418f | Update urls.py | ikatson/django-chunks | chunks/urls.py | chunks/urls.py | from django.conf.urls import *
urlpatterns = patterns('chunks.views',
url(r'^(?P<slug>\w+)/edit/$', 'edit_link', name='edit_chunk'),
)
| from django.conf.urls.defaults import *
urlpatterns = patterns('chunks.views',
url(r'^(?P<slug>\w+)/edit/$', 'edit_link', name='edit_chunk'),
)
| bsd-3-clause | Python |
d7cc236de2056997660c2e01861b8d8f75e46bcc | Add documentation to the booking methods | andreagrandi/booking-example | booking/restaurants/booking.py | booking/restaurants/booking.py | from datetime import timedelta
from .models import Table, Booking
def book_restaurant_table(restaurant, booking_date_time, people, minutes_slot=90):
"""
This method uses get_first_table_available to get the first table available, then it
creates a Booking on the database.
"""
table = get_first_table_available(restaurant, booking_date_time, people, minutes_slot)
if table:
booking = Booking(table=table, people=people, booking_date_time=booking_date_time)
booking.save()
return {'booking': booking.id, 'table': table.id}
else:
return None
def get_first_table_available(restaurant, booking_date_time, people, minutes_slot=90):
"""
This method returns the first available table of a restaurant, given a specific number of
people and a booking date/time.
"""
# I make sure to check if the tables are not already booked within the time slot required
# by the new booking
delta = timedelta(seconds=60*minutes_slot)
l_bound_time = booking_date_time - delta
u_bound_time = booking_date_time + delta
# First I get a list of tables booked in that restaurant, within the given time range
tables_booked = Booking.objects.filter(table__restaurant=restaurant,
booking_date_time__gt=l_bound_time, booking_date_time__lt=u_bound_time).values('table')
tables_booked_ids = [x['table'] for x in tables_booked]
# Then I get a list of all the tables, of the needed size, available in that restaurant and
# I exclude the previous list of unavailable tables. I order the list from the smaller table
# to the bigger one and I return the first, smaller one, available.
tables = Table.objects.filter(restaurant=restaurant,
restaurant__opening_time__lte=booking_date_time.hour,
restaurant__closing_time__gte=booking_date_time.hour+(minutes_slot / float(60)),
size__gte=people).exclude(id__in=tables_booked_ids).order_by('size')
if tables.count() == 0:
return None
else:
return tables[0]
| from datetime import timedelta
from .models import Table, Booking
def book_restaurant_table(restaurant, booking_date_time, people, minutes_slot=90):
table = get_first_table_available(restaurant, booking_date_time, people, minutes_slot)
if table:
booking = Booking(table=table, people=people, booking_date_time=booking_date_time)
booking.save()
return {'booking': booking.id, 'table': table.id}
else:
return None
def get_first_table_available(restaurant, booking_date_time, people, minutes_slot=90):
delta = timedelta(seconds=60*minutes_slot)
l_bound_time = booking_date_time - delta
u_bound_time = booking_date_time + delta
tables_booked = Booking.objects.filter(table__restaurant=restaurant,
booking_date_time__gt=l_bound_time, booking_date_time__lt=u_bound_time).values('table')
tables_booked_ids = [x['table'] for x in tables_booked]
tables = Table.objects.filter(restaurant=restaurant,
restaurant__opening_time__lte=booking_date_time.hour,
restaurant__closing_time__gte=booking_date_time.hour+(minutes_slot / float(60)),
size__gte=people).exclude(id__in=tables_booked_ids).order_by('size')
if tables.count() == 0:
return None
else:
return tables[0]
| mit | Python |
98c23a94152b31b047519df4e59c313f3a5e855f | Improve watermark functionality (1) | jiss-software/jiss-rendering-service,jiss-software/jiss-rendering-service | handler/Watermark.py | handler/Watermark.py | import core
import tornado
import uuid
import time
from utils import open_remote_image, add_watermark, open_image
class WatermarkHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request watermark generation for remote file')
name = '/tmp/%s.png' % str(uuid.uuid4())
proportion = self.get_query_argument('proportion', default=1.5)
text = self.get_query_argument('text', default="Test")
add_watermark(open_remote_image(self.get_query_argument('url')), name, text, proportion)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request watermark generation for request file')
proportion = self.get_query_argument('proportion', default=1.5)
text = self.get_query_argument('text', default="Test")
for item in self.request.files.values():
for file_info in item:
name = '/tmp/%s-%s.pdf' % (time.time(), file_info['filename'])
#with open(name, 'w') as f:
# f.write(file_info['body'])
add_watermark(open_image(file_info['body']), name, text, proportion)
self.response_file(name)
return
| import core
import tornado
import uuid
from utils import open_remote_image, add_watermark, open_image
class WatermarkHandler(core.BaseHandler):
@tornado.web.asynchronous
@tornado.gen.coroutine
def get(self):
self.logger.info('Request watermark generation for remote file')
name = '/tmp/%s.png' % str(uuid.uuid4())
proportion = self.get_query_argument('proportion', default=1.5)
text = self.get_query_argument('text', default="Test")
add_watermark(open_remote_image(self.get_query_argument('url')), name, text, proportion)
self.response_file(name)
@tornado.web.asynchronous
@tornado.gen.coroutine
def post(self):
self.logger.info('Request watermark generation for request file')
proportion = self.get_query_argument('proportion', default=1.5)
text = self.get_query_argument('text', default="Test")
for item in self.request.files.values():
for file_info in item:
name = '/tmp/%s-%s.pdf' % (time.time(), file_info['filename'])
#with open(name, 'w') as f:
# f.write(file_info['body'])
add_watermark(open_image(file_info['body']), name, text, proportion)
self.response_file(name)
return
| apache-2.0 | Python |
01f643bee6f71f1af1a52ecd06170b8a1f0ef578 | Increase version to 0.3dev | tekton/happybase,rickysaltzer/happybase,wfxiang08/happybase,georgesuperman/happybase,TAKEALOT/happybase | happybase/version.py | happybase/version.py | """
HappyBase version module.
This module defines the package version for use in __init__.py and
setup.py.
"""
__version__ = '0.3dev'
| """
HappyBase version module.
This module defines the package version for use in __init__.py and
setup.py.
"""
__version__ = '0.2'
| apache-2.0 | Python |
bf46a771d6ca15da75110cee6ed4f9335e1c67f8 | Add test form ResultMonitor.get_logframe | daniell/kashana,aptivate/kashana,daniell/kashana,daniell/kashana,daniell/kashana,aptivate/alfie,aptivate/kashana,aptivate/alfie,aptivate/kashana,aptivate/kashana,aptivate/alfie,aptivate/alfie | django/website/logframe/tests/test_views.py | django/website/logframe/tests/test_views.py | import pytest
import json
from unittest import TestCase
from django_dynamic_fixture import G
from ..views import ResultEditor, ResultMonitor
from ..models import (
LogFrame,
Result,
Assumption,
Rating,
)
from ..api import ResultSerializer
class ResultEditorTests(TestCase):
def setUp(self):
self.view = ResultEditor()
logframe = G(LogFrame, id=25, name="Logframe")
rating = G(Rating, log_frame=logframe)
result = G(Result, log_frame=logframe, rating=rating, ignore_fields=['parent'])
self.view.object = result
@pytest.mark.django_db
def test_data_in_context(self):
context = self.view.get_context_data()
data = self.view.get_data(self.view.object.log_frame, {})
self.assertTrue('data' in context)
data_dict = json.loads(context['data'])
for item in data:
assert item in data_dict
assert data_dict[item] == data[item]
@pytest.mark.django_db
def test__json_object_list(self):
lf = G(LogFrame)
G(Result, name="Impact", log_frame=lf, ignore_fields=['parent', 'rating'])
G(Result, name="Outcome", log_frame=lf, ignore_fields=['parent', 'rating'])
results = self.view._json_object_list(lf.results, ResultSerializer)
self.assertEqual(len(results), 2)
results_names = set([r['name'] for r in results])
self.assertEqual(set(["Impact", "Outcome"]), results_names)
@pytest.mark.django_db
def test_get_data_has_assumptions(self):
lf = self.view.object.log_frame
r1 = G(Result, name="Outcome", log_frame=lf, ignore_fields=['parent', 'rating'])
self.view.object = r1
G(Assumption, description='one', result=r1)
G(Assumption, description='two', result=r1)
r2 = G(Result, name="Output", log_frame=lf, ignore_fields=['parent', 'rating'])
G(Assumption, description='three', result=r2)
G(Assumption, description='four', result=G(Result, log_frame=G(LogFrame, name='Logframe 2'), ignore_fields=['parent', 'rating']))
full_dict = self.view.get_data(lf, {})
self.assertTrue('assumptions' in full_dict)
results = full_dict['assumptions']
self.assertEqual(3, len(results))
class ResultMonitorTests(TestCase):
@pytest.mark.django_db
def test_result_monitor_get_logframe_returns_result_logframe(self):
log_frame, _ = LogFrame.objects.get_or_create(name='Test Logframe')
result = Result(log_frame=log_frame)
result_monitor_view = ResultMonitor()
result_monitor_view.object = result
assert log_frame == result_monitor_view.get_logframe()
| import pytest
import json
from unittest import TestCase
from django_dynamic_fixture import G
from ..views import ResultEditor
from ..models import (
LogFrame,
Result,
Assumption,
Rating,
)
from ..api import ResultSerializer
class ResultEditorTests(TestCase):
def setUp(self):
self.view = ResultEditor()
logframe = G(LogFrame, id=25, name="Logframe")
rating = G(Rating, log_frame=logframe)
result = G(Result, log_frame=logframe, rating=rating, ignore_fields=['parent'])
self.view.object = result
@pytest.mark.django_db
def test_data_in_context(self):
context = self.view.get_context_data()
data = self.view.get_data(self.view.object.log_frame, {})
self.assertTrue('data' in context)
data_dict = json.loads(context['data'])
for item in data:
assert item in data_dict
assert data_dict[item] == data[item]
@pytest.mark.django_db
def test__json_object_list(self):
lf = G(LogFrame)
G(Result, name="Impact", log_frame=lf, ignore_fields=['parent', 'rating'])
G(Result, name="Outcome", log_frame=lf, ignore_fields=['parent', 'rating'])
results = self.view._json_object_list(lf.results, ResultSerializer)
self.assertEqual(len(results), 2)
results_names = set([r['name'] for r in results])
self.assertEqual(set(["Impact", "Outcome"]), results_names)
@pytest.mark.django_db
def test_get_data_has_assumptions(self):
lf = self.view.object.log_frame
r1 = G(Result, name="Outcome", log_frame=lf, ignore_fields=['parent', 'rating'])
self.view.object = r1
G(Assumption, description='one', result=r1)
G(Assumption, description='two', result=r1)
r2 = G(Result, name="Output", log_frame=lf, ignore_fields=['parent', 'rating'])
G(Assumption, description='three', result=r2)
G(Assumption, description='four', result=G(Result, log_frame=G(LogFrame, name='Logframe 2'), ignore_fields=['parent', 'rating']))
full_dict = self.view.get_data(lf, {})
self.assertTrue('assumptions' in full_dict)
results = full_dict['assumptions']
self.assertEqual(3, len(results))
| agpl-3.0 | Python |
2985e6b2dd918fa1770469a31e613ab6f43b40f9 | Stop using Python 2 | MichaelCurrin/twitterverse,MichaelCurrin/twitterverse | app/lib/db_query/schema/table_counts.py | app/lib/db_query/schema/table_counts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Database stats report for all the tables and row counts.
Usage:
$ python -m lib.query.schema.table_counts
# => print results to console.
"""
from __future__ import absolute_import
from __future__ import print_function
from sqlobject.dberrors import OperationalError
import models
def showTableCounts():
"""
Print a table of db table names and row counts, separated by a pipe symbol.
The column widths are adjusted to accommodate the widest strings.
"""
summaryData = []
nameWidth = 1
countWidth = 1
for tableName in models.__all__:
tableClass = getattr(models, tableName)
try:
count = tableClass.select().count()
except OperationalError:
count = 'table missing!'
summaryData.append((tableName, count))
if len(tableName) > nameWidth:
nameWidth = len(tableName)
# Error text does not count towards line width.
if isinstance(count, int) and len(str(count)) > countWidth:
countWidth = len(str(count))
template = '{0:%s} | {1:>%s}' % (nameWidth, countWidth)
print("Table | Rows")
print("================|===============")
for row in summaryData:
print(template.format(*row))
print()
if __name__ == '__main__':
showTableCounts()
| #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Database stats report for all the tables and row counts.
Usage:
$ python -m lib.query.schema.table_counts
# => print results to console.
"""
from __future__ import absolute_import
from __future__ import print_function
from sqlobject.dberrors import OperationalError
import models
def showTableCounts():
"""
Print a table of db table names and row counts, separated by a pipe symbol.
The column widths are adjusted to accommodate the widest strings.
"""
summaryData = []
nameWidth = 1
countWidth = 1
for tableName in models.__all__:
tableClass = getattr(models, tableName)
try:
count = tableClass.select().count()
except OperationalError:
count = 'table missing!'
summaryData.append((tableName, count))
if len(tableName) > nameWidth:
nameWidth = len(tableName)
# Error text does not count towards line width.
if isinstance(count, int) and len(str(count)) > countWidth:
countWidth = len(str(count))
template = '{0:%s} | {1:>%s}' % (nameWidth, countWidth)
print("Table | Rows")
print("================|===============")
for row in summaryData:
print(template.format(*row))
print()
if __name__ == '__main__':
showTableCounts()
| mit | Python |
8be6b576007f89fad50ea1dfacad46614c0a97c5 | ADD new exception -> EnvironmentNotFound! | OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft,OpenMined/PySyft | apps/domain/src/main/core/exceptions.py | apps/domain/src/main/core/exceptions.py | """Specific PyGrid exceptions."""
class PyGridError(Exception):
def __init__(self, message):
super().__init__(message)
class AuthorizationError(PyGridError):
def __init__(self, message=""):
if not message:
message = "User is not authorized for this operation!"
super().__init__(message)
class RoleNotFoundError(PyGridError):
def __init__(self):
message = "Role ID not found!"
super().__init__(message)
class UserNotFoundError(PyGridError):
def __init__(self):
message = "User not found!"
super().__init__(message)
class EnvironmentNotFoundError(PyGridError):
def __init__(self):
message = "Environment not found!"
super().__init__(message)
class GroupNotFoundError(PyGridError):
def __init__(self):
message = "Group ID not found!"
super().__init__(message)
class InvalidRequestKeyError(PyGridError):
def __init__(self):
message = "Invalid request key!"
super().__init__(message)
class InvalidCredentialsError(PyGridError):
def __init__(self):
message = "Invalid credentials!"
super().__init__(message)
class MissingRequestKeyError(PyGridError):
def __init__(self, message=""):
if not message:
message = "Missing request key!"
super().__init__(message)
| """Specific PyGrid exceptions."""
class PyGridError(Exception):
def __init__(self, message):
super().__init__(message)
class AuthorizationError(PyGridError):
def __init__(self, message=""):
if not message:
message = "User is not authorized for this operation!"
super().__init__(message)
class RoleNotFoundError(PyGridError):
def __init__(self):
message = "Role ID not found!"
super().__init__(message)
class UserNotFoundError(PyGridError):
def __init__(self):
message = "User not found!"
super().__init__(message)
class GroupNotFoundError(PyGridError):
def __init__(self):
message = "Group ID not found!"
super().__init__(message)
class InvalidRequestKeyError(PyGridError):
def __init__(self):
message = "Invalid request key!"
super().__init__(message)
class InvalidCredentialsError(PyGridError):
def __init__(self):
message = "Invalid credentials!"
super().__init__(message)
class MissingRequestKeyError(PyGridError):
def __init__(self, message=""):
if not message:
message = "Missing request key!"
super().__init__(message)
| apache-2.0 | Python |
50616732244e76afa375e5aefcbd8625c0ba6503 | Test for parsing a list of integers + disambiguation. | igordejanovic/parglare,igordejanovic/parglare | tests/func/test_parse_list_of_objects.py | tests/func/test_parse_list_of_objects.py | import pytest # noqa
from parglare import Grammar, Parser
from parglare.exceptions import ParseError
from parglare.actions import pass_single, pass_nochange, collect
def test_parse_list_of_integers():
grammar = """
Numbers = all_less_than_five EOF;
all_less_than_five = all_less_than_five int_less_than_five
| int_less_than_five;
"""
def int_less_than_five(input, pos):
if input[pos] < 5:
return [input[pos]]
recognizers = {
'int_less_than_five': int_less_than_five
}
g = Grammar.from_string(grammar, recognizers=recognizers, debug=True)
actions = {
'Numbers': pass_single,
'all_less_than_five': collect,
'int_less_than_five': pass_single
}
parser = Parser(g, actions=actions)
ints = [3, 4, 1, 4]
p = parser.parse(ints)
assert p == ints
# Test that error is correctly reported.
with pytest.raises(ParseError) as e:
parser.parse([4, 2, 1, 6, 3])
assert 'Error at position 1,3 => "[4, 2, 1]*[6, 3]".' in str(e)
assert 'int_less_than_five' in str(e)
def test_parse_list_of_integers_lexical_disambiguation():
def int_less_than_five(input, pos):
if input[pos] < 5:
return [input[pos]]
def int_two(input, pos):
if input[pos] == 2:
return [input[pos]]
def ascending(input, pos):
"Match sublist of ascending elements. Matches at least one."
last = pos
while True:
cint = input[last]
last += 1
if input[last] <= cint:
break
return input[pos:last]
def ascending_nosingle(input, pos):
"Match sublist of ascending elements. Matches at least two."
last = pos
while True:
cint = input[last]
last += 1
if input[last] <= cint:
break
if last-pos > 2:
return input[pos:last]
grammar = """
Numbers = all_less_than_five ascending all_less_than_five EOF;
all_less_than_five = all_less_than_five int_less_than_five
| int_less_than_five;
"""
recognizers = {
'int_less_than_five': int_less_than_five,
'int_two': int_two,
'ascending': ascending
}
g = Grammar.from_string(grammar, recognizers=recognizers)
actions = {
'Numbers': lambda _, nodes: [nodes[0], nodes[1], nodes[2]],
'all_less_than_five': collect,
'int_less_than_five': pass_single, # Unpack element for collect
'ascending': pass_nochange
}
parser = Parser(g, actions=actions, debug=True)
ints = [3, 4, 1, 4, 7, 8, 9, 3]
# This must fail as ascending and int_less_than_five recognizers both
# might match just a single int and after parser has saw 3 it will try
# to disambiguate and fail as the following 4 is recognized by both
# recognizers.
with pytest.raises(ParseError) as e:
p = parser.parse(ints)
assert 'disambiguate' in str(e)
# Now we change the recognizer for ascending to match at least two
# consecutive ascending numbers.
recognizers['ascending'] = ascending_nosingle
g = Grammar.from_string(grammar, recognizers=recognizers)
parser = Parser(g, actions=actions, debug=True)
# Parsing now must pass
p = parser.parse(ints)
assert p == [[3, 4], [1, 4, 7, 8, 9], [3]]
| import pytest # noqa
from parglare import Grammar, Parser
from parglare.exceptions import ParseError
from parglare.actions import pass_single, collect
def test_parse_list_of_integers():
grammar = """
Numbers = all_less_than_five EOF;
all_less_than_five = all_less_than_five int_less_than_five
| int_less_than_five;
"""
def int_less_than_five(input, pos):
if input[pos] < 5:
return [input[pos]]
recognizers = {
'int_less_than_five': int_less_than_five
}
g = Grammar.from_string(grammar, recognizers=recognizers, debug=True)
actions = {
'Numbers': pass_single,
'all_less_than_five': collect,
'int_less_than_five': pass_single
}
parser = Parser(g, actions=actions)
ints = [3, 4, 1, 4]
p = parser.parse(ints)
assert p == ints
# Test that error is correctly reported.
with pytest.raises(ParseError) as e:
parser.parse([4, 2, 1, 6, 3])
assert 'Error at position 1,3 => "[4, 2, 1]*[6, 3]".' in str(e)
assert 'int_less_than_five' in str(e)
| mit | Python |
a24afe5e44a14d2f5971c122b373efa1d4052d19 | Index doesn't capture path | w0rm/todo,w0rm/todo | controllers/index.py | controllers/index.py | from template import render
class Index:
def GET(self):
return render.index()
| from template import render
class Index:
def GET(self, path=None):
return render.index()
| mit | Python |
a04aa71a1e097773c1c1966f3d872ddfd79c7253 | Allow usage of Cookiecutter as a library, not just a command. | michaeljoseph/cookiecutter,vincentbernat/cookiecutter,utek/cookiecutter,janusnic/cookiecutter,takeflight/cookiecutter,lgp171188/cookiecutter,kkujawinski/cookiecutter,moi65/cookiecutter,cichm/cookiecutter,lucius-feng/cookiecutter,jhermann/cookiecutter,cguardia/cookiecutter,nhomar/cookiecutter,pjbull/cookiecutter,atlassian/cookiecutter,benthomasson/cookiecutter,sp1rs/cookiecutter,audreyr/cookiecutter,janusnic/cookiecutter,tylerdave/cookiecutter,vintasoftware/cookiecutter,letolab/cookiecutter,sp1rs/cookiecutter,Springerle/cookiecutter,0k/cookiecutter,Vauxoo/cookiecutter,christabor/cookiecutter,venumech/cookiecutter,pjbull/cookiecutter,willingc/cookiecutter,takeflight/cookiecutter,drgarcia1986/cookiecutter,terryjbates/cookiecutter,foodszhang/cookiecutter,luzfcb/cookiecutter,stevepiercy/cookiecutter,foodszhang/cookiecutter,Vauxoo/cookiecutter,tylerdave/cookiecutter,christabor/cookiecutter,audreyr/cookiecutter,drgarcia1986/cookiecutter,utek/cookiecutter,lucius-feng/cookiecutter,Springerle/cookiecutter,hackebrot/cookiecutter,ionelmc/cookiecutter,stevepiercy/cookiecutter,willingc/cookiecutter,ramiroluz/cookiecutter,hackebrot/cookiecutter,ramiroluz/cookiecutter,dajose/cookiecutter,moi65/cookiecutter,vincentbernat/cookiecutter,jhermann/cookiecutter,agconti/cookiecutter,alex/cookiecutter,0k/cookiecutter,benthomasson/cookiecutter,cguardia/cookiecutter,kkujawinski/cookiecutter,letolab/cookiecutter,alex/cookiecutter,luzfcb/cookiecutter,vintasoftware/cookiecutter,cichm/cookiecutter,agconti/cookiecutter,michaeljoseph/cookiecutter,atlassian/cookiecutter,nhomar/cookiecutter,dajose/cookiecutter,lgp171188/cookiecutter,ionelmc/cookiecutter,venumech/cookiecutter,terryjbates/cookiecutter | cookiecutter/main.py | cookiecutter/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.main
-----------------
Main entry point for the `cookiecutter` command.
The code in this module is also a good example of how to use Cookiecutter as a
library rather than a script.
"""
import argparse
import logging
import os
from .cleanup import remove_repo
from .find import find_template
from .generate import generate_context, generate_files
from .vcs import git_clone
def cookiecutter(input_dir):
"""
API equivalent to using Cookiecutter at the command line.
"""
# If it's a git repo, clone and prompt
if input_dir.endswith('.git'):
got_repo_arg = True
repo_dir = git_clone(input_dir)
project_template = find_template(repo_dir)
os.chdir(repo_dir)
else:
project_template = input_dir
# Create project from local context and project template.
context = generate_context(
json_dir='json/'
)
generate_files(
input_dir=project_template,
context=context
)
# Remove repo if Cookiecutter cloned it in the first place.
# Here the user just wants a project, not a project template.
if got_repo_arg:
generated_project = context['project']['repo_name']
remove_repo(repo_dir, generated_project)
def main():
""" Entry point for the package, as defined in setup.py. """
# Log info and above to console
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# Get command line input/output arguments
parser = argparse.ArgumentParser(
description='Create a project from a Cookiecutter project template.'
)
parser.add_argument(
'input_dir',
help='Cookiecutter project template dir, e.g. {{project.repo_name}}/'
)
args = parser.parse_args()
cookiecutter(args.input_dir)
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
cookiecutter.main
-----------------
Main entry point for the `cookiecutter` command.
The code in this module is also a good example of how to use Cookiecutter as a
library rather than a script.
"""
import argparse
import logging
import os
from .cleanup import remove_repo
from .find import find_template
from .generate import generate_context, generate_files
from .vcs import git_clone
def main():
""" Entry point for the package, as defined in setup.py. """
# Log info and above to console
logging.basicConfig(format='%(levelname)s: %(message)s', level=logging.INFO)
# Get command line input/output arguments
parser = argparse.ArgumentParser(
description='Create a project from a Cookiecutter project template.'
)
parser.add_argument(
'input_dir',
help='Cookiecutter project template dir, e.g. {{project.repo_name}}/'
)
args = parser.parse_args()
# If it's a git repo, clone and prompt
if args.input_dir.endswith('.git'):
got_repo_arg = True
repo_dir = git_clone(args.input_dir)
project_template = find_template(repo_dir)
os.chdir(repo_dir)
else:
project_template = args.input_dir
# Create project from local context and project template.
context = generate_context(
json_dir='json/'
)
generate_files(
input_dir=project_template,
context=context
)
# Remove repo if Cookiecutter cloned it in the first place.
# Here the user just wants a project, not a project template.
if got_repo_arg:
generated_project = context['project']['repo_name']
remove_repo(repo_dir, generated_project)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
07404cabc5ea2cab65be063225a093362175e45f | Bump version number for release | html5lib/html5lib-python,html5lib/html5lib-python,html5lib/html5lib-python | html5lib/__init__.py | html5lib/__init__.py | """
HTML parsing library based on the `WHATWG HTML specification
<https://whatwg.org/html>`_. The parser is designed to be compatible with
existing HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage::
import html5lib
with open("my_document.html", "rb") as f:
tree = html5lib.parse(f)
For convenience, this module re-exports the following names:
* :func:`~.html5parser.parse`
* :func:`~.html5parser.parseFragment`
* :class:`~.html5parser.HTMLParser`
* :func:`~.treebuilders.getTreeBuilder`
* :func:`~.treewalkers.getTreeWalker`
* :func:`~.serializer.serialize`
"""
from __future__ import absolute_import, division, unicode_literals
from .html5parser import HTMLParser, parse, parseFragment
from .treebuilders import getTreeBuilder
from .treewalkers import getTreeWalker
from .serializer import serialize
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
"getTreeWalker", "serialize"]
# this has to be at the top level, see how setup.py parses this
#: Distribution version number.
__version__ = "1.0"
| """
HTML parsing library based on the `WHATWG HTML specification
<https://whatwg.org/html>`_. The parser is designed to be compatible with
existing HTML found in the wild and implements well-defined error recovery that
is largely compatible with modern desktop web browsers.
Example usage::
import html5lib
with open("my_document.html", "rb") as f:
tree = html5lib.parse(f)
For convenience, this module re-exports the following names:
* :func:`~.html5parser.parse`
* :func:`~.html5parser.parseFragment`
* :class:`~.html5parser.HTMLParser`
* :func:`~.treebuilders.getTreeBuilder`
* :func:`~.treewalkers.getTreeWalker`
* :func:`~.serializer.serialize`
"""
from __future__ import absolute_import, division, unicode_literals
from .html5parser import HTMLParser, parse, parseFragment
from .treebuilders import getTreeBuilder
from .treewalkers import getTreeWalker
from .serializer import serialize
__all__ = ["HTMLParser", "parse", "parseFragment", "getTreeBuilder",
"getTreeWalker", "serialize"]
# this has to be at the top level, see how setup.py parses this
#: Distribution version number.
__version__ = "0.9999999999-dev"
| mit | Python |
50454e4920798c41759d4d114f65c2c111b8aadb | Add CLI help test | jdgwartney/boundary-api-cli,boundary/pulse-api-cli,boundary/boundary-api-cli,jdgwartney/pulse-api-cli,jdgwartney/boundary-api-cli,boundary/boundary-api-cli,boundary/pulse-api-cli,jdgwartney/pulse-api-cli | tests/unit/boundary/event_create_test.py | tests/unit/boundary/event_create_test.py | #!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from boundary import EventCreate
from cli_test import CLITest
class EventCreateTest(TestCase):
def setUp(self):
self.cli = EventCreate()
def test_cli_description(self):
CLITest.check_description(self, self.cli)
def test_cli_help(self):
CLITest.check_cli_help(self, self.cli)
| #!/usr/bin/env python
#
# Copyright 2015 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from unittest import TestCase
from boundary import EventCreate
from cli_test import CLITest
class EventCreateTest(TestCase):
def setUp(self):
self.cli = EventCreate()
def test_cli_description(self):
CLITest.check_description(self, self.cli)
| apache-2.0 | Python |
11e6ddf33c3fea07ddb0a42b983da856a9ce2ae2 | Fix SMTP test | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/unit/returners/smtp_return_test.py | tests/unit/returners/smtp_return_test.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place (mp@saltstack.com)`
tests.unit.returners.smtp_return_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Salt Testing libs
from salttesting import TestCase, skipIf
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')
# Import salt libs
from salt.returners import smtp_return as smtp
smtp.__salt__ = {}
try:
import gnupg # pylint: disable=unused-import
HAS_GNUPG = True
except ImportError:
HAS_GNUPG = False
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SMTPReturnerTestCase(TestCase):
'''
Test SMTP returner
'''
def _test_returner(self, mocked_smtplib, *args): # pylint: disable=unused-argument
'''
Test to see if the SMTP returner sends a message
'''
ret = {'id': '12345',
'fun': 'mytest.func',
'fun_args': 'myfunc args',
'jid': '54321',
'return': 'The room is on fire as shes fixing her hair'
}
with patch.dict(smtp.__salt__, {'config.option': MagicMock()}):
smtp.returner(ret)
self.assertTrue(mocked_smtplib.return_value.sendmail.called)
if HAS_GNUPG:
@patch('salt.returners.smtp_return.gnupg')
@patch('salt.returners.smtp_return.smtplib.SMTP')
def test_returner(self, mocked_smtplib, *args):
self._test_returner(mocked_smtplib, *args)
else:
@patch('salt.returners.smtp_return.smtplib.SMTP')
def test_returner(self, mocked_smtplib, *args):
self._test_returner(mocked_smtplib, *args)
SMTPReturnerTestCase.test_returner = test_returner
if __name__ == '__main__':
from integration import run_tests
run_tests(SMTPReturnerTestCase, needs_daemon=False)
| # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place (mp@saltstack.com)`
tests.unit.returners.smtp_return_test
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# Import Salt Testing libs
from salttesting import skipIf, TestCase
from salttesting.helpers import ensure_in_syspath
from salttesting.mock import NO_MOCK, NO_MOCK_REASON, MagicMock, patch
ensure_in_syspath('../../')
# Import salt libs
from salt.returners import smtp_return as smtp
smtp.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
@patch('salt.returners.smtp_return.gnupg')
@patch('salt.returners.smtp_return.smtplib.SMTP')
class SMTPReturnerTestCase(TestCase):
def test_returner(self, mocked_smtplib, mocked_gpg):
'''
Test to see if the SMTP returner sends a message
'''
ret = {'id': '12345',
'fun': 'mytest.func',
'fun_args': 'myfunc args',
'jid': '54321',
'return': 'The room is on fire as shes fixing her hair'
}
with patch.dict(smtp.__salt__, {'config.option': MagicMock()}):
smtp.returner(ret)
self.assertTrue(mocked_smtplib.return_value.sendmail.called)
if __name__ == '__main__':
from integration import run_tests
run_tests(SMTPReturnerTestCase, needs_daemon=False)
| apache-2.0 | Python |
0fddb5d0556ca7bb612747e60a7051c939750b4e | Add SEE_MORE attribute | refeed/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,refeed/coala-bears,refeed/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears,coala/coala-bears,refeed/coala-bears,refeed/coala-bears,coala/coala-bears,refeed/coala-bears,coala-analyzer/coala-bears,coala/coala-bears | bears/configfiles/DockerfileLintBear.py | bears/configfiles/DockerfileLintBear.py | import json
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Checks file syntax as well as arbitrary semantics and best practice
in Dockerfiles. It also checks LABEL rules against docker images.
Uses ``dockerfile_lint`` to provide the analysis.
"""
LANGUAGES = {'Dockerfile'}
REQUIREMENTS = {NpmRequirement('dockerfile_lint', '0.2.7')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Smell'}
SEE_MORE = 'https://github.com/projectatomic/dockerfile_lint'
severity_map = {
'error': RESULT_SEVERITY.MAJOR,
'warn': RESULT_SEVERITY.NORMAL,
'info': RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file,
dockerfile_lint_rule_file: str=''):
"""
:param dockerfile_lint_rule_file:
A yaml rule file for `dockerfile_lint`.
"""
args = ('--json', '-f', filename)
if dockerfile_lint_rule_file:
args += ('-r', dockerfile_lint_rule_file)
return args
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == 'summary':
continue
for issue in output[severity]['data']:
yield Result.from_values(
origin=self,
message=issue['message'],
file=filename,
severity=self.severity_map[issue['level']],
line=issue.get('line'))
| import json
from coalib.bearlib.abstractions.Linter import linter
from dependency_management.requirements.NpmRequirement import NpmRequirement
from coalib.results.RESULT_SEVERITY import RESULT_SEVERITY
from coalib.results.Result import Result
@linter(executable='dockerfile_lint')
class DockerfileLintBear:
"""
Checks file syntax as well as arbitrary semantics and best practice
in Dockerfiles. It also checks LABEL rules against docker images.
Uses ``dockerfile_lint`` to provide the analysis.
See <https://github.com/projectatomic/dockerfile_lint#dockerfile-lint> for
more information .
"""
LANGUAGES = {'Dockerfile'}
REQUIREMENTS = {NpmRequirement('dockerfile_lint', '0.2.7')}
AUTHORS = {'The coala developers'}
AUTHORS_EMAILS = {'coala-devel@googlegroups.com'}
LICENSE = 'AGPL-3.0'
CAN_DETECT = {'Syntax', 'Smell'}
severity_map = {
'error': RESULT_SEVERITY.MAJOR,
'warn': RESULT_SEVERITY.NORMAL,
'info': RESULT_SEVERITY.INFO}
@staticmethod
def create_arguments(filename, file, config_file,
dockerfile_lint_rule_file: str=''):
"""
:param dockerfile_lint_rule_file:
A yaml rule file for `dockerfile_lint`.
"""
args = ('--json', '-f', filename)
if dockerfile_lint_rule_file:
args += ('-r', dockerfile_lint_rule_file)
return args
def process_output(self, output, filename, file):
output = json.loads(output)
for severity in output:
if severity == 'summary':
continue
for issue in output[severity]['data']:
yield Result.from_values(
origin=self,
message=issue['message'],
file=filename,
severity=self.severity_map[issue['level']],
line=issue.get('line'))
| agpl-3.0 | Python |
1c375984a0a6f0ff3e70c358d8a17dd88e71086f | Update doctest. | UI-DataScience/summer2014 | hw7/distributions.py | hw7/distributions.py | #!/usr/bin/python
# Week 7 problem 1. PMF, PDF, and CDF.
# Do not delete the comments.
# Do not chnage the functions names, do not change the input parameters.
# Do not change the return types of the functions.
# Your code goes to the part where it says your code goes here.
# Do not change anything else other than the part where it says your code goes here.
# Do not import other modules other than the ones listed here.
from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
from stats2 import get_column
def get_histogram(sequence):
'''
Takes a list and returns a dictionary of the form {value: frequency}.
Examples:
>>> get_histogram(['a', 'a', 'b', 'b', 'b', 'c']) == {'a': 2, 'b': 3, 'c': 1}
True
>>> get_histogram([4, 5, 6, 6, 6]) == {4: 1, 5: 1, 6: 3}
True
'''
hist = {}
# your code goes here
return hist
def get_pmf(sequence):
'''
Takes a list and returns a dictionary of the form {value: probability}.
Examples:
>>> get_pmf(['a', 'b', 'b', 'b', 'c']) == {'a': 0.2, 'b': 0.6, 'c': 0.2}
True
>>> get_pmf([4, 5, 6, 6, 6]) == {4: 0.2, 5: 0.2, 6: 0.6}
True
'''
pmf = {}
# your code goes here
return pmf
def get_cdf(sequence):
'''
Takes a Numpy array and returns a tuple that represents
the x and y axes of the empirical distribution function.
Examples:
>>> import numpy as np
>>> x = np.array([4, 3, 1, 2, 5])
>>> get_cdf(x)
(array([1, 2, 3, 4, 5]), array([ 0.2, 0.4, 0.6, 0.8, 1. ]))
'''
# your code goes here
return x, y
if __name__ == '__main__':
filename = 'ss12pil.csv'
# person's age is WKHP, the 72nd column in ss12pil.csv
hours_worked = get_column(filename, 72)
# remove people who didn't work (hours == 0) and some outliers
hours_worked = [h for h in hours_worked if h > 0 and h < 80]
# The PMF.
hist = get_pmf(hours_worked)
# peron's income is PINCP, the 103rd column in ss12pilcsv
income = np.loadtxt(filename, delimiter = ',', skiprows = 1, usecols = (103, ),
converters = {103: lambda x: int(x or 0.0)})
# remove some outliers (income below $1000)
income = income[income > 1000]
# The CDF
cdf_x, cdf_y = get_cdf(income)
# The CCDF
ccdf_x, ccdf_y = cdf_x, 1.0 - cdf_y
# you code goes here
| #!/usr/bin/python
# Week 7 problem 1. PMF, PDF, and CDF.
# Do not delete the comments.
# Do not chnage the functions names, do not change the input parameters.
# Do not change the return types of the functions.
# Your code goes to the part where it says your code goes here.
# Do not change anything else other than the part where it says your code goes here.
# Do not import other modules other than the ones listed here.
from __future__ import print_function, division
import matplotlib.pyplot as plt
import numpy as np
from stats2 import get_column
def get_histogram(sequence):
'''
Takes a list and returns a dictionary of the form {value: frequency}.
Examples:
>>> get_histogram(['a', 'a', 'b', 'b', 'b', 'c'])
{'a': 2, 'b': 3, 'c': 1}
>>> get_histogram([4, 5, 6, 6, 6])
{4: 1, 5: 1, 6: 3}
'''
hist = {}
# your code goes here
return hist
def get_pmf(sequence):
'''
Takes a list and returns a dictionary of the form {value: probability}.
Examples:
>>> get_pmf(['a', 'a', 'b', 'b', 'b', 'c'])
{'a': 0.3333333333333333, 'b': 0.5, 'c': 0.16666666666666666}
>>> get_pmf([4, 5, 6, 6, 6])
{4: 0.2, 5: 0.2, 6: 0.6}
'''
pmf = {}
# your code goes here
return pmf
def get_cdf(sequence):
'''
Takes a Numpy array and returns a tuple that represents
the x and y axes of the empirical distribution function.
Examples:
>>> import numpy as np
>>> x = np.array([4, 3, 1, 2, 5])
>>> get_cdf(x)
(array([1, 2, 3, 4, 5]), array([ 0.2, 0.4, 0.6, 0.8, 1. ]))
'''
# your code goes here
return x, y
if __name__ == '__main__':
filename = 'ss12pil.csv'
# person's age is WKHP, the 72nd column in ss12pil.csv
hours_worked = get_column(filename, 72)
# remove people who didn't work (hours == 0) and some outliers
hours_worked = [h for h in hours_worked if h > 0 and h < 80]
# The PMF.
hist = get_pmf(hours_worked)
# peron's income is PINCP, the 103rd column in ss12pilcsv
income = np.loadtxt(filename, delimiter = ',', skiprows = 1, usecols = (103, ),
converters = {103: lambda x: int(x or 0.0)})
# remove some outliers (income below $1000)
income = income[income > 1000]
# The CDF
cdf_x, cdf_y = get_cdf(income)
# The CCDF
ccdf_x, ccdf_y = cdf_x, 1.0 - cdf_y
# you code goes here
| mit | Python |
1ceffc41f56e92c71760e6c347cfa90d09bb6472 | add 64 bit install | jtdressel/woodhouse | initialize_system.py | initialize_system.py | #!/usr/bin/python
#Currently unstable. Don't use.
#determine where config files are
import subprocess
import struct
import re
#if needed install dropbox
#if dropbox is set up
def install_dropbox():
if((struct.calcsize("P") *8) is 32):
#TODO: insert code to check if already installed
subprocess.call('cd ~ && wget -O - "https://www.dropbox.com/download?plat=lnx.x86" | tar xzf -', shell = True)
subprocess.call('~/.dropbox-dist/dropboxd', shell=True)#TODO download management
elif((struct.calcsize("P") * 8) is 64):
subprocess.call('cd ~ && wget -O - "https://www.dropbox.com/download/?plat=lnx.x86_64" | tar xzf -', shell = True)
subprocess.call('~/.dropbox-dist/dropboxd', shell=True)
#todo determine if you can pass parameters to this.
#download dropbox control script
#TODO: set apt-name
apt = "apt-get"
def install_git():
subprocess.call("sudo " + apt + " install git -y", shell=True)
def config_git(email, name):
subprocess.call("git config --global user.email " + email, shell=True)
subprocess.call("git config --global user.name " + name, shell=True)
# config_git("ubergeek@jamesdressel.com", "'James Dressel'")
def config_git_editor(editor):
#TODO: test this code
p = re.compile(r'"(?!").+"$')
m = p.match(editor)
if(m):
subprocess.call('git config --global core.editor ' + editor, shell=True)
else:
subprocess.call('git config --global core.editor vim', shell=True)
def config_gitignore(gitignore):
subprocess.call('git config --global core.excludesfile ' + gitignore, shell=True)
def set_ssh_config(config):
subprocess.call("ln -ifb " + config + " ~/.ssh/config",shell=True)
def set_aliases(aliases):
subprocess.call("ln -ifb " + aliases + " ~/.bash_aliases", shell=True)
def set_profile(profile):
subprocess.call("ln -ifb " + profile + " ~/.profile", shell=True)
def set_bashrc(bashrc):
subprocess.call("ln -ifb " + bashrc + " ~/.bashrc", shell=True)
def part_a():
install_dropbox()
install_git()
config_git("ubergeek@jamesdressel.com", "'James Dressel'")
config_git_editor('"vim"')
def part_b():
config_gitignore("~/Dropbox/config/.gitignore_global")
set_ssh_config("~/Dropbox/config/ssh_config")
set_aliases("~/Dropbox/config/.bash_aliases")
set_profile("~/Dropbox/config/.profile")
set_bashrc("~/Dropbox/config/.bashrc")
part_a()
#part_b()
| #!/usr/bin/python
#Currently unstable. Don't use.
#determine where config files are
import subprocess
import struct
import re
#if needed install dropbox
#if dropbox is set up
def install_dropbox():
if((struct.calcsize("P") *8) is 32):
#TODO: insert code to check if already installed
subprocess.call('cd ~ && wget -O - "https://www.dropbox.com/download?plat=lnx.x86" | tar xzf -', shell = True)
subprocess.call('~/.dropbox-dist/dropboxd', shell=True)#TODO download management
elif((struct.calcsize("P") * 8) is 64):
pass #TODO: 64bit
#todo determine if you can pass parameters to this.
#download dropbox control script
#TODO: set apt-name
apt = "apt-get"
def install_git():
subprocess.call("sudo " + apt + " install git -y", shell=True)
def config_git(email, name):
subprocess.call("git config --global user.email " + email, shell=True)
subprocess.call("git config --global user.name " + name, shell=True)
# config_git("ubergeek@jamesdressel.com", "'James Dressel'")
def config_git_editor(editor):
#TODO: test this code
p = re.compile(r'"(?!").+"$')
m = p.match(editor)
if(m):
subprocess.call('git config --global core.editor ' + editor, shell=True)
else:
subprocess.call('git config --global core.editor vim', shell=True)
def config_gitignore(gitignore):
subprocess.call('git config --global core.excludesfile ' + gitignore, shell=True)
def set_ssh_config(config):
subprocess.call("ln -ifb " + config + " ~/.ssh/config",shell=True)
def set_aliases(aliases):
subprocess.call("ln -ifb " + aliases + " ~/.bash_aliases", shell=True)
def set_profile(profile):
subprocess.call("ln -ifb " + profile + " ~/.profile", shell=True)
def set_bashrc(bashrc):
subprocess.call("ln -ifb " + bashrc + " ~/.bashrc", shell=True)
def part_a():
install_dropbox()
install_git()
config_git("ubergeek@jamesdressel.com", "'James Dressel'")
config_git_editor('"vim"')
def part_b():
config_gitignore("~/Dropbox/config/.gitignore_global")
set_ssh_config("~/Dropbox/config/ssh_config")
set_aliases("~/Dropbox/config/.bash_aliases")
set_profile("~/Dropbox/config/.profile")
set_bashrc("~/Dropbox/config/.bashrc")
part_a()
#part_b()
| mit | Python |
c7c0148040cf53990bbc481fd96dc475855fcd99 | test using sys.stdin.readline() as input | fonorobert/mailforward | tofile.py | tofile.py | #!/usr/bin/env python3
#import argparse
import sys
from configparser import ConfigParser
#Parse config
config = ConfigParser()
config.read('config.cfg')
#create a list of mailing lists from the config file
# lists = []
# for k, v in config['LISTS'].items():
# #strip whitespaces from email addresses
# this_list = v.split(',')
# new_list = []
# for a in this_list:
# new_list.append(a.strip())
# #append current list to list of all lists (yo dawg!)
# lists.append({k: new_list})
# #Parse stdin
# parser = argparse.ArgumentParser()
# parser.add_argument('mail')
# args = parser.parse_args()
args = sys.stdin.readline()
with open('mailout.txt', mode='w', newline='') as f:
print(args, file=f)
#Next up: Build function that decides whether to forward mail or not Build
#function that splits mail up and resend it address by address
| #!/usr/bin/env python3
import argparse
from configparser import ConfigParser
#Parse config
config = ConfigParser()
config.read('config.cfg')
#create a list of mailing lists from the config file
# lists = []
# for k, v in config['LISTS'].items():
# #strip whitespaces from email addresses
# this_list = v.split(',')
# new_list = []
# for a in this_list:
# new_list.append(a.strip())
# #append current list to list of all lists (yo dawg!)
# lists.append({k: new_list})
#Parse stdin
parser = argparse.ArgumentParser()
parser.add_argument('mail')
args = parser.parse_args()
with open('mailout.txt', mode='w', newline='') as f:
print(args.mail, file=f)
#Next up: Build function that decides whether to forward mail or not Build
#function that splits mail up and resend it address by address
| mit | Python |
0dcc0116a344dd440c1f3536f36bdd399c0a7666 | Add missing name | benjolitz/powerscout,benjolitz/powerscout | powerscout/__main__.py | powerscout/__main__.py | import logging
import os.path
import time
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from japronto import Application
from .routes import REGISTRY
from .config import load_config, load_environment_variables, PREFIX
from .services.apc import update_apc_status
logger = logging.getLogger('powerscout')
logging.basicConfig(level=logging.DEBUG)
APC_WORKER = multiprocessing.Event()
def apc_worker():
logger.info('Started APC worker')
APC_WORKER.set()
while APC_WORKER.is_set():
logger.debug('Updating status')
try:
update_apc_status()
except Exception as e:
logger.exception('Unable to update the APC status!')
time.sleep(2)
def main():
if f'{PREFIX}CONFIG_PATH' in os.environ:
load_config(os.path.expanduser(os.environ[f'{PREFIX}CONFIG_PATH']))
load_environment_variables()
app = Application()
for path, func in REGISTRY.items():
app.router.add_route(path, func)
with ProcessPoolExecutor(1) as exe:
future = exe.submit(apc_worker)
try:
app.run()
except KeyboardInterrupt:
APC_WORKER.clear()
future.cancel()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config_path', nargs='?', default=None, type=str, metavar='FILE')
args = parser.parse_args()
if args.config_path:
os.environ[f'{PREFIX}CONFIG_PATH'] = args.config_path
main()
| import logging
import os.path
import time
import multiprocessing
from concurrent.futures import ProcessPoolExecutor
from japronto import Application
from .routes import REGISTRY
from .config import load_config, load_environment_variables, PREFIX
from .services.apc import update_apc_status
logging.basicConfig(level=logging.DEBUG)
APC_WORKER = multiprocessing.Event()
def apc_worker():
logger.info('Started APC worker')
APC_WORKER.set()
while APC_WORKER.is_set():
logger.debug('Updating status')
try:
update_apc_status()
except Exception as e:
logger.exception('Unable to update the APC status!')
time.sleep(2)
def main():
if f'{PREFIX}CONFIG_PATH' in os.environ:
load_config(os.path.expanduser(os.environ[f'{PREFIX}CONFIG_PATH']))
load_environment_variables()
app = Application()
for path, func in REGISTRY.items():
app.router.add_route(path, func)
with ProcessPoolExecutor(1) as exe:
future = exe.submit(apc_worker)
try:
app.run()
except KeyboardInterrupt:
APC_WORKER.clear()
future.cancel()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('config_path', nargs='?', default=None, type=str, metavar='FILE')
args = parser.parse_args()
if args.config_path:
os.environ[f'{PREFIX}CONFIG_PATH'] = args.config_path
main()
| bsd-3-clause | Python |
dd18adf0b4098e749493e97fea23149be16d1fc7 | tweak the tests | tsileo/blobstash,tsileo/blobstash,tsileo/blobstash | integration_tests.py | integration_tests.py | import logging
from tests.client import Blob
from tests.client import Client
from tests.server import BlobStash
MORE_BLOBS = 1000
logging.basicConfig(level=logging.DEBUG)
logging.info('Running integration tests...')
b = BlobStash()
b.cleanup()
c = Client()
logging.info('Start BlobStash')
b.run()
logging.info('[STEP 1] Test the blob store')
logging.info('Insert test blob')
blob = Blob.from_data(b'hello')
resp = c.put_blob(blob)
assert resp.status_code == 200, 'failed to put blob {}'.format(blob.hash)
logging.info('Fetch test blob back')
blob2 = c.get_blob(blob.hash, to_blob=True)
assert blob2.data == blob.data, 'failed to fetch blob {} != {}'.format(blob.data, blob2.data)
logging.info('Enumerating blobs')
blobs_resp = c._get('/api/blobstore/blobs').json()
assert len(blobs_resp['refs']) == 1, 'failed to enumate blobs, expected 1 got {}'.format(len(blobs_resp['refs']))
blob_ref = blobs_resp['refs'][0]
assert blob_ref['Hash'] == blob2.hash, 'failed to enumate blobs, hash does not match, expected {} got {}'.format(
blob_ref['Hash'], blob2.hash
)
logging.info('Now adding more blobs')
more_blobs = [blob]
for _ in range(MORE_BLOBS):
current_blob = Blob.from_random()
more_blobs.append(current_blob)
resp = c.put_blob(current_blob)
assert resp.status_code == 200, 'failed to put blob {}'.format(blob.hash)
logging.info('Restart BlobStash, and enumerate all the blobs')
b.shutdown()
b.run()
blobs_resp = c._get('/api/blobstore/blobs').json()
assert len(blobs_resp['refs']) == len(more_blobs), 'failed to enumate blobs, expected {} got {}'.format(
len(more_blobs),
len(blobs_resp['refs']),
)
logging.info('Ensures we can read them all')
for blob in more_blobs:
blob2 = c.get_blob(blob.hash, to_blob=True)
assert blob2.data == blob.data, 'failed to fetch blob {} != {}'.format(blob.data, blob2.data)
# Shutdown BlobStash
b.shutdown()
logging.info('Success \o/')
| import logging
from tests.client import Blob
from tests.client import Client
from tests.server import BlobStash
MORE_BLOBS = 500
logging.basicConfig(level=logging.DEBUG)
logging.info('Running integration tests...')
b = BlobStash()
b.cleanup()
c = Client()
logging.info('Start BlobStash')
b.run()
logging.info('[STEP 1] Test the blob store')
logging.info('Insert test blob')
blob = Blob.from_data(b'hello')
resp = c.put_blob(blob)
assert resp.status_code == 200, 'failed to put blob {}'.format(blob.hash)
logging.info('Fetch test blob back')
blob2 = c.get_blob(blob.hash, to_blob=True)
assert blob2.data == blob.data, 'failed to fetch blob {} != {}'.format(blob.data, blob2.data)
logging.info('Enumerating blobs')
blobs_resp = c._get('/api/blobstore/blobs').json()
assert len(blobs_resp['refs']) == 1, 'failed to enumate blobs, expected 1 got {}'.format(len(blobs_resp['refs']))
blob_ref = blobs_resp['refs'][0]
assert blob_ref['Hash'] == blob2.hash, 'failed to enumate blobs, hash does not match, expected {} got {}'.format(
blob_ref['Hash'], blob2.hash
)
logging.info('Now adding more blobs')
more_blobs = [blob]
for _ in range(MORE_BLOBS):
current_blob = Blob.from_random()
more_blobs.append(current_blob)
resp = c.put_blob(current_blob)
assert resp.status_code == 200, 'failed to put blob {}'.format(blob.hash)
logging.info('Restart BlobStash, and enumerate all the blobs')
b.shutdown()
b.run()
blobs_resp = c._get('/api/blobstore/blobs').json()
assert len(blobs_resp['refs']) == len(more_blobs), 'failed to enumate blobs, expected {} got {}'.format(
len(more_blobs),
len(blobs_resp['refs']),
)
logging.info('Ensures we can read them all')
for blob in more_blobs:
blob2 = c.get_blob(blob.hash, to_blob=True)
assert blob2.data == blob.data, 'failed to fetch blob {} != {}'.format(blob.data, blob2.data)
# Shutdown BlobStash
b.shutdown()
logging.info('Success \o/')
| mit | Python |
f80d43e0c044af0fe8792d652b8530aa1fd11366 | Add import | dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api,dbinetti/barberscore-django,dbinetti/barberscore-django,dbinetti/barberscore,barberscore/barberscore-api,barberscore/barberscore-api | project/api/signals.py | project/api/signals.py | # Django
import django_rq
# Third-Party
from collections import defaultdict
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.conf import settings
# Local
from .models import Person
from .models import User
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
    """Keep non-staff users in sync with the external auth account store.

    On creation the remote account is created synchronously and its id is
    adopted as the local username; on update the sync is queued asynchronously.
    """
    if not instance.is_staff:
        if created:
            account, new = instance.update_or_create_account()
            if not new:
                # a remote account already existed for a brand-new user
                raise RuntimeError('User problem')
            instance.username = account['user_id']
            instance.save()
        else:
            # push updates on the low-priority RQ queue
            queue = django_rq.get_queue('low')
            queue.enqueue(
                instance.update_or_create_account
            )
    return
@receiver(pre_delete, sender=User)
def user_pre_delete(sender, instance, **kwargs):
    """Queue deletion of the external auth account when a non-staff user
    is deleted locally."""
    if not instance.is_staff:
        queue = django_rq.get_queue('low')
        queue.enqueue(
            instance.delete_account
        )
    return
| # Django
# Third-Party
from collections import defaultdict
from django.db.models.signals import post_save
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from django.conf import settings
# Local
from .models import Person
from .models import User
from django.core.validators import validate_email
from django.core.exceptions import ValidationError
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, **kwargs):
if not instance.is_staff:
if created:
account, new = instance.update_or_create_account()
if not new:
raise RuntimeError('User problem')
instance.username = account['user_id']
instance.save()
else:
queue = django_rq.get_queue('low')
queue.enqueue(
instance.update_or_create_account
)
return
@receiver(pre_delete, sender=User)
def user_pre_delete(sender, instance, **kwargs):
if not instance.is_staff:
queue = django_rq.get_queue('low')
queue.enqueue(
instance.delete_account
)
return
| bsd-2-clause | Python |
a50e0d234bab6e0e8abc9751578abe1f19a08e09 | add class app, two buttons | brucelau-github/raspberry-pi-proj | window.py | window.py | from Tkinter import *
class App:
    """Minimal Tkinter demo window: a label plus a quit and a hello button.

    Python 2 / Tkinter star-import (Frame, Label, Button, TOP, LEFT).
    """

    def __init__(self, master):
        frame = Frame(master)
        frame.pack()
        # NOTE(review): the label text "quit" looks like a copy-paste of the
        # button text -- confirm intended
        self.label = Label(frame, text="quit", fg="red")
        self.label.pack(side=TOP)
        # frame.quit exits mainloop() without destroying the window
        self.button = Button(frame, text="quit", fg="red", command=frame.quit)
        self.button.pack(side=LEFT)
        self.hi_there = Button(frame, text="Hello", command=self.say_hi)
        self.hi_there.pack(side=LEFT)

    def say_hi(self):
        # simple callback used by the Hello button
        print "hi there, everyone!"
root = Tk() #create basic window
app = App(root)
root.mainloop() #keep the windows showing
root.destroy()
| from Tkinter import *
root = Tk() #create basic window
thelabel = Label(root, text="this is very easy") #add a latbel
thelabel.pack()
root.mainloop() #keep the windows showing
| mit | Python |
5134a75065a8ac3b01e3f8bb18e10870efa6f3b7 | remove () | abraham/social-backup | worker.py | worker.py | """Backup personal social activities."""
import os
import time
from config import Config
from plus import Plus
from tweet import Tweet as Twitter
from github import GitHub
from terminal import Terminal
from disk import Disk
from mongo import Mongo
services = {}
storages = {}
config = Config(file_name=os.environ['CONFIG_FILE_NAME'])
if 'plus' in config.get('enabledServices'):
services['plus'] = Plus(**config.get('plus'))
if 'twitter' in config.get('enabledServices'):
services['twitter'] = Twitter(**config.get('twitter'))
if 'github' in config.get('enabledServices'):
services['github'] = GitHub(**config.get('github'))
if 'terminal' in config.get('enabledStorages'):
storages['terminal'] = Terminal(**config.get('terminal'))
if 'disk' in config.get('enabledStorages'):
storages['disk'] = Disk(**config.get('disk'))
if 'mongo' in config.get('enabledStorages'):
storages['mongo'] = Mongo(**config.get('mongo'))
while True:
"""Craw for stuffs."""
totalItems = 0
print 'starting saving', config.get('paginationLimit'), 'items from', \
services.keys(), 'to', storages.keys()
for service in services:
items = []
while (config.get('paginationLimit') > services[service].getTotalItems()):
items = services[service].getItems()
totalItems += len(items)
for item in items:
mutated = services[service].mutateItem(item)
for storage in storages:
use_mutated = config.get(storage)['mutationEnabled']
storages[storage].saveItem(namespace=service,
id_key=services[service].getIdKey(),
item=mutated if use_mutated else item)
print 'fetched and saved', services[service].getTotalItems(), service, \
'items of', config.get('paginationLimit')
print 'Finished saving items', totalItems, 'to', storages.keys()
print '===================='
print 'sleeping for', config.get('sleep'), 'minutes'
time.sleep(int(config.get('sleep')) * 60)
| """Backup personal social activities."""
import os
import time
from config import Config
from plus import Plus
from tweet import Tweet as Twitter
from github import GitHub
from terminal import Terminal
from disk import Disk
from mongo import Mongo
services = {}
storages = {}
config = Config(file_name=os.environ['CONFIG_FILE_NAME'])
if 'plus' in config.get('enabledServices'):
services['plus'] = Plus(**config.get('plus'))
if 'twitter' in config.get('enabledServices'):
services['twitter'] = Twitter(**config.get('twitter'))
if 'github' in config.get('enabledServices'):
services['github'] = GitHub(**config.get('github'))
if 'terminal' in config.get('enabledStorages'):
storages['terminal'] = Terminal(**config.get('terminal'))
if 'disk' in config.get('enabledStorages'):
storages['disk'] = Disk(**config.get('disk'))
if 'mongo' in config.get('enabledStorages'):
storages['mongo'] = Mongo(**config.get('mongo'))
while(True):
"""Craw for stuffs."""
totalItems = 0
print 'starting saving', config.get('paginationLimit'), 'items from', \
services.keys(), 'to', storages.keys()
for service in services:
items = []
while (config.get('paginationLimit') > services[service].getTotalItems()):
items = services[service].getItems()
totalItems += len(items)
for item in items:
mutated = services[service].mutateItem(item)
for storage in storages:
use_mutated = config.get(storage)['mutationEnabled']
storages[storage].saveItem(namespace=service,
id_key=services[service].getIdKey(),
item=mutated if use_mutated else item)
print 'fetched and saved', services[service].getTotalItems(), service, \
'items of', config.get('paginationLimit')
print 'Finished saving items', totalItems, 'to', storages.keys()
print '===================='
print 'sleeping for', config.get('sleep'), 'minutes'
time.sleep(int(config.get('sleep')) * 60)
| mit | Python |
3c0489bdfa7f1cadb62899eddda182b0f602763c | Convert quilt.cli.unapplied into a Command class | bjoernricks/python-quilt,vadmium/python-quilt | quilt/cli/unapplied.py | quilt/cli/unapplied.py | # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from quilt.cli.meta import Command
from quilt.db import Db, Series
class UnappliedCommand(Command):
    """quilt 'unapplied' subcommand: print the patches in the series that
    are not yet applied, in series order."""

    usage = "%prog unapplied"
    name = "unapplied"

    def run(self, options, args):
        db = Db(self.get_pc_dir())
        top = db.top_patch()
        series = Series(self.get_patches_dir())
        if top is None:
            # nothing applied yet: every patch in the series is unapplied
            patches = series.patches()
        else:
            patches = series.patches_after(top)
        for patch in patches:
            print patch
| # vim: fileencoding=utf-8 et sw=4 ts=4 tw=80:
# python-quilt - A Python implementation of the quilt patch system
#
# Copyright (C) 2012 Björn Ricks <bjoern.ricks@googlemail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
import os
from optparse import OptionParser
from quilt.db import Db, Series
def parse(args):
usage = "%prog unapplied"
parser = OptionParser(usage=usage)
(options, pargs) = parser.parse_args(args)
patches = os.environ.get("QUILT_PATCHES")
if not patches:
patches = "patches"
db = Db(".pc")
top = db.top_patch()
series = Series(patches)
if top is None:
patches = series.patches()
else:
patches = series.patches_after(top)
for patch in patches:
print patch
| mit | Python |
8c65f7e2e075bae468401a1eee799251c1d928df | Fix for Delay warnings when using USB audio (untested) | oksome/Home,oksome/Home,oksome/Home | raspberry_pi/config.py | raspberry_pi/config.py | #!/usr/bin/env python2
'''
More complex configuration, to run in addition to 'config.sh'.
'''
if raw_input('Set USB sound card as default ? [y][N]') in 'y', 'Y', 'yes':
original = open('/etc/modprobe.d/alsa-base.conf').read()
modified = original.replace('options snd-usb-audio index=-2', 'options snd-usb-audio index=0')
open('/etc/modprobe.d/alsa-base.conf', 'w').write(modified)
print("Default sound card set to USB via '/etc/modprobe.d/alsa-base.conf'.")
if raw_input('Keep crazy logs due to USB sound in /var/log/debug and kernel ? [y][N]') not in 'y', 'Y', 'yes':
# Documentation from http://root42.blogspot.be/2013/04/delay-warnings-when-using-usb-audio-on.html
open('/etc/modprobe.d/snd_usb_audio.conf', 'a').write('\noptions snd-usb-audio nrpacks=1\n')
print("Anti-log option added.")
| #!/usr/bin/env python2
'''
More complex configuration, to run in addition to 'config.sh'.
'''
if raw_input('Set USB sound card as default ? [y][N]') in 'y', 'Y', 'yes':
original = open('/etc/modprobe.d/alsa-base.conf').read()
modified = original.replace('options snd-usb-audio index=-2', 'options snd-usb-audio index=0')
open('/etc/modprobe.d/alsa-base.conf', 'w').write(modified)
print("Default sound card set to USB via '/etc/modprobe.d/alsa-base.conf'.")
| agpl-3.0 | Python |
bd2979bf87142188eafac3981a11ccd0553735a0 | make i19conf spit out an error message when an n00b like me mis-uses it | johaness/i19,johaness/i19 | i19conf.py | i19conf.py | import sys
from os import path as osp
def main():
    """
    Usage: i19conf FILENAME
    Returns the absolute path to FILENAME inside the i19 module.
    """
    # require at least one positional argument (the filename to resolve)
    assert len(sys.argv) > 1, main.__doc__
    modpath = osp.abspath(osp.dirname(__file__))
    fullpath = osp.join(modpath, sys.argv[1])
    if not osp.exists(fullpath):
        # exit with a helpful message and a non-zero status
        sys.exit('"{}" not found in "{}".'.format(sys.argv[1], modpath))
    print fullpath


if __name__ == '__main__':
    main()
| import sys
from os import path as osp
def main():
print osp.join(osp.abspath(osp.dirname(__file__)), sys.argv[1])
| bsd-2-clause | Python |
e0b82cf9ed24870cb313328e5539acc5fe7f6508 | Add some (inefective) score maximizing attempts | ForeverWintr/stock_awesome | stock_awesome/levels/chock_a_block.py | stock_awesome/levels/chock_a_block.py | import time
from stock_awesome.obj import market
def main():
    """
    Algorithm: Wait for an ask, then send a fill or kill for the quantity of the ask at the ask
    price.
    """
    # StockAPI(account, venue, stock) credentials for this level instance
    m = market.StockAPI('RAJ40214463', 'SSMCEX', 'IPSO')

    #collection of orders placed
    # NOTE(review): orders and filled are never used in this version --
    # confirm they can be removed
    orders = {}
    filled = 0
    upper_limit = 2450

    #try to buy 100000
    to_buy = 100000

    while to_buy > 0:
        quote = m.quote()
        # NOTE(review): defaulting ask to 0 means a quote with NO ask still
        # passes the ask < upper_limit test and triggers a buy -- confirm intended
        ask = quote.get('ask', 0)
        bid = quote.get('bid')  # currently unused

        if ask < upper_limit:
            # fill-or-kill: either the whole askSize fills or nothing does
            r = m.buy(quote['askSize'], ask, order_type='fill-or-kill')
            to_buy -= r['totalFilled']
            print("Bought {}, {} remaining".format(r['totalFilled'], to_buy))
        else:
            # price too high; wait for the market to move
            time.sleep(1)
    print('done')
def update_orders(m, orders):
    """
    Re-query the market API for the current status of every tracked order.
    """
    refreshed = {}
    for order_id in orders:
        refreshed[order_id] = m.order_status(order_id)
    return refreshed
def update_filled(orders):
    """
    Pop every no-longer-open order out of *orders* (in place) and return
    the total quantity filled across them.
    """
    closed_ids = [order_id for order_id, order in orders.items()
                  if not order['open']]
    total = 0
    for order_id in closed_ids:
        total += orders.pop(order_id)['totalFilled']
    return total
if __name__ == '__main__':
main()
| import time
from stock_awesome.obj import market
def main():
"""
Algorithm: Wait for an ask, then send a fill or kill for the quantity of the ask at the ask
price.
"""
m = market.StockAPI('WEB29978261', 'NOWUEX', 'BBCM')
#collection of orders placed
orders = {}
filled = 0
upper_limit = 3300
#try to buy 100000
to_send = 1000
while to_send > 0:
quote = m.quote()
ask = quote.get('ask')
if ask and ask < upper_limit:
r = m.buy(quote['askSize'], quote['ask'], order_type='fill-or-kill')
to_send -= 1
orders[r['id']] = r
orders = update_orders(m, orders)
filled += update_filled(orders)
else:
time.sleep(1)
def update_orders(m, orders):
"""
update order status
"""
return {o: m.order_status(o) for o in orders}
def update_filled(orders):
"""
Remove filled orders and update our count.
"""
closed = [o for o in orders if not orders[o]['open']]
#remove and sum filled orders
filled = sum(orders.pop(o)['totalFilled'] for o in closed)
return filled
if __name__ == '__main__':
main()
| mit | Python |
57ae768e5f4fd7c11a37a85cfb040b0b66379f2a | remove flake8 errors | ajaniv/django-core-models,ajaniv/django-core-models | create_super_user.py | create_super_user.py | #!/usr/bin/env python
"""
.. module:: create_super_user
:synopsis: crate default super user.
crate default super user
"""
# @TODO: provide the ability to pass parameters on the command line
# of settings file, user name, email, password
from __future__ import print_function
import os
from django.db.utils import IntegrityError
import django
DJANGO_SETTINGS_MODULE = "DJANGO_SETTINGS_MODULE"
os.environ[DJANGO_SETTINGS_MODULE] = os.environ.get(
DJANGO_SETTINGS_MODULE, "django_core_models.settings")
_username = 'admin'
_password = _username + '123'
_domain = 'example.com'
_email = '%s@%s' % (_username, _domain)
def _create_super_user(username, email, password):
    """Create a Django superuser, printing the outcome.

    The auth model import is deferred so django.setup() can run first.
    """
    from django.contrib.auth.models import User
    try:
        User.objects.create_superuser(username=username,
                                      email=email,
                                      password=password)
        print("user '%s' created" % username)
    except IntegrityError:
        # a user with this username already exists; not an error for this script
        print("user '%s' already exists in the database" % username)
if __name__ == "__main__":
if hasattr(django, 'setup'):
django.setup()
_create_super_user(_username, _email, _password)
| #!/usr/bin/env python
"""
.. module:: create_super_user
:synopsis: crate default super user.
crate default super user
"""
# @TODO: provide the ability to pass parameters on the command line
# of settings file, user name, email, password
from __future__ import print_function
import os
from django.db.utils import IntegrityError
import django
DJANGO_SETTINGS_MODULE = "DJANGO_SETTINGS_MODULE"
os.environ[DJANGO_SETTINGS_MODULE] = os.environ.get(
DJANGO_SETTINGS_MODULE, "django_core_models.settings")
_username = 'admin'
_password = _username + '123'
_domain = 'example.com'
_email = '%s@%s' % (_username, _domain)
def _create_super_user(username, email, password):
from django.contrib.auth.models import User
try:
User.objects.create_superuser(username=username,
email=email,
password=password)
print ("user '%s' created" % username)
except IntegrityError:
print ("user '%s' already exists in the database" % username)
if __name__ == "__main__":
if hasattr(django, 'setup'):
django.setup()
_create_super_user(_username, _email, _password)
| mit | Python |
646f854ff226609e0620ffc46e17c5f2cf1b4dbc | fix path bug | imwithye/git-ignore,imwithye/git-ignore | ignores.py | ignores.py | #! /usr/bin/env python2
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Ciel <imwithye@gmail.com>
#
# Distributed under terms of the MIT license.
import os, sys
userpath = os.path.expanduser('~') + '/.git-ignore-templates/'
syspath = sys.path[0] + "/system-templates/"
githubpath = sys.path[0] + "/github-templates/"
# get file list in three search paths
def filelist():
    """Return (user, system, github) lists of template filenames.

    The user and system template directories are created on first use;
    the github templates checkout is optional and yields [] when absent.
    """
    try:
        usertemplates = os.listdir(userpath)
    except OSError:
        # directory missing: create it and list it again (now empty)
        os.mkdir(userpath)
        usertemplates = os.listdir(userpath)
    try:
        systemplates = os.listdir(syspath)
    except OSError:
        os.mkdir(syspath)
        systemplates = os.listdir(syspath)
    try:
        githubtemplates = os.listdir(githubpath)
    except OSError:
        githubtemplates = []
    return (usertemplates, systemplates, githubtemplates)
def searchfile(language, templates):
    """Return the entry of *templates* matching *language* (case-insensitive
    '<language>.gitignore' comparison), or "" when there is no match."""
    wanted = language.lower() + ".gitignore"
    matches = (name for name in templates if name.lower() == wanted)
    return next(matches, "")
# create ignore file path list
def ignorelist(languages):
    """Map each requested language to the full path of its .gitignore template.

    Search order: user templates, then system templates, then the github
    templates checkout.  Languages with no matching template are silently
    skipped, so the result may be shorter than *languages*.
    """
    user, system, github = filelist()
    ignorelist = []
    for language in languages:
        ignorefile = ""
        ignorefile = searchfile(language, user)
        if ignorefile != "":
            ignorelist.append(userpath + ignorefile)
            continue
        ignorefile = searchfile(language, system)
        if ignorefile != "":
            ignorelist.append(syspath + ignorefile)
            continue
        ignorefile = searchfile(language, github)
        if ignorefile != "":
            ignorelist.append(githubpath + ignorefile)
            continue
    return ignorelist
def readfile(filepath):
    """Return the contents of *filepath*, or "" if it cannot be read."""
    try:
        # BUG FIX: the old version closed the handle in a finally clause;
        # when open() itself failed, 'ignorefile' was unbound and the finally
        # raised NameError instead of returning "".  A with-statement closes
        # the handle safely in every case.
        with open(filepath, 'r') as ignorefile:
            return ignorefile.read()
    except Exception:
        # best-effort semantics (as before): any unreadable template -> ""
        return ""
| #! /usr/bin/env python2
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Ciel <imwithye@gmail.com>
#
# Distributed under terms of the MIT license.
import os
userpath = os.path.expanduser('~') + '/.git-ignore-templates/'
syspath = os.getcwd() + "/system-templates/"
githubpath = os.getcwd() + "/github-templates/"
# get file list in three search paths
def filelist():
try:
usertemplates = os.listdir(userpath)
except OSError:
os.mkdir(userpath)
usertemplates = os.listdir(userpath)
try:
systemplates = os.listdir(syspath)
except OSError:
os.mkdir(userpath)
systemplates = os.listdir(syspath)
try:
githubtemplates = os.listdir(githubpath)
except OSError:
githubtemplates = []
return (usertemplates, systemplates, githubtemplates)
# search file
def searchfile(language, templates):
language = language.lower() + ".gitignore"
for template in templates:
if language == template.lower():
return template
return ""
# create ignore file path list
def ignorelist(languages):
user, system, github = filelist()
ignorelist = []
for language in languages:
ignorefile = ""
ignorefile = searchfile(language, user)
if ignorefile != "":
ignorelist.append(userpath + ignorefile)
continue
ignorefile = searchfile(language, system)
if ignorefile != "":
ignorelist.append(syspath + ignorefile)
continue
ignorefile = searchfile(language, github)
if ignorefile != "":
ignorelist.append(githubpath + ignorefile)
continue
return ignorelist
def readfile(filepath):
content = ""
try:
ignorefile = open(filepath, 'r')
content = ignorefile.read()
except:
content = ""
finally:
ignorefile.close()
return content
| mit | Python |
d2b6d8751f822237d7e6f380a22491ffade58439 | implement associativity lookup for operators | mbdriscoll/ctree,ucb-sejits/ctree,ucb-sejits/ctree | ctree/precendence.py | ctree/precendence.py | """
Utilities for determining precedence in C, with the goal of minimizing the
number of parentheses in the generated code.
"""
# ---------------------------------------------------------------------------
# dictionary of node class -> precedence ids, where 1 denotes the highest-
# precedence operator and 18 is the lowest. This is how the table on
# wikipedia does it, but we flip it later so larger numbers mean higher
# precedence. For the origin of this table see
# http://en.wikipedia.org/wiki/Operators_in_C_and_C%2B%2B#Operator_precedence
_EXPR_TO_PRECEDENCE = {
PostInc: 2,
PostDec: 2,
FunctionCall: 2,
ArrayRef: 2,
# foo.bar: 2,
# foo->bar: 2,
PreInc: 3,
PreDec: 3,
Plus: 3,
Minus: 3,
Not: 3,
BitNot: 3,
# cast: 3,
Deref: 3,
Ref: 3,
SizeOf: 3,
Mul: 5,
Div: 5,
Mod: 5,
Add: 6,
Sub: 6,
BitShL: 7,
BitShR: 7,
Lt: 8,
LtE: 8,
Gt: 8,
GtE: 8,
Eq: 9,
NotEq: 9,
BitAnd: 10,
BitXor: 11,
BitOr: 12,
And: 13,
Or: 14,
TernaryOp: 15,
Assign: 16,
AddAssign: 16,
SubAssign: 16,
MulAssign: 16,
DivAssign: 16,
ModAssign: 16,
BitShLAssign: 16,
BitShRAssign: 16,
BitAndAssign: 16,
BitXorAssign: 16,
BitNotAssign: 16,
Comma: 18,
}
def get_precendence(node):
    """Return the precedence of *node*'s operator, where larger numbers
    mean the operator binds more tightly."""
    node_type = type(node)
    if node_type not in _EXPR_TO_PRECEDENCE:
        raise Exception("Unable to determine precedence for %s." % node_type.__name__)
    # flip the wikipedia-style level so higher numbers mean higher precedence
    return 20 - _EXPR_TO_PRECEDENCE[node_type]
_PRECEDENCE_ASSOCIATES_LTR = {
2: True,
3: False,
5: True,
6: True,
7: True,
8: True,
9: True,
10: True,
11: True,
12: True,
13: True,
14: True,
15: False,
16: False,
18: True
}
def is_left_associative(node):
    """Return True if *node*'s operator associates left-to-right, False if
    right-to-left.

    Raises Exception when the operator's associativity is not recorded.

    Fixes three bugs in the previous version: it called a nonexistent
    'get_precedence' (the defined name is 'get_precendence'), it *called*
    the _PRECEDENCE_ASSOCIATES_LTR dict instead of subscripting it, and it
    looked the flipped precedence up against a table keyed on raw levels.
    """
    pred = get_precendence(node)
    try:
        # _PRECEDENCE_ASSOCIATES_LTR is keyed on the raw (wikipedia-style)
        # precedence level; get_precendence returns 20 - level, so undo
        # the flip before the lookup
        return _PRECEDENCE_ASSOCIATES_LTR[20 - pred]
    except KeyError:
        raise Exception(
            "Cannot determine if operator %s (precedence %d) is left- or "
            "right-associative." % (type(node).__name__, pred))
| """
Utilities for determining precedence in C, with the goal of minimizing the
number of parentheses in the generated code.
"""
# ---------------------------------------------------------------------------
# dictionary of node class -> precedence ids, where 1 denotes the highest-
# precedence operator and 18 is the lowest. This is how the table on
# wikipedia does it, but we flip it later so larger numbers mean higher
# precedence. For the origin of this table see
# http://en.wikipedia.org/wiki/Operators_in_C_and_C%2B%2B#Operator_precedence
_CTREE_EXPR_TO_PRECEDENCE = {
PostInc: 2,
PostDec: 2,
FunctionCall: 2,
ArrayRef: 2,
# foo.bar: 2,
# foo->bar: 2,
PreInc: 3,
PreDec: 3,
Plus: 3,
Minus: 3,
Not: 3,
BitNot: 3,
# cast: 3,
Deref: 3,
Ref: 3,
SizeOf: 3,
Mul: 5,
Div: 5,
Mod: 5,
Add: 6,
Sub: 6,
BitShL: 7,
BitShR: 7,
Lt: 8,
LtE: 8,
Gt: 8,
GtE: 8,
Eq: 9,
NotEq: 9,
BitAnd: 10,
BitXor: 11,
BitOr: 12,
And: 13,
Or: 14,
TernaryOp: 15,
Assign: 16,
AddAssign: 16,
SubAssign: 16,
MulAssign: 16,
DivAssign: 16,
ModAssign: 16,
BitShLAssign: 16,
BitShRAssign: 16,
BitAndAssign: 16,
BitXorAssign: 16,
BitNotAssign: 16,
Comma: 18,
}
def precendence(node):
try:
pred = _CTREE_EXPR_TO_PRECEDENCE[type(node)]
except KeyError:
raise Exception("Unable to determine precedence for %s." % type(node).__name__)
# flip precedence so higher numbers mean higher precedence
return 20 - pred
| bsd-2-clause | Python |
36ee0c94a4e9812ef62b51097c9bf6a5ec181a9f | Bump version number. | ABcDexter/cython,ChristopherHogan/cython,acrispin/cython,hhsprings/cython,hickford/cython,fperez/cython,dahebolangkuan/cython,achernet/cython,scoder/cython,larsmans/cython,hpfem/cython,roxyboy/cython,fperez/cython,encukou/cython,slonik-az/cython,da-woods/cython,roxyboy/cython,hpfem/cython,hhsprings/cython,da-woods/cython,JelleZijlstra/cython,scoder/cython,roxyboy/cython,hpfem/cython,cython/cython,c-blake/cython,marscher/cython,achernet/cython,cython/cython,ChristopherHogan/cython,encukou/cython,fabianrost84/cython,slonik-az/cython,da-woods/cython,mcanthony/cython,dahebolangkuan/cython,ABcDexter/cython,da-woods/cython,marscher/cython,rguillebert/CythonCTypesBackend,fabianrost84/cython,ABcDexter/cython,c-blake/cython,mcanthony/cython,JelleZijlstra/cython,andreasvc/cython,JelleZijlstra/cython,hickford/cython,slonik-az/cython,c-blake/cython,larsmans/cython,hhsprings/cython,acrispin/cython,c-blake/cython,hickford/cython,mrGeen/cython,achernet/cython,achernet/cython,encukou/cython,marscher/cython,larsmans/cython,acrispin/cython,fperez/cython,andreasvc/cython,marscher/cython,mrGeen/cython,mrGeen/cython,hhsprings/cython,acrispin/cython,madjar/cython,acrispin/cython,andreasvc/cython,madjar/cython,scoder/cython,rguillebert/CythonCTypesBackend,mcanthony/cython,mcanthony/cython,c-blake/cython,marscher/cython,rguillebert/CythonCTypesBackend,dahebolangkuan/cython,mrGeen/cython,roxyboy/cython,fperez/cython,JelleZijlstra/cython,fperez/cython,andreasvc/cython,fabianrost84/cython,achernet/cython,ChristopherHogan/cython,madjar/cython,hickford/cython,cython/cython,hpfem/cython,slonik-az/cython,hpfem/cython,fabianrost84/cython,encukou/cython,madjar/cython,hickford/cython,fabianrost84/cython,cython/cython,JelleZijlstra/cython,larsmans/cython,larsmans/cython,andreasvc/cython,ABcDexter/cython,hhsprings/cython,mcanthony/cython,mrGeen/cython,roxyboy/cython,scoder/cython,dahebolangkuan/cython,rguillebert/CythonCTypesBackend,slo
nik-az/cython,encukou/cython,dahebolangkuan/cython,ABcDexter/cython,madjar/cython | Cython/__init__.py | Cython/__init__.py | __version__ = "0.15rc0"
# Void cython.* directives (for case insensitive operating systems).
from Cython.Shadow import *
| __version__ = "0.14.1+"
# Void cython.* directives (for case insensitive operating systems).
from Cython.Shadow import *
| apache-2.0 | Python |
c11b5a0c88a574ecc57b278aa71fa66dd28bebb5 | Improve update on hstore | regardscitoyens/nosfinanceslocales,regardscitoyens/nosfinanceslocales,regardscitoyens/nosfinanceslocales | server/localfinance/scripts/compute_adminzonefinance_sql_variable.py | server/localfinance/scripts/compute_adminzonefinance_sql_variable.py | import os
import sys
import transaction
from sqlalchemy import engine_from_config, cast, String
from sqlalchemy.dialects.postgresql import hstore
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZoneFinance,
)
from ..maps import MAPS_CONFIG
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> map_id\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
map_id = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
# fill AdminZoneFinance with new variable set in MAPS_CONFIG
map_ids = [MAPS_CONFIG.keys()] if map_id == 'ALL' else [map_id]
for map_id in map_ids:
config = MAPS_CONFIG[map_id]
q = DBSession.query(AdminZoneFinance.data).filter(config['sql_filter'])
store = AdminZoneFinance.data
q.update(
{store: store + hstore(map_id, cast(config['sql_variable'], String))},
synchronize_session=False
)
transaction.commit()
if __name__ == '__main__':
main()
| import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from ..models import (
DBSession,
AdminZoneFinance,
)
from ..maps import MAPS_CONFIG
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> <variable_name>\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
var_name = argv[2]
setup_logging(config_uri)
settings = get_appsettings(config_uri)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
# fill AdminZoneFinance with new variable set in MAPS_CONFIG
# this is not optimal as sql_variable can already be there... would be
# better to launch a script for one variable!
config = MAPS_CONFIG[var_name]
results = DBSession.query(AdminZoneFinance.id, config['sql_variable'])\
.filter(config['sql_filter']).order_by(AdminZoneFinance.id).all()
nb = len(results)
nb_packets = 100
# commit values by packets
for i in range(nb_packets+1):
print "packet : %i"%i
istart = i*nb/nb_packets
iend = min((i+1)*nb/nb_packets, nb)
subresults = results[istart:iend]
ids, vals = zip(*subresults)
if len(subresults) == 0:
continue
with transaction.manager:
ids = zip(*subresults)[0]
items = DBSession.query(AdminZoneFinance).filter(AdminZoneFinance.id.in_(ids)).order_by(AdminZoneFinance.id).all()
for item, val in zip(items, vals):
item.data[var_name] = unicode(val)
if __name__ == '__main__':
main()
| mit | Python |
c7de1e01b79637ba26dc79a9ff7191b2c0e6f1f4 | Fix deprecation warning | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/apps/userreports/ui/widgets.py | corehq/apps/userreports/ui/widgets.py | from __future__ import absolute_import
import json
from django import forms
import six
class JsonWidget(forms.Textarea):
    """Textarea that renders JSON-serializable values pretty-printed."""

    def render(self, name, value, attrs=None, renderer=None):
        # A plain string is rendered untouched -- it is most likely invalid
        # JSON typed by the user, which should be shown back for correction.
        if not isinstance(value, six.string_types):
            value = json.dumps(value, indent=2)
        return super(JsonWidget, self).render(name, value, attrs, renderer)
| from __future__ import absolute_import
import json
from django import forms
import six
class JsonWidget(forms.Textarea):
def render(self, name, value, attrs=None):
if isinstance(value, six.string_types):
# It's probably invalid JSON
return super(JsonWidget, self).render(name, value, attrs)
return super(JsonWidget, self).render(name, json.dumps(value, indent=2), attrs)
| bsd-3-clause | Python |
58208c340a7dfe0619ca331583fafc361b0dd84b | Add a few master's haystack synonyms. | edx/course-discovery,edx/course-discovery,edx/course-discovery,edx/course-discovery | course_discovery/settings/synonyms.py | course_discovery/settings/synonyms.py | # Note: Do not use synonyms with punctuation, search and typeahead do not yet fully support punctuation
SYNONYMS = [
# Organizations
['ACCA', 'ACCA', 'ACCAx'],
['ACLU', 'American Civil Liberties Union'],
['ASU', 'ASUx', 'Arizona State', 'Arizona State University'],
['Berkeley', 'UC BerkeleyX', 'UCBerkeleyX'],
['Georgia Institute of Technology', 'Georgia Tech', 'GTx', 'GT'],
['GFA', 'Global Freshman Academy'],
['Instituto Tecnologico y De Estudios Superiores De Monterrey', 'Monterrey', 'TecdeMonterreyX'],
['Microsoft', 'MicrosoftX', 'msft'],
['MIT', 'MITx'],
['New York Institute of Finance', 'NYIF', 'NYIFx'],
['The University of Michigan', 'MichiganX', 'UMichiganX', 'U Michigan'],
['The University of Texas System', 'UTx'],
['The University of California San Diego', 'UC San DiegoX', 'UCSanDiegoX'],
['The Disque Foundation', 'Save A LifeX', 'SaveALifeX'],
['UC3M', 'UC3Mx', 'Charles III University of Madrid'],
['University of Pennsylvania', 'PennX', 'UPennX', 'UPenn'],
['Universitat Politècnica de València', 'València', 'Valencia'],
['Wharton', 'WhartonX'],
# Master's specific
['masters', 'oms', 'ms'],
['masters degree', 'masters program'],
# Common Mispellings
['cs50x', 'cs50'],
['ilets', 'ielts'],
['phyton', 'python'],
['toefl', 'tofel', 'toelf'],
# Subjects
['a11y', 'accessibility'],
['bi', 'business intelligence'],
['bme', 'biomedical engineering'],
['computer science', 'cs'],
['econ', 'economics'],
['ee', 'electrical engineering'],
['español', 'espanol', 'spanish'],
['français', 'francais', 'french'],
['it', 'information technology'],
['mis', 'management information systems'],
['psych', 'psychology'],
['seo', 'search engine optimization'],
['ux', 'user experience'],
# Other Terms
['autocad', 'auto cad', 'cad'],
['aws', 'amazon web services'],
['css', 'cascading style sheets'],
['excel', 'microsoft excel', 'msft excel'],
['hr', 'human resources'],
['HTML5', 'HTML'],
['iot', 'internet of things'],
['javascript', 'js', 'java script', 'react', 'typescript', 'jquery'],
['management', 'mgmt'],
['os', 'operating system'],
['photo', 'photography'],
['vb', 'visual basic'],
['vba', 'excel'],
['usa', 'united states of america', 'america', 'murika'],
['virtual reality', 'VR'],
['chemistri', 'chemistry']
]
| # Note: Do not use synonyms with punctuation, search and typeahead do not yet fully support punctuation
SYNONYMS = [
# Organizations
['ACCA', 'ACCA', 'ACCAx'],
['ACLU', 'American Civil Liberties Union'],
['ASU', 'ASUx', 'Arizona State', 'Arizona State University'],
['Berkeley', 'UC BerkeleyX', 'UCBerkeleyX'],
['Georgia Institute of Technology', 'Georgia Tech', 'GTx'],
['GFA', 'Global Freshman Academy'],
['Instituto Tecnologico y De Estudios Superiores De Monterrey', 'Monterrey', 'TecdeMonterreyX'],
['Microsoft', 'MicrosoftX', 'msft'],
['MIT', 'MITx'],
['New York Institute of Finance', 'NYIF', 'NYIFx'],
['The University of Michigan', 'MichiganX', 'UMichiganX', 'U Michigan'],
['The University of Texas System', 'UTx'],
['The University of California San Diego', 'UC San DiegoX', 'UCSanDiegoX'],
['The Disque Foundation', 'Save A LifeX', 'SaveALifeX'],
['UC3M', 'UC3Mx', 'Charles III University of Madrid'],
['University of Pennsylvania', 'PennX', 'UPennX', 'UPenn'],
['Universitat Politècnica de València', 'València', 'Valencia'],
['Wharton', 'WhartonX'],
# Common Mispellings
['cs50x', 'cs50'],
['ilets', 'ielts'],
['phyton', 'python'],
['toefl', 'tofel', 'toelf'],
# Subjects
['a11y', 'accessibility'],
['bi', 'business intelligence'],
['bme', 'biomedical engineering'],
['computer science', 'cs'],
['econ', 'economics'],
['ee', 'electrical engineering'],
['español', 'espanol', 'spanish'],
['français', 'francais', 'french'],
['it', 'information technology'],
['mis', 'management information systems'],
['psych', 'psychology'],
['seo', 'search engine optimization'],
['ux', 'user experience'],
# Other Terms
['autocad', 'auto cad', 'cad'],
['aws', 'amazon web services'],
['css', 'cascading style sheets'],
['excel', 'microsoft excel', 'msft excel'],
['hr', 'human resources'],
['HTML5', 'HTML'],
['iot', 'internet of things'],
['javascript', 'js', 'java script', 'react', 'typescript', 'jquery'],
['management', 'mgmt'],
['os', 'operating system'],
['photo', 'photography'],
['vb', 'visual basic'],
['vba', 'excel'],
['usa', 'united states of america', 'america', 'murika'],
['virtual reality', 'VR'],
['chemistri', 'chemistry']
]
| agpl-3.0 | Python |
39162536ba8f3cf02c15a482b19cbf9178d59958 | add a try..finally | Roguelazer/fakemtpd | fakemtpd/better_lockfile.py | fakemtpd/better_lockfile.py | import fcntl
import lockfile
import logging
import os
class BetterLockfile(object):
    """
    A lockfile (matching the specification of the builtin lockfile class)
    based off of lockf. Only uses a single lock file rather than one per process/thread.

    The lock itself is an advisory ``fcntl.flock`` on a single file at
    ``path``; the file is opened in append mode so callers can also write
    data into it (see :attr:`file`).
    """
    def __init__(self, path):
        # path: filesystem location of the lock file (created if absent).
        self.path = path
        self.lock_file = None
        try:
            self.lock_file = open(self.path, 'a')
        except:
            # Any failure to open (permissions, missing directory, ...)
            # is surfaced as a generic lock error.
            raise lockfile.LockError()
        # True only while *this* instance holds the flock.
        self._has_lock = False
    @property
    def file(self):
        """Get a handle to the underlying lock file (to write out data to)"""
        return self.lock_file
    def acquire(self):
        """Take the exclusive lock; raise AlreadyLocked if already held."""
        logging.info("Locking %s", self.path)
        try:
            # LOCK_NB makes this non-blocking: fail fast instead of waiting.
            fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
            self._has_lock = True
        except IOError, e:
            print e
            raise lockfile.AlreadyLocked()
        logging.info("Locked %s", self.path)
    def break_lock(self):
        """Can't break posix locks, sorry man"""
        raise lockfile.LockError()
    @property
    def i_am_locking(self):
        # True when this instance (not merely some process) holds the lock.
        return self._has_lock
    @property
    def is_locked(self):
        """Whether anyone holds the lock, probed by a try-lock/unlock."""
        if self._has_lock:
            return True
        try:
            # Briefly grab and release the lock; success means it was free.
            fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
            fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_UN)
            return False
        except IOError:
            return True
    def release(self):
        """Drop the lock; raise NotLocked if this instance isn't holding it."""
        logging.info("Releasing lock on %s", self.path)
        if self.i_am_locking:
            fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_UN)
            self._has_lock = False
        else:
            raise lockfile.NotLocked()
        logging.info("Unlocked %s", self.path)
    def destroy(self):
        """Release (if held), close the handle, and delete the lock file."""
        try:
            if self.i_am_locking:
                self.release()
            self.lock_file.close()
        finally:
            # Remove the file even if release/close raised.
            os.unlink(self.path)
    def __enter__(self):
        self.acquire()
        return self
    def __exit__(self, *args):
        self.release()
| import fcntl
import lockfile
import logging
import os
class BetterLockfile(object):
"""
A lockfile (matching the specification of the builtin lockfile class)
based off of lockf. Only uses a single lock file rather than one per process/thread.
"""
def __init__(self, path):
self.path = path
self.lock_file = None
try:
self.lock_file = open(self.path, 'a')
except:
raise lockfile.LockError()
self._has_lock = False
@property
def file(self):
"""Get a handle to the underlying lock file (to write out data to)"""
return self.lock_file
def acquire(self):
logging.info("Locking %s", self.path)
try:
fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
self._has_lock = True
except IOError, e:
print e
raise lockfile.AlreadyLocked()
logging.info("Locked %s", self.path)
def break_lock(self):
"""Can't break posix locks, sorry man"""
raise lockfile.LockError()
@property
def i_am_locking(self):
return self._has_lock
@property
def is_locked(self):
if self._has_lock:
return True
try:
fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_EX|fcntl.LOCK_NB)
fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_UN)
return False
except IOError:
return True
def release(self):
logging.info("Releasing lock on %s", self.path)
if self.i_am_locking:
fcntl.flock(self.lock_file.fileno(), fcntl.LOCK_UN)
self._has_lock = False
else:
raise lockfile.NotLocked()
logging.info("Unlocked %s", self.path)
def destroy(self):
if self.i_am_locking:
self.release()
self.lock_file.close()
os.unlink(self.path)
def __enter__(self):
self.acquire()
return self
def __exit__(self, *args):
self.release()
| isc | Python |
5984db9c3c4da1649719c8e7caa6f2ce4b43bc9a | fix condition for test submission check | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | custom/enikshay/integrations/utils.py | custom/enikshay/integrations/utils.py | from corehq.apps.locations.models import SQLLocation
from custom.enikshay.exceptions import NikshayLocationNotFound, ENikshayCaseNotFound
from custom.enikshay.case_utils import (
get_person_case_from_episode,
get_lab_referral_from_test,
)
def _is_submission_from_test_location(person_case):
    """Return True when the person case is owned by a test location.

    Looks up the SQLLocation whose id is the case's owner and reads its
    ``is_test`` metadata flag; a *missing* flag defaults to "yes" (test).

    Raises NikshayLocationNotFound when the owner location does not exist.
    """
    try:
        phi_location = SQLLocation.objects.get(location_id=person_case.owner_id)
    except SQLLocation.DoesNotExist:
        raise NikshayLocationNotFound(
            "Location with id {location_id} not found. This is the owner for person with id: {person_id}"
            .format(location_id=person_case.owner_id, person_id=person_case.case_id)
        )
    return phi_location.metadata.get('is_test', "yes") == "yes"
def is_valid_person_submission(person_case):
    """True when the person case is owned by a real (non-test) location."""
    return not _is_submission_from_test_location(person_case)
def is_valid_episode_submission(episode_case):
    """True when the episode's person exists and is not at a test location."""
    try:
        person_case = get_person_case_from_episode(episode_case.domain, episode_case)
    except ENikshayCaseNotFound:
        # No linked person case: treat the submission as invalid.
        return False
    return not _is_submission_from_test_location(person_case)
def is_valid_test_submission(test_case):
    """True when the lab test's DMC location is explicitly non-test.

    Returns False when the test has no lab referral; raises
    NikshayLocationNotFound if the referral's owner location is missing.
    """
    try:
        lab_referral_case = get_lab_referral_from_test(test_case.domain, test_case.get_id)
    except ENikshayCaseNotFound:
        # No lab referral attached to this test: not a valid submission.
        return False
    try:
        dmc_location = SQLLocation.objects.get(location_id=lab_referral_case.owner_id)
    except SQLLocation.DoesNotExist:
        raise NikshayLocationNotFound(
            "Location with id {location_id} not found. This is the owner for lab referral with id: \
            {lab_referral_id}"
            .format(location_id=lab_referral_case.owner_id, lab_referral_id=lab_referral_case.case_id)
        )
    # A missing is_test flag defaults to "yes", so only locations explicitly
    # marked "no" are accepted as real submissions.
    return dmc_location.metadata.get('is_test', "yes") == "no"
| from corehq.apps.locations.models import SQLLocation
from custom.enikshay.exceptions import NikshayLocationNotFound, ENikshayCaseNotFound
from custom.enikshay.case_utils import (
get_person_case_from_episode,
get_lab_referral_from_test,
)
def _is_submission_from_test_location(person_case):
try:
phi_location = SQLLocation.objects.get(location_id=person_case.owner_id)
except SQLLocation.DoesNotExist:
raise NikshayLocationNotFound(
"Location with id {location_id} not found. This is the owner for person with id: {person_id}"
.format(location_id=person_case.owner_id, person_id=person_case.case_id)
)
return phi_location.metadata.get('is_test', "yes") == "yes"
def is_valid_person_submission(person_case):
return not _is_submission_from_test_location(person_case)
def is_valid_episode_submission(episode_case):
try:
person_case = get_person_case_from_episode(episode_case.domain, episode_case)
except ENikshayCaseNotFound:
return False
return not _is_submission_from_test_location(person_case)
def is_valid_test_submission(test_case):
try:
lab_referral_case = get_lab_referral_from_test(test_case.domain, test_case.get_id)
except ENikshayCaseNotFound:
return False
try:
dmc_location = SQLLocation.objects.get(location_id=lab_referral_case.owner_id)
except SQLLocation.DoesNotExist:
raise NikshayLocationNotFound(
"Location with id {location_id} not found. This is the owner for lab referral with id: \
{lab_referral_id}"
.format(location_id=lab_referral_case.owner_id, lab_referral_id=lab_referral_case.case_id)
)
return dmc_location.metadata.get('is_test', "yes") == "yes"
| bsd-3-clause | Python |
b2fecb921e04f81544beaca47d150c2b60d0f8a9 | add option to hide a tag | mxm/wgmanager | core/models.py | core/models.py | from django.db import models
from myauth.models import MyUser
from datetime import datetime
from django.utils.translation import ugettext as _
class Bill(models.Model):
    """A bill that shoppings can be charged against (see Shopping.bill)."""
    name = models.CharField(max_length=1023)
    # Presumably False once the bill has been settled -- confirm in views.
    is_open = models.BooleanField(default=True)
    def __str__(self):
        return self.name
class Payer(models.Model):
    """Links a user to a bill together with the share they pay."""
    user = models.ForeignKey(MyUser)
    bill = models.ForeignKey(Bill, related_name="payers")
    # Share of the bill this user covers (2 decimal places, up to 9.99).
    fraction = models.DecimalField(max_digits=3, decimal_places=2)
    def __str__(self):
        return _("Bill '%(bill)s': %(user)s with fraction %(fraction)s") % {'bill': self.bill, 'user': self.user, 'fraction': self.fraction}
class Shop(models.Model):
    """A store where shoppings take place (see Shopping.shop)."""
    name = models.CharField(max_length=30)
    def __str__(self):
        return self.name
class Tag(models.Model):
    """A label that can be attached to shoppings (see Shopping.tags)."""
    name = models.CharField(max_length=30)
    # When False the tag still exists (and stays on old shoppings) but is
    # presumably hidden from selection lists -- confirm against the views.
    visible = models.BooleanField(default=True)
    def __str__(self):
        return self.name
class Shopping(models.Model):
    """One shopping trip: who bought, where, when, and for how much."""
    user = models.ForeignKey(MyUser)
    # NOTE(review): default is datetime.now (a datetime) on a DateField --
    # presumably date.today was the natural choice; verify behaviour.
    time = models.DateField(default=datetime.now)
    shop = models.ForeignKey(Shop, related_name="shoppings")
    expenses = models.DecimalField(max_digits=10,decimal_places=2)
    num_products = models.PositiveIntegerField()
    tags = models.ManyToManyField(Tag, related_name="tags")
    # Optional: a shopping may be recorded before being assigned to a bill.
    bill = models.ForeignKey(Bill, null=True, blank=True, related_name="shoppings")
    comment = models.CharField(max_length=1023,blank=True, null=True)
    def __str__(self):
        return _("%(expenses)s with %(products)d items by %(user)s at %(shop)s") % {'expenses': self.expenses, 'user': self.user, 'products': self.num_products, 'shop': self.shop}
| from django.db import models
from myauth.models import MyUser
from datetime import datetime
from django.utils.translation import ugettext as _
class Bill(models.Model):
name = models.CharField(max_length=1023)
is_open = models.BooleanField(default=True)
def __str__(self):
return self.name
class Payer(models.Model):
user = models.ForeignKey(MyUser)
bill = models.ForeignKey(Bill, related_name="payers")
fraction = models.DecimalField(max_digits=3, decimal_places=2)
def __str__(self):
return _("Bill '%(bill)s': %(user)s with fraction %(fraction)s") % {'bill': self.bill, 'user': self.user, 'fraction': self.fraction}
class Shop(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Tag(models.Model):
name = models.CharField(max_length=30)
def __str__(self):
return self.name
class Shopping(models.Model):
user = models.ForeignKey(MyUser)
time = models.DateField(default=datetime.now)
shop = models.ForeignKey(Shop, related_name="shoppings")
expenses = models.DecimalField(max_digits=10,decimal_places=2)
num_products = models.PositiveIntegerField()
tags = models.ManyToManyField(Tag, related_name="tags")
bill = models.ForeignKey(Bill, null=True, blank=True, related_name="shoppings")
comment = models.CharField(max_length=1023,blank=True, null=True)
def __str__(self):
return _("%(expenses)s with %(products)d items by %(user)s at %(shop)s") % {'expenses': self.expenses, 'user': self.user, 'products': self.num_products, 'shop': self.shop}
| agpl-3.0 | Python |
0b456b0544b75f690289559b5e0ec17f7e98c8d6 | bump version to 0.11.0 | ivelum/cub-python | cub/version.py | cub/version.py | version = '0.11.0'
| version = '0.10.0'
| mit | Python |
89193a6571dd74501533160b409cad8835c51625 | Handle a Django deprecation properly. | benspaulding/django-gcframe | gcframe/tests/urls.py | gcframe/tests/urls.py | # -*- coding: utf-8 -*-
""" Simple urls for use in testing the gcframe app. """
from __future__ import unicode_literals
try:
from django.conf.urls import patterns, url
except ImportError: # Django 1.3
from django.conf.urls.defaults import patterns, url
from .views import normal, framed, exempt
urlpatterns = patterns('',
url(r'normal/$', normal, name='gcframe-test-normal'),
url(r'framed/$', framed, name='gcframe-test-framed'),
url(r'exempt/$', exempt, name='gcframe-test-exempt'),
)
| # -*- coding: utf-8 -*-
""" Simple urls for use in testing the gcframe app. """
from __future__ import unicode_literals
# The defaults module is deprecated in Django 1.5, but necessary to
# support Django 1.3. drop ``.defaults`` when dropping 1.3 support.
from django.conf.urls.defaults import patterns, url
from .views import normal, framed, exempt
urlpatterns = patterns('',
url(r'normal/$', normal, name='gcframe-test-normal'),
url(r'framed/$', framed, name='gcframe-test-framed'),
url(r'exempt/$', exempt, name='gcframe-test-exempt'),
)
| bsd-3-clause | Python |
56b761bac5b0d94632b698a1a08be10254b533e3 | Bump to Airflow 2.0.0a1 (#11507) | cfei18/incubator-airflow,nathanielvarona/airflow,mrkm4ntr/incubator-airflow,airbnb/airflow,bolkedebruin/airflow,cfei18/incubator-airflow,nathanielvarona/airflow,apache/airflow,nathanielvarona/airflow,sekikn/incubator-airflow,apache/airflow,Acehaidrey/incubator-airflow,nathanielvarona/airflow,airbnb/airflow,bolkedebruin/airflow,apache/airflow,lyft/incubator-airflow,cfei18/incubator-airflow,nathanielvarona/airflow,Acehaidrey/incubator-airflow,danielvdende/incubator-airflow,dhuang/incubator-airflow,apache/incubator-airflow,bolkedebruin/airflow,Acehaidrey/incubator-airflow,apache/airflow,airbnb/airflow,danielvdende/incubator-airflow,bolkedebruin/airflow,mistercrunch/airflow,apache/airflow,DinoCow/airflow,Acehaidrey/incubator-airflow,lyft/incubator-airflow,Acehaidrey/incubator-airflow,sekikn/incubator-airflow,nathanielvarona/airflow,cfei18/incubator-airflow,danielvdende/incubator-airflow,mistercrunch/airflow,Acehaidrey/incubator-airflow,dhuang/incubator-airflow,sekikn/incubator-airflow,bolkedebruin/airflow,mrkm4ntr/incubator-airflow,apache/incubator-airflow,cfei18/incubator-airflow,mrkm4ntr/incubator-airflow,danielvdende/incubator-airflow,apache/airflow,DinoCow/airflow,DinoCow/airflow,mistercrunch/airflow,cfei18/incubator-airflow,sekikn/incubator-airflow,danielvdende/incubator-airflow,DinoCow/airflow,mrkm4ntr/incubator-airflow,dhuang/incubator-airflow,danielvdende/incubator-airflow,mistercrunch/airflow,lyft/incubator-airflow,lyft/incubator-airflow,apache/incubator-airflow,apache/incubator-airflow,dhuang/incubator-airflow,airbnb/airflow | airflow/version.py | airflow/version.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
version = '2.0.0a1'
| #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
version = '2.0.0.dev0'
| apache-2.0 | Python |
ad4affa18aef40ed7c9551189d3de024c3af30b7 | Complete bubble_sort() & short_bubble_sort() | bowen0701/algorithms_data_structures | alg_bubble_sort.py | alg_bubble_sort.py | def bubble_sort(a_list):
"""Naive Bubble Sort algortihm."""
for pass_num in reversed(range(len(a_list))):
for i in range(pass_num):
if a_list[i] > a_list[i + 1]:
temp = a_list[i]
a_list[i] = a_list[i + 1]
a_list[i + 1] = temp
def short_bubble_sort(a_list):
    """Bubble Sort, in place, stopping early once a pass makes no swaps."""
    upper = len(a_list) - 1
    swapped = True
    while upper > 0 and swapped:
        swapped = False
        for idx in range(upper):
            if a_list[idx] > a_list[idx + 1]:
                # Tuple swap of the out-of-order neighbours.
                a_list[idx], a_list[idx + 1] = a_list[idx + 1], a_list[idx]
                swapped = True
        upper -= 1
def main():
    """Demonstrate both bubble-sort variants on the same sample data."""
    data = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    for label, sort_fn in (('By bubble sort: ', bubble_sort),
                           ('By short_bubble sort: ', short_bubble_sort)):
        a_list = list(data)
        print('a_list: {}'.format(a_list))
        print(label)
        sort_fn(a_list)
        print(a_list)
if __name__ == '__main__':
main()
| def bubble_sort():
"""Bubble Sort algortihm."""
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
2343ecec512aa31b0385b4783c00fb8dbb9060e4 | Complete make_change_recur() | bowen0701/algorithms_data_structures | alg_make_change.py | alg_make_change.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Make Change.
Compute how many distinct ways you can make change that amount.
If that amount of money cannot be made up by any combination of the
coins, return -1.
Assume that you have an infinite number of each kind of coin.
"""
def make_change_recur(amount, coins, n):
    """Count distinct combinations of the first *n* coins summing to *amount*.

    Plain (exponential-time) recursion: each coin denomination is either
    used again or retired, and the two counts are added.
    """
    # Overshot the target: this branch contributes nothing.
    if amount < 0:
        return 0
    # Exact change reached: exactly one way (take no more coins).
    if amount == 0:
        return 1
    # Denominations exhausted while money remains.
    if n <= 0:
        return 0
    with_last = make_change_recur(amount - coins[n - 1], coins, n)
    without_last = make_change_recur(amount, coins, n - 1)
    return with_last + without_last
def make_change_memo(amount, coins):
    """Make change by top-bottom dynamic programming:
    recursion + memoization.

    Counts the distinct coin combinations summing to *amount* (order does
    not matter; unlimited supply of each coin).  Returns 0 when no
    combination exists, matching make_change_recur.
    """
    memo = {}

    def _count(amt, n):
        # Same recurrence as make_change_recur, cached on (amt, n).
        if amt == 0:
            return 1
        if amt < 0 or n <= 0:
            return 0
        key = (amt, n)
        if key not in memo:
            memo[key] = _count(amt - coins[n - 1], n) + _count(amt, n - 1)
        return memo[key]

    return _count(amount, len(coins))
def make_change_dp(amount, coins):
    """Make change by bottom-up dynamic programming.

    ways[a] counts the distinct combinations summing to a; iterating coins
    in the outer loop counts combinations (not ordered sequences).
    O(amount * len(coins)) time, O(amount) space.  Returns 0 when no
    combination exists, matching make_change_recur.
    """
    ways = [0] * (amount + 1)
    ways[0] = 1  # One way to make 0: use no coins.
    for coin in coins:
        for amt in range(coin, amount + 1):
            ways[amt] += ways[amt - coin]
    return ways[amount]
def main():
    """Demo: time the recursive change-maker on a small example."""
    import time

    amount, coins = 5, [1, 2, 3]  # Expected number of combinations: 5.
    tic = time.time()
    result = make_change_recur(amount, coins, len(coins))
    print('Make change by recursion: {}'.format(result))
    print('Time: {}'.format(time.time() - tic))
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Make Change.
Compute how many ways you can make up that amount.
If that amount of money cannot be made up by any combination of the
coins, return -1.
Assume that you have an infinite number of each kind of coin.
"""
def make_change_recur(amount, coins):
pass
def make_change_memo(amount, coins):
pass
def make_change_dp(amount, coins):
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
f9b4cd8b5d49168749e5fac470f0e04078b9a7c6 | fix flake8 | brianjgeiger/osf.io,mattclark/osf.io,mfraezz/osf.io,chrisseto/osf.io,chennan47/osf.io,cwisecarver/osf.io,cslzchen/osf.io,felliott/osf.io,laurenrevere/osf.io,aaxelb/osf.io,icereval/osf.io,erinspace/osf.io,brianjgeiger/osf.io,icereval/osf.io,mattclark/osf.io,cwisecarver/osf.io,laurenrevere/osf.io,erinspace/osf.io,crcresearch/osf.io,CenterForOpenScience/osf.io,TomBaxter/osf.io,leb2dg/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,chrisseto/osf.io,erinspace/osf.io,pattisdr/osf.io,cwisecarver/osf.io,pattisdr/osf.io,adlius/osf.io,caneruguz/osf.io,felliott/osf.io,Johnetordoff/osf.io,CenterForOpenScience/osf.io,caseyrollins/osf.io,caseyrollins/osf.io,brianjgeiger/osf.io,caneruguz/osf.io,laurenrevere/osf.io,HalcyonChimera/osf.io,cslzchen/osf.io,HalcyonChimera/osf.io,chennan47/osf.io,felliott/osf.io,chrisseto/osf.io,crcresearch/osf.io,caneruguz/osf.io,baylee-d/osf.io,felliott/osf.io,mfraezz/osf.io,aaxelb/osf.io,CenterForOpenScience/osf.io,sloria/osf.io,Johnetordoff/osf.io,crcresearch/osf.io,chrisseto/osf.io,binoculars/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,mfraezz/osf.io,saradbowman/osf.io,aaxelb/osf.io,mfraezz/osf.io,baylee-d/osf.io,Johnetordoff/osf.io,leb2dg/osf.io,binoculars/osf.io,adlius/osf.io,brianjgeiger/osf.io,baylee-d/osf.io,mattclark/osf.io,binoculars/osf.io,adlius/osf.io,leb2dg/osf.io,TomBaxter/osf.io,adlius/osf.io,cwisecarver/osf.io,HalcyonChimera/osf.io,caseyrollins/osf.io,leb2dg/osf.io,saradbowman/osf.io,caneruguz/osf.io,cslzchen/osf.io,sloria/osf.io,chennan47/osf.io,cslzchen/osf.io,sloria/osf.io,icereval/osf.io,aaxelb/osf.io,TomBaxter/osf.io | api_tests/utils.py | api_tests/utils.py | from urlparse import urlparse
from addons.osfstorage import settings as osfstorage_settings
def create_test_file(node, user, filename='test_file', create_guid=True):
    """Create a one-version osfstorage file under *node* for use in tests.

    A GUID is minted for the file unless create_guid is False, and a single
    version attributed to *user* is attached whose metadata claims size 1337
    and content type img/png.  Returns the new file node.
    """
    osfstorage = node.get_addon('osfstorage')
    root_node = osfstorage.get_root()
    test_file = root_node.append_file(filename)
    if create_guid:
        test_file.get_guid(create=True)
    test_file.create_version(user, {
        'object': '06d80e',
        'service': 'cloud',
        osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
    }, {
        'size': 1337,
        'contentType': 'img/png'
    }).save()
    return test_file
def urlparse_drop_netloc(url):
    """Return the path (plus query string, if any) of *url*, dropping
    scheme and network location."""
    parsed = urlparse(url)
    path, query = parsed[2], parsed[4]
    return path + '?' + query if query else path
| from urlparse import urlparse
from addons.osfstorage import settings as osfstorage_settings
from framework.auth.core import Auth
def create_test_file(node, user, filename='test_file', create_guid=True):
osfstorage = node.get_addon('osfstorage')
root_node = osfstorage.get_root()
test_file = root_node.append_file(filename)
if create_guid:
test_file.get_guid(create=True)
test_file.create_version(user, {
'object': '06d80e',
'service': 'cloud',
osfstorage_settings.WATERBUTLER_RESOURCE: 'osf',
}, {
'size': 1337,
'contentType': 'img/png'
}).save()
return test_file
def urlparse_drop_netloc(url):
url = urlparse(url)
if url[4]:
return url[2] + '?' + url[4]
return url[2]
| apache-2.0 | Python |
8cdc7ce27f8445da96b13fc20d30da22bf62aa88 | fix unlinked cote-st-luc email | opencivicdata/scrapers-ca,opencivicdata/scrapers-ca | ca_qc_cote_saint_luc/people.py | ca_qc_cote_saint_luc/people.py | # coding: utf8
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
from urlparse import urljoin
import re
COUNCIL_PAGE = 'http://www.cotesaintluc.org/Administration'
class CoteSaintLucPersonScraper(Scraper):
    """Scrapes mayor and councillor contact info from the Côte-Saint-Luc
    administration pages."""
    def get_people(self):
        """Yield a Legislator for the mayor, then one per councillor."""
        page = lxmlize(COUNCIL_PAGE)
        mayor_url = page.xpath('//a[contains(text(), "Mayor")]/@href')[0]
        yield self.scrape_mayor(mayor_url)
        councillors_url = page.xpath('//a[contains(text(), "Councillors")]/@href')[0]
        cpage = lxmlize(councillors_url)
        # Rows embedding an image are councillor rows; the last match is
        # dropped -- presumably not a councillor (confirm against the page).
        councillor_rows = cpage.xpath('//tr[td//img]')[:-1]
        for councillor_row in councillor_rows:
            img_cell, info_cell = tuple(councillor_row)
            # Strip the "Councillor " prefix to get the bare name.
            name = info_cell.xpath(
                'string(.//span[contains(text(), "Councillor")])')[len('Councillor '):]
            district = info_cell.xpath('string(.//p[contains(text(), "District")])')
            email = info_cell.xpath('string(.//a[contains(@href, "mailto:")])')
            if not email:
                # Fallback for unlinked addresses: plain text following the
                # "E-mail" label rather than a mailto: anchor.
                email = info_cell.xpath('string(.//strong[contains(text(), "E-mail")]/following-sibling::text())')
            phone = info_cell.xpath(
                'string(.//p[contains(.//text(), "Telephone:")])').split(':')[1]
            # NOTE(review): '//img' searches the whole document and img tags
            # carry src, not href -- this likely yields an empty string; was
            # './/img/@src' intended?  Verify before relying on the photo.
            img_url_rel = img_cell.xpath('string(//img/@href)')
            img_url = urljoin(councillors_url, img_url_rel)
            p = Legislator(name=name, post_id=district, role='Conseiller')
            p.add_source(COUNCIL_PAGE)
            p.add_source(councillors_url)
            p.add_contact('email', email, None)
            p.add_contact('voice', phone, 'legislature')
            p.image = img_url
            yield p
    def scrape_mayor(self, url):
        """Build the mayor's Legislator record from the mayor's page."""
        page = lxmlize(url)
        # Drop the leading "Mayor " from the heading text.
        name = page.xpath(
            'string(//span[contains(text(), "Mayor")])')[len('Mayor '):]
        email = page.xpath('.//a[contains(@href, "mailto:")]/text()')[0]
        phone = page.xpath('//table[1]/tbody/tr/td[1]/p[last()]/text()')[2].replace('Telephone: ', '')
        p = Legislator(name=name, post_id=u'Côte-Saint-Luc', role='Maire')
        p.add_source(COUNCIL_PAGE)
        p.add_source(url)
        p.image = page.xpath('.//img/@src')[0]
        p.add_source(url)
        p.add_contact('email', email, None)
        p.add_contact('voice', phone, 'legislature')
        return p
| # coding: utf8
from pupa.scrape import Scraper
from utils import lxmlize, CanadianLegislator as Legislator
from urlparse import urljoin
import re
COUNCIL_PAGE = 'http://www.cotesaintluc.org/Administration'
class CoteSaintLucPersonScraper(Scraper):
def get_people(self):
page = lxmlize(COUNCIL_PAGE)
mayor_url = page.xpath('//a[contains(text(), "Mayor")]/@href')[0]
yield self.scrape_mayor(mayor_url)
councillors_url = page.xpath('//a[contains(text(), "Councillors")]/@href')[0]
cpage = lxmlize(councillors_url)
councillor_rows = cpage.xpath('//tr[td//img]')[:-1]
for councillor_row in councillor_rows:
img_cell, info_cell = tuple(councillor_row)
name = info_cell.xpath(
'string(.//span[contains(text(), "Councillor")])')[len('Councillor '):]
district = info_cell.xpath('string(.//p[contains(text(), "District")])')
email = info_cell.xpath('string(.//a[contains(@href, "mailto:")])')
phone = info_cell.xpath(
'string(.//p[contains(.//text(), "Telephone:")])').split(':')[1]
img_url_rel = img_cell.xpath('string(//img/@href)')
img_url = urljoin(councillors_url, img_url_rel)
p = Legislator(name=name, post_id=district, role='Conseiller')
p.add_source(COUNCIL_PAGE)
p.add_source(councillors_url)
p.add_contact('email', email, None)
p.add_contact('voice', phone, 'legislature')
p.image = img_url
yield p
def scrape_mayor(self, url):
page = lxmlize(url)
name = page.xpath(
'string(//span[contains(text(), "Mayor")])')[len('Mayor '):]
email = page.xpath('.//a[contains(@href, "mailto:")]/text()')[0]
phone = page.xpath('//table[1]/tbody/tr/td[1]/p[last()]/text()')[2].replace('Telephone: ', '')
p = Legislator(name=name, post_id=u'Côte-Saint-Luc', role='Maire')
p.add_source(COUNCIL_PAGE)
p.add_source(url)
p.image = page.xpath('.//img/@src')[0]
p.add_source(url)
p.add_contact('email', email, None)
p.add_contact('voice', phone, 'legislature')
return p
| mit | Python |
37e88488fc8a4cf5e8ea7b63c7cd4b89ca17a60b | Add force_bytes to models | shermanng10/superathletebuilder,shermanng10/superathletebuilder,shermanng10/superathletebuilder,shermanng10/superathletebuilder | athletes/models.py | athletes/models.py | from django.db import models
from django.utils import timezone
from django.utils.encoding import force_bytes
class Sport(models.Model):
    """A sport that leagues, teams and athletes belong to."""
    name = models.CharField(max_length=20)
    # Timestamps maintained automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # NOTE(review): force_bytes returns bytes; on Python 3 __str__ must
        # return str -- confirm this code targets Python 2.
        return force_bytes(self.name)
class League(models.Model):
    """A league within a single sport."""
    name = models.CharField(max_length=20)
    sport = models.ForeignKey(Sport)
    # Timestamps maintained automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # NOTE(review): force_bytes returns bytes; see Sport.__str__.
        return force_bytes(self.name)
class Team(models.Model):
    """A team belonging to a sport and a league."""
    name = models.CharField(max_length=20)
    sport = models.ForeignKey(Sport)
    league = models.ForeignKey(League)
    # Timestamps maintained automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # NOTE(review): force_bytes returns bytes; see Sport.__str__.
        return force_bytes(self.name)
class Athlete(models.Model):
    """An individual athlete, optionally affiliated with a league and team."""
    first_name = models.CharField(max_length=25)
    last_name = models.CharField(max_length=25)
    age = models.PositiveIntegerField()
    gender = models.CharField(max_length=10)
    website = models.URLField()
    sport = models.ForeignKey(Sport)
    # League and team are optional (blank+null) so unaffiliated athletes
    # can be stored.
    league = models.ForeignKey(League, blank=True, null=True)
    team = models.ForeignKey(Team, blank=True, null=True)
    # Timestamps maintained automatically by Django on insert/update.
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        # Full display name; NOTE(review): force_bytes returns bytes,
        # see Sport.__str__.
        return force_bytes('%s %s' % (self.first_name, self.last_name))
| from django.db import models
from django.utils import timezone
from django.utils.encoding import force_bytes
class Sport(models.Model):
name = models.CharField(max_length=20)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return force_bytes(self.name)
class League(models.Model):
name = models.CharField(max_length=20)
sport = models.ForeignKey(Sport)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return force_bytes(self.name)
class Team(models.Model):
name = models.CharField(max_length=20)
sport = models.ForeignKey(Sport)
league = models.ForeignKey(League)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return force_bytes(self.name)
class Athlete(models.Model):
first_name = models.CharField(max_length=25)
last_name = models.CharField(max_length=25)
age = models.PositiveIntegerField()
gender = models.CharField(max_length=10)
website = models.URLField()
sport = models.ForeignKey(Sport)
league = models.ForeignKey(League, blank=True, null=True)
team = models.ForeignKey(Team, blank=True, null=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def __str__(self):
return force_bytes('%s %s' % (self.first_name, self.last_name))
| mit | Python |
13834a2eb8c3669f7b58232a5d837147d612a93c | Fix module name. | AlpacaDB/chainer,jnishi/chainer,pfnet/chainer,AlpacaDB/chainer,hvy/chainer,ktnyt/chainer,tkerola/chainer,okuta/chainer,kiyukuta/chainer,ktnyt/chainer,keisuke-umezawa/chainer,okuta/chainer,wkentaro/chainer,cupy/cupy,cupy/cupy,niboshi/chainer,chainer/chainer,chainer/chainer,kikusu/chainer,hvy/chainer,jnishi/chainer,chainer/chainer,ktnyt/chainer,anaruse/chainer,aonotas/chainer,keisuke-umezawa/chainer,niboshi/chainer,okuta/chainer,keisuke-umezawa/chainer,benob/chainer,cupy/cupy,ronekko/chainer,hvy/chainer,niboshi/chainer,delta2323/chainer,keisuke-umezawa/chainer,benob/chainer,niboshi/chainer,okuta/chainer,wkentaro/chainer,wkentaro/chainer,hvy/chainer,ktnyt/chainer,kikusu/chainer,wkentaro/chainer,ysekky/chainer,chainer/chainer,cupy/cupy,rezoo/chainer,jnishi/chainer,jnishi/chainer,kashif/chainer | chainer/functions/math/clip.py | chainer/functions/math/clip.py | import numpy
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
class Clip(function.Function):
    """Clips (limits) elements of input variable."""

    def __init__(self, x_min, x_max):
        # Bounds must be plain floats so they can be passed straight into
        # the CPU/GPU kernels below.
        if not isinstance(x_min, float):
            raise TypeError('x_min must be float value')
        if not isinstance(x_max, float):
            raise TypeError('x_max must be float value')
        # x_min must be lesser than x_max.
        assert x_min < x_max
        self.x_min = x_min
        self.x_max = x_max

    def check_type_forward(self, in_types):
        # Exactly one float32 input is accepted.
        type_check.expect(in_types.size() == 1)
        x_type, = in_types
        type_check.expect(x_type.dtype == numpy.float32)

    def forward_cpu(self, x):
        return utils.force_array(
            numpy.clip(x[0], self.x_min, self.x_max)
        ).astype(numpy.float32),

    def backward_cpu(self, x, gy):
        # Gradient passes through only strictly inside (x_min, x_max);
        # it is zero at and beyond the clip edges.
        return utils.force_array(
            gy[0] * (self.x_min < x[0]) * (x[0] < self.x_max)
        ).astype(numpy.float32),

    def forward_gpu(self, x):
        return cuda.cupy.clip(x[0], self.x_min, self.x_max),

    def backward_gpu(self, x, gy):
        # Same strict-inequality mask as backward_cpu, as an elementwise kernel.
        gx = cuda.elementwise(
            'T x, T gy, T x_min, T x_max', 'T gx',
            'gx = ((x > x_min) & (x < x_max)) ? gy : 0',
            'clip_bwd')(x[0], gy[0], self.x_min, self.x_max)
        return gx,
def clip(x, x_min, x_max):
    """Clips (limits) elements of input variable.

    Given an interval ``[x_min, xmax]``, elements outside the interval are
    clipped to the interval edges.

    Args:
        x (~chainer.Variable): Input variable to be clipped.
        x_min (float): Minimum value.
        x_max (float): Maximum value.

    Returns:
        ~chainer.Variable: Output variable.

    """
    # Instantiate the function object first, then apply it to the input.
    clip_func = Clip(x_min, x_max)
    return clip_func(x)
| import numpy
from chainer import cuda
from chainer import function
from chainer import utils
from chainer.utils import type_check
class Clip(function.Function):
"""Clips (limits) elements of input variable."""
def __init__(self, x_min, x_max):
if not isinstance(x_min, float):
raise TypeError('x_min must be float value')
if not isinstance(x_max, float):
raise TypeError('x_max must be float value')
# x_min must be lesser than x_max.
assert x_min < x_max
self.x_min = x_min
self.x_max = x_max
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(x_type.dtype == numpy.float32)
def forward_cpu(self, x):
return utils.force_array(
numpy.clip(x[0], self.x_min, self.x_max)
).astype(numpy.float32),
def backward_cpu(self, x, gy):
return utils.force_array(
gy[0] * (self.x_min < x[0]) * (x[0] < self.x_max)
).astype(numpy.float32),
def forward_gpu(self, x):
return cuda.clip(x[0], self.x_min, self.x_max),
def backward_gpu(self, x, gy):
gx = cuda.elementwise(
'T x, T gy, T x_min, T x_max', 'T gx',
'gx = ((x > x_min) & (x < x_max)) ? gy : 0',
'clip_bwd')(x[0], gy[0], self.x_min, self.x_max)
return gx,
def clip(x, x_min, x_max):
"""Clips (limits) elements of input variable.
Given an interval ``[x_min, xmax]``, elements outside the interval are
clipped to the interval edges.
Args:
x (~chainer.Variable): Input variable to be clipped.
x_min (float): Minimum value.
x_max (float): Maximum value.
Returns:
~chainer.Variable: Output variable.
"""
return Clip(x_min, x_max)(x)
| mit | Python |
51d7cb325110fe6f75211909df44319a80752484 | Update commit message | r0h4n/commons,rishubhjain/commons,Tendrl/commons | tendrl/common/tests/test_singleton.py | tendrl/common/tests/test_singleton.py | from tendrl.common.singleton import to_singleton
@to_singleton
class A(object):
def __init__(self):
pass
@to_singleton
class B(object):
def __init__(self):
pass
class TestSingleton(object):
def test_singleton(self, monkeypatch):
assert id(A()) == id(A())
def test_singletonB(self, monkeypatch):
assert id(B()) == id(B())
| from tendrl.common.singleton import to_singleton
@to_singleton
class A(object):
def __init__(self):
pass
class TestSingleton(object):
def test_singleton(self, monkeypatch):
assert id(A()) == id(A())
| lgpl-2.1 | Python |
242b3bfe70d90044d2496cbc8109cd703b3bccab | Make steps and out arguments optional and add defaults | MikeVasmer/GreenGraphCoursework | greengraph/command.py | greengraph/command.py | from argparse import ArgumentParser
from matplotlib import pyplot as plt
from graph import Greengraph
def process():
    """Command-line entry point: plot the amount of green land between two places.

    Parses ``--start``/``--end`` (required), an optional integer ``--steps``
    (number of sample points, default 10) and an optional ``--out`` output
    filename (default ``graph.png``), then saves the resulting plot.
    """
    parser = ArgumentParser(
        description="Produce graph quantifying the amount of green land between two locations")
    parser.add_argument("--start", required=True,
                        help="The starting location ")
    parser.add_argument("--end", required=True,
                        help="The ending location")
    # type=int: argparse would otherwise hand green_between() a string;
    # the argparse defaults replace the manual "if arguments.steps" fallbacks.
    parser.add_argument("--steps", type=int, default=10,
                        help="The number of steps between the starting and ending locations, defaults to 10")
    parser.add_argument("--out", default="graph.png",
                        help="The output filename, defaults to graph.png")
    arguments = parser.parse_args()

    mygraph = Greengraph(arguments.start, arguments.end)
    data = mygraph.green_between(arguments.steps)

    plt.plot(data)
    # TODO add a title and axis labels to this graph
    plt.savefig(arguments.out)


if __name__ == "__main__":
    process()
| from argparse import ArgumentParser
from matplotlib import pyplot as plt
from graph import Greengraph
def process():
parser = ArgumentParser(
description="Produce graph of green land between two locations")
parser.add_argument("--start", required=True,
help="The starting location ")
parser.add_argument("--end", required=True,
help="The ending location")
parser.add_argument("--steps", required=True,
help="The number of steps between the starting and ending locations")
parser.add_argument("--out", required=True,
help="The output filename")
arguments = parser.parse_args()
mygraph = Greengraph(arguments.start, arguments.end)
data = mygraph.green_between(arguments.steps)
plt.plot(data)
# TODO add a title and axis labels to this graph
plt.savefig(arguments.out)
if __name__ == "__main__":
process()
| mit | Python |
6bfb23294c2cc445479f4c8098b8e62647cf01bd | Validate that we can translate a NEWOBJECT into a FETCHOBJECT | richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation,richo/groundstation | test/test_notification_integration.py | test/test_notification_integration.py | import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from groundstation.utils import path2id
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
    """End-to-end check that a filesystem write is announced to a peer and
    answered with a FETCHOBJECT request."""

    def test_notifies_peer(self):
        read_sockets = []
        write_sockets = []
        def tick():
            # One select round with a 1s timeout over the current socket sets.
            return select.select(read_sockets, write_sockets, [], 1)
        # Wire up a listener/client pair over a unix socket in the test dir.
        addr = os.path.join(self.dir, "listener")
        listener = TestListener(addr)
        client = TestClient(addr)
        peer = listener.accept(PeerSocket)
        watcher = fs_watcher.FSWatcher(self.stations[0].store.object_root)
        read_sockets.append(client)
        read_sockets.append(watcher)
        # Writing to station 0 must wake the fs watcher.
        self.stations[0].write("trolololol")
        (sread, _, _) = tick()
        self.assertIn(watcher, sread)
        obj_name = path2id(watcher.read())
        # Announce the new object to the peer (NEWOBJECT flow).
        client.notify_new_object(self.stations[0], obj_name)
        client.send()
        peer.recv()
        data = peer.packet_queue.pop()
        gizmo = self.stations[1].gizmo_factory.hydrate(data, peer)
        assert gizmo is not None, "gizmo_factory returned None"
        gizmo.process()
        # The peer should respond by asking to fetch the announced object.
        peer.send()
        client.recv()
        data = client.packet_queue.pop()
        gizmo = self.stations[0].gizmo_factory.hydrate(data, peer)
        assert gizmo is not None, "gizmo_factory returned None"
        self.assertEqual(gizmo.verb, "FETCHOBJECT")
        self.assertEqual(gizmo.payload, obj_name)
        gizmo.process()
        # Stop the watcher thread so the test does not leak it.
        watcher.kill()
| import os
import select
import groundstation.fs_watcher as fs_watcher
from groundstation.peer_socket import PeerSocket
from integration_fixture import StationIntegrationFixture, \
TestListener, \
TestClient
class StationFSWatcherIntegration(StationIntegrationFixture):
def test_notifies_peer(self):
read_sockets = []
write_sockets = []
def tick():
return select.select(read_sockets, write_sockets, [], 1)
addr = os.path.join(self.dir, "listener")
listener = TestListener(addr)
client = TestClient(addr)
peer = listener.accept(PeerSocket)
watcher = fs_watcher.FSWatcher(self.stations[0].store.object_root)
read_sockets.append(client)
read_sockets.append(watcher)
self.stations[0].write("trolololol")
(sread, _, _) = tick()
self.assertIn(watcher, sread)
obj_name = watcher.read()
client.notify_new_object(self.stations[0], obj_name)
client.send()
peer.recv()
data = peer.packet_queue.pop()
gizmo = self.stations[1].gizmo_factory.hydrate(data, peer)
assert gizmo is not None, "gizmo_factory returned None"
gizmo.process()
watcher.kill()
| mit | Python |
66a230ae07495dd6feca1647213ffcb25e7addcf | Add check for custom reason | Safihre/cherrypy,Safihre/cherrypy,cherrypy/cherrypy,cherrypy/cherrypy | cherrypy/test/test_httputil.py | cherrypy/test/test_httputil.py | """Tests for ``cherrypy.lib.httputil``."""
import pytest
from six.moves import http_client
from cherrypy.lib import httputil
# Exhaustive 4x4 matrix: each of SCRIPT_NAME and PATH_INFO in one of the
# forms {'/x/', '/x', '/', ''}.
@pytest.mark.parametrize(
    'script_name,path_info,expected_url',
    [
        ('/sn/', '/pi/', '/sn/pi/'),
        ('/sn/', '/pi', '/sn/pi'),
        ('/sn/', '/', '/sn/'),
        ('/sn/', '', '/sn/'),
        ('/sn', '/pi/', '/sn/pi/'),
        ('/sn', '/pi', '/sn/pi'),
        ('/sn', '/', '/sn/'),
        ('/sn', '', '/sn'),
        ('/', '/pi/', '/pi/'),
        ('/', '/pi', '/pi'),
        ('/', '/', '/'),
        ('/', '', '/'),
        ('', '/pi/', '/pi/'),
        ('', '/pi', '/pi'),
        ('', '/', '/'),
        ('', '', '/'),
    ]
)
def test_urljoin(script_name, path_info, expected_url):
    """Test all slash+atom combinations for SCRIPT_NAME and PATH_INFO."""
    actual_url = httputil.urljoin(script_name, path_info)
    assert actual_url == expected_url
# valid_status accepts None, ints, numeric strings, http_client constants,
# and a "code reason" string with a custom reason phrase.
@pytest.mark.parametrize(
    'status,code,reason',
    [
        (None, 200, 'OK'),
        (200, 200, 'OK'),
        ('500', 500, 'Internal Server Error'),
        (http_client.NOT_FOUND, 404, 'Not Found'),
        ('444 Non-existent reason', 444, 'Non-existent reason'),
    ]
)
def test_valid_status(status, code, reason):
    """Valid int and string statuses."""
    # Only (code, reason) are checked; the third tuple element is ignored.
    assert httputil.valid_status(status)[:2] == (code, reason)
@pytest.mark.parametrize(
    'status_code,error_msg',
    [
        ('hey', "Illegal response status from server ('hey' is non-numeric)."),
        ({'hey': 'hi'}, "Illegal response status from server ({'hey': 'hi'} is non-numeric)."),
        (1, 'Illegal response status from server (1 is out of range).'),
        (600, 'Illegal response status from server (600 is out of range).'),
    ]
)
def test_invalid_status(status_code, error_msg):
    """Invalid status should raise an error."""
    with pytest.raises(ValueError) as excinfo:
        httputil.valid_status(status_code)
    # Check against the exception's own message: str(excinfo) includes
    # file/line context and is not the message itself.
    assert error_msg in str(excinfo.value)
| """Tests for ``cherrypy.lib.httputil``."""
import pytest
from six.moves import http_client
from cherrypy.lib import httputil
@pytest.mark.parametrize(
'script_name,path_info,expected_url',
[
('/sn/', '/pi/', '/sn/pi/'),
('/sn/', '/pi', '/sn/pi'),
('/sn/', '/', '/sn/'),
('/sn/', '', '/sn/'),
('/sn', '/pi/', '/sn/pi/'),
('/sn', '/pi', '/sn/pi'),
('/sn', '/', '/sn/'),
('/sn', '', '/sn'),
('/', '/pi/', '/pi/'),
('/', '/pi', '/pi'),
('/', '/', '/'),
('/', '', '/'),
('', '/pi/', '/pi/'),
('', '/pi', '/pi'),
('', '/', '/'),
('', '', '/'),
]
)
def test_urljoin(script_name, path_info, expected_url):
"""Test all slash+atom combinations for SCRIPT_NAME and PATH_INFO."""
actual_url = httputil.urljoin(script_name, path_info)
assert actual_url == expected_url
@pytest.mark.parametrize(
'status,code,reason',
[
(None, 200, 'OK'),
(200, 200, 'OK'),
('500', 500, 'Internal Server Error'),
(http_client.NOT_FOUND, 404, 'Not Found'),
]
)
def test_valid_status(status, code, reason):
"""Valid int and string statuses."""
assert httputil.valid_status(status)[:2] == (code, reason)
@pytest.mark.parametrize(
'status_code,error_msg',
[
('hey', "Illegal response status from server ('hey' is non-numeric)."),
({'hey': 'hi'}, "Illegal response status from server ({'hey': 'hi'} is non-numeric)."),
(1, 'Illegal response status from server (1 is out of range).'),
(600, 'Illegal response status from server (600 is out of range).'),
]
)
def test_invalid_status(status_code, error_msg):
"""Invalid status should raise an error."""
with pytest.raises(ValueError) as excinfo:
httputil.valid_status(status_code)
assert error_msg in str(excinfo)
| bsd-3-clause | Python |
0b02e8bfb297cd5d2b172e0a728c068129f0c177 | Tweak assemble_oaipmh_records.py | libris/librisxl,libris/librisxl,libris/librisxl | scripts/assemble_oaipmh_records.py | scripts/assemble_oaipmh_records.py | import csv
import requests
from lxml import etree
from sys import argv, stdout, stderr
import os
baseurl = 'http://data.libris.kb.se/{rectype}/oaipmh/?verb=GetRecord&metadataPrefix=marcxml&identifier=http://libris.kb.se/resource/{record}'
# CLI: first argument is "user:password"; then either a .tsv file listing
# record ids (column 1) plus an output directory, or the record ids inline.
args = argv[1:]
name, passwd = args.pop(0).split(':')
if args[0].endswith('.tsv'):
    # Record ids come from the first column of the TSV file.
    records = []
    with open(args[0], 'rb') as fp:
        reader = csv.reader(fp, dialect='excel-tab')
        for row in reader:
            records.append(row[0])
    # NOTE(review): args[1] raises IndexError if no outdir was given, and
    # outdir stays unbound when args[1] is falsy -- confirm expected usage.
    if args[1]:
        outdir = args[1]
else:
    # No TSV: every remaining argument is a record id; write to stdout.
    outdir = None
    records = args
def make_root():
    """Build a skeletal OAI-PMH response envelope (default namespace set)."""
    oai_ns = {None: "http://www.openarchives.org/OAI/2.0/"}
    envelope = etree.Element('OAI-PMH', nsmap=oai_ns)
    stamp = etree.SubElement(envelope, 'responseDate')
    stamp.text = "1970-01-01T00:00:00Z"
    # The request element mimics a ListRecords harvest so downstream
    # consumers accept the assembled file.
    request_attrs = {
        'verb': "ListRecords",
        'resumptionToken': "null|2001-12-11T23:00:00Z|107000|null|null|marcxml",
        'metadataPrefix': "marcxml",
    }
    request = etree.SubElement(envelope, 'request', attrib=request_attrs)
    request.text = "http://data.libris.kb.se/auth/oaipmh"
    return envelope
# Fetch every record and group them by record type; each type gets one
# OAI-PMH envelope with a shared ListRecords element. (Python 2 script:
# uses "print >>" syntax.)
partitions = {}
for record in records:
    rectype, recid = record.split('/')
    if not rectype in partitions:
        root = make_root()
        partitions[rectype] = (
            root,
            etree.SubElement(root, 'ListRecords'))
    root, reclist = partitions[rectype]
    # baseurl interpolates rectype/record from the local namespace.
    url = baseurl.format(**vars())
    res = requests.get(url, auth=(name, passwd), stream=True)
    record_root = etree.parse(res.raw)
    # The payload of a GetRecord response is two levels below the root.
    record_data = record_root.find('/*/*')
    if record_data is None:
        print >>stderr, "found nothing for", record
        continue
    reclist.append(record_data)
# NOTE(review): this loop rebinds 'name' (previously the auth username);
# harmless here because all fetches are done, but worth renaming.
for name, (root, reclist) in partitions.items():
    if outdir:
        # One file per record type: <outdir>/<rectype>/oaipmh
        fpath = os.path.join(outdir, name, "oaipmh")
        fdir = os.path.dirname(fpath)
        if not os.path.isdir(fdir):
            os.makedirs(fdir)
        fp = open(fpath, 'w')
    else:
        fp = stdout
    fp.write(etree.tostring(root, pretty_print=True, encoding='UTF-8'))
    if outdir:
        fp.close()
    else:
        # Blank line separates envelopes on stdout.
        print
| import csv
import requests
from lxml import etree
from sys import argv, stdout, stderr
import os
baseurl = 'http://data.libris.kb.se/{rectype}/oaipmh/?verb=GetRecord&metadataPrefix=marcxml&identifier=http://libris.kb.se/resource/{record}'
args = argv[1:]
auth = tuple(args.pop(0).split(':'))
if args[0].endswith('.tsv'):
records = []
with open(args[0], 'rb') as fp:
reader = csv.reader(fp, dialect='excel-tab')
for row in reader:
records.append(row[0])
if args[1]:
outdir = args[1]
else:
outdir = None
records = args
def make_root():
root = etree.Element('OAI-PMH', nsmap={None: "http://www.openarchives.org/OAI/2.0/"})
etree.SubElement(root, 'responseDate').text = "1970-01-01T00:00:00Z"
etree.SubElement(root, 'request', attrib=dict(
verb="ListRecords",
resumptionToken="null|2001-12-11T23:00:00Z|107000|null|null|marcxml",
metadataPrefix="marcxml"
)).text = "http://data.libris.kb.se/auth/oaipmh"
return root
partitions = {}
for record in records:
rectype, recid = record.split('/')
if not rectype in partitions:
root = make_root()
partitions[rectype] = (
root,
etree.SubElement(root, 'ListRecords'))
root, reclist = partitions[rectype]
url = baseurl.format(**vars())
res = requests.get(url, auth=auth, stream=True)
record_root = etree.parse(res.raw)
record_data = record_root.find('/*/*')
if record_data is None:
print >>stderr, "found nothing for", record
continue
reclist.append(record_data)
for name, (root, reclist) in partitions.items():
if outdir:
fpath = os.path.join(outdir, name, "oaipmh")
fdir = os.path.dirname(fpath)
if not os.path.isdir(fdir):
os.makedirs(fdir)
fp = open(fpath, 'w')
else:
fp = stdout
fp.write(etree.tostring(root, pretty_print=True, encoding='UTF-8'))
if outdir:
fp.close()
else:
print
| apache-2.0 | Python |
f8bf7b97965e2918721ef0956ff2f24716af7d0f | change data structure storing greetings from list to deque because list was being used inefficiently (with many `insert(0, x)`) | anl-mcampos/GuestBook,anl-mcampos/GuestBook | guestbook/__init__.py | guestbook/__init__.py | # coding: utf-8
import shelve
from contextlib import closing
from datetime import datetime
from collections import namedtuple, deque
from flask import Flask, request, render_template, redirect, escape, Markup
application = Flask(__name__)
DATA_FILE = 'guestbook.dat'
Post = namedtuple('Post', ['name', 'timestamp', 'comment'])
def save_post(name, timestamp, comment):
    """Prepend a new Post to the persisted greeting list.

    The shelve database stores the list under 'greeting_list' as a deque,
    so newest-first insertion is O(1).
    """
    with closing(shelve.open(DATA_FILE)) as database:
        greeting_list = database.get('greeting_list', deque())
        # Guard against stale data written by an older (list-based) version.
        assert isinstance(greeting_list, deque)
        greeting_list.appendleft(Post(name, timestamp, comment))
        database['greeting_list'] = greeting_list
def load_posts():
    """Return all stored posts, newest first.

    NOTE(review): save_post stores a deque but the fallback here is a plain
    list -- harmless for template iteration, but inconsistent; confirm.
    """
    with closing(shelve.open(DATA_FILE)) as database:
        return database.get('greeting_list', [])
@application.route('/')
def index():
    """Render the guestbook with all posts, newest first."""
    return render_template('index.html', greeting_list=load_posts())
@application.route('/post', methods=['POST'])
def post():
    """Accept a guestbook form submission, then redirect to the index."""
    name = request.form.get('name')
    comment = request.form.get('comment')
    # NOTE(review): missing form fields arrive as None and are stored as-is;
    # confirm whether empty submissions should be rejected.
    save_post(name, datetime.now(), comment)
    return redirect('/')
@application.template_filter('nl2br')
def nl2br_filter(s):
    """Template filter: escape *s* and turn newlines into <br /> tags."""
    return escape(s).replace('\n', Markup('<br />'))
@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
    """Template filter: render a datetime as 'YYYYMMDD HH:MM:SS'."""
    return dt.strftime('%Y%m%d %H:%M:%S')
def main():
    """Run the development server (used as the installed entry point)."""
    application.run('127.0.0.1', 8000)

if __name__ == "__main__":
    # Direct execution enables debug mode, unlike main().
    application.run('127.0.0.1', 8000, debug=True)
| # coding: utf-8
import shelve
from contextlib import closing
from datetime import datetime
from collections import namedtuple
from flask import Flask, request, render_template, redirect, escape, Markup
application = Flask(__name__)
DATA_FILE = 'guestbook.dat'
Post = namedtuple('Post', ['name', 'timestamp', 'comment'])
def save_post(name, timestamp, comment):
with closing(shelve.open(DATA_FILE)) as database:
greeting_list = database.get('greeting_list', [])
greeting_list.insert(0, Post(name, timestamp, comment))
database['greeting_list'] = greeting_list
def load_posts():
with closing(shelve.open(DATA_FILE)) as database:
return database.get('greeting_list', [])
@application.route('/')
def index():
return render_template('index.html', greeting_list=load_posts())
@application.route('/post', methods=['POST'])
def post():
name = request.form.get('name')
comment = request.form.get('comment')
save_post(name, datetime.now(), comment)
return redirect('/')
@application.template_filter('nl2br')
def nl2br_filter(s):
return escape(s).replace('\n', Markup('<br />'))
@application.template_filter('datetime_fmt')
def datetime_fmt_filter(dt):
return dt.strftime('%Y%m%d %H:%M:%S')
def main():
application.run('127.0.0.1', 8000)
if __name__ == "__main__":
application.run('127.0.0.1', 8000, debug=True)
| mit | Python |
f665f051b936d249a055e1bca28c8b3194324e81 | Add a module docstring for the null auth backend. | henn/haas,kylehogan/hil,henn/hil,henn/hil_sahil,SahilTikale/haas,henn/hil,CCI-MOC/haas,henn/hil_sahil,meng-sun/hil,kylehogan/haas,meng-sun/hil,kylehogan/hil | haas/ext/auth/null.py | haas/ext/auth/null.py | """'Null' auth backend
This backend requires no authentication and permits everything. Useful for
testing, do not use in production."""
from haas import auth
class NullAuthBackend(auth.AuthBackend):
    """Auth backend that performs no checks; every request is authorized."""

    def authenticate(self):
        # No credentials are required or inspected.
        pass

    def have_admin(self):
        # Every caller is treated as an administrator.
        return True

    def have_project_access(self, project):
        # Every project is accessible to every caller.
        return True
def setup(*args, **kwargs):
    """Extension entry point: install the null backend as the active one."""
    auth.set_auth_backend(NullAuthBackend())
| from haas import auth
class NullAuthBackend(auth.AuthBackend):
def authenticate(self):
pass
def have_admin(self):
return True
def have_project_access(self, project):
return True
def setup(*args, **kwargs):
auth.set_auth_backend(NullAuthBackend())
| apache-2.0 | Python |
50b14c25eed76a46cdb0c1a9478cb1bedea0f719 | Revise to _padding() and revise var names | bowen0701/algorithms_data_structures | lc0067_add_binary.py | lc0067_add_binary.py | """Leetcode 67. Add Binary
Easy
URL: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class SolutionIter(object):
    """Iterative schoolbook addition of two binary strings."""

    def _padding(self, a, b):
        """Left-pad the shorter of a/b with '0' so both share one width."""
        width = max(len(a), len(b))
        return a.rjust(width, '0'), b.rjust(width, '0')

    def addBinary(self, a, b):
        """
        :type a: str
        :type b: str
        :rtype: str

        Time complexity: O(n), where n is the length of the longer string.
        Space complexity: O(1).
        """
        a, b = self._padding(a, b)

        # Walk both strings right-to-left, collecting digits in reverse,
        # then flip once at the end (avoids a deque).
        digits = []
        carry = 0
        for pos in range(len(a) - 1, -1, -1):
            total = int(a[pos]) + int(b[pos]) + carry
            digits.append(str(total % 2))
            carry = total // 2
        if carry:
            digits.append(str(carry))
        digits.reverse()
        return ''.join(digits)
def main():
    """Demo driver (Python 2 print syntax)."""
    # Output: "100"
    a = "11"
    b = "1"
    print SolutionIter().addBinary(a, b)

    # Output: "10101"
    a = "1010"
    b = "1011"
    print SolutionIter().addBinary(a, b)


if __name__ == '__main__':
    main()
| """Leetcode 67. Add Binary
Easy
URL: https://leetcode.com/problems/add-binary/
Given two binary strings, return their sum (also a binary string).
The input strings are both non-empty and contains only characters 1 or 0.
Example 1:
Input: a = "11", b = "1"
Output: "100"
Example 2:
Input: a = "1010", b = "1011"
Output: "10101"
"""
class SolutionIter(object):
def _normalize(self, a, b):
if len(a) < len(b):
a = '0' * (len(b) - len(a)) + a
elif len(a) > len(b):
b = '0' * (len(a) - len(b)) + b
return a, b
def addBinary(self, a, b):
"""
:type a: str
:type b: str
:rtype: str
Time complexity: O(n), where n is the length of the longer string.
Space complexity: O(1).
"""
from collections import deque
# Normalize a and b to equal size by padding 0's to shorer one.
a, b = self._normalize(a, b)
# Add numbers from the last digits with carry.
s = deque([])
carry = 0
# Add binary from backward if not out of boundary or exists carry.
i = len(a) - 1
while i >= 0 or carry > 0:
if i >= 0:
total = int(a[i]) + int(b[i]) + carry
else:
total = carry
carry, val = total // 2, total % 2
s.appendleft(str(val))
i -= 1
return ''.join(list(s))
def main():
# Output: "100"
a = "11"
b = "1"
print SolutionIter().addBinary(a, b)
# Output: "10101"
a = "1010"
b = "1011"
print SolutionIter().addBinary(a, b)
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
b20d515c24cdf1079505b0f38b17e4cea752a97b | Implement command.call in terms of call_get_out and skip output, trying to avoid problems with processes that hang | GoogleCloudPlatform/gke-managed-certs,GoogleCloudPlatform/gke-managed-certs | hack/utils/command.py | hack/utils/command.py | #!/usr/bin/env python
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Wrappers around subprocess which simplify calling external commands from Python scripts.
"""
import subprocess
def call(command, info=None):
    """
    Calls a command through shell, discarding its output.

    Delegates to call_get_out so logging stays in one place; the
    (output, success) result is intentionally ignored.

    :param command: shell command line to execute
    :param info: optional human-readable description printed before running
    """
    call_get_out(command, info)
def call_get_out(command, info=None):
    """
    Calls a command through shell and returns a tuple which:
    * first element is a list of output lines with empty lines removed
    * second element is a flag indicating whether the command succeeded or not
    """
    if info is not None:
        print("### {0}".format(info))
    print("### Executing $ {0}".format(command))
    p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
    # NOTE(review): on Python 3 communicate() yields bytes and filter()
    # returns an iterator, so the "list of str lines" claim only holds on
    # Python 2 -- confirm the target interpreter.
    output = filter(None, p.communicate()[0].split("\n"))
    return (output, p.returncode == 0)
| #!/usr/bin/env python
"""
Copyright 2018 Google LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Wrappers around subprocess which simplify calling external commands from Python scripts.
"""
import subprocess
def call(command, info=None):
"""
Calls a command through shell
"""
if info is not None:
print("### {0}".format(info))
print("### Executing $ {0}".format(command))
subprocess.call(command, shell=True)
def call_get_out(command, info=None):
"""
Calls a command through shell and returns a tuple which:
* first element is a list of output lines with empty lines removed
* second element is a flag indicating whether the command succeeded or not
"""
if info is not None:
print("### {0}".format(info))
print("### Executing $ {0}".format(command))
p = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
output = filter(None, p.communicate()[0].split("\n"))
return (output, p.returncode == 0)
| apache-2.0 | Python |
081e0eaf2f5767a2645b6b0b758cb9f3548a66eb | fix broken build | chfw/pyramid-excel,chfw/pyramid-excel | tests/test_upload_n_download_excel.py | tests/test_upload_n_download_excel.py | import pyexcel as pe
from _compact import BytesIO, PY2
FILE_TYPE_MIME_TABLE = {
"csv": "text/csv",
"tsv": "text/tab-separated-values",
"csvz": "application/zip",
"tsvz": "application/zip",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"xls": "application/vnd.ms-excel",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlsm": "application/vnd.ms-excel.sheet.macroenabled.12"
}
class TestExcelResponse:
    """Upload a sheet in every supported format and download it back in
    every format, checking MIME type and round-tripped data."""

    def setUp(self):
        from myproject import main
        from webtest import TestApp
        self.raw_app = main({})
        self.app = TestApp(self.raw_app)
        # Small fixture sheet used for every upload/download pair.
        self.data = [
            [1, 2, 3],
            [4, 5, 6]
        ]

    def test_download(self):
        for upload_file_type in FILE_TYPE_MIME_TABLE.keys():
            file_name = 'test.%s' % upload_file_type
            for download_file_type in FILE_TYPE_MIME_TABLE.keys():
                print("Uploading %s Downloading %s" % (upload_file_type, download_file_type))
                io = pe.get_io(upload_file_type)
                sheet = pe.Sheet(self.data)
                sheet.save_to_memory(upload_file_type, io)
                io.seek(0)
                # webtest needs bytes for the upload payload. On Python 3 a
                # text-based io must be encoded first; on Python 2 getvalue()
                # is already a byte string. (Previously 'content' was left
                # unassigned on Python 2, raising NameError.)
                if not PY2:
                    if isinstance(io, BytesIO):
                        content = io.getvalue()
                    else:
                        content = io.getvalue().encode('utf-8')
                else:
                    content = io.getvalue()
                response = self.app.post('/switch/%s' % download_file_type,
                                         upload_files = [('file', file_name, content)],
                                         )
                assert response.content_type == FILE_TYPE_MIME_TABLE[download_file_type]
                sheet = pe.get_sheet(file_type=download_file_type, file_content=response.body)
                # Parsed cells come back as strings; coerce to int to compare.
                sheet.format(int)
                array = sheet.to_array()
                assert array == self.data
| import pyexcel as pe
from _compact import BytesIO
FILE_TYPE_MIME_TABLE = {
"csv": "text/csv",
"tsv": "text/tab-separated-values",
"csvz": "application/zip",
"tsvz": "application/zip",
"ods": "application/vnd.oasis.opendocument.spreadsheet",
"xls": "application/vnd.ms-excel",
"xlsx": "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
"xlsm": "application/vnd.ms-excel.sheet.macroenabled.12"
}
class TestExcelResponse:
def setUp(self):
from myproject import main
from webtest import TestApp
self.raw_app = main({})
self.app = TestApp(self.raw_app)
self.data = [
[1, 2, 3],
[4, 5, 6]
]
def test_download(self):
for upload_file_type in FILE_TYPE_MIME_TABLE.keys():
file_name = 'test.%s' % upload_file_type
for download_file_type in ['tsv']:#FILE_TYPE_MIME_TABLE.keys():
print("Uploading %s Downloading %s" % (upload_file_type, download_file_type))
io = BytesIO()
sheet = pe.Sheet(self.data)
sheet.save_to_memory(upload_file_type, io)
io.seek(0)
response = self.app.post('/switch/%s' % download_file_type,
upload_files = [('file', file_name, io.getvalue())],
)
assert response.content_type == FILE_TYPE_MIME_TABLE[download_file_type]
sheet = pe.get_sheet(file_type=download_file_type, file_content=response.body)
sheet.format(int)
array = sheet.to_array()
assert array == self.data
| bsd-3-clause | Python |
03e7fae11a7c5000452cd4154aa2824e8835342b | remove unneeded imports | sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary,sassoftware/conary | updatecmd.py | updatecmd.py | #
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
from repository import changeset
from local import database
from lib import log
import os
from repository import repository
from lib import util
import conaryclient
# FIXME client should instantiated once per execution of the command line
# conary client
def doUpdate(repos, cfg, pkgList, replaceFiles = False, tagScript = None,
             keepExisting = False):
    """Install or update the given troves on the local system.

    Each entry of pkgList may be: a path to a changeset file on disk,
    a "name=version" specifier, or a bare trove name.  Update errors are
    logged rather than raised.
    """
    client = conaryclient.ConaryClient(repos, cfg)
    applyList = []
    for pkgStr in pkgList:
        if os.path.exists(pkgStr) and os.path.isfile(pkgStr):
            # A changeset file on disk - load it directly.
            cs = changeset.ChangeSetFromFile(pkgStr)
            applyList.append(cs)
        elif pkgStr.find("=") >= 0:
            # "name=version" specifier; exactly one '=' is allowed.
            l = pkgStr.split("=")
            if len(l) != 2:
                log.error("too many ='s in %s", pkgStr)
                return 1
            applyList.append((l[0], l[1]))
        else:
            # Bare trove name; version resolution happens in the client.
            applyList.append(pkgStr)
    try:
        client.updateTrove(applyList, replaceFiles, tagScript, keepExisting)
    except conaryclient.UpdateError, e:
        log.error(e)
    except repository.CommitError, e:
        log.error(e)
def doErase(db, cfg, pkg, versionStr = None, tagScript = None):
    """Remove an installed trove; logs an error if the trove is unknown."""
    client = conaryclient.ConaryClient(None, cfg)
    try:
        client.eraseTrove(pkg, versionStr, tagScript)
    except repository.PackageNotFound:
        log.error("package not found: %s", pkg)
| #
# Copyright (c) 2004 Specifix, Inc.
#
# This program is distributed under the terms of the Common Public License,
# version 1.0. A copy of this license should have been distributed with this
# source file in a file called LICENSE. If it is not present, the license
# is always available at http://www.opensource.org/licenses/cpl.php.
#
# This program is distributed in the hope that it will be useful, but
# without any waranty; without even the implied warranty of merchantability
# or fitness for a particular purpose. See the Common Public License for
# full details.
#
from repository import changeset
from local import database
from lib import log
import os
from repository import repository
import sys
import trove
from lib import util
import versions
import conaryclient
# FIXME client should instantiated once per execution of the command line
# conary client
def doUpdate(repos, cfg, pkgList, replaceFiles = False, tagScript = None,
keepExisting = False):
client = conaryclient.ConaryClient(repos, cfg)
applyList = []
for pkgStr in pkgList:
if os.path.exists(pkgStr) and os.path.isfile(pkgStr):
cs = changeset.ChangeSetFromFile(pkgStr)
applyList.append(cs)
elif pkgStr.find("=") >= 0:
l = pkgStr.split("=")
if len(l) != 2:
log.error("too many ='s in %s", pkgStr)
return 1
applyList.append((l[0], l[1]))
else:
applyList.append(pkgStr)
try:
client.updateTrove(applyList, replaceFiles, tagScript, keepExisting)
except conaryclient.UpdateError, e:
log.error(e)
except repository.CommitError, e:
log.error(e)
def doErase(db, cfg, pkg, versionStr = None, tagScript = None):
client = conaryclient.ConaryClient(None, cfg)
try:
client.eraseTrove(pkg, versionStr, tagScript)
except repository.PackageNotFound:
log.error("package not found: %s", pkg)
| apache-2.0 | Python |
45af812a203fdcd7a825e0d4a044b09230ae71e5 | change user profile url not to be on root to avoid conflicts | hasadna/open-shot,hasadna/open-shot,hasadna/open-shot | user/urls.py | user/urls.py | from django.conf.urls.defaults import patterns, include, url
from .views import *
from .feeds import *
# URL routes for user profiles, candidates, auth and invitations.
urlpatterns = patterns('',
    url(r'^profile/$', edit_profile, name='edit_profile'),
    url(r'^candidate/$', edit_candidate, name='edit_candidate'),
    url(r'^candidates/$', candidate_list, name="candidate_list"),
    url(r'^follow/$', 'user_follow_unfollow', name='user-follow-unfollow'),
    # Candidate lists scoped to an entity, matched by numeric id or by slug.
    url(r'^(?P<entity_id>[-\d]+)/candidates/$', candidate_list, name="candidate_list"),
    url(r'^(?P<entity_slug>.*)/candidates/$', candidate_list, name="candidate_list"),
    # Atom feed of a candidate's answers.
    url(r'^candidate/(?P<candidate_id>\d+)/atom/$',
        AtomUserAnswerFeed(),
        name='user_feed'
    ),
    url(r'^logout/$', 'django.contrib.auth.views.logout',
        {'next_page': '/'},
        name="logout"),
    url(r'^login/$', 'django.contrib.auth.views.login',
        name='login'),
    url(r'^invitation/(?P<invitation_key>\w+)/$',
        InvitationView.as_view(),
        name='accept-invitation'),
    url(r'^candidate/(?P<candidate_id>\d+)/remove/$',
        remove_candidate,
        name='remove_candidate'
    ),
    # Public profile lives under 'profile/' so the catch-all username pattern
    # cannot shadow the routes above.
    url(r'^profile/(?P<username>.+)/$', public_profile, name="public-profile"),
)
| from django.conf.urls.defaults import patterns, include, url
from .views import *
from .feeds import *
urlpatterns = patterns('',
url(r'^profile/$', edit_profile, name='edit_profile'),
url(r'^candidate/$', edit_candidate, name='edit_candidate'),
url(r'^candidates/$', candidate_list, name="candidate_list"),
url(r'^follow/$', 'user_follow_unfollow', name='user-follow-unfollow'),
url(r'^(?P<entity_id>[-\d]+)/candidates/$', candidate_list, name="candidate_list"),
url(r'^(?P<entity_slug>.*)/candidates/$', candidate_list, name="candidate_list"),
url(r'^candidate/(?P<candidate_id>\d+)/atom/$',
AtomUserAnswerFeed(),
name='user_feed'
),
url(r'^logout/$', 'django.contrib.auth.views.logout',
{'next_page': '/'},
name="logout"),
url(r'^login/$', 'django.contrib.auth.views.login',
name='login'),
url(r'^invitation/(?P<invitation_key>\w+)/$',
InvitationView.as_view(),
name='accept-invitation'),
url(r'^candidate/(?P<candidate_id>\d+)/remove/$',
remove_candidate,
name='remove_candidate'
),
url(r'^(?P<username>.+)/$', public_profile, name="public-profile"),
)
| bsd-3-clause | Python |
715071aa0d6baffab19ee5f8c34a6d8341034737 | Update docstrings. | owtf/ptp,DoomTaper/ptp | libptp/exceptions.py | libptp/exceptions.py | """
:synopsis: Custom exceptions used across the :mod:`ptp` library.
.. moduleauthor:: Tao Sauvage
"""
class PTPError(Exception):
    """General :mod:`ptp` error.

    Base class for every error raised by the :mod:`ptp` library; catching it
    catches all of the specific failures below.
    """
    # A docstring already makes the class body non-empty, so the redundant
    # `pass` statements were dropped.


class ReportNotFoundError(PTPError):
    """:mod:`ptp` error raised when the report file(s) was not found."""


class NotSupportedToolError(PTPError):
    """:mod:`ptp` error raised when the tool is not supported."""


class NotSupportedVersionError(PTPError):
    """:mod:`ptp` error raised when the version of the tool is not supported."""
| """
:synopsis: Custom exceptions used across the :mod:`ptp` library.
.. moduleauthor:: Tao Sauvage
"""
class PTPError(Exception):
    """General PTP error.

    Base class for every error raised by the PTP library; catching it
    catches all of the specific failures below.
    """
    # A docstring already makes the class body non-empty, so the redundant
    # `pass` statements were dropped.


class ReportNotFoundError(PTPError):
    """PTP error raised when the report file(s) was not found."""


class NotSupportedToolError(PTPError):
    """PTP error raised when the tool is not supported."""


class NotSupportedVersionError(PTPError):
    """PTP error raised when the version of the tool is not supported."""
| bsd-3-clause | Python |
cc85995e96de7664648596713f127f5999859f00 | Remove unused variable | abawchen/leetcode | solutions/403.py | solutions/403.py | # -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/frog-jump/description/
A frog is crossing a river. The river is divided into x units and at each unit there may or may not exist a stone. The frog can jump on a stone, but it must not jump into the water.
Given a list of stones' positions (in units) in sorted ascending order, determine if the frog is able to cross the river by landing on the last stone. Initially, the frog is on the first stone and assume the first jump must be 1 unit.
If the frog's last jump was k units, then its next jump must be either k - 1, k, or k + 1 units. Note that the frog can only jump in the forward direction.
Note:
The number of stones is ≥ 2 and is < 1,100.
Each stone's position will be a non-negative integer < 231.
The first stone's position is always 0.
Example 1:
[0,1,3,5,6,8,12,17]
There are a total of 8 stones.
The first stone at the 0th unit, second stone at the 1st unit,
third stone at the 3rd unit, and so on...
The last stone at the 17th unit.
Return true. The frog can jump to the last stone by jumping
1 unit to the 2nd stone,
then 2 units to the 3rd stone,
then 2 units to the 4th stone,
then 3 units to the 6th stone,
then 4 units to the 7th stone,
and 5 units to the 8th stone.
Example 2:
[0,1,2,3,4,8,9,11]
Return false. There is no way to jump to the last stone as
the gap between the 5th and 6th stone is too large.
"""
class Solution:
    def canCross(self, stones):
        """Return True if the frog can reach the last stone.

        ``reachable[p]`` maps a stone position to the set of jump sizes that
        can land the frog on it.  Stones are processed in increasing order;
        every jump of size k reaching a stone spawns candidate jumps of size
        k-1, k and k+1 from that stone.

        Fixes over the original: the dead locals (``s`` used only by
        commented-out code, unused ``t``) are gone, and jumps are only
        recorded when they land on an actual stone, which keeps the map
        small without changing the result.

        :type stones: List[int]
        :rtype: bool
        """
        from collections import defaultdict
        stone_set = set(stones)
        reachable = defaultdict(set)
        # The first jump must be exactly 1 unit (onto position 1).
        reachable[1] = set([1])
        for stone in stones[1:]:
            for k in reachable[stone]:
                for step in (k - 1, k, k + 1):
                    # Only forward jumps that land on a real stone matter.
                    if step > 0 and stone + step in stone_set:
                        reachable[stone + step].add(step)
        return len(reachable[stones[-1]]) != 0
| # -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/frog-jump/description/
A frog is crossing a river. The river is divided into x units and at each unit there may or may not exist a stone. The frog can jump on a stone, but it must not jump into the water.
Given a list of stones' positions (in units) in sorted ascending order, determine if the frog is able to cross the river by landing on the last stone. Initially, the frog is on the first stone and assume the first jump must be 1 unit.
If the frog's last jump was k units, then its next jump must be either k - 1, k, or k + 1 units. Note that the frog can only jump in the forward direction.
Note:
The number of stones is ≥ 2 and is < 1,100.
Each stone's position will be a non-negative integer < 231.
The first stone's position is always 0.
Example 1:
[0,1,3,5,6,8,12,17]
There are a total of 8 stones.
The first stone at the 0th unit, second stone at the 1st unit,
third stone at the 3rd unit, and so on...
The last stone at the 17th unit.
Return true. The frog can jump to the last stone by jumping
1 unit to the 2nd stone,
then 2 units to the 3rd stone,
then 2 units to the 4th stone,
then 3 units to the 6th stone,
then 4 units to the 7th stone,
and 5 units to the 8th stone.
Example 2:
[0,1,2,3,4,8,9,11]
Return false. There is no way to jump to the last stone as
the gap between the 5th and 6th stone is too large.
"""
class Solution:
    def canCross(self, stones):
        """Return True if the frog can reach the last stone.

        ``reachable[p]`` maps a stone position to the set of jump sizes that
        can land the frog on it.  Stones are processed in increasing order;
        every jump of size k reaching a stone spawns candidate jumps of size
        k-1, k and k+1 from that stone.

        Fixes over the original: the dead locals (``can`` never read, ``s``
        used only by commented-out code, unused ``t``) are gone, and jumps
        are only recorded when they land on an actual stone, which keeps the
        map small without changing the result.

        :type stones: List[int]
        :rtype: bool
        """
        from collections import defaultdict
        stone_set = set(stones)
        reachable = defaultdict(set)
        # The first jump must be exactly 1 unit (onto position 1).
        reachable[1] = set([1])
        for stone in stones[1:]:
            for k in reachable[stone]:
                for step in (k - 1, k, k + 1):
                    # Only forward jumps that land on a real stone matter.
                    if step > 0 and stone + step in stone_set:
                        reachable[stone + step].add(step)
        return len(reachable[stones[-1]]) != 0
return len(units[stones[-1]]) != 0
| mit | Python |
1ac2360d2e5bf395607db130f52d7475bd15ad8c | Fix QuerySet -> QuerySet[Subscription]. | andersk/zulip,jackrzhang/zulip,kou/zulip,shubhamdhama/zulip,eeshangarg/zulip,showell/zulip,kou/zulip,punchagan/zulip,brockwhittaker/zulip,timabbott/zulip,punchagan/zulip,rishig/zulip,brainwane/zulip,brockwhittaker/zulip,rht/zulip,mahim97/zulip,mahim97/zulip,timabbott/zulip,synicalsyntax/zulip,rht/zulip,shubhamdhama/zulip,timabbott/zulip,dhcrzf/zulip,synicalsyntax/zulip,Galexrt/zulip,brainwane/zulip,dhcrzf/zulip,kou/zulip,showell/zulip,synicalsyntax/zulip,zulip/zulip,synicalsyntax/zulip,showell/zulip,rishig/zulip,eeshangarg/zulip,timabbott/zulip,punchagan/zulip,rishig/zulip,mahim97/zulip,zulip/zulip,tommyip/zulip,tommyip/zulip,andersk/zulip,showell/zulip,andersk/zulip,andersk/zulip,brainwane/zulip,dhcrzf/zulip,brainwane/zulip,zulip/zulip,shubhamdhama/zulip,rishig/zulip,brainwane/zulip,rishig/zulip,hackerkid/zulip,brainwane/zulip,zulip/zulip,Galexrt/zulip,Galexrt/zulip,showell/zulip,kou/zulip,Galexrt/zulip,punchagan/zulip,punchagan/zulip,shubhamdhama/zulip,hackerkid/zulip,mahim97/zulip,rht/zulip,andersk/zulip,hackerkid/zulip,dhcrzf/zulip,Galexrt/zulip,tommyip/zulip,timabbott/zulip,hackerkid/zulip,mahim97/zulip,shubhamdhama/zulip,eeshangarg/zulip,eeshangarg/zulip,tommyip/zulip,Galexrt/zulip,zulip/zulip,brainwane/zulip,tommyip/zulip,rht/zulip,shubhamdhama/zulip,rht/zulip,rht/zulip,dhcrzf/zulip,eeshangarg/zulip,synicalsyntax/zulip,andersk/zulip,kou/zulip,jackrzhang/zulip,brockwhittaker/zulip,jackrzhang/zulip,hackerkid/zulip,tommyip/zulip,jackrzhang/zulip,rht/zulip,hackerkid/zulip,timabbott/zulip,rishig/zulip,Galexrt/zulip,eeshangarg/zulip,synicalsyntax/zulip,zulip/zulip,andersk/zulip,brockwhittaker/zulip,jackrzhang/zulip,brockwhittaker/zulip,synicalsyntax/zulip,kou/zulip,hackerkid/zulip,eeshangarg/zulip,rishig/zulip,tommyip/zulip,mahim97/zulip,dhcrzf/zulip,showell/zulip,zulip/zulip,shubhamdhama/zulip,brockwhittaker/zulip,timabbott/zulip,punchagan/zulip,showell/zulip,jackrzhang/zulip,dhcr
zf/zulip,jackrzhang/zulip,punchagan/zulip,kou/zulip | zerver/lib/stream_subscription.py | zerver/lib/stream_subscription.py | from typing import Dict, List, Tuple
from mypy_extensions import TypedDict
from django.db.models.query import QuerySet
from zerver.models import (
Recipient,
Stream,
Subscription,
UserProfile,
)
def get_active_subscriptions_for_stream_id(stream_id):
    # type: (int) -> QuerySet[Subscription]
    """Return the active subscriptions to a single stream."""
    return Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id=stream_id,
        active=True,
    )
def get_active_subscriptions_for_stream_ids(stream_ids):
    # type: (List[int]) -> QuerySet[Subscription]
    """Return the active subscriptions to any of the given streams."""
    return Subscription.objects.filter(
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=stream_ids,
        active=True
    )
def get_stream_subscriptions_for_user(user_profile):
    # type: (UserProfile) -> QuerySet[Subscription]
    """Return all of a user's stream subscriptions (no `active` filter,
    so inactive/unsubscribed rows are included)."""
    return Subscription.objects.filter(
        user_profile=user_profile,
        recipient__type=Recipient.STREAM,
    )
def get_stream_subscriptions_for_users(user_profiles):
    # type: (List[UserProfile]) -> QuerySet[Subscription]
    """Return all stream subscriptions for any of the given users
    (no `active` filter, so inactive rows are included)."""
    return Subscription.objects.filter(
        user_profile__in=user_profiles,
        recipient__type=Recipient.STREAM,
    )
# Shape of a subscription paired with the stream it belongs to.
SubInfo = TypedDict('SubInfo', {
    'sub': Subscription,
    'stream': Stream,
})
def get_bulk_stream_subscriber_info(user_profiles, stream_dict):
    # type: (List[UserProfile], Dict[int, Stream]) -> Dict[int, List[Tuple[Subscription, Stream]]]
    """Map each user's id to the (subscription, stream) pairs that user
    actively holds among the given streams.

    Every requested user appears in the result; users with no matching
    subscription map to an empty list.  A single query fetches all matching
    subscriptions, with user_profile and recipient pre-joined.
    """
    result = dict((profile.id, []) for profile in user_profiles)  # type: Dict[int, List[Tuple[Subscription, Stream]]]
    matching_subs = Subscription.objects.filter(
        user_profile__in=user_profiles,
        recipient__type=Recipient.STREAM,
        recipient__type_id__in=stream_dict.keys(),
        active=True,
    ).select_related('user_profile', 'recipient')
    for subscription in matching_subs:
        stream = stream_dict[subscription.recipient.type_id]
        result[subscription.user_profile_id].append((subscription, stream))
    return result
def num_subscribers_for_stream_id(stream_id):
    # type: (int) -> int
    """Count active subscriptions to the stream held by active users."""
    return get_active_subscriptions_for_stream_id(stream_id).filter(
        user_profile__is_active=True,
    ).count()
| from typing import Dict, List, Tuple
from mypy_extensions import TypedDict
from django.db.models.query import QuerySet
from zerver.models import (
Recipient,
Stream,
Subscription,
UserProfile,
)
def get_active_subscriptions_for_stream_id(stream_id):
# type: (int) -> QuerySet
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id=stream_id,
active=True,
)
def get_active_subscriptions_for_stream_ids(stream_ids):
# type: (List[int]) -> QuerySet
return Subscription.objects.filter(
recipient__type=Recipient.STREAM,
recipient__type_id__in=stream_ids,
active=True
)
def get_stream_subscriptions_for_user(user_profile):
# type: (UserProfile) -> QuerySet
return Subscription.objects.filter(
user_profile=user_profile,
recipient__type=Recipient.STREAM,
)
def get_stream_subscriptions_for_users(user_profiles):
# type: (List[UserProfile]) -> QuerySet
return Subscription.objects.filter(
user_profile__in=user_profiles,
recipient__type=Recipient.STREAM,
)
SubInfo = TypedDict('SubInfo', {
'sub': Subscription,
'stream': Stream,
})
def get_bulk_stream_subscriber_info(user_profiles, stream_dict):
# type: (List[UserProfile], Dict[int, Stream]) -> Dict[int, List[Tuple[Subscription, Stream]]]
stream_ids = stream_dict.keys()
result = {
user_profile.id: []
for user_profile in user_profiles
} # type: Dict[int, List[Tuple[Subscription, Stream]]]
subs = Subscription.objects.filter(
user_profile__in=user_profiles,
recipient__type=Recipient.STREAM,
recipient__type_id__in=stream_ids,
active=True,
).select_related('user_profile', 'recipient')
for sub in subs:
user_profile_id = sub.user_profile_id
stream_id = sub.recipient.type_id
stream = stream_dict[stream_id]
result[user_profile_id].append((sub, stream))
return result
def num_subscribers_for_stream_id(stream_id):
# type: (int) -> int
return get_active_subscriptions_for_stream_id(stream_id).filter(
user_profile__is_active=True,
).count()
| apache-2.0 | Python |
f67c39c094bf196b09fd0c3de6b26dc61c3ca2d5 | Increment version for PyPI | mkoistinen/cmsplugin-sections,mkoistinen/cmsplugin-sections,helderco/cmsplugin-sections,helderco/cmsplugin-sections | cmsplugin_sections/__init__.py | cmsplugin_sections/__init__.py | # -*- coding: utf-8 -*-
__version__='0.1.1' | # -*- coding: utf-8 -*-
__version__='0.1.0' | mit | Python |
49f472e794cac2792a4f8b545331d78ccdaf64e2 | Add 1M to interval_to_milliseconds, simplify logic, add doctest. | sammchardy/python-binance | binance/helpers.py | binance/helpers.py | import dateparser
import pytz
from datetime import datetime
def date_to_milliseconds(date_str):
    """Convert UTC date to milliseconds

    If using offset strings add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"

    See dateparse docs for formats http://dateparser.readthedocs.io/en/latest/

    :param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
    :type date_str: str

    :return: milliseconds since the Unix epoch, as an int
    """
    # get epoch value in UTC
    epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
    # parse our date string
    d = dateparser.parse(date_str)
    # if the date is not timezone aware apply UTC timezone
    # (naive results from dateparser are interpreted as UTC)
    if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
        d = d.replace(tzinfo=pytz.utc)
    # return the difference in time
    return int((d - epoch).total_seconds() * 1000.0)
def interval_to_milliseconds(interval):
    """Convert a Binance interval string to milliseconds.

    :param interval: Binance interval string, e.g.: 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w, 1M
    :type interval: str

    :return:
        int value of interval in milliseconds
        None if interval prefix is not a decimal integer
        None if interval suffix is not one of m, h, d, w, or M

    Note that a Binance month interval, 1M, is 31 days.

    >>> interval_to_milliseconds('1m')
    60000
    >>> interval_to_milliseconds('2h')
    7200000
    >>> interval_to_milliseconds('1M') == interval_to_milliseconds('31d')
    True
    >>> interval_to_milliseconds('3.3d')
    >>> interval_to_milliseconds('4Y')
    """
    milliseconds_per_unit = {
        "m": 60 * 1000,
        "h": 60 * 60 * 1000,
        "d": 24 * 60 * 60 * 1000,
        "w": 7 * 24 * 60 * 60 * 1000,
        "M": 31 * 24 * 60 * 60 * 1000,
    }
    # Guard the suffix first, then the integer prefix; either failure -> None.
    suffix = interval[-1:]
    if suffix not in milliseconds_per_unit:
        return None
    try:
        count = int(interval[:-1])
    except ValueError:
        return None
    return count * milliseconds_per_unit[suffix]
| import dateparser
import pytz
from datetime import datetime
def date_to_milliseconds(date_str):
"""Convert UTC date to milliseconds
If using offset strings add "UTC" to date string e.g. "now UTC", "11 hours ago UTC"
See dateparse docs for formats http://dateparser.readthedocs.io/en/latest/
:param date_str: date in readable format, i.e. "January 01, 2018", "11 hours ago UTC", "now UTC"
:type date_str: str
"""
# get epoch value in UTC
epoch = datetime.utcfromtimestamp(0).replace(tzinfo=pytz.utc)
# parse our date string
d = dateparser.parse(date_str)
# if the date is not timezone aware apply UTC timezone
if d.tzinfo is None or d.tzinfo.utcoffset(d) is None:
d = d.replace(tzinfo=pytz.utc)
# return the difference in time
return int((d - epoch).total_seconds() * 1000.0)
def interval_to_milliseconds(interval):
    """Convert a Binance interval string to milliseconds.

    :param interval: Binance interval string 1m, 3m, 5m, 15m, 30m, 1h, 2h, 4h, 6h, 8h, 12h, 1d, 3d, 1w
    :type interval: str

    :return:
        None if unit not one of m, h, d or w
        None if string not in correct format
        int value of interval in milliseconds
    """
    seconds_per_unit = {
        "m": 60,
        "h": 60 * 60,
        "d": 24 * 60 * 60,
        "w": 7 * 24 * 60 * 60
    }
    # Guard the unit first, then parse the integer prefix.
    unit = interval[-1]
    if unit not in seconds_per_unit:
        return None
    try:
        return int(interval[:-1]) * seconds_per_unit[unit] * 1000
    except ValueError:
        return None
| mit | Python |
40b4a4c54fe4111537d82de69fa115521ef4185b | Fix and add examples to programmatic diff inspection examples | grahamegee/diffr | examples/programmatic_diff_inspection.py | examples/programmatic_diff_inspection.py | from diffr import diff, Diff, unchanged, insert, remove, changed
a = 'change1 same change2'
b = 'modify1 same modify2'
d = diff(a, b)
print('---------------------------------------------------------')
print('The full diff\n')
# the displayed diff
print(d)
print('---------------------------------------------------------')
# the whole diff
print('can access the full diff\n')
print(''.join([str(i) for i in d]))
print('---------------------------------------------------------')
print('diff item states\n')
# inspect diff item state
print('item {} at index {} is a removal: {}'.format(
str(d[0]), 0, d[0].state == remove)
)
print('---------------------------------------------------------')
print('Breaking diffs up into managable chunks')
print('diff item context\n')
print(
'middle unchanged portion of diff = "{}"'.format(
''.join([str(i) for i in d[13:19]])
)
)
print('---------------------------------------------------------')
print('use context attribute to slice the data structures\n')
a_start, _, b_start, _ = d[13].context
_, a_end, _, b_end = d[18].context
print(
'a context slice: "{}", b context slice: "{}"'.format(
a[a_start:a_end], b[b_start:b_end]))
print('---------------------------------------------------------')
print('diff comparison\n')
a = {'a': 3}
b = {'a': 4}
d_nested = diff([1, a], [1, b])
d = diff(a, b)
print(d)
print(d_nested)
print('Item 1 of the nested diff == the diff: {}'.format(d == d_nested[1].item))
print('---------------------------------------------------------')
print('filter on inserts')
a = 'a' * 5 + 'b' * 5
b = 'b' * 5 + 'a' * 5
print(''.join([str(i) for i in diff(a, b) if i.state == insert]))
print('filter on removals')
print(''.join([str(i) for i in diff(a, b) if i.state == remove]))
print('---------------------------------------------------------')
print('Diff evaluates false if it\'s empty or if there are no changes')
empty_diff = diff([], [])
diff_with_no_changes = diff('abc', 'abc')
print('bool({}) == {}'.format(empty_diff, bool(empty_diff)))
print('bool({}) == {}'.format(diff_with_no_changes, bool(diff_with_no_changes)))
print('---------------------------------------------------------')
print('Inspecting diff properties')
a = {'a': 3}
b = {'a': 4}
d_nested = diff([1, a], [1, b])
print('Depth of outer diff is {}'.format(d_nested.depth))
print('Depth of inner diff is {}'.format(d_nested[1].item.depth))
print('---------------------------------------------------------')
print('Type of outer diff is {}'.format(d_nested.type))
print('Type of inner diff is {}'.format(d_nested[1].item.type))
| from diffr import diff, Diff, unchanged, insert, remove, changed
a = 'change1 same change2'
b = 'modify1 same modify2'
d = diff(a, b)
print('---------------------------------------------------------')
print('The full diff\n')
# the displayed diff
print(d)
print('---------------------------------------------------------')
# the whole diff
print('---------------------------------------------------------')
print('can access the full diff\n')
print(''.join([str(i) for i in d.diffs]))
print('---------------------------------------------------------')
print('---------------------------------------------------------')
print('diff item states\n')
# inspect diff item state
print('item {} at index {} is a removal: {}'.format(
str(d.diffs[0]),
0,
d.diffs[0].state == remove)
)
print('---------------------------------------------------------')
print('---------------------------------------------------------')
print('diff item context\n')
print(
'middle unchanged portion of diff = "{}"'.format(
''.join([str(i) for i in d.diffs[13:19]])
)
)
print('---------------------------------------------------------')
print('use context attribute to slice the data structures\n')
a_start, _, b_start, _ = d.diffs[13].context
_, a_end, _, b_end = d.diffs[18].context
print(
'a context slice: "{}", b context slice: "{}"'.format(
a[a_start:a_end], b[b_start:b_end]))
print('---------------------------------------------------------')
print('inspect context blocks\n')
cb1, cb2 = d.context_blocks
a1_start, a1_end, b1_start, b1_end = cb1.context
a2_start, a2_end, b2_start, b2_end = cb2.context
print(
'first context block: a = "{}", b = "{}"'.format(
a[a1_start: a1_end], b[b1_start: b1_end]
)
)
print(
'last context block: a = "{}", b = "{}"'.format(
a[a2_start: a2_end], b[b2_start: b2_end]
)
)
print('---------------------------------------------------------')
print('diff comparison\n')
a = {'a' : 3}
b = {'a' : 4}
d_nested = diff([1, a], [1, b])
d = diff(a, b)
print(d)
print(d_nested)
print(d == d_nested.diffs[1].item)
print('---------------------------------------------------------')
| mit | Python |
59e18de589f00d7cb3d24b8fcf05f4f43c0c90f9 | make example comput. easier | sorig/shogun,karlnapf/shogun,Saurabh7/shogun,Saurabh7/shogun,Saurabh7/shogun,shogun-toolbox/shogun,lambday/shogun,geektoni/shogun,besser82/shogun,Saurabh7/shogun,Saurabh7/shogun,lisitsyn/shogun,geektoni/shogun,geektoni/shogun,shogun-toolbox/shogun,besser82/shogun,lambday/shogun,lisitsyn/shogun,geektoni/shogun,sorig/shogun,shogun-toolbox/shogun,karlnapf/shogun,sorig/shogun,Saurabh7/shogun,karlnapf/shogun,lambday/shogun,karlnapf/shogun,besser82/shogun,besser82/shogun,besser82/shogun,lisitsyn/shogun,karlnapf/shogun,besser82/shogun,shogun-toolbox/shogun,sorig/shogun,geektoni/shogun,lisitsyn/shogun,karlnapf/shogun,lambday/shogun,Saurabh7/shogun,lisitsyn/shogun,sorig/shogun,lambday/shogun,sorig/shogun,Saurabh7/shogun,shogun-toolbox/shogun,Saurabh7/shogun,geektoni/shogun,lambday/shogun,shogun-toolbox/shogun,lisitsyn/shogun | examples/python_modular/custom_kernel.py | examples/python_modular/custom_kernel.py | from numpy import *
from numpy.random import rand
from shogun.Features import RealFeatures, Labels
from shogun.Kernel import CustomKernel
from shogun.Classifier import LibSVM
# Demo: train a LibSVM classifier on a user-supplied (custom) kernel matrix.
C=1
dim=7
# Random +/-1 labels for `dim` examples.
lab=sign(2*rand(dim) - 1)
data=rand(dim, dim)
# NOTE(review): `symdata` is computed but never used - the kernel below is
# built from the raw `data` matrix; confirm whether symmetrization was intended.
symdata=data*data.T
kernel=CustomKernel()
kernel.set_full_kernel_matrix_from_full(data)
labels=Labels(lab)
svm=LibSVM(C, kernel, labels)
svm.train()
# Predicted labels on the training data.
out=svm.classify().get_labels()
| from numpy import *
from numpy.random import rand
from shogun.Features import RealFeatures, Labels
from shogun.Kernel import CustomKernel
from shogun.Classifier import LibSVM
C=1
dim=7
lab=sign(2*rand(dim) - 1)
data=rand(dim, dim)
symdata=data+data.T
kernel=CustomKernel()
kernel.set_full_kernel_matrix_from_full(data)
labels=Labels(lab)
svm=LibSVM(C, kernel, labels)
svm.train()
out=svm.classify().get_labels()
| bsd-3-clause | Python |
9a4983912f9f6130e91c0dd7e4f40165f41e42ba | Remove encoding line | mitmedialab/MediaCloud-Web-Tools,mitmedialab/MediaCloud-Web-Tools,mitmedialab/MediaCloud-Web-Tools,mitmedialab/MediaCloud-Web-Tools | server/views/topics/permissions.py | server/views/topics/permissions.py | import logging
from flask import jsonify, request
import flask_login
import json
from mediacloud.error import MCException
from server import app
from server.util.request import json_error_response, api_error_handler
from server.auth import user_admin_mediacloud_client
logger = logging.getLogger(__name__)
@app.route('/api/topics/<topics_id>/permissions/list', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_permissions_list(topics_id):
    """Return the current permission list for a topic as JSON."""
    user_mc = user_admin_mediacloud_client()
    results = user_mc.topicPermissionsList(topics_id)
    return jsonify(results)
@app.route('/api/topics/<topics_id>/permissions/update', methods=['PUT'])
@flask_login.login_required
@api_error_handler
def topic_update_permission(topics_id):
    """Replace a topic's permission list with the one posted in the request.

    Emails absent from the new list are revoked (set to 'none'); the rest
    are set to the requested level.  Returns the updated list on success,
    or {'success': 0, ...} when an email is unknown to the backend.
    """
    user_mc = user_admin_mediacloud_client()
    new_permissions = json.loads(request.form["permissions"])
    current_permissions = user_mc.topicPermissionsList(topics_id)['permissions']
    # first remove any people that you need to
    new_emails = [p['email'] for p in new_permissions]
    current_emails = [p['email'] for p in current_permissions]
    for email in current_emails:
        if email not in new_emails:
            user_mc.topicPermissionsUpdate(topics_id, email, 'none')
    # now update the remaining permissions
    for permission in new_permissions:
        if permission['permission'] not in ['read', 'write', 'admin', 'none']:
            return json_error_response('Invalid permission value')
        try:
            user_mc.topicPermissionsUpdate(topics_id, permission['email'], permission['permission'])
        except MCException as e:
            # show a nice error if they type the email wrong
            if 'Unknown email' in e.message:
                return jsonify({'success': 0, 'results': e.message})
    return jsonify({'success': 1, 'results': user_mc.topicPermissionsList(topics_id)})
| # -*- coding: utf-8 -*-
import logging
from flask import jsonify, request
import flask_login
import json
from mediacloud.error import MCException
from server import app
from server.util.request import json_error_response, api_error_handler
from server.auth import user_admin_mediacloud_client
logger = logging.getLogger(__name__)
@app.route('/api/topics/<topics_id>/permissions/list', methods=['GET'])
@flask_login.login_required
@api_error_handler
def topic_permissions_list(topics_id):
user_mc = user_admin_mediacloud_client()
results = user_mc.topicPermissionsList(topics_id)
return jsonify(results)
@app.route('/api/topics/<topics_id>/permissions/update', methods=['PUT'])
@flask_login.login_required
@api_error_handler
def topic_update_permission(topics_id):
user_mc = user_admin_mediacloud_client()
new_permissions = json.loads(request.form["permissions"])
current_permissions = user_mc.topicPermissionsList(topics_id)['permissions']
# first remove any people that you need to
new_emails = [p['email'] for p in new_permissions]
current_emails = [p['email'] for p in current_permissions]
for email in current_emails:
if email not in new_emails:
user_mc.topicPermissionsUpdate(topics_id, email, 'none')
# now update the remaining permissions
for permission in new_permissions:
if permission['permission'] not in ['read', 'write', 'admin', 'none']:
return json_error_response('Invalid permission value')
try:
user_mc.topicPermissionsUpdate(topics_id, permission['email'], permission['permission'])
except MCException as e:
# show a nice error if they type the email wrong
if 'Unknown email' in e.message:
return jsonify({'success': 0, 'results': e.message})
return jsonify({'success': 1, 'results': user_mc.topicPermissionsList(topics_id)})
| apache-2.0 | Python |
bf67c53c5d2d41895bdb8896bb4b3e4cebeea403 | Bump to 0.4.7 | dinoperovic/djangocms-blogit,dinoperovic/djangocms-blogit,dinoperovic/djangocms-blogit | blogit/__init__.py | blogit/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Release logic:
1. Remove ".devX" from __version__ (below)
2. git add blogit/__init__.py
3. git commit -m 'Bump to <version>'
4. git tag <version>
5. git push && git push --tags
6. python setup.py sdist upload
7. bump the __version__, append ".dev0"
8. git add blogit/__init__.py
9. git commit -m 'Start with <version>'
10. git push
"""
__version__ = '0.4.7'
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Release logic:
1. Remove ".devX" from __version__ (below)
2. git add blogit/__init__.py
3. git commit -m 'Bump to <version>'
4. git tag <version>
5. git push && git push --tags
6. python setup.py sdist upload
7. bump the __version__, append ".dev0"
8. git add blogit/__init__.py
9. git commit -m 'Start with <version>'
10. git push
"""
__version__ = '0.4.7.dev0'
| bsd-3-clause | Python |
5b1459ed130ff3868f63d274b36a5d73c880ef9f | Create 'latest' reference to newest patch. | davidstrauss/branch-and-commit-to-bare-repo | branchandcommit.py | branchandcommit.py | import tempfile
import pygit2
def add_commit(repo, ref, msg, author, path=None, content=None, parent=None):
tb = repo.TreeBuilder()
if path is not None:
blob = repo.create_blob(file_content)
tb.insert(file_path, blob, pygit2.GIT_FILEMODE_BLOB)
parents = []
if parent is not None:
parents = [parent]
precommit = tb.write()
commit = repo.create_commit(ref, author, author, msg, precommit, parents)
# Set incoming parameters. These would be in the web request.
committer_name = 'Drupal Druplicon'
committer_email = 'druplicon@example.com'
git_short_name = 'druplicon'
issue_number = 123456
comment_number = 45678
file_path = 'hello.txt'
file_content = 'Hello, world!'
commit_message = 'Adding hello.txt.'
# Create an author.
author = pygit2.Signature(committer_name, committer_email)
# Preliminary setup for proof-of-concept purposes. Creates a master
# branch with a single, empty commit.
tmpdir = tempfile.TemporaryDirectory(suffix='.git', prefix='branchandcommit-')
print('Creating bare repository: {}'.format(tmpdir.name))
repo = pygit2.init_repository(tmpdir.name, True)
# This should always be true for the temporary repo, but it's a good check.
if repo.head_is_unborn:
add_commit(repo, 'refs/heads/master', 'Initial commit.', author)
# Create the branch (if non-existent).
ref= 'namespaces/issue_{}/refs/heads/{}/comment_{}'.format(
issue_number,
git_short_name,
comment_number)
print('Reference name: {}'.format(ref))
branch = repo.create_branch(ref, repo.head.get_object(), True)
# Commit the change.
#parent = repo.head.target
#parent = None
add_commit(repo, ref, commit_message, author, file_path, file_content, branch.target)
repo.create_reference('refs/heads/latest', ref)
input("Press Enter to clean up the temporary git repository and exit...")
| import tempfile
import pygit2
def add_commit(repo, ref, msg, author, path=None, content=None, parent=None):
tb = repo.TreeBuilder()
if path is not None:
blob = repo.create_blob(file_content)
tb.insert(file_path, blob, pygit2.GIT_FILEMODE_BLOB)
parents = []
if parent is not None:
parents = [parent]
precommit = tb.write()
commit = repo.create_commit(ref, author, author, msg, precommit, parents)
# Set incoming parameters. These would be in the web request.
committer_name = 'Drupal Druplicon'
committer_email = 'druplicon@example.com'
git_short_name = 'druplicon'
issue_number = 123456
comment_number = 45678
file_path = 'hello.txt'
file_content = 'Hello, world!'
commit_message = 'Adding hello.txt.'
# Create an author.
author = pygit2.Signature(committer_name, committer_email)
# Preliminary setup for proof-of-concept purposes. Creates a master
# branch with a single, empty commit.
tmpdir = tempfile.TemporaryDirectory(suffix='.git', prefix='branchandcommit-')
print('Creating bare repository: {}'.format(tmpdir.name))
repo = pygit2.init_repository(tmpdir.name, True)
# This should always be true for the temporary repo, but it's a good check.
if repo.head_is_unborn:
add_commit(repo, 'refs/heads/master', 'Initial commit.', author)
# Create the branch (if non-existent).
ref= 'namespaces/issue_{}/refs/heads/{}/comment_{}'.format(
issue_number,
git_short_name,
comment_number)
print('Reference name: {}'.format(ref))
branch = repo.create_branch(ref, repo.head.get_object(), True)
# Commit the change.
#parent = repo.head.target
#parent = None
add_commit(repo, ref, commit_message, author, file_path, file_content, branch.target)
input("Press Enter to clean up the temporary git repository and exit...")
| mit | Python |
29a1828afee59a23fe07636cf99863d3dda1c3a4 | Update python test | enjin/contracts | solidity/python/FormulaTestSale.py | solidity/python/FormulaTestSale.py | from sys import argv
from decimal import Decimal
from random import randrange
from Formula import calculateSaleReturn
def formulaTest(supply,reserve,ratio,amount):
fixed = Decimal(calculateSaleReturn(supply,reserve,ratio,amount))
real = Decimal(reserve)*(1-(1-Decimal(amount)/Decimal(supply))**(100/Decimal(ratio)))
if fixed > real:
error = []
error.append('error occurred on:')
error.append('supply = {}'.format(supply))
error.append('reserve = {}'.format(reserve))
error.append('ratio = {}'.format(ratio))
error.append('amount = {}'.format(amount))
error.append('fixed = {}'.format(fixed))
error.append('real = {}'.format(real))
raise BaseException('\n'.join(error))
return fixed/real
size = int(argv[1]) if len(argv) > 1 else 0
if size == 0:
size = input('How many test-cases would you like to execute? ')
n = 0
worstAccuracy = 1
numOfFailures = 0
while n < size: # avoid creating a large range in memory
supply = randrange(2,10**26)
reserve = randrange(1,10**23)
ratio = randrange(1,99)
amount = randrange(1,supply)
try:
accuracy = formulaTest(supply,reserve,ratio,amount)
worstAccuracy = min(worstAccuracy,accuracy)
except Exception,error:
accuracy = 0
numOfFailures += 1
except BaseException,error:
print error
break
print 'Test #{}: accuracy = {:.12f}, worst accuracy = {:.12f}, num of failures = {}'.format(n,accuracy,worstAccuracy,numOfFailures)
n += 1
| from sys import argv
from decimal import Decimal
from random import randrange
from Formula import calculateSaleReturn
def formulaTest(_supply, _reserveBalance, _reserveRatio, _amount):
fixed = calculateSaleReturn(_supply, _reserveBalance, _reserveRatio, _amount)
real = Decimal(_reserveBalance)*(1-(1-Decimal(_amount)/Decimal(_supply))**(100/Decimal(_reserveRatio)))
if fixed > real:
error = []
error.append('error occurred on:')
error.append('_supply = {}'.format(_supply))
error.append('_reserveBalance = {}'.format(_reserveBalance))
error.append('_reserveRatio = {}'.format(_reserveRatio))
error.append('_amount = {}'.format(_amount))
error.append('fixed result = {}'.format(fixed))
error.append('real result = {}'.format(real))
raise BaseException('\n'.join(error))
return float(fixed / real)
size = int(argv[1]) if len(argv) > 1 else 0
if size == 0:
size = input('How many test-cases would you like to execute? ')
n = 0
worstAccuracy = 1
numOfFailures = 0
while n < size: # avoid creating a large range in memory
_supply = randrange(1,10**26)
_reserveBalance = randrange(1,10**23)
_reserveRatio = randrange(1,99)
_amount = randrange(1,_supply)
try:
accuracy = formulaTest(_supply, _reserveBalance, _reserveRatio, _amount)
worstAccuracy = min(worstAccuracy,accuracy)
except Exception,error:
accuracy = 0
numOfFailures += 1
except BaseException,error:
print error
break
print 'Test #{}: accuracy = {:.12f}, worst accuracy = {:.12f}, num of failures = {}'.format(n,accuracy,worstAccuracy,numOfFailures)
n += 1
| apache-2.0 | Python |
6f6574d338fc1601e3504719ac303a4da8892e4f | Remove references to removed classes [#134476913] | janebeckman/gpdb,janebeckman/gpdb,ashwinstar/gpdb,rvs/gpdb,Chibin/gpdb,Chibin/gpdb,50wu/gpdb,kaknikhil/gpdb,greenplum-db/gpdb,yuanzhao/gpdb,yuanzhao/gpdb,ashwinstar/gpdb,Quikling/gpdb,lisakowen/gpdb,lisakowen/gpdb,Quikling/gpdb,xinzweb/gpdb,rvs/gpdb,Chibin/gpdb,50wu/gpdb,rvs/gpdb,Chibin/gpdb,Quikling/gpdb,lisakowen/gpdb,yuanzhao/gpdb,xinzweb/gpdb,jmcatamney/gpdb,Chibin/gpdb,xinzweb/gpdb,adam8157/gpdb,yuanzhao/gpdb,Chibin/gpdb,cjcjameson/gpdb,greenplum-db/gpdb,Chibin/gpdb,cjcjameson/gpdb,xinzweb/gpdb,kaknikhil/gpdb,Quikling/gpdb,jmcatamney/gpdb,kaknikhil/gpdb,xinzweb/gpdb,edespino/gpdb,Quikling/gpdb,ashwinstar/gpdb,adam8157/gpdb,jmcatamney/gpdb,lisakowen/gpdb,janebeckman/gpdb,yuanzhao/gpdb,jmcatamney/gpdb,ashwinstar/gpdb,yuanzhao/gpdb,adam8157/gpdb,janebeckman/gpdb,adam8157/gpdb,edespino/gpdb,greenplum-db/gpdb,xinzweb/gpdb,jmcatamney/gpdb,Quikling/gpdb,janebeckman/gpdb,lintzc/gpdb,Chibin/gpdb,lintzc/gpdb,Quikling/gpdb,cjcjameson/gpdb,edespino/gpdb,kaknikhil/gpdb,cjcjameson/gpdb,xinzweb/gpdb,rvs/gpdb,Quikling/gpdb,edespino/gpdb,greenplum-db/gpdb,ashwinstar/gpdb,janebeckman/gpdb,rvs/gpdb,xinzweb/gpdb,cjcjameson/gpdb,lintzc/gpdb,cjcjameson/gpdb,yuanzhao/gpdb,kaknikhil/gpdb,cjcjameson/gpdb,edespino/gpdb,ashwinstar/gpdb,lisakowen/gpdb,rvs/gpdb,ashwinstar/gpdb,yuanzhao/gpdb,greenplum-db/gpdb,lintzc/gpdb,edespino/gpdb,janebeckman/gpdb,lintzc/gpdb,Quikling/gpdb,greenplum-db/gpdb,50wu/gpdb,lintzc/gpdb,yuanzhao/gpdb,janebeckman/gpdb,edespino/gpdb,janebeckman/gpdb,rvs/gpdb,jmcatamney/gpdb,edespino/gpdb,lisakowen/gpdb,ashwinstar/gpdb,adam8157/gpdb,kaknikhil/gpdb,rvs/gpdb,cjcjameson/gpdb,kaknikhil/gpdb,Chibin/gpdb,50wu/gpdb,janebeckman/gpdb,rvs/gpdb,greenplum-db/gpdb,jmcatamney/gpdb,lintzc/gpdb,rvs/gpdb,50wu/gpdb,greenplum-db/gpdb,adam8157/gpdb,Chibin/gpdb,50wu/gpdb,adam8157/gpdb,cjcjameson/gpdb,lintzc/gpdb,jmcatamney/gpdb,lisakowen/gpdb,lintzc/gpdb,kaknikhil/gpdb,kaknikhil/gpdb,50wu/gp
db,lisakowen/gpdb,kaknikhil/gpdb,cjcjameson/gpdb,edespino/gpdb,edespino/gpdb,Quikling/gpdb,adam8157/gpdb,yuanzhao/gpdb,50wu/gpdb | concourse/scripts/test_gpdb.py | concourse/scripts/test_gpdb.py | #!/usr/bin/python2
import optparse
import subprocess
import sys
import shutil
from builds.GpBuild import GpBuild
def install_gpdb(dependency_name):
status = subprocess.call("mkdir -p /usr/local/gpdb", shell=True)
if status:
return status
status = subprocess.call(
"tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb",
shell=True)
return status
def create_gpadmin_user():
status = subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
if status:
return status
def copy_output():
shutil.copyfile("gpdb_src/src/test/regress/regression.diffs", "icg_output/regression.diffs")
shutil.copyfile("gpdb_src/src/test/regress/regression.out", "icg_output/regression.out")
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen', 'planner'])
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
parser.add_option("--gpdb_name", dest="gpdb_name")
(options, args) = parser.parse_args()
if options.mode == 'orca':
ciCommon = GpBuild(options.mode)
elif options.mode == 'planner':
ciCommon = GpBuild(options.mode)
status = ciCommon.install_system_deps()
if status:
return status
for dependency in args:
status = ciCommon.install_dependency(dependency)
if status:
return status
status = install_gpdb(options.gpdb_name)
if status:
return status
status = ciCommon.configure()
if status:
return status
status = create_gpadmin_user()
if status:
return status
status = ciCommon.icg()
if status:
copy_output()
return status
if __name__ == "__main__":
sys.exit(main())
| #!/usr/bin/python2
import optparse
import subprocess
import sys
import shutil
from builds import GpBuild, GpcodegenBuild, GporcacodegenBuild
def install_gpdb(dependency_name):
status = subprocess.call("mkdir -p /usr/local/gpdb", shell=True)
if status:
return status
status = subprocess.call(
"tar -xzf " + dependency_name + "/*.tar.gz -C /usr/local/gpdb",
shell=True)
return status
def create_gpadmin_user():
status = subprocess.call("gpdb_src/concourse/scripts/setup_gpadmin_user.bash")
if status:
return status
def copy_output():
shutil.copyfile("gpdb_src/src/test/regress/regression.diffs", "icg_output/regression.diffs")
shutil.copyfile("gpdb_src/src/test/regress/regression.out", "icg_output/regression.out")
def main():
parser = optparse.OptionParser()
parser.add_option("--build_type", dest="build_type", default="RELEASE")
parser.add_option("--mode", choices=['orca', 'codegen', 'orca_codegen', 'planner'])
parser.add_option("--compiler", dest="compiler")
parser.add_option("--cxxflags", dest="cxxflags")
parser.add_option("--output_dir", dest="output_dir", default="install")
parser.add_option("--gpdb_name", dest="gpdb_name")
(options, args) = parser.parse_args()
if options.mode == 'orca':
ciCommon = GpBuild(options.mode)
elif options.mode == 'planner':
ciCommon = GpBuild(options.mode)
elif options.mode == 'codegen':
ciCommon = GpcodegenBuild()
elif options.mode == 'orca_codegen':
ciCommon = GporcacodegenBuild()
status = ciCommon.install_system_deps()
if status:
return status
for dependency in args:
status = ciCommon.install_dependency(dependency)
if status:
return status
status = install_gpdb(options.gpdb_name)
if status:
return status
status = ciCommon.configure()
if status:
return status
status = create_gpadmin_user()
if status:
return status
status = ciCommon.icg()
if status:
copy_output()
return status
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | Python |
39061f2ce81563d5a60f1d2359c9eb6f8b2a50e3 | Disable buggy recursion detection | sprymix/importkit | importkit/yaml/validator/types/choice.py | importkit/yaml/validator/types/choice.py | ##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import copy
from .composite import CompositeType
from ..error import SchemaValidationError
class ChoiceType(CompositeType):
__slots__ = ['choice', 'checked']
def __init__(self, schema):
super().__init__(schema)
self.choice = None
self.checked = {}
def load(self, dct):
super().load(dct)
self.choice = []
for choice in dct['choice']:
self.choice.append(self.schema._build(choice))
def check(self, node):
super().check(node)
"""
did = id(node)
if did in self.checked:
return node
self.checked[did] = True
"""
errors = []
tmp = None
for choice in self.choice:
try:
tmp = copy.deepcopy(node)
tmp = choice.check(tmp)
except SchemaValidationError as error:
errors.append(str(error))
else:
break
else:
raise SchemaValidationError('Choice block errors:\n' + '\n'.join(errors), node)
node.value = tmp.value
node.tag = tmp.tag
node.tags = getattr(tmp, 'tags', None)
return node
| ##
# Copyright (c) 2008-2010 Sprymix Inc.
# All rights reserved.
#
# See LICENSE for details.
##
import copy
from .composite import CompositeType
from ..error import SchemaValidationError
class ChoiceType(CompositeType):
__slots__ = ['choice', 'checked']
def __init__(self, schema):
super().__init__(schema)
self.choice = None
self.checked = {}
def load(self, dct):
super().load(dct)
self.choice = []
for choice in dct['choice']:
self.choice.append(self.schema._build(choice))
def check(self, node):
super().check(node)
did = id(node)
if did in self.checked:
return node
self.checked[did] = True
errors = []
tmp = None
for choice in self.choice:
try:
tmp = copy.deepcopy(node)
tmp = choice.check(tmp)
except SchemaValidationError as error:
errors.append(str(error))
else:
break
else:
raise SchemaValidationError('Choice block errors:\n' + '\n'.join(errors), node)
node.value = tmp.value
node.tag = tmp.tag
node.tags = getattr(tmp, 'tags', None)
return node
| mit | Python |
c204b94927000f6124d3ddf66bd932be70ecd1fe | add version 0.14.0 to r-globals (#21026) | LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/r-globals/package.py | var/spack/repos/builtin/packages/r-globals/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGlobals(RPackage):
"""Identify Global Objects in R Expressions
Identifies global ("unknown" or "free") objects in R expressions by code
inspection using various strategies, e.g. conservative or liberal. The
objective of this package is to make it as simple as possible to identify
global objects for the purpose of exporting them in distributed compute
environments."""
homepage = "https://github.com/HenrikBengtsson/globals"
url = "https://cloud.r-project.org/src/contrib/globals_0.12.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/globals"
version('0.14.0', sha256='203dbccb829ca9cc6aedb6f5e79cb126ea31f8dd379dff9111ec66e3628c32f3')
version('0.12.4', sha256='7985356ad75afa1f795f8267a20dee847020c0207252dc075c614cef55d8fe6b')
depends_on('r@3.1.2:', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run'))
| # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class RGlobals(RPackage):
"""Identifies global ("unknown" or "free") objects in R expressions by code
inspection using various strategies, e.g. conservative or liberal. The
objective of this package is to make it as simple as possible to identify
global objects for the purpose of exporting them in distributed compute
environments."""
homepage = "https://github.com/HenrikBengtsson/globals"
url = "https://cloud.r-project.org/src/contrib/globals_0.12.4.tar.gz"
list_url = "https://cloud.r-project.org/src/contrib/Archive/globals"
version('0.12.4', sha256='7985356ad75afa1f795f8267a20dee847020c0207252dc075c614cef55d8fe6b')
depends_on('r@3.1.2:', type=('build', 'run'))
depends_on('r-codetools', type=('build', 'run'))
| lgpl-2.1 | Python |
18c7dce57dd49d3ad1c5392f901ecce2c1ea4e8c | correct formatting | cwacek/python-jsonschema-objects | test/test_wrong_exception_protocolbase_getitem.py | test/test_wrong_exception_protocolbase_getitem.py | import pytest
import python_jsonschema_objects as pjo
@pytest.fixture
def base_schema():
return {
'title': 'example',
'type': 'object',
"additionalProperties": False,
"properties": {
"dictLike": {
"additionalProperties": {
"type": "integer"
},
"type": "object"
}
}
}
def test_wrong_exception_protocolbase_getitem(base_schema):
"""
to declare a dict like object in json-schema, we are supposed
to declare it as an object of additional properties.
When trying to use it as dict, for instance testing if a key is inside
the dictionary, methods like __contains__ in the ProtocolBase expect
__getitem__ to raise a KeyError. getitem calls __getattr__ without any
exception handling, which raises an AttributeError (necessary for proper
behaviour of getattr, for instance).
Solution found is to handle AttributeError in getitem and to raise KeyError
"""
builder = pjo.ObjectBuilder(base_schema)
ns = builder.build_classes()
t = ns.Example(dictLike={'a': 0, 'b': 1})
t.validate()
assert 'a' in t.dictLike
assert not 'c' in t.dictLike
assert getattr(t, 'not_present', None) is None
if __name__ == '__main__':
test_wrong_exception_protocolbase_getitem(base_schema())
| import pytest
import python_jsonschema_objects as pjo
import json
@pytest.fixture
def base_schema():
return {
'title': 'example',
'type': 'object',
"additionalProperties": False,
"properties": {
"dictLike": {
"additionalProperties": {
"type": "integer"
},
"type": "object"
}
}
}
def test_wrong_exception_protocolbase_getitem(base_schema):
"""
to declare a dict like object in json-schema, we are supposed
to declare it as an object of additional properties.
When trying to use it as dict, for instance testing if a key is inside
the dictionary, methods like __contains__ in the ProtocolBase expect
__getitem__ to raise a KeyError. getitem calls __getattr__ without any
exception handling, which raises an AttributeError (necessary for proper
behaviour of getattr, for instance).
Solution found is to handle AttributeError in getitem and to raise KeyError
"""
builder = pjo.ObjectBuilder(base_schema)
ns = builder.build_classes()
t = ns.Example(dictLike={'a': 0,'b': 1})
t.validate()
assert 'a' in t.dictLike
assert not 'c' in t.dictLike
assert getattr(t,'not_present',None) == None
if __name__ == '__main__':
test_wrong_exception_protocolbase_getitem(base_schema())
| mit | Python |
7955405a2377468804d4aacd5d476c0dc617f0e1 | improve unit tests for version() (#59626) | thaim/ansible,thaim/ansible | test/units/cli/arguments/test_optparse_helpers.py | test/units/cli/arguments/test_optparse_helpers.py | # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import sys
import pytest
from ansible import constants as C
from ansible.cli.arguments import option_helpers as opt_help
from ansible import __path__ as ansible_path
from ansible.release import __version__ as ansible_version
if C.DEFAULT_MODULE_PATH is None:
cpath = u'Default w/o overrides'
else:
cpath = C.DEFAULT_MODULE_PATH
FAKE_PROG = u'ansible-cli-test'
VERSION_OUTPUT = opt_help.version(prog=FAKE_PROG)
@pytest.mark.parametrize(
'must_have', [
FAKE_PROG + u' %s' % ansible_version,
u'config file = %s' % C.CONFIG_FILE,
u'configured module search path = %s' % cpath,
u'ansible python module location = %s' % ':'.join(ansible_path),
u'executable location = ',
u'python version = %s' % ''.join(sys.version.splitlines()),
]
)
def test_option_helper_version(must_have):
assert must_have in VERSION_OUTPUT
| # -*- coding: utf-8 -*-
# Copyright: (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.cli.arguments import option_helpers as opt_help
def test_version():
ver = opt_help.version('ansible-cli-test')
assert 'ansible-cli-test' in ver
assert 'python version' in ver
| mit | Python |
dc3eb4e077ffd61d6fea785d9aaeee4e3ce4b0b3 | add util for getting major/minor device numbers | cloudsigma/cgroupspy,islavov/cgroupspy | cgroupspy/utils.py | cgroupspy/utils.py | """
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CLOUDSIGMA AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os
def walk_tree(root):
yield root
for child in root.children:
for el in walk_tree(child):
yield el
def get_device_major_minor(dev_path):
"""
Returns the device (major, minor) tuple for simplicity
:param dev_path: Path to the device
:return: (device major, device minor)
:rtype: (int, int)
"""
stat = os.lstat(dev_path)
return os.major(stat.st_rdev), os.minor(stat.st_rdev)
| """
Copyright (c) 2014, CloudSigma AG
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the CloudSigma AG nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL CLOUDSIGMA AG BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
def walk_tree(root):
yield root
for child in root.children:
for el in walk_tree(child):
yield el
| bsd-3-clause | Python |
8587c13f298714d68f26acf7e5a2beb457addf99 | Update message.py | tnstrssnr/telegram-chatter | chatter/message.py | chatter/message.py | #!usr/bin/python3
"""
Repräsentiert ein Nachrichten-Objekt.
Attribute:
id -- einmalige, zur Identifikation benötigte, Nachrichten-ID
sender -- der Sender der Nachricht (User-Objekt)
datum -- Datum/Uhrzeit an dem die NAchricht gesendet wurde (UNIX-Zeitformat)
chat -- Chat, an den die Nachricht gesandt worden ist (Chat-Objekt)
antwort -- Originalnachricht, falls die Nachricht eine Antwort war, sonst 'None'
inhalt -- Der Inhalt der Nachricht
"""
from chatter.chat import Chat
from chatter.user import User
__author__ = 'Tina Maria Stroessner'
__license__ = 'MIT'
__version__ = 'v1.0'
class Nachricht(object):
def __init__(self, msg):
self.id = msg["message_id"]
self.sender = User(msg["from"])
self.datum = msg["date"]
self.chat = Chat(msg["chat"])
try:
self.antwort = Nachricht(msg["reply_to_message"])
except KeyError:
self.antwort = None
try:
self.inhalt = msg["text"].strip()
except KeyError:
self.inhalt = None
| #!usr/bin/python3
"""
Repräsentiert ein Nachrichten-Objekt.
Attribute:
id -- einmalige, zur Identifikation benötigte, Nachrichten-ID
sender -- der Sender der Nachricht (User-Objekt)
datum -- Datum/Uhrzeit an dem die NAchricht gesendet wurde (UNIX-Zeitformat)
chat -- Chat, an den die Nachricht gesandt worden ist (Chat-Objekt)
antwort -- Originalnachricht, falls die Nachricht eine Antwort war, sonst 'None'
inhalt -- Der Inhalt der Nachricht
"""
from chatter.chat import Chat
from chatter.user import User
__author__ = 'Tina Maria Stroessner'
__license__ = 'MIT'
__version__ = 'v1.0'
class Nachricht(object):
def __init__(self, msg):
self.id = msg["message_id"]
self.sender = User(msg["from"])
self.datum = msg["date"]
self.chat = Chat(msg["chat"])
try:
self.antwort = Nachricht(msg["reply_to_message"])
except KeyError:
self.antwort = None
try:
self.inhalt = msg["text"].strip().lower()
except KeyError:
self.inhalt = None
| mit | Python |
75279f2f19fd0efa338547442b9390ee542009e3 | Add a docstring. | brian-rose/climlab,cjcardinale/climlab,cjcardinale/climlab,brian-rose/climlab,cjcardinale/climlab | climlab/utils/data_source.py | climlab/utils/data_source.py | from __future__ import division, print_function
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
def load_data_source(local_path,
remote_source_list,
open_method,
open_method_kwargs=dict(),
verbose=True):
'''Flexible data retreiver to download and cache the data files locally.
Usage:
```
from climlab.utils.data_source import load_data_source
from xarray import open_dataset
ozonename = 'apeozone_cam3_5_54.nc'
ozonepath = 'http://thredds.atmos.albany.edu:8080/thredds/fileServer/CLIMLAB/ozone/' + ozonename
data, path = load_data_source(local_path=ozonename,
remote_source_list=[ozonepath],
open_method=open_dataset)
print(data)
```
(this makes a local copy of the ozone data file)
The order of operations is
1. Try to read the data directly from ``local_path``
2. If the file doesn't exist then iterate through ``remote_source_list``.
Try to download and save the file to ``local_path`` using http request
If that works then open the data from ``local_path``.
3. As a last resort, try to read the data remotely from URLs in ``remote_source_list``
In all cases the file is opened and read by the user-supplied ``open_method``,
e.g. xarray.open_dataset()
Additional keyword arguments in ``open_method_kwargs`` are passed to ``open_method``.
Quiet all output by passing ``verbose=False``.
Returns:
- ``data`` is the data object returned by the successful call to ``open_method``
- ``path`` is the path that resulted in a successful call to ``open_method``.
'''
try:
path = local_path
data = open_method(path, **open_method_kwargs)
if verbose:
print('Opened data from {}'.format(path))
except FileNotFoundError:
# First try to load from remote sources and cache the file locally
for source in remote_source_list:
try:
urlretrieve(source, local_path)
path = local_path
data = open_method(path, **open_method_kwargs)
if verbose:
print('Data retrieved from {} and saved locally.'.format(source))
break
except Exception:
pass
else:
# as a final resort, try opening the source remotely
for source in remote_source_list:
path = source
data = open_method(path, **open_method_kwargs)
if verbose:
print('Opened data remotely from {}'.format(source))
finally:
return data, path
| from __future__ import division, print_function
try:
from urllib.request import urlretrieve # Python 3
except ImportError:
from urllib import urlretrieve # Python 2
def load_data_source(local_path,
remote_source_list,
open_method,
open_method_kwargs=dict(),
verbose=True):
try:
path = local_path
data = open_method(path, **open_method_kwargs)
if verbose:
print('Opened data from {}'.format(path))
except FileNotFoundError:
# First try to load from remote sources and cache the file locally
for source in remote_source_list:
try:
urlretrieve(source, local_path)
path = local_path
data = open_method(path, **open_method_kwargs)
if verbose:
print('Data retrieved from {} and saved locally.'.format(source))
break
except Exception:
pass
else:
# as a final resort, try opening the source remotely
for source in remote_source_list:
path = source
data = open_method(path, **open_method_kwargs)
if verbose:
print('Opened data remotely from {}'.format(source))
finally:
return data, path
| mit | Python |
c17f3f7867119d327f438ab74c8c779849927db4 | add new parameter code | cf-platform-eng/aws-pcf-quickstart,cf-platform-eng/aws-pcf-quickstart,cf-platform-eng/aws-pcf-quickstart | ci/create-stack.py | ci/create-stack.py | import jinja2
import os
import sys
from subprocess import call
pcfkeypairprivate = os.environ['AWS_CF_PCFKEYPAIRPRIVATE'],
password = os.environ['AWS_CF_PASSWORD'],
domain = os.environ['AWS_CF_DOMAIN'],
hostedzoneid = os.environ['AWS_CF_HOSTEDZONEID'],
sslcertificatearn = os.environ['AWS_CF_SSLCERTIFICATEARN'],
natkeypair = os.environ['AWS_CF_NATKEYPAIR'],
pivnettoken = os.environ['AWS_CF_PIVNETTOKEN'],
with open('ci/parameters.j2.json', 'r') as template_file:
template = template_file.read()
rendered = jinja2.Template(template).render({
"pcfkeypairprivate": pcfkeypairprivate,
"password": password,
"domain": domain,
"hostedzoneid": hostedzoneid,
"sslcertificatearn": sslcertificatearn,
"natkeypair": natkeypair,
"pivnettoken": pivnettoken
})
print("---------------------------")
print(rendered)
print("---------------------------")
with open('parameters.json', 'w') as rendered_file:
rendered_file.write(rendered)
cmd = """
aws cloudformation create-stack \
--stack-name pcf-int-`date +%s` \
--capabilities CAPABILITY_IAM \
--template-body file:///`pwd`/cloudformation/quickstart-template.yml \
--parameters file:///`pwd`/parameters.json
"""
sys.exit(call(cmd, shell=True))
import jinja2
import os
import sys
from subprocess import call
# CloudFormation parameter values from the CI environment.
# NOTE(review): every assignment below ends with a stray trailing comma, so
# each name is bound to a one-element tuple rather than a string.  Harmless
# only because the values are never used -- render() below is called with
# literal placeholder strings instead.
pcfkeypairprivate = os.environ['AWS_CF_PCFKEYPAIRPRIVATE'],
password = os.environ['AWS_CF_PASSWORD'],
domain = os.environ['AWS_CF_DOMAIN'],
hostedzoneid = os.environ['AWS_CF_HOSTEDZONEID'],
sslcertificatearn = os.environ['AWS_CF_SSLCERTIFICATEARN'],
natkeypair = os.environ['AWS_CF_NATKEYPAIR'],
pivnettoken = os.environ['AWS_CF_PIVNETTOKEN'],
# Read the Jinja2 template for the CloudFormation parameter file.
with open('ci/parameters.j2.json', 'r') as template_file:
    template = template_file.read()
# NOTE(review): the template is rendered with literal '{{...}}' placeholder
# strings rather than the environment values gathered above -- presumably an
# intermediate step; confirm intent before relying on the output.
rendered = jinja2.Template(template).render(
    pcfkeypairprivate='{{aws_cf_pcfkeypairprivate}}',
    password='{{aws_cf_password}}',
    domain='{{aws_cf_domain}}',
    hostedzoneid='{{aws_cf_hostedzoneid}}',
    sslcertificatearn='{{aws_cf_sslcertificatearn}}',
    natkeypair='{{aws_cf_natkeypair}}',
    pivnettoken='{{aws_cf_pivnettoken}}'
)
# Echo the rendered parameters into the CI log for debugging.
print("---------------------------")
print(rendered)
print("---------------------------")
with open('parameters.json', 'w') as rendered_file:
    rendered_file.write(rendered)
# Create the stack under a unique timestamped name; exit with the CLI's
# return code so the CI step fails when stack creation fails.
cmd = """
aws cloudformation create-stack \
    --stack-name pcf-int-`date +%s` \
    --capabilities CAPABILITY_IAM \
    --template-body file:///`pwd`/cloudformation/quickstart-template.yml \
    --parameters file:///`pwd`/parameters.json
"""
sys.exit(call(cmd, shell=True))
| apache-2.0 | Python |
718c0008ce9766611fa27e43fa0f6c8c79ea8ba3 | bump to v1.6.3 | Cal-CS-61A-Staff/ok-client | client/__init__.py | client/__init__.py | __version__ = 'v1.6.3'
# Presumably the name of the deployed 'ok' client script/zip -- confirm.
FILE_NAME = 'ok'
import os
import sys
# Search the current working directory first when importing.
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| __version__ = 'v1.6.2'
FILE_NAME = 'ok'
import os
import sys
sys.path.insert(0, '')
# Add directory in which the ok.zip is stored to sys.path.
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
| apache-2.0 | Python |
9ed97ebbd9884971da0c069b49e3a698b337c9b6 | update cltk_dir path | coderbhupendra/cltk,eamonnbell/cltk,mbevila/cltk,cltk/cltk,LBenzahia/cltk,TylerKirby/cltk,diyclassics/cltk,LBenzahia/cltk,marpozzi/cltk,TylerKirby/cltk,D-K-E/cltk,kylepjohnson/cltk | cltk/stem/lemma.py | cltk/stem/lemma.py | """Lemmatize Latin words."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
import importlib.machinery
import os
import re
AVAILABLE_LANGUAGES = ['latin']
class LemmaReplacer(object):  # pylint: disable=R0903
    """Regex-driven lemmatizer for Latin.

    On construction the replacement table for the requested language is
    compiled into ``(pattern, lemma)`` pairs; :meth:`lemmatize` then
    applies every pair, in order, to the input text.
    """
    def __init__(self, language):
        """Compile and cache the replacement patterns for ``language``."""
        self.language = language
        self._patterns = self._setup_language_variables()
    def _setup_language_variables(self):
        """Load REPLACEMENT_PATTERNS for the language and compile them.
        TODO: Turn 'lemma_list' file a simple csv and importing on the fly.
        """
        assert self.language in AVAILABLE_LANGUAGES, \
            'Corpora not available for %s language.' % self.language
        if self.language != 'latin':
            return None
        # The replacement table ships as a Python module inside the
        # user's cltk_data directory; import it and read its list.
        data_file = os.path.expanduser(
            os.path.join('~/cltk_data', self.language,
                         'trained_model/cltk_linguistic_data/lemmata/lemma_list.py'))  # pylint: disable=C0301
        print('Loading lemmata. This may take a minute.')
        lemma_module = importlib.machinery.SourceFileLoader(
            'lemma_list', data_file).load_module()
        compiled = []
        for regex, replacement in lemma_module.REPLACEMENT_PATTERNS:
            compiled.append((re.compile(regex), replacement))
        return compiled
    def lemmatize(self, text):
        """Return ``text`` with every known form replaced by its lemma.
        :type text: str
        :param text: Input text to be lemmatized.
        :rtype : str
        """
        result = text
        for compiled_regex, replacement in self._patterns:
            result = re.subn(compiled_regex, replacement, result)[0]
        return result
| """Lemmatize Latin words."""
__author__ = 'Kyle P. Johnson <kyle@kyle-p-johnson.com>'
__license__ = 'MIT License. See LICENSE.'
import importlib.machinery
import os
import re
AVAILABLE_LANGUAGES = ['latin']
class LemmaReplacer(object):  # pylint: disable=R0903
    """Lemmatize Latin words by replacing input words with corresponding
    values from a replacement list.
    """
    def __init__(self, language):
        """Import replacement patterns into a list."""
        self.language = language
        # Patterns are loaded eagerly, once, at construction time.
        self._patterns = self._setup_language_variables()
    def _setup_language_variables(self):
        """Check for availability of lemmatizer for a language.
        Returns a list of ``(compiled_regex, replacement)`` pairs.
        TODO: Turn 'lemma_list' file a simple csv and importing on the fly.
        """
        # NOTE(review): ``assert`` is stripped under ``python -O``; an
        # explicit raise would be more robust for input validation.
        assert self.language in AVAILABLE_LANGUAGES, \
            'Corpora not available for %s language.' % self.language
        if self.language == 'latin':
            # Replacement data lives under the user's home directory.
            rel_path = os.path.join('~/cltk_data',
                                    self.language,
                                    'lemmata/lemma_list.py')
            path = os.path.expanduser(rel_path)
            print('Loading lemmata. This may take a minute.')
            # Import the data file as a throw-away module in order to
            # read its REPLACEMENT_PATTERNS list.
            loader = importlib.machinery.SourceFileLoader('lemma_list', path)
            module = loader.load_module()
            patterns = module.REPLACEMENT_PATTERNS
            return [(re.compile(regex), repl) for (regex, repl) in patterns]
    def lemmatize(self, text):
        """Replacer of text via the dict.
        :type text: str
        :param text: Input text to be lemmatized.
        :rtype : str
        """
        # Apply every pattern in order; re.subn returns (new_text, count).
        for (pattern, repl) in self._patterns:
            text = re.subn(pattern, repl, text)[0]
        return text
| mit | Python |
289d89bd92ef9c3b0a4d7683bc9c684a915e621d | Update version. | kmee/python-sped,sped-br/python-sped | sped/__init__.py | sped/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.3.4'
| # -*- coding: utf-8 -*-
__version__ = '0.3.2'
| mit | Python |
33913411433068cc358953f8c1211c8abe1c96a2 | implement separate key iterator | vmx/spring,mikewied/spring,mikewied/spring,couchbase/spring,couchbase/spring,vmx/spring | spring/docgen.py | spring/docgen.py | import math
import random
import string
class Iterator(object):
    """Mix-in: an object with a ``next()`` method becomes its own
    (infinite) iterator under the Python 2 iteration protocol."""
    def __iter__(self):
        return self
class RandKeyGen(Iterator):
    """Endless stream of random document keys drawn from ``[1, items]``."""

    def __init__(self, items):
        """Remember the upper bound of the key space."""
        self.items = items

    def next(self):
        """Return a uniformly random key between 1 and ``items``."""
        upper_bound = self.items
        return random.randint(1, upper_bound)
class DocGen(Iterator):
    """Endless stream of synthetic one-entry documents.

    Every call to :meth:`next` yields a dict mapping a random 10-char
    key to a random string whose length varies +/-25% around
    ``avg_size``.
    """

    CHARS = list(string.letters + string.digits)
    SIZE_VARIATION = 0.25  # +/- 25% around the average size
    KEY_LENGTH = 10

    def __init__(self, avg_size):
        self.avg_size = avg_size

    @classmethod
    def _get_variation_coeff(cls):
        """Random multiplier in [1 - SIZE_VARIATION, 1 + SIZE_VARIATION]."""
        low = 1 - cls.SIZE_VARIATION
        high = 1 + cls.SIZE_VARIATION
        return random.uniform(low, high)

    @classmethod
    def _build_short_string(cls):
        """Last KEY_LENGTH characters of the (shuffled) alphabet."""
        return ''.join(cls.CHARS)[-cls.KEY_LENGTH:]

    @classmethod
    def _build_long_string(cls, length):
        """~``length`` characters built from repeated alphabet copies."""
        copies = int(math.ceil(length / len(cls.CHARS)))
        return ''.join(copies * cls.CHARS)[:int(length)]

    def next(self):
        """Return a one-entry dict: random short key -> random long value."""
        # Shuffling CHARS (a class attribute) randomises key and value alike.
        random.shuffle(self.CHARS)
        value_length = self._get_variation_coeff() * self.avg_size
        key = self._build_short_string()
        return {key: self._build_long_string(value_length)}
| import math
import random
import string
class DocGen(object):
    """Infinite generator of synthetic ``(key, document)`` pairs of
    roughly ``avg_size`` characters."""
    # Alphabet keys/values are built from (Python 2: string.letters).
    CHARS = list(string.letters + string.digits)
    SIZE_VARIATION = 0.25 # 25%
    KEY_LENGTH = 10
    def __init__(self, avg_size):
        self.avg_size = avg_size
    def __iter__(self):
        # The instance is its own (infinite) iterator.
        return self
    @classmethod
    def _get_variation_coeff(cls):
        # Uniform multiplier in [0.75, 1.25] applied to avg_size.
        return random.uniform(1 - cls.SIZE_VARIATION, 1 + cls.SIZE_VARIATION)
    @classmethod
    def _build_short_string(cls):
        # Last KEY_LENGTH characters of the (shuffled) alphabet.
        return ''.join(cls.CHARS)[-cls.KEY_LENGTH:]
    @classmethod
    def _build_long_string(cls, length):
        # Repeat the alphabet enough times, then trim to the target length.
        l_int = int(length)
        num_slices = int(math.ceil(length / len(cls.CHARS)))
        rand_chars = num_slices * cls.CHARS
        return ''.join(rand_chars)[:l_int]
    def next(self):
        # Shuffling CHARS (a class attribute) randomises key and value alike.
        random.shuffle(self.CHARS)
        next_length = self._get_variation_coeff() * self.avg_size
        key = self._build_short_string()
        doc = {key: self._build_long_string(next_length)}
        return key, doc
| apache-2.0 | Python |
3fbfb354bedd8a8806c5a8ff37e4c100fcdbf514 | fix 0010 | luoxufeiyan/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,haiyangd/python-show-me-the-code-,Jaccorot/python,renzongxian/Show-Me-the-Code,Jaccorot/python,Show-Me-the-Code/python,Jaccorot/python,renzongxian/Show-Me-the-Code,haiyangd/python-show-me-the-code-,renzongxian/Show-Me-the-Code,starlightme/python,luoxufeiyan/python,Yrthgze/prueba-sourcetree2,haiyangd/python-show-me-the-code-,JiYouMCC/python,haiyangd/python-show-me-the-code-,Yrthgze/prueba-sourcetree2,JiYouMCC/python,Show-Me-the-Code/python,renzongxian/Show-Me-the-Code,Yrthgze/prueba-sourcetree2,starlightme/python,Jaccorot/python,luoxufeiyan/python,Show-Me-the-Code/python,Show-Me-the-Code/python,JiYouMCC/python,starlightme/python,luoxufeiyan/python,Show-Me-the-Code/python,haiyangd/python-show-me-the-code-,Jaccorot/python,starlightme/python,JiYouMCC/python,luoxufeiyan/python,Yrthgze/prueba-sourcetree2,JiYouMCC/python,Show-Me-the-Code/python,starlightme/python,renzongxian/Show-Me-the-Code | Jaccorot/0010/0010.py | Jaccorot/0010/0010.py | #!/usr/bin/python
#coding=utf-8
"""
Problem 0010: use Python to generate an alphabetic CAPTCHA image.
"""
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
# Image settings: RGB canvas, white background, TrueType font file name.
IMAGE_MODE = 'RGB'
IMAGE_BG_COLOR = (255,255,255)
Image_Font = 'arial.ttf'
# Four distinct letters for the CAPTCHA.  The backslash before the line
# break is an escape inside the string literal, so the sample population
# is exactly the 52 ASCII letters.
text = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz\
ABCDEFGHIJKLMNOPQRSTUVWXYZ',4))
def colorRandom():
    """Return a random mid-range RGB triple, each channel in 32..127."""
    channels = [random.randint(32, 127) for _ in range(3)]
    return tuple(channels)
# 'chance' tunes the background noise frequency (%).
def create_identifying_code(strs, width=400, height=200, chance=2):
    """Render ``strs`` as a blurred CAPTCHA image and save it to
    'identifying_code_pic.jpg'.

    A pixel becomes noise when ``chance < randint(1, 100)``, i.e.
    roughly (100 - chance)% of pixels.  NOTE(review): with the default
    of 2 almost every background pixel is noise; the comparison may be
    inverted relative to the intended "noise frequency" -- confirm.
    """
    im = Image.new(IMAGE_MODE, (width, height), IMAGE_BG_COLOR)
    draw = ImageDraw.Draw(im)
    # Draw the background noise.
    for w in xrange(width):
        for h in xrange(height):
            if chance < random.randint(1, 100):
                draw.point((w, h), fill=colorRandom())
    font = ImageFont.truetype(Image_Font, 80)
    font_width, font_height = font.getsize(strs)
    strs_len = len(strs)
    # Centre the whole string, then advance one slot per character.
    x = (width - font_width)/2
    y = (height - font_height)/2
    # Draw the characters one by one.
    for i in strs:
        draw.text((x,y), i, colorRandom(), font)
        x += font_width/strs_len
    # Blur the result to make OCR harder.
    im = im.filter(ImageFilter.BLUR)
    im.save('identifying_code_pic.jpg')
if __name__ == '__main__':
    # Generate and save a CAPTCHA for the random module-level ``text``.
    create_identifying_code(text)
| #!/usr/bin/python
#coding=utf-8
from PIL import Image, ImageDraw, ImageFont, ImageFilter
import random
IMAGE_MODE = 'RGB'
IMAGE_BG_COLOR = (255,255,255)
Image_Font = 'arial.ttf'
text = ''.join(random.sample('abcdefghijklmnopqrstuvwxyz\
ABCDEFGHIJKLMNOPQRSTUVWXYZ',4))
def colorRandom():
    # Random mid-range RGB triple (channels 32..127), dark enough to
    # contrast against the white background.
    return (random.randint(32,127),random.randint(32,127),random.randint(32,127))
# 'chance' tunes the background noise frequency (%).
def create_identifying_code(strs, width=400, height=200, chance=2):
    """Render ``strs`` as a blurred CAPTCHA and save it to
    'identifying_code_pic.jpg'."""
    im = Image.new(IMAGE_MODE, (width, height), IMAGE_BG_COLOR)
    draw = ImageDraw.Draw(im)
    # Draw the background noise.  NOTE(review): a pixel is coloured when
    # chance < randint(1,100), i.e. ~(100 - chance)% of pixels -- this
    # may be inverted relative to the intended "noise frequency".
    for w in xrange(width):
        for h in xrange(height):
            if chance < random.randint(1, 100):
                draw.point((w, h), fill=colorRandom())
    font = ImageFont.truetype(Image_Font, 80)
    font_width, font_height = font.getsize(strs)
    strs_len = len(strs)
    x = (width - font_width)/2
    y = (height - font_height)/2
    # Draw the characters one by one.
    for i in strs:
        draw.text((x,y), i, colorRandom(), font)
        x += font_width/strs_len
    # Blur the result to make OCR harder.
    im = im.filter(ImageFilter.BLUR)
    im.save('identifying_code_pic.jpg')
if __name__ == '__main__':
create_identifying_code(text)
| mit | Python |
5af3b9d31fc3af8f9781e4d63e52dbbcc622c311 | Update TrackingAdafruit16CServoDriver.py | MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/Mats/TrackingAdafruit16CServoDriver.py | home/Mats/TrackingAdafruit16CServoDriver.py | port = "COM4"
# Arduino pin assignments for the pan/tilt/mouth servos.
xServoPin = 2
yServoPin = 3
mouthServoPin = 4
# Create the Tracker service to be able setup how it should connect before starting it
# (Runtime and sleep are presumably provided by the MyRobotLab Jython
# environment -- confirm.)
tracker = Runtime.create("tracker","Tracking")
sleep(1)
servoX = tracker.getX()
servoY = tracker.getY()
# Create the Adafruit16CServoDriver service to be able setup how it should connect before starting it
servodriver = Runtime.create("servodriver","Adafruit16CServoDriver")
sleep(1)
servodriver.attach(tracker.arduino)
servodriver.startService()
servodriver.arduino.connect(port)
# Connect the servos to the pins
print "Attaching servos"
# Pan servo: limit range and invert direction before attaching.
servoX.setMinMax(40,140)
servoX.setInverted(True)
servoX.attach(servodriver,xServoPin)
# Tilt servo: narrower range, also inverted.
servoY.setMinMax(60,120)
servoY.setInverted(True)
servoY.attach(servodriver,yServoPin)
# Start face tracking
# NOTE: the tracker is started only after the servos are attached.
tracker.startService()
tracker.faceDetect()
| port = "COM4"
# Arduino pin assignments for the pan/tilt/mouth servos.
xServoPin = 2
yServoPin = 3
mouthServoPin = 4
# Create the Tracker service to be able setup how it should connect before starting it
# (Runtime and sleep are presumably provided by the MyRobotLab Jython
# environment -- confirm.)
tracker = Runtime.create("tracker","Tracking")
sleep(1)
servoX = tracker.getX()
servoY = tracker.getY()
# Create the Adafruit16CServoDriver service to be able setup how it should connect before starting it
servodriver = Runtime.create("servodriver","Adafruit16CServoDriver")
sleep(1)
servodriver.attach(tracker.arduino)
servodriver.startService()
servodriver.arduino.connect(port)
# NOTE(review): the tracker is started here, BEFORE the servos are
# configured and attached below -- confirm this ordering is intended.
tracker.startService()
# Connect the servos to the pins
print "Attaching servos"
servoX.setMinMax(40,140)
servoX.setInverted(True)
servoX.attach(servodriver,xServoPin)
servoY.setMinMax(60,120)
servoY.setInverted(True)
servoY.attach(servodriver,yServoPin)
# Start face tracking
tracker.faceDetect()
| apache-2.0 | Python |
2ce0c1eee73ee17b5bfe8befa24c2c8f1ace8225 | Fix typo. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Lib/encodings/mbcs.py | Lib/encodings/mbcs.py | """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless MBCS codec backed by the C-level codecs.mbcs_* hooks."""
    # Note: Binding these as C functions will result in the class not
    # converting them to methods. This is intended.
    encode = codecs.mbcs_encode
    decode = codecs.mbcs_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental MBCS encoder; keeps no state between calls."""
    def encode(self, input, final=False):
        # mbcs_encode returns (encoded, length consumed); only the
        # encoded data is needed here.
        return codecs.mbcs_encode(input,self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental MBCS decoder; keeps no state between calls."""
    def decode(self, input, final=False):
        # mbcs_decode returns (decoded, length consumed); only the
        # decoded data is needed here.
        return codecs.mbcs_decode(input,self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    """Stream writer that inherits Codec's encode unchanged."""
    pass
class StreamReader(Codec,codecs.StreamReader):
    """Stream reader that inherits Codec's decode unchanged."""
    pass
class StreamConverter(StreamWriter,StreamReader):
    # NOTE: encode/decode are cross-wired here (encode uses mbcs_decode
    # and vice versa) so the converter translates in the opposite
    # direction to the plain codec -- appears intentional; confirm
    # against CPython's Lib/encodings/mbcs.py.
    encode = codecs.mbcs_decode
    decode = codecs.mbcs_encode
### encodings module API
def getregentry():
    """Return the CodecInfo record used by the ``encodings`` package to
    register this codec under the name 'mbcs'."""
    return codecs.CodecInfo(
        name='mbcs',
        encode=Codec.encode,
        decode=Codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
| """ Python 'mbcs' Codec for Windows
Cloned by Mark Hammond (mhammond@skippinet.com.au) from ascii.py,
which was written by Marc-Andre Lemburg (mal@lemburg.com).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.mbcs_encode
decode = codecs.mbcs_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
    """Incremental MBCS encoder; keeps no state between calls."""
    def encode(self, input, final=False):
        # BUG FIX: 'codecs.mbs_encode' does not exist -- the C hook is
        # 'codecs.mbcs_encode' (the typo raised AttributeError on first
        # use).  Only the encoded data of the (data, consumed) pair is
        # returned.
        return codecs.mbcs_encode(input,self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    """Incremental MBCS decoder; keeps no state between calls."""
    def decode(self, input, final=False):
        # BUG FIX: 'codecs.mbs_decode' does not exist -- the C hook is
        # 'codecs.mbcs_decode' (the typo raised AttributeError on first
        # use).  Only the decoded data of the (data, consumed) pair is
        # returned.
        return codecs.mbcs_decode(input,self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
class StreamConverter(StreamWriter,StreamReader):
encode = codecs.mbcs_decode
decode = codecs.mbcs_encode
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='mbcs',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamreader=StreamReader,
streamwriter=StreamWriter,
)
| mit | Python |
a6d1326241bb43733ddccaef38713513d419282c | Add spacing for cache instance | vim-awesome/vim-awesome,starcraftman/vim-awesome,starcraftman/vim-awesome,divad12/vim-awesome,starcraftman/vim-awesome,vim-awesome/vim-awesome,jonafato/vim-awesome,starcraftman/vim-awesome,jonafato/vim-awesome,shaialon/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome,divad12/vim-awesome,jonafato/vim-awesome,divad12/vim-awesome,vim-awesome/vim-awesome,vim-awesome/vim-awesome,shaialon/vim-awesome,shaialon/vim-awesome,shaialon/vim-awesome,vim-awesome/vim-awesome | web/cache.py | web/cache.py | """Module to hold the instance of the cache"""
# Flask-Cache extension (legacy 'flask.ext' namespace, Flask < 1.0 style).
from flask.ext.cache import Cache
# Single shared instance; presumably initialised against the Flask app
# elsewhere (cache.init_app) -- confirm against the app factory.
cache = Cache()
| """Module to hold the instance of the cache"""
from flask.ext.cache import Cache
cache = Cache()
| mit | Python |
1cd908682f77d93d60f15b26f018eb956840b8f0 | Print scp destination. | Jarn/jarn.mkrelease | jarn/mkrelease/scp.py | jarn/mkrelease/scp.py | from process import Process
from exit import err_exit
class SCP(object):
"""Secure copy abstraction."""
def __init__(self, process=None):
self.process = process or Process()
def has_host(self, location):
colon = location.find(':')
slash = location.find('/')
return colon > 0 and (slash < 0 or slash > colon)
def join(distbase, location):
sep = ''
if distbase and distbase[-1] not in (':', '/'):
sep = '/'
return distbase + sep + location
def run_scp(self, distfile, location):
if not self.process.quiet:
print 'copying to %(location)s' % locals()
rc = self.process.os_system(
'scp "%(distfile)s" "%(location)s"' % locals())
if rc != 0:
err_exit('scp failed')
return rc
| from process import Process
from exit import err_exit
class SCP(object):
    """Secure copy abstraction."""
    def __init__(self, process=None):
        # Allow a pre-configured Process to be injected; default otherwise.
        self.process = process or Process()
    def has_host(self, location):
        # True when a ':' precedes any '/', i.e. 'host:path' style.
        colon = location.find(':')
        slash = location.find('/')
        return colon > 0 and (slash < 0 or slash > colon)
    # NOTE(review): declared inside the class without 'self', so calling
    # instance.join(a, b) shifts the arguments.  Probably intended as a
    # staticmethod or module-level helper -- confirm before relying on it.
    def join(distbase, location):
        sep = ''
        if distbase and distbase[-1] not in (':', '/'):
            sep = '/'
        return distbase + sep + location
    def run_scp(self, distfile, location):
        # Shell out to scp; err_exit aborts the program on failure.
        if not self.process.quiet:
            print 'running scp'
        rc = self.process.os_system(
            'scp "%(distfile)s" "%(location)s"' % locals())
        if rc != 0:
            err_exit('scp failed')
        return rc
| bsd-2-clause | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.