commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
8de7697b3b8a73e79a73ec34f17ef0fa842cfbb2 | Update setup.py for release. | google/osv.dev,google/osv.dev,google/osv.dev,google/osv.dev,google/osv.dev | lib/setup.py | lib/setup.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for OSV."""
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='osv',
version='0.0.14',
author='OSV authors',
author_email='osv-discuss@googlegroups.com',
description='Open Source Vulnerabilities library',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/google/osv',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
install_requires=[
'google-cloud-ndb',
'pygit2',
'PyYAML',
'semver',
],
package_dir={
'': '.',
},
python_requires='>=3.7',
zip_safe=False,
)
| # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for OSV."""
import setuptools
with open('README.md', 'r') as fh:
long_description = fh.read()
setuptools.setup(
name='osv',
version='0.0.13',
author='OSV authors',
author_email='osv-discuss@googlegroups.com',
description='Open Source Vulnerabilities library',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/google/osv',
packages=setuptools.find_packages(),
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
],
install_requires=[
'google-cloud-ndb',
'pygit2',
'PyYAML',
'semver',
],
package_dir={
'': '.',
},
python_requires='>=3.7',
zip_safe=False,
)
| apache-2.0 | Python |
786cc9b8f793f608098c1da97cb564e7d24b15cd | add IPython.start_ipython | ipython/ipython,ipython/ipython | IPython/__init__.py | IPython/__init__.py | # encoding: utf-8
"""
IPython: tools for interactive and parallel computing in Python.
http://ipython.org
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
#-----------------------------------------------------------------------------
# Setup everything
#-----------------------------------------------------------------------------
# Don't forget to also update setup.py when this changes!
if sys.version[0:3] < '2.6':
raise ImportError('Python Version 2.6 or above is required for IPython.')
# Make it easy to import extensions - they are always directly on pythonpath.
# Therefore, non-IPython modules can be added to extensions directory.
# This should probably be in ipapp.py.
sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
#-----------------------------------------------------------------------------
# Setup the top level names
#-----------------------------------------------------------------------------
from .config.loader import Config
from .core.getipython import get_ipython
from .core import release
from .core.application import Application
from .terminal.embed import embed
from .core.error import TryNext
from .core.interactiveshell import InteractiveShell
from .testing import test
from .utils.sysinfo import sys_info
from .utils.frame import extract_module_locals
# Release data
__author__ = '%s <%s>' % (release.author, release.author_email)
__license__ = release.license
__version__ = release.version
version_info = release.version_info
def embed_kernel(module=None, local_ns=None, **kwargs):
"""Embed and start an IPython kernel in a given scope.
Parameters
----------
module : ModuleType, optional
The module to load into IPython globals (default: caller)
local_ns : dict, optional
The namespace to load into IPython user namespace (default: caller)
kwargs : various, optional
Further keyword args are relayed to the IPKernelApp constructor,
allowing configuration of the Kernel. Will only have an effect
on the first embed_kernel call for a given process.
"""
(caller_module, caller_locals) = extract_module_locals(1)
if module is None:
module = caller_module
if local_ns is None:
local_ns = caller_locals
# Only import .zmq when we really need it
from IPython.kernel.zmq.embed import embed_kernel as real_embed_kernel
real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
def start_ipython(argv=None, **kwargs):
"""launch a normal IPython instance (as opposed to embedded)
This is a public API method, and will survive implementation changes.
Parameters
----------
argv : list or None, optional
If unspecified or None, IPython will parse command-line options from sys.argv.
To prevent any command-line parsing, pass an empty list: `argv=[]`.
kwargs : various, optional
Any other kwargs will be passed to the Application constructor,
such as `config`.
"""
from IPython.terminal.ipapp import launch_new_instance
return launch_new_instance(argv=argv, **kwargs)
| # encoding: utf-8
"""
IPython: tools for interactive and parallel computing in Python.
http://ipython.org
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2008-2011, IPython Development Team.
# Copyright (c) 2001-2007, Fernando Perez <fernando.perez@colorado.edu>
# Copyright (c) 2001, Janko Hauser <jhauser@zscout.de>
# Copyright (c) 2001, Nathaniel Gray <n8gray@caltech.edu>
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import
import os
import sys
#-----------------------------------------------------------------------------
# Setup everything
#-----------------------------------------------------------------------------
# Don't forget to also update setup.py when this changes!
if sys.version[0:3] < '2.6':
raise ImportError('Python Version 2.6 or above is required for IPython.')
# Make it easy to import extensions - they are always directly on pythonpath.
# Therefore, non-IPython modules can be added to extensions directory.
# This should probably be in ipapp.py.
sys.path.append(os.path.join(os.path.dirname(__file__), "extensions"))
#-----------------------------------------------------------------------------
# Setup the top level names
#-----------------------------------------------------------------------------
from .config.loader import Config
from .core.getipython import get_ipython
from .core import release
from .core.application import Application
from .terminal.embed import embed
from .core.error import TryNext
from .core.interactiveshell import InteractiveShell
from .testing import test
from .utils.sysinfo import sys_info
from .utils.frame import extract_module_locals
# Release data
__author__ = '%s <%s>' % (release.author, release.author_email)
__license__ = release.license
__version__ = release.version
version_info = release.version_info
def embed_kernel(module=None, local_ns=None, **kwargs):
"""Embed and start an IPython kernel in a given scope.
Parameters
----------
module : ModuleType, optional
The module to load into IPython globals (default: caller)
local_ns : dict, optional
The namespace to load into IPython user namespace (default: caller)
kwargs : various, optional
Further keyword args are relayed to the IPKernelApp constructor,
allowing configuration of the Kernel. Will only have an effect
on the first embed_kernel call for a given process.
"""
(caller_module, caller_locals) = extract_module_locals(1)
if module is None:
module = caller_module
if local_ns is None:
local_ns = caller_locals
# Only import .zmq when we really need it
from IPython.kernel.zmq.embed import embed_kernel as real_embed_kernel
real_embed_kernel(module=module, local_ns=local_ns, **kwargs)
| bsd-3-clause | Python |
14276843ce67d53a4f9600914872368bc29b145b | Fix bug where log_quantities in _CustomAction was a list | joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue,joaander/hoomd-blue | hoomd/custom_action.py | hoomd/custom_action.py | from abc import ABC, abstractmethod
from hoomd.parameterdicts import ParameterDict
class _CustomAction(ABC):
flags = []
log_quantities = {}
def __init__(self):
pass
def attach(self, simulation):
self._state = simulation.state
def detach(self):
if hasattr(self, '_state'):
del self._state
@abstractmethod
def act(self, timestep):
pass
class _InternalCustomAction(_CustomAction):
_reserved_attrs_with_dft = {'_param_dict': ParameterDict,
'_typeparam_dict': dict}
def __getattr__(self, attr):
if attr in self._reserved_attrs_with_dft.keys():
setattr(self, attr, self._reserved_attrs_with_dft[attr]())
return self.__dict__[attr]
elif attr in self._param_dict.keys():
return self._getattr_param(attr)
elif attr in self._typeparam_dict.keys():
return self._getattr_typeparam(attr)
else:
raise AttributeError("Object {} has no attribute {}"
"".format(self, attr))
def __setattr__(self, attr, value):
if attr in self._reserved_attrs_with_dft.keys():
super().__setattr__(attr, value)
elif attr in self._param_dict.keys():
self._param_dict[attr] = value
elif attr in self._typeparam_dict.keys():
self._setattr_typeparam(attr, value)
else:
super().__setattr__(attr, value)
def _setattr_typeparam(self, attr, value):
try:
for k, v in value.items():
self._typeparam_dict[attr][k] = v
except TypeError:
raise ValueError("To set {}, you must use a dictionary "
"with types as keys.".format(attr))
| from abc import ABC, abstractmethod
from hoomd.parameterdicts import ParameterDict
class _CustomAction(ABC):
flags = []
log_quantities = []
def __init__(self):
pass
def attach(self, simulation):
self._state = simulation.state
def detach(self):
if hasattr(self, '_state'):
del self._state
@abstractmethod
def act(self, timestep):
pass
class _InternalCustomAction(_CustomAction):
_reserved_attrs_with_dft = {'_param_dict': ParameterDict,
'_typeparam_dict': dict}
def __getattr__(self, attr):
if attr in self._reserved_attrs_with_dft.keys():
setattr(self, attr, self._reserved_attrs_with_dft[attr]())
return self.__dict__[attr]
elif attr in self._param_dict.keys():
return self._getattr_param(attr)
elif attr in self._typeparam_dict.keys():
return self._getattr_typeparam(attr)
else:
raise AttributeError("Object {} has no attribute {}"
"".format(self, attr))
def __setattr__(self, attr, value):
if attr in self._reserved_attrs_with_dft.keys():
super().__setattr__(attr, value)
elif attr in self._param_dict.keys():
self._param_dict[attr] = value
elif attr in self._typeparam_dict.keys():
self._setattr_typeparam(attr, value)
else:
super().__setattr__(attr, value)
def _setattr_typeparam(self, attr, value):
try:
for k, v in value.items():
self._typeparam_dict[attr][k] = v
except TypeError:
raise ValueError("To set {}, you must use a dictionary "
"with types as keys.".format(attr))
| bsd-3-clause | Python |
8622a5e0734c3f5ef1538362971824218fb8aa19 | add target module | vitareinforce/flask-bluemix,vitareinforce/flask-bluemix | welcome.py | welcome.py | # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify, Response, json
from flask_cors import CORS, cross_origin
from flightdata import flightdata
from target import target
app = Flask(__name__)
app.register_blueprint(flightdata)
app.register_blueprint(target)
CORS(app)
@app.route('/')
def Welcome():
return app.send_static_file('index.html')
@app.route('/myapp')
def WelcomeToMyapp():
return 'Welcome again to my app running on Bluemix!'
@app.route('/api/people', methods = ['GET'])
def GetPeople():
list_data = [
{'name': 'Vitradisa Pratama', 'nim': '23215331'},
{'name': 'Ridwan Suhud', 'nim': '23215343'}
]
json_data = json.dumps(list_data)
resp = Response(json_data, status=200, mimetype="application/json")
return resp
@app.route('/api/people/<name>')
def SayHello(name):
message = {
'message': 'Hello ' + name
}
return jsonify(results=message)
@app.route('/api/temp/<suhu>')
def UkurSuhu(suhu):
suhu_akhir = {
'message': 'Suhu input : ' + suhu
}
return jsonify(results=suhu_akhir)
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| # Copyright 2015 IBM Corp. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from flask import Flask, jsonify, Response, json
from flask_cors import CORS, cross_origin
from flightdata import flightdata
app = Flask(__name__)
app.register_blueprint(flightdata)
CORS(app)
@app.route('/')
def Welcome():
return app.send_static_file('index.html')
@app.route('/myapp')
def WelcomeToMyapp():
return 'Welcome again to my app running on Bluemix!'
@app.route('/api/people', methods = ['GET'])
def GetPeople():
list_data = [
{'name': 'Vitradisa Pratama', 'nim': '23215331'},
{'name': 'Ridwan Suhud', 'nim': '23215343'}
]
json_data = json.dumps(list_data)
resp = Response(json_data, status=200, mimetype="application/json")
return resp
@app.route('/api/people/<name>')
def SayHello(name):
message = {
'message': 'Hello ' + name
}
return jsonify(results=message)
@app.route('/api/temp/<suhu>')
def UkurSuhu(suhu):
suhu_akhir = {
'message': 'Suhu input : ' + suhu
}
return jsonify(results=suhu_akhir)
port = os.getenv('PORT', '5000')
if __name__ == "__main__":
app.run(host='0.0.0.0', port=int(port))
| apache-2.0 | Python |
8ff90c8f55ff8cfd772b664a6271453e47c1fd78 | bump version | corbinfanning/mustaine,bgilmore/mustaine,cyrusmg/python-hessian,cyrusmg/python-hessian,heqingpan/mustaine,cyrusmg/python-hessian | mustaine/__init__.py | mustaine/__init__.py | # Copyright (c) 2010, Brandon Gilmore
# Copyright (c) 2010, Phill Tornroth
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the software nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
version_info = (0, 1, 3)
__version__ = ".".join(map(str, version_info))
| # Copyright (c) 2010, Brandon Gilmore
# Copyright (c) 2010, Phill Tornroth
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the software nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
version_info = (0, 1, 2)
__version__ = ".".join(map(str, version_info))
| bsd-3-clause | Python |
621224db245a705b12f4345e70c8e242e3336969 | Add timestamp in spam index | webkom/holonet,webkom/holonet,webkom/holonet | holonet/core/message.py | holonet/core/message.py | # -*- coding: utf8 -*-
from io import BytesIO
import time
from django.utils import timezone
class HolonetEmailMessage(object):
encoding = None
def __init__(self, msg, list_recipients, connection=None):
self.msg = msg
self.list_recipients = list_recipients
self.connection = connection
super(HolonetEmailMessage, self).__init__()
def __getitem__(self, item):
return self.msg.get(item)
def keys(self):
return self.msg.keys()
def values(self):
return self.msg.values()
def __contains__(self, item):
return item in self.msg
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def recipients(self):
return self.list_recipients
def message(self):
return self
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def send(self, fail_silently=False):
if not self.recipients():
return 0
return self.get_connection(fail_silently).send_messages([self])
def as_bytes(self, unixfrom=False, policy=None, linesep='\n'):
from email.generator import BytesGenerator
policy = self.msg.policy if policy is None else policy
fp = BytesIO()
g = BytesGenerator(fp, mangle_from_=False, policy=policy)
g.flatten(self.msg, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def index(self):
body = {
'source': self.msg.as_string(),
'X-List-Recipients': self.recipients(),
'@timestamp': int(time.mktime(timezone.now().timetuple()) * 1000)
}
for key in self.keys():
body[key] = self[key]
return body
| # -*- coding: utf8 -*-
from io import BytesIO
class HolonetEmailMessage(object):
encoding = None
def __init__(self, msg, list_recipients, connection=None):
self.msg = msg
self.list_recipients = list_recipients
self.connection = connection
super(HolonetEmailMessage, self).__init__()
def __getitem__(self, item):
return self.msg.get(item)
def keys(self):
return self.msg.keys()
def values(self):
return self.msg.values()
def __contains__(self, item):
return item in self.msg
def get(self, key, default=None):
if key in self:
return self[key]
else:
return default
def recipients(self):
return self.list_recipients
def message(self):
return self
def get_connection(self, fail_silently=False):
from django.core.mail import get_connection
if not self.connection:
self.connection = get_connection(fail_silently=fail_silently)
return self.connection
def send(self, fail_silently=False):
if not self.recipients():
return 0
return self.get_connection(fail_silently).send_messages([self])
def as_bytes(self, unixfrom=False, policy=None, linesep='\n'):
from email.generator import BytesGenerator
policy = self.msg.policy if policy is None else policy
fp = BytesIO()
g = BytesGenerator(fp, mangle_from_=False, policy=policy)
g.flatten(self.msg, unixfrom=unixfrom, linesep=linesep)
return fp.getvalue()
def index(self):
body = {
'source': self.msg.as_string(),
'X-List-Recipients': self.recipients()
}
for key in self.keys():
body[key] = self[key]
return body
| mit | Python |
783784f31eca55c223929acb0a9c8106c8ac9d46 | duplicate code - change the sample function to a somewhat more whimsical example. more serious stuff can be seen in 'real' functions. | hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub,hsr-ba-fs15-dat/opendatahub | src/main/python/plugins/odhql_function.tmpl.py | src/main/python/plugins/odhql_function.tmpl.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Example function for ODHQL. Copy this file and use a valid Python module name (e.g. my_function.py).
Your function will then be automatically loaded and made available within the ODH Query Language Interpreter.
See hub.odhql.function package for more concrete implementation examples.
"""
from hub.odhql.functions.core import VectorizedFunction
class ExampleFunction(VectorizedFunction):
# __doc__ is used to generate function documentation (formatted as reStructured Text)
# By convention, function names and other keywords should be written in all-caps.
"""
Beispiel-Funktion für ODHQL, welche prüft, ob ein Integer-Feld den Wert 42 enthält.
Parameter
- `values`: Integer-Spalte
Beispiel
.. code:: sql
IS42(t.some_field) AS is42
"""
name = 'IS42'
def apply(self, values):
self.assert_int(0, values)
return values == 42
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
"""
Example CONCAT function used in ODHQL. Copy this file and use a valid Python module name (e.g. my_function.py).
Your function will then be automatically loaded and made available within the ODH Query Language Interpreter.
See hub.odhql.function package for more concrete implementation examples.
"""
from hub.odhql.functions.core import VectorizedFunction
class Concat(VectorizedFunction):
# __doc__ is used to generate function documentation (formatted as reStructured Text)
# By convention, function names and other keywords should be written in all-caps.
"""
Konkateniert eine Liste von TEXT-Spalten oder Werten.
Parameter
- `args`: Liste von TEXT-Spalten oder -Werten
Beispiel
.. code:: sql
CONCAT(ODH5.given_name, ' ', ODH5.surname) AS name
"""
name = 'CONCAT'
def apply(self, a, b, *args):
args = [self.expand(arg) for arg in [a, b] + list(args)]
for arg in args:
self.assert_str('string', arg)
return args[0].str.cat(args[1:])
| mit | Python |
9187979699a0f49ec5fc1659e9e2f87c97699a74 | Remove unused import | tsurumeso/waifu2x-chainer,tsurumeso/waifu2x-chainer | lib/utils.py | lib/utils.py | from __future__ import print_function
import os
import random
import numpy as np
class Namespace(object):
def __init__(self, kwargs):
self.kwargs = kwargs
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
str = []
for key in self.kwargs.keys():
str.append('%s=%s' % (key, self.kwargs[key]))
return ', '.join(str)
def append(self, key, value):
self.kwargs[key] = value
setattr(self, key, value)
def set_random_seed(seed, gpu=-1):
random.seed(seed)
np.random.seed(seed)
if gpu >= 0:
import cupy
cupy.random.seed(seed)
def load_datalist(dir, shuffle=False):
files = os.listdir(dir)
datalist = []
for file in files:
datalist.append(os.path.join(dir, file))
if shuffle:
random.shuffle(datalist)
return datalist
| from __future__ import print_function
import os
import random
import chainer
import numpy as np
class Namespace(object):
def __init__(self, kwargs):
self.kwargs = kwargs
for key in kwargs.keys():
setattr(self, key, kwargs[key])
def __repr__(self):
str = []
for key in self.kwargs.keys():
str.append('%s=%s' % (key, self.kwargs[key]))
return ', '.join(str)
def append(self, key, value):
self.kwargs[key] = value
setattr(self, key, value)
def set_random_seed(seed, gpu=-1):
random.seed(seed)
np.random.seed(seed)
if gpu >= 0:
import cupy
cupy.random.seed(seed)
def load_datalist(dir, shuffle=False):
files = os.listdir(dir)
datalist = []
for file in files:
datalist.append(os.path.join(dir, file))
if shuffle:
random.shuffle(datalist)
return datalist
| mit | Python |
ad8eaacd2e022cb904834bb6ae1faa38618c5cbf | Add events to csv output | aguinane/nem-reader | nemreader/outputs.py | nemreader/outputs.py | """
nemreader.outputs
~~~~~
Output results in different formats
"""
import csv
from nemreader import read_nem_file
def output_as_csv(file_name, nmi=None, output_file=None):
"""
Transpose all channels and output a csv that is easier
to read and do charting on
:param file_name: The NEM file to process
:param nmi: Which NMI to output if more than one
:param output_file: Specify different output location
:returns: The file that was created
"""
m = read_nem_file(file_name)
if nmi is None:
nmi = list(m.readings.keys())[0] # Use first NMI
channels = list(m.transactions[nmi].keys())
num_records = len(m.readings[nmi][channels[0]])
last_date = m.readings[nmi][channels[0]][-1].t_end
if output_file is None:
output_file = '{}_{}_transposed.csv'.format(
nmi, last_date.strftime('%Y%m%d'))
with open(output_file, 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
heading_list = ['period_start', 'period_end']
for channel in channels:
heading_list.append(channel)
heading_list.append('quality_method')
heading_list.append('event')
cwriter.writerow(heading_list)
for i in range(0, num_records):
t_start = m.readings[nmi][channels[0]][i].t_start
t_end = m.readings[nmi][channels[0]][i].t_end
quality_method = m.readings[nmi][channels[0]][i].quality_method
event_code = m.readings[nmi][channels[0]][i].event_code
event_desc = m.readings[nmi][channels[0]][i].event_desc
row_list = [t_start, t_end]
for ch in channels:
val = m.readings[nmi][ch][i].read_value
row_list.append(val)
row_list.append(quality_method)
row_list.append(f'{event_code} {event_desc}')
cwriter.writerow(row_list)
return output_file
| """
nemreader.outputs
~~~~~
Output results in different formats
"""
import csv
from nemreader import read_nem_file
def output_as_csv(file_name, nmi=None, output_file=None):
"""
Transpose all channels and output a csv that is easier
to read and do charting on
:param file_name: The NEM file to process
:param nmi: Which NMI to output if more than one
:param output_file: Specify different output location
:returns: The file that was created
"""
m = read_nem_file(file_name)
if nmi is None:
nmi = list(m.readings.keys())[0] # Use first NMI
channels = list(m.transactions[nmi].keys())
num_records = len(m.readings[nmi][channels[0]])
last_date = m.readings[nmi][channels[0]][-1].t_end
if output_file is None:
output_file = '{}_{}_transposed.csv'.format(
nmi, last_date.strftime('%Y%m%d'))
with open(output_file, 'w', newline='') as csvfile:
cwriter = csv.writer(
csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
heading_list = ['period_start', 'period_end']
for channel in channels:
heading_list.append(channel)
heading_list.append('quality_method')
cwriter.writerow(heading_list)
for i in range(0, num_records):
t_start = m.readings[nmi][channels[0]][i].t_start
t_end = m.readings[nmi][channels[0]][i].t_end
quality_method = m.readings[nmi][channels[0]][i].quality_method
row_list = [t_start, t_end]
for ch in channels:
val = m.readings[nmi][ch][i].read_value
row_list.append(val)
row_list.append(quality_method)
cwriter.writerow(row_list)
return output_file
| mit | Python |
a789e8bf574973259c0461b99fb9a486abed6e23 | Fix a bug that would add updated control ip address instead of replace | jcshen007/cloudstack,resmo/cloudstack,resmo/cloudstack,GabrielBrascher/cloudstack,resmo/cloudstack,jcshen007/cloudstack,jcshen007/cloudstack,jcshen007/cloudstack,GabrielBrascher/cloudstack,resmo/cloudstack,GabrielBrascher/cloudstack,GabrielBrascher/cloudstack,wido/cloudstack,resmo/cloudstack,DaanHoogland/cloudstack,GabrielBrascher/cloudstack,DaanHoogland/cloudstack,wido/cloudstack,resmo/cloudstack,wido/cloudstack,resmo/cloudstack,DaanHoogland/cloudstack,jcshen007/cloudstack,jcshen007/cloudstack,wido/cloudstack,DaanHoogland/cloudstack,DaanHoogland/cloudstack,wido/cloudstack,GabrielBrascher/cloudstack,wido/cloudstack,DaanHoogland/cloudstack,jcshen007/cloudstack,DaanHoogland/cloudstack,GabrielBrascher/cloudstack,wido/cloudstack | systemvm/patches/debian/config/opt/cloud/bin/cs_ip.py | systemvm/patches/debian/config/opt/cloud/bin/cs_ip.py | from pprint import pprint
from netaddr import *
def merge(dbag, ip):
added = False
for dev in dbag:
if dev == "id":
continue
for address in dbag[dev]:
if address['public_ip'] == ip['public_ip']:
dbag[dev].remove(address)
if ip['add']:
ipo = IPNetwork(ip['public_ip'] + '/' + ip['netmask'])
ip['device'] = 'eth' + str(ip['nic_dev_id'])
ip['cidr'] = str(ipo.ip) + '/' + str(ipo.prefixlen)
ip['network'] = str(ipo.network) + '/' + str(ipo.prefixlen)
if 'nw_type' not in ip.keys():
ip['nw_type'] = 'public'
if ip['nw_type'] == 'control':
dbag['eth' + str(ip['nic_dev_id'])] = [ ip ]
else:
dbag.setdefault('eth' + str(ip['nic_dev_id']), []).append( ip )
return dbag
| from pprint import pprint
from netaddr import *
def merge(dbag, ip):
    """Merge an IP-address update into the interface data bag.

    Removes any existing entries for ip['public_ip'] from every device
    list, then (when ip['add'] is true) enriches the entry with device,
    cidr and network fields and appends it to its nic's list.
    Returns the updated data bag.
    """
    for dev in dbag:
        # 'id' is data-bag bookkeeping, not a device list.
        if dev == "id":
            continue
        # Rebuild the list instead of calling remove() while iterating,
        # which skips the element that follows each removal (so duplicate
        # entries for the same public IP were left behind).
        dbag[dev] = [addr for addr in dbag[dev]
                     if addr['public_ip'] != ip['public_ip']]
    if ip['add']:
        ipo = IPNetwork(ip['public_ip'] + '/' + ip['netmask'])
        ip['device'] = 'eth' + str(ip['nic_dev_id'])
        ip['cidr'] = str(ipo.ip) + '/' + str(ipo.prefixlen)
        ip['network'] = str(ipo.network) + '/' + str(ipo.prefixlen)
        if 'nw_type' not in ip.keys():
            ip['nw_type'] = 'public'
        dbag.setdefault('eth' + str(ip['nic_dev_id']), []).append(ip)
    return dbag
| apache-2.0 | Python |
67ebe8da58384529c49673e2314d4fc228aebe9e | Fix test data for PyPackageRequirementsInspectionTest.testImportsNotInRequirementsTxt. | apixandru/intellij-community,da1z/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,ibinti/intellij-community,suncycheng/intellij-community,apixandru/intellij-community,da1z/intellij-community,ibinti/intellij-community,vvv1559/intellij-community,ibinti/intellij-community,ThiagoGarciaAlves/intellij-community,asedunov/intellij-community,ibinti/intellij-community,suncycheng/intellij-community,da1z/intellij-community,suncycheng/intellij-community,allotria/intellij-community,allotria/intellij-community,asedunov/intellij-community,ibinti/intellij-community,apixandru/intellij-community,apixandru/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,asedunov/intellij-community,ibinti/intellij-community,mglukhikh/intellij-community,da1z/intellij-community,suncycheng/intellij-community,apixandru/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,mglukhikh/intellij-community,da1z/intellij-community,vvv1559/intellij-community,allotria/intellij-community,ibinti/intellij-community,ibinti/intellij-community,allotria/intellij-community,ibinti/intellij-community,vvv1559/intellij-community,vvv1559/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,vvv1559/intellij-community,suncycheng/intellij-community,ibinti/intellij-community,vvv1559/intellij-community,vvv1559/intellij-community,allotria/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,mglukhikh/intellij-community,da1z/intellij-community,ThiagoGarciaAlves/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,asedunov/intellij-community,apixandru/intellij-community,xfournet/intellij-community,apixandru/intellij-community,suncyc
heng/intellij-community,xfournet/intellij-community,da1z/intellij-community,mglukhikh/intellij-community,asedunov/intellij-community,suncycheng/intellij-community,mglukhikh/intellij-community,da1z/intellij-community,apixandru/intellij-community,da1z/intellij-community,vvv1559/intellij-community,asedunov/intellij-community,asedunov/intellij-community,da1z/intellij-community,vvv1559/intellij-community,vvv1559/intellij-community,da1z/intellij-community,asedunov/intellij-community,da1z/intellij-community,asedunov/intellij-community,asedunov/intellij-community,mglukhikh/intellij-community,allotria/intellij-community,ibinti/intellij-community,vvv1559/intellij-community,xfournet/intellij-community,mglukhikh/intellij-community,vvv1559/intellij-community,ThiagoGarciaAlves/intellij-community,allotria/intellij-community,apixandru/intellij-community,ThiagoGarciaAlves/intellij-community,xfournet/intellij-community,apixandru/intellij-community,allotria/intellij-community,suncycheng/intellij-community,asedunov/intellij-community,allotria/intellij-community,allotria/intellij-community,mglukhikh/intellij-community,ThiagoGarciaAlves/intellij-community,asedunov/intellij-community,suncycheng/intellij-community,xfournet/intellij-community,apixandru/intellij-community,mglukhikh/intellij-community,suncycheng/intellij-community,allotria/intellij-community,xfournet/intellij-community,ThiagoGarciaAlves/intellij-community,vvv1559/intellij-community,suncycheng/intellij-community,ibinti/intellij-community,apixandru/intellij-community,allotria/intellij-community,ibinti/intellij-community,mglukhikh/intellij-community,mglukhikh/intellij-community,mglukhikh/intellij-community,da1z/intellij-community,apixandru/intellij-community,apixandru/intellij-community,asedunov/intellij-community,suncycheng/intellij-community | python/testData/inspections/PyPackageRequirementsInspection/ImportsNotInRequirementsTxt/test1.py | 
python/testData/inspections/PyPackageRequirementsInspection/ImportsNotInRequirementsTxt/test1.py | import pip
import <weak_warning descr="Package containing module 'opster' is not listed in project requirements">opster</weak_warning>
from <weak_warning descr="Package containing module 'clevercss' is not listed in project requirements">clevercss</weak_warning> import convert
import <weak_warning descr="Package containing module 'django' is not listed in project requirements">django</weak_warning>.conf
import httplib
import <weak_warning descr="Package containing module 'test3' is not listed in project requirements">test3</weak_warning>
print('Hello, World!')
| import pip
import <weak_warning descr="Package 'opster' is not listed in project requirements">opster</weak_warning>
from <weak_warning descr="Package 'clevercss' is not listed in project requirements">clevercss</weak_warning> import convert
import <weak_warning descr="Package 'django' is not listed in project requirements">django</weak_warning>.conf
import httplib
import <weak_warning descr="Package 'test3' is not listed in project requirements">test3</weak_warning>
print('Hello, World!')
| apache-2.0 | Python |
ed91a6832d459dffa18d1b2d7b827b6aa6da2627 | Add team project list to docs | BuildingLink/sentry,JamesMura/sentry,mvaled/sentry,beeftornado/sentry,JamesMura/sentry,ngonzalvez/sentry,Kryz/sentry,fuziontech/sentry,nicholasserra/sentry,TedaLIEz/sentry,JackDanger/sentry,songyi199111/sentry,BayanGroup/sentry,songyi199111/sentry,1tush/sentry,kevinlondon/sentry,gg7/sentry,jokey2k/sentry,Natim/sentry,vperron/sentry,daevaorn/sentry,wong2/sentry,JTCunning/sentry,Natim/sentry,nicholasserra/sentry,daevaorn/sentry,fuziontech/sentry,gg7/sentry,boneyao/sentry,kevinastone/sentry,gencer/sentry,mvaled/sentry,pauloschilling/sentry,kevinastone/sentry,BuildingLink/sentry,TedaLIEz/sentry,fotinakis/sentry,gg7/sentry,drcapulet/sentry,drcapulet/sentry,JTCunning/sentry,BayanGroup/sentry,argonemyth/sentry,fuziontech/sentry,looker/sentry,gencer/sentry,jokey2k/sentry,wujuguang/sentry,boneyao/sentry,JamesMura/sentry,vperron/sentry,felixbuenemann/sentry,ewdurbin/sentry,daevaorn/sentry,ifduyue/sentry,1tush/sentry,zenefits/sentry,korealerts1/sentry,zenefits/sentry,alexm92/sentry,JackDanger/sentry,mitsuhiko/sentry,wujuguang/sentry,llonchj/sentry,ngonzalvez/sentry,JamesMura/sentry,mvaled/sentry,fotinakis/sentry,hongliang5623/sentry,pauloschilling/sentry,looker/sentry,Kryz/sentry,gencer/sentry,jean/sentry,boneyao/sentry,Kryz/sentry,looker/sentry,wong2/sentry,BuildingLink/sentry,korealerts1/sentry,ewdurbin/sentry,JTCunning/sentry,ifduyue/sentry,ewdurbin/sentry,beeftornado/sentry,JackDanger/sentry,gencer/sentry,hongliang5623/sentry,jokey2k/sentry,ifduyue/sentry,1tush/sentry,BuildingLink/sentry,imankulov/sentry,fotinakis/sentry,daevaorn/sentry,felixbuenemann/sentry,songyi199111/sentry,imankulov/sentry,beeftornado/sentry,jean/sentry,vperron/sentry,Natim/sentry,zenefits/sentry,felixbuenemann/sentry,imankulov/sentry,looker/sentry,alexm92/sentry,mvaled/sentry,gencer/sentry,ifduyue/sentry,kevinlondon/sentry,kevinlondon/sentry,pauloschilling/sentry,JamesMura/sentry,BayanGroup/sentry,ngonzalvez/sentry,jean/sentry
,zenefits/sentry,drcapulet/sentry,korealerts1/sentry,mvaled/sentry,wong2/sentry,ifduyue/sentry,fotinakis/sentry,argonemyth/sentry,alexm92/sentry,jean/sentry,hongliang5623/sentry,argonemyth/sentry,llonchj/sentry,looker/sentry,llonchj/sentry,jean/sentry,kevinastone/sentry,nicholasserra/sentry,BuildingLink/sentry,zenefits/sentry,mvaled/sentry,mitsuhiko/sentry,TedaLIEz/sentry,wujuguang/sentry | src/sentry/api/endpoints/team_project_index.py | src/sentry/api/endpoints/team_project_index.py | from __future__ import absolute_import
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.base import DocSection
from sentry.api.bases.team import TeamEndpoint
from sentry.api.permissions import assert_perm
from sentry.api.serializers import serialize
from sentry.constants import MEMBER_ADMIN
from sentry.models import Project
from sentry.permissions import can_create_projects
class ProjectSerializer(serializers.ModelSerializer):
    # DRF serializer used by the POST endpoint below: only name and slug
    # may be supplied by the client when creating a project.
    class Meta:
        model = Project
        fields = ('name', 'slug')
class TeamProjectIndexEndpoint(TeamEndpoint):
    # Groups this endpoint under the "Teams" section of the generated API
    # docs. NOTE(review): the method docstrings below appear to be rendered
    # into those docs (note the {method}/{path} placeholders), so their
    # exact text is load-bearing — do not edit casually.
    doc_section = DocSection.TEAMS

    def get(self, request, team):
        """
        List a team's projects
        Return a list of projects bound to a team.
            {method} {path}
        """
        # Caller must have (at least read) access to the team.
        assert_perm(team, request.user, request.auth)
        results = list(Project.objects.get_for_user(team=team, user=request.user))
        return Response(serialize(results, request.user))

    def post(self, request, team):
        """
        Create a new project
        Create a new project bound to a team.
            {method} {path}
            {{
                "name": "My project"
            }}
        """
        # Creating projects requires admin membership plus the
        # can_create_projects permission check.
        assert_perm(team, request.user, request.auth, access=MEMBER_ADMIN)
        if not can_create_projects(user=request.user, team=team):
            return Response(status=403)
        serializer = ProjectSerializer(data=request.DATA)
        if serializer.is_valid():
            # Team/organization are derived from the URL, never from the
            # client payload.
            project = serializer.object
            project.team = team
            project.organization = team.organization
            project.save()
            return Response(serialize(project, request.user), status=201)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| from __future__ import absolute_import
from rest_framework import serializers, status
from rest_framework.response import Response
from sentry.api.bases.team import TeamEndpoint
from sentry.api.permissions import assert_perm
from sentry.api.serializers import serialize
from sentry.constants import MEMBER_ADMIN
from sentry.models import Project
from sentry.permissions import can_create_projects
class ProjectSerializer(serializers.ModelSerializer):
    # DRF serializer for project creation: clients may only set name/slug.
    class Meta:
        model = Project
        fields = ('name', 'slug')


class TeamProjectIndexEndpoint(TeamEndpoint):
    def get(self, request, team):
        # List the team's projects visible to the requesting user.
        assert_perm(team, request.user, request.auth)
        results = list(Project.objects.get_for_user(team=team, user=request.user))
        return Response(serialize(results, request.user))

    def post(self, request, team):
        # Create a project bound to this team; requires admin membership
        # plus the can_create_projects permission check.
        assert_perm(team, request.user, request.auth, access=MEMBER_ADMIN)
        if not can_create_projects(user=request.user, team=team):
            return Response(status=403)
        serializer = ProjectSerializer(data=request.DATA)
        if serializer.is_valid():
            # Team/organization come from the URL, never the payload.
            project = serializer.object
            project.team = team
            project.organization = team.organization
            project.save()
            return Response(serialize(project, request.user), status=201)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| bsd-3-clause | Python |
f5668fec4423179eac8bd996b0ae906af66c5563 | Remove arg. | Julian/Great,Julian/Great,Julian/Great | great/cli.py | great/cli.py | import argparse
from great import extract
from great.models.core import db
from great.app import create_app
def main(arguments):
    """Dispatch the parsed CLI arguments.

    Only the ``itunes`` command is implemented: it parses the iTunes
    library XML and persists every track via the db session.
    """
    # create_app() initializes the application (and the db binding) as a
    # side effect; the returned app object was previously assigned to an
    # unused local, so just call it.
    create_app()
    if arguments.command == "itunes":
        tracks = extract.itunes_tracks(arguments.library_file)
        for track in tracks:
            db.session.add(extract.track_from_itunes(db.session, track))
        db.session.commit()
# Command-line interface for the Great ratings collector.
parser = argparse.ArgumentParser(description="Great: a ratings collector")
parser.add_argument(
    # NOTE(review): main() now calls create_app() with no arguments, so
    # this option is parsed but not consumed — confirm intended behavior.
    "--db-uri", default="sqlite:///great.db",
    help="The database URI to use.",  # typo fixed: was "The, database URI"
)
subparsers = parser.add_subparsers(dest="command")

# `great itunes`: import ratings from an iTunes library XML export.
itunes = subparsers.add_parser("itunes")
itunes.add_argument(
    "--library-file",
    help="An iTunes Library (XML) file to parse",
    default="iTunes Library File.xml",
)
| import argparse
from great import extract
from great.models.core import db
from great.app import create_app
def main(arguments):
    """Dispatch the parsed CLI arguments (itunes import only)."""
    # Initialize the application against the requested database URI; the
    # app object itself is not used afterwards.
    app = create_app(arguments.db_uri)
    if arguments.command == "itunes":
        # Parse the iTunes XML and persist every track in one commit.
        tracks = extract.itunes_tracks(arguments.library_file)
        for track in tracks:
            db.session.add(extract.track_from_itunes(db.session, track))
        db.session.commit()


# Command-line interface for the Great ratings collector.
parser = argparse.ArgumentParser(description="Great: a ratings collector")
parser.add_argument(
    "--db-uri", default="sqlite:///great.db", help="The, database URI to use.",
)
subparsers = parser.add_subparsers(dest="command")
# `great itunes`: import ratings from an iTunes library XML export.
itunes = subparsers.add_parser("itunes")
itunes.add_argument(
    "--library-file",
    help="An iTunes Library (XML) file to parse",
    default="iTunes Library File.xml",
)
| mit | Python |
eac175e8650078ee17d7520725f2fe9604b30dac | Adjust frame rate if needed | voc/voctomix,voc/voctomix | voctocore/lib/sources/loopsource.py | voctocore/lib/sources/loopsource.py | #!/usr/bin/env python3
import logging
import re
from gi.repository import Gst
from lib.config import Config
from lib.sources.avsource import AVSource
class LoopSource(AVSource):
timer_resolution = 0.5
    def __init__(self, name, has_audio=True, has_video=True,
                 force_num_streams=None):
        # NOTE(review): force_num_streams is accepted but never used here;
        # presumably kept for signature parity with other AVSource
        # subclasses — confirm.
        super().__init__('LoopSource', name, has_audio, has_video,
                         show_no_signal=True)
        # Path of the media file to loop, taken from the configuration.
        self.location = Config.getLocation(name)
        self.build_pipeline()

    def __str__(self):
        # Human-readable identification used in log output.
        return 'LoopSource[{name}] displaying {location}'.format(
            name=self.name,
            location=self.location
        )
def port(self):
m = re.search('.*/([^/]*)', self.location)
return self.location
    def num_connections(self):
        # A looping file is a single, always-present connection.
        return 1

    def video_channels(self):
        # Exactly one video stream is produced by this source.
        return 1

    def build_source(self):
        # GStreamer bin: read the configured file endlessly and decode it.
        # The decodebin is named so the audio/video port bins can link to it.
        return """
multifilesrc
    location={location}
    loop=true
! decodebin
    name=videoloop-{name}
""".format(
            name=self.name,
            location=self.location
        )

    def build_videoport(self):
        # Video branch: convert, normalize the frame rate (videorate) and
        # scale so downstream elements get the expected video format.
        return """
videoloop-{name}.
! videoconvert
! videorate
! videoscale
""".format(name=self.name)
def build_audioport(self):
return """
videoloop-{name}.
! audioconvert
! audioresample
""".format(name=self.name) | #!/usr/bin/env python3
import logging
import re
from gi.repository import Gst
from lib.config import Config
from lib.sources.avsource import AVSource
class LoopSource(AVSource):
timer_resolution = 0.5
    def __init__(self, name, has_audio=True, has_video=True,
                 force_num_streams=None):
        # NOTE(review): force_num_streams is accepted but never used here;
        # presumably kept for signature parity with other sources — confirm.
        super().__init__('LoopSource', name, has_audio, has_video,
                         show_no_signal=True)
        # Path of the media file to loop, taken from the configuration.
        self.location = Config.getLocation(name)
        self.build_pipeline()

    def __str__(self):
        # Human-readable identification used in log output.
        return 'LoopSource[{name}] displaying {location}'.format(
            name=self.name,
            location=self.location
        )
def port(self):
m = re.search('.*/([^/]*)', self.location)
return self.location
    def num_connections(self):
        # A looping file is a single, always-present connection.
        return 1

    def video_channels(self):
        # Exactly one video stream is produced by this source.
        return 1

    def build_source(self):
        # GStreamer bin: read the configured file endlessly and decode it.
        # The decodebin is named so the audio/video port bins can link to it.
        return """
multifilesrc
    location={location}
    loop=true
! decodebin
    name=videoloop-{name}
""".format(
            name=self.name,
            location=self.location
        )

    def build_videoport(self):
        # Video branch: convert and scale so downstream elements get the
        # expected video format. NOTE(review): no videorate element here,
        # so the file's native frame rate is passed through unchanged.
        return """
videoloop-{name}.
! videoconvert
! videoscale
""".format(name=self.name)
def build_audioport(self):
return """
videoloop-{name}.
! audioconvert
! audioresample
""".format(name=self.name) | mit | Python |
c194d9b2244f3c9779442d71605a5c9f17ad763b | FIX make brat.utils compatible with python 3 (and 2.6+) | HazyResearch/snorkel,HazyResearch/snorkel,HazyResearch/snorkel | snorkel/contrib/brat/utils.py | snorkel/contrib/brat/utils.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from six.moves.urllib.request import urlopen
from six.moves.urllib.error import URLError, HTTPError
def download(url, outfname):
    """
    Download target URL and write its payload to a local file.

    :param url: source URL to fetch
    :param outfname: path of the file the payload is written to
    :return: None; HTTP/URL errors are reported on stdout, not raised
    """
    # Local import keeps the module-level dependency list unchanged.
    from contextlib import closing
    try:
        # closing() guarantees the connection is released even if the
        # read or the file write fails (the py2 urlopen result is not a
        # context manager by itself, so a plain `with urlopen(...)` would
        # not be 2/3-compatible).
        with closing(urlopen(url)) as data:
            with open(outfname, "wb") as f:
                f.write(data.read())
    except HTTPError as e:
        # HTTPError subclasses URLError, so it must be caught first.
        print("HTTP Error: {} {}".format(e.code, url))
    except URLError as e:
        print("URL Error: {} {}".format(e.reason, url))
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import *
from urllib2 import urlopen, URLError, HTTPError
def download(url, outfname):
    """
    Download target URL and write its payload to a local file.

    :param url: source URL to fetch
    :param outfname: path of the file the payload is written to
    :return: None; HTTP/URL errors are reported on stdout, not raised
    """
    try:
        data = urlopen(url)
        with open(outfname, "wb") as f:
            f.write(data.read())
    # NOTE(review): `except X, e:` is Python-2-only syntax; this module
    # will not even parse under Python 3. HTTPError is caught before
    # URLError because it is a subclass of it.
    except HTTPError, e:
        print("HTTP Error: {} {}".format(e.code, url))
    except URLError, e:
        print("URL Error: {} {}".format(e.reason, url))
| apache-2.0 | Python |
9aee6d2fa65859811e44fc1410053f27243f5610 | Edit to test_logit | datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts,datasnakes/Datasnakes-Scripts | Tests/test_tools.py | Tests/test_tools.py | """This is the test suite for Tools."""
import unittest
import os
from Datasnakes.Tools import LogIt, Multiprocess
class TestTools(unittest.TestCase):
    """Test the Tools module."""

    def setUp(self, logfile='test.log'):
        # NOTE(review): unittest invokes setUp() with no arguments, so the
        # extra parameter only works because it has a default value.
        self.logfile = logfile

    def test_logit(self):
        # Create a default logger; its name is upper-cased by LogIt and a
        # log file must appear on disk.
        logit = LogIt()
        test = logit.default(logname='testlog', logfile=self.logfile)
        self.assertEqual(str(test.name), 'TESTLOG')
        self.assertTrue(os.path.isfile(self.logfile))
        # Shut handlers down before deleting the file they hold open.
        logit.shutdown()
        logit.deletelog(self.logfile)

    def test_multiprocess(self):
        # TODO: placeholder — no multiprocessing assertions yet.
        print()


if __name__ == '__main__':
    unittest.main()
| """This is the test suite for Tools."""
import unittest
import os
from Datasnakes.Tools import LogIt, Multiprocess
class TestTools(unittest.TestCase):
    """Test the Tools module."""

    def setUp(self, logfile='test.log'):
        # NOTE(review): unittest invokes setUp() with no arguments, so the
        # extra parameter only works because it has a default value.
        self.logfile = logfile

    def test_logit(self):
        # Create a default logger (positional args: logname, logfile); the
        # name is upper-cased by LogIt and a log file must appear on disk.
        logit = LogIt()
        test = logit.default('testlog', self.logfile)
        self.assertEqual(str(test.name), 'TESTLOG')
        self.assertTrue(os.path.isfile(self.logfile))
        # Shut handlers down before deleting the file they hold open.
        logit.shutdown()
        logit.deletelog(self.logfile)

    def test_multiprocess(self):
        # TODO: placeholder — no multiprocessing assertions yet.
        print()


if __name__ == '__main__':
    unittest.main()
| mit | Python |
5baae6dded891bc12c5676078fedd4ee222ae016 | fix tests and add some more | ministryofjustice/courtfinder-govuk-publisher-test,ministryofjustice/courtfinder-govuk-publisher-test | scripts/test.py | scripts/test.py | #!/usr/bin/env python
import requests
import json
import sys
base_url='http://127.0.0.1:8000/court/'
oauth_token='foobar'
#with open('../data/sample_court.json') as f:
# sample_court_json = f.read()
sample_court_json_1 = '{"name":"blah","slug":"blah","updated_at": "2014-03-18T12:33:12.176Z","closed":false,"alert":"","lat":0.0,"lon":0.0,"number":"200","DX":"2039D"}'
headers = {'Authorization': 'Bearer '+oauth_token}
def same_arrays(a, b):
    """Elementwise comparison of response array ``a`` against expected
    array ``b``.

    Falsy entries in ``b`` act as wildcards (any value in ``a`` matches).
    Fixes the original implementation, which returned True as soon as the
    first element matched (the ``return True`` sat inside the loop) and
    crashed when ``b`` was shorter than ``a``.
    """
    if len(a) != len(b):
        return False
    for i, item in enumerate(a):
        # b[i] falsy -> wildcard; otherwise values must match exactly.
        if b[i] and b[i] != item:
            return False
    return True
def check(request, status_code, body=None, win='.'):
    # Compare an HTTP response against the expected status code and,
    # optionally, the expected JSON body. Prints a diagnostic on mismatch
    # or the `win` marker (default '.') on success. Python 2 syntax.
    if request.status_code != status_code:
        print "Different status codes: expected %d, got %d" % (status_code, request.status_code)
        print request.text
    elif body:
        b = json.loads(body)
        r = json.loads(request.text)
        # Arrays are compared leniently via same_arrays(); anything else
        # must be exactly equal.
        if type(b) == type([]) and not same_arrays(r, b):
            print "Different arrays: expected %s, got %s" % (body, request.text)
        elif r != b:
            print "Different objects: expected %s, got %s" % (body, request.text)
        else:
            sys.stdout.write(win)
    else:
        sys.stdout.write(win)
def list():
    # Fetch the full court collection. NOTE(review): shadows the builtin
    # `list`; kept because __main__ below calls it by this name.
    return requests.get(base_url, headers=headers)

def get(uuid):
    # Fetch a single court by uuid.
    return requests.get(base_url + uuid, headers=headers)

def put(uuid, json, auth=True):
    # Create/replace a court. auth=False exercises the unauthenticated
    # (403) path. NOTE(review): the `json` parameter shadows the json
    # module inside this function.
    if auth:
        return requests.put(base_url + uuid, headers=headers, data=json)
    else:
        return requests.put(base_url + uuid, data=json)

def delete(uuid):
    # Delete a court by uuid.
    return requests.delete(base_url + uuid, headers=headers)
if __name__ == "__main__":
    # End-to-end exercise of the court API (Python 2 print syntax).
    # 'all-the-things' is a special uuid that wipes the database first.
    check(delete('all-the-things'), 200)
    # Unauthenticated writes must be rejected.
    check(put('foo-bar', '[]', auth=False), 403)
    check(list(), 200, '[]')
    check(get('22984u-3482u49u'), 404)
    # First PUT creates (201), second PUT updates (200).
    check(put('22984u-3482u49u', sample_court_json_1), 201)
    check(put('22984u-3482u49u', sample_court_json_1), 200)
    check(list(), 200, '[' + sample_court_json_1 + ']')
    check(get('22984u-3482u49u'), 200, sample_court_json_1)
    check(list(), 200, '[' + sample_court_json_1 + ']')
    check(delete('22984u-3482u49u'), 200)
    check(list(), 200, '[]')
    print "done"
| #!/usr/bin/env python
import requests
import json
base_url='http://127.0.0.1:8000/court/'
court_file='../data/sample_court.json'
oauth_token='foobar'
with open('../data/sample_court.json') as f:
sample_court_json = f.read()
headers = {'Authorization': 'Bearer '+oauth_token}
def is_in(small, big):
    """Return True when the JSON object ``small`` is a subset of the JSON
    object ``big``, i.e. every key/value pair of ``small`` also occurs in
    ``big``."""
    wanted = json.loads(small)
    found = json.loads(big)
    for pair in wanted.items():
        if pair not in found.items():
            return False
    return True
def check(request, status_code, body=None, win='.'):
    # Compare an HTTP response against the expected status code and,
    # optionally, a JSON body that must be a subset of the response
    # (via is_in). Prints a diagnostic on mismatch or the `win` marker
    # on success. Python 2 print syntax.
    if request.status_code != status_code:
        print "Different status codes: expected %d, got %d" % (status_code, request.status_code)
    elif body:
        if not is_in(request.text, body):
            print "Different objects: expected %s, got %s" % (body, request.text)
    else:
        print win,

def list():
    # Fetch the full court collection. NOTE(review): shadows the builtin
    # `list`; kept because __main__ below calls it by this name.
    return requests.get(base_url, headers=headers)

def get(uuid):
    # Fetch a single court by uuid.
    return requests.get(base_url + uuid, headers=headers)

def put(uuid, json, auth=True):
    # Create/replace a court; auth=False exercises the unauthenticated
    # (403) path. NOTE(review): parameter `json` shadows the json module.
    if auth:
        return requests.put(base_url + uuid, headers=headers, data=json)
    else:
        return requests.put(base_url + uuid, data=json)

def delete(uuid):
    # Delete a court by uuid.
    return requests.delete(base_url + uuid, headers=headers)

if __name__ == "__main__":
    # hidden uuid to delete every court in the database
    check(delete('all-the-things'), 200)
    # Unauthenticated writes must be rejected.
    check(put('foo-bar', '[]', auth=False), 403)
    check(list(), 200, '[]')
    check(get('22984u-3482u49u'), 404)
    check(put('22984u-3482u49u', sample_court_json), 201)
    check(list(), 200, sample_court_json)
| mit | Python |
c39b0aa2314d354d85735e9ef024443e58088133 | add docstrings | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | sequana/kmer.py | sequana/kmer.py | # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import itertools
def build_kmer(length=6, letters='CG'):
    """Return every k-mer of the given length over the given alphabet.

    :param length: k-mer length
    :param letters: alphabet to draw characters from
    :return: list of kmers
    """
    # Cartesian product of the alphabet with itself `length` times,
    # joined back into strings. TODO: include N and other letters.
    return ["".join(letter_tuple)
            for letter_tuple in itertools.product(letters, repeat=length)]
def get_kmer(sequence, k=7):
    """Yield the consecutive (overlapping) k-mers of *sequence*.

    :param sequence: string to slide over
    :param k: window length
    :return: iterator of kmers (empty when len(sequence) < k)
    """
    last_start = len(sequence) - k
    start = 0
    while start <= last_start:
        yield sequence[start:start + k]
        start += 1
| # -*- coding: utf-8 -*-
#
# This file is part of Sequana software
#
# Copyright (c) 2016 - Sequana Development Team
#
# File author(s):
# Thomas Cokelaer <thomas.cokelaer@pasteur.fr>
# Dimitri Desvillechabrol <dimitri.desvillechabrol@pasteur.fr>,
# <d.desvillechabrol@gmail.com>
#
# Distributed under the terms of the 3-clause BSD license.
# The full license is in the LICENSE file, distributed with this software.
#
# website: https://github.com/sequana/sequana
# documentation: http://sequana.readthedocs.io
#
##############################################################################
import itertools
def build_kmer(length=6, letters='CG'):
    """Return the list of all k-mers of *length* over the *letters* alphabet."""
    # build permutations of CG letters with a sequence of given lengths
    # TODO include N other letters
    combos = list(itertools.product(letters, repeat=length))
    return ["".join(this) for this in combos]


def get_kmer(sequence, k=7):
    """Yield the consecutive (overlapping) k-mers of *sequence*."""
    for i in range(0, len(sequence)-k+1):
        yield sequence[i:i+k]
| bsd-3-clause | Python |
9b330d37243036f95d5a07c40fd8e49c009c7554 | change target function for TIM algorithm | simp1eton/CS224W_Final_Project,simp1eton/CS224W_Final_Project,simp1eton/CS224W_Final_Project | PYTHON/tim.py | PYTHON/tim.py | from util import read_file
import random
def generate_set(adj_list, threshold, start):
    """BFS from ``start``, collecting nodes until a node whose depth
    exceeds ``threshold`` is dequeued (a reverse-reachable set for TIM).

    Note: neighbours at depth threshold+1 that were enqueued before the
    cut-off triggers remain in the returned set, matching the original
    behaviour exactly.
    """
    visited = {start}
    queue = [(start, 0)]
    # Index cursor instead of list.pop(0), which is O(n) per pop and made
    # the traversal quadratic on large frontiers; FIFO order is unchanged.
    head = 0
    while head < len(queue):
        node, depth = queue[head]
        head += 1
        if depth > threshold:
            return visited
        for neighbor in adj_list[node]:
            if neighbor in visited:
                continue
            visited.add(neighbor)
            queue.append((neighbor, depth + 1))
    return visited
if __name__ == "__main__":
    # TIM-style greedy max-cover over randomly sampled reverse-reachable
    # sets. Python 2 print syntax.
    random.seed()
    graph = read_file("input.txt")
    graph.preprocess()
    adj_list = graph.G
    num_sets = 500   # number of RR sets to sample
    threshold = 1    # BFS depth cut-off per RR set
    budget = 10      # number of seed nodes to pick
    possible_nodes = set()
    start_nodes = random.sample(range(graph.N), num_sets)
    sets = []
    for start in start_nodes:
        rr_set = generate_set(adj_list, threshold, start)
        possible_nodes = possible_nodes.union(rr_set)
        sets.append(rr_set)
    results = []
    # Greedy: repeatedly pick the node covering the most remaining sets.
    for _ in range(budget):
        max_cover_node = None
        max_cover_count = 0
        for node in possible_nodes:
            count = 0
            for rr_set in sets:
                if node in rr_set: count += 1
            if count > max_cover_count:
                max_cover_count = count
                max_cover_node = node
        if max_cover_node != None:
            # NOTE(review): removing from `sets` while iterating it skips
            # the element after each removal, so some covered sets may
            # survive a round — confirm whether this is intended.
            for rr_set in sets:
                if max_cover_node in rr_set:
                    sets.remove(rr_set)
            possible_nodes.remove(max_cover_node)
            results.append(max_cover_node)
    print results
    print graph.avg_dist(results)
| from util import read_file
import random
def generate_set(adj_list, threshold, start):
    """BFS from ``start``, collecting nodes until a node whose depth
    exceeds ``threshold`` is dequeued (a reverse-reachable set for TIM).

    Note: neighbours at depth threshold+1 that were enqueued before the
    cut-off triggers remain in the returned set, matching the original
    behaviour exactly.
    """
    visited = {start}
    queue = [(start, 0)]
    # Index cursor instead of list.pop(0), which is O(n) per pop and made
    # the traversal quadratic on large frontiers; FIFO order is unchanged.
    head = 0
    while head < len(queue):
        node, depth = queue[head]
        head += 1
        if depth > threshold:
            return visited
        for neighbor in adj_list[node]:
            if neighbor in visited:
                continue
            visited.add(neighbor)
            queue.append((neighbor, depth + 1))
    return visited
if __name__ == "__main__":
    # TIM-style greedy max-cover over randomly sampled reverse-reachable
    # sets. Python 2 print syntax. No random.seed() call here, so the
    # interpreter's default seeding applies.
    graph = read_file("input.txt")
    adj_list = graph.G
    num_sets = 100   # number of RR sets to sample
    threshold = 2    # BFS depth cut-off per RR set
    budget = 10      # number of seed nodes to pick
    possible_nodes = set()
    start_nodes = random.sample(range(graph.N), num_sets)
    sets = []
    for start in start_nodes:
        rr_set = generate_set(adj_list, threshold, start)
        possible_nodes = possible_nodes.union(rr_set)
        sets.append(rr_set)
    results = []
    # Greedy: repeatedly pick the node covering the most remaining sets.
    for _ in range(budget):
        max_cover_node = None
        max_cover_count = 0
        for node in possible_nodes:
            count = 0
            for rr_set in sets:
                if node in rr_set: count += 1
            if count > max_cover_count:
                max_cover_count = count
                max_cover_node = node
        if max_cover_node != None:
            # NOTE(review): removing from `sets` while iterating it skips
            # the element after each removal, so some covered sets may
            # survive a round — confirm whether this is intended.
            for rr_set in sets:
                if max_cover_node in rr_set:
                    sets.remove(rr_set)
            possible_nodes.remove(max_cover_node)
            results.append(max_cover_node)
    print results
    print graph.max_dist(results)
| mit | Python |
a8aa8ca27e5690ed6a58a15490e605af57b5f32b | Undo change to SO name | jdmcbr/Shapely,mindw/shapely,jdmcbr/Shapely,mouadino/Shapely,abali96/Shapely,mindw/shapely,abali96/Shapely,mouadino/Shapely | shapely/geos.py | shapely/geos.py | """
Exports the libgeos_c shared lib, GEOS-specific exceptions, and utilities.
"""
import atexit
from ctypes import cdll, CDLL, CFUNCTYPE, c_char_p
from ctypes.util import find_library
import os
import sys
import shapely
# Load the GEOS C API shared library in a platform-specific way, together
# with a C free() used to release memory that GEOS hands back to us.
if sys.platform == 'win32':
    try:
        # Prepend the Python DLLs directory so geos.dll can be located.
        local_dlls = os.path.abspath(os.__file__ + "../../../DLLs")
        original_path = os.environ['PATH']
        os.environ['PATH'] = "%s;%s" % (local_dlls, original_path)
        lgeos = CDLL("geos.dll")
    except (ImportError, WindowsError):
        raise

    def free(m):
        try:
            cdll.msvcrt.free(m)
        except WindowsError:
            # XXX: See http://trac.gispython.org/projects/PCL/ticket/149
            pass
elif sys.platform == 'darwin':
    lgeos = CDLL(find_library('geos_c'))
    free = CDLL(find_library('libc')).free
else:
    # Try the major versioned name first, falling back on the unversioned name.
    try:
        lgeos = CDLL('libgeos_c.so.1')
    except (OSError, ImportError):
        lgeos = CDLL('libgeos_c.so')
    except:
        raise
    free = CDLL('libc.so.6').free


class allocated_c_char_p(c_char_p):
    # Distinct ctypes return type; presumably marks char* results whose
    # memory the caller owns and must release via free() — TODO confirm
    # at the call sites.
    pass


# Exceptions

class ReadingError(Exception):
    pass


class DimensionError(Exception):
    pass


class TopologicalError(Exception):
    pass


class PredicateError(Exception):
    pass


# GEOS error handlers, which currently do nothing.

def error_handler(fmt, list):
    pass
error_h = CFUNCTYPE(None, c_char_p, c_char_p)(error_handler)


def notice_handler(fmt, list):
    pass
notice_h = CFUNCTYPE(None, c_char_p, c_char_p)(notice_handler)


# Register a cleanup function that tears GEOS down at interpreter exit.
def cleanup():
    lgeos.finishGEOS()
atexit.register(cleanup)

# Initialize GEOS once at import time with the no-op handlers above.
lgeos.initGEOS(notice_h, error_h)
| """
Exports the libgeos_c shared lib, GEOS-specific exceptions, and utilities.
"""
import atexit
from ctypes import cdll, CDLL, CFUNCTYPE, c_char_p
from ctypes.util import find_library
import os
import sys
import shapely
# Load the GEOS C API shared library in a platform-specific way, together
# with a C free() used to release memory that GEOS hands back to us.
if sys.platform == 'win32':
    try:
        # Prepend the Python DLLs directory so geos.dll can be located.
        local_dlls = os.path.abspath(os.__file__ + "../../../DLLs")
        original_path = os.environ['PATH']
        os.environ['PATH'] = "%s;%s" % (local_dlls, original_path)
        lgeos = CDLL("geos.dll")
    except (ImportError, WindowsError):
        raise

    def free(m):
        try:
            cdll.msvcrt.free(m)
        except WindowsError:
            # XXX: See http://trac.gispython.org/projects/PCL/ticket/149
            pass
elif sys.platform == 'darwin':
    lgeos = CDLL(find_library('geos_c'))
    free = CDLL(find_library('libc')).free
else:
    # Try the major versioned name first, falling back on the unversioned name.
    try:
        lgeos = CDLL('libgeos_c.so.2')
    except (OSError, ImportError):
        lgeos = CDLL('libgeos_c.so')
    except:
        raise
    free = CDLL('libc.so.6').free


class allocated_c_char_p(c_char_p):
    # Distinct ctypes return type; presumably marks char* results whose
    # memory the caller owns and must release via free() — TODO confirm
    # at the call sites.
    pass


# Exceptions

class ReadingError(Exception):
    pass


class DimensionError(Exception):
    pass


class TopologicalError(Exception):
    pass


class PredicateError(Exception):
    pass


# GEOS error handlers, which currently do nothing.

def error_handler(fmt, list):
    pass
error_h = CFUNCTYPE(None, c_char_p, c_char_p)(error_handler)


def notice_handler(fmt, list):
    pass
notice_h = CFUNCTYPE(None, c_char_p, c_char_p)(notice_handler)


# Register a cleanup function that tears GEOS down at interpreter exit.
def cleanup():
    lgeos.finishGEOS()
atexit.register(cleanup)

# Initialize GEOS once at import time with the no-op handlers above.
lgeos.initGEOS(notice_h, error_h)
| bsd-3-clause | Python |
38917c4a3cb3c120cbe26b35bd5483d99c5392ba | support matrix conversion syntax for r_ and c_ | simupy/simupy | simupy/array.py | simupy/array.py | from sympy.tensor.array import Array
from sympy import ImmutableDenseMatrix as Matrix
from numpy.lib.index_tricks import RClass, CClass, AxisConcatenator
class SymAxisConcatenatorMixin:
"""
A mix-in to convert numpy AxisConcatenator classes to use with sympy N-D
arrays.
"""
# support numpy >= 1.13
concatenate = staticmethod(
lambda *args, **kwargs: Array(
AxisConcatenator.concatenate(*args, **kwargs)
)
)
makemat = staticmethod(Matrix)
def _retval(self, res): # support numpy < 1.13
if self.matrix:
cls = Matrix
else:
cls = Array
return cls(super()._retval(res))
def __getitem__(self, key):
return super().__getitem__(tuple(
k if isinstance(k, str) else
Array(k) if hasattr(k, '__len__')
else Array([k])
for k in key
))
class SymRClass(SymAxisConcatenatorMixin, RClass):
pass
class SymCClass(SymAxisConcatenatorMixin, CClass):
pass
r_ = SymRClass()
c_ = SymCClass()
def empty_array():
"""
Construct an empty array, which is often needed as a place-holder
"""
a = Array([0])
a._shape = tuple()
a._rank = 0
a._loop_size = 0
a._array = []
a.__str__ = lambda *args, **kwargs: "[]"
return a
| from sympy.tensor.array import Array
from numpy.lib.index_tricks import RClass, CClass
class SymAxisConcatenatorMixin:
"""
A mix-in to convert numpy AxisConcatenator classes to use with sympy N-D
arrays.
"""
def __getitem__(self, key):
return Array(super().__getitem__(tuple(
k if isinstance(k, str) else
Array(k) if hasattr(k, '__len__')
else Array([k])
for k in key
)))
class SymRClass(SymAxisConcatenatorMixin, RClass):
pass
class SymCClass(SymAxisConcatenatorMixin, CClass):
pass
r_ = SymRClass()
c_ = SymCClass()
def empty_array():
"""
Construct an empty array, which is often needed as a place-holder
"""
a = Array([0])
a._shape = tuple()
a._rank = 0
a._loop_size = 0
a._array = []
a.__str__ = lambda *args, **kwargs: "[]"
return a
| bsd-2-clause | Python |
2db6df175c82be21a243e80893f04bab6b754d87 | Create the logging directory | stevenburgess/zfs-tests,datto/zfs-tests | TestConfig.py | TestConfig.py | import os
import sys
import subprocess
import Configs
import ZfsApi
# This class will quickly test if your machine is properly configured for
# these perf tests.
# TODO check that zfs is installed, perhaps get version
def check_all():
permissions_check()
check_filesystems()
def permissions_check():
# Check that the calling user has permissions to run zfs commands this is
# established by having read permissions on the /dev/zfs device
if not os.access('/dev/zfs' ,os.R_OK):
print("You do not have read permissions to /dev/zfs, can you run zfs"
+ " commands?")
sys.exit(1)
def check_filesystems():
# Check that the area we are going to be working in exists. If it does not
# offer to set it up for the user.
if not os.path.isdir(Configs.mount_point):
print("Could not find the pref_tests directory " +
Configs.mount_point)
result = raw_input("Create it? [Y/n] ")
if result == "Y":
setup_system()
else:
print("Exiting tests")
sys.exit(1)
def setup_system():
# This function will setup the zfs filesystems, it does not perform
# any checks, call it when you know this machine needs to be set up
subprocess.check_call(['zfs', 'create', '-p',
Configs.test_filesystem_path,
'-o', "mountpoint=" + Configs.mount_point])
# Create the corpus directory, currently setting primarycahce=none
# since not doing so results in abnormalities in test timing. I
# think this will become especially useful when this process
# becomes multithreaded.
subprocess.check_call(['zfs', 'create',
Configs.test_filesystem_path + '/corpus',
'-o', 'primarycache=none'])
# Create the area for test runs to go. I keep this in a separate
# area to ensure that cleanup is easy
ZfsApi.create_filesystem(Configs.test_filesystem_path + '/runs')
# Create the log directory, and its two sub directories
ZfsApi.create_filesystem(Configs.test_filesystem_path + '/logs')
# The two sub directories are not zfs filesystems
os.mkdir(Configs.results_directory)
os.mkdir(Configs.stats_directory)
def check_testfile():
'''Perfomr tests to ensure the test file will be usable'''
# Check that the specified test file exists
if not os.path.isfile(Configs.test_file_full_path):
print("The test file does not exits. It is set to " +
Configs.test_file_full_path + " please check config.cfg")
sys.exit(1)
# Check that the specified test file is readable
if not os.access(Configs.test_file_full_path, os.R_OK):
print("Cannot read the test file. It is set to " +
Configs.test_file_full_path + " please check config.cfg")
sys.exit(1)
| import os
import sys
import subprocess
import Configs
import ZfsApi
# This class will quickly test if your machine is properly configured for
# these perf tests.
# TODO check that zfs is installed, perhaps get version
def check_all():
permissions_check()
check_filesystems()
def permissions_check():
# Check that the calling user has permissions to run zfs commands this is
# established by having read permissions on the /dev/zfs device
if not os.access('/dev/zfs' ,os.R_OK):
print("You do not have read permissions to /dev/zfs, can you run zfs"
+ " commands?")
sys.exit(1)
def check_filesystems():
# Check that the area we are going to be working in exists. If it does not
# offer to set it up for the user.
if not os.path.isdir(Configs.mount_point):
print("Could not find the pref_tests directory " +
Configs.mount_point)
result = raw_input("Create it? [Y/n] ")
if result == "Y":
setup_system()
else:
print("Exiting tests")
sys.exit(1)
def setup_system():
# This function will setup the zfs filesystems, it does not perform
# any checks, call it when you know this machine needs to be set up
subprocess.check_call(['zfs', 'create', '-p',
Configs.test_filesystem_path,
'-o', "mountpoint=" + Configs.mount_point])
# Create the corpus directory, currently setting primarycahce=none
# since not doing so results in abnormalities in test timing. I
# think this will become especially useful when this process
# becomes multithreaded.
subprocess.check_call(['zfs', 'create',
Configs.test_filesystem_path + '/corpus',
'-o', 'primarycache=none'])
# Create the area for test runs to go. I keep this in a separate
# area to ensure that cleanup is easy
ZfsApi.create_filesystem(Configs.test_filesystem_path + '/runs')
def check_testfile():
'''Perfomr tests to ensure the test file will be usable'''
# Check that the specified test file exists
if not os.path.isfile(Configs.test_file_full_path):
print("The test file does not exits. It is set to " +
Configs.test_file_full_path + " please check config.cfg")
sys.exit(1)
# Check that the specified test file is readable
if not os.access(Configs.test_file_full_path, os.R_OK):
print("Cannot read the test file. It is set to " +
Configs.test_file_full_path + " please check config.cfg")
sys.exit(1)
| mit | Python |
33894846ce841497eb17aca2973d0f58de94d748 | Bump version to Development 0.3 | fulfilio/nereid-cms,priyankarani/nereid-cms,tarunbhardwaj/nereid-cms | __tryton__.py | __tryton__.py | #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
{
'name': 'Nereid CMS',
'version': '2.0.0.3',
'author': '''Open Labs Business Solutions,
Openlabs Technologies & Consulting (P) Ltd.''',
'email': 'info@openlabs.co.in',
'website': 'http://www.openlabs.co.in/',
'description': '''Nereid CMS''',
'depends': [
'nereid',
],
'xml': [
'cms.xml',
'urls.xml',
],
'translation': [
],
}
| #This file is part of Tryton. The COPYRIGHT file at the top level of
#this repository contains the full copyright notices and license terms.
{
'name': 'Nereid CMS',
'version': '2.0.0.2',
'author': '''Open Labs Business Solutions,
Openlabs Technologies & Consulting (P) Ltd.''',
'email': 'info@openlabs.co.in',
'website': 'http://www.openlabs.co.in/',
'description': '''Nereid CMS''',
'depends': [
'nereid',
],
'xml': [
'cms.xml',
'urls.xml',
],
'translation': [
],
}
| bsd-3-clause | Python |
56b8e94f651ee6204d62323716d3272ab747d087 | remove package-mimicking syntax | TheChymera/LabbookDB,TheChymera/LabbookDB | labbookdb/report/selection.py | labbookdb/report/selection.py | from .db import query
def data_selection(db_path, data_type, treatment_start_dates=[]):
"""Select dataframe from a LabbookDB style database.
Parameters
----------
db_path : string
Path to a LabbookDB formatted database.
data_type : string
What type of data should be selected values can be:
"sucrose preference"
"forced swim"
treatment_start_dates : list, optional
A list containing the treatment start date or dates by which to filter the cages for the sucrose preference measurements.
Items should be strings in datetime format, e.g. "2016,4,25,19,30".
"""
if data_type == "sucrose preference":
col_entries=[
("Cage","id"),
("Treatment",),
("SucrosePreferenceMeasurement",),
("TreatmentProtocol","code"),
]
join_entries=[
("Cage.treatments",),
("SucrosePreferenceMeasurement",),
("Treatment.protocol",),
]
elif data_type == "forced swim":
col_entries=[
("Animal","id"),
("Cage","id"),
("Treatment",),
("TreatmentProtocol","code"),
("ForcedSwimTestMeasurement",),
("Evaluation",),
]
join_entries=[
("Animal.cage_stays",),
("ForcedSwimTestMeasurement",),
("Evaluation",),
("CageStay.cage",),
("Cage.treatments",),
("Treatment.protocol",),
]
if treatment_start_dates:
my_filter = ["Treatment","start_date"]
my_filter.extend(treatment_start_dates)
else:
my_filter = None
df = query.get_df(db_path,col_entries=col_entries, join_entries=join_entries, filters=[my_filter])
return df
| if not __package__:
import os, sys
print(os.path.realpath(__file__))
pkg_root = os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)),"../.."))
sys.path.insert(0,pkg_root)
from labbookdb.db import query
def data_selection(db_path, data_type, treatment_start_dates=[]):
"""Select dataframe from a LabbookDB style database.
Parameters
----------
db_path : string
Path to a LabbookDB formatted database.
data_type : string
What type of data should be selected values can be:
"sucrose preference"
"forced swim"
treatment_start_dates : list, optional
A list containing the treatment start date or dates by which to filter the cages for the sucrose preference measurements.
Items should be strings in datetime format, e.g. "2016,4,25,19,30".
"""
if data_type == "sucrose preference":
col_entries=[
("Cage","id"),
("Treatment",),
("SucrosePreferenceMeasurement",),
("TreatmentProtocol","code"),
]
join_entries=[
("Cage.treatments",),
("SucrosePreferenceMeasurement",),
("Treatment.protocol",),
]
elif data_type == "forced swim":
col_entries=[
("Animal","id"),
("Cage","id"),
("Treatment",),
("TreatmentProtocol","code"),
("ForcedSwimTestMeasurement",),
("Evaluation",),
]
join_entries=[
("Animal.cage_stays",),
("ForcedSwimTestMeasurement",),
("Evaluation",),
("CageStay.cage",),
("Cage.treatments",),
("Treatment.protocol",),
]
if treatment_start_dates:
my_filter = ["Treatment","start_date"]
my_filter.extend(treatment_start_dates)
else:
my_filter = None
df = query.get_df(db_path,col_entries=col_entries, join_entries=join_entries, filters=[my_filter])
return df
if __name__ == '__main__' and __package__ is None:
sucrose_prefernce()
| bsd-3-clause | Python |
a9533109d48214ad4dcf3048e175aa16e89731f5 | Increase version to 0.3.2 | ccampbell/storm | storm/__init__.py | storm/__init__.py | version = '0.3.2'
| version = '0.3.1'
| mit | Python |
9d0d6a9c895876f28ec719ea54feb614b408bf5c | Test project: fix import of djmercadopago urls | data-tsunami/django-mercadopago,data-tsunami/django-mercadopago | test_project/test_project/urls.py | test_project/test_project/urls.py | from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns('',
url(r'^$', 'test_app.views.home', name='home'),
url(r'^mp/', include('djmercadopago.urls')),
url(r'^admin/', include(admin.site.urls)),
)
| from django.conf.urls import patterns, include, url
from django.contrib import admin
import djmercadopago
urlpatterns = patterns('',
url(r'^$', 'test_app.views.home', name='home'),
url(r'^mp/', include(djmercadopago.urls)),
url(r'^admin/', include(admin.site.urls)),
)
| bsd-3-clause | Python |
d3998e64c837f87ad574964b4325335172ca8c5d | add a create and delete view for newsletter subscription models | byteweaver/django-newsletters | newsletters/views.py | newsletters/views.py | from django.views.generic import CreateView, DeleteView
from newsletters.models import Subscription
from newsletters.forms import SubscriptionForm
class SubscriptionCreateView(CreateView):
form_class = SubscriptionForm
class SubscriptionDeleteView(DeleteView):
model = Subscription
| from django.shortcuts import render
# Create your views here.
| bsd-3-clause | Python |
c70678cafb9f6d2bb1db2c64513de5e9853d58e0 | check uuid_generate | hwaf/hwaf,hwaf/hwaf | find_uuid.py | find_uuid.py | # -*- python -*-
# stdlib imports ---
import os
import os.path as osp
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(ctx):
ctx.load('hep-waftools-base', tooldir=_heptooldir)
ctx.add_option(
'--with-uuid',
default=None,
help="Look for UUID at the given path")
return
def configure(ctx):
ctx.load('hep-waftools-base', tooldir=_heptooldir)
return
@conf
def find_uuid(ctx, **kwargs):
if not ctx.env.CC or not ctx.env.CXX:
msg.fatal('load a C or C++ compiler first')
pass
ctx.load('hep-waftools-base', tooldir=_heptooldir)
# find libuuid
ctx.check_with(
ctx.check,
"uuid",
features='cxx cxxprogram',
header_name="uuid/uuid.h",
lib='uuid',
uselib_store='uuid',
**kwargs
)
# test uuid
ctx.check_cxx(
msg="Checking uuid_generate",
okmsg="ok",
fragment='''\
#include "uuid/uuid.h"
#include <iostream>
int main(int argc, char* argv[]) {
uuid_t out;
uuid_generate(out);
return 0;
}
''',
use="uuid",
execute = True,
mandatory= True,
)
ctx.env.HEPWAF_FOUND_UUID = 1
return
## EOF ##
| # -*- python -*-
# stdlib imports ---
import os
import os.path as osp
# waf imports ---
import waflib.Utils
import waflib.Logs as msg
from waflib.Configure import conf
#
_heptooldir = osp.dirname(osp.abspath(__file__))
def options(ctx):
ctx.load('hep-waftools-base', tooldir=_heptooldir)
return
def configure(ctx):
ctx.load('hep-waftools-base', tooldir=_heptooldir)
return
@conf
def find_uuid(ctx, **kwargs):
if not ctx.env.CC or not ctx.env.CXX:
msg.fatal('load a C or C++ compiler first')
pass
ctx.load('hep-waftools-base', tooldir=_heptooldir)
# find libuuid
ctx.check_with(
ctx.check,
"uuid",
features='cxx cxxprogram',
header_name="uuid/uuid.h",
lib='uuid',
uselib_store='uuid',
**kwargs
)
# test uuid
ctx.check_cxx(
msg="Checking uuid_init",
okmsg="ok",
fragment='''\
#include "uuid/uuid.h"
#include <iostream>
int main(int argc, char* argv[]) {
return 0;
}
''',
use="uuid",
execute = True,
mandatory= True,
)
ctx.env.HEPWAF_FOUND_UUID = 1
return
## EOF ##
| bsd-3-clause | Python |
fbe72e14b9d53570614538b59e5d9128d0a3832d | Add a catchall dialplan entry when matching users. | Eyepea/aiosip,sangoma/aiosip | aiosip/dialplan.py | aiosip/dialplan.py | import logging
from . import utils
from collections import MutableMapping
LOG = logging.getLogger(__name__)
class Dialplan:
def __init__(self, default=None):
self._users = {}
self.default = default
async def resolve(self, username, protocol, local_addr, remote_addr):
LOG.debug('Resolving dialplan for %s connecting on %s from %s via %s',
username, local_addr, remote_addr, protocol)
router = self._users.get(username)
if not router:
router = self._users.get('*', self.default)
return router
def add_user(self, username, router):
self._users[username] = router
class Router(MutableMapping):
def __init__(self, default=None):
self._routes = {}
if default:
self._routes['*'] = default
# MutableMapping API
def __eq__(self, other):
return self is other
def __getitem__(self, key):
try:
return self._routes[key.lower()]
except KeyError:
return self._routes['*']
def __setitem__(self, key, value):
self._routes[key.lower()] = value
def __delitem__(self, key):
del self._routes[key.lower()]
def __len__(self):
return len(self._routes)
def __iter__(self):
return iter(self._routes)
class ProxyRouter(Router):
def __init__(self):
super().__init__(default=self.proxy)
async def proxy(self, dialog, msg, timeout=5):
peer = await utils.get_proxy_peer(dialog, msg)
LOG.debug('Proxying "%s, %s, %s" from "%s" to "%s"', msg.cseq, msg.method, dialog.call_id, dialog.peer, peer)
async for proxy_response in peer.proxy_request(dialog, msg, timeout=timeout):
if proxy_response:
dialog.peer.proxy_response(proxy_response)
| import logging
from . import utils
from collections import MutableMapping
LOG = logging.getLogger(__name__)
class Dialplan:
def __init__(self, default=None):
self._users = {}
self.default = default
async def resolve(self, username, protocol, local_addr, remote_addr):
LOG.debug('Resolving dialplan for %s connecting on %s from %s via %s',
username, local_addr, remote_addr, protocol)
return self._users.get(username, self.default)
def add_user(self, username, router):
self._users[username] = router
class Router(MutableMapping):
def __init__(self, default=None):
self._routes = {}
if default:
self._routes['*'] = default
# MutableMapping API
def __eq__(self, other):
return self is other
def __getitem__(self, key):
try:
return self._routes[key.lower()]
except KeyError:
return self._routes['*']
def __setitem__(self, key, value):
self._routes[key.lower()] = value
def __delitem__(self, key):
del self._routes[key.lower()]
def __len__(self):
return len(self._routes)
def __iter__(self):
return iter(self._routes)
class ProxyRouter(Router):
def __init__(self):
super().__init__(default=self.proxy)
async def proxy(self, dialog, msg, timeout=5):
peer = await utils.get_proxy_peer(dialog, msg)
LOG.debug('Proxying "%s, %s, %s" from "%s" to "%s"', msg.cseq, msg.method, dialog.call_id, dialog.peer, peer)
async for proxy_response in peer.proxy_request(dialog, msg, timeout=timeout):
if proxy_response:
dialog.peer.proxy_response(proxy_response)
| apache-2.0 | Python |
11e23916b47cf8b9c35f32ad379e66336cf53f8b | Change from double to single quotes | imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh | _lib/wordpress_view_processor.py | _lib/wordpress_view_processor.py | import sys
import json
import os.path
from string import Template
from wordpress_post_processor import process_post
import requests
import dateutil.parser
def posts_at_url(url):
url = os.path.expandvars(url)
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
resp = requests.get(url, params={'page': current_page})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for view in posts_at_url(url):
yield process_view(view)
def process_view(post):
post['_id'] = post['slug']
custom_fields = post['custom_fields']
# limit popular posts to five items
if 'popular_posts' in custom_fields:
popular_posts = [slug for slug in custom_fields['popular_posts'][:5]]
post['popular_posts'] = popular_posts
# convert related links into a proper list
related = []
for x in xrange(0, 5):
key = 'related_link_%s' % x
if key in custom_fields:
related.append(custom_fields[key])
post['related_links'] = related
# append the hero information
if 'related_hero' in custom_fields and custom_fields['related_hero'][0] != '':
hero_id = custom_fields['related_hero'][0]
hero_url = os.path.expandvars("$WORDPRESS/hero/" + hero_id + "/?json=1")
response = requests.get(hero_url)
hero_data = json.loads(response.content)
if hero_data['status'] is 'ok':
hero_data = hero_data['post']
hero_data['related_posts'] = hero_data['custom_fields']['related_post']
post['hero'] = hero_data
return post
| import sys
import json
import os.path
from string import Template
from wordpress_post_processor import process_post
import requests
import dateutil.parser
def posts_at_url(url):
url = os.path.expandvars(url)
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
resp = requests.get(url, params={'page': current_page})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for view in posts_at_url(url):
yield process_view(view)
def process_view(post):
post['_id'] = post['slug']
custom_fields = post['custom_fields']
# limit popular posts to five items
if 'popular_posts' in custom_fields:
popular_posts = [slug for slug in custom_fields['popular_posts'][:5]]
post['popular_posts'] = popular_posts
# convert related links into a proper list
related = []
for x in xrange(0, 5):
key = 'related_link_%s' % x
if key in custom_fields:
related.append(custom_fields[key])
post['related_links'] = related
# append the hero information
if 'related_hero' in custom_fields and custom_fields['related_hero'][0] != '':
hero_id = custom_fields['related_hero'][0]
hero_url = os.path.expandvars("$WORDPRESS/hero/" + hero_id + "/?json=1")
response = requests.get(hero_url)
hero_data = json.loads(response.content)
if hero_data['status'] is "ok":
hero_data = hero_data['post']
hero_data['related_posts'] = hero_data['custom_fields']['related_post']
post['hero'] = hero_data
return post
| cc0-1.0 | Python |
8f9a466dee5cd9bcd794e95f3d6cc22103dc4fb7 | Add incubating specifier to version | sdiazb/airflow,edgarRd/incubator-airflow,jfantom/incubator-airflow,mrkm4ntr/incubator-airflow,alexvanboxel/airflow,jfantom/incubator-airflow,AllisonWang/incubator-airflow,zoyahav/incubator-airflow,artwr/airflow,edgarRd/incubator-airflow,NielsZeilemaker/incubator-airflow,andrewmchen/incubator-airflow,rishibarve/incubator-airflow,easytaxibr/airflow,hgrif/incubator-airflow,brandsoulmates/incubator-airflow,Fokko/incubator-airflow,MetrodataTeam/incubator-airflow,jlowin/airflow,AllisonWang/incubator-airflow,adamhaney/airflow,gtoonstra/airflow,CloverHealth/airflow,zack3241/incubator-airflow,jhsenjaliya/incubator-airflow,KL-WLCR/incubator-airflow,alexvanboxel/airflow,yati-sagade/incubator-airflow,vijaysbhat/incubator-airflow,dgies/incubator-airflow,yk5/incubator-airflow,ProstoMaxim/incubator-airflow,edgarRd/incubator-airflow,preete-dixit-ck/incubator-airflow,holygits/incubator-airflow,dmitry-r/incubator-airflow,OpringaoDoTurno/airflow,RealImpactAnalytics/airflow,subodhchhabra/airflow,aminghadersohi/airflow,cfei18/incubator-airflow,mrkm4ntr/incubator-airflow,adamhaney/airflow,malmiron/incubator-airflow,skudriashev/incubator-airflow,apache/incubator-airflow,mrares/incubator-airflow,spektom/incubator-airflow,zodiac/incubator-airflow,preete-dixit-ck/incubator-airflow,andrewmchen/incubator-airflow,jgao54/airflow,holygits/incubator-airflow,jesusfcr/airflow,dmitry-r/incubator-airflow,wooga/airflow,yk5/incubator-airflow,asnir/airflow,cfei18/incubator-airflow,DinoCow/airflow,zodiac/incubator-airflow,apache/airflow,spektom/incubator-airflow,hamedhsn/incubator-airflow,gritlogic/incubator-airflow,yati-sagade/incubator-airflow,btallman/incubator-airflow,mattuuh7/incubator-airflow,danielvdende/incubator-airflow,airbnb/airflow,NielsZeilemaker/incubator-airflow,mrkm4ntr/incubator-airflow,gtoonstra/airflow,dmitry-r/incubator-airflow,apache/airflow,fenglu-g/incubator-airflow,KL-WLCR/incubator-airflow,wolfier/inc
ubator-airflow,Tagar/incubator-airflow,malmiron/incubator-airflow,malmiron/incubator-airflow,wndhydrnt/airflow,RealImpactAnalytics/airflow,zoyahav/incubator-airflow,ronfung/incubator-airflow,zack3241/incubator-airflow,mrares/incubator-airflow,skudriashev/incubator-airflow,gritlogic/incubator-airflow,mattuuh7/incubator-airflow,Twistbioscience/incubator-airflow,Fokko/incubator-airflow,Fokko/incubator-airflow,DinoCow/airflow,wolfier/incubator-airflow,wndhydrnt/airflow,MortalViews/incubator-airflow,cjqian/incubator-airflow,mtagle/airflow,CloverHealth/airflow,dmitry-r/incubator-airflow,preete-dixit-ck/incubator-airflow,andyxhadji/incubator-airflow,btallman/incubator-airflow,bolkedebruin/airflow,zodiac/incubator-airflow,jiwang576/incubator-airflow,subodhchhabra/airflow,N3da/incubator-airflow,NielsZeilemaker/incubator-airflow,mistercrunch/airflow,ronfung/incubator-airflow,hgrif/incubator-airflow,gritlogic/incubator-airflow,sdiazb/airflow,asnir/airflow,sergiohgz/incubator-airflow,akosel/incubator-airflow,lxneng/incubator-airflow,janczak10/incubator-airflow,owlabs/incubator-airflow,wooga/airflow,aminghadersohi/airflow,airbnb/airflow,MortalViews/incubator-airflow,dgies/incubator-airflow,criccomini/airflow,DinoCow/airflow,andyxhadji/incubator-airflow,stverhae/incubator-airflow,gtoonstra/airflow,RealImpactAnalytics/airflow,jlowin/airflow,jhsenjaliya/incubator-airflow,hamedhsn/incubator-airflow,cfei18/incubator-airflow,sid88in/incubator-airflow,gilt/incubator-airflow,CloverHealth/airflow,bolkedebruin/airflow,r39132/airflow,adrpar/incubator-airflow,sid88in/incubator-airflow,saguziel/incubator-airflow,stverhae/incubator-airflow,nathanielvarona/airflow,zack3241/incubator-airflow,andyxhadji/incubator-airflow,adamhaney/airflow,rishibarve/incubator-airflow,stverhae/incubator-airflow,nathanielvarona/airflow,gilt/incubator-airflow,MortalViews/incubator-airflow,fenglu-g/incubator-airflow,yk5/incubator-airflow,r39132/airflow,jiwang576/incubator-airflow,N3da/incubator-airflow,r39132/airflo
w,criccomini/airflow,malmiron/incubator-airflow,mrkm4ntr/incubator-airflow,bolkedebruin/airflow,mistercrunch/airflow,jesusfcr/airflow,jiwang576/incubator-airflow,mattuuh7/incubator-airflow,yati-sagade/incubator-airflow,Acehaidrey/incubator-airflow,cjqian/incubator-airflow,jesusfcr/airflow,nathanielvarona/airflow,lxneng/incubator-airflow,wooga/airflow,jesusfcr/airflow,aminghadersohi/airflow,danielvdende/incubator-airflow,adrpar/incubator-airflow,cjqian/incubator-airflow,lyft/incubator-airflow,wileeam/airflow,CloverHealth/airflow,cfei18/incubator-airflow,RealImpactAnalytics/airflow,fenglu-g/incubator-airflow,alexvanboxel/airflow,subodhchhabra/airflow,DinoCow/airflow,jlowin/airflow,cfei18/incubator-airflow,subodhchhabra/airflow,OpringaoDoTurno/airflow,rishibarve/incubator-airflow,hamedhsn/incubator-airflow,lyft/incubator-airflow,dgies/incubator-airflow,MetrodataTeam/incubator-airflow,dhuang/incubator-airflow,zack3241/incubator-airflow,sdiazb/airflow,jlowin/airflow,saguziel/incubator-airflow,fenglu-g/incubator-airflow,Twistbioscience/incubator-airflow,hgrif/incubator-airflow,Acehaidrey/incubator-airflow,owlabs/incubator-airflow,janczak10/incubator-airflow,btallman/incubator-airflow,gritlogic/incubator-airflow,sdiazb/airflow,adrpar/incubator-airflow,easytaxibr/airflow,KL-WLCR/incubator-airflow,nathanielvarona/airflow,apache/airflow,cjqian/incubator-airflow,mtagle/airflow,gilt/incubator-airflow,ProstoMaxim/incubator-airflow,Acehaidrey/incubator-airflow,ronfung/incubator-airflow,wooga/airflow,vijaysbhat/incubator-airflow,saguziel/incubator-airflow,N3da/incubator-airflow,dgies/incubator-airflow,yati-sagade/incubator-airflow,lxneng/incubator-airflow,airbnb/airflow,owlabs/incubator-airflow,bolkedebruin/airflow,wndhydrnt/airflow,adrpar/incubator-airflow,apache/incubator-airflow,dhuang/incubator-airflow,apache/airflow,apache/airflow,zoyahav/incubator-airflow,akosel/incubator-airflow,zodiac/incubator-airflow,mistercrunch/airflow,jfantom/incubator-airflow,lyft/incubator-airflow,b
olkedebruin/airflow,yk5/incubator-airflow,owlabs/incubator-airflow,Acehaidrey/incubator-airflow,mrares/incubator-airflow,nathanielvarona/airflow,Acehaidrey/incubator-airflow,holygits/incubator-airflow,cfei18/incubator-airflow,airbnb/airflow,easytaxibr/airflow,danielvdende/incubator-airflow,holygits/incubator-airflow,artwr/airflow,gilt/incubator-airflow,jgao54/airflow,Acehaidrey/incubator-airflow,andrewmchen/incubator-airflow,MetrodataTeam/incubator-airflow,sekikn/incubator-airflow,akosel/incubator-airflow,Twistbioscience/incubator-airflow,saguziel/incubator-airflow,brandsoulmates/incubator-airflow,adamhaney/airflow,sekikn/incubator-airflow,sekikn/incubator-airflow,jgao54/airflow,artwr/airflow,jhsenjaliya/incubator-airflow,alexvanboxel/airflow,mrares/incubator-airflow,AllisonWang/incubator-airflow,janczak10/incubator-airflow,wileeam/airflow,AllisonWang/incubator-airflow,wileeam/airflow,skudriashev/incubator-airflow,jfantom/incubator-airflow,easytaxibr/airflow,mattuuh7/incubator-airflow,artwr/airflow,MortalViews/incubator-airflow,apache/airflow,preete-dixit-ck/incubator-airflow,wileeam/airflow,stverhae/incubator-airflow,N3da/incubator-airflow,spektom/incubator-airflow,Fokko/incubator-airflow,gtoonstra/airflow,hamedhsn/incubator-airflow,janczak10/incubator-airflow,brandsoulmates/incubator-airflow,dhuang/incubator-airflow,OpringaoDoTurno/airflow,KL-WLCR/incubator-airflow,OpringaoDoTurno/airflow,akosel/incubator-airflow,jgao54/airflow,sergiohgz/incubator-airflow,apache/incubator-airflow,criccomini/airflow,btallman/incubator-airflow,ronfung/incubator-airflow,aminghadersohi/airflow,r39132/airflow,andyxhadji/incubator-airflow,hgrif/incubator-airflow,wndhydrnt/airflow,Tagar/incubator-airflow,mtagle/airflow,asnir/airflow,mistercrunch/airflow,nathanielvarona/airflow,mtagle/airflow,apache/incubator-airflow,danielvdende/incubator-airflow,danielvdende/incubator-airflow,ProstoMaxim/incubator-airflow,MetrodataTeam/incubator-airflow,sergiohgz/incubator-airflow,Tagar/incubator-airflo
w,vijaysbhat/incubator-airflow,wolfier/incubator-airflow,sid88in/incubator-airflow,brandsoulmates/incubator-airflow,skudriashev/incubator-airflow,criccomini/airflow,Twistbioscience/incubator-airflow,spektom/incubator-airflow,vijaysbhat/incubator-airflow,zoyahav/incubator-airflow,andrewmchen/incubator-airflow,danielvdende/incubator-airflow,edgarRd/incubator-airflow,Tagar/incubator-airflow,lxneng/incubator-airflow,sid88in/incubator-airflow,asnir/airflow,jiwang576/incubator-airflow,wolfier/incubator-airflow,rishibarve/incubator-airflow,sekikn/incubator-airflow,dhuang/incubator-airflow,NielsZeilemaker/incubator-airflow,ProstoMaxim/incubator-airflow,sergiohgz/incubator-airflow,lyft/incubator-airflow,jhsenjaliya/incubator-airflow | airflow/version.py | airflow/version.py | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version = '1.8.0b1+apache.incubating'
| # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
version = '1.8.0b1'
| apache-2.0 | Python |
cabc82eb1b371bb9c732907e19cd64ef7eaf9956 | remove unused and invalid import | eEcoLiDAR/eEcoLiDAR | laserchicken/test_read_las.py | laserchicken/test_read_las.py | import os
import shutil
import unittest
import numpy as np
import pytest
from laserchicken.read_las import read
class TestReadWriteLas(unittest.TestCase):
_test_dir = 'TestLoad_dir'
_test_file_name = '5points.las'
_test_data_source = 'testdata'
test_file_path = os.path.join(_test_dir, _test_file_name)
def test_load_containsPoints(self):
""" Should run without exception and return points. """
point_cloud = read(self.test_file_path)
self.assertIn('points', point_cloud)
def test_load_PointsContainX(self):
""" Should run without exception and return points. """
point_cloud = read(self.test_file_path)
print(point_cloud)
self.assertIn('data', point_cloud['points']['x'])
def test_load_CorrectFirstX(self):
""" Should . """
point_cloud = read(self.test_file_path)
point = [point_cloud['points']['x']['data'][0],
point_cloud['points']['y']['data'][0],
point_cloud['points']['z']['data'][0]]
np.testing.assert_allclose(np.array(point),
np.array([-1870.480059509277, 338897.281499328557, 192.363999260664]))
def test_load_nonexistentFile(self):
""" Should raise exception. """
with pytest.raises(OSError):
read('nonexistent.las')
def setUp(self):
os.mkdir(self._test_dir)
shutil.copyfile(os.path.join(self._test_data_source, self._test_file_name), self.test_file_path)
def tearDown(self):
shutil.rmtree(self._test_dir)
| import os
import shutil
import unittest
import pytest
import numpy as np
from laserchicken.read_las import read
from laserchicken.write_las import write
from laserchicken.test_utils import generate_test_point_cloud
class TestReadWriteLas(unittest.TestCase):
_test_dir = 'TestLoad_dir'
_test_file_name = '5points.las'
_test_data_source = 'testdata'
test_file_path = os.path.join(_test_dir, _test_file_name)
def test_load_containsPoints(self):
""" Should run without exception and return points. """
point_cloud = read(self.test_file_path)
self.assertIn('points', point_cloud)
def test_load_PointsContainX(self):
""" Should run without exception and return points. """
point_cloud = read(self.test_file_path)
print(point_cloud)
self.assertIn('data', point_cloud['points']['x'])
def test_load_CorrectFirstX(self):
""" Should . """
point_cloud = read(self.test_file_path)
point = [point_cloud['points']['x']['data'][0],
point_cloud['points']['y']['data'][0],
point_cloud['points']['z']['data'][0]]
np.testing.assert_allclose(np.array(point),
np.array([-1870.480059509277, 338897.281499328557, 192.363999260664]))
def test_load_nonexistentFile(self):
""" Should raise exception. """
with pytest.raises(OSError):
read('nonexistent.las')
def setUp(self):
os.mkdir(self._test_dir)
shutil.copyfile(os.path.join(self._test_data_source, self._test_file_name), self.test_file_path)
def tearDown(self):
shutil.rmtree(self._test_dir)
| apache-2.0 | Python |
b04f3bd19b508140b0b4feee46d590b61da46bed | Switch Swift trunk to 1.4.1, now that the 1.4.0 release branch is branched out. | mja054/swift_plugin,iostackproject/IO-Bandwidth-Differentiation,williamthegrey/swift,openstack/swift,notmyname/swift,matthewoliver/swift,orion/swift-config,thiagodasilva/swift,zackmdavis/swift,smerritt/swift,bkolli/swift,Em-Pan/swift,Khushbu27/Tutorial,openstack/swift,larsbutler/swift,aerwin3/swift,redhat-openstack/swift,hbhdytf/mac,scality/ScalitySproxydSwift,rackerlabs/swift,eatbyte/Swift,citrix-openstack-build/swift,psachin/swift,revoer/keystone-8.0.0,dpgoetz/swift,smerritt/swift,bkolli/swift,matthewoliver/swift,clayg/swift,Seagate/swift,IPVL/swift-kilo,psachin/swift,nadeemsyed/swift,smerritt/swift,sarvesh-ranjan/swift,sarvesh-ranjan/swift,NeCTAR-RC/swift,bradleypj823/swift,bouncestorage/swift,matthewoliver/swift,citrix-openstack/build-swift,ceph/swift,orion/swift-config,openstack/swift,revoer/keystone-8.0.0,hurricanerix/swift,levythu/swift,dpgoetz/swift,hurricanerix/swift,clayg/swift,Khushbu27/Tutorial,hbhdytf/mac2,mjwtom/swift,notmyname/swift,AfonsoFGarcia/swift,Akanoa/swift,shibaniahegde/OpenStak_swift,xiaoguoai/ec-dev-swift,williamthegrey/swift,anishnarang/gswift,Mirantis/swift-encrypt,prashanthpai/swift,mja054/swift_plugin,Akanoa/swift,NewpTone/StackLab-swift,AfonsoFGarcia/swift,dencaval/swift,zackmdavis/swift,NewpTone/StackLab-swift,levythu/swift,NeCTAR-RC/swift,shibaniahegde/OpenStak_swift,notmyname/swift,gold3bear/swift,gold3bear/swift,mjwtom/swift,zaitcev/swift-lfs,SUSE/swift,matthewoliver/swift,redbo/swift,JioCloud/swift,VictorLowther/swift,Seagate/swift,maginatics/swift,prashanthpai/swift,notmyname/swift,psachin/swift,redbo/swift,mja054/swift_plugin,nadeemsyed/swift,tipabu/swift,wenhuizhang/swift,Intel-bigdata/swift,SUSE/swift,hbhdytf/mac2,dencaval/swift,tipabu/swift,aerwin3/swift,openstack/swift,Em-Pan/swift,takeshineshiro/swift,houseurmusic/my-swift,rackerlabs/swift,swiftstack/swift,clayg/swift,anishnarang/gswift,hbhdytf/mac2,I
ntel-bigdata/swift,hbhdytf/mac2,eatbyte/Swift,houseurmusic/my-swift,psachin/swift,VictorLowther/swift,hurricanerix/swift,Triv90/SwiftUml,zaitcev/swift-lfs,mjzmjz/swift,maginatics/swift,clayg/swift,hurricanerix/swift,ceph/swift,citrix-openstack-build/swift,nadeemsyed/swift,scality/ScalitySproxydSwift,nadeemsyed/swift,iostackproject/IO-Bandwidth-Differentiation,swiftstack/swift,smerritt/swift,tsli/test,tipabu/swift,wenhuizhang/swift,bradleypj823/swift,Triv90/SwiftUml,takeshineshiro/swift,redhat-openstack/swift,IPVL/swift-kilo,bouncestorage/swift,thiagodasilva/swift,tsli/test,xiaoguoai/ec-dev-swift,swiftstack/swift,Mirantis/swift-encrypt,daasbank/swift,tipabu/swift,daasbank/swift,hbhdytf/mac,larsbutler/swift,citrix-openstack/build-swift,mjzmjz/swift,JioCloud/swift | swift/__init__.py | swift/__init__.py | import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.1', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
| import gettext
class Version(object):
def __init__(self, canonical_version, final):
self.canonical_version = canonical_version
self.final = final
@property
def pretty_version(self):
if self.final:
return self.canonical_version
else:
return '%s-dev' % (self.canonical_version,)
_version = Version('1.4.0', False)
__version__ = _version.pretty_version
__canonical_version__ = _version.canonical_version
gettext.install('swift')
| apache-2.0 | Python |
aa9a7e1d794967e9fec91711772d03cb6042b277 | Improve documentation of the 'constants' module | IATI/iati.core,IATI/iati.core | iati/core/constants.py | iati/core/constants.py | """A module containing constants required throughout IATI library code.
The contents of this file are not designed to be user-editable. Only edit if you know what you are doing!
Warning:
This contents of this module should currently be deemed private.
Todo:
Allow logging constants to be user-definable.
"""
STANDARD_VERSIONS = ['1.04', '1.05', '2.01', '2.02']
"""Define all versions of the Standard.
Todo:
This constant to be populated by the values in the Version codelist, rather than hard-coded.
Consider if functionality should extend to working with development versions of the Standard (e.g. during an upgrade process).
"""
STANDARD_VERSION_LATEST = max(STANDARD_VERSIONS)
"""The latest version of the IATI Standard."""
LOG_FILE_NAME = 'iatilib.log'
"""The location of the primary IATI log file.
Warning:
Logging should be clearly user-definable.
"""
LOGGER_NAME = 'iati'
"""The name of the primary IATI Logger.
Warning:
This should be better based on specific packages.
"""
NAMESPACE = '{http://www.w3.org/2001/XMLSchema}'
"""The namespace that IATI Schema XSD files are specified within."""
NSMAP = {'xsd': 'http://www.w3.org/2001/XMLSchema'}
"""A dictionary for interpreting namespaces in IATI Schemas."""
| """A module containing constants required throughout IATI library code.
The contents of this file are not designed to be user-editable. Only edit if you know what you are doing!
Warning:
This contents of this module should currently be deemed private.
Todo:
Allow logging constants to be user-definable.
"""
STANDARD_VERSIONS = ['1.04', '1.05', '2.01', '2.02']
"""Define all versions of the Standard.
Todo:
This constant to be populated by the values in the Version codelist, rather than hard-coded.
Consider if functionality should extend to working with development versions of the Standard (e.g. during an upgrade process).
"""
STANDARD_VERSION_LATEST = max(STANDARD_VERSIONS)
"""The latest version of the IATI Standard."""
LOG_FILE_NAME = 'iatilib.log'
"""The location of the primary IATI log file.
Warning:
Logging should be clearly user-definable.
"""
LOGGER_NAME = 'iati'
"""The name of the primary IATI Logger.
Warning:
This should be better based on specific packages.
"""
NAMESPACE = '{http://www.w3.org/2001/XMLSchema}'
"""The namespace that IATI Schema XSD files are specified within."""
NSMAP = {'xsd': 'http://www.w3.org/2001/XMLSchema'}
"""A dictionary for interpreting namespaces in IATI Schemas."""
| mit | Python |
6ef01afcc9e27a16d20dea5b516462a0a1261265 | bump version number | bashu/wagtail-metadata-mixin,bashu/wagtail-metadata-mixin | wagtailmetadata/__init__.py | wagtailmetadata/__init__.py | __version__ = "0.0.2"
| __version__ = "0.0.1"
| mit | Python |
e9b422c74382d88787114796e7e4b6dfd2b25225 | Correct path to frontend views | disqus/codebox,disqus/codebox | codesharer/app.py | codesharer/app.py | from flask import Flask
from flaskext.redis import Redis
def create_app():
from codesharer.apps.snippets.views import frontend
app = Flask(__name__)
app.config.from_object('codesharer.conf.Config')
app.register_module(frontend)
db = Redis(app)
db.init_app(app)
app.db = db
return app | from flask import Flask
from flaskext.redis import Redis
def create_app():
from codesharer.apps.classifier.views import frontend
app = Flask(__name__)
app.config.from_object('codesharer.conf.Config')
app.register_module(frontend)
db = Redis(app)
db.init_app(app)
app.db = db
return app | apache-2.0 | Python |
90ec1e2c521e82b1c6fc6ba9e14983a0294261b5 | bump version to 3.0.0dev | altair-viz/altair,jakevdp/altair | altair/__init__.py | altair/__init__.py | # flake8: noqa
__version__ = '3.0.0dev0'
from .vegalite import *
from . import examples
def load_ipython_extension(ipython):
from ._magics import vega, vegalite
ipython.register_magic_function(vega, 'cell')
ipython.register_magic_function(vegalite, 'cell')
| # flake8: noqa
__version__ = '2.5.0dev0'
from .vegalite import *
from . import examples
def load_ipython_extension(ipython):
from ._magics import vega, vegalite
ipython.register_magic_function(vega, 'cell')
ipython.register_magic_function(vegalite, 'cell')
| bsd-3-clause | Python |
b9f43757b4d6bd63d15b75f5a40f26fb56904ba7 | Fix fields import | N-litened/amocrm_api,Krukov/amocrm_api | amocrm/__init__.py | amocrm/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import sys
import logging
from .settings import settings as amo_settings
from .api import *
from .apimodels import *
from . import fields
__all__ = [
'BaseCompany', 'BaseContact', 'BaseLead', 'amo_settings', 'AmoApi', 'ContactNote', 'ContactTask',
'LeadNote', 'LeadTask', 'fields',
]
logger = logging.getLogger('amocrm')
if not logger.handlers:
formatter = logging.Formatter(
'%(asctime)s.%(msecs)d %(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S'
)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import sys
import logging
from .settings import settings as amo_settings
from .api import *
from .apimodels import *
__all__ = [
'BaseCompany', 'BaseContact', 'BaseLead', 'amo_settings', 'AmoApi', 'ContactNote', 'ContactTask',
'LeadNote', 'LeadTask', 'fields',
]
logger = logging.getLogger('amocrm')
if not logger.handlers:
formatter = logging.Formatter(
'%(asctime)s.%(msecs)d %(levelname)s: %(message)s', '%Y-%m-%d %H:%M:%S'
)
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(formatter)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.setLevel(logging.WARNING)
| mit | Python |
df02879c51c4f87514e376e3a2a0f7db7a75a285 | Improve configuration schema for Geniushub integration (#23155) | fbradyirl/home-assistant,GenericStudent/home-assistant,rohitranjan1991/home-assistant,adrienbrault/home-assistant,tboyce021/home-assistant,aronsky/home-assistant,kennedyshead/home-assistant,nkgilley/home-assistant,turbokongen/home-assistant,qedi-r/home-assistant,aequitas/home-assistant,Danielhiversen/home-assistant,mKeRix/home-assistant,balloob/home-assistant,pschmitt/home-assistant,rohitranjan1991/home-assistant,mezz64/home-assistant,aronsky/home-assistant,titilambert/home-assistant,Teagan42/home-assistant,postlund/home-assistant,Cinntax/home-assistant,tboyce1/home-assistant,adrienbrault/home-assistant,postlund/home-assistant,pschmitt/home-assistant,w1ll1am23/home-assistant,auduny/home-assistant,titilambert/home-assistant,mKeRix/home-assistant,balloob/home-assistant,soldag/home-assistant,home-assistant/home-assistant,Teagan42/home-assistant,mKeRix/home-assistant,aequitas/home-assistant,sander76/home-assistant,fbradyirl/home-assistant,home-assistant/home-assistant,jawilson/home-assistant,mKeRix/home-assistant,auduny/home-assistant,robbiet480/home-assistant,lukas-hetzenecker/home-assistant,toddeye/home-assistant,lukas-hetzenecker/home-assistant,jawilson/home-assistant,rohitranjan1991/home-assistant,fbradyirl/home-assistant,tboyce1/home-assistant,partofthething/home-assistant,auduny/home-assistant,tchellomello/home-assistant,aequitas/home-assistant,tboyce1/home-assistant,Cinntax/home-assistant,w1ll1am23/home-assistant,GenericStudent/home-assistant,partofthething/home-assistant,toddeye/home-assistant,mezz64/home-assistant,sdague/home-assistant,sander76/home-assistant,joopert/home-assistant,turbokongen/home-assistant,balloob/home-assistant,jabesq/home-assistant,nkgilley/home-assistant,FreekingDean/home-assistant,leppa/home-assistant,FreekingDean/home-assistant,tchellomello/home-assistant,tboyce021/home-assistant,robbiet480/home-assistant,sdague/home-assistant,joo
pert/home-assistant,tboyce1/home-assistant,Danielhiversen/home-assistant,qedi-r/home-assistant,kennedyshead/home-assistant,leppa/home-assistant,jabesq/home-assistant,jabesq/home-assistant,soldag/home-assistant | homeassistant/components/geniushub/__init__.py | homeassistant/components/geniushub/__init__.py | """This module connects to a Genius hub and shares the data."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_TOKEN, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'geniushub'
_V1_API_SCHEMA = vol.Schema({
vol.Required(CONF_TOKEN): cv.string,
})
_V3_API_SCHEMA = vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Any(
_V3_API_SCHEMA,
_V1_API_SCHEMA,
)
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, hass_config):
"""Create a Genius Hub system."""
from geniushubclient import GeniusHubClient # noqa; pylint: disable=no-name-in-module
geniushub_data = hass.data[DOMAIN] = {}
kwargs = dict(hass_config[DOMAIN])
if CONF_HOST in kwargs:
args = (kwargs.pop(CONF_HOST), )
else:
args = (kwargs.pop(CONF_TOKEN), )
try:
client = geniushub_data['client'] = GeniusHubClient(
*args, **kwargs, session=async_get_clientsession(hass)
)
await client.hub.update()
except AssertionError: # assert response.status == HTTP_OK
_LOGGER.warning(
"setup(): Failed, check your configuration.",
exc_info=True)
return False
hass.async_create_task(async_load_platform(
hass, 'climate', DOMAIN, {}, hass_config))
return True
| """This module connects to the Genius hub and shares the data."""
import logging
import voluptuous as vol
from homeassistant.const import (
CONF_HOST, CONF_PASSWORD, CONF_USERNAME)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.discovery import async_load_platform
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'geniushub'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Required(CONF_HOST): cv.string,
}),
}, extra=vol.ALLOW_EXTRA)
async def async_setup(hass, hass_config):
"""Create a Genius Hub system."""
from geniushubclient import GeniusHubClient # noqa; pylint: disable=no-name-in-module
host = hass_config[DOMAIN].get(CONF_HOST)
username = hass_config[DOMAIN].get(CONF_USERNAME)
password = hass_config[DOMAIN].get(CONF_PASSWORD)
geniushub_data = hass.data[DOMAIN] = {}
try:
client = geniushub_data['client'] = GeniusHubClient(
host, username, password,
session=async_get_clientsession(hass)
)
await client.hub.update()
except AssertionError: # assert response.status == HTTP_OK
_LOGGER.warning(
"setup(): Failed, check your configuration.",
exc_info=True)
return False
hass.async_create_task(async_load_platform(
hass, 'climate', DOMAIN, {}, hass_config))
return True
| apache-2.0 | Python |
c382b43b5e5c442d9d3c6bc6af398b829e07f1b5 | Fix chunk | muddyfish/PYKE,muddyfish/PYKE | node/floor_divide.py | node/floor_divide.py | #!/usr/bin/env python
from nodes import Node
import math
class FloorDiv(Node):
char = "f"
args = 2
results = 1
@Node.test_func([3,2], [1])
@Node.test_func([6,-3], [-2])
def func(self, a:Node.number,b:Node.number):
"""a/b. Rounds down, returns an int."""
return a//b
@Node.test_func(["test", "e"], [["t", "e", "st"]])
def partition(self, string:str, sep:str):
"""Split the string at the first occurrence of sep,
return a 3-list containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found,
return a 3-list containing the string itself,
followed by two empty strings."""
return [list(string.partition(sep))]
@Node.test_func(["134", 1], [["134"]])
@Node.test_func(["1234", 2], [["12", "34"]])
@Node.test_func(["1234", 3], [["1", "2", "34"]])
@Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
def chunk(self, inp:Node.indexable, num:int):
"""Return inp seperated into num groups"""
rtn = []
size = len(inp)//num
for i in range(0, num*size, size):
rtn.append(inp[i:i+size])
if len(rtn) != num:
rtn.append(inp[i+size:])
else:
rtn[-1] += inp[i+size:]
return [rtn] | #!/usr/bin/env python
from nodes import Node
class FloorDiv(Node):
char = "f"
args = 2
results = 1
@Node.test_func([3,2], [1])
@Node.test_func([6,-3], [-2])
def func(self, a:Node.number,b:Node.number):
"""a/b. Rounds down, returns an int."""
return a//b
@Node.test_func(["test", "e"], [["t", "e", "st"]])
def partition(self, string:str, sep:str):
"""Split the string at the first occurrence of sep,
return a 3-list containing the part before the separator,
the separator itself, and the part after the separator.
If the separator is not found,
return a 3-list containing the string itself,
followed by two empty strings."""
return [list(string.partition(sep))]
@Node.test_func(["134", 1], [["134"]])
@Node.test_func(["1234", 2], [["12", "34"]])
@Node.test_func(["1234", 3], [["1", "2", "34"]])
@Node.test_func([[4,8,15,16,23,42], 5], [[[4],[8],[15],[16],[23,42]]])
def chunk(self, inp:Node.indexable, num:int):
"""Return inp seperated into num groups"""
rtn = []
last = 0
size = len(inp)//num
for i in range(size, len(inp), size):
rtn.append(inp[last:i])
last = i
if len(rtn) != num:
rtn.append(inp[last:])
else:
rtn[-1] += inp[last:]
if len(rtn):
if isinstance(inp, str):
rtn[-1] = "".join(rtn[-1])
else:
rtn[-1] = type(inp)(rtn[-1])
return [rtn] | mit | Python |
f0962f53e517cb24fdab5253ae60ee7fd7be0cfe | Remove unnecessry print in tests | AtteqCom/zsl,AtteqCom/zsl | tests/interface/cli/exec_task_from_cli_test.py | tests/interface/cli/exec_task_from_cli_test.py | from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
from unittest.case import TestCase
from click.testing import CliRunner
from zsl import inject
from zsl.application.containers.core_container import CoreContainer
from zsl.application.modules.cli_module import ZslCli
from zsl.router.task import TaskConfiguration
from zsl.testing.db import IN_MEMORY_DB_SETTINGS
from zsl.testing.zsl import ZslTestCase, ZslTestConfiguration
class TestCliContainer(CoreContainer):
pass
CONFIG = IN_MEMORY_DB_SETTINGS.copy()
CONFIG.update(
TASKS=TaskConfiguration().create_namespace('task').add_packages(['zsl.tasks']).get_configuration()
)
class ExecTaskFromCliTestCase(ZslTestCase, TestCase):
ZSL_TEST_CONFIGURATION = ZslTestConfiguration(
app_name="ExecTaskFromCliTestCase",
config_object=CONFIG,
container=TestCliContainer
)
@inject(zsl_cli=ZslCli)
def testRunningTestTask(self, zsl_cli):
# type:(ZslCli)->None
runner = CliRunner()
result = runner.invoke(zsl_cli.cli, ['task', 'task/zsl/test_task'])
self.assertEqual(0, result.exit_code, "No error is expected.")
self.assertEqual('ok', result.output.strip(), "Valid task output must be shown")
@inject(zsl_cli=ZslCli)
def testRunningTaskWithListInput(self, zsl_cli):
# type:(ZslCli)->None
runner = CliRunner()
result = runner.invoke(zsl_cli.cli, ['task', 'task/zsl/with_request_task', '{"list_of_numbers": [1,2,3] }'])
self.assertEqual(0, result.exit_code, "No error is expected.")
self.assertEqual('[1, 2, 3]', result.output.strip(), "Valid task output must be shown")
| from __future__ import absolute_import, division, print_function, unicode_literals
from builtins import *
from unittest.case import TestCase
from click.testing import CliRunner
from zsl import inject
from zsl.application.containers.core_container import CoreContainer
from zsl.application.modules.cli_module import ZslCli
from zsl.router.task import TaskConfiguration
from zsl.testing.db import IN_MEMORY_DB_SETTINGS
from zsl.testing.zsl import ZslTestCase, ZslTestConfiguration
class TestCliContainer(CoreContainer):
pass
CONFIG = IN_MEMORY_DB_SETTINGS.copy()
CONFIG.update(
TASKS=TaskConfiguration().create_namespace('task').add_packages(['zsl.tasks']).get_configuration()
)
class ExecTaskFromCliTestCase(ZslTestCase, TestCase):
ZSL_TEST_CONFIGURATION = ZslTestConfiguration(
app_name="ExecTaskFromCliTestCase",
config_object=CONFIG,
container=TestCliContainer
)
@inject(zsl_cli=ZslCli)
def testRunningTestTask(self, zsl_cli):
# type:(ZslCli)->None
runner = CliRunner()
result = runner.invoke(zsl_cli.cli, ['task', 'task/zsl/test_task'])
self.assertEqual(0, result.exit_code, "No error is expected.")
self.assertEqual('ok', result.output.strip(), "Valid task output must be shown")
@inject(zsl_cli=ZslCli)
def testRunningTaskWithListInput(self, zsl_cli):
# type:(ZslCli)->None
runner = CliRunner()
result = runner.invoke(zsl_cli.cli, ['task', 'task/zsl/with_request_task', '{"list_of_numbers": [1,2,3] }'])
print(result)
self.assertEqual(0, result.exit_code, "No error is expected.")
self.assertEqual('[1, 2, 3]', result.output.strip(), "Valid task output must be shown")
| mit | Python |
a8a0dd55a5289825aae34aa45765ea328811523e | Exclude Fast test for Python 2 | bees4ever/spotpy,bees4ever/spotpy,thouska/spotpy,thouska/spotpy,bees4ever/spotpy,thouska/spotpy | spotpy/unittests/test_fast.py | spotpy/unittests/test_fast.py | import unittest
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup
# Test only untder Python 3 as Python >2.7.10 results in a strange fft error
if sys.version_info >= (3, 5):
class TestFast(unittest.TestCase):
def setUp(self):
self.spot_setup = spot_setup()
self.rep = 200 # REP must be a multiply of amount of parameters which are in 7 if using hymod
self.timeout = 10 # Given in Seconds
def test_fast(self):
sampler = spotpy.algorithms.fast(self.spot_setup, parallel="seq", dbname='test_FAST', dbformat="ram",
sim_timeout=self.timeout)
results = []
sampler.sample(self.rep)
results = sampler.getdata()
self.assertEqual(203,len(results))
if __name__ == '__main__':
unittest.main()
| import unittest
try:
import spotpy
except ImportError:
import sys
sys.path.append(".")
import spotpy
from spotpy.examples.spot_setup_hymod_python import spot_setup
class TestFast(unittest.TestCase):
def setUp(self):
self.spot_setup = spot_setup()
self.rep = 200 # REP must be a multiply of amount of parameters which are in 7 if using hymod
self.timeout = 10 # Given in Seconds
def test_fast(self):
sampler = spotpy.algorithms.fast(self.spot_setup, parallel="seq", dbname='test_FAST', dbformat="ram",
sim_timeout=self.timeout)
results = []
sampler.sample(self.rep)
results = sampler.getdata()
self.assertEqual(203,len(results))
if __name__ == '__main__':
unittest.main()
| mit | Python |
f3e6a8099c7eb8104371a936fba05804e0e76572 | Fix hostnames with a dash getting rejected | maphy-psd/python-webuntis,untitaker/python-webuntis | webuntis/utils/userinput.py | webuntis/utils/userinput.py | '''
This file is part of python-webuntis
:copyright: (c) 2013 by Markus Unterwaditzer.
:license: BSD, see LICENSE for more details.
'''
import re
from .logger import log
from .third_party import urlparse
def server(url):
if not re.match(r'^http(s?)\:\/\/', url): # if we just have the hostname
log('debug', 'The URL given doesn\'t seem to be a valid URL, just '
'gonna prepend "https://"')
# append the http prefix and hope for the best
url = 'https://' + url
urlobj = urlparse.urlparse(url)
if not urlobj.scheme or not urlobj.netloc:
# urlparse failed
raise ValueError('Not a valid URL or hostname')
if not re.match(r'^[a-zA-Z0-9\.\:\-_]+$', urlobj.netloc):
# That's not even a valid hostname
raise ValueError('Not a valid hostname')
if urlobj.path == '/':
log('warning', 'You specified that the API endpoint should be "/".'
'That is uncommon. If you didn\'t mean to do so,'
'remove the slash at the end of your "server"'
'parameter.')
return urlobj.scheme + \
u'://' + \
urlobj.netloc + \
(urlobj.path or u'/WebUntis/jsonrpc.do')
def string(value):
'''Make the string unicode'''
if isinstance(value, unicode_string):
return value
return value.decode('ascii')
config_keys = {
'username': string,
'password': string,
'jsessionid': string,
'school': string,
'server': server,
'useragent': string,
'login_repeat': int,
'_http_session': None
}
try:
unicode_string = unicode
bytestring = str
except NameError:
unicode_string = str
bytestring = bytes
| '''
This file is part of python-webuntis
:copyright: (c) 2013 by Markus Unterwaditzer.
:license: BSD, see LICENSE for more details.
'''
import re
from .logger import log
from .third_party import urlparse
def server(url):
if not re.match(r'^http(s?)\:\/\/', url): # if we just have the hostname
log('debug', 'The URL given doesn\'t seem to be a valid URL, just '
'gonna prepend "https://"')
# append the http prefix and hope for the best
url = 'https://' + url
urlobj = urlparse.urlparse(url)
if not urlobj.scheme or not urlobj.netloc:
# urlparse failed
raise ValueError('Not a valid URL or hostname')
if not re.match(r'^[a-zA-Z0-9\.\:-_]+$', urlobj.netloc):
# That's not even a valid hostname
raise ValueError('Not a valid hostname')
if urlobj.path == '/':
log('warning', 'You specified that the API endpoint should be "/".'
'That is uncommon. If you didn\'t mean to do so,'
'remove the slash at the end of your "server"'
'parameter.')
return urlobj.scheme + \
u'://' + \
urlobj.netloc + \
(urlobj.path or u'/WebUntis/jsonrpc.do')
def string(value):
'''Make the string unicode'''
if isinstance(value, unicode_string):
return value
return value.decode('ascii')
config_keys = {
'username': string,
'password': string,
'jsessionid': string,
'school': string,
'server': server,
'useragent': string,
'login_repeat': int,
'_http_session': None
}
try:
unicode_string = unicode
bytestring = str
except NameError:
unicode_string = str
bytestring = bytes
| bsd-3-clause | Python |
e1855bb410275c6ba5b0f04934ea21c615de38dc | fix for last commit | MeirKriheli/Open-Knesset,OriHoch/Open-Knesset,daonb/Open-Knesset,Shrulik/Open-Knesset,habeanf/Open-Knesset,Shrulik/Open-Knesset,OriHoch/Open-Knesset,daonb/Open-Knesset,navotsil/Open-Knesset,otadmor/Open-Knesset,habeanf/Open-Knesset,DanaOshri/Open-Knesset,OriHoch/Open-Knesset,ofri/Open-Knesset,noamelf/Open-Knesset,MeirKriheli/Open-Knesset,DanaOshri/Open-Knesset,habeanf/Open-Knesset,ofri/Open-Knesset,otadmor/Open-Knesset,alonisser/Open-Knesset,Shrulik/Open-Knesset,OriHoch/Open-Knesset,ofri/Open-Knesset,navotsil/Open-Knesset,DanaOshri/Open-Knesset,noamelf/Open-Knesset,Shrulik/Open-Knesset,daonb/Open-Knesset,jspan/Open-Knesset,otadmor/Open-Knesset,noamelf/Open-Knesset,daonb/Open-Knesset,MeirKriheli/Open-Knesset,navotsil/Open-Knesset,jspan/Open-Knesset,alonisser/Open-Knesset,noamelf/Open-Knesset,ofri/Open-Knesset,habeanf/Open-Knesset,otadmor/Open-Knesset,alonisser/Open-Knesset,navotsil/Open-Knesset,MeirKriheli/Open-Knesset,alonisser/Open-Knesset,jspan/Open-Knesset,jspan/Open-Knesset,DanaOshri/Open-Knesset | committees/api.py | committees/api.py | '''
Api for the committees app
'''
from tastypie.api import Api
from tastypie.constants import ALL
from tastypie.bundle import Bundle
import tastypie.fields as fields
from apis.resources.base import BaseResource
from models import Committee, CommitteeMeeting, ProtocolPart
from mks.api import MemberResource
class CommitteeResource(BaseResource):
''' Committee API
'''
recent_meetings = fields.ListField()
future_meetings = fields.ListField()
class Meta:
queryset = Committee.objects.all()
allowed_methods = ['get']
include_absolute_url = True
def dehydrate_recent_meetings(self, bundle):
return [ { 'url': x.get_absolute_url(),
'title': x.title(),
'date': x.date }
for x in bundle.obj.recent_meetings() ]
def dehydrate_future_meetings(self, bundle):
return [ { 'title': x.what,
'date': x.when }
for x in bundle.obj.future_meetings() ]
class CommitteeMeetingResource(BaseResource):
''' Committee Meeting API
'''
committee = fields.ForeignKey(CommitteeResource, 'committee')
mks_attended = fields.ToManyField(MemberResource, 'mks_attended')
protocol = fields.ToManyField('committees.api.ProtocolPartResource',
'parts', full=True)
class Meta:
queryset = CommitteeMeeting.objects.all().select_related('committee',
'mks_attended',
).prefetch_related('parts')
allowed_methods = ['get']
include_absolute_url = True
list_fields = ['committee','mks_attended','date','topics']
excludes = ['protocol_text']
class ProtocolPartResource(BaseResource):
header = fields.CharField(attribute='header')
body = fields.CharField(attribute='body')
class Meta:
queryset = ProtocolPart.objects.all().order_by('order')
allowed_methods = ['get']
fields = list_fields = ['header','body']
include_resource_uri = False
| '''
Api for the committees app
'''
from tastypie.api import Api
from tastypie.constants import ALL
from tastypie.bundle import Bundle
import tastypie.fields as fields
from apis.resources.base import BaseResource
from models import Committee, CommitteeMeeting, ProtocolPart
from mks.api import MemberResource
class CommitteeResource(BaseResource):
''' Committee API
'''
#recent_meetings = fields.ListField()
future_meetings = fields.ListField()
meetings = fields.ToManyField('committees.api.CommitteeMeetingResource', 'meetings')
class Meta:
queryset = Committee.objects.all()
allowed_methods = ['get']
include_absolute_url = True
def dehydrate_recent_meetings(self, bundle):
return [ { 'url': x.get_absolute_url(),
'title': x.title(),
'date': x.date }
for x in bundle.obj.recent_meetings() ]
def dehydrate_future_meetings(self, bundle):
return [ { 'title': x.what,
'date': x.when }
for x in bundle.obj.future_meetings() ]
class CommitteeMeetingResource(BaseResource):
''' Committee Meeting API
'''
committee = fields.ForeignKey(CommitteeResource, 'committee')
mks_attended = fields.ToManyField(MemberResource, 'mks_attended')
protocol = fields.ToManyField('committees.api.ProtocolPartResource',
'parts', full=True)
class Meta:
queryset = CommitteeMeeting.objects.all().select_related('committee',
'mks_attended',
).prefetch_related('parts')
allowed_methods = ['get']
include_absolute_url = True
list_fields = ['committee','mks_attended','date','topics']
excludes = ['protocol_text']
class ProtocolPartResource(BaseResource):
header = fields.CharField(attribute='header')
body = fields.CharField(attribute='body')
class Meta:
queryset = ProtocolPart.objects.all().order_by('order')
allowed_methods = ['get']
fields = list_fields = ['header','body']
include_resource_uri = False
| bsd-3-clause | Python |
e736482bec7be7871bd4edd270f8c064961c20fc | Update minimum support boto version. | gjtempleton/moto,botify-labs/moto,dbfr3qs/moto,botify-labs/moto,heddle317/moto,braintreeps/moto,spulec/moto,rocky4570/moto,ZuluPro/moto,kennethd/moto,okomestudio/moto,Affirm/moto,alexdebrie/moto,Brett55/moto,riccardomc/moto,2mf/moto,heddle317/moto,rocky4570/moto,dbfr3qs/moto,botify-labs/moto,dbfr3qs/moto,2rs2ts/moto,jszwedko/moto,ImmobilienScout24/moto,okomestudio/moto,Affirm/moto,kefo/moto,gjtempleton/moto,whummer/moto,heddle317/moto,okomestudio/moto,ZuluPro/moto,whummer/moto,william-richard/moto,spulec/moto,jrydberg/moto,william-richard/moto,rouge8/moto,ludia/moto,2rs2ts/moto,okomestudio/moto,botify-labs/moto,behanceops/moto,heddle317/moto,gjtempleton/moto,2rs2ts/moto,jotes/moto,okomestudio/moto,spulec/moto,pior/moto,im-auld/moto,zonk1024/moto,spulec/moto,spulec/moto,kefo/moto,Brett55/moto,silveregg/moto,rocky4570/moto,william-richard/moto,ZuluPro/moto,okomestudio/moto,tootedom/moto,Affirm/moto,Brett55/moto,rocky4570/moto,heddle317/moto,whummer/moto,kefo/moto,william-richard/moto,EarthmanT/moto,dbfr3qs/moto,rocky4570/moto,Brett55/moto,Affirm/moto,dbfr3qs/moto,whummer/moto,ZuluPro/moto,william-richard/moto,gjtempleton/moto,dbfr3qs/moto,araines/moto,rocky4570/moto,william-richard/moto,gjtempleton/moto,botify-labs/moto,IlyaSukhanov/moto,Brett55/moto,whummer/moto,ZuluPro/moto,2rs2ts/moto,kefo/moto,whummer/moto,mrucci/moto,2rs2ts/moto,spulec/moto,botify-labs/moto,kefo/moto,Affirm/moto,Brett55/moto,ZuluPro/moto,Affirm/moto | setup.py | setup.py | #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2",
"boto>=2.20.0",
"flask",
"httpretty>=0.6.1",
"requests",
"xmltodict",
"six",
"werkzeug",
]
import sys
if sys.version_info < (2, 7):
# No buildint OrderedDict before 2.7
install_requires.append('ordereddict')
setup(
name='moto',
version='0.4.1',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2",
"boto",
"flask",
"httpretty>=0.6.1",
"requests",
"xmltodict",
"six",
"werkzeug",
]
import sys
if sys.version_info < (2, 7):
# No buildint OrderedDict before 2.7
install_requires.append('ordereddict')
setup(
name='moto',
version='0.4.1',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| apache-2.0 | Python |
6d0240fbb329e649210578e7c194a79e1f495267 | copy unprocessed files to key folders | DreadfulDeveloper/src-helpers,DreadfulDeveloper/src-helpers,DreadfulDeveloper/src-helpers | koda/main.py | koda/main.py | import os
import sys
from glob import glob
from mutagen.easyid3 import EasyID3
import shutil
CAMELOT_FOLDERS = ["1A - 1B", "2A - 2B", "3A - 3B", "4A - 4B", "5A - 5B", "6A - 6B", "7A - 7B", "8A - 8B", "9A - 9B", "10A - 10B", "11A - 11B", "12A - 12B"]
def createOutputDirs():
outputdir = "processed"
workdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), outputdir)
for f in CAMELOT_FOLDERS:
if not os.path.exists(os.path.join(workdir, f)):
os.makedirs(os.path.join(workdir, f))
def getDirContents(directory):
"get list of file info objects for files of particular extensions"
workdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), directory)
return [y for x in os.walk(workdir) for y in glob(os.path.join(x[0], '*.mp3'))]
def copyFileToFolder(src, dest):
workdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), "processed", dest)
shutil.copy2(src, workdir)
def copyFilesToKeyFolders():
createOutputDirs()
[copyFileToFolder(f, CAMELOT_FOLDERS[i]) for f in getDirContents("unprocessed") for y in f.split(' - ') for i in range(len(CAMELOT_FOLDERS)) for key in CAMELOT_FOLDERS[i].split(' - ') if key in y]
def main():
copyFilesToKeyFolders()
if __name__== "__main__":
main()
| import os
import sys
from mutagen.easyid3 import EasyID3
def listDirectory(directory):
"get list of file info objects for files of particular extensions"
workdir = os.path.join(os.path.dirname(os.path.realpath(__file__)), directory)
fileList = [os.path.normcase(f) for f in os.listdir(workdir)]
return [EasyID3(os.path.join(workdir, f)) for f in fileList]
def main():
print listDirectory("unprocessed")
if __name__== "__main__":
main()
| mit | Python |
ba83df5099879576db2f074cb2b046bbad3a482c | fix identation issue after auto format | GabiGrin/SublimeInsertTsReference,GabiGrin/SublimeInsertTsReference | insert_ts_reference.py | insert_ts_reference.py | import sublime, sublime_plugin, os.path
class InsertTsReferenceCommand(sublime_plugin.TextCommand):
def run(self, edit):
comment_template = '/// <reference path="$PATH$" />'
view = self.view
references_file_name = sublime.load_settings('InsertTsReference').get('references_file_name', 'references.ts');
if view is not None:
local_references_name = view.settings().get('InsertTsReference', {}).get('references_file_name')
if local_references_name is not None: references_file_name = local_references_name
max_attempts = 50
file_folder = os.path.realpath(view.file_name() + '/..')
root_folders = view.window().folders()
attempts = 0
current_folder = file_folder
while attempts < max_attempts:
# print('Trying ', current_folder)
if os.path.isfile(current_folder + '/' + references_file_name):
# print('Found', file_folder, i)
file_folder = os.path.relpath(current_folder, file_folder) + '/' if current_folder != file_folder else '';
view.insert(edit, 0, comment_template.replace('$PATH$', file_folder + references_file_name) + '\n\n');
break;
else:
current_folder = current_folder + '/..'
if os.path.realpath(current_folder) in root_folders:
sublime.error_message('unable to find references file named "' + references_file_name + '" while traversing this files directory tree!')
break;
attempts = attempts + 1
if attempts == max_attempts:
sublime.error_message('unable to find references file named "' + references_file_name + '" while traversing this files directory tree!')
#allcontent = sublime.Region(0, self.view.size())
#self.view.replace(edit, allcontent, self.view.file_name() + self.view.scope_name(0))
| import sublime, sublime_plugin, os.path
class InsertTsReferenceCommand(sublime_plugin.TextCommand):
def run(self, edit):
comment_template = '/// <reference path="$PATH$" />'
view = self.view
references_file_name = sublime.load_settings('InsertTsReference').get('references_file_name', 'references.ts');
if view is not None:
local_references_name = view.settings().get('InsertTsReference', {}).get('references_file_name')
if local_references_name is not None: references_file_name = local_references_name
max_attempts = 50
file_folder = os.path.realpath(view.file_name() + '/..')
root_folders = view.window().folders()
attemps = 0
current_folder = file_folder
while attemps < max_attempts:
# print('Trying ', current_folder)
if os.path.isfile(current_folder + '/' + references_file_name):
# print('Found', file_folder, i)
file_folder = os.path.relpath(current_folder, file_folder) + '/' if current_folder != file_folder else '';
view.insert(edit, 0, comment_template.replace('$PATH$', file_folder + references_file_name) + '\n\n');
break;
else:
current_folder = current_folder + '/..'
if os.path.realpath(current_folder) in root_folders:
sublime.error_message('unable to find references file named "' + references_file_name + '" while traversing this files directory tree!')
break;
attemps = attemps + 1
if attemps == max_attempts:
sublime.error_message('unable to find references file named "' + references_file_name + '" while traversing this files directory tree!')
#allcontent = sublime.Region(0, self.view.size())
#self.view.replace(edit, allcontent, self.view.file_name() + self.view.scope_name(0))
| mit | Python |
a6a74ceadf855bdb47fd4962f955af73017c7e26 | Add docstring for writers API | boltzj/movies-in-sf | app/api/writers.py | app/api/writers.py | # Core
from app.api import api
from flask import json, abort
from flask.ext.cors import cross_origin
from urllib import parse
# Models
from app.models.writer import Writer
@cross_origin()
@api.route('/writers', methods=['GET'])
def get_writer_names():
"""
Return all writer names existing in the database
:return: JSON with all writer names
"""
# Get all movies from DB
writers = Writer.query.all()
# Store writes names in an array
writers_names = []
for writer in writers:
writers_names.append(writer.name)
# return writers names in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), writers_names)
@cross_origin()
@api.route('/writers/<name>', methods=['GET'])
def get_writer(name):
"""
Return information about the writer
:param name of the writer (URL encoded)
:return: JSON with writer information
"""
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
writer_info = {
'name': writer.name
}
# return writer information in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), writer_info)
@cross_origin()
@api.route('/writers/<name>/movies', methods=['GET'])
def get_writer_movies(name):
"""
Return the list all writer's movies
:param name of the writer (URL encoded)
:return: JSON with movies information
"""
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store writer's movies in an array
movies = []
for movie in writer.movies:
movies.append({
'title': movie.title,
'year': movie.release_year,
})
# return movies in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), movies)
@cross_origin()
@api.route('/writers/<name>/locations', methods=['GET'])
def get_writer_locations(name):
"""
Return the list of all locations linked to a writer
:param name of the writer (URL encoded)
:return: JSON with locations
"""
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store the locations in an array
locations = []
for movie in writer.movies:
for location in movie.locations:
locations.append({
'title': movie.title,
'content': location.fun_facts,
'location': location.name,
'lat': location.latitude,
'lng': location.longitude
})
# return locations in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), locations)
| # Core
from app.api import api
from flask import json, abort
from flask.ext.cors import cross_origin
from urllib import parse
# Models
from app.models.writer import Writer
@cross_origin()
@api.route('/writers', methods=['GET'])
def get_writers_names():
"""
:return: Return a list of all writers names
"""
# Get all movies from DB
writers = Writer.query.all()
# Store writes names in an array
writers_names = []
for writer in writers:
writers_names.append(writer.name)
# return writers names in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), writers_names)
@cross_origin()
@api.route('/writers/<name>', methods=['GET'])
def get_writer(name):
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
writer_info = {
'name': writer.name
}
# return writer information in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), writer_info)
@cross_origin()
@api.route('/writers/<name>/movies', methods=['GET'])
def get_writer_movies(name):
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store writer's movies in an array
movies = []
for movie in writer.movies:
movies.append({
'title': movie.title,
'year': movie.release_year,
})
# return movies in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), movies)
@cross_origin()
@api.route('/writers/<name>/locations', methods=['GET'])
def get_writer_locations(name):
# Get the writer in the Database
writer = Writer.query.filter(Writer.name == parse.unquote(name)).first()
# If the writer doesn't exist error 404
if not writer:
return abort(404)
# Store the locations in an array
locations = []
for movie in writer.movies:
for location in movie.locations:
locations.append({
'title': movie.title,
'content': location.fun_facts,
'location': location.name,
'lat': location.latitude,
'lng': location.longitude
})
# return locations in a JSON array
return json.JSONEncoder.encode(json.JSONEncoder(), locations)
| mit | Python |
522b197500bffb748dc2daf3bf0ea448b3094af7 | Add another missing heroku config variable | paulocheque/guides-cms,pluralsight/guides-cms,paulocheque/guides-cms,pluralsight/guides-cms,pluralsight/guides-cms,paulocheque/guides-cms | example_config.py | example_config.py | """
File to easily switch between configurations between production and
development, etc.
"""
import os
# You must set each of these in your heroku environment with the heroku
# config:set command. See README.md for more information.
HEROKU_ENV_REQUIREMENTS = ('HEROKU', 'SECRET_KEY', 'GITHUB_CLIENT_ID',
'GITHUB_SECRET', 'DATABASE_URL',
'SQLALCHEMY_DATABASE_URI')
class Config(object):
DEBUG = False
CSRF_ENABLED = True
GITHUB_CLIENT_ID = 'replace-me'
GITHUB_SECRET = 'replace-me'
HEROKU = False
SECRET_KEY = 'not-a-good-value'
# This should automatically be set by heroku if you've added a database to
# your app.
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
class DevelopmentConfig(Config):
DEBUG = True
| """
File to easily switch between configurations between production and
development, etc.
"""
import os
# You must set each of these in your heroku environment with the heroku
# config:set command. See README.md for more information.
HEROKU_ENV_REQUIREMENTS = ('HEROKU', 'SECRET_KEY', 'GITHUB_CLIENT_ID',
'GITHUB_SECRET', 'DATABASE_URL')
class Config(object):
DEBUG = False
CSRF_ENABLED = True
GITHUB_CLIENT_ID = 'replace-me'
GITHUB_SECRET = 'replace-me'
HEROKU = False
SECRET_KEY = 'not-a-good-value'
# This should automatically be set by heroku if you've added a database to
# your app.
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
class DevelopmentConfig(Config):
DEBUG = True
| agpl-3.0 | Python |
52841517a575ba7df354b809cb23bc2851c9fcd4 | Remove carriage returns from the output | jdgwartney/boundary-plugin-shell,boundary/boundary-plugin-shell,boundary/boundary-plugin-shell,jdgwartney/boundary-plugin-shell | exec_proc.py | exec_proc.py | #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen,PIPE
import shlex
import logging
class ExecProc:
def __init__(self):
self.command = None
self.debug = False
def setDebug(self,debug):
self.debug = debug
def setCommand(self,command):
if type(command) != str:
raise ValueError
self.command = command
def execute(self):
if self.command == None:
raise ValueError
# Remove Carriage Returns
args = shlex.split(self.command)
if self.debug == True:
logging.info("command=\"%s\"",args)
p = Popen(args,stdout=PIPE)
o,e = p.communicate()
o = o.strip('\r')
if self.debug == True:
logging.info("output=\"%s\"",o)
logging.info(':'.join(x.encode('hex') for x in o))
return o
| #!/usr/bin/env python
# Copyright 2014 Boundary, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen,PIPE
import shlex
import logging
class ExecProc:
def __init__(self):
self.command = None
self.debug = False
def setDebug(self,debug):
self.debug = debug
def setCommand(self,command):
if type(command) != str:
raise ValueError
self.command = command
def execute(self):
if self.command == None:
raise ValueError
# Remove Carriage Returns
command = self.command.strip('\r')
args = shlex.split(command)
if self.debug == True:
logging.info("command=\"%s\"",args)
p = Popen(args,stdout=PIPE)
o,e = p.communicate()
if self.debug == True:
logging.info("output=\"%s\"",o)
logging.info(':'.join(x.encode('hex') for x in o))
return o
| apache-2.0 | Python |
cb31b795a816acd3b52952649a3fd628bd70ae10 | Update runner | kiyomaro927/stdbt | source/build.py | source/build.py | #!/usr/bin/env python
from utils import stream
from utils import preprocessor
from utils import vader
from utils import select
from utils import sort
class DBBuilder:
def __init__(self, config_path):
self.stream_api = stream.StreamAPI(config_path)
self.tweet_stream = self.stream_api.stream()
self.sorter = sort.TweetSorter()
self.preprocessor = preprocessor.Preprocessor()
self.selector = select.AttributeSelector()
self.vader = vader.SentimentAnalyzer()
self.db = []
def set_limit(self, limit):
self.limit = limit
def stream(self):
tweets = self.stream_api.get_tweets(self.tweet_stream)
sorted_tweets = self.sorter(tweets)
selected_tweets = [self.selector(item) for item in sorted_tweets]
return selected_tweets
if __name__ == '__main__':
dbbuilder = DBBuilder('config/twitter_api_config.json')
for item in dbbuilder.stream():
print item
print "---"
| #!/usr/bin/env python
| mit | Python |
21fad559f02c3eee41de736636cefc97027184dc | Fix type hint for display_name in common | andrewlin16/duckbot,andrewlin16/duckbot | app/common.py | app/common.py | import random
from discord.ext.commands import Context
rand = random.SystemRandom()
def display_name(ctx: Context) -> str:
return ctx.message.author.nick or ctx.message.author.name
| import random
import string
from discord.ext.commands import Context
rand = random.SystemRandom()
def display_name(ctx: Context) -> string:
return ctx.message.author.nick or ctx.message.author.name
| mit | Python |
f6f6ff80a1ac052eb327d8d6dd237c288ad30b1d | Tag v0.2.0 | macbre/index-digest,macbre/index-digest | indexdigest/__init__.py | indexdigest/__init__.py | """
index_digest Python module
"""
VERSION = '0.2.0'
| """
index_digest Python module
"""
VERSION = '0.1.0'
| mit | Python |
a0cfa7b79b97f02d90f23dd612e57491765ab1cf | Fix #386 (too many file handlers) | matthiask/south,matthiask/south | south/logger.py | south/logger.py | import sys
import logging
from django.conf import settings
# Create a dummy handler to use for now.
class NullHandler(logging.Handler):
def emit(self, record):
pass
_logger = logging.getLogger("south")
_logger.addHandler(NullHandler())
_logger.setLevel(logging.DEBUG)
def get_logger():
"Attach a file handler to the logger if there isn't one already."
debug_on = getattr(settings, "SOUTH_LOGGING_ON", False)
logging_file = getattr(settings, "SOUTH_LOGGING_FILE", False)
if debug_on:
if logging_file and len(_logger.handlers) < 2:
_logger.addHandler(logging.FileHandler(logging_file))
_logger.setLevel(logging.DEBUG)
else:
raise IOError, "SOUTH_LOGGING_ON is True. You also need a SOUTH_LOGGING_FILE setting."
return _logger
def close_logger():
"Closes the logger handler for the file, so we can remove the file after a test."
for handler in _logger.handlers:
_logger.removeHandler(handler)
if isinstance(handler, logging.FileHandler):
handler.close() | import sys
import logging
from django.conf import settings
class NullHandler(logging.Handler):
def emit(self, record):
pass
h = NullHandler()
_logger = logging.getLogger("south")
_logger.addHandler(h)
_logger.setLevel(logging.DEBUG)
# TODO: Add a log formatter?
def get_logger():
debug_on = getattr(settings, "SOUTH_LOGGING_ON", False)
logging_file = getattr(settings, "SOUTH_LOGGING_FILE", False)
if debug_on:
if logging_file:
_logger.addHandler( logging.FileHandler(logging_file) )
_logger.setLevel(logging.DEBUG)
else:
raise IOError, "SOUTH_LOGGING_ON is True. You also need a SOUTH_LOGGING_FILE setting."
return _logger
def close_logger():
"Closes the logger handler for the file, so we can remove the file after a test."
for handler in _logger.handlers:
_logger.removeHandler(handler)
if isinstance(handler, logging.FileHandler):
handler.close() | apache-2.0 | Python |
69fc5b4608ea6da566dac7aba87312fe9c27c717 | Update saliency_interpreter.py (#4286) | allenai/allennlp,allenai/allennlp,allenai/allennlp,allenai/allennlp | allennlp/interpret/saliency_interpreters/saliency_interpreter.py | allennlp/interpret/saliency_interpreters/saliency_interpreter.py | from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.predictors import Predictor
class SaliencyInterpreter(Registrable):
"""
A `SaliencyInterpreter` interprets an AllenNLP Predictor's outputs by assigning a saliency
score to each input token.
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
This function finds saliency values for each input token.
# Parameters
inputs : `JsonDict`
The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).
# Returns
interpretation : `JsonDict`
Contains the normalized saliency values for each input token. The dict has entries for
each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2:, ... }`.
Each one of those entries has entries for the saliency of the inputs, e.g.,
`{grad_input_1: ..., grad_input_2: ... }`.
"""
raise NotImplementedError("Implement this for saliency interpretations")
| from allennlp.common import Registrable
from allennlp.common.util import JsonDict
from allennlp.predictors import Predictor
class SaliencyInterpreter(Registrable):
"""
A `SaliencyInterpreter` interprets an AllenNLP Predictor's outputs by assigning a saliency
score to each input token.
"""
def __init__(self, predictor: Predictor) -> None:
self.predictor = predictor
def saliency_interpret_from_json(self, inputs: JsonDict) -> JsonDict:
"""
This function finds a modification to the input text that would change the model's
prediction in some desired manner (e.g., an adversarial attack).
# Parameters
inputs : `JsonDict`
The input you want to interpret (the same as the argument to a Predictor, e.g., predict_json()).
# Returns
interpretation : `JsonDict`
Contains the normalized saliency values for each input token. The dict has entries for
each instance in the inputs JsonDict, e.g., `{instance_1: ..., instance_2:, ... }`.
Each one of those entries has entries for the saliency of the inputs, e.g.,
`{grad_input_1: ..., grad_input_2: ... }`.
"""
raise NotImplementedError("Implement this for saliency interpretations")
| apache-2.0 | Python |
5ab0c1c1323b2b12a19ef58de4c03236db84644d | Remove old views and replace with new PasswordResetConfirmView | haematologic/cellcounter,haematologic/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,cellcounter/cellcounter,haematologic/cellcounter,cellcounter/cellcounter | cellcounter/accounts/urls.py | cellcounter/accounts/urls.py | from django.conf.urls import patterns, url
from cellcounter.accounts import views
urlpatterns = patterns('',
url('^new/$', views.RegistrationView.as_view(), name='register'),
url('^(?P<pk>[0-9]+)/$', views.UserDetailView.as_view(), name='user-detail'),
url('^(?P<pk>[0-9]+)/delete/$', views.UserDeleteView.as_view(), name='user-delete'),
url('^(?P<pk>[0-9]+)/edit/$', views.UserUpdateView.as_view(), name='user-update'),
url('^password/reset/$', views.PasswordResetView.as_view(),
name='password-reset'),
url('^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[\d\w\-]+)/$',
views.PasswordResetConfirmView.as_view(),
name='password-reset-confirm'),
url('^password/change/$', views.PasswordChangeView.as_view(), name='change-password'),
) | from django.conf.urls import patterns, url
from cellcounter.accounts import views
urlpatterns = patterns('',
url('^new/$', views.RegistrationView.as_view(), name='register'),
url('^(?P<pk>[0-9]+)/$', views.UserDetailView.as_view(), name='user-detail'),
url('^(?P<pk>[0-9]+)/delete/$', views.UserDeleteView.as_view(), name='user-delete'),
url('^(?P<pk>[0-9]+)/edit/$', views.UserUpdateView.as_view(), name='user-update'),
url('^password/reset/$', views.PasswordResetView.as_view(),
name='password-reset'),
url('^password/reset/done/$', views.password_reset_done, name='password-reset-done'),
url('^password/reset/confirm/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[\d\w\-]+)/$',
'django.contrib.auth.views.password_reset_confirm', {
'template_name': 'accounts/reset_confirm.html',
'post_reset_redirect': 'password-reset-done',
},
name='password-reset-confirm'),
url('^password/change/$', views.PasswordChangeView.as_view(), name='change-password'),
) | mit | Python |
d913afd26ca817efe6b0630f9d5fdb49a0cc4284 | Update utils.py | praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem | gem/utils.py | gem/utils.py | from django.utils.http import urlencode
from django.conf import settings
def provider_logout_url(request):
"""
This function is used to construct a logout URL that can be used to
log the user out of
the Identity Provider (Authentication Service).
:param request:
:return:
"""
site = request.site
if not hasattr(site, "oidcsettings"):
raise RuntimeError("Site {} has no settings configured.".format(site))
parameters = {
"post_logout_redirect_uri": site.oidcsettings.wagtail_redirect_url
}
# The OIDC_STORE_ID_TOKEN setting must be set to true if we want
# to be able to read
# it from the session.
if "oidc_id_token" in request.session:
parameters["id_token_hint"] = request.session["oidc_id_token"]
redirect_url = settings.OIDC_OP_LOGOUT_URL + "?" + urlencode(
parameters, doseq=True)
return redirect_url
| from django.utils.http import urlencode
from django.conf import settings
def provider_logout_url(request):
"""
This function is used to construct a logout URL that can be used to
log the user out of
the Identity Provider (Authentication Service).
:param request:
:return:
"""
site = request.site
if not hasattr(site, "oidcsettings"):
raise RuntimeError("Site {} has no settings configured.".format(site))
parameters = {
"post_logout_redirect_uri": site.oidc_settings.wagtail_redirect_url
}
# The OIDC_STORE_ID_TOKEN setting must be set to true if we want
# to be able to read
# it from the session.
if "oidc_id_token" in request.session:
parameters["id_token_hint"] = request.session["oidc_id_token"]
redirect_url = settings.OIDC_OP_LOGOUT_URL + "?" + urlencode(
parameters, doseq=True)
return redirect_url
| bsd-2-clause | Python |
f0acf5023db56e8011a6872f230514a69ec9f311 | Use new MainApplication Tk class. | adambiser/snes-wolf3d-extractor | extractor.py | extractor.py | import extractor.ui.mainapplication as ui
import Tkinter as tk
def main():
## root = tk.Tk()
## root.title('SNES Wolfenstein 3D Extractor')
## root.minsize(400, 100)
## ui.MainApplication(root).pack(side="top", fill="both", expand=True)
## root.mainloop()
ui.MainApplication().mainloop()
main()
| import extractor.ui.mainapplication as ui
import Tkinter as tk
def main():
root = tk.Tk()
root.title('SNES Wolfenstein 3D Extractor')
root.minsize(400, 100)
ui.MainApplication(root).pack(side="top", fill="both", expand=True)
root.mainloop()
main()
| mit | Python |
cc7cd9accce7aa53f1bff9416c341a01f615dd35 | Remove CSV from supported formats | jqnatividad/ckanext-officedocs,jqnatividad/ckanext-officedocs,jqnatividad/ckanext-officedocs | ckanext/officedocs/plugin.py | ckanext/officedocs/plugin.py | import ckan.lib.helpers as h
import ckan.plugins as p
import ckan.plugins.toolkit as tk
from six.moves.urllib.parse import quote_plus
class OfficeDocsPlugin(p.SingletonPlugin):
p.implements(p.IConfigurer)
p.implements(p.IResourceView)
def update_config(self, config_):
tk.add_template_directory(config_, "templates")
tk.add_public_directory(config_, "public")
tk.add_resource("fanstatic", "officedocs")
def info(self):
return {
"name": "officedocs_view",
"title": tk._("Office Previewer"),
"default_title": tk._("Preview"),
"icon": "compass",
"always_available": True,
"iframed": False,
}
def setup_template_variables(self, context, data_dict):
url = quote_plus(data_dict["resource"]["url"])
return {
"resource_url": url
}
def can_view(self, data_dict):
supported_formats = [
"DOC", "DOCX", "XLS",
"XLSX", "XLSB", "PPT", "PPTX",
"PPS", "PPSX", "ODT", "ODS", "ODP"
]
try:
res = data_dict["resource"].get("format", "").upper()
return res in supported_formats
except:
return False
def view_template(self, context, data_dict):
return "officedocs/preview.html"
def form_template(self, context, data_dict):
return "officedocs/form.html"
| import ckan.lib.helpers as h
import ckan.plugins as p
import ckan.plugins.toolkit as tk
from six.moves.urllib.parse import quote_plus
class OfficeDocsPlugin(p.SingletonPlugin):
p.implements(p.IConfigurer)
p.implements(p.IResourceView)
def update_config(self, config_):
tk.add_template_directory(config_, "templates")
tk.add_public_directory(config_, "public")
tk.add_resource("fanstatic", "officedocs")
def info(self):
return {
"name": "officedocs_view",
"title": tk._("Office Previewer"),
"default_title": tk._("Preview"),
"icon": "compass",
"always_available": True,
"iframed": False,
}
def setup_template_variables(self, context, data_dict):
url = quote_plus(data_dict["resource"]["url"])
return {
"resource_url": url
}
def can_view(self, data_dict):
supported_formats = [
"DOC", "DOCX", "XLS", "CSV",
"XLSX", "XLSB", "PPT", "PPTX",
"PPS", "PPSX", "ODT", "ODS", "ODP"
]
try:
res = data_dict["resource"].get("format", "").upper()
return res in supported_formats
except:
return False
def view_template(self, context, data_dict):
return "officedocs/preview.html"
def form_template(self, context, data_dict):
return "officedocs/form.html"
| agpl-3.0 | Python |
aed4c22faff1f4b2db13c6717ca8fe97db9448ac | Fix spec_version generation | ensonic/ev3dev-lang-python-1,rhempel/ev3dev-lang-python,ensonic/ev3dev-lang-python,dwalton76/ev3dev-lang-python,ddemidov/ev3dev-lang-python-1,dwalton76/ev3dev-lang-python | spec_version.py | spec_version.py | #~autogen spec_version
spec_version = "spec: 0.9.3-pre-r2, kernel: v3.16.7-ckt16-7-ev3dev-ev3"
#~autogen
| #~autogen ../python/templates/python_spec_version
spec_version = "spec: 0.9.3-pre-r1, kernel: v3.16.7-ckt10-4-ev3dev-ev3"
#~autogen
| mit | Python |
fde18f54087dcf79b8a1ff0c36cc6214ce366386 | Update P04_updateProduce fixed AttributeError by changing depreciated method | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter12/P04_updateProduce.py | books/AutomateTheBoringStuffWithPython/Chapter12/P04_updateProduce.py | #! python3
# P04_updateProduce.py - Corrects costs in produce sales spreadsheet.
#
# Note:
# - The produceSales.xlsx workbook can be downloaded from
# https://nostarch.com/automatestuff/
import openpyxl
wb = openpyxl.load_workbook("produceSales.xlsx")
sheet = wb.get_sheet_by_name("Sheet")
# The produce types and their updated prices
PRICE_UPDATES = {"Garlic": 3.07,
"Celery": 1.19,
"Lemon": 1.27}
# Loop through the rows and update the prices
for rowNum in range(2, sheet.max_row): # skip the first row
produceName = sheet.cell(row=rowNum, column=1).value
if produceName in PRICE_UPDATES:
sheet.cell(row=rowNum, column=2).value = PRICE_UPDATES[produceName]
wb.save("updatedProduceSales.xlsx")
| #! python3
# P04_updateProduce.py - Corrects costs in produce sales spreadsheet.
#
# Note:
# - The produceSales.xlsx workbook can be downloaded from
# https://nostarch.com/automatestuff/
import openpyxl
wb = openpyxl.load_workbook("produceSales.xlsx")
sheet = wb.get_sheet_by_name("Sheet")
# The produce types and their updated prices
PRICE_UPDATES = {"Garlic": 3.07,
"Celery": 1.19,
"Lemon": 1.27}
# Loop through the rows and update the prices
for rowNum in range(2, sheet.get_highest_row()): # skip the first row
produceName = sheet.cell(row=rowNum, column=1).value
if produceName in PRICE_UPDATES:
sheet.cell(row=rowNum, column=2).value = PRICE_UPDATES[produceName]
wb.save("updatedProduceSales.xlsx")
| mit | Python |
4f6640ad2c33f0db3f2c6d40aa9a07d28710bda6 | fix channel name | openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro,openhumanoids/oh-distro | software/control/fall_detect_demo/fall_detector.py | software/control/fall_detect_demo/fall_detector.py | from __future__ import division
import lcm
import os
import sys
sys.path.append(os.path.expanduser('~/drc/software/build/lib/python2.7/dist-packages'))
import drc
m = lcm.LCM()
class FallDetector:
def __init__(self):
self.decay_rate = 0.2
self.zmp_error = 0
self.last_t = None
def handle(self, channel, data):
msg = drc.controller_zmp_status_t.decode(data)
t = msg.utime / 1e9
if self.last_t is None:
self.last_t = t
else:
self.zmp_error = max(0, self.zmp_error + (t - self.last_t) * (int(not msg.zmp_ok) - self.decay_rate))
self.last_t = t
# print "{:.2f} {:b} {:.3f}".format(t, msg.zmp_ok, self.zmp_error)
if self.zmp_error > 0.1:
print "WARNING: FALL DETECTED"
f = FallDetector()
m.subscribe("CONTROLLER_ZMP_STATUS", f.handle)
while True:
m.handle()
| from __future__ import division
import lcm
import os
import sys
sys.path.append(os.path.expanduser('~/drc/software/build/lib/python2.7/dist-packages'))
import drc
m = lcm.LCM()
class FallDetector:
def __init__(self):
self.decay_rate = 0.2
self.zmp_error = 0
self.last_t = None
def handle(self, channel, data):
msg = drc.controller_zmp_status_t.decode(data)
t = msg.utime / 1e9
if self.last_t is None:
self.last_t = t
else:
self.zmp_error = max(0, self.zmp_error + (t - self.last_t) * (int(not msg.zmp_ok) - self.decay_rate))
self.last_t = t
# print "{:.2f} {:b} {:.3f}".format(t, msg.zmp_ok, self.zmp_error)
if self.zmp_error > 0.1:
print "WARNING: FALL DETECTED"
f = FallDetector()
m.subscribe("ZMP_STATUS", f.handle)
while True:
m.handle()
| bsd-3-clause | Python |
31071fb6ca4e894dc42aefe7218c722f4f0c0e26 | Update file. | satnet-project/propagators,satnet-project/propagators,satnet-project/propagators,satnet-project/propagators,satnet-project/propagators | get_names.py | get_names.py |
class Get_names:
def __init__(self):
import os
import sys
directorio_actual = os.getcwd()
print directorio_actual
from time import sleep
sleep(5)
os.chdir(directorio_actual + '/TLEs')
print os.getcwd()
sleep(5)
print sys.argv[1]
sleep(5)
abrir_tle = open(sys.argv[1], 'r')
lista_nombres_satelites = abrir_tle.readlines()
lista_nombres_satelites = [item.rstrip('\n') for item in lista_nombres_satelites]
os.chdir(directorio_actual)
tamano_lista = len(lista_nombres_satelites)
y = tamano_lista/3
numeros_lista = map(self.devuelve_lista, range(y))
lista_satelites = []
i = 0
for i in range(len(numeros_lista)):
lista_satelites.append(lista_nombres_satelites[numeros_lista[i]])
self.save_list(lista_satelites)
def devuelve_lista(self, x):
return 3*x
def save_list(self, lista):
import os
directorio_script = os.getcwd()
# PyEphem
os.chdir(directorio_script + '/results/PyEphem')
create_file_pyephem = open('temp', 'w')
create_file_pyephem.writelines(["%s\n" % item for item in lista])
# predict
os.chdir(directorio_script)
os.chdir(directorio_script + '/results/predict')
create_file_predict = open('temp', 'w')
create_file_predict.writelines(["%s\n" % item for item in lista])
# pyorbital
os.chdir(directorio_script)
os.chdir(directorio_script + '/results/PyOrbital')
create_file_pyorbital = open('temp', 'w')
create_file_pyorbital.writelines(["%s\n" % item for item in lista])
# Orbitron
os.chdir(directorio_script)
os.chdir(directorio_script + '/results/Orbitron')
create_file_orbitron = open('temp', 'w')
create_file_orbitron.writelines(["%s\n" % item for item in lista])
os.chdir(directorio_script)
if __name__ == '__main__':
get_name = Get_names()
|
class Get_names:
    """Read the TLE file named on the command line, keep every third line
    (the satellite names), and write them to per-propagator 'temp' files.
    (Python 2 script; older revision with the Orbitron output disabled.)"""

    def __init__(self):
        import os
        import sys
        directorio_actual = os.getcwd()
        print directorio_actual
        from time import sleep
        # NOTE(review): debugging pauses -- confirm they are still wanted.
        sleep(5)
        os.chdir(directorio_actual + '/TLEs')
        print os.getcwd()
        sleep(5)
        print sys.argv[1]
        sleep(5)
        # NOTE(review): the file handle is never closed.
        abrir_tle = open(sys.argv[1], 'r')
        lista_nombres_satelites = abrir_tle.readlines()
        lista_nombres_satelites = [item.rstrip('\n') for item in lista_nombres_satelites]
        os.chdir(directorio_actual)
        # One TLE entry per 3 lines; line 3*k is the entry's name line.
        tamano_lista = len(lista_nombres_satelites)
        y = tamano_lista/3
        numeros_lista = map(self.devuelve_lista, range(y))
        lista_satelites = []
        i = 0
        for i in range(len(numeros_lista)):
            lista_satelites.append(lista_nombres_satelites[numeros_lista[i]])
        self.save_list(lista_satelites)

    def devuelve_lista(self, x):
        # Index of the name line of the x-th TLE entry.
        return 3*x

    def save_list(self, lista):
        """Write one satellite name per line per propagator results directory.
        NOTE(review): the opened files are never closed explicitly."""
        import os
        directorio_script = os.getcwd()
        # PyEphem
        os.chdir(directorio_script + '/results/PyEphem')
        create_file_pyephem = open('temp', 'w')
        create_file_pyephem.writelines(["%s\n" % item for item in lista])
        # predict
        os.chdir(directorio_script)
        os.chdir(directorio_script + '/results/predict')
        create_file_predict = open('temp', 'w')
        create_file_predict.writelines(["%s\n" % item for item in lista])
        # pyorbital
        os.chdir(directorio_script)
        os.chdir(directorio_script + '/results/pyorbital')
        create_file_pyorbital = open('temp', 'w')
        create_file_pyorbital.writelines(["%s\n" % item for item in lista])
        # Orbitron
        # os.chdir(directorio_script)
        # os.chdir(directorio_script + '/Orbitron/Output')
        # create_file_orbitron = open('temp', 'w')
        # create_file_orbitron.writelines(["%s\n" % item for item in lista])
        os.chdir(directorio_script)


if __name__ == '__main__':
    get_name = Get_names()
| apache-2.0 | Python |
98ee4e6a25bf82ac32c2f89f17278993a14bcfba | create / drop LDAP in tests | UGentPortaal/django-ldapdb-archived,UGentPortaal/django-ldapdb,crito/django-ldapdb,chronossc/django-ldapdb,crito/django-ldapdb | examples/tests.py | examples/tests.py | # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (C) 2009-2010 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import ldap
from django.test import TestCase
from ldapdb import connection
from examples.models import LdapUser, LdapGroup
class BaseTestCase(TestCase):
def setUp(self):
cursor = connection._cursor()
for base in [LdapGroup.base_dn, LdapUser.base_dn]:
ou = base.split(',')[0].split('=')[1]
attrs = [('objectClass', ['top', 'organizationalUnit']), ('ou', [ou])]
try:
cursor.connection.add_s(base, attrs)
except ldap.ALREADY_EXISTS:
pass
def tearDown(self):
cursor = connection._cursor()
for base in [LdapGroup.base_dn, LdapUser.base_dn]:
results = cursor.connection.search_s(base, ldap.SCOPE_SUBTREE)
for dn, attrs in reversed(results):
cursor.connection.delete_s(dn)
class GroupTestCase(BaseTestCase):
def test_create(self):
g = LdapGroup()
g.name = "foogroup"
g.gid = 1000
g.usernames = ['foouser']
g.save()
class UserTestCase(BaseTestCase):
def test_create(self):
u = LdapUser()
u.first_name = "Foo"
u.last_name = "User"
u.full_name = "Foo User"
u.group = 1000
u.home_directory = "/home/foouser"
u.uid = 1000
u.username = "foouser"
u.save()
| # -*- coding: utf-8 -*-
#
# django-ldapdb
# Copyright (C) 2009-2010 Bolloré telecom
# See AUTHORS file for a full list of contributors.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from django.test import TestCase
from examples.models import LdapUser, LdapGroup
class GroupTestCase(TestCase):
def test_create(self):
g = LdapGroup()
g.name = "foogroup"
g.gid = 1000
g.usernames = ['foouser']
g.save()
class UserTestCase(TestCase):
def test_create(self):
u = LdapUser()
u.first_name = "Foo"
u.last_name = "User"
u.full_name = "Foo User"
u.group = 1000
u.home_directory = "/home/foouser"
u.uid = 1000
u.username = "foouser"
u.save()
| bsd-3-clause | Python |
5e53f1e86fc7c4f1c7b42479684ac393c997ce52 | Fix exit code of unittest. | qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,qiuwch/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv,unrealcv/unrealcv | client/test/test-unrealcv.py | client/test/test-unrealcv.py | # TODO: Test robustness, test speed
import unittest, time, sys
from common_conf import *
from test_server import EchoServer, MessageServer
import argparse
import threading
from test_server import TestMessageServer
from test_client import TestClientWithDummyServer
from test_commands import TestCommands
from test_realistic_rendering import TestRealisticRendering
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Only run tests available to travis CI (no UE4 binary required).
    parser.add_argument('--travis', action='store_true')
    args = parser.parse_args()

    # Assemble the suites: the dummy-server tests always run; suites that
    # need a real Unreal binary are skipped under --travis.
    suites = []
    load = unittest.TestLoader().loadTestsFromTestCase
    s = load(TestMessageServer); suites.append(s)
    s = load(TestClientWithDummyServer); suites.append(s)
    if not args.travis:
        s = load(TestCommands); suites.append(s)
        s = load(TestRealisticRendering); suites.append(s)
    suite_obj = unittest.TestSuite(suites)

    # Exit non-zero when any test failed so CI can detect it.
    # Fix: the TestResult method is wasSuccessful(); the previous spelling
    # "wasSucessful" raised AttributeError at runtime.
    ret = not unittest.TextTestRunner(verbosity = 2).run(suite_obj).wasSuccessful()
    sys.exit(ret)
| # TODO: Test robustness, test speed
import unittest, time, sys
from common_conf import *
from test_server import EchoServer, MessageServer
import argparse
import threading
from test_server import TestMessageServer
from test_client import TestClientWithDummyServer
from test_commands import TestCommands
from test_realistic_rendering import TestRealisticRendering
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--travis', action='store_true') # Only run test availabe to travis CI
    args = parser.parse_args()
    # Dummy-server suites always run; suites needing a real game binary are
    # skipped under --travis.
    suites = []
    load = unittest.TestLoader().loadTestsFromTestCase
    s = load(TestMessageServer); suites.append(s)
    s = load(TestClientWithDummyServer); suites.append(s)
    if not args.travis:
        s = load(TestCommands); suites.append(s)
        s = load(TestRealisticRendering); suites.append(s)
    suite_obj = unittest.TestSuite(suites)
    # NOTE(review): the runner's result is discarded, so the process always
    # exits 0 even when tests fail.
    unittest.TextTestRunner(verbosity = 2).run(suite_obj)
| mit | Python |
8aca952c1c1e2668df58a0844896267f7e69b8f6 | remove assert | rmariano/compr,rmariano/compr | compressor/cli.py | compressor/cli.py | """
Compressor CLI (command-line interface) module.
Exposes the entry point to the program for executing as command line.
"""
import argparse
import sys
from compressor.constants import VERSION
from compressor.lib import compress_file, extract_file
def argument_parser() -> argparse.ArgumentParser:
    """Create the argument parser object to be used for parsing the arguments
    from sys.argv

    :return: A configured :class:`argparse.ArgumentParser`.
    """
    parser = argparse.ArgumentParser(
        prog='PyCompress',
        description="Compress text files.",
    )
    # Positional argument: the file to compress or extract.
    parser.add_argument(
        'filename',
        type=str,
        help="Name of the file to process"
    )
    # Exactly one of -c / -x must be provided (required=True).
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '-c', '--compress',
        action='store_true',
        help="Compress the file"
    )
    group.add_argument(
        '-x', '--extract',
        action='store_true',
        help="Extract the file"
    )
    parser.add_argument(
        '-d', '--dest-file',
        type=str,
        default=None,
        help="Destination File Name"
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {version}'.format(version=VERSION)
    )
    return parser
def parse_arguments(args=None) -> dict:
    """Parse the command-line (cli) provided arguments, and return a mapping of
    the options selected by the user with their values.

    :param args: Optional argument list; ``None`` means ``sys.argv[1:]``.
    :return: dict with the kwargs provided in cli
    """
    parser = argument_parser()
    args = parser.parse_args(args)
    return vars(args)


def main_engine(filename: str, extract: bool = False,
                compress: bool = True, dest_file=None) -> int:
    """
    Main functionality for the program cli or call as library.
    `extract` & `compress` must have opposite values.

    :param filename: Path to the source file to process.
    :param extract: If True, sets the program for a extraction.
    :param compress: If True, the program should compress a file.
    :param dest_file: Optional name of the target file.
    :return: 0 if executed without problems.
    """
    # The CLI path guarantees exactly one of the two flags via the
    # mutually-exclusive group; library callers must honour the contract
    # stated in the docstring themselves.
    if compress:
        compress_file(filename, dest_file)
    if extract:
        extract_file(filename, dest_file)
    return 0


def main() -> int:  # pragma: nocover
    """Program cli

    :return: Status code of the program.
    :rtype: int
    """
    return main_engine(**parse_arguments())


if __name__ == '__main__':  # pragma: nocover
    sys.exit(main())
| """
Compressor CLI (command-line interface) module.
Exposes the entry point to the program for executing as command line.
"""
import argparse
import sys
from compressor.constants import VERSION
from compressor.lib import compress_file, extract_file
def argument_parser() -> argparse.ArgumentParser:
    """Create the argument parser object to be used for parsing the arguments
    from sys.argv

    :return: A configured :class:`argparse.ArgumentParser`.
    """
    parser = argparse.ArgumentParser(
        prog='PyCompress',
        description="Compress text files.",
    )
    # Positional argument: the file to compress or extract.
    parser.add_argument(
        'filename',
        type=str,
        help="Name of the file to process"
    )
    # Exactly one of -c / -x must be provided (required=True).
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument(
        '-c', '--compress',
        action='store_true',
        help="Compress the file"
    )
    group.add_argument(
        '-x', '--extract',
        action='store_true',
        help="Extract the file"
    )
    parser.add_argument(
        '-d', '--dest-file',
        type=str,
        default=None,
        help="Destination File Name"
    )
    parser.add_argument(
        '-v', '--version',
        action='version',
        version='%(prog)s {version}'.format(version=VERSION)
    )
    return parser


def parse_arguments(args=None) -> dict:
    """Parse the command-line (cli) provided arguments, and return a mapping of
    the options selected by the user with their values.

    :param args: Optional argument list; ``None`` means ``sys.argv[1:]``.
    :return: dict with the kwargs provided in cli
    """
    parser = argument_parser()
    args = parser.parse_args(args)
    return vars(args)


def main_engine(filename: str, extract: bool = False,
                compress: bool = True, dest_file=None) -> int:
    """
    Main functionality for the program cli or call as library.
    `extract` & `compress` must have opposite values.

    :param filename: Path to the source file to process.
    :param extract: If True, sets the program for a extraction.
    :param compress: If True, the program should compress a file.
    :param dest_file: Optional name of the target file.
    :return: 0 if executed without problems.
    """
    # Guard against callers passing the same value for both flags.
    assert extract is not compress, "Cannot both extract & compress"
    if compress:
        compress_file(filename, dest_file)
    if extract:
        extract_file(filename, dest_file)
    return 0


def main() -> int:  # pragma: nocover
    """Program cli

    :return: Status code of the program.
    :rtype: int
    """
    return main_engine(**parse_arguments())


if __name__ == '__main__':  # pragma: nocover
    sys.exit(main())
| mit | Python |
75b8f0ec2a1f3c6468153180c70eabc4ecff642a | fix flake8 | juju4/ansible-bro-ids,juju4/ansible-bro-ids,juju4/ansible-bro-ids | files/api.py | files/api.py | #!/usr/bin/python
# from http://nullsecure.org/building-your-own-passivedns-feed/
import requests
import json
import sys

# Indicator to look up (domain or IP), taken from the first CLI argument.
indicator = sys.argv[1]

# Local passive-DNS API endpoint; the indicator is appended as the path.
url = "http://localhost:8081/dns/"
r = requests.get(url+indicator)
j = json.loads(r.text)

# Render the matching records as a fixed-width table.
print("+------------------------+------------------------+--------+-------+-----------------------------+")
print("| First Seen | Last Seen | Type | TTL | Answer |")
print("+------------------------+------------------------+--------+-------+-----------------------------+")
for record in j['records']:
    print(" ", record['first'], "\t ", record['last'], " ",
          record['type'], " ", record['ttl'], " ", record['answer'])
| #!/usr/bin/python
## from http://nullsecure.org/building-your-own-passivedns-feed/
import requests
import json
import sys

# Indicator to look up (domain or IP), taken from the first CLI argument.
# (Python 2 revision of the script.)
indicator = sys.argv[1]

# Local passive-DNS API endpoint; the indicator is appended as the path.
url = "http://localhost:8081/dns/"
r = requests.get(url+indicator)
j = json.loads(r.text)

# Render the matching records as a fixed-width table.
print "+------------------------+------------------------+--------+-------+-----------------------------+"
print "| First Seen | Last Seen | Type | TTL | Answer |"
print "+------------------------+------------------------+--------+-------+-----------------------------+"
for record in j['records']:
    print " ",record['first'],"\t ",record['last']," ",record['type']," ",record['ttl']," ",record['answer']
| bsd-2-clause | Python |
e4caa80cc6b6ee2b9c031a7d743d61b4830f2a7e | put typing_extensions.get_types() behind python version guard | gitpython-developers/gitpython,gitpython-developers/GitPython,gitpython-developers/GitPython,gitpython-developers/gitpython | git/types.py | git/types.py | # -*- coding: utf-8 -*-
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import sys
from typing import (Callable, Dict, NoReturn, Tuple, Union, Any, Iterator, # noqa: F401
NamedTuple, TYPE_CHECKING, TypeVar) # noqa: F401
# Pull typing features from the stdlib when the interpreter is new enough,
# otherwise from the typing_extensions backport.
if sys.version_info[:2] >= (3, 8):
    from typing import Final, Literal, SupportsIndex, TypedDict, Protocol, get_args  # noqa: F401
else:
    from typing_extensions import Final, Literal, SupportsIndex, TypedDict, Protocol, get_args  # noqa: F401

# TypeGuard landed in typing in 3.10.
if sys.version_info[:2] >= (3, 10):
    from typing import TypeGuard  # noqa: F401
else:
    from typing_extensions import TypeGuard  # noqa: F401


if sys.version_info[:2] < (3, 9):
    PathLike = Union[str, os.PathLike]
elif sys.version_info[:2] >= (3, 9):
    # os.PathLike only becomes subscriptable from Python 3.9 onwards
    PathLike = Union[str, 'os.PathLike[str]']  # forward ref as pylance complains unless editing with py3.9+

if TYPE_CHECKING:
    from git.objects import Commit, Tree, TagObject, Blob
    # from git.refs import SymbolicReference

# Placeholder alias for values whose precise type is still to be determined.
TBD = Any

# Objects usable where a tree / commit is expected.
Tree_ish = Union['Commit', 'Tree']
Commit_ish = Union['Commit', 'TagObject', 'Blob', 'Tree']

# Closed set of git configuration scopes.
Lit_config_levels = Literal['system', 'global', 'user', 'repository']


class ConfigLevels_NT(NamedTuple):
    """NamedTuple of allowed CONFIG_LEVELS"""
    # works for pylance, but not mypy
    system: Literal['system']
    user: Literal['user']
    global_: Literal['global']
    repository: Literal['repository']


ConfigLevels_Tup = Tuple[Lit_config_levels, Lit_config_levels, Lit_config_levels, Lit_config_levels]
# Typing this as specific literals breaks for mypy


def is_config_level(inp: str) -> TypeGuard[Lit_config_levels]:
    # TypeGuard narrows *inp* to Lit_config_levels for the caller when True.
    return inp in get_args(Lit_config_levels)
def assert_never(inp: NoReturn, exc: Union[Exception, None] = None) -> NoReturn:
    """Fail loudly when an if/else chain over a Literal missed a case.

    Raises *exc* when one is supplied; otherwise trips an AssertionError
    naming the unhandled value.
    """
    if exc is not None:
        raise exc
    assert False, f"An unhandled Literal ({inp}) in an if else chain was found"
class Files_TD(TypedDict):
    """Per-file insertion/deletion/line counts."""
    insertions: int
    deletions: int
    lines: int


class Total_TD(TypedDict):
    """Aggregate counts over all files, plus the number of files."""
    insertions: int
    deletions: int
    lines: int
    files: int


class HSH_TD(TypedDict):
    """Overall totals plus a per-path breakdown of file stats."""
    total: Total_TD
    files: Dict[PathLike, Files_TD]
| # -*- coding: utf-8 -*-
# This module is part of GitPython and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php
import os
import sys
from typing import (Callable, Dict, NoReturn, Tuple, Union, Any, Iterator, # noqa: F401
NamedTuple, TYPE_CHECKING, get_args, TypeVar) # noqa: F401
# NOTE(review): ``get_args`` is imported unconditionally from ``typing`` in
# the import block above, which requires Python 3.8+, defeating the <3.8
# fallbacks below -- verify (this is what the newer revision fixes).
if sys.version_info[:2] >= (3, 8):
    from typing import Final, Literal, SupportsIndex, TypedDict, Protocol  # noqa: F401
else:
    from typing_extensions import Final, Literal, SupportsIndex, TypedDict, Protocol  # noqa: F401

# TypeGuard landed in typing in 3.10.
if sys.version_info[:2] >= (3, 10):
    from typing import TypeGuard  # noqa: F401
else:
    from typing_extensions import TypeGuard  # noqa: F401


if sys.version_info[:2] < (3, 9):
    PathLike = Union[str, os.PathLike]
elif sys.version_info[:2] >= (3, 9):
    # os.PathLike only becomes subscriptable from Python 3.9 onwards
    PathLike = Union[str, 'os.PathLike[str]']  # forward ref as pylance complains unless editing with py3.9+

if TYPE_CHECKING:
    from git.objects import Commit, Tree, TagObject, Blob
    # from git.refs import SymbolicReference

# Placeholder alias for not-yet-typed values.
TBD = Any

Tree_ish = Union['Commit', 'Tree']
Commit_ish = Union['Commit', 'TagObject', 'Blob', 'Tree']

Lit_config_levels = Literal['system', 'global', 'user', 'repository']


class ConfigLevels_NT(NamedTuple):
    """NamedTuple of allowed CONFIG_LEVELS"""
    # works for pylance, but not mypy
    system: Literal['system']
    user: Literal['user']
    global_: Literal['global']
    repository: Literal['repository']


ConfigLevels_Tup = Tuple[Lit_config_levels, Lit_config_levels, Lit_config_levels, Lit_config_levels]
# Typing this as specific literals breaks for mypy


def is_config_level(inp: str) -> TypeGuard[Lit_config_levels]:
    # TypeGuard narrows *inp* to Lit_config_levels for the caller when True.
    return inp in get_args(Lit_config_levels)


def assert_never(inp: NoReturn, exc: Union[Exception, None] = None) -> NoReturn:
    # Raise *exc* when given, else fail an assertion naming the missed case.
    if exc is None:
        assert False, f"An unhandled Literal ({inp}) in an if else chain was found"
    else:
        raise exc


class Files_TD(TypedDict):
    # Per-file insertion/deletion/line counts.
    insertions: int
    deletions: int
    lines: int


class Total_TD(TypedDict):
    # Aggregate counts over all files.
    insertions: int
    deletions: int
    lines: int
    files: int


class HSH_TD(TypedDict):
    # Overall totals plus a per-path breakdown.
    total: Total_TD
    files: Dict[PathLike, Files_TD]
| bsd-3-clause | Python |
cb28aa3a0272bdcd2db94d2d25468458420a544a | Revise to bool is_carry | bowen0701/algorithms_data_structures | lc0066_plus_one.py | lc0066_plus_one.py | """Leetcode 66. Plus One
Easy
URL: https://leetcode.com/problems/plus-one/
Given a non-empty array of digits representing a non-negative integer,
plus one to the integer.
The digits are stored such that the most significant digit is at the
head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero,
except the number 0 itself.
Example 1:
Input: [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
"""
class Solution(object):
    def plusOne(self, digits):
        """Add one to the integer encoded by *digits* (most significant first).

        :type digits: List[int]
        :rtype: List[int]

        The list is modified in place and also returned.
        Time: O(n); extra space: O(1), except when a new leading 1 is needed.
        """
        pos = len(digits) - 1
        # Walk from the least significant digit, zeroing trailing 9s until a
        # digit can simply be bumped.
        while pos >= 0:
            if digits[pos] < 9:
                digits[pos] += 1
                return digits
            digits[pos] = 0
            pos -= 1
        # Every digit was 9 (e.g. [9, 9] -> [0, 0]): prepend the carry.
        if digits:
            digits.insert(0, 1)
        return digits
def main():
    """Ad-hoc smoke test: print plusOne results for a few sample inputs.
    (Python 2 print statements.)"""
    # Ans: [1,2,4]
    digits = [1,2,3]
    print Solution().plusOne(digits)
    # Ans: [4,3,2,2]
    digits = [4,3,2,1]
    print Solution().plusOne(digits)
    # Ans: [3, 0]
    digits = [2, 9]
    print Solution().plusOne(digits)
    # Ans: [1, 0, 0]
    digits = [9, 9]
    print Solution().plusOne(digits)


if __name__ == '__main__':
    main()
| """Leetcode 66. Plus One
Easy
URL: https://leetcode.com/problems/plus-one/
Given a non-empty array of digits representing a non-negative integer,
plus one to the integer.
The digits are stored such that the most significant digit is at the
head of the list, and each element in the array contain a single digit.
You may assume the integer does not contain any leading zero,
except the number 0 itself.
Example 1:
Input: [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.
Example 2:
Input: [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
"""
class Solution(object):
    def plusOne(self, digits):
        """
        :type digits: List[int]
        :rtype: List[int]

        Time complexity: O(n), where n is the length of digits.
        Space complexity: O(n), for worse case of total overflow.
        """
        # Set once a digit wraps past 9 and the +1 must move to the next
        # more-significant digit.
        overflow = False
        # Start from the last digit and reverse back to the 1st one.
        for i in reversed(range(len(digits))):
            # Plus one only for the last digit or when overflow.
            if overflow or i == len(digits) - 1:
                if digits[i] + 1 < 10:
                    digits[i] += 1
                    return digits
                else:
                    overflow = True
                    digits[i] = 0
        # If there is total overflow, plus to the head of digits.
        if overflow:
            digits.insert(0, 1)
        return digits


def main():
    # Smoke test (Python 2 print statements).
    # Ans: [1,2,4]
    digits = [1,2,3]
    print Solution().plusOne(digits)
    # Ans: [4,3,2,2]
    digits = [4,3,2,1]
    print Solution().plusOne(digits)
    # Ans: [3, 0]
    digits = [2, 9]
    print Solution().plusOne(digits)
    # Ans: [1, 0, 0]
    digits = [9, 9]
    print Solution().plusOne(digits)


if __name__ == '__main__':
    main()
| bsd-2-clause | Python |
4b1c356a27adc68e62d801986e67ef60336685db | add and configure infernal dependency (#14502) | LLNL/spack,iulian787/spack,iulian787/spack,LLNL/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack | var/spack/repos/builtin/packages/trnascan-se/package.py | var/spack/repos/builtin/packages/trnascan-se/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class TrnascanSe(AutotoolsPackage):
    """Searching for tRNA genes in genomic sequence"""

    homepage = "http://lowelab.ucsc.edu/tRNAscan-SE/"
    url = "http://trna.ucsc.edu/software/trnascan-se-2.0.0.tar.gz"

    version('2.0.0', sha256='0dde1c07142e4bf77b21d53ddf3eeb1ef8c52248005a42323d13f8d7c798100c')

    # tRNAscan-SE 2.0 invokes Infernal at run time.
    depends_on('infernal@1.1.2', type='run', when='@2.0.0')

    def patch(self):
        # Point the config template at the installed Infernal binaries
        # instead of the package's own bin directory.
        filter_file('infernal_dir: {bin_dir}',
                    'infernal_dir: %s' % self.spec['infernal'].prefix.bin,
                    'tRNAscan-SE.conf.src', string=True)
| # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class TrnascanSe(AutotoolsPackage):
    """Searching for tRNA genes in genomic sequence"""

    homepage = "http://lowelab.ucsc.edu/tRNAscan-SE/"
    url = "http://trna.ucsc.edu/software/trnascan-se-2.0.0.tar.gz"

    version('2.0.0', sha256='0dde1c07142e4bf77b21d53ddf3eeb1ef8c52248005a42323d13f8d7c798100c')
| lgpl-2.1 | Python |
3a26c641d1c3988123babbeb56b978b449fa6c98 | Update pipeline.py | pkug/intelmq,robcza/intelmq,certtools/intelmq,certtools/intelmq,sch3m4/intelmq,robcza/intelmq,robcza/intelmq,aaronkaplan/intelmq,aaronkaplan/intelmq,sch3m4/intelmq,robcza/intelmq,sch3m4/intelmq,pkug/intelmq,certtools/intelmq,pkug/intelmq,aaronkaplan/intelmq,pkug/intelmq,sch3m4/intelmq | intelmq/lib/pipeline.py | intelmq/lib/pipeline.py | import redis
import time
class Pipeline():
    """Redis-list backed message pipeline: messages are popped from a source
    queue into an internal shadow queue until acknowledged, and fanned out to
    one or more destination queues."""

    def __init__(self, host="127.0.0.1", port="6379", db=2):
        self.host = host
        self.port = port
        self.db = db
        # Long socket timeout so blocking pops can wait for work.
        self.redis = redis.Redis(
            host = self.host,
            port = int(self.port),
            db = self.db,
            socket_timeout = 50000
        )

    def queues(self, source_queue, destination_queues):
        """Configure the source queue (plus its '-internal' shadow queue used
        for acknowledgement) and the destination queue list."""
        # Accept a whitespace-separated string as well as a list.
        if destination_queues and type(destination_queues) is not list:
            destination_queues = destination_queues.split()
        self.source_queue = source_queue
        if source_queue:
            self.internal_queue = source_queue + "-internal"
        self.destination_queues = destination_queues

    def disconnect(self):
        # Nothing to tear down explicitly.
        pass

    def sleep(self, interval):
        time.sleep(interval)

    def send(self, message):
        # Fan the message out to every configured destination queue.
        for destination_queue in self.destination_queues:
            self.redis.rpush(destination_queue, message)

    def receive(self):
        # Re-deliver an unacknowledged message first; otherwise block until a
        # new one arrives and park it on the internal queue.
        if self.redis.llen(self.internal_queue) > 0:
            return self.redis.lindex(self.internal_queue, -1)
        return self.redis.brpoplpush(self.source_queue, self.internal_queue, 0)

    def acknowledge(self):
        # Drop the in-flight message from the internal queue.
        return self.redis.rpop(self.internal_queue)

    def count_queued_messages(self, queues):
        """Return a mapping of queue name -> current queue length."""
        qdict = dict()
        for queue in queues:
            qdict[queue] = self.redis.llen(queue)
        return qdict


# Algorithm
# ---------
# [Receive] B RPOP LPUSH source_queue -> internal_queue
# [Send] LPUSH message -> destination_queue
# [Acknowledge] RPOP message <- internal_queue
| import redis
import time
class Pipeline():
    """Redis-list backed message pipeline (older revision): messages are
    popped from a source queue into an internal shadow queue until
    acknowledged, and fanned out to destination queues."""

    def __init__(self, host="127.0.0.1", port="6379", db=2):
        self.host = host
        self.port = port
        self.db = db
        # Long socket timeout so blocking pops can wait for work.
        self.redis = redis.Redis(
            host = self.host,
            port = int(self.port),
            db = self.db,
            socket_timeout = 50000
        )

    def queues(self, source_queue, destination_queues):
        """Configure the source queue (plus its '-internal' shadow queue) and
        the destination queue list."""
        # Accept a whitespace-separated string as well as a list.
        if destination_queues and type(destination_queues) is not list:
            destination_queues = destination_queues.split()
        self.source_queue = source_queue
        if source_queue:
            self.internal_queue = source_queue + "-internal"
        self.destination_queues = destination_queues

    def disconnect(self):
        # Nothing to tear down explicitly.
        pass

    def sleep(self, interval):
        time.sleep(interval)

    def send(self, message):
        # Fan the message out to every configured destination queue.
        for destination_queue in self.destination_queues:
            self.redis.rpush(destination_queue, message)

    def receive(self):
        # Block until a message arrives; on a socket timeout, reconnect and
        # retry.  NOTE(review): no connect() method is defined on this class
        # in this snippet -- verify it exists, otherwise the except branch
        # raises AttributeError.
        while True:
            try:
                return self.redis.brpoplpush(self.source_queue, self.internal_queue, 0)
            except redis.TimeoutError:
                self.connect()

    def acknowledge(self):
        # Drop the in-flight message from the internal queue.
        return self.redis.rpop(self.internal_queue)

    def count_queued_messages(self, queues):
        """Return a mapping of queue name -> current queue length."""
        qdict = dict()
        for queue in queues:
            qdict[queue] = self.redis.llen(queue)
        return qdict


# -----------------------
# Receive
# B RPOP LPUSH source_queue -> source_queue_internal
# -----------------------
# Send
# LPUSH object -> destination_queue
# -----------------------
# Acknowledge
# RPOP object <- source_queue_internal
# -----------------------
| agpl-3.0 | Python |
227be653c21a703d8ed0864d23573c25dd3859ce | bump repo version | omry/omegaconf | omegaconf/version.py | omegaconf/version.py | import sys # pragma: no cover
# Current OmegaConf release string.
__version__ = "2.0.0rc19"

msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""

# Refuse to import on interpreters older than 3.6.
if sys.version_info < (3, 6):
    raise ImportError(msg)  # pragma: no cover
| import sys # pragma: no cover
# Previous OmegaConf release string.
__version__ = "2.0.0rc18"

msg = """OmegaConf 2.0 and above is compatible with Python 3.6 and newer.
You have the following options:
1. Upgrade to Python 3.6 or newer.
This is highly recommended. new features will not be added to OmegaConf 1.4.
2. Continue using OmegaConf 1.4:
You can pip install 'OmegaConf<1.5' to do that.
"""

# Refuse to import on interpreters older than 3.6.
if sys.version_info < (3, 6):
    raise ImportError(msg)  # pragma: no cover
| bsd-3-clause | Python |
ce9da309294f2520f297980d80773160f050e8bf | Add __all__ in exceptions module. | Kami/python-yubico-client | yubico/yubico_exceptions.py | yubico/yubico_exceptions.py | __all___ = [
'YubicoError',
'StatusCodeError',
'InvalidClientIdError',
'SignatureVerificationError'
]
# NOTE(review): the export list assigned just above is spelled ``__all___``
# (three trailing underscores) -- should be ``__all__``; verify upstream.
class YubicoError(Exception):
    """Root of the exception hierarchy raised by the Yubico client."""
    pass


class StatusCodeError(YubicoError):
    """Signals a non-OK status code in the validation server's reply."""

    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        message = 'Yubico server returned the following status code: %s'
        return message % self.status_code


class InvalidClientIdError(YubicoError):
    """Signals that the supplied client ID is not known to the server."""

    def __init__(self, client_id):
        self.client_id = client_id

    def __str__(self):
        return 'The client with ID %s does not exist' % self.client_id


class SignatureVerificationError(YubicoError):
    """Signals a mismatch between expected and received response signatures."""

    def __init__(self, generated_signature, response_signature):
        self.generated_signature = generated_signature
        self.response_signature = response_signature

    def __str__(self):
        detail = '(expected %s, got %s)' % (self.generated_signature,
                                            self.response_signature)
        return repr('Server response message signature verification failed' + detail)
| class YubicoError(Exception):
""" Base class for Yubico related exceptions. """
pass
class StatusCodeError(YubicoError):
    # Raised for a non-OK status code in the validation server's reply.
    def __init__(self, status_code):
        self.status_code = status_code

    def __str__(self):
        return ('Yubico server returned the following status code: %s' %
                (self.status_code))


class InvalidClientIdError(YubicoError):
    # Raised when the supplied client ID is not known to the server.
    def __init__(self, client_id):
        self.client_id = client_id

    def __str__(self):
        return 'The client with ID %s does not exist' % (self.client_id)


class SignatureVerificationError(YubicoError):
    # Raised when the response signature does not match the expected one.
    def __init__(self, generated_signature, response_signature):
        self.generated_signature = generated_signature
        self.response_signature = response_signature

    def __str__(self):
        # NOTE(review): repr() wraps the message in quotes -- verify that is
        # intentional.
        return repr('Server response message signature verification failed' +
                    '(expected %s, got %s)' % (self.generated_signature,
                                               self.response_signature))
| bsd-3-clause | Python |
b20320301eb311bb1345061a8b74ac63495051b1 | Add trailing slash to /docs/api/ url pattern. This provides more flexibility for end users in that they can choose to navigate to /docs/api or /docs/api/ successfully. Without the trailing slash in the url pattern, /docs/api/ returns a 404. | juanique/django-tastydocs,juanique/django-tastydocs,juanique/django-tastydocs,juanique/django-tastydocs | tastydocs/urls.py | tastydocs/urls.py | from django.conf.urls.defaults import patterns
from views import doc
# URL routes: the generated API documentation page (trailing slash accepted)
# and a per-resource example-data endpoint.
urlpatterns = patterns(
    '',
    (r'^api/$', doc),
    (r'^example/(?P<resource_name>\w+)/', 'tastydocs.views.example_data'),
)
| from django.conf.urls.defaults import patterns
from views import doc
# URL routes: the generated API documentation page (no trailing slash) and a
# per-resource example-data endpoint.
urlpatterns = patterns(
    '',
    (r'^api$', doc),
    (r'^example/(?P<resource_name>\w+)/', 'tastydocs.views.example_data'),
)
| bsd-3-clause | Python |
6f5df9830482e8b9ffcf810dcc14131d349c219e | update api config | apipanda/openssl,apipanda/openssl,apipanda/openssl,apipanda/openssl | apps/api/config.py | apps/api/config.py | from __future__ import unicode_literals
from django.apps import AppConfig
class AppConfig(AppConfig):
    # Configuration for the 'api' app; the class deliberately shadows the
    # imported AppConfig base name.
    name = 'api'
    verbose_name = 'Api Application'

    def ready(self):
        # Imported for side effects only -- presumably registers the
        # create_api_key signal handler; verify in the signals module.
        from .signals import create_api_key
| from __future__ import unicode_literals
from django.apps import AppConfig
class AppConfig(AppConfig):
    # Older revision: app label 'app'; the class deliberately shadows the
    # imported AppConfig base name.
    name = 'app'
    verbose_name = 'Api Application'

    def ready(self):
        # Imported for side effects only -- presumably registers the
        # create_api_key signal handler; verify in the signals module.
        from .signals import create_api_key
| mit | Python |
4c94da43060f0c5a3dd59102c1499bc9c947a9d9 | Update mail_mail.py | eicher31/compassion-modules,maxime-beck/compassion-modules,ecino/compassion-modules,eicher31/compassion-modules,emgirardin/compassion-modules,emgirardin/compassion-modules,emgirardin/compassion-modules,eicher31/compassion-modules,ecino/compassion-modules,maxime-beck/compassion-modules,ecino/compassion-modules,maxime-beck/compassion-modules,CompassionCH/compassion-modules,maxime-beck/compassion-modules,ecino/compassion-modules,eicher31/compassion-modules,eicher31/compassion-modules,CompassionCH/compassion-modules,CompassionCH/compassion-modules,CompassionCH/compassion-modules,ecino/compassion-modules | mail_sendgrid_mass_mailing/models/mail_mail.py | mail_sendgrid_mass_mailing/models/mail_mail.py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models
class MailMail(models.Model):
_inherit = "mail.mail"
def _prepare_sendgrid_tracking(self):
track_vals = super(MailMail, self)._prepare_sendgrid_tracking()
track_vals.update({
'mail_id_int': self.id,
'mass_mailing_id': self.mailing_id.id,
'mail_stats_id': self.statistics_ids[:1].id
if self.statistics_ids else False
})
return track_vals
def _track_sendgrid_emails(self):
""" Push tracking_email in mass_mail_statistic """
tracking_emails = super(MailMail, self)._track_sendgrid_emails()
for tracking in tracking_emails.filtered('mail_stats_id'):
tracking.mail_stats_id.mail_tracking_id = tracking.id
return tracking_emails
| # -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2016 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emanuel Cino <ecino@compassion.ch>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from openerp import models
class MailMail(models.Model):
_inherit = "mail.mail"
def _prepare_sendgrid_tracking(self):
track_vals = super(MailMail, self)._prepare_sendgrid_tracking()
track_vals.update({
'mail_id_int': self.id,
'mass_mailing_id': self.mailing_id.id,
'mail_stats_id': self.statistics_ids[:1].id
if self.statistics_ids else False
})
return track_vals
def _track_sendgrid_emails(self):
""" Push tracking_email in mass_mail_statistic """
tracking_emails = super(MailMail, self)._track_sendgrid_emails()
for tracking in tracking_emails:
tracking.mail_stats_id.mail_tracking_id = tracking.id
return tracking_emails
| agpl-3.0 | Python |
682f45ffd222dc582ee770a0326c962540657c68 | Fix an error in __unicode__ | rdegges/django-twilio,aditweb/django-twilio | django_twilio/models.py | django_twilio/models.py | # -*- coding: utf-8 -*-
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class Caller(models.Model):
"""A caller is defined uniquely by their phone number.
:param bool blacklisted: Designates whether the caller can use our
services.
:param char phone_number: Unique phone number in `E.164
<http://en.wikipedia.org/wiki/E.164>`_ format.
"""
blacklisted = models.BooleanField()
phone_number = PhoneNumberField(unique=True)
def __unicode__(self):
name = str(self.phone_number)
if self.blacklisted:
name += ' (blacklisted)'
return name
| # -*- coding: utf-8 -*-
from django.db import models
from phonenumber_field.modelfields import PhoneNumberField
class Caller(models.Model):
"""A caller is defined uniquely by their phone number.
:param bool blacklisted: Designates whether the caller can use our
services.
:param char phone_number: Unique phone number in `E.164
<http://en.wikipedia.org/wiki/E.164>`_ format.
"""
blacklisted = models.BooleanField()
phone_number = PhoneNumberField(unique=True)
def __unicode__(self):
return str(self.phone_number) + ' (blacklisted)' if self.blacklisted else ''
| unlicense | Python |
0534a96d7872ec13b14d084c62454c4cdfa711ed | fix breakage ("TypeError: cannot use a string pattern on a bytes-like object") | mmetak/streamlink,gravyboat/streamlink,bastimeyer/streamlink,melmorabity/streamlink,bastimeyer/streamlink,melmorabity/streamlink,beardypig/streamlink,gravyboat/streamlink,sbstp/streamlink,mmetak/streamlink,beardypig/streamlink,fishscene/streamlink,javiercantero/streamlink,streamlink/streamlink,fishscene/streamlink,back-to/streamlink,sbstp/streamlink,javiercantero/streamlink,back-to/streamlink,chhe/streamlink,ethanhlc/streamlink,wlerin/streamlink,chhe/streamlink,streamlink/streamlink,wlerin/streamlink,ethanhlc/streamlink | src/livestreamer/plugins/livecodingtv.py | src/livestreamer/plugins/livecodingtv.py | import re
from livestreamer.plugin import Plugin
from livestreamer.stream import RTMPStream, HTTPStream
from livestreamer.plugin.api import http
_vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"')
_rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+')
_url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv')
class LivecodingTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
match = _rtmp_re.search(res.content.decode('utf-8'))
if match:
params = {
"rtmp": match.group(0),
"pageUrl": self.url,
"live": True,
}
yield 'live', RTMPStream(self.session, params)
return
match = _vod_re.search(res.content.decode('utf-8'))
if match:
yield 'vod', HTTPStream(self.session, match.group(1))
__plugin__ = LivecodingTV
| import re
from livestreamer.plugin import Plugin
from livestreamer.stream import RTMPStream, HTTPStream
from livestreamer.plugin.api import http
_vod_re = re.compile('\"(http(s)?://.*\.mp4\?t=.*)\"')
_rtmp_re = re.compile('rtmp://[^"]+/(?P<channel>\w+)+[^/"]+')
_url_re = re.compile('http(s)?://(?:\w+.)?\livecoding\.tv')
class LivecodingTV(Plugin):
@classmethod
def can_handle_url(cls, url):
return _url_re.match(url)
def _get_streams(self):
res = http.get(self.url)
match = _rtmp_re.search(res.content)
if match:
params = {
"rtmp": match.group(0),
"pageUrl": self.url,
"live": True,
}
yield 'live', RTMPStream(self.session, params)
return
match = _vod_re.search(res.content)
if match:
yield 'vod', HTTPStream(self.session, match.group(1))
__plugin__ = LivecodingTV
| bsd-2-clause | Python |
aadf177df60fa9cf1470732edf8ea9cab6eef79e | Remove commented fixture code | praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control,praekelt/ndoh-control | subscription/tests.py | subscription/tests.py | """
Tests for Subscription Application
"""
from tastypie.test import ResourceTestCase
from django.contrib.auth.models import User
class SubscriptionResourceTest(ResourceTestCase):
def setUp(self):
super(SubscriptionResourceTest, self).setUp()
# Create a user.
self.username = 'testuser'
self.password = 'testpass'
self.user = User.objects.create_user(self.username,
'testuser@example.com', self.password)
self.api_key = self.user.api_key.key
def get_credentials(self):
return self.create_apikey(self.username, self.api_key)
def test_get_list_unauthorzied(self):
self.assertHttpUnauthorized(self.api_client.get('/api/v1/subscription/', format='json'))
def test_get_list_json(self):
resp = self.api_client.get('/api/v1/subscription/', format='json', authentication=self.get_credentials())
self.assertValidJSONResponse(resp)
# Scope out the data for correctness.
self.assertEqual(len(self.deserialize(resp)['objects']), 0)
| """
Tests for Subscription Application
"""
from tastypie.test import ResourceTestCase
from django.contrib.auth.models import User
class SubscriptionResourceTest(ResourceTestCase):
# fixtures = ['test_subscription.json']
def setUp(self):
super(SubscriptionResourceTest, self).setUp()
# Create a user.
self.username = 'testuser'
self.password = 'testpass'
self.user = User.objects.create_user(self.username,
'testuser@example.com', self.password)
self.api_key = self.user.api_key.key
def get_credentials(self):
return self.create_apikey(self.username, self.api_key)
def test_get_list_unauthorzied(self):
self.assertHttpUnauthorized(self.api_client.get('/api/v1/subscription/', format='json'))
def test_get_list_json(self):
resp = self.api_client.get('/api/v1/subscription/', format='json', authentication=self.get_credentials())
self.assertValidJSONResponse(resp)
# Scope out the data for correctness.
self.assertEqual(len(self.deserialize(resp)['objects']), 0)
| bsd-3-clause | Python |
07556d5cb011c16bc0d73f2e2dca753481b390a4 | add supplement product and ingredient comp | jeffshek/betterself,jeffshek/betterself,jeffshek/betterself,jeffshek/betterself | supplements/models.py | supplements/models.py | from django.db import models
from betterself.mixins import BaseTimeModel
class Ingredient(BaseTimeModel):
# if some ingredient is longer than 300 characters, prob shouldn't take it.
name = models.CharField(max_length=300)
user_generated = models.BooleanField(default=False)
half_life_minutes = models.PositiveIntegerField(null=True, blank=True)
class MeasurementUnit(BaseTimeModel):
name = models.CharField(max_length=100) # 'milligram'
short_name = models.CharField(max_length=100) # 'ml'
is_liquid = models.BooleanField(default=False)
class IngredientComposition(BaseTimeModel):
"""
Creatine, 5, grams
"""
ingredient = models.ManyToManyField(Ingredient)
measurement_unit = models.ForeignKey(MeasurementUnit)
quantity = models.FloatField(default=1)
class SupplementProduct(BaseTimeModel):
"""
Could be a stack like BCAA (which would have 4 ingredient comps)
Or could just be something simple like Caffeine.
"""
ingredient_composition = models.ForeignKey(IngredientComposition)
user_generated = models.BooleanField(default=False)
# company = models.ForeignKey()
# is this the best place to denote user derived products?
# user = models.ForeignKey()
| from django.db import models
from betterself.mixins import BaseTimeModel
class Ingredient(BaseTimeModel):
# if some ingredient is longer than 300 characters, prob shouldn't take it.
name = models.CharField(max_length=300)
user_generated = models.BooleanField(default=False)
half_life_minutes = models.PositiveIntegerField(null=True, blank=True)
| mit | Python |
046ffb6edcf660879681926dd9a0b0c5774ef4e8 | Use a new classification instead of input and output | benjello/openfisca-france-indirect-taxation,openfisca/openfisca-france-indirect-taxation,antoinearnoud/openfisca-france-indirect-taxation,thomasdouenne/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/__init__.py | openfisca_france_indirect_taxation/__init__.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from openfisca_core.formulas import make_reference_formula_decorator
from openfisca_core.taxbenefitsystems import AbstractTaxBenefitSystem
from .entities import entity_class_by_symbol
from .param.param import legislation_json
from .scenarios import Scenario
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
CURRENCY = u"€"
# TaxBenefitSystems
def init_country():
class TaxBenefitSystem(AbstractTaxBenefitSystem):
entity_class_by_key_plural = {
entity_class.key_plural: entity_class
for entity_class in entity_class_by_symbol.itervalues()
}
legislation_json = legislation_json
# Define class attributes after class declaration to avoid "name is not defined" exceptions.
TaxBenefitSystem.Scenario = Scenario
from .model import model # noqa analysis:ignore
return TaxBenefitSystem
reference_formula = make_reference_formula_decorator(entity_class_by_symbol = entity_class_by_symbol)
| # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from openfisca_core.formulas import make_reference_formula_decorator
from openfisca_core.taxbenefitsystems import AbstractTaxBenefitSystem
from .entities import entity_class_by_symbol
from .param.param import legislation_json
from .scenarios import Scenario
COUNTRY_DIR = os.path.dirname(os.path.abspath(__file__))
CURRENCY = u"€"
# TaxBenefitSystems
def init_country():
class TaxBenefitSystem(AbstractTaxBenefitSystem):
entity_class_by_key_plural = {
entity_class.key_plural: entity_class
for entity_class in entity_class_by_symbol.itervalues()
}
legislation_json = legislation_json
# Define class attributes after class declaration to avoid "name is not defined" exceptions.
TaxBenefitSystem.Scenario = Scenario
from model import input_variables # noqa analysis:ignore
from model import output_variables # noqa analysis:ignore
return TaxBenefitSystem
reference_formula = make_reference_formula_decorator(entity_class_by_symbol = entity_class_by_symbol)
| agpl-3.0 | Python |
082464c30dc34a7b36cd4763c324af55a8ed31e5 | Fix a typo in comment | hashbangstudio/Python-Minecraft-Examples | 03-sendTextFromFileToChat.py | 03-sendTextFromFileToChat.py | # We have to import the minecraft api module to do anything in the minecraft world
from mcpi.minecraft import *
# We have to import sys module to get the command line arguments
import sys
if __name__ == "__main__":
"""
First thing you do is create a connection to minecraft
This is like dialling a phone.
It sets up a communication line between your script and the minecraft world
"""
# Create a connection to Minecraft
# Any communication with the world must use this object
mc = Minecraft.create()
# Check that have the appropriate number of command line arguments (one in this case)
# sys.argv is a list of the command line arguments given
numOfArguments = len(sys.argv)
if (numOfArguments == 2):
# get the name of the file to open
filenameToOpen = sys.argv[1]
# print to the python interpreter standard output (terminal or IDLE probably)
print ("Opening " + filenameToOpen+'\n')
"""
Open the file
Read through the file line by line and post it to the chat
This uses a try catch statement when opening the file
The code will only send to chat if the file is opened and read successfully
If an error (Exception) occurs it is printed out to the console
"""
try:
# this opens up the text file in read only ('r') mode
textFile = open(filenameToOpen, mode='r')
# Each line in the text file must be sent as a separate message
# go through the text file line by line
for line in textFile:
# send message to the minecraft chat
mc.postToChat(line)
except IOError as e:
print("Failed to open file")
print "I/O error({0}): {1}".format(e.errno, e.strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
textFile.close()
else:
# use num of arguments -1 as first argument is the name of this script
print("Expected only one filename as argument, but received "+ str(numOfArguments-1))
| # We have to import the minecraft api module to do anything in the minecraft world
from mcpi.minecraft import *
# We have to import sys module to get the command line arguments
import sys
if __name__ == "__main__":
"""
First thing you do is create a connection to minecraft
This is like dialling a phone.
It sets up a communication line between your script and the minecraft world
"""
# Create a connection to Minecraft
# Any communication with the world must use this object
mc = Minecraft.create()
# Check that have the appropriate number of command line arguments (one in this case)
# sys.argv is a list of the command line arguments given
numOfArguments = len(sys.argv)
if (numOfArguments == 2):
# get the name of the file to open
filenameToOpen = sys.argv[1]
# print to the python interpreter standard output (terminal or IDLE probably)
print ("Opening " + filenameToOpen+'\n')
"""
Open the file
Read through the file line by line and post it to the chat
This uses a try catch statement when opening the file
The code will only send to chat if the file is opened and read successfully
If an error (Exception) occurs it is printed out to the console
"""
try:
# this opup the text file in read only ('r') mode
textFile = open(filenameToOpen, mode='r')
# Each line in the text file must be sent as a separate message
# go through the text file line by line
for line in textFile:
# send message to the minecraft chat
mc.postToChat(line)
except IOError as e:
print("Failed to open file")
print "I/O error({0}): {1}".format(e.errno, e.strerror)
except:
print "Unexpected error:", sys.exc_info()[0]
raise
finally:
textFile.close()
else:
# use num of arguments -1 as first argument is the name of this script
print("Expected only one filename as argument, but received "+ str(numOfArguments-1))
| bsd-3-clause | Python |
e9bf339261636647a2af096d8cf45c2ee6cf202f | use emacs report style | SublimeLinter/SublimeLinter-phpcs | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Dmitry Tsoy
# Copyright (c) 2013 Dmitry Tsoy
#
# License: MIT
#
"""This module exports the Phpcs plugin class."""
from SublimeLinter.lint import Linter
class Phpcs(Linter):
"""Provides an interface to phpcs."""
syntax = ('php', 'html', 'html 5')
cmd = ('phpcs', '--report=emacs', '${args}', '-')
regex = r'^.*:(?P<line>[0-9]+):(?P<col>[0-9]+): (?:(?P<error>error)|(?P<warning>warning)) - (?P<message>.+)'
defaults = {
# we want auto-substitution of the filename, but `cmd` does not support that yet
'--stdin-path=': '${file}',
'--standard=': 'PSR2',
}
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Dmitry Tsoy
# Copyright (c) 2013 Dmitry Tsoy
#
# License: MIT
#
"""This module exports the Phpcs plugin class."""
from SublimeLinter.lint import Linter
class Phpcs(Linter):
"""Provides an interface to phpcs."""
syntax = ('php', 'html', 'html 5')
cmd = ('phpcs', '--report=checkstyle', '${args}', '-')
regex = (
r'.*line="(?P<line>\d+)" '
r'column="(?P<col>\d+)" '
r'severity="(?:(?P<error>error)|(?P<warning>warning))" '
r'message="(?P<message>.*)" source'
)
defaults = {
# we want auto-substitution of the filename, but `cmd` does not support that yet
'--stdin-path=': '${file}',
'--standard=': 'PSR2',
}
| mit | Python |
50a860a9286e03a3deb6b93161cf351d80d3f2ce | Fix pep257 | sirreal/SublimeLinter-contrib-govet | linter.py | linter.py | #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jon Surrell
# Copyright (c) 2014 Jon Surrell
#
# License: MIT
#
"""This module exports the Govet plugin class."""
from SublimeLinter.lint import Linter, util
class Govet(Linter):
"""Provides an interface to go vet."""
syntax = ('go', 'gosublime-go')
cmd = ('go', 'tool', 'vet')
regex = r'^.+:(?P<line>\d+):\s+(?P<message>.+)'
tempfile_suffix = 'go'
error_stream = util.STREAM_STDERR
"""Let the linter higlight column 0"""
def split_match(self, match):
match, line, col, error, warning, message, near = super().split_match(match)
return match, line, 0, error, warning, message, near
| #
# linter.py
# Linter for SublimeLinter3, a code checking framework for Sublime Text 3
#
# Written by Jon Surrell
# Copyright (c) 2014 Jon Surrell
#
# License: MIT
#
"""This module exports the Govet plugin class."""
from SublimeLinter.lint import Linter, util
class Govet(Linter):
"""Provides an interface to go vet."""
syntax = ('go', 'gosublime-go')
cmd = ('go', 'tool', 'vet')
regex = r'^.+:(?P<line>\d+):\s+(?P<message>.+)'
tempfile_suffix = 'go'
error_stream = util.STREAM_STDERR
def split_match(self, match):
match, line, col, error, warning, message, near = super().split_match(match)
return match, line, 0, error, warning, message, near
| mit | Python |
0ea00d932ef16aad3cb2fdbacedd30181c786778 | increase timeout of autotest | cladmi/RIOT,miri64/RIOT,ant9000/RIOT,authmillenon/RIOT,OTAkeys/RIOT,josephnoir/RIOT,mtausig/RIOT,kaspar030/RIOT,yogo1212/RIOT,x3ro/RIOT,OTAkeys/RIOT,OlegHahm/RIOT,kYc0o/RIOT,authmillenon/RIOT,ant9000/RIOT,mfrey/RIOT,yogo1212/RIOT,OlegHahm/RIOT,OlegHahm/RIOT,A-Paul/RIOT,mfrey/RIOT,smlng/RIOT,kYc0o/RIOT,authmillenon/RIOT,BytesGalore/RIOT,OlegHahm/RIOT,kYc0o/RIOT,mtausig/RIOT,rfuentess/RIOT,toonst/RIOT,yogo1212/RIOT,rfuentess/RIOT,RIOT-OS/RIOT,aeneby/RIOT,x3ro/RIOT,yogo1212/RIOT,josephnoir/RIOT,josephnoir/RIOT,jasonatran/RIOT,cladmi/RIOT,jasonatran/RIOT,smlng/RIOT,mtausig/RIOT,yogo1212/RIOT,A-Paul/RIOT,x3ro/RIOT,josephnoir/RIOT,toonst/RIOT,kaspar030/RIOT,kaspar030/RIOT,authmillenon/RIOT,lazytech-org/RIOT,lazytech-org/RIOT,x3ro/RIOT,kaspar030/RIOT,RIOT-OS/RIOT,basilfx/RIOT,RIOT-OS/RIOT,A-Paul/RIOT,jasonatran/RIOT,kYc0o/RIOT,A-Paul/RIOT,smlng/RIOT,cladmi/RIOT,jasonatran/RIOT,mfrey/RIOT,rfuentess/RIOT,jasonatran/RIOT,BytesGalore/RIOT,basilfx/RIOT,josephnoir/RIOT,toonst/RIOT,x3ro/RIOT,aeneby/RIOT,miri64/RIOT,smlng/RIOT,lazytech-org/RIOT,BytesGalore/RIOT,miri64/RIOT,mtausig/RIOT,mtausig/RIOT,rfuentess/RIOT,yogo1212/RIOT,ant9000/RIOT,ant9000/RIOT,basilfx/RIOT,ant9000/RIOT,aeneby/RIOT,toonst/RIOT,mfrey/RIOT,authmillenon/RIOT,cladmi/RIOT,rfuentess/RIOT,OTAkeys/RIOT,kYc0o/RIOT,authmillenon/RIOT,BytesGalore/RIOT,basilfx/RIOT,kaspar030/RIOT,smlng/RIOT,cladmi/RIOT,BytesGalore/RIOT,basilfx/RIOT,aeneby/RIOT,toonst/RIOT,OTAkeys/RIOT,RIOT-OS/RIOT,RIOT-OS/RIOT,miri64/RIOT,OTAkeys/RIOT,lazytech-org/RIOT,aeneby/RIOT,miri64/RIOT,OlegHahm/RIOT,lazytech-org/RIOT,mfrey/RIOT,A-Paul/RIOT | tests/pkg_libcose/tests/01-run.py | tests/pkg_libcose/tests/01-run.py | #!/usr/bin/env python3
# Copyright (C) 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
from testrunner import run
# on real hardware, this test application can take several minutes to
# complete (~4min on microbit)
HW_TIMEOUT = 300
def testfunc(child):
board = os.environ['BOARD']
# Increase timeout on "real" hardware
timeout = HW_TIMEOUT if board is not 'native' else -1
child.expect('OK \(\d+ tests\)', timeout=timeout)
if __name__ == "__main__":
sys.exit(run(testfunc))
| #!/usr/bin/env python3
# Copyright (C) 2017 Freie Universität Berlin
#
# This file is subject to the terms and conditions of the GNU Lesser
# General Public License v2.1. See the file LICENSE in the top level
# directory for more details.
import os
import sys
from testrunner import run
def testfunc(child):
board = os.environ['BOARD']
# Increase timeout on "real" hardware
timeout = 120 if board is not 'native' else -1
child.expect('OK \(\d+ tests\)', timeout=timeout)
if __name__ == "__main__":
sys.exit(run(testfunc))
| lgpl-2.1 | Python |
444ce71f2ca92a535270ac34d7201e81de002af5 | fix tests | throwable-one/teamcity-messages | tests/unit-tests/messages_test.py | tests/unit-tests/messages_test.py | from teamcity.messages import TeamcityServiceMessages
from datetime import datetime
class StreamStub(object):
def __init__(self):
self.observed_output = ''
def write(self, msg):
self.observed_output += msg
def flush(self):
pass
fixed_date = datetime(2000, 11, 2, 10, 23, 1, 556789)
def test_no_properties():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556']\n"
def test_one_property():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage', fruit='apple')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556' fruit='apple']\n"
def test_three_properties():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage', fruit='apple', meat='steak', pie='raspberry')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556' fruit='apple' meat='steak' pie='raspberry']\n"
| from teamcity.messages import TeamcityServiceMessages
from datetime import datetime
class StreamStub(object):
def __init__(self):
self.observed_output = ''
def write(self, msg):
self.observed_output += msg
fixed_date = datetime(2000, 11, 2, 10, 23, 1, 556789)
def test_no_properties():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556']\n"
def test_one_property():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage', fruit='apple')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556' fruit='apple']\n"
def test_three_properties():
stream = StreamStub()
messages = TeamcityServiceMessages(output=stream, now=lambda: fixed_date)
messages.message('dummyMessage', fruit='apple', meat='steak', pie='raspberry')
assert stream.observed_output == "\n##teamcity[dummyMessage timestamp='2000-11-02T10:23:01.556' fruit='apple' meat='steak' pie='raspberry']\n"
| apache-2.0 | Python |
b4e6cdbf7eb8cab80352d6058fe370b386e38928 | remove uuid from test | sk2/autonetkit | tests/webserver/test_webserver.py | tests/webserver/test_webserver.py | import autonetkit
import os
import autonetkit.load.graphml as graphml
import shutil
automated = True # whether to open ksdiff, log to file...
if __name__ == "__main__":
automated = False
dirname, filename = os.path.split(os.path.abspath(__file__))
parent_dir = os.path.abspath(os.path.join(dirname, os.pardir))
anm = autonetkit.ANM()
input_file = os.path.join(parent_dir, "small_internet.graphml")
input_graph = graphml.load_graphml(input_file)
import autonetkit.build_network as build_network
anm = build_network.initialise(input_graph)
anm = build_network.apply_design_rules(anm)
try:
from websocket import create_connection
except ImportError:
print "websocket-client package not installed"
else:
autonetkit.update_http(anm)
ws = create_connection("ws://localhost:8000/ws")
ws.send("overlay_list")
result = ws.recv()
expected = '{"overlay_list": ["bgp", "ebgp", "ebgp_v4", "ebgp_v6", "eigrp", graphics", "ibgp_v4", "ibgp_v6", "ibgp_vpn_v4", "input", "input_directed", "ip", "ipv4", "ipv6", "isis", "l3_conn", "ospf", "phy", "vrf"]}'
assert(result == expected)
overlay_id = "phy"
ws.send("overlay_id=" + overlay_id)
result = ws.recv()
with open(os.path.join(dirname, "expected_phy.json"), "r") as fh:
expected = fh.read()
assert(result == expected)
ws.close()
import autonetkit.console_script as console_script
render_hostname = "localhost"
nidb = console_script.create_nidb(anm)
nidb._graph.graph['timestamp'] = "123456"
import autonetkit.compilers.platform.netkit as pl_netkit
nk_compiler = pl_netkit.NetkitCompiler(nidb, anm, render_hostname)
nk_compiler.compile()
autonetkit.update_http(anm, nidb)
ws = create_connection("ws://localhost:8000/ws")
ws.send("overlay_list")
result = ws.recv()
expected = '{"overlay_list": ["bgp", "ebgp", "ebgp_v4", "ebgp_v6", "eigrp", graphics", "ibgp_v4", "ibgp_v6", "ibgp_vpn_v4", "input", "input_directed", "ip", "ipv4", "ipv6", "isis", "l3_conn", "nidb", "ospf", "phy", "vrf"]}'
assert(result == expected)
overlay_id = "nidb"
ws.send("overlay_id=" + overlay_id)
result = ws.recv()
with open(os.path.join(dirname, "expected_nidb.json"), "r") as fh:
expected = fh.read()
assert(result == expected)
ws.close()
#TODO: test highlight, and getting response back (needs callback, refer https://pypi.python.org/pypi/websocket-client/0.7.0)
| import autonetkit
import os
import autonetkit.load.graphml as graphml
import shutil
automated = True # whether to open ksdiff, log to file...
if __name__ == "__main__":
automated = False
dirname, filename = os.path.split(os.path.abspath(__file__))
parent_dir = os.path.abspath(os.path.join(dirname, os.pardir))
anm = autonetkit.ANM()
input_file = os.path.join(parent_dir, "small_internet.graphml")
input_graph = graphml.load_graphml(input_file)
import autonetkit.build_network as build_network
anm = build_network.initialise(input_graph)
anm = build_network.apply_design_rules(anm)
anm['phy'].data.uuid = "my_uuid"
try:
from websocket import create_connection
except ImportError:
print "websocket-client package not installed"
else:
autonetkit.update_http(anm)
ws = create_connection("ws://localhost:8000/ws")
ws.send("overlay_list")
result = ws.recv()
expected = '{"overlay_list": ["bgp", "ebgp", "ebgp_v4", "ebgp_v6", "eigrp", graphics", "ibgp_v4", "ibgp_v6", "ibgp_vpn_v4", "input", "input_directed", "ip", "ipv4", "ipv6", "isis", "l3_conn", "ospf", "phy", "vrf"]}'
assert(result == expected)
overlay_id = "phy"
ws.send("overlay_id=" + overlay_id)
result = ws.recv()
with open(os.path.join(dirname, "expected_phy.json"), "r") as fh:
expected = fh.read()
assert(result == expected)
ws.close()
import autonetkit.console_script as console_script
render_hostname = "localhost"
nidb = console_script.create_nidb(anm)
nidb._graph.graph['timestamp'] = "123456"
import autonetkit.compilers.platform.netkit as pl_netkit
nk_compiler = pl_netkit.NetkitCompiler(nidb, anm, render_hostname)
nk_compiler.compile()
autonetkit.update_http(anm, nidb)
ws = create_connection("ws://localhost:8000/ws")
ws.send("overlay_list")
result = ws.recv()
expected = '{"overlay_list": ["bgp", "ebgp", "ebgp_v4", "ebgp_v6", "eigrp", graphics", "ibgp_v4", "ibgp_v6", "ibgp_vpn_v4", "input", "input_directed", "ip", "ipv4", "ipv6", "isis", "l3_conn", "nidb", "ospf", "phy", "vrf"]}'
assert(result == expected)
overlay_id = "nidb"
ws.send("overlay_id=" + overlay_id)
result = ws.recv()
with open(os.path.join(dirname, "expected_nidb.json"), "r") as fh:
expected = fh.read()
assert(result == expected)
ws.close()
#TODO: test highlight, and getting response back (needs callback, refer https://pypi.python.org/pypi/websocket-client/0.7.0)
| bsd-3-clause | Python |
a8e445f24e662d34cf9b76e6e52212bf0dda1d2a | Connect and disconnect every time we need to send aprs data. | elielsardanons/dstar_sniffer,elielsardanons/dstar_sniffer | dstar_sniffer/aprs_lib/aprsis.py | dstar_sniffer/aprs_lib/aprsis.py | import aprslib
import logging
import nmea
from passcode import passcode_generator
def to_aprs_callsign(dstar_callsign):
module = dstar_callsign[-1:]
return dstar_callsign[:-1].strip() + "-" + module
def aprsis_dstar_callback(dstar_stream):
if 'D74' in dstar_stream['sfx'] and '$GPGGA' in dstar_stream['gps']:
# detect kenwood HTs and send aprs beacons.
# Connect to APRS-IS network if not already connected for the specific rpt module.
frame = get_beacon_gpgga(dstar_stream['my'], dstar_stream['sfx'], dstar_stream['message'], dstar_stream['gps']['$GPGGA'])
elif 'DPRS' in dstar_stream['gps']:
#detect ICOM GPS-A dprs format and send aprs beacon
frame = get_beacon_dprs(dstar_stream['gps']['DPRS'])
else:
logger.info("Nothing to do with: %s /%s" % (dstar_stream['my'], dstar_stream['sfx']))
return
rpt_callsign = to_aprs_callsign(dstar_stream['rpt1'])
logger.info("Sending frame: %s" % frame)
aprs = aprslib.IS(rpt_callsign, passcode_generator(rpt_callsign))
aprs.connect()
aprs.sendall(frame)
aprs.close()
def get_beacon_dprs(dprs_sentence):
aprs_frame = dprs_sentence.split(",", 1)[1]
return aprs_frame
def get_beacon_gpgga(callsign, sfx, message, gpgga):
position = nmea.gpgga_get_position(gpgga)
height = ''
if 'height' in position:
height = '/A=' + position['height']
aprs_frame = callsign.strip()+'>APK'+sfx.strip()+',DSTAR*:!'+position['lat'] + position['lat_coord'] + '\\'+position['long']+position['long_coord']+'a' + height + message.strip()
return aprs_frame
| import aprslib
import logging
import nmea
from passcode import passcode_generator
def to_aprs_callsign(dstar_callsign):
module = dstar_callsign[-1:]
return dstar_callsign[:-1].strip() + "-" + module
def aprsis_dstar_callback(dstar_stream):
rpt_callsign = to_aprs_callsign(dstar_stream['rpt1'])
if 'D74' in dstar_stream['sfx'] and '$GPGGA' in dstar_stream['gps']:
# detect kenwood HTs and send aprs beacons.
# Connect to APRS-IS network if not already connected for the specific rpt module.
aprsIS = AprsIS(rpt_callsign)
aprsIS.send_beacon_gpgga(rpt_callsign, dstar_stream['my'], dstar_stream['sfx'], dstar_stream['message'], dstar_stream['gps']['$GPGGA'])
elif 'DPRS' in dstar_stream['gps']:
#detect ICOM GPS-A dprs format and send aprs beacon
aprsIS = AprsIS(rpt_callsign)
aprsIS.send_beacon_dprs(rpt_callsign, dstar_stream['gps']['DPRS'])
else:
logger.info("Nothing to do with: %s /%s" % (dstar_stream['my'], dstar_stream['sfx']))
class AprsIS:
instance = None
class __AprsIS:
aprs_connection = {}
logger = {}
def __init__(self):
pass
def add_connection(self, callsign):
self.logger[callsign] = logging.getLogger(__name__ + "-" + callsign)
self.aprs_connection[callsign] = aprslib.IS(callsign, passcode_generator(callsign))
self.aprs_connection[callsign].connect()
def __init__(self, callsign):
if AprsIS.instance == None:
AprsIS.instance = AprsIS.__AprsIS()
if callsign not in AprsIS.instance.aprs_connection:
AprsIS.instance.add_connection(callsign)
def __getattr__(self, name):
return getattr(self.instance, name)
def send_beacon_dprs(self, rpt1, dprs_sentence):
aprs_frame = dprs_sentence.split(",", 1)[1]
self.logger[rpt1].info("Sending APRS Frame from DPRS: " + aprs_frame)
try:
self.aprs_connection[rpt1].sendall(aprs_frame)
self.logger[rpt1].info("APRS Beacon sent!")
except Exception, e:
self.logger[rpt1].info("Invalid aprs frame [%s] - %s" % (aprs_frame, str(e)))
def send_beacon_gpgga(self, rpt1, callsign, sfx, message, gpgga):
position = nmea.gpgga_get_position(gpgga)
height = ''
if 'height' in position:
height = '/A=' + position['height']
aprs_frame = callsign.strip()+'>APK'+sfx.strip()+',DSTAR*:!'+position['lat'] + position['lat_coord'] + '\\'+position['long']+position['long_coord']+'a' + height + message.strip()
self.logger[rpt1].info("Sending APRS Frame: " + aprs_frame)
try:
self.aprs_connection[rpt1].sendall(aprs_frame)
self.logger[rpt1].info("APRS Beacon sent!")
except Exception, e:
self.logger[rpt1].info("Invalid aprs frame [%s] - %s" % (aprs_frame, str(e)))
| mit | Python |
946f8ff1c475ebf6f339c4df5eb5f7069c5633e9 | Fix did not return HttpResponse when comment | vuonghv/brs,vuonghv/brs,vuonghv/brs,vuonghv/brs | apps/core/views.py | apps/core/views.py | from django.views.generic.detail import ContextMixin
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from apps.categories.models import Category
from apps.books.models import Book
class BaseView(ContextMixin):
"""docstring for BaseView"""
model = Book
def get_context_data(self, **kwargs):
context = super(BaseView, self).get_context_data(**kwargs)
info = {
'list_book_recommendations': Book.objects.all()[0:5],
'list_top_review': Book.objects.all()[0:5],
'list_category': Category.objects.all()
}
context.update(info)
return context
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super().dispatch(request, *args, **kwargs)
| from django.views.generic.detail import ContextMixin
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from apps.categories.models import Category
from apps.books.models import Book
class BaseView(ContextMixin):
"""docstring for BaseView"""
model = Book
def get_context_data(self, **kwargs):
context = super(BaseView, self).get_context_data(**kwargs)
info = {
'list_book_recommendations': Book.objects.all()[0:5],
'list_top_review': Book.objects.all()[0:5],
'list_category': Category.objects.all()
}
context.update(info)
return context
class LoginRequiredMixin(object):
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
super().dispatch(request, *args, **kwargs)
| mit | Python |
0d4fda58516d8b52f6ecb67e9dc29d38990e778a | Fix regular expression escaping | ministryofjustice/bai2 | bai2/utils.py | bai2/utils.py | import datetime
import re
from .constants import TypeCodes
def parse_date(value):
"""
YYMMDD Format.
"""
return datetime.datetime.strptime(value, '%y%m%d').date()
def write_date(date):
return date.strftime('%y%m%d')
def parse_time(value):
clock_pattern = re.compile(r'\d\d:\d\d:\d\d')
if clock_pattern.match(value):
return parse_clock_time(value)
else:
return parse_military_time(value)
def parse_clock_time(value):
return datetime.datetime.strptime(value, '%H:%M:%S').time()
def parse_military_time(value):
"""
Military Format, 24 hours. 0001 through 2400.
Times are stated in military format (0000 through 2400).
0000 indicates the beginning of the day and 2400 indicates the end of the day
for the date indicated.
Some processors use 9999 to indicate the end of the day.
Be prepared to recognize 9999 as end-of-day when receiving transmissions.
"""
# 9999 indicates end of the day
# 2400 indicates end of the day but 24:00 not allowed so
# it's really 23:59
if value == '9999' or value == '2400':
return datetime.time.max
return datetime.datetime.strptime(value, '%H%M').time()
def write_time(time, clock_format_for_intra_day=False):
if clock_format_for_intra_day and time != datetime.time.max:
return write_clock_time(time)
else:
return write_military_time(time)
def write_clock_time(time):
date = datetime.datetime.now().replace(hour=time.hour,
minute=time.minute,
second=time.second)
return datetime.datetime.strftime(date, '%H:%M:%S')
def write_military_time(time):
if time == datetime.time.max:
return '2400'
else:
date = datetime.datetime.now().replace(hour=time.hour, minute=time.minute)
return datetime.datetime.strftime(date, '%H%M')
def parse_type_code(value):
return TypeCodes[value]
def convert_to_string(value):
if value is None:
return ''
else:
return str(value)
| import datetime
import re
from .constants import TypeCodes
def parse_date(value):
"""
YYMMDD Format.
"""
return datetime.datetime.strptime(value, '%y%m%d').date()
def write_date(date):
return date.strftime('%y%m%d')
def parse_time(value):
clock_pattern = re.compile('\d\d:\d\d:\d\d')
if clock_pattern.match(value):
return parse_clock_time(value)
else:
return parse_military_time(value)
def parse_clock_time(value):
return datetime.datetime.strptime(value, '%H:%M:%S').time()
def parse_military_time(value):
"""
Military Format, 24 hours. 0001 through 2400.
Times are stated in military format (0000 through 2400).
0000 indicates the beginning of the day and 2400 indicates the end of the day
for the date indicated.
Some processors use 9999 to indicate the end of the day.
Be prepared to recognize 9999 as end-of-day when receiving transmissions.
"""
# 9999 indicates end of the day
# 2400 indicates end of the day but 24:00 not allowed so
# it's really 23:59
if value == '9999' or value == '2400':
return datetime.time.max
return datetime.datetime.strptime(value, '%H%M').time()
def write_time(time, clock_format_for_intra_day=False):
if clock_format_for_intra_day and time != datetime.time.max:
return write_clock_time(time)
else:
return write_military_time(time)
def write_clock_time(time):
date = datetime.datetime.now().replace(hour=time.hour,
minute=time.minute,
second=time.second)
return datetime.datetime.strftime(date, '%H:%M:%S')
def write_military_time(time):
if time == datetime.time.max:
return '2400'
else:
date = datetime.datetime.now().replace(hour=time.hour, minute=time.minute)
return datetime.datetime.strftime(date, '%H%M')
def parse_type_code(value):
return TypeCodes[value]
def convert_to_string(value):
if value is None:
return ''
else:
return str(value)
| mit | Python |
214f0e17e0bf6701f5a78c92e1a6583d729da709 | Cut 0.16 | pyinvoke/invocations | invocations/_version.py | invocations/_version.py | __version_info__ = (0, 16, 0)
__version__ = '.'.join(map(str, __version_info__))
| __version_info__ = (0, 15, 0)
__version__ = '.'.join(map(str, __version_info__))
| bsd-2-clause | Python |
646a25054b670d1e71199ad8c32b81b9db186404 | Fix snapshot search | globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service,globocom/database-as-a-service | dbaas/backup/admin/snapshot.py | dbaas/backup/admin/snapshot.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from backup.tasks import make_databases_backup
from system.models import Configuration
LOG = logging.getLogger(__name__)
class SnapshotAdmin(admin.ModelAdmin):
actions = None
list_filter = ("start_at", "database_name", "environment", "status")
list_display = ("database_name", "instance", "start_at",
"end_at", "purge_at", "type", "status", "environment")
search_fields = ("database_name", "instance__dns", 'volume__identifier')
readonly_fields = (
"database_name", "instance", "start_at", "end_at", "purge_at", "type",
"status", "snapshopt_id", "snapshot_name", "size", "environment",
"error", "is_automatic", "group", 'volume'
)
ordering = ["-start_at"]
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
def get_changelist(self, request, **kwargs):
from .views.main import ChangeList
return ChangeList
def backup_databases(self, request):
if not self.is_backup_available:
raise Http404
make_databases_backup.delay()
return HttpResponseRedirect(
reverse('admin:notification_taskhistory_changelist')
)
def get_urls(self):
urls = super(SnapshotAdmin, self).get_urls()
my_urls = [
url(
r'backup_databases/$',
self.admin_site.admin_view(self.backup_databases))
]
return my_urls + urls
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
extra_context['backup_available'] = self.is_backup_available
return super(SnapshotAdmin, self).changelist_view(
request, extra_context=extra_context
)
@property
def is_backup_available(self):
backup_available = Configuration.get_by_name_as_int('backup_available')
return backup_available == 1
| # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import logging
from django.conf.urls import url
from django.contrib import admin
from django.http import HttpResponseRedirect, Http404
from django.core.urlresolvers import reverse
from backup.tasks import make_databases_backup
from system.models import Configuration
LOG = logging.getLogger(__name__)
class SnapshotAdmin(admin.ModelAdmin):
actions = None
list_filter = ("start_at", "database_name", "environment", "status")
list_display = ("database_name", "instance", "start_at",
"end_at", "purge_at", "type", "status", "environment")
search_fields = ("database_name", "instance__dns", 'volume')
readonly_fields = (
"database_name", "instance", "start_at", "end_at", "purge_at", "type",
"status", "snapshopt_id", "snapshot_name", "size", "environment",
"error", "is_automatic", "group", 'volume'
)
ordering = ["-start_at"]
def has_delete_permission(self, request, obj=None):
return False
def has_add_permission(self, request, obj=None):
return False
def get_changelist(self, request, **kwargs):
from .views.main import ChangeList
return ChangeList
def backup_databases(self, request):
if not self.is_backup_available:
raise Http404
make_databases_backup.delay()
return HttpResponseRedirect(
reverse('admin:notification_taskhistory_changelist')
)
def get_urls(self):
urls = super(SnapshotAdmin, self).get_urls()
my_urls = [
url(
r'backup_databases/$',
self.admin_site.admin_view(self.backup_databases))
]
return my_urls + urls
def changelist_view(self, request, extra_context=None):
extra_context = extra_context or {}
extra_context['backup_available'] = self.is_backup_available
return super(SnapshotAdmin, self).changelist_view(
request, extra_context=extra_context
)
@property
def is_backup_available(self):
backup_available = Configuration.get_by_name_as_int('backup_available')
return backup_available == 1
| bsd-3-clause | Python |
0848439552f40ca55bf06e3c01f77d557cb90c9b | make M5_PATH a real search path | andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin,andrewfu0325/gem5-aladdin | configs/common/SysPaths.py | configs/common/SysPaths.py | # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import os, sys
from os.path import isdir, join as joinpath
from os import environ as env
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
def searchpath(path, file):
for p in path:
f = joinpath(p, file)
if os.path.exists(f):
return f
raise IOError, "Can't find file '%s' on path." % file
def disk(file):
system()
return searchpath(disk.path, file)
def binary(file):
system()
return searchpath(binary.path, file)
def script(file):
system()
return searchpath(script.path, file)
def system():
if not system.path:
try:
path = env['M5_PATH'].split(':')
except KeyError:
path = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ]
# filter out non-existent directories
system.path = filter(os.path.isdir, path)
if not system.path:
raise IOError, "Can't find a path to system files."
if not binary.path:
binary.path = [joinpath(p, 'binaries') for p in system.path]
if not disk.path:
disk.path = [joinpath(p, 'disks') for p in system.path]
if not script.path:
script.path = [joinpath(config_root, 'boot')]
system.path = None
binary.path = None
disk.path = None
script.path = None
| # Copyright (c) 2006 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Ali Saidi
import os, sys
from os.path import isdir, join as joinpath
from os import environ as env
config_path = os.path.dirname(os.path.abspath(__file__))
config_root = os.path.dirname(config_path)
def disk(file):
system()
return joinpath(disk.dir, file)
def binary(file):
system()
return joinpath(binary.dir, file)
def script(file):
system()
return joinpath(script.dir, file)
def system():
if not system.dir:
try:
path = env['M5_PATH'].split(':')
except KeyError:
path = [ '/dist/m5/system', '/n/poolfs/z/dist/m5/system' ]
for system.dir in path:
if os.path.isdir(system.dir):
break
else:
raise ImportError, "Can't find a path to system files."
if not binary.dir:
binary.dir = joinpath(system.dir, 'binaries')
if not disk.dir:
disk.dir = joinpath(system.dir, 'disks')
if not script.dir:
script.dir = joinpath(config_root, 'boot')
system.dir = None
binary.dir = None
disk.dir = None
script.dir = None
| bsd-3-clause | Python |
004cfe5528749209ff0763d2e5859a83ae4e05fe | fix encoding autodetection | alihalabyah/grab,giserh/grab,SpaceAppsXploration/grab,kevinlondon/grab,alihalabyah/grab,istinspring/grab,subeax/grab,pombredanne/grab-1,shaunstanislaus/grab,raybuhr/grab,subeax/grab,lorien/grab,pombredanne/grab-1,huiyi1990/grab,maurobaraldi/grab,SpaceAppsXploration/grab,shaunstanislaus/grab,huiyi1990/grab,maurobaraldi/grab,DDShadoww/grab,raybuhr/grab,subeax/grab,giserh/grab,liorvh/grab,codevlabs/grab,istinspring/grab,liorvh/grab,kevinlondon/grab,lorien/grab,codevlabs/grab,DDShadoww/grab | grab/html.py | grab/html.py | # -*- coding: utf-8 -*-
import re
from htmlentitydefs import name2codepoint
import logging
def decode_entities(text):
"""
Convert HTML entities to their unicode analogs.
"""
re_entity = re.compile(r'(&[a-z]+;)')
re_num_entity = re.compile(r'(&#\d+;)')
def process_entity(match):
entity = match.group(1)
name = entity[1:-1]
if name in name2codepoint:
return unichr(name2codepoint[name])
else:
return entity
def process_num_entity(match):
entity = match.group(1)
num = entity[2:-1]
try:
return unichr(int(num))
except ValueError:
return entity
text = re_num_entity.sub(process_num_entity, text)
text = re_entity.sub(process_entity, text)
return text
def make_unicode(html, guess_encodings):
"""
Convert byte stream to unicode.
"""
RE_CONTENT_TYPE_TAG = re.compile(r'<meta[^>]+http-equiv\s*=\s*.?Content-Type[^>]+', re.I)
RE_CHARSET = re.compile(r'charset\s*=\s*([-_a-z0-9]+)', re.I)
match = RE_CONTENT_TYPE_TAG.search(html)
if match:
match = RE_CHARSET.search(match.group(0))
if match:
guess_encodings = [match.group(1)] + list(guess_encodings)
for encoding in guess_encodings:
try:
return html.decode(encoding)
except UnicodeDecodeError:
pass
else:
return u'OMG!'
def find_refresh_url(html):
"""
Find value of redirect url from http-equiv refresh meta tag.
"""
# We should decode quote values to correctly find
# the url value
html = html.replace(''', '\'')
html = html.replace('"', '"').replace('"', '"')
RE_REFRESH_TAG = re.compile(r'<meta[^>]+http-equiv\s*=\s*["\']*Refresh[^>]+', re.I)
RE_REFRESH_URL = re.compile(r'url=["\']*([^\'"> ]+)')
match = RE_REFRESH_TAG.search(html)
if match:
match = RE_REFRESH_URL.search(match.group(0))
if match:
return match.group(1)
return None
| # -*- coding: utf-8 -*-
import re
from htmlentitydefs import name2codepoint
import logging
def decode_entities(text):
"""
Convert HTML entities to their unicode analogs.
"""
re_entity = re.compile(r'(&[a-z]+;)')
re_num_entity = re.compile(r'(&#\d+;)')
def process_entity(match):
entity = match.group(1)
name = entity[1:-1]
if name in name2codepoint:
return unichr(name2codepoint[name])
else:
return entity
def process_num_entity(match):
entity = match.group(1)
num = entity[2:-1]
try:
return unichr(int(num))
except ValueError:
return entity
text = re_num_entity.sub(process_num_entity, text)
text = re_entity.sub(process_entity, text)
return text
def make_unicode(html, guess_encodings):
"""
Convert byte stream to unicode.
"""
RE_CONTENT_TYPE_TAG = re.compile(r'<meta[^>]+http-equiv\s*=\s*.?Content-Type[^>]+', re.I)
RE_CHARSET = re.compile(r'charset\s*=\s*([-_a-z0-9]+)', re.I)
match = RE_CONTENT_TYPE_TAG.search(html)
if match:
match = RE_CHARSET.search(match.group(0))
if match:
guess_encodings = [match.group(1)] + list(guess_encodings))
for encoding in guess_encodings:
try:
return html.decode(encoding)
except UnicodeDecodeError:
pass
else:
return u'OMG!'
def find_refresh_url(html):
"""
Find value of redirect url from http-equiv refresh meta tag.
"""
# We should decode quote values to correctly find
# the url value
html = html.replace(''', '\'')
html = html.replace('"', '"').replace('"', '"')
RE_REFRESH_TAG = re.compile(r'<meta[^>]+http-equiv\s*=\s*["\']*Refresh[^>]+', re.I)
RE_REFRESH_URL = re.compile(r'url=["\']*([^\'"> ]+)')
match = RE_REFRESH_TAG.search(html)
if match:
match = RE_REFRESH_URL.search(match.group(0))
if match:
return match.group(1)
return None
| mit | Python |
56567351793a433ed7d3ea8853e523289fa64615 | Use simple function instead of Module for filtering muons | tamasgal/km3pipe,tamasgal/km3pipe | examples/plot_dom_hits.py | examples/plot_dom_hits.py | """
==================
DOM hits.
==================
This example shows how to create DOM hits statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
def filter_muons(blob):
"""Write all muons from McTracks to Muons."""
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(filter_muons)
pipe.attach(DOMHits)
pipe.drain()
| """
==================
DOM hits.
==================
This example shows how to create DOM hits statistics to estimate track
distances.
"""
from collections import defaultdict, Counter
import km3pipe as kp
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from km3modules import StatusBar
from km3pipe.tools import pld3
import km3pipe.style
km3pipe.style.use('default')
filename = "data/km3net_jul13_90m_muatm50T655.km3_v5r1.JTE_r2356.root.0-499.h5"
geo = kp.Geometry(filename="data/km3net_jul13_90m_r1494_corrected.detx")
class MuonFilter(kp.Module):
"""Write all muons from MCTracks to Muons."""
def process(self, blob):
tracks = blob['McTracks']
muons = [t for t in blob['McTracks'] if t.type == 5]
blob["Muons"] = kp.dataclasses.TrackSeries(muons, tracks.event_id)
return blob
class DOMHits(kp.Module):
"""Create histogram with n_hits and distance of hit to track."""
def configure(self):
self.hit_statistics = defaultdict(list)
def process(self, blob):
hits = blob['Hits']
muons = blob['Muons']
highest_energetic_muon = max(muons, key=lambda x: x.energy)
muon = highest_energetic_muon
triggered_hits = hits.triggered_hits
dom_hits = Counter(triggered_hits.dom_id)
for dom_id, n_hits in dom_hits.items():
distance = pld3(geo.detector.dom_positions[dom_id],
muon.pos,
muon.dir)
self.hit_statistics['n_hits'].append(n_hits)
self.hit_statistics['distance'].append(distance)
return blob
def finish(self):
df = pd.DataFrame(self.hit_statistics)
sdf = df[(df['distance'] < 200) & (df['n_hits'] < 50)]
plt.hist2d(sdf['distance'], sdf['n_hits'], cmap='plasma',
bins=(max(sdf['distance'])-1, max(sdf['n_hits'])-1),
norm=LogNorm())
plt.xlabel('Distance between hit and muon track [m]')
plt.ylabel('Number of hits on DOM')
plt.show()
pipe = kp.Pipeline()
pipe.attach(kp.io.HDF5Pump, filename=filename)
pipe.attach(StatusBar, every=100)
pipe.attach(MuonFilter)
pipe.attach(DOMHits)
pipe.drain()
| mit | Python |
07eb518143f72d1b14d8f6c1aef74688e383a229 | Remove invalid escaped newline | nickgzzjr/powerpool,simplecrypto/powerpool,sigwo/powerpool,nickgzzjr/powerpool,cinnamoncoin/powerpool,cinnamoncoin/powerpool,simplecrypto/powerpool,sigwo/powerpool | contrib/deploy.py | contrib/deploy.py | #!/usr/bin/env python
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(prog='simplecoin RPC')
parser.add_argument('-l', '--log-level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR'])
subparsers = parser.add_subparsers(title='main subcommands', dest='action')
subparsers.add_parser('create')
link = subparsers.add_parser('link', help='links a list of executable names to the current git revision')
link.add_argument('names', help='names of the hardlinks you\'d like to create', action='append')
args = parser.parse_args()
githash = subprocess.check_output("git rev-parse HEAD", shell=True).strip()
print("{} hash identified as current!".format(githash))
assert len(githash) == 40
basedir = "../{}".format(githash)
def req(call):
ret = system(call)
if ret:
print "\n#### ERROR ####\n"
exit(ret)
def system(call):
print "-- {}".format(call)
return os.system(call)
def try_pip(pip_fragment):
pip_inst = ("{}/bin/pip install --no-index --use-wheel --find-links='../wheelhouse/'"
" --download-cache='../pipcache' {}".format(basedir, pip_fragment))
# If error (non zero ret)
if system(pip_inst):
req("{}/bin/pip wheel --download-cache='../pipcache' "
" --wheel-dir='../wheelhouse' {}".format(basedir, pip_fragment))
req(pip_inst)
if args.action == "create":
req(r'echo "__sha__ = \"{}\"" >> powerpool/__init__.py'.format(githash))
req("virtualenv {}".format(basedir))
req("{}/bin/pip install wheel".format(basedir))
try_pip("-r requirements.txt")
try_pip("vtc_scrypt ltc_scrypt drk_hash")
req("{}/bin/pip install .".format(basedir))
req("git checkout -- powerpool/__init__.py")
print "\n#### SUCCESS ####\n"
elif args.action == "link":
for name in args.names:
req("ln -f {}/bin/pp {}".format(basedir, name))
| #!/usr/bin/env python
import argparse
import os
import subprocess
parser = argparse.ArgumentParser(prog='simplecoin RPC')
parser.add_argument('-l', '--log-level',
choices=['DEBUG', 'INFO', 'WARN', 'ERROR'])
subparsers = parser.add_subparsers(title='main subcommands', dest='action')
subparsers.add_parser('create')
link = subparsers.add_parser('link', help='links a list of executable names to the current git revision')
link.add_argument('names', help='names of the hardlinks you\'d like to create', action='append')
args = parser.parse_args()
githash = subprocess.check_output("git rev-parse HEAD", shell=True).strip()
print("{} hash identified as current!".format(githash))
assert len(githash) == 40
basedir = "../{}".format(githash)
def req(call):
ret = system(call)
if ret:
print "\n#### ERROR ####\n"
exit(ret)
def system(call):
print "-- {}".format(call)
return os.system(call)
def try_pip(pip_fragment):
pip_inst = ("{}/bin/pip install --no-index --use-wheel --find-links='../wheelhouse/'"
" --download-cache='../pipcache' {}".format(basedir, pip_fragment))
# If error (non zero ret)
if system(pip_inst):
req("{}/bin/pip wheel --download-cache='../pipcache' "
" --wheel-dir='../wheelhouse' {}".format(basedir, pip_fragment))
req(pip_inst)
if args.action == "create":
req(r'echo "__sha__ = \"{}\"\n" >> powerpool/__init__.py'.format(githash))
req("virtualenv {}".format(basedir))
req("{}/bin/pip install wheel".format(basedir))
try_pip("-r requirements.txt")
try_pip("vtc_scrypt ltc_scrypt drk_hash")
req("{}/bin/pip install .".format(basedir))
req("git checkout -- powerpool/__init__.py")
print "\n#### SUCCESS ####\n"
elif args.action == "link":
for name in args.names:
req("ln -f {}/bin/pp {}".format(basedir, name))
| bsd-2-clause | Python |
4779a4aa84012d0326a4bfd896d0ad503407f5f9 | update initial migration with non-deprecated IP field (#3) | GeoNode/geonode-dialogos | dialogos/migrations/0001_initial.py | dialogos/migrations/0001_initial.py | # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=255, blank=True)),
('website', models.CharField(max_length=255, blank=True)),
('object_id', models.IntegerField()),
('comment', models.TextField()),
('submit_date', models.DateTimeField(default=datetime.datetime.now)),
('ip_address', models.GenericIPAddressField(null=True)),
('public', models.BooleanField(default=True)),
('author', models.ForeignKey(related_name='comments', to=settings.AUTH_USER_MODEL, null=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
),
]
| # -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
from __future__ import unicode_literals
from django.db import migrations, models
import datetime
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Comment',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=100)),
('email', models.CharField(max_length=255, blank=True)),
('website', models.CharField(max_length=255, blank=True)),
('object_id', models.IntegerField()),
('comment', models.TextField()),
('submit_date', models.DateTimeField(default=datetime.datetime.now)),
('ip_address', models.IPAddressField(null=True)),
('public', models.BooleanField(default=True)),
('author', models.ForeignKey(related_name='comments', to=settings.AUTH_USER_MODEL, null=True)),
('content_type', models.ForeignKey(to='contenttypes.ContentType')),
],
),
]
| bsd-3-clause | Python |
4a782f94e8fe5a26e2998408c2cb013f2aebe9ac | Remove reference to old, bad migration that was in my local tree. | jayoshih/content-curation,jayoshih/content-curation,jayoshih/content-curation,fle-internal/content-curation,DXCanas/content-curation,fle-internal/content-curation,DXCanas/content-curation,fle-internal/content-curation,fle-internal/content-curation,jayoshih/content-curation,DXCanas/content-curation,DXCanas/content-curation | contentcuration/contentcuration/migrations/0091_auto_20180724_2243.py | contentcuration/contentcuration/migrations/0091_auto_20180724_2243.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0089_auto_20180706_2242'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-07-24 22:43
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('contentcuration', '0090_auto_20180724_1625'),
]
operations = [
migrations.AlterField(
model_name='channel',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
migrations.AlterField(
model_name='user',
name='content_defaults',
field=django.contrib.postgres.fields.jsonb.JSONField(default=dict),
),
]
| mit | Python |
1e06d42ed92d6aac0ab3311d5cc5845d5860dca2 | fix showing user in admin | praekelt/jmbo-your-words,praekelt/jmbo-your-words | jmboyourwords/admin.py | jmboyourwords/admin.py | from django.contrib import admin
from jmboyourwords.models import YourStoryCompetition, YourStoryEntry
from ckeditor.widgets import CKEditorWidget
from django.db import models
class YourStoryCompetitionAdmin(admin.ModelAdmin):
list_filter = ('created', 'publish_on', 'retract_on')
list_display = ('title', 'published', 'publish_on', 'retract_on', 'created')
prepopulated_fields = {'slug': ('title',)}
exclude = [
'published',
]
ordering = ('-published', '-publish_on', '-updated', 'created',)
save_on_top = True
formfield_overrides = {
models.TextField: {'widget': CKEditorWidget},
}
fieldsets = (
(None, {
'fields': ('title', 'description', 'publish_on', 'retract_on'),
}),
('Advanced', {
'fields': ('slug',),
'classes': ('collapse',)
}),
(None, {
'fields': ('image', 'content'),
}),
('Advanced', {
'fields': ('categories', 'tags',),
'classes': ('collapse',)
}),
(None, {
'fields': ('sites',),
})
)
def save_model(self, request, obj, form, change):
if not obj.author:
obj.author = request.user
obj.save()
class YourStoryEntryAdmin(admin.ModelAdmin):
list_filter = ('created', 'your_story_competition')
list_display = ('name', 'user', 'text', 'created',)
raw_id_fields = ('user', )
admin.site.register(YourStoryEntry, YourStoryEntryAdmin)
admin.site.register(YourStoryCompetition, YourStoryCompetitionAdmin)
| from django.contrib import admin
from jmboyourwords.models import YourStoryCompetition, YourStoryEntry
from ckeditor.widgets import CKEditorWidget
from django.db import models
class YourStoryCompetitionAdmin(admin.ModelAdmin):
list_filter = ('created', 'publish_on', 'retract_on')
list_display = ('title', 'published', 'publish_on', 'retract_on', 'created')
prepopulated_fields = {'slug': ('title',)}
exclude = [
'published',
]
ordering = ('-published', '-publish_on', '-updated', 'created',)
save_on_top = True
formfield_overrides = {
models.TextField: {'widget': CKEditorWidget},
}
fieldsets = (
(None, {
'fields': ('title', 'description', 'publish_on', 'retract_on'),
}),
('Advanced', {
'fields': ('slug',),
'classes': ('collapse',)
}),
(None, {
'fields': ('image', 'content'),
}),
('Advanced', {
'fields': ('categories', 'tags',),
'classes': ('collapse',)
}),
(None, {
'fields': ('sites',),
})
)
def save_model(self, request, obj, form, change):
if not obj.author:
obj.author = request.user
obj.save()
class YourStoryEntryAdmin(admin.ModelAdmin):
list_filter = ('created', 'your_story_competition')
list_display = ('name', 'user__username', 'text', 'created',)
raw_id_fields = ('user', )
admin.site.register(YourStoryEntry, YourStoryEntryAdmin)
admin.site.register(YourStoryCompetition, YourStoryCompetitionAdmin)
| bsd-3-clause | Python |
69a8528801ae5c3fdde57b9766917fcf8690c54e | Fix args manipulation in when translating ksdefs | Metaswitch/Telephus,driftx/Telephus,ClearwaterCore/Telephus,driftx/Telephus,Metaswitch/Telephus,ClearwaterCore/Telephus | telephus/translate.py | telephus/translate.py | class APIMismatch(Exception):
pass
def translateArgs(request, api_version):
args = request.args
if request.method == 'system_add_keyspace' \
or request.method == 'system_update_keyspace':
adapted_ksdef = adapt_ksdef_rf(args[0])
args = (adapted_ksdef,) + args[1:]
return args
def postProcess(results, method):
if method == 'describe_keyspace':
results = adapt_ksdef_rf(results)
elif method == 'describe_keyspaces':
results = map(adapt_ksdef_rf, results)
return results
def adapt_ksdef_rf(ksdef):
"""
try to always have both KsDef.strategy_options['replication_factor'] and
KsDef.replication_factor available, and let the thrift api code and client
code work out what they want to use.
"""
if getattr(ksdef, 'strategy_options', None) is None:
ksdef.strategy_options = {}
if 'replication_factor' in ksdef.strategy_options:
if ksdef.replication_factor is None:
ksdef.replication_factor = int(ksdef.strategy_options['replication_factor'])
elif ksdef.replication_factor is not None:
ksdef.strategy_options['replication_factor'] = str(ksdef.replication_factor)
return ksdef
| class APIMismatch(Exception):
pass
def translateArgs(request, api_version):
args = request.args
if request.method == 'system_add_keyspace' \
or request.method == 'system_update_keyspace':
args = adapt_ksdef_rf(args[0]) + args[1:]
return args
def postProcess(results, method):
if method == 'describe_keyspace':
results = adapt_ksdef_rf(results)
elif method == 'describe_keyspaces':
results = map(adapt_ksdef_rf, results)
return results
def adapt_ksdef_rf(ksdef):
"""
try to always have both KsDef.strategy_options['replication_factor'] and
KsDef.replication_factor available, and let the thrift api code and client
code work out what they want to use.
"""
if getattr(ksdef, 'strategy_options', None) is None:
ksdef.strategy_options = {}
if 'replication_factor' in ksdef.strategy_options:
if ksdef.replication_factor is None:
ksdef.replication_factor = int(ksdef.strategy_options['replication_factor'])
elif ksdef.replication_factor is not None:
ksdef.strategy_options['replication_factor'] = str(ksdef.replication_factor)
return ksdef
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.