commit: stringlengths 40-40
old_file: stringlengths 4-236
new_file: stringlengths 4-236
old_contents: stringlengths 1-3.26k
new_contents: stringlengths 16-4.43k
subject: stringlengths 16-624
message: stringlengths 17-3.29k
lang: stringclasses (5 values)
license: stringclasses (13 values)
repos: stringlengths 5-91.5k
82756e5314c2768bb3acf03cf542929d23b73f82
bot/logger/message_sender/synchronized.py
bot/logger/message_sender/synchronized.py
import threading

from bot.logger.message_sender import MessageSender, IntermediateMessageSender


class SynchronizedMessageSender(IntermediateMessageSender):
    """
    Thread-safe message sender.

    Wrap your `MessageSender` with this class and its :func:`send` function
    will be called in a synchronized way, only by one thread at the same time.
    """

    def __init__(self, sender: MessageSender):
        super().__init__(sender)
        self.lock = threading.Lock()

    def send(self, text):
        with self.lock:
            self.sender.send(text)
import threading

from bot.logger.message_sender import MessageSender, IntermediateMessageSender


class SynchronizedMessageSender(IntermediateMessageSender):
    """
    Thread-safe message sender.

    Wrap your `MessageSender` with this class and its :func:`send` function
    will be called in a synchronized way, only by one thread at the same time.
    """

    def __init__(self, sender: MessageSender):
        super().__init__(sender)
        # Using a reentrant lock to play it safe in case the send function
        # somehow invokes this send function again, e.g. because a send
        # triggers another send on the same message sender.
        # Note that if this send throws an exception, the lock is released
        # while dealing with it from outside, so this is not a problem.
        # But if the exception is handled inside this send call, the lock
        # is still held.
        self.lock = threading.RLock()

    def send(self, text):
        with self.lock:
            self.sender.send(text)
Use reentrant lock on SynchronizedMessageSender
Use reentrant lock on SynchronizedMessageSender
Python
agpl-3.0
alvarogzp/telegram-bot,alvarogzp/telegram-bot
721703801654af88e8b5064d1bc65569ce1555cf
thumbnails/engines/__init__.py
thumbnails/engines/__init__.py
# -*- coding: utf-8 -*-


def get_current_engine():
    return None
# -*- coding: utf-8 -*-
from thumbnails.engines.pillow import PillowEngine


def get_current_engine():
    return PillowEngine()
Set pillow engine as default
Set pillow engine as default
Python
mit
python-thumbnails/python-thumbnails,relekang/python-thumbnails
4c1b96865f3e5e6660fc41f9170939a02f9b7735
fabfile.py
fabfile.py
from fabric.api import *
from fabric.contrib.console import confirm

cfg = dict(
    appengine_dir='appengine-web/src',
    goldquest_dir='src',
    appengine_token='',
)


def update():
    # update to latest code from repo
    local('git pull')


def test():
    local("nosetests -m 'Test|test_' -w %(goldquest_dir)s" % cfg)
    # jslint
    # pychecker
    # run jasmine tests


def compile():
    # Minimize javascript using google closure.
    local("java -jar ~/bin/compiler.jar --js %(appengine_dir)s/javascript/game.js --js_output_file %(appengine_dir)s/javascript/game.min.js" % cfg)


def deploy_appengine():
    local("appcfg.py --oauth2_refresh_token=%(appengine_token)s update %(appengine_dir)s" % cfg)


def prepare_deploy():
    test()
    compile()


def deploy():
    update()
    prepare_deploy()
    deploy_appengine()
    # tweet about release
from fabric.api import *
from fabric.contrib.console import confirm
import simplejson

cfg = dict(
    appengine_dir='appengine-web/src',
    goldquest_dir='src',
    oauth_cfg_path='/Users/olle/.appcfg_oauth2_tokens',
    appengine_refresh_token='',
)


def read_appcfg_oauth():
    fp = open(cfg['oauth_cfg_path'])
    oauth_cfg = simplejson.load(fp)
    cfg['appengine_refresh_token'] = oauth_cfg['refresh_token']


def update():
    # update to latest code from repo
    local('git pull')


def test():
    local("nosetests -m 'Test|test_' -w %(goldquest_dir)s" % cfg)
    # jslint
    # pychecker
    # run jasmine tests


def compile():
    # Minimize javascript using google closure.
    local("java -jar ~/bin/compiler.jar --js %(appengine_dir)s/javascript/game.js --js_output_file %(appengine_dir)s/javascript/game.min.js" % cfg)


def deploy_appengine():
    read_appcfg_oauth()
    local("appcfg.py --oauth2_refresh_token=%(appengine_refresh_token)s update %(appengine_dir)s" % cfg)


def prepare_deploy():
    test()
    compile()


def deploy():
    update()
    prepare_deploy()
    deploy_appengine()
    # tweet about release
Read appengine refresh_token from oauth file automatically.
NEW: Read appengine refresh_token from oauth file automatically.
Python
mit
ollej/GoldQuest,ollej/GoldQuest,ollej/GoldQuest,ollej/GoldQuest
670227590ceaf6eb52d56809f8bcc1b1f6ae6f7f
prettyplotlib/_eventplot.py
prettyplotlib/_eventplot.py
__author__ = 'jgosmann'

from matplotlib.cbook import iterable

from prettyplotlib.utils import remove_chartjunk, maybe_get_ax
from prettyplotlib.colors import set2


def eventplot(*args, **kwargs):
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)

    show_ticks = kwargs.pop('show_ticks', False)

    if len(args) > 0:
        positions = args[0]
    else:
        positions = kwargs['positions']

    if any(iterable(p) for p in positions):
        size = len(positions)
    else:
        size = 1
    kwargs.setdefault('colors', [c + (1.0,) for c in set2[:size]])

    event_collections = ax.eventplot(*args, **kwargs)
    remove_chartjunk(ax, ['top', 'right'], show_ticks=show_ticks)
    return event_collections
__author__ = 'jgosmann'

from matplotlib.cbook import iterable

from prettyplotlib.utils import remove_chartjunk, maybe_get_ax
from prettyplotlib.colors import set2


def eventplot(*args, **kwargs):
    ax, args, kwargs = maybe_get_ax(*args, **kwargs)

    show_ticks = kwargs.pop('show_ticks', False)
    alpha = kwargs.pop('alpha', 1.0)

    if len(args) > 0:
        positions = args[0]
    else:
        positions = kwargs['positions']

    if any(iterable(p) for p in positions):
        size = len(positions)
    else:
        size = 1
    kwargs.setdefault('colors', [c + (alpha,) for c in set2[:size]])

    event_collections = ax.eventplot(*args, **kwargs)
    remove_chartjunk(ax, ['top', 'right'], show_ticks=show_ticks)
    return event_collections
Add alpha argument to eventplot().
Add alpha argument to eventplot().
Python
mit
olgabot/prettyplotlib,olgabot/prettyplotlib
c814fe264c93dfa09276474960aa83cdb26e7754
polyaxon/api/searches/serializers.py
polyaxon/api/searches/serializers.py
from rest_framework import serializers

from db.models.searches import Search


class SearchSerializer(serializers.ModelSerializer):
    class Meta:
        model = Search
        fields = ['id', 'name', 'query', 'meta']
from rest_framework import serializers
from rest_framework.exceptions import ValidationError

from api.utils.serializers.names import NamesMixin
from db.models.searches import Search


class SearchSerializer(serializers.ModelSerializer, NamesMixin):
    class Meta:
        model = Search
        fields = ['id', 'name', 'query', 'meta']

    def create(self, validated_data):
        validated_data = self.validated_name(validated_data,
                                             project=validated_data['project'],
                                             query=Search.all)
        try:
            return super().create(validated_data)
        except Exception as e:
            raise ValidationError(e)
Add graceful handling for creating search with similar names
Add graceful handling for creating search with similar names
Python
apache-2.0
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
e459a42af1c260986c7333047efd40294dbd23d3
akaudit/clidriver.py
akaudit/clidriver.py
#!/usr/bin/env python

# Copyright 2015 Chris Fordham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import argparse

import akaudit
from akaudit.audit import Auditer


def main(argv = sys.argv, log = sys.stderr):
    parser = argparse.ArgumentParser(description='Audit who has access to your homes.', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-l', '--log', default='info', help='log level')
    parser.add_argument('-i', '--interactive', help='interactive mode (prompts asking if to delete each key)', action="store_true")
    parser.add_argument('-v', '--version', action="version", version='%(prog)s ' + akaudit.__version__)
    args = parser.parse_args()

    auditer = Auditer()
    auditer.run_audit(args)


if __name__ == "__main__":
    main(sys.argv[1:])
#!/usr/bin/env python

# Copyright 2015 Chris Fordham
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import sys
import argparse

import akaudit
from akaudit.audit import Auditer


def main(argv = sys.argv, log = sys.stderr):
    parser = argparse.ArgumentParser(description=akaudit.__description__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-l', '--log', default='info', help='log level')
    parser.add_argument('-i', '--interactive', help='interactive mode (prompts asking if to delete each key)', action="store_true")
    parser.add_argument('-v', '--version', action="version", version='%(prog)s ' + akaudit.__version__)
    args = parser.parse_args()

    auditer = Auditer()
    auditer.run_audit(args)


if __name__ == "__main__":
    main(sys.argv[1:])
Use __description__ with parser instantiation.
Use __description__ with parser instantiation.
Python
apache-2.0
flaccid/akaudit
d90f249e0865dab0cc9a224f413ea90df8a648ed
srsly/util.py
srsly/util.py
from pathlib import Path
from typing import Union, Dict, Any, List, Tuple
from collections import OrderedDict

# fmt: off
FilePath = Union[str, Path]
# Superficial JSON input/output types
# https://github.com/python/typing/issues/182#issuecomment-186684288
JSONOutput = Union[str, int, float, bool, None, Dict[str, Any], List[Any]]
JSONOutputBin = Union[bytes, str, int, float, bool, None, Dict[str, Any], List[Any]]
# For input, we also accept tuples, ordered dicts etc.
JSONInput = Union[str, int, float, bool, None, Dict[str, Any], List[Any], Tuple[Any], OrderedDict]
JSONInputBin = Union[bytes, str, int, float, bool, None, Dict[str, Any], List[Any], Tuple[Any], OrderedDict]
YAMLInput = JSONInput
YAMLOutput = JSONOutput
# fmt: on


def force_path(location, require_exists=True):
    if not isinstance(location, Path):
        location = Path(location)
    if require_exists and not location.exists():
        raise ValueError(f"Can't read file: {location}")
    return location


def force_string(location):
    if isinstance(location, str):
        return location
    return str(location)
from pathlib import Path
from typing import Union, Dict, Any, List, Tuple
from collections import OrderedDict

# fmt: off
FilePath = Union[str, Path]
# Superficial JSON input/output types
# https://github.com/python/typing/issues/182#issuecomment-186684288
JSONOutput = Union[str, int, float, bool, None, Dict[str, Any], List[Any]]
JSONOutputBin = Union[bytes, str, int, float, bool, None, Dict[str, Any], List[Any]]
# For input, we also accept tuples, ordered dicts etc.
JSONInput = Union[str, int, float, bool, None, Dict[str, Any], List[Any], Tuple[Any, ...], OrderedDict]
JSONInputBin = Union[bytes, str, int, float, bool, None, Dict[str, Any], List[Any], Tuple[Any, ...], OrderedDict]
YAMLInput = JSONInput
YAMLOutput = JSONOutput
# fmt: on


def force_path(location, require_exists=True):
    if not isinstance(location, Path):
        location = Path(location)
    if require_exists and not location.exists():
        raise ValueError(f"Can't read file: {location}")
    return location


def force_string(location):
    if isinstance(location, str):
        return location
    return str(location)
Fix typing for JSONInput and JSONInputBin.
Fix typing for JSONInput and JSONInputBin.
Python
mit
explosion/srsly,explosion/srsly,explosion/srsly,explosion/srsly
20e096ac5261cb7fd4197f6cdeb8b171753c82a7
landlab/values/tests/conftest.py
landlab/values/tests/conftest.py
import pytest

from landlab import NetworkModelGrid, RasterModelGrid


@pytest.fixture
def four_by_four_raster():
    mg = RasterModelGrid((4, 4))
    return mg


@pytest.fixture
def simple_network():
    y_of_node = (0, 1, 2, 2)
    x_of_node = (0, 0, -1, 1)
    nodes_at_link = ((1, 0), (2, 1), (3, 1))
    mg = NetworkModelGrid((y_of_node, x_of_node), nodes_at_link)
    return mg
import pytest

from landlab import NetworkModelGrid, RasterModelGrid
from landlab.values.synthetic import _STATUS


@pytest.fixture
def four_by_four_raster():
    mg = RasterModelGrid((4, 4))
    return mg


@pytest.fixture
def simple_network():
    y_of_node = (0, 1, 2, 2)
    x_of_node = (0, 0, -1, 1)
    nodes_at_link = ((1, 0), (2, 1), (3, 1))
    mg = NetworkModelGrid((y_of_node, x_of_node), nodes_at_link)
    return mg


def pytest_generate_tests(metafunc):
    if "at" in metafunc.fixturenames:
        metafunc.parametrize("at", ("node", "link", "patch", "corner", "face", "cell"))
    if "node_bc" in metafunc.fixturenames:
        metafunc.parametrize("node_bc", list(_STATUS["node"].keys()))
    if "link_bc" in metafunc.fixturenames:
        metafunc.parametrize("link_bc", list(_STATUS["link"].keys()))
Add parametrized fixture for at, node_bc, link_bc.
Add parametrized fixture for at, node_bc, link_bc.
Python
mit
landlab/landlab,cmshobe/landlab,landlab/landlab,cmshobe/landlab,amandersillinois/landlab,landlab/landlab,amandersillinois/landlab,cmshobe/landlab
bcde8104bd77f18d7061f7f4d4831ad49644a913
common/management/commands/build_index.py
common/management/commands/build_index.py
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings

from common.utilities.search_utils import index_instance


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            '--test',
            action='store_true',
            dest='test',
            default=False,
            help='Provide this if you want to create a test index')

    def handle(self, *args, **options):
        # optimize this to index in bulk
        apps_lists = settings.LOCAL_APPS
        for app_name in apps_lists:
            app = get_app(app_name)
            for model in get_models(app):
                all_instances = model.objects.all()[0:3] \
                    if options.get('test') else model.objects.all()
                [index_instance(obj) for obj in all_instances]
                message = "Indexed {} {}".format(
                    all_instances.count(),
                    model._meta.verbose_name_plural.capitalize())
                self.stdout.write(message)
        self.stdout.write("Finished indexing")
from django.core.management import BaseCommand
from django.db.models import get_app, get_models
from django.conf import settings

from common.utilities.search_utils import index_instance


class Command(BaseCommand):
    def add_arguments(self, parser):
        parser.add_argument(
            '--test',
            action='store_true',
            dest='test',
            default=False,
            help='Provide this if you want to create a test index')

    def handle(self, *args, **options):
        # optimize this to index in bulk
        apps_lists = settings.LOCAL_APPS
        for app_name in apps_lists:
            app = get_app(app_name)
            for model in get_models(app):
                if model.__name__.lower() != 'testmodel':
                    all_instances = model.objects.all()[0:3] \
                        if options.get('test') else model.objects.all()
                    [index_instance(obj) for obj in all_instances]
                    message = "Indexed {} {}".format(
                        all_instances.count(),
                        model._meta.verbose_name_plural.capitalize())
                    self.stdout.write(message)
                else:
                    # relation "common_testmodel" does not exist
                    # Will be fixed
                    pass
        self.stdout.write("Finished indexing")
Check the model being indexed
Check the model being indexed
Python
mit
urandu/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,urandu/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,MasterFacilityList/mfl_api,urandu/mfl_api,urandu/mfl_api
ccb1759a205a4cdc8f5eb2c28adcf49503221135
ecpy/tasks/api.py
ecpy/tasks/api.py
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015 by Ecpy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tasks package public interface.

"""
from __future__ import (division, unicode_literals, print_function,
                        absolute_import)

import enaml

from .base_tasks import BaseTask, SimpleTask, ComplexTask, RootTask
from .task_interface import (InterfaceableTaskMixin, TaskInterface,
                             InterfaceableInterfaceMixin, IInterface)
from .manager.declarations import (Tasks, Task, Interfaces, Interface,
                                   TaskConfig)
from .manager.filters import (TaskFilter, SubclassTaskFilter, GroupTaskFilter,
                              MetadataTaskFilter)
from .manager.configs.base_configs import BaseTaskConfig

with enaml.imports():
    from .manager.configs.base_config_views import BaseConfigView
    from .base_views import BaseTaskView

__all__ = ['BaseTask', 'SimpleTask', 'ComplexTask', 'RootTask',
           'BaseTaskView',
           'InterfaceableTaskMixin', 'TaskInterface',
           'InterfaceableInterfaceMixin', 'IInterface',
           'Tasks', 'Task', 'Interfaces', 'Interface', 'TaskConfig',
           'TaskFilter', 'SubclassTaskFilter', 'GroupTaskFilter',
           'MetadataTaskFilter',
           'BaseTaskConfig', 'BaseConfigView']
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright 2015 by Ecpy Authors, see AUTHORS for more details.
#
# Distributed under the terms of the BSD license.
#
# The full license is in the file LICENCE, distributed with this software.
# -----------------------------------------------------------------------------
"""Tasks package public interface.

"""
from __future__ import (division, unicode_literals, print_function,
                        absolute_import)

import enaml

from .base_tasks import BaseTask, SimpleTask, ComplexTask, RootTask
from .task_interface import (InterfaceableTaskMixin, TaskInterface,
                             InterfaceableInterfaceMixin, IInterface)
from .manager.declarations import (Tasks, Task, Interfaces, Interface,
                                   TaskConfig)
from .manager.filters import (TaskFilter, SubclassTaskFilter, GroupTaskFilter,
                              MetadataTaskFilter)
from .manager.configs.base_configs import BaseTaskConfig
from .manager.utils.building import build_task_from_config

with enaml.imports():
    from .manager.configs.base_config_views import BaseConfigView
    from .base_views import BaseTaskView

__all__ = ['BaseTask', 'SimpleTask', 'ComplexTask', 'RootTask',
           'BaseTaskView',
           'InterfaceableTaskMixin', 'TaskInterface',
           'InterfaceableInterfaceMixin', 'IInterface',
           'Tasks', 'Task', 'Interfaces', 'Interface', 'TaskConfig',
           'TaskFilter', 'SubclassTaskFilter', 'GroupTaskFilter',
           'MetadataTaskFilter',
           'BaseTaskConfig', 'BaseConfigView', 'build_task_from_config']
Add tasks/build_from_config to the public API.
Add tasks/build_from_config to the public API.
Python
bsd-3-clause
Ecpy/ecpy,Ecpy/ecpy
7e15896c14cbbab36862c8000b0c25c6a48fedcd
cref/structure/__init__.py
cref/structure/__init__.py
# import porter_paleale


def write_pdb(aa_sequence, fragment_angles, gap_length, filepath):
    """
    Generate pdb file with results

    :param aa_sequence: Amino acid sequence
    :param fragment_angles: Backbone torsion angles
    :param gap_length: Length of the gap at the sequence start and end
    :param filepath: Path to the file to save the pdb
    """
    pass
from peptide import PeptideBuilder

import Bio.PDB


def write_pdb(aa_sequence, fragment_angles, gap_length, filepath):
    """
    Generate pdb file with results

    :param aa_sequence: Amino acid sequence
    :param fragment_angles: Backbone torsion angles
    :param gap_length: Length of the gap at the sequence start and end
    :param filepath: Path to the file to save the pdb
    """
    phi, psi = zip(*fragment_angles)
    structure = PeptideBuilder.make_structure(aa_sequence, phi, psi)
    out = Bio.PDB.PDBIO()
    out.set_structure(structure)
    out.save(filepath)
Write pdb result to disk
Write pdb result to disk
Python
mit
mchelem/cref2,mchelem/cref2,mchelem/cref2
810a43c859264e3d5e1af8b43888bf89c06bee1d
ipybind/stream.py
ipybind/stream.py
# -*- coding: utf-8 -*-

import contextlib
import sys

try:
    import fcntl
except ImportError:
    fcntl = None

from ipybind.common import is_kernel
from ipybind.ext.wurlitzer import Wurlitzer

_fwd = None


class Forwarder(Wurlitzer):
    def __init__(self, handler=None):
        self._data_handler = handler if handler is not None else lambda x: x
        super().__init__(stdout=sys.stdout, stderr=sys.stderr)

    def _handle_data(self, data, stream):
        data = self._data_handler(self._decode(data))
        if data and stream:
            stream.write(data)

    def _handle_stdout(self, data):
        self._handle_data(data, self._stdout)

    def _handle_stderr(self, data):
        self._handle_data(data, self._stderr)


@contextlib.contextmanager
def suppress():
    if fcntl:
        with Forwarder(handler=lambda _: None):
            yield
    else:
        yield


@contextlib.contextmanager
def forward(handler=None):
    global _fwd
    if _fwd is None and is_kernel() and fcntl:
        with Forwarder(handler=handler):
            yield
    else:
        yield


def start_forwarding(handler=None):
    global _fwd
    if fcntl:
        if _fwd is None:
            _fwd = Forwarder(handler=handler)
            _fwd.__enter__()


def stop_forwarding(handler=None):
    global _fwd
    if fcntl:
        if _fwd is not None:
            _fwd.__exit__(None, None, None)
            _fwd = None
# -*- coding: utf-8 -*-

import contextlib
import sys

try:
    import fcntl
except ImportError:
    fcntl = None

from ipybind.common import is_kernel
from ipybind.ext.wurlitzer import Wurlitzer

_fwd = None


class Forwarder(Wurlitzer):
    def __init__(self, handler=None):
        self._data_handler = handler if handler is not None else lambda x: x
        super().__init__(stdout=sys.stdout, stderr=sys.stderr)

    def _handle_data(self, data, stream):
        data = self._data_handler(self._decode(data))
        if data and stream:
            stream.write(data)

    def _handle_stdout(self, data):
        self._handle_data(data, self._stdout)

    def _handle_stderr(self, data):
        self._handle_data(data, self._stderr)


@contextlib.contextmanager
def forward(handler=None):
    global _fwd
    if _fwd is None and is_kernel() and fcntl:
        with Forwarder(handler=handler):
            yield
    else:
        yield


def start_forwarding(handler=None):
    global _fwd
    if fcntl:
        if _fwd is None:
            _fwd = Forwarder(handler=handler)
            _fwd.__enter__()


def stop_forwarding(handler=None):
    global _fwd
    if fcntl:
        if _fwd is not None:
            _fwd.__exit__(None, None, None)
            _fwd = None
Remove suppress() as it's no longer required
Remove suppress() as it's no longer required
Python
mit
aldanor/ipybind,aldanor/ipybind,aldanor/ipybind
db19dfa17261c3d04de0202b2809ba8abb70326b
tests/unit/test_moxstubout.py
tests/unit/test_moxstubout.py
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
# Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslotest import base
from oslotest import moxstubout


class TestMoxStubout(base.BaseTestCase):

    def _stubable(self):
        pass

    def test_basic_stubout(self):
        f = self.useFixture(moxstubout.MoxStubout())
        before = TestMoxStubout._stubable
        f.mox.StubOutWithMock(TestMoxStubout, '_stubable')
        after = TestMoxStubout._stubable
        self.assertNotEqual(before, after)
        f.cleanUp()
        after2 = TestMoxStubout._stubable
        self.assertEqual(before, after2)
        f._clear_cleanups()
Fix build break with Fixtures 1.3
Fix build break with Fixtures 1.3

Our explicit call to cleanUp messes things up in latest fixture,
so we need to call _clear_cleanups to stop the test from breaking

Change-Id: I8ce2309a94736b47fb347f37ab4027857e19c8a8
Python
apache-2.0
openstack/oslotest,openstack/oslotest
5ac84c4e9d8d68b7e89ebf344d2c93a5f7ef4c4c
notebooks/galapagos_to_pandas.py
notebooks/galapagos_to_pandas.py
# coding: utf-8


def galapagos_to_pandas(in_filename='/home/ppzsb1/quickdata/GAMA_9_all_combined_gama_only_bd6.fits',
                        out_filename=None):
    """Convert a GALAPAGOS multi-band catalogue to a pandas-compatible HDF5 file"""
    from astropy.io import fits
    import pandas as pd
    import re
    import tempfile
    if out_filename is None:
        out_filename = re.sub('.fits$', '', in_filename)+'.h5'
    data = fits.getdata(in_filename, 1)
    with tempfile.NamedTemporaryFile() as tmp:
        with pd.get_store(tmp.name, mode='w') as tmpstore:
            for n in data.names:
                d = data[n]
                if len(d.shape) == 1:
                    new_cols = pd.DataFrame(d, columns=[n])
                else:
                    new_cols = pd.DataFrame(d, columns=['{}_{}'.format(n,b) for b in 'RUGIZYJHK'])
                tmpstore[n] = new_cols
            with pd.get_store(out_filename, mode='w', complib='blosc', complevel=5) as store:
                # Use format='table' on next line to save as a pytables table
                store.put('data', pd.concat([tmpstore[n] for n in data.names], axis=1))
    return pd.HDFStore(out_filename)
# coding: utf-8


def galapagos_to_pandas(in_filename='/home/ppzsb1/quickdata/GAMA_9_all_combined_gama_only_bd6.fits',
                        out_filename=None, bands='RUGIZYJHK'):
    """Convert a GALAPAGOS multi-band catalogue to a pandas-compatible HDF5 file"""
    from astropy.io import fits
    import pandas as pd
    import re
    import tempfile
    if out_filename is None:
        out_filename = re.sub('.fits$', '', in_filename)+'.h5'
    data = fits.getdata(in_filename, 1)
    with tempfile.NamedTemporaryFile() as tmp:
        with pd.get_store(tmp.name, mode='w') as tmpstore:
            for n in data.names:
                d = data[n]
                if len(d.shape) == 1:
                    new_cols = pd.DataFrame(d, columns=[n])
                else:
                    new_cols = pd.DataFrame(d, columns=['{}_{}'.format(n,b) for b in bands])
                tmpstore[n] = new_cols
            with pd.get_store(out_filename, mode='w', complib='blosc', complevel=5) as store:
                # Use format='table' on next line to save as a pytables table
                store.put('data', pd.concat([tmpstore[n] for n in data.names], axis=1))
    return pd.HDFStore(out_filename)
Allow specification of GALAPAGOS bands
Allow specification of GALAPAGOS bands
Python
mit
MegaMorph/megamorph-analysis
3136f7e37b339252d4c1f5642974e180070c452d
kirppu/signals.py
kirppu/signals.py
# -*- coding: utf-8 -*-

from django.db.models.signals import pre_save, pre_delete
from django.dispatch import receiver


@receiver(pre_save)
def save_handler(sender, instance, using, **kwargs):
    # noinspection PyProtectedMember
    if instance._meta.app_label in ("kirppu", "kirppuauth") and using != "default":
        raise ValueError("Saving objects in non-default database should not happen")


@receiver(pre_delete)
def delete_handler(sender, instance, using, **kwargs):
    # noinspection PyProtectedMember
    if instance._meta.app_label in ("kirppu", "kirppuauth") and using != "default":
        raise ValueError("Deleting objects from non-default database should not happen")
# -*- coding: utf-8 -*-

from django.db.models.signals import pre_migrate, post_migrate
from django.dispatch import receiver

ENABLE_CHECK = True


@receiver(pre_migrate)
def pre_migrate_handler(*args, **kwargs):
    global ENABLE_CHECK
    ENABLE_CHECK = False


@receiver(post_migrate)
def post_migrate_handler(*args, **kwargs):
    global ENABLE_CHECK
    ENABLE_CHECK = True


def save_handler(sender, instance, using, **kwargs):
    # noinspection PyProtectedMember
    if ENABLE_CHECK and instance._meta.app_label in ("kirppu", "kirppuauth") and using != "default":
        raise ValueError("Saving objects in non-default database should not happen")


def delete_handler(sender, instance, using, **kwargs):
    # noinspection PyProtectedMember
    if ENABLE_CHECK and instance._meta.app_label in ("kirppu", "kirppuauth") and using != "default":
        raise ValueError("Deleting objects from non-default database should not happen")
Allow migrations to be run on extra databases.
Allow migrations to be run on extra databases.

- Remove duplicate registration of save and delete signals.
  Already registered in apps.
Python
mit
jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu,jlaunonen/kirppu
a5baa5f333625244c1e0935745dadedb7df444c3
setup.py
setup.py
#!/usr/bin/env python

import os
from distutils.core import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name='whack',
    version='0.3.0',
    description='Utility for installing binaries from source with a single command',
    long_description=read("README"),
    author='Michael Williamson',
    url='http://github.com/mwilliamson/whack',
    scripts=["scripts/whack"],
    packages=['whack'],
    install_requires=['blah>=0.1.10,<0.2', 'requests', "catchy==0.1.0"],
)
#!/usr/bin/env python

import os
from distutils.core import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(
    name='whack',
    version='0.3.0',
    description='Utility for installing binaries from source with a single command',
    long_description=read("README"),
    author='Michael Williamson',
    url='http://github.com/mwilliamson/whack',
    scripts=["scripts/whack"],
    packages=['whack'],
    install_requires=['blah>=0.1.10,<0.2', 'requests>=1,<2', "catchy>=0.1.0,<0.2"],
)
Update install_requires to be more accurate
Update install_requires to be more accurate
Python
bsd-2-clause
mwilliamson/whack
fc6042cf57752ca139c52889ec5e00c02b618d0d
setup.py
setup.py
from distutils.core import setup, Command


class PyTest(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import sys
        import subprocess
        errno = subprocess.call([sys.executable, 'runtests.py'])
        raise SystemExit(errno)


with open('README.rst') as file:
    long_description = file.read()

setup(
    name='webpay',
    packages=['webpay'],
    version='0.1.0',
    author='webpay',
    author_email='administrators@webpay.jp',
    url='https://github.com/webpay/webpay-python',
    description='WebPay Python bindings',
    cmdclass={'test': PyTest},
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    requires=[
        'requests (== 2.0.1)'
    ]
)
from distutils.core import setup, Command


class PyTest(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import sys
        import subprocess
        errno = subprocess.call([sys.executable, 'runtests.py'])
        raise SystemExit(errno)


with open('README.rst') as file:
    long_description = file.read()

setup(
    name='webpay',
    packages=['webpay', 'webpay.api', 'webpay.model'],
    version='0.1.0',
    author='webpay',
    author_email='administrators@webpay.jp',
    url='https://github.com/webpay/webpay-python',
    description='WebPay Python bindings',
    cmdclass={'test': PyTest},
    long_description=long_description,
    classifiers=[
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: MIT License',
        'Intended Audience :: Developers',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: Implementation :: CPython',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    requires=[
        'requests (== 2.0.1)'
    ]
)
Add api and model to packages
Add api and model to packages
Python
mit
yamaneko1212/webpay-python
a4cacaba81dda523fb6e24f8a4382a334cc549a8
textinator.py
textinator.py
from PIL import Image
from os import get_terminal_size

default_palette = list('░▒▓█')

print(get_terminal_size())


def scale(val, src, dst):
    """
    Scale the given value from the scale of src to the scale of dst.
    """
    return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]


def value_to_char(value, palette=default_palette, value_range=(0, 256)):
    palette_range = (0, len(palette))
    mapped = int(scale(value, value_range, palette_range))
    return palette[mapped]


def convert_image(image_path):
    original = Image.open(image_path)
    width, height = original.size

    thumb = original.copy()
    thumb.thumbnail(get_terminal_size())

    bw = thumb.convert(mode="L")
    width, height = bw.size

    for y in range(height):
        line = ''
        for x in range(width):
            line += value_to_char(bw.getpixel((x, y)))
        print(line)

    bw.show()


if __name__ == '__main__':
    convert_image('doge.jpg')
import click
from PIL import Image


def scale(val, src, dst):
    """
    Scale the given value from the scale of src to the scale of dst.
    """
    return ((val - src[0]) / (src[1]-src[0])) * (dst[1]-dst[0]) + dst[0]


def value_to_char(value, palette, value_range=(0, 256)):
    palette_range = (0, len(palette))
    mapped = int(scale(value, value_range, palette_range))
    return palette[mapped]


@click.command()
@click.argument('image', type=click.File('rb'))
@click.argument('out', type=click.File('r'), default='-', required=False)
@click.option('-p', '--palette', default='█▓▒░ ',
              help="A custom palette for rendering images. Goes from dark to bright.")
@click.option('-w', '--width', type=click.INT,
              help="Width of output. If height is not given, the image will be proportionally scaled.")
@click.option('-h', '--height', type=click.INT,
              help="Height of output. If width is not given, the image will be proportionally scaled.")
def convert(image, palette, out, width, height):
    """
    Converts an input image to a text representation.
    Writes to stdout by default. Optionally takes another file as a second output.

    Supported filetypes: anything PIL supports.
    For JPEG etc., install the prerequisites.
    """
    original = Image.open(image)
    width, height = original.size

    thumb = original.copy()
    thumb.thumbnail(80)

    bw = thumb.convert(mode="L")
    width, height = bw.size

    for y in range(height):
        line = ''
        for x in range(width):
            pixel = bw.getpixel((x, y))
            line += value_to_char(pixel, palette)
        click.echo(line)
Add commandline interface with Click.
Add commandline interface with Click.
Python
mit
ijks/textinator
1e68f5f1fd565a812ef3fdf10c4c40649e3ef398
foundation/organisation/search_indexes.py
foundation/organisation/search_indexes.py
from haystack import indexes

from .models import Person, Project, WorkingGroup, NetworkGroup


class PersonIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    twitter = indexes.CharField(model_attr='twitter')
    url = indexes.CharField(model_attr='url')

    def get_model(self):
        return Person


class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    twitter = indexes.CharField(model_attr='twitter')
    homepage_url = indexes.CharField(model_attr='homepage_url')
    mailinglist_url = indexes.CharField(model_attr='mailinglist_url')
    sourcecode_url = indexes.CharField(model_attr='sourcecode_url')

    def get_model(self):
        return Project


class WorkingGroupIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    incubation = indexes.BooleanField(model_attr='incubation')

    def get_model(self):
        return WorkingGroup


class NetworkGroupIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    mailinglist = indexes.CharField(model_attr='mailinglist')
    homepage = indexes.CharField(model_attr='homepage')
    twitter = indexes.CharField(model_attr='twitter')

    def get_model(self):
        return NetworkGroup
from haystack import indexes

from .models import Person, Project, WorkingGroup, NetworkGroup


class PersonIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    twitter = indexes.CharField(model_attr='twitter')
    url = indexes.CharField(model_attr='url')

    def get_model(self):
        return Person


class ProjectIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    twitter = indexes.CharField(model_attr='twitter')
    homepage_url = indexes.CharField(model_attr='homepage_url')
    mailinglist_url = indexes.CharField(model_attr='mailinglist_url')
    sourcecode_url = indexes.CharField(model_attr='sourcecode_url')

    def get_model(self):
        return Project


class WorkingGroupIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    incubation = indexes.BooleanField(model_attr='incubation')

    def get_model(self):
        return WorkingGroup


class NetworkGroupIndex(indexes.SearchIndex, indexes.Indexable):
    text = indexes.CharField(document=True, use_template=True)
    twitter = indexes.CharField(model_attr='twitter')
    homepage_url = indexes.CharField(model_attr='homepage_url')
    mailinglist_url = indexes.CharField(model_attr='mailinglist_url')

    def get_model(self):
        return NetworkGroup
Fix references to old model fields
organisation: Fix references to old model fields
Python
mit
okfn/foundation,okfn/foundation,okfn/foundation,okfn/website,MjAbuz/foundation,okfn/website,okfn/foundation,okfn/website,okfn/website,MjAbuz/foundation,MjAbuz/foundation,MjAbuz/foundation
82239a844462e721c7034ec42cb4905662f4efb4
bin/mergeSegToCtm.py
bin/mergeSegToCtm.py
#!/usr/bin/python
# vim : set fileencoding=utf-8 :
#
# mergeSegToCtm.py
#
# Enhance the Bck file by adding extra fields with the diarisation
# information
#

import sys

with open(sys.argv[1], 'r', encoding='iso-8859-1') as seg:
    with open(sys.argv[2], 'r', encoding='iso-8859-1') as ctm:
        # For each frame, we will create an entry in a dictionary
        # It will help the lookup later on
        # We don't really care about memory issues here, should we?
        frames = {}
        for line in seg:
            values = line.split()
            start = int(values[2])
            duration = int(values[3])
            for i in range(start, start + duration):
                frames[i] = values[4], values[5], values[7]
        for line in ctm:
            values = line.split()
            # Use the same start format as in the .seg file
            start = int(float(values[2])*100)
            print(line.strip(), end="")
            if start in frames:
                print(" " + frames[start][0] + " " + frames[start][1] + " " + frames[start][2])
            else:
                print(" N/A N/A N/A")
#!/usr/bin/python
# vim : set fileencoding=utf-8 :
#
# mergeSegToCtm.py
#
# Enhance the CTM file by adding extra fields with the diarisation
# information
#
# First argument is the seg file
# Second argument is the ctm file
#

import sys

with open(sys.argv[1], 'r', encoding='iso-8859-1') as seg:
    with open(sys.argv[2], 'r', encoding='iso-8859-1') as ctm:
        # For each frame, we will create an entry in a dictionary
        # It will help the lookup later on
        # We don't really care about memory issues here, should we?
        frames = {}
        for line in seg:
            values = line.split()
            start = int(values[2])
            duration = int(values[3])
            for i in range(start, start + duration):
                frames[i] = values[4], values[5], values[7]
        for line in ctm:
            values = line.split()
            # Use the same start format as in the .seg file
            start = int(float(values[2])*100)
            print(line.strip(), end="")
            if start in frames:
                print(" " + frames[start][0] + " " + frames[start][1] + " " + frames[start][2])
            else:
                print(" N/A N/A N/A")
Fix typo in the script
Fix typo in the script
Python
mit
SG-LIUM/SGL-SpeechWeb-Demo,SG-LIUM/SGL-SpeechWeb-Demo,bsalimi/speech-recognition-api,SG-LIUM/SGL-SpeechWeb-Demo,bsalimi/speech-recognition-api,bsalimi/speech-recognition-api,bsalimi/speech-recognition-api
0ee59d04cb2cbe93a3f4f87a34725fbcd1a66fc0
core/Reader.py
core/Reader.py
# coding: utf8

from io import StringIO
from collections import deque


class StreamReader:
    def __init__(self, *args, stream_class=StringIO, **kwargs):
        self.streamClass = stream_class
        self.args = args
        self.kwargs = kwargs

    def read(self, parsing_pipeline):
        parsing_pipeline.reset()
        stream = self.streamClass(*self.args, **self.kwargs)
        min_position = parsing_pipeline.get_min_position()
        max_position = parsing_pipeline.get_max_position()
        length = max_position - min_position + 1
        current_position = -min_position
        ar_index = list()
        element = deque(stream.read(length))
        while True:
            result = parsing_pipeline.check(element, ref_position=-min_position)
            if result is not None and result[0]:
                ar_index.append((current_position, element[-min_position]))
            next_character = stream.read(1)
            current_position += 1
            if next_character and result is not None:
                element.popleft()
                element.append(next_character)
            else:
                break
        stream.close()
        return ar_index
# coding: utf8

from io import StringIO
from collections import deque


class StreamReader:
    def __init__(self, *args, stream_class=StringIO, **kwargs):
        self.streamClass = stream_class
        self.args = args
        self.kwargs = kwargs

    def read(self, parsing_pipeline):
        parsing_pipeline.reset()
        min_position = parsing_pipeline.get_min_position()
        max_position = parsing_pipeline.get_max_position()
        length = max_position - min_position + 1
        stream = self.streamClass(*self.args, **self.kwargs)
        current_position = -min_position
        ar_index = list()
        element = deque(stream.read(length))
        if len(element) == length:
            while True:
                result = parsing_pipeline.check(element, ref_position=-min_position)
                if result is not None and result[0]:
                    ar_index.append((current_position, element[-min_position]))
                next_character = stream.read(1)
                current_position += 1
                if next_character and result is not None:
                    element.popleft()
                    element.append(next_character)
                else:
                    break
            stream.close()
            return ar_index
        else:
            stream.close()
            raise ValueError("Not enough characters to parse : " + str(len(element)))
Add not enough characters condition
Add not enough characters condition
Python
mit
JCH222/matriochkas
6d32f609379febe2fdad690adc75a90e26b8d416
backend/backend/serializers.py
backend/backend/serializers.py
from rest_framework import serializers

from .models import Animal


class AnimalSerializer(serializers.ModelSerializer):
    class Meta:
        model = Animal
        fields = ('id', 'name', 'dob', 'gender', 'active', 'own', 'father', 'mother')
from rest_framework import serializers

from .models import Animal


class AnimalSerializer(serializers.ModelSerializer):
    class Meta:
        model = Animal
        fields = ('id', 'name', 'dob', 'gender', 'active', 'own', 'father', 'mother')

    def validate_father(self, father):
        if (father.gender != Animal.MALE):
            raise serializers.ValidationError('The father has to be male.')

    def validate_mother(self, mother):
        if (mother.gender != Animal.FEMALE):
            raise serializers.ValidationError('The mother has to be female.')

    def validate_dob(self, dob):
        father_id = self.context['request'].data['father']
        if (father_id):
            father = Animal.objects.get(pk = father_id)
            if (father and father.dob > dob):
                raise serializers.ValidationError('Animal can\'t be older than its father')
        mother_id = self.context['request'].data['mother']
        if (mother_id):
            mother = Animal.objects.get(pk = mother_id)
            if (mother and mother.dob > dob):
                raise serializers.ValidationError('Animal can\'t be older than its mother')
Add validator that selected father is male and mother is female. Validate that the animal is younger than its parents.
Add validator that selected father is male and mother is female. Validate that the animal is younger than its parents.
Python
apache-2.0
mmlado/animal_pairing,mmlado/animal_pairing
f2cd1d531a1cefdc5da4b418c866be0d76aa349b
basil_common/str_support.py
basil_common/str_support.py
def as_int(value):
    try:
        return int(value)
    except ValueError:
        return None
def as_int(value):
    try:
        return int(value)
    except ValueError:
        return None


def urljoin(*parts):
    url = parts[0]
    for p in parts[1:]:
        if url[-1] != '/':
            url += '/'
        url += p
    return url
Add url join which serves our needs
Add url join which serves our needs

Existing functions in common libraries add extra slashes.
Python
apache-2.0
eve-basil/common
a40c617ea605bd667a9906f6c9400fc9562d7c0a
salt/daemons/flo/reactor.py
salt/daemons/flo/reactor.py
# -*- coding: utf-8 -*-
'''
Start the reactor!
'''
# Import salt libs
import salt.utils.reactor

# Import ioflo libs
import ioflo.base.deeding


@ioflo.base.deeding.deedify(
    'SaltRaetReactorFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def reactor_fork(self):
    '''
    Add a reactor object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.reactor.Reactor,
        args=(self.opts.value,))
# -*- coding: utf-8 -*-
'''
Start the reactor!
'''
# Import salt libs
import salt.utils.reactor
import salt.utils.event

# Import ioflo libs
import ioflo.base.deeding


@ioflo.base.deeding.deedify(
    'SaltRaetReactorFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def reactor_fork(self):
    '''
    Add a reactor object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.reactor.Reactor,
        args=(self.opts.value,))


@ioflo.base.deeding.deedify(
    'SaltRaetEventReturnFork',
    ioinit={
        'opts': '.salt.opts',
        'proc_mgr': '.salt.usr.proc_mgr'})
def event_return_fork(self):
    '''
    Add a reactor object to the process manager
    '''
    self.proc_mgr.add_process(
        salt.utils.event.EventReturn,
        args=(self.opts.value,))
Add event return fork behavior
Add event return fork behavior
Python
apache-2.0
saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt
14e9bda5de10ef5a1c6dd96692d083f4e0f16025
python/ql/test/experimental/library-tests/frameworks/yaml/Decoding.py
python/ql/test/experimental/library-tests/frameworks/yaml/Decoding.py
import yaml
from yaml import SafeLoader

yaml.load(payload) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.load(payload, SafeLoader) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=SafeLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=yaml.BaseLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML

yaml.safe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput

yaml.load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.safe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
import yaml

# Unsafe:
yaml.load(payload) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.load(payload, yaml.Loader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.unsafe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput

# Safe
yaml.load(payload, yaml.SafeLoader) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, Loader=yaml.SafeLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.load(payload, yaml.BaseLoader) # $decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.safe_load(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML

# load_all variants
yaml.load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.safe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML
yaml.unsafe_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
yaml.full_load_all(payload) # $ decodeInput=payload decodeOutput=Attribute() decodeFormat=YAML decodeMayExecuteInput
Refactor PyYAML tests a bit
Python: Refactor PyYAML tests a bit
Python
mit
github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql,github/codeql
0997055c591d7bd4ad4334874292f8977ba778bf
cashew/exceptions.py
cashew/exceptions.py
class CashewException(Exception):
    pass


class InternalCashewException(CashewException):
    pass


class UserFeedback(CashewException):
    pass


class InactivePlugin(UserFeedback):
    def __init__(self, plugin_instance_or_alias):
        if isinstance(plugin_instance_or_alias, basestring):
            self.message = plugin_instance_or_alias
        else:
            self.message = plugin_instance_or_alias.alias


class NoPlugin(UserFeedback):
    pass
class CashewException(Exception):
    pass


class InternalCashewException(CashewException):
    pass


class UserFeedback(CashewException):
    pass


class InactivePlugin(UserFeedback):
    def __init__(self, plugin_instance_or_alias):
        if isinstance(plugin_instance_or_alias, basestring):
            self.alias = plugin_instance_or_alias
        else:
            self.alias = plugin_instance_or_alias.alias

    def __str__(self):
        return "%s is inactive. Some additional software might need to be installed." % (self.alias)


class NoPlugin(UserFeedback):
    pass
Improve error message when alias not available.
Improve error message when alias not available.
Python
mit
dexy/cashew
2d82280460c50d50f6be8d8c8405506b4706cd8a
securethenews/blog/tests.py
securethenews/blog/tests.py
from django.test import TestCase

# Create your tests here.
import datetime

from django.test import TestCase

from wagtail.wagtailcore.models import Page

from .models import BlogIndexPage, BlogPost


class BlogTest(TestCase):
    def setUp(self):
        home_page = Page.objects.get(slug='home')

        blog_index_page = BlogIndexPage(
            title='Blog',
            slug='blog',
            show_in_menus=True
        )
        home_page.add_child(instance=blog_index_page)

        blog_posts = [
            BlogPost(title='First Blog Post',
                     slug='first-blog-post',
                     date=datetime.date.today(),
                     byline='Author'),
            BlogPost(title='Second Blog Post',
                     slug='second-blog-post',
                     date=datetime.date.today(),
                     byline='Author')
        ]

        for blog_post in blog_posts:
            blog_index_page.add_child(instance=blog_post)

    def test_ordering_of_same_day_blogs_on_index(self):
        """Verify that blog posts posted on the same day are ordered
        with the most recent at the top of the page."""

        blog_index = BlogIndexPage.objects.first()

        self.assertEqual(blog_index.posts[0].title, 'Second Blog Post')
        self.assertEqual(blog_index.posts[1].title, 'First Blog Post')
Add unit test to verify that blog posts are ordered by most recent
Add unit test to verify that blog posts are ordered by most recent

Verifies that blog posts are ordered by most recent first even if the
blog posts are posted on the same day.
Python
agpl-3.0
freedomofpress/securethenews,freedomofpress/securethenews,freedomofpress/securethenews,freedomofpress/securethenews
42221c7b858951376ba59385fa42cac11d542fdd
plugin/script/sphinxexampleae.py
plugin/script/sphinxexampleae.py
def process( node_name, handle ):

    handle.editorTemplate( beginScrollLayout=True )

    handle.editorTemplate( beginLayout="Float Attributes" )
    handle.editorTemplate( "floatAttr", addControl=True )
    handle.editorTemplate( endLayout=True )

    handle.editorTemplate( addExtraControls=True )
    handle.editorTemplate( endScrollLayout=True )

    handle.editorTemplate( suppress="caching" )
    handle.editorTemplate( suppress="nodeState" )


def ae_template( node_name ):

    from maya import cmds
    maya_handle = MayaHandle( cmds )
    process( node_name, maya_handle )
float_attr_help = """
This is the *annotation* for the floatAttr attribute

Here are some bullet points pertaining to this attribute

- The help is written in rst
- I don't know what else to put in the list
"""

string_attr_help = """
This is the *annotation* for the stringAttr attribute
"""


def process( node_name, handle ):

    handle.editorTemplate( beginScrollLayout=True )

    handle.editorTemplate( beginLayout="Float Attributes" )
    handle.editorTemplate( "floatAttr", addControl=True, annotation=float_attr_help )
    handle.editorTemplate( endLayout=True )

    handle.editorTemplate( beginLayout="String Attributes" )
    handle.editorTemplate( "stringAttr", addControl=True, annotation=string_attr_help )
    handle.editorTemplate( endLayout=True )

    handle.editorTemplate( addExtraControls=True )
    handle.editorTemplate( endScrollLayout=True )

    handle.editorTemplate( suppress="caching" )
    handle.editorTemplate( suppress="nodeState" )


def ae_template( node_name ):

    from maya import cmds
    maya_handle = MayaHandle( cmds )
    process( node_name, maya_handle )
Add another attribute and some annotations
Add another attribute and some annotations

We write the annotations in rst for the moment.
Python
bsd-3-clause
michaeljones/sphinx-maya-node
59030daa60a4d2006cae6192219071e2a8017364
test/conftest.py
test/conftest.py
from os.path import join, dirname, abspath

default_base_dir = join(dirname(abspath(__file__)), 'completion')

import run


def pytest_addoption(parser):
    parser.addoption(
        "--base-dir", default=default_base_dir,
        help="Directory in which integration test case files locate.")
    parser.addoption(
        "--thirdparty",
        help="Include integration tests that requires third party modules.")


def pytest_generate_tests(metafunc):
    """
    :type metafunc: _pytest.python.Metafunc
    """
    if 'case' in metafunc.fixturenames:
        base_dir = metafunc.config.option.base_dir
        test_files = {}
        thirdparty = metafunc.config.option.thirdparty
        metafunc.parametrize(
            'case',
            run.collect_dir_tests(base_dir, test_files, thirdparty))
from os.path import join, dirname, abspath

default_base_dir = join(dirname(abspath(__file__)), 'completion')

import run


def pytest_addoption(parser):
    parser.addoption(
        "--base-dir", default=default_base_dir,
        help="Directory in which integration test case files locate.")
    parser.addoption(
        "--test-files", "-T", default=[], action='append',
        help=(
            "Specify test files using FILE_NAME[:LINE[,LINE[,...]]]. "
            "For example: -T generators.py:10,13,19. "
            "Note that you can use -m to specify the test case by id."))
    parser.addoption(
        "--thirdparty",
        help="Include integration tests that requires third party modules.")


def parse_test_files_option(opt):
    """
    Parse option passed to --test-files into a key-value pair.

    >>> parse_test_files_option('generators.py:10,13,19')
    ('generators.py', [10, 13, 19])
    """
    opt = str(opt)
    if ':' in opt:
        (f_name, rest) = opt.split(':', 1)
        return (f_name, list(map(int, rest.split(','))))
    else:
        return (opt, [])


def pytest_generate_tests(metafunc):
    """
    :type metafunc: _pytest.python.Metafunc
    """
    if 'case' in metafunc.fixturenames:
        base_dir = metafunc.config.option.base_dir
        test_files = dict(map(parse_test_files_option,
                              metafunc.config.option.test_files))
        thirdparty = metafunc.config.option.thirdparty
        metafunc.parametrize(
            'case',
            run.collect_dir_tests(base_dir, test_files, thirdparty))
Add --test-files option to py.test
Add --test-files option to py.test

At this point, py.test should be equivalent to test/run.py
Python
mit
tjwei/jedi,jonashaag/jedi,mfussenegger/jedi,jonashaag/jedi,dwillmer/jedi,WoLpH/jedi,tjwei/jedi,mfussenegger/jedi,dwillmer/jedi,flurischt/jedi,WoLpH/jedi,flurischt/jedi
4aa1623e08519127a06f49060d546c5ef18e906c
vcs/models.py
vcs/models.py
from django.db import models


class Activity(models.Model):
    group = models.CharField(max_length=4)
    grouptype = models.TextField()
    groupdetail = models.TextField()
    details = models.TextField()
    disabled = models.BooleanField()
    time = models.DecimalField(decimal_places=2, max_digits=10)

    unique_together = (("group", "grouptype", "disabled", "time"),)


class ActivityEntry(models.Model):
    user = models.ManyToManyField(
        'tracker.Tbluser',
        related_name="user_foreign"
    )
    activity = models.ManyToManyField(
        Activity,
        related_name="activity_foreign"
    )
    amount = models.BigIntegerField()

    def time(self):
        return self.activity.time * self.amount
from django.db import models


class Activity(models.Model):
    group = models.CharField(max_length=4)
    grouptype = models.TextField()
    groupdetail = models.TextField()
    details = models.TextField()
    disabled = models.BooleanField()
    time = models.DecimalField(decimal_places=2, max_digits=10)

    unique_together = (("group", "grouptype", "disabled", "time"),)


class ActivityEntry(models.Model):
    user = models.OneToOneField(
        'tracker.Tbluser',
        related_name="user_foreign"
    )
    activity = models.OneToOneField(
        Activity,
        related_name="activity_foreign"
    )
    amount = models.BigIntegerField()

    def time(self):
        return self.activity.time * self.amount
Use a OneToMany field for the activity joiner.
Use a OneToMany field for the activity joiner.
Python
bsd-3-clause
AeroNotix/django-timetracker,AeroNotix/django-timetracker,AeroNotix/django-timetracker
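A note on the change above: the practical difference between the two field types, in miniature (model and app names here are illustrative, not from the timetracker project):

from django.db import models

class Profile(models.Model):
    # OneToOneField is a ForeignKey with unique=True: exactly one Profile
    # per user, and user.profile yields an object rather than a manager.
    # The old ManyToManyField created a join table allowing many rows.
    user = models.OneToOneField('auth.User', on_delete=models.CASCADE)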
98e824af43b729eb5b5737597506a5ca87312814
apps/polls/tests.py
apps/polls/tests.py
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2)
import datetime

from django.test import TestCase
from django.utils import timezone

from apps.polls.models import Poll


class PollMethodTests(TestCase):

    def test_was_published_recently_with_future_poll(self):
        """
        was_published_recently() should return False for polls whose
        pub_date is in the future
        """
        future_poll = Poll(pub_date=timezone.now() + datetime.timedelta(days=30))

        self.assertEqual(future_poll.was_published_recently(), False)
Create a test to expose the bug
Create a test to expose the bug
Python
bsd-3-clause
hoale/teracy-tutorial,hoale/teracy-tutorial
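For context, a sketch of the kind of buggy model method a test like this exposes — the method name matches the test, but the body below is the well-known Django-tutorial bug, assumed rather than taken from this repo:

import datetime

from django.db import models
from django.utils import timezone

class Poll(models.Model):
    question = models.CharField(max_length=200)
    pub_date = models.DateTimeField('date published')

    def was_published_recently(self):
        # Buggy: a pub_date 30 days in the future also satisfies this,
        # which is exactly what the new test catches.
        return self.pub_date >= timezone.now() - datetime.timedelta(days=1)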
990008a6fb2788d25445ee9ec21375515527bdc8
nodeconductor/backup/utils.py
nodeconductor/backup/utils.py
import pkg_resources from django.utils import six from django.utils.lru_cache import lru_cache @lru_cache() def get_backup_strategies(): entry_points = pkg_resources.get_entry_map('nodeconductor').get('backup_strategies', {}) strategies = dict((name.upper(), entry_point.load()) for name, entry_point in entry_points.iteritems()) return strategies def has_object_backup_strategy(obj): strategies = get_backup_strategies() return obj.__class__.__name__.upper() in strategies def get_object_backup_strategy(obj): strategies = get_backup_strategies() return strategies[obj.__class__.__name__.upper()] def get_backupable_models(): strategies = get_backup_strategies() return [strategy.get_model() for strategy in six.itervalues(strategies)]
import pkg_resources from django.utils import six from django.utils.lru_cache import lru_cache @lru_cache() def get_backup_strategies(): entry_points = pkg_resources.get_entry_map('nodeconductor').get('backup_strategies', {}) strategies = {name.upper(): entry_point.load() for name, entry_point in six.iteritems(entry_points)} return strategies def has_object_backup_strategy(obj): strategies = get_backup_strategies() return obj.__class__.__name__.upper() in strategies def get_object_backup_strategy(obj): strategies = get_backup_strategies() return strategies[obj.__class__.__name__.upper()] def get_backupable_models(): strategies = get_backup_strategies() return [strategy.get_model() for strategy in six.itervalues(strategies)]
Use new comprehension syntax and six (nc-263)
Use new comprehension syntax and six (nc-263)
Python
mit
opennode/nodeconductor,opennode/nodeconductor,opennode/nodeconductor
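The 2/3-compatible comprehension pattern from the change above, reduced to a self-contained illustration (the entry-point mapping is a stand-in):

import six

entry_points = {'mysql': object(), 'postgres': object()}  # stand-in mapping
# six.iteritems works on both Python 2 and 3, unlike dict.iteritems(),
# which Python 3 removed; the comprehension replaces the old dict((k, v) ...) call.
strategies = {name.upper(): ep for name, ep in six.iteritems(entry_points)}
assert set(strategies) == {'MYSQL', 'POSTGRES'}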
f376eb478783448b5e372c2c4a7f7ee0e4891e88
examples/python/values.py
examples/python/values.py
#! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * a = AtomSpace() set_type_ctor_atomspace(a) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) print('get value from atom: {}'.format(boundingBox.get_value(featureKey)))
#! /usr/bin/env python # # values.py # """ An example of using values via Python API """ from opencog.atomspace import AtomSpace, TruthValue from opencog.type_constructors import * from opencog.scheme_wrapper import scheme_eval_v atomspace = AtomSpace() set_type_ctor_atomspace(atomspace) a = FloatValue([1.0, 2.0, 3.0]) b = FloatValue([1.0, 2.0, 3.0]) c = FloatValue(1.0) print('{} == {}: {}'.format(a, b, a == b)) print('{} == {}: {}'.format(a, c, a == c)) featureValue = FloatValue([1.0, 2]) print('new value created: {}'.format(featureValue)) boundingBox = ConceptNode('boundingBox') featureKey = PredicateNode('features') boundingBox.set_value(featureKey, featureValue) print('set value to atom: {}'.format(boundingBox)) print('get value from atom: {}'.format(boundingBox.get_value(featureKey))) value = scheme_eval_v(atomspace, '(ValueOf (ConceptNode "boundingBox") ' '(PredicateNode "features"))') value = boundingBox.get_value(featureKey) print('get value from atom using Scheme program: {}'.format(value))
Add example of scheme_eval_v usage
Add example of scheme_eval_v usage
Python
agpl-3.0
rTreutlein/atomspace,rTreutlein/atomspace,rTreutlein/atomspace,AmeBel/atomspace,rTreutlein/atomspace,AmeBel/atomspace,AmeBel/atomspace,AmeBel/atomspace,rTreutlein/atomspace,AmeBel/atomspace
385d7a5734e91217e9d8c0464327dedb30a69621
profile_python.py
profile_python.py
# coding: utf8 # Copyright 2013-2015 Vincent Jacques <vincent@vincent-jacques.net> import cProfile as profile import pstats import cairo from DrawTurksHead import TurksHead stats_filename = "profiling/profile_python.stats" img = cairo.ImageSurface(cairo.FORMAT_RGB24, 3200, 2400) ctx = cairo.Context(img) ctx.set_source_rgb(1, 1, 0xBF / 255.) ctx.paint() ctx.translate(1600, 1200) ctx.scale(1, -1) profile.run("TurksHead(24, 18, 190, 1190, 20).draw(ctx)", stats_filename) img.write_to_png("profiling/reference.png") p = pstats.Stats(stats_filename) p.strip_dirs().sort_stats("cumtime").print_stats().print_callees()
# coding: utf8 # Copyright 2013-2015 Vincent Jacques <vincent@vincent-jacques.net> import cProfile as profile import pstats import cairo from DrawTurksHead import TurksHead stats_filename = "/tmp/profile.stats" img = cairo.ImageSurface(cairo.FORMAT_RGB24, 3200, 2400) ctx = cairo.Context(img) ctx.set_source_rgb(1, 1, 0xBF / 255.) ctx.paint() ctx.translate(1600, 1200) ctx.scale(1, -1) profile.run("TurksHead(24, 18, 190, 1190, 20).draw(ctx)", stats_filename) img.write_to_png("profiling/reference.png") p = pstats.Stats(stats_filename) p.strip_dirs().sort_stats("cumtime").print_stats().print_callees()
Change name of stats file
Change name of stats file
Python
mit
jacquev6/DrawTurksHead,jacquev6/DrawTurksHead,jacquev6/DrawTurksHead
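The profile.run/pstats workflow used above, as a minimal standalone sketch; the profiled function is a stand-in for the turk's-head drawing:

import cProfile as profile
import pstats

def work():  # stand-in for TurksHead(...).draw(ctx)
    return sum(i * i for i in range(100000))

stats_filename = '/tmp/profile.stats'
profile.run('work()', stats_filename)
pstats.Stats(stats_filename).strip_dirs().sort_stats('cumtime').print_stats(5)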
dcd36fab023ac2530cbfa17449e3ce8f61ad6bdc
ssl-cert-parse.py
ssl-cert-parse.py
#!/usr/bin/env python3 import datetime import ssl import OpenSSL def GetCert(SiteName, Port): return ssl.get_server_certificate((SiteName, Port)) def ParseCert(CertRaw): Cert = OpenSSL.crypto.load_certificate( OpenSSL.crypto.FILETYPE_PEM, CertRaw) print(str(Cert.get_subject())[18:-2]) print(datetime.datetime.strptime(str(Cert.get_notBefore())[2:-1], '%Y%m%d%H%M%SZ')) print(datetime.datetime.strptime(str(Cert.get_notAfter())[2:-1], '%Y%m%d%H%M%SZ')) print(str(Cert.get_issuer())[18:-2]) CertRaw = GetCert('some.domain.tld', 443) print(CertRaw) ParseCert(CertRaw)
#!/usr/bin/env python3

import datetime
import ssl

import OpenSSL


def GetCert(SiteName, Port):
    return ssl.get_server_certificate((SiteName, Port))


def ParseCert(CertRaw):
    Cert = OpenSSL.crypto.load_certificate(
        OpenSSL.crypto.FILETYPE_PEM, CertRaw)
    CertSubject = str(Cert.get_subject())[18:-2]
    CertStartDate = datetime.datetime.strptime(str(Cert.get_notBefore())[2:-1],
                                               '%Y%m%d%H%M%SZ')
    CertEndDate = datetime.datetime.strptime(str(Cert.get_notAfter())[2:-1],
                                             '%Y%m%d%H%M%SZ')
    CertIssuer = str(Cert.get_issuer())[18:-2]
    return {'CertSubject': CertSubject, 'CertStartDate': CertStartDate,
            'CertEndDate': CertEndDate, 'CertIssuer': CertIssuer}


CertRaw = GetCert('some.domain.tld', 443)
print(CertRaw)

Out = ParseCert(CertRaw)
print(Out)
print(Out['CertSubject'])
print(Out['CertStartDate'])
Fix ParseCert() function, add variables, add a return statement
Fix ParseCert() function, add variables, add a return statement
Python
apache-2.0
ivuk/ssl-cert-parse
572dca82aab583e91e5b8402d1334bae55244d16
hs_tracking/middleware.py
hs_tracking/middleware.py
from .models import Session class Tracking(object): """The default tracking middleware logs all successful responses as a 'visit' variable with the URL path as its value.""" def process_response(self, request, response): if response.status_code == 200: session = Session.objects.for_request(request) session.record("visit", request.path) return response
from .models import Session class Tracking(object): """The default tracking middleware logs all successful responses as a 'visit' variable with the URL path as its value.""" def process_response(self, request, response): if request.path.startswith('/heartbeat/'): return response if response.status_code == 200: session = Session.objects.for_request(request) session.record("visit", request.path) return response
Disable use tracking of all heartbeat app urls.
Disable use tracking of all heartbeat app urls.
Python
bsd-3-clause
RENCI/xDCIShare,FescueFungiShare/hydroshare,ResearchSoftwareInstitute/MyHPOM,RENCI/xDCIShare,FescueFungiShare/hydroshare,hydroshare/hydroshare,hydroshare/hydroshare,FescueFungiShare/hydroshare,ResearchSoftwareInstitute/MyHPOM,FescueFungiShare/hydroshare,hydroshare/hydroshare,RENCI/xDCIShare,RENCI/xDCIShare,ResearchSoftwareInstitute/MyHPOM,ResearchSoftwareInstitute/MyHPOM,RENCI/xDCIShare,ResearchSoftwareInstitute/MyHPOM,hydroshare/hydroshare,FescueFungiShare/hydroshare,hydroshare/hydroshare
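The early-return guard above generalizes to any set of excluded prefixes; a sketch with hypothetical prefixes and a stubbed-out tracking call:

EXCLUDED_PREFIXES = ('/heartbeat/', '/static/')  # illustrative, not from the project

def record_visit(request):
    pass  # stand-in for the Session bookkeeping in the real middleware

class Tracking(object):
    def process_response(self, request, response):
        # Bail out before any tracking work for excluded URL prefixes;
        # str.startswith accepts a tuple of candidates.
        if request.path.startswith(EXCLUDED_PREFIXES):
            return response
        if response.status_code == 200:
            record_visit(request)
        return response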
93d3a2f19cfb3ef9ae62d04ce24901df81bafc3e
luigi/rfam/families_csv.py
luigi/rfam/families_csv.py
# -*- coding: utf-8 -*- """ Copyright [2009-2017] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import attr import luigi from rfam import utils from rfam.csv_writer import CsvWriter class FamiliesCSV(CsvWriter): headers = [ 'id', 'name', 'description', 'clan', 'seed_count', 'full_count', 'length', 'domain', 'is_supressed', 'rna_type', ] def data(self): for family in utils.load_families(): data = attr.asdict(family) data['is_suppressed'] = int(family.is_suppressed) data['rna_type'] = family.guess_insdc() yield data if __name__ == '__main__': luigi.run(main_task_cls=FamiliesCSV)
# -*- coding: utf-8 -*- """ Copyright [2009-2017] EMBL-European Bioinformatics Institute Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import attr import luigi from rfam import utils from rfam.csv_writer import CsvWriter class FamiliesCSV(CsvWriter): headers = [ 'id', 'name', 'description', 'clan', 'seed_count', 'full_count', 'length', 'domain', 'is_suppressed', 'rna_type', ] def data(self): for family in utils.load_families(): data = attr.asdict(family) data['name'] = family.pretty_name data['is_suppressed'] = int(family.is_suppressed) data['rna_type'] = family.guess_insdc() yield data if __name__ == '__main__': luigi.run(main_task_cls=FamiliesCSV)
Fix typo and use correct name
Fix typo and use correct name We want to use the pretty name, not the standard one for import. In addition, I fix a typo in the name of the is_suppressed column.
Python
apache-2.0
RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline,RNAcentral/rnacentral-import-pipeline
49155373b9eea3812c295c9d89c40a7c9c1c1c13
migrations/versions/20170214191843_pubmed_rename_identifiers_list_to_article_ids.py
migrations/versions/20170214191843_pubmed_rename_identifiers_list_to_article_ids.py
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from alembic import op # revision identifiers, used by Alembic. revision = '3dbb46f23ed7' down_revision = u'0087dc1eb534' branch_labels = None depends_on = None def upgrade(): op.alter_column('pubmed', 'identifiers_list', new_column_name='article_ids') def downgrade(): op.alter_column('pubmed', 'article_ids', new_column_name='identifiers_list')
# -*- coding: utf-8 -*- from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from alembic import op # revision identifiers, used by Alembic. revision = '3dbb46f23ed7' down_revision = u'b32475938a2d' branch_labels = None depends_on = None def upgrade(): op.alter_column('pubmed', 'identifiers_list', new_column_name='article_ids') def downgrade(): op.alter_column('pubmed', 'article_ids', new_column_name='identifiers_list')
Fix migrations to have a single path
Fix migrations to have a single path As it took us a while to merge some PRs, the migrations ended branching in two parts. This commit fixes to use a single path. It shouldn't cause any issues, as we're only messing with the `down` migrations and the migrations aren't dependent on each other.
Python
mit
opentrials/scraper,opentrials/collectors
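What pointing down_revision at b32475938a2d accomplishes, sketched as the resulting revision chain (Alembic orders migrations by walking these links):

# Before: two heads, because two migrations shared the same down_revision.
#
#   0087dc1eb534 <-- 3dbb46f23ed7
#   0087dc1eb534 <-- b32475938a2d        (branched history)
#
# After: a single linear path Alembic can upgrade and downgrade through:
#
#   0087dc1eb534 <-- b32475938a2d <-- 3dbb46f23ed7
down_revision = u'b32475938a2d'  # each revision names exactly one parent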
60f9acad7610ee8bed324d1e142cc4801a9e3713
ibmcnx/doc/DataSources.py
ibmcnx/doc/DataSources.py
###### # Check ExId (GUID) by Email through JDBC # # Author: Christoph Stoettner # Mail: christoph.stoettner@stoeps.de # Documentation: http://scripting101.stoeps.de # # Version: 2.0 # Date: 2014-06-04 # # License: Apache 2.0 # # Check ExId of a User in all Connections Applications import ibmcnx.functions ds = AdminConfig.list('DataSource') dsArray = ds.splitlines() AdminControl.getPropertiesForDataSource(dsArray[0])
###### # Check ExId (GUID) by Email through JDBC # # Author: Christoph Stoettner # Mail: christoph.stoettner@stoeps.de # Documentation: http://scripting101.stoeps.de # # Version: 2.0 # Date: 2014-06-04 # # License: Apache 2.0 # # Check ExId of a User in all Connections Applications import ibmcnx.functions ds = AdminConfig.list('DataSource') dsArray = ds.splitlines() test = AdminControl.getPropertiesForDataSource(dsArray[0]) print dsArray print '\n' print test
Create script to save documentation to a file
4: Create script to save documentation to a file Task-Url: http://github.com/stoeps13/ibmcnx2/issues/issue/4
Python
apache-2.0
stoeps13/ibmcnx2,stoeps13/ibmcnx2
2cbffa60c0b12a268e0347a6a4ecfc6d5acb29e3
lamor_flexbe_states/src/lamor_flexbe_states/detect_person_state.py
lamor_flexbe_states/src/lamor_flexbe_states/detect_person_state.py
#!/usr/bin/env python from flexbe_core import EventState, Logger from flexbe_core.proxy import ProxySubscriberCached from geometry_msgs.msg import PoseStamped class DetectPersonState(EventState): ''' Detects the nearest person and provides their pose. -- wait_timeout float Time (seconds) to wait for a person before giving up. #> person_pose PoseStamped Pose of the nearest person if one is detected, else None. <= detected Detected a person. <= not_detected No person detected, but time ran out. ''' def __init__(self, wait_timeout): super(MetricSweepState, self).__init__(outcomes = ['detected', 'not_detected'] output_keys = ['person_pose']) self._wait_timeout = rospy.Duration(wait_timeout) self._topic = '/people_tracker/pose' self._sub = ProxySubscriberCached({self._topic: PoseStamped}) self._start_waiting_time = None def execute(self, userdata): if rospy.Time.now() > self._start_waiting_time + self._wait_timeout: return 'not_detected' if self._sub.has_msgs(self._topic): userdata.person_pose = self._sub.get_last_msg(self._topic) return 'detected' def on_enter(self, userdata): self._start_waiting_time = rospy.Time.now()
#!/usr/bin/env python

import rospy

from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxySubscriberCached

from geometry_msgs.msg import PoseStamped


class DetectPersonState(EventState):
    '''
    Detects the nearest person and provides their pose.

    -- wait_timeout  float  Time (seconds) to wait for a person before giving up.

    #> person_pose  PoseStamped  Pose of the nearest person if one is detected, else None.

    <= detected  Detected a person.
    <= not_detected  No person detected, but time ran out.

    '''

    def __init__(self, wait_timeout):
        super(DetectPersonState, self).__init__(outcomes = ['detected', 'not_detected'],
                                                output_keys = ['person_pose'])
        self._wait_timeout = rospy.Duration(wait_timeout)

        self._topic = '/people_tracker/pose'
        self._sub = ProxySubscriberCached({self._topic: PoseStamped})

        self._start_waiting_time = None

    def execute(self, userdata):
        if rospy.Time.now() > self._start_waiting_time + self._wait_timeout:
            userdata.person_pose = None
            return 'not_detected'

        if self._sub.has_msgs(self._topic):
            userdata.person_pose = self._sub.get_last_msg(self._topic)
            return 'detected'

    def on_enter(self, userdata):
        self._start_waiting_time = rospy.Time.now()
Set person pose to None if no person is present
[lamor_flexbe_state] Set person pose to None if no person is present
Python
mit
marinaKollmitz/lamor15,pschillinger/lamor15,pschillinger/lamor15,marinaKollmitz/lamor15,pschillinger/lamor15,marinaKollmitz/lamor15,pschillinger/lamor15,marinaKollmitz/lamor15,marinaKollmitz/lamor15,pschillinger/lamor15
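The timeout logic above follows a generic wait-with-deadline pattern; a standard-library stand-in, since running the real state needs an initialized rospy node:

import time

class TimedWaiter(object):
    """Stand-in for the FlexBE state: wait for data until a deadline."""

    def __init__(self, wait_timeout):
        self._wait_timeout = wait_timeout  # seconds
        self._start_waiting_time = None

    def on_enter(self):
        self._start_waiting_time = time.time()

    def poll(self, message):
        if time.time() > self._start_waiting_time + self._wait_timeout:
            return 'not_detected'
        if message is not None:
            return 'detected'
        return None  # no outcome yet; keep polling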
206a59c838623aae5e0b0f91f8089ffc13e2cfd0
pipenv/vendor/pythonfinder/environment.py
pipenv/vendor/pythonfinder/environment.py
# -*- coding=utf-8 -*- from __future__ import print_function, absolute_import import os import platform import sys def is_type_checking(): from typing import TYPE_CHECKING return TYPE_CHECKING PYENV_INSTALLED = bool(os.environ.get("PYENV_SHELL")) or bool( os.environ.get("PYENV_ROOT") ) ASDF_INSTALLED = bool(os.environ.get("ASDF_DIR")) PYENV_ROOT = os.path.expanduser( os.path.expandvars(os.environ.get("PYENV_ROOT", "~/.pyenv")) ) ASDF_DATA_DIR = os.path.expanduser( os.path.expandvars(os.environ.get("ASDF_DATA_DIR", "~/.asdf")) ) IS_64BIT_OS = None SYSTEM_ARCH = platform.architecture()[0] if sys.maxsize > 2 ** 32: IS_64BIT_OS = platform.machine() == "AMD64" else: IS_64BIT_OS = False IGNORE_UNSUPPORTED = bool(os.environ.get("PYTHONFINDER_IGNORE_UNSUPPORTED", False)) MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
# -*- coding=utf-8 -*- from __future__ import print_function, absolute_import import os import platform import sys def is_type_checking(): try: from typing import TYPE_CHECKING except ImportError: return False return TYPE_CHECKING PYENV_INSTALLED = bool(os.environ.get("PYENV_SHELL")) or bool( os.environ.get("PYENV_ROOT") ) ASDF_INSTALLED = bool(os.environ.get("ASDF_DIR")) PYENV_ROOT = os.path.expanduser( os.path.expandvars(os.environ.get("PYENV_ROOT", "~/.pyenv")) ) ASDF_DATA_DIR = os.path.expanduser( os.path.expandvars(os.environ.get("ASDF_DATA_DIR", "~/.asdf")) ) IS_64BIT_OS = None SYSTEM_ARCH = platform.architecture()[0] if sys.maxsize > 2 ** 32: IS_64BIT_OS = platform.machine() == "AMD64" else: IS_64BIT_OS = False IGNORE_UNSUPPORTED = bool(os.environ.get("PYTHONFINDER_IGNORE_UNSUPPORTED", False)) MYPY_RUNNING = os.environ.get("MYPY_RUNNING", is_type_checking())
Fix typing check for pythonfinder
Fix typing check for pythonfinder Signed-off-by: Dan Ryan <2591e5f46f28d303f9dc027d475a5c60d8dea17a@danryan.co>
Python
mit
kennethreitz/pipenv
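The guarded import matters on interpreters that lack a typing module entirely; the same defensive pattern in its most common standalone form:

try:
    from typing import TYPE_CHECKING
except ImportError:  # interpreters without a typing module at all
    TYPE_CHECKING = False

if TYPE_CHECKING:
    # Evaluated only by static type checkers, never at runtime, so cyclic
    # or heavyweight imports used purely in annotations are safe here.
    from collections import OrderedDict  # noqa: F401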
c8a1b25c1579eba5cb68c1a4cdd60116b3496429
pre_commit_robotframework_tidy/rf_tidy.py
pre_commit_robotframework_tidy/rf_tidy.py
from __future__ import print_function import argparse from robot.errors import DataError from robot.tidy import Tidy def main(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to run') parser.add_argument('--use-pipes', action='store_true', dest='use_pipes', default=False) parser.add_argument('--space-count', type=int, dest='space_count', default=4) args = parser.parse_args(argv) tidier = Tidy(use_pipes=args.use_pipes, space_count=args.space_count) for filename in args.filenames: try: tidier.inplace(filename) except DataError: pass return 0 if __name__ == '__main__': exit(main())
from __future__ import print_function import argparse from robot.errors import DataError from robot.tidy import Tidy def main(argv=None): parser = argparse.ArgumentParser() parser.add_argument('filenames', nargs='*', help='Filenames to run') parser.add_argument('--use-pipes', action='store_true', dest='use_pipes', default=False) parser.add_argument('--space-count', type=int, dest='space_count', default=4) args = parser.parse_args(argv) tidier = Tidy(use_pipes=args.use_pipes, space_count=args.space_count, format='robot') for filename in args.filenames: try: tidier.inplace(filename) except DataError: pass return 0 if __name__ == '__main__': exit(main())
Format results as robot files
Format results as robot files
Python
mit
guykisel/pre-commit-robotframework-tidy
ff011f280f6f6aaf74dd2f4ff3cdfb3831aa147c
ideskeleton/builder.py
ideskeleton/builder.py
def build(source_path, overwrite = True): pass
import os.path


def build(source_path, overwrite = True):
    if not os.path.exists(source_path):
        raise IOError("source_path does not exist so no skeleton can be built")

    '''
    for root, dirs, files in os.walk("."):
        path = root.split('/')
        print (len(path) - 1) *'---' , os.path.basename(root)
        for file in files:
            print len(path)*'---', file
    '''
Make the first test pass by checking if source_path argument exists
Make the first test pass by checking if source_path argument exists
Python
mit
jruizaranguren/ideskeleton
3bbe101f609349c2475079f052d5400e77822237
common/my_filters.py
common/my_filters.py
from google.appengine.ext import webapp import re # More info on custom Django template filters here: # https://docs.djangoproject.com/en/dev/howto/custom-template-tags/#registering-custom-filters register = webapp.template.create_template_register() @register.filter def digits(value): return re.sub('[^0-9]', '', value) @register.filter def mul(value, arg): return value * arg @register.filter def yt_start(value): return value.replace("?t=", "?start=")
from google.appengine.ext import webapp from helpers.youtube_video_helper import YouTubeVideoHelper import re # More info on custom Django template filters here: # https://docs.djangoproject.com/en/dev/howto/custom-template-tags/#registering-custom-filters register = webapp.template.create_template_register() @register.filter def digits(value): return re.sub('[^0-9]', '', value) @register.filter def mul(value, arg): return value * arg @register.filter def yt_start(value): if '?t=' in value: # Treat ?t= the same as #t= value = value.replace('?t=', '#t=') if '#t=' in value: sp = value.split('#t=') video_id = sp[0] old_ts = sp[1] total_seconds = YouTubeVideoHelper.time_to_seconds(old_ts) value = '%s?start=%i' % (video_id, total_seconds) return value
Fix video suggestion review showing wrong time
Fix video suggestion review showing wrong time
Python
mit
nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,tsteward/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,fangeugene/the-blue-alliance,jaredhasenklein/the-blue-alliance,bdaroz/the-blue-alliance,the-blue-alliance/the-blue-alliance,nwalters512/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,phil-lopreiato/the-blue-alliance,tsteward/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,jaredhasenklein/the-blue-alliance,tsteward/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,jaredhasenklein/the-blue-alliance,jaredhasenklein/the-blue-alliance,nwalters512/the-blue-alliance,tsteward/the-blue-alliance,phil-lopreiato/the-blue-alliance,fangeugene/the-blue-alliance,nwalters512/the-blue-alliance,fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,bdaroz/the-blue-alliance,verycumbersome/the-blue-alliance,verycumbersome/the-blue-alliance,the-blue-alliance/the-blue-alliance,jaredhasenklein/the-blue-alliance,the-blue-alliance/the-blue-alliance,fangeugene/the-blue-alliance,phil-lopreiato/the-blue-alliance,phil-lopreiato/the-blue-alliance,verycumbersome/the-blue-alliance,bdaroz/the-blue-alliance,bdaroz/the-blue-alliance,the-blue-alliance/the-blue-alliance
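YouTubeVideoHelper.time_to_seconds is not shown above; one plausible implementation of a converter for fragments like '1m30s' — an assumption for illustration, not the project's code:

import re

def time_to_seconds(ts):
    # Assumed input space: '90', '1m30s', '1h2m3s' and similar fragments.
    match = re.match(r'^(?:(\d+)h)?(?:(\d+)m)?(?:(\d+)s?)?$', ts)
    if not match:
        return 0
    hours, minutes, seconds = (int(g) if g else 0 for g in match.groups())
    return hours * 3600 + minutes * 60 + seconds

assert time_to_seconds('1m30s') == 90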
34575124ea6b16f7a7d4f2ae5e073a87709843d2
engine/meta.py
engine/meta.py
registered = {} class GObjectMeta(type): def __new__(cls, name, bases, dict): c = super().__new__(cls, name, bases, dict) qualname = '{}.{}'.format(c.__module__, c.__qualname__) if qualname in registered: print(cls, qualname) c = type(name, (registered[qualname], c), {}) return c def register(name): def decorator(cls): registered[name] = cls return cls return decorator
registered = {} created = {} class GObjectMeta(type): def __new__(cls, name, bases, dict): c = super().__new__(cls, name, bases, dict) # Do not handle classes that are already decorated if c.__module__.startswith('<meta>'): return c # Fullname of the class (base module + qualified name) fullname = '{}.{}'.format(c.__module__, c.__qualname__) # Decorate registered classes if fullname in registered: print(cls, fullname) c = type(name, (registered[fullname], c), {'__module__': '<meta>.{}'.format(fullname)}) # Set fullname, save class and return c.__fullname__ = fullname created[fullname] = c return c def register(name): def decorator(cls): registered[name] = cls return cls return decorator
Add __fullname__ attribute on all game classes
Add __fullname__ attribute on all game classes
Python
bsd-3-clause
entwanne/NAGM
95ccab69cfff30c24932c4cd156983a29639435d
nginxauthdaemon/crowdauth.py
nginxauthdaemon/crowdauth.py
import crowd from auth import Authenticator class CrowdAuthenticator(Authenticator): """Atlassian Crowd authenticator. Requires configuration options CROWD_URL, CROWD_APP_NAME, CROWD_APP_PASSWORD""" def __init__(self, config): super(CrowdAuthenticator, self).__init__(config) app_url = config['CROWD_URL'] app_user = config['CROWD_APP_NAME'] app_pass = config['CROWD_APP_PASSWORD'] self._cs = crowd.CrowdServer(app_url, app_user, app_pass) def authenticate(self, username, password): result = self._cs.auth_user(username, password) return result.get('name') == username
import crowd

from auth import Authenticator


class CrowdAuthenticator(Authenticator):
    """Atlassian Crowd authenticator. Requires configuration options CROWD_URL, CROWD_APP_NAME, CROWD_APP_PASSWORD"""
    def __init__(self, config):
        super(CrowdAuthenticator, self).__init__(config)

        app_url = config['CROWD_URL']
        app_user = config['CROWD_APP_NAME']
        app_pass = config['CROWD_APP_PASSWORD']

        self._cs = crowd.CrowdServer(app_url, app_user, app_pass)

    def authenticate(self, username, password):
        result = self._cs.auth_user(username, password)
        if result is None:
            # auth failed
            return False
        # auth succeeded
        return result.get('name') == username
Fix 500 error when Crowd auth is failed
Fix 500 error when Crowd auth is failed
Python
mit
akurdyukov/nginxauthdaemon,akurdyukov/nginxauthdaemon
6c2a154bf902b5f658b2c2cbf4b65c6ed33e6c1b
pywineds/utils.py
pywineds/utils.py
""" Exposes utility functions. """ from contextlib import contextmanager import logging import timeit log = logging.getLogger("wineds") @contextmanager def time_it(task_desc): """ A context manager for timing chunks of code and logging it. Arguments: task_desc: task description for logging purposes """ start_time = timeit.default_timer() yield elapsed = timeit.default_timer() - start_time log.info("elapsed (%s): %.4f seconds" % (task_desc, elapsed))
""" Exposes utility functions. """ from contextlib import contextmanager import logging import timeit REPORTING_TYPE_ALL = "" REPORTING_TYPE_ELD = "TC-Election Day Reporting" REPORTING_TYPE_VBM = "TC-VBM Reporting" REPORTING_KEYS_SIMPLE = (REPORTING_TYPE_ALL, ) REPORTING_KEYS_COMPLETE = (REPORTING_TYPE_ELD, REPORTING_TYPE_VBM) REPORTING_INDICES = { REPORTING_TYPE_ALL: 0, REPORTING_TYPE_ELD: 1, REPORTING_TYPE_VBM: 2, } log = logging.getLogger("wineds") @contextmanager def time_it(task_desc): """ A context manager for timing chunks of code and logging it. Arguments: task_desc: task description for logging purposes """ start_time = timeit.default_timer() yield elapsed = timeit.default_timer() - start_time log.info("elapsed (%s): %.4f seconds" % (task_desc, elapsed))
Add some reporting_type global variables.
Add some reporting_type global variables.
Python
bsd-3-clause
cjerdonek/wineds-converter
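For completeness, how the time_it context manager above is meant to be used (the logging setup and the timed work are stand-ins):

import logging
logging.basicConfig(level=logging.INFO)

with time_it('summing a million ints'):
    total = sum(range(10 ** 6))  # stand-in for the real work being timed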
21e9254abeebb7260f74db9c94e480cc2b5bbcc9
tests/conftest.py
tests/conftest.py
import pytest @pytest.fixture(scope='session') def base_url(base_url, request): return base_url or 'https://developer.allizom.org'
import pytest VIEWPORT = { 'large': {'width': 1201, 'height': 1024}, # also nav-break-ends 'desktop': {'width': 1025, 'height': 1024}, 'tablet': {'width': 851, 'height': 1024}, # also nav-block-ends 'mobile': {'width': 481, 'height': 1024}, 'small': {'width': 320, 'height': 480}} @pytest.fixture(scope='session') def base_url(base_url, request): return base_url or 'https://developer.allizom.org' @pytest.fixture def selenium(request, selenium): viewport = VIEWPORT['large'] if request.keywords.get('viewport') is not None: viewport = VIEWPORT[request.keywords.get('viewport').args[0]] selenium.set_window_size(viewport['width'], viewport['height']) return selenium
Add viewport sizes fixture to tests.
Add viewport sizes fixture to tests.
Python
mpl-2.0
safwanrahman/kuma,Elchi3/kuma,mozilla/kuma,jwhitlock/kuma,SphinxKnight/kuma,SphinxKnight/kuma,Elchi3/kuma,mozilla/kuma,SphinxKnight/kuma,a2sheppy/kuma,safwanrahman/kuma,Elchi3/kuma,mozilla/kuma,yfdyh000/kuma,yfdyh000/kuma,SphinxKnight/kuma,safwanrahman/kuma,a2sheppy/kuma,yfdyh000/kuma,yfdyh000/kuma,safwanrahman/kuma,SphinxKnight/kuma,jwhitlock/kuma,safwanrahman/kuma,SphinxKnight/kuma,a2sheppy/kuma,a2sheppy/kuma,escattone/kuma,Elchi3/kuma,Elchi3/kuma,jwhitlock/kuma,jwhitlock/kuma,safwanrahman/kuma,escattone/kuma,escattone/kuma,a2sheppy/kuma,jwhitlock/kuma,yfdyh000/kuma,mozilla/kuma,mozilla/kuma,yfdyh000/kuma
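A hypothetical test showing how the viewport marker the fixture reads would be applied (test name and assertions are illustrative):

import pytest

@pytest.mark.viewport('mobile')  # selects the 481x1024 entry; 'large' is the default
def test_nav_collapses_on_mobile(selenium, base_url):
    selenium.get(base_url)
    # assertions against the mobile navigation layout would go here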
534633d078fe6f81e67ead075ac31faac0c3c60d
tests/__init__.py
tests/__init__.py
import pycurl def setup_package(): print('Testing %s' % pycurl.version)
def setup_package(): # import here, not globally, so that running # python -m tests.appmanager # to launch the app manager is possible without having pycurl installed # (as the test app does not depend on pycurl) import pycurl print('Testing %s' % pycurl.version)
Make it possible to run test app without pycurl being installed
Make it possible to run test app without pycurl being installed
Python
lgpl-2.1
pycurl/pycurl,pycurl/pycurl,pycurl/pycurl
1b9622cedecef0c6c45c11a84bd178adcff752e2
squadron/exthandlers/download.py
squadron/exthandlers/download.py
import urllib from extutils import get_filename from template import render import requests import yaml import jsonschema SCHEMA = { '$schema': 'http://json-schema.org/draft-04/schema#', 'description': 'Describes the extract extension handler input', 'type':'object', 'properties': { 'url': { 'description': 'Where to download the tarball/zip/etc from', 'type':'string' }, 'username': { 'description': 'Username to login with BASIC Auth', 'type':'string' }, 'password': { 'description': 'Password to use with BASIC Auth', 'type':'string' } }, 'required': ['url'] } def _download_file(url, handle, auth=None): r = requests.get(url, auth=auth, stream=True) for chunk in r.iter_content(chunk_size=4096): if chunk: # filter out keep-alive new chunks handle.write(chunk) handle.close() def ext_download(loader, inputhash, abs_source, dest, **kwargs): """ Downloads a ~download file""" contents = yaml.load(render(abs_source, inputhash, loader)) jsonschema.validate(contents, SCHEMA) finalfile = get_filename(dest) handle = open(finalfile, 'w') auth = None if 'username' in contents and 'password' in contents: auth = (contents['username'], contents['password']) _download_file(contents['url'], handle, auth) return finalfile
import urllib from extutils import get_filename from template import render import requests import yaml import jsonschema SCHEMA = { '$schema': 'http://json-schema.org/draft-04/schema#', 'description': 'Describes the extract extension handler input', 'type':'object', 'properties': { 'url': { 'description': 'Where to download the tarball/zip/etc from', 'type':'string' }, 'username': { 'description': 'Username to login with BASIC Auth', 'type':'string' }, 'password': { 'description': 'Password to use with BASIC Auth', 'type':'string' } }, 'required': ['url'] } def _download_file(url, handle, auth=None): r = requests.get(url, auth=auth, stream=True) r.raise_for_status() for chunk in r.iter_content(chunk_size=4096): if chunk: # filter out keep-alive new chunks handle.write(chunk) handle.close() def ext_download(loader, inputhash, abs_source, dest, **kwargs): """ Downloads a ~download file""" contents = yaml.load(render(abs_source, inputhash, loader)) jsonschema.validate(contents, SCHEMA) finalfile = get_filename(dest) handle = open(finalfile, 'w') auth = None if 'username' in contents and 'password' in contents: auth = (contents['username'], contents['password']) _download_file(contents['url'], handle, auth) return finalfile
Raise Exception when there's an HTTP error
Raise Exception when there's an HTTP error
Python
mit
gosquadron/squadron,gosquadron/squadron
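Why raise_for_status matters here: without it, the body of a 4xx/5xx error page would be streamed into the destination file; the corrected flow in compact form:

import requests

def download(url, path, auth=None):
    r = requests.get(url, auth=auth, stream=True)
    r.raise_for_status()  # raises requests.HTTPError on 4xx/5xx, before any write
    with open(path, 'wb') as handle:
        for chunk in r.iter_content(chunk_size=4096):
            if chunk:  # filter out keep-alive chunks
                handle.write(chunk)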
7e51d073952d10d3994fb93458e60c03b6746099
app/services/g6importService.py
app/services/g6importService.py
import json import jsonschema from flask import current_app from jsonschema import validate with open("schemata/g6-scs-schema.json") as json_file1: G6_SCS_SCHEMA = json.load(json_file1) with open("schemata/g6-saas-schema.json") as json_file2: G6_SAAS_SCHEMA = json.load(json_file2) with open("schemata/g6-iaas-schema.json") as json_file3: G6_IAAS_SCHEMA = json.load(json_file3) with open("schemata/g6-paas-schema.json") as json_file4: G6_PAAS_SCHEMA = json.load(json_file4) def validate_json(submitted_json): #current_app.logger.info('Validating JSON:' + str(submitted_json)) try: validate(submitted_json, G6_SCS_SCHEMA) return 'G6-SCS' except jsonschema.ValidationError as e1: try: validate(submitted_json, G6_SAAS_SCHEMA) return 'G6-SaaS' except jsonschema.ValidationError as e2: try: validate(submitted_json, G6_IAAS_SCHEMA) return 'G6-IaaS' except jsonschema.ValidationError as e3: try: validate(submitted_json, G6_PAAS_SCHEMA) return 'G6-PaaS' except jsonschema.ValidationError as e4: print e4.message print 'Failed validation' return False else: return True
import json import jsonschema from jsonschema import validate with open("schemata/g6-scs-schema.json") as json_file1: G6_SCS_SCHEMA = json.load(json_file1) with open("schemata/g6-saas-schema.json") as json_file2: G6_SAAS_SCHEMA = json.load(json_file2) with open("schemata/g6-iaas-schema.json") as json_file3: G6_IAAS_SCHEMA = json.load(json_file3) with open("schemata/g6-paas-schema.json") as json_file4: G6_PAAS_SCHEMA = json.load(json_file4) def validate_json(submitted_json): if validates_against_schema(G6_SCS_SCHEMA,submitted_json): return 'G6-SCS' elif validates_against_schema(G6_SAAS_SCHEMA,submitted_json): return 'G6-SaaS' elif validates_against_schema(G6_PAAS_SCHEMA,submitted_json): return 'G6-PaaS' elif validates_against_schema(G6_IAAS_SCHEMA,submitted_json): return 'G6-IaaS' else: print 'Failed validation' return False def validates_against_schema(schema, submitted_json): try: validate(submitted_json, schema) except jsonschema.ValidationError: return False else: return True
Improve code by avoiding flow through exception handling
Improve code by avoiding flow through exception handling
Python
mit
RichardKnop/digitalmarketplace-api,mtekel/digitalmarketplace-api,mtekel/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,alphagov/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,mtekel/digitalmarketplace-api,RichardKnop/digitalmarketplace-api,mtekel/digitalmarketplace-api
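With exception handling isolated in validates_against_schema, adding a schema becomes one more elif; a usage sketch with a toy schema (not one of the G6 schemata):

toy_schema = {'type': 'object', 'required': ['url']}

assert validates_against_schema(toy_schema, {'url': 'https://example.org'})
assert not validates_against_schema(toy_schema, {})  # missing required key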
135d4ff79a9a650442548fa5acf44f2dbcd20c0e
voltron/common.py
voltron/common.py
import os import logging import logging.config LOG_CONFIG = { 'version': 1, 'formatters': { 'standard': {'format': 'voltron: [%(levelname)s] %(message)s'} }, 'handlers': { 'default': { 'class': 'logging.StreamHandler', 'formatter': 'standard' } }, 'loggers': { 'voltron': { 'handlers': ['default'], 'level': 'INFO', 'propogate': True, } } } VOLTRON_DIR = os.path.expanduser('~/.voltron/') VOLTRON_CONFIG = os.path.join(VOLTRON_DIR, 'config') def configure_logging(): logging.config.dictConfig(LOG_CONFIG) log = logging.getLogger('voltron') return log
import os import logging import logging.config LOG_CONFIG = { 'version': 1, 'formatters': { 'standard': {'format': 'voltron: [%(levelname)s] %(message)s'} }, 'handlers': { 'default': { 'class': 'logging.StreamHandler', 'formatter': 'standard' } }, 'loggers': { 'voltron': { 'handlers': ['default'], 'level': 'INFO', 'propogate': True, } } } VOLTRON_DIR = os.path.expanduser('~/.voltron/') VOLTRON_CONFIG = os.path.join(VOLTRON_DIR, 'config') def configure_logging(): logging.config.dictConfig(LOG_CONFIG) log = logging.getLogger('voltron') return log # Python 3 shims if not hasattr(__builtins__, "xrange"): xrange = range
Create some shims for py3k
Create some shims for py3k
Python
mit
snare/voltron,snare/voltron,snare/voltron,snare/voltron
05ed4d54d48ddf6540f8dc0d162e4fc95337dbb6
blah/commands.py
blah/commands.py
import os import subprocess import sys def find_command(name): return commands[name] def what_is_this_command(): repository = find_current_repository() if repository is None: print "Could not find source control repository" else: print "{0}+file://{1}".format(repository.type, repository.path) def find_current_repository(): directory = os.getcwd() while directory is not None: files = os.listdir(directory) if ".git" in files: return Repository(os.path.join(directory, ".git"), "git") directory = parent(directory) return None class Repository(object): def __init__(self, repo_path, repo_type): self.path = repo_path self.type = repo_type def parent(file_path): parent = os.path.dirname(file_path) if file_path == parent: return None else: return parent commands = { "whatisthis": what_is_this_command, "what-is-this": what_is_this_command }
import os import subprocess import sys def find_command(name): return commands[name] def what_is_this_command(): directory = sys.argv[2] if len(sys.argv) > 2 else os.getcwd() repository = find_repository(directory) if repository is None: print "Could not find source control repository" else: print "{0}+file://{1}".format(repository.type, repository.path) def find_repository(directory): directory = os.path.abspath(directory) while directory is not None: files = os.listdir(directory) if ".git" in files: return Repository(os.path.join(directory, ".git"), "git") directory = parent(directory) return None class Repository(object): def __init__(self, repo_path, repo_type): self.path = repo_path self.type = repo_type def parent(file_path): parent = os.path.dirname(file_path) if file_path == parent: return None else: return parent commands = { "whatisthis": what_is_this_command, "what-is-this": what_is_this_command }
Allow path to be explicitly set when using what-is-this
Allow path to be explicitly set when using what-is-this
Python
bsd-2-clause
mwilliamson/mayo
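The upward walk terminates because os.path.dirname of a filesystem root returns the root itself; the same loop in condensed form:

import os

def find_repo_root(directory):
    directory = os.path.abspath(directory)
    while True:
        if '.git' in os.listdir(directory):
            return directory
        parent = os.path.dirname(directory)
        if parent == directory:  # dirname('/') == '/', so we hit the root
            return None
        directory = parent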
675364683c5415f1db7a5599d8ad97f72f69aaf0
buckets/utils.py
buckets/utils.py
import string import random from django.conf import settings def validate_settings(): assert settings.AWS, \ "No AWS settings found" assert settings.AWS.get('ACCESS_KEY'), \ "AWS access key is not set in settings" assert settings.AWS.get('SECRET_KEY'), \ "AWS secret key is not set in settings" assert settings.AWS.get('BUCKET'), \ "AWS bucket name is not set in settings" ID_FIELD_LENGTH = 24 alphabet = string.ascii_lowercase + string.digits for loser in 'l1o0': i = alphabet.index(loser) alphabet = alphabet[:i] + alphabet[i + 1:] def byte_to_base32_chr(byte): return alphabet[byte & 31] def random_id(): rand_id = [random.randint(0, 0xFF) for i in range(ID_FIELD_LENGTH)] return ''.join(map(byte_to_base32_chr, rand_id))
import string import random from django.conf import settings def validate_settings(): assert settings.AWS, \ "No AWS settings found" assert settings.AWS.get('ACCESS_KEY'), \ "AWS access key is not set in settings" assert settings.AWS.get('SECRET_KEY'), \ "AWS secret key is not set in settings" assert settings.AWS.get('BUCKET'), \ "AWS bucket name is not set in settings" ID_FIELD_LENGTH = 24 alphabet = string.ascii_lowercase + string.digits alphabet0 = string.ascii_lowercase + string.ascii_lowercase for loser in 'l1o0': i = alphabet.index(loser) alphabet = alphabet[:i] + alphabet[i + 1:] for loser in 'lo': i = alphabet0.index(loser) alphabet0 = alphabet0[:i] + alphabet0[i + 1:] def byte_to_base32_chr(byte): return alphabet[byte & 31] def byte_to_letter(byte): return alphabet0[byte & 31] def random_id(): rand_id = [random.randint(0, 0xFF) for i in range(ID_FIELD_LENGTH)] return (byte_to_letter(rand_id[0]) + ''.join(map(byte_to_base32_chr, rand_id[1:])))
Make random IDs start with a letter
Make random IDs start with a letter
Python
agpl-3.0
Cadasta/django-buckets,Cadasta/django-buckets,Cadasta/django-buckets
c3e2fccbc2a7afa0d146041c0b3392dd89aa5deb
analysis/plot-marker-trajectories.py
analysis/plot-marker-trajectories.py
import climate import lmj.plot import numpy as np import source import plots @climate.annotate( root='load experiment data from this directory', pattern=('plot data from files matching this pattern', 'option'), markers=('plot traces of these markers', 'option'), spline=('interpolate data with a spline of this order', 'option', None, int), accuracy=('fit spline with this accuracy', 'option', None, float), ) def main(root, pattern='*/*block00/*circuit00.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', spline=1, accuracy=1): with plots.space() as ax: for t in source.Experiment(root).trials_matching(pattern): t.normalize(order=spline, accuracy=accuracy) for i, marker in enumerate(markers.split()): df = t.trajectory(marker) ax.plot(np.asarray(df.x), np.asarray(df.z), zs=np.asarray(df.y), color=lmj.plot.COLOR11[i], alpha=0.7) if __name__ == '__main__': climate.call(main)
import climate import lmj.plot import numpy as np import source import plots @climate.annotate( root='load experiment data from this directory', pattern=('plot data from files matching this pattern', 'option'), markers=('plot traces of these markers', 'option'), spline=('interpolate data with a spline of this order', 'option', None, int), accuracy=('fit spline with this accuracy', 'option', None, float), svt_threshold=('trajectory-SVT threshold', 'option', None, float), svt_frames=('number of trajectory-SVT frames', 'option', None, int), ) def main(root, pattern='*/*block00/*circuit00.csv.gz', markers='r-fing-index l-fing-index r-heel r-knee', spline=None, accuracy=0.01, svt_threshold=1000, svt_frames=5): with plots.space() as ax: for t in source.Experiment(root).trials_matching(pattern): if spline: t.normalize(order=spline, accuracy=accuracy) else: t.reindex() t.svt(svt_threshold, accuracy, svt_frames) for i, marker in enumerate(markers.split()): df = t.trajectory(marker) ax.plot(np.asarray(df.x), np.asarray(df.z), zs=np.asarray(df.y), color=lmj.plot.COLOR11[i], alpha=0.7) if __name__ == '__main__': climate.call(main)
Add SVT options to plotting script.
Add SVT options to plotting script.
Python
mit
lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment
d06adea5117eb3ebfddd8592889346089c7391f7
dictlearn/wordnik_api_demo.py
dictlearn/wordnik_api_demo.py
from wordnik import swagger, WordApi, AccountApi client = swagger.ApiClient( 'dd3d32ae6b4709e1150040139c308fb77446e0a8ecc93db31', 'https://api.wordnik.com/v4') word_api = WordApi.WordApi(client) words = ['paint', 'mimic', 'mimics', 'francie', 'frolic', 'funhouse'] for word in words: print('=== {} ==='.format(word)) defs = word_api.getDefinitions(word) if not defs: print("no definitions") continue for def_ in defs: fmt_str = "{} --- {}" print(fmt_str.format(def_.sourceDictionary, def_.text.encode('utf-8'))) account_api = AccountApi.AccountApi(client) for i in range(5): print("Attempt {}".format(i)) status = account_api.getApiTokenStatus() print("Remaining_calls: {}".format(status.remainingCalls))
import nltk from wordnik import swagger, WordApi, AccountApi client = swagger.ApiClient( 'dd3d32ae6b4709e1150040139c308fb77446e0a8ecc93db31', 'https://api.wordnik.com/v4') word_api = WordApi.WordApi(client) toktok = nltk.ToktokTokenizer() words = ['paint', 'mimic', 'mimics', 'francie', 'frolic', 'funhouse'] for word in words: print('=== {} ==='.format(word)) defs = word_api.getDefinitions(word) if not defs: print("no definitions") continue for def_ in defs: fmt_str = "{} --- {}" tokenized_def = toktok.tokenize(def_.text.lower()) tokenized_def = [s.encode('utf-8') for s in tokenized_def] print(fmt_str.format(def_.sourceDictionary, tokenized_def)) account_api = AccountApi.AccountApi(client) for i in range(5): print("Attempt {}".format(i)) status = account_api.getApiTokenStatus() print("Remaining_calls: {}".format(status.remainingCalls))
Add tokenization to the WordNik demo
Add tokenization to the WordNik demo
Python
mit
tombosc/dict_based_learning,tombosc/dict_based_learning
062e65a161f9c84e5cd18b85790b195eec947b99
social_website_django_angular/social_website_django_angular/urls.py
social_website_django_angular/social_website_django_angular/urls.py
"""social_website_django_angular URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin urlpatterns = [ url(r'^admin/', admin.site.urls), ]
"""social_website_django_angular URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.10/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) """ from django.conf.urls import url from django.contrib import admin from social_website_django_angular.views import IndexView urlpatterns = [ url(r'^admin/', admin.site.urls), url('^.*$', IndexView.as_view(), name='index') ]
Set up url for index page
Set up url for index page
Python
mit
tomaszzacharczuk/social-website-django-angular,tomaszzacharczuk/social-website-django-angular,tomaszzacharczuk/social-website-django-angular
6fd1305f2a4a2e08b51c421b1c2cfdd33b407119
src/puzzle/problems/problem.py
src/puzzle/problems/problem.py
from data import meta class Problem(object): def __init__(self, name, lines): self.name = name self.lines = lines self._solutions = None self._constraints = [] @property def kind(self): return str(type(self)).strip("'<>").split('.').pop() @property def solution(self): return self.solutions().peek() def constrain(self, fn): self._constraints.append(fn) # Invalidate solutions. self._solutions = None def solutions(self): if self._solutions is None: self._solutions = meta.Meta( (k, v) for k, v in self._solve().items() if all( [fn(k, v) for fn in self._constraints] ) ) return self._solutions def _solve(self): """Solves Problem. Returns: dict Dict mapping solution to score. """ raise NotImplementedError() def __repr__(self): return '%s()' % self.__class__.__name__
from data import meta _THRESHOLD = 0.01 class Problem(object): def __init__(self, name, lines, threshold=_THRESHOLD): self.name = name self.lines = lines self._threshold = threshold self._solutions = None self._constraints = [ lambda k, v: v > self._threshold ] @property def kind(self): return str(type(self)).strip("'<>").split('.').pop() @property def solution(self): return self.solutions().peek() def constrain(self, fn): self._constraints.append(fn) # Invalidate solutions. self._solutions = None self._solutions_iter = None def solutions(self): if self._solutions is None: self._solutions_iter = self._solve_iter() results = [] for k, v in self._solutions_iter: if all(fn(k, v) for fn in self._constraints): results.append((k, v)) self._solutions = meta.Meta(results) return self._solutions def _solve_iter(self): return iter(self._solve().items()) def _solve(self): """Solves Problem. Returns: dict Dict mapping solution to score. """ raise NotImplementedError() def __repr__(self): return '%s()' % self.__class__.__name__
Set a threshold on Problem and enforce it.
Set a threshold on Problem and enforce it.
Python
mit
PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge,PhilHarnish/forge
d44fee53020470e2d9a8cd2393f5f0125dbd1fab
python/client.py
python/client.py
import grpc import hello_pb2 import hello_pb2_grpc def run(): channel = grpc.insecure_channel('localhost:50051') stub = hello_pb2_grpc.HelloServiceStub(channel) # ideally, you should have try catch block here too response = stub.SayHello(hello_pb2.HelloReq(Name='Euler')) print(response.Result) try: response = stub.SayHelloStrict(hello_pb2.HelloReq( Name='Leonhard Euler')) except grpc.RpcError as e: # ouch! # lets print the gRPC error message # which is "Length of `Name` cannot be more than 10 characters" print(e.details()) # lets access the error code, which is `INVALID_ARGUMENT` # `type` of `status_code` is `grpc.StatusCode` status_code = e.code() # should print `INVALID_ARGUMENT` print(status_code.name) # should print `(3, 'invalid argument')` print(status_code.value) else: print(response.Result) if __name__ == '__main__': run()
import grpc import hello_pb2 import hello_pb2_grpc def run(): channel = grpc.insecure_channel('localhost:50051') stub = hello_pb2_grpc.HelloServiceStub(channel) # ideally, you should have try catch block here too response = stub.SayHello(hello_pb2.HelloReq(Name='Euler')) print(response.Result) try: response = stub.SayHelloStrict(hello_pb2.HelloReq( Name='Leonhard Euler')) except grpc.RpcError as e: # ouch! # lets print the gRPC error message # which is "Length of `Name` cannot be more than 10 characters" print(e.details()) # lets access the error code, which is `INVALID_ARGUMENT` # `type` of `status_code` is `grpc.StatusCode` status_code = e.code() # should print `INVALID_ARGUMENT` print(status_code.name) # should print `(3, 'invalid argument')` print(status_code.value) # want to do some specific action based on the error? if grpc.StatusCode.INVALID_ARGUMENT == status_code: # do your stuff here pass else: print(response.Result) if __name__ == '__main__': run()
Update python version for better error handling
Update python version for better error handling
Python
mit
avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors,avinassh/grpc-errors
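Once several status codes need distinct handling, a dispatch table reads better than chained ifs; an illustrative variant, not the repo's code:

from __future__ import print_function

import grpc

def handle_rpc_error(e):
    # Dispatch on the status code of a caught grpc.RpcError.
    handlers = {
        grpc.StatusCode.INVALID_ARGUMENT: lambda: print('bad input:', e.details()),
        grpc.StatusCode.UNAVAILABLE: lambda: print('server unavailable, retry later'),
    }
    handlers.get(e.code(), lambda: print('unhandled:', e.code().name))()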
97229a7e51279906254a7befa0456a4c89a9b0ea
planner/models.py
planner/models.py
from django.db import models # Route model # Start and end locations with additional stop-overs class Route(models.Model): origin = models.CharField(max_length=63) destination = models.CharField(max_length=63) class Waypoint(models.Model): waypoint = models.CharField(max_length=63) route = models.ForeignKey(Route) # RoadTrip model # Start and end dates, Route and TripDetails class RoadTrip(models.Model): start_date = models.DateTimeField() end_date = models.DateTimeField() route = models.OneToOneField(Route) # TripDetail model # Additional trip details, such as traveling with children or pets class TripDetail(models.Model): description = models.CharField(max_length=127) trip = models.ForeignKey(RoadTrip)
from django.db import models # Route model # Start and end locations with additional stop-overs class Route(models.Model): origin = models.CharField(max_length=63) destination = models.CharField(max_length=63) def __unicode__(self): return "{} to {}".format( self.origin, self.destination ) class Waypoint(models.Model): waypoint = models.CharField(max_length=63) route = models.ForeignKey(Route) def __unicode__(self): return str(self.waypoint) # RoadTrip model # Start and end dates, Route and TripDetails class RoadTrip(models.Model): start_date = models.DateTimeField() end_date = models.DateTimeField() route = models.OneToOneField(Route) def __unicode__(self): return "{} from {} to {}".format( self.route, self.start_date, self.end_date ) # TripDetail model # Additional trip details, such as traveling with children or pets class TripDetail(models.Model): description = models.CharField(max_length=127) trip = models.ForeignKey(RoadTrip) def __unicode__(self): return str(self.description)
Add unicode methods to model classes
Add unicode methods to model classes
Python
apache-2.0
jwarren116/RoadTrip,jwarren116/RoadTrip,jwarren116/RoadTrip
1aef29a64886522d81d2f6a15bd4e48419a66545
ziggy/__init__.py
ziggy/__init__.py
# -*- coding: utf-8 -*- """ Ziggy ~~~~~~~~ :copyright: (c) 2012 by Rhett Garber :license: ISC, see LICENSE for more details. """ __title__ = 'ziggy' __version__ = '0.0.1' __build__ = 0 __author__ = 'Rhett Garber' __license__ = 'ISC' __copyright__ = 'Copyright 2012 Rhett Garber' import logging from . import utils from . import network from .context import Context, set, append, add from . import context as _context_mod from .errors import Error from .timer import timeit log = logging.getLogger(__name__) def configure(host, port, recorder=None): """Initialize ziggy This instructs the ziggy system where to send it's logging data. If ziggy is not configured, log data will be silently dropped. Currently we support logging through the network (and the configured host and port) to a ziggyd instances, or to the specified recorder function """ global _record_function if recorder: context._recorder_function = recorder elif host and port: network.init(host, port) context._recorder_function = network.send else: log.warning("Empty ziggy configuration")
# -*- coding: utf-8 -*- """ Ziggy ~~~~~~~~ :copyright: (c) 2012 by Rhett Garber :license: ISC, see LICENSE for more details. """ __title__ = 'ziggy' __version__ = '0.0.1' __build__ = 0 __author__ = 'Rhett Garber' __license__ = 'ISC' __copyright__ = 'Copyright 2012 Rhett Garber' import logging from . import utils from . import network from .context import Context, set, append, add from . import context as _context_mod from .errors import Error from .timer import timeit log = logging.getLogger(__name__) def configure(host, port, recorder=None): """Initialize ziggy This instructs the ziggy system where to send it's logging data. If ziggy is not configured, log data will be silently dropped. Currently we support logging through the network (and the configured host and port) to a ziggyd instances, or to the specified recorder function """ global _record_function if recorder: context._recorder_function = recorder elif host and port: network.init(host, port) context._recorder_function = network.send else: log.warning("Empty ziggy configuration") context._recorder_function = None
Allow unsetting of configuration (for testing)
Allow unsetting of configuration (for testing)
Python
isc
rhettg/Ziggy,rhettg/BlueOx
168a7c9b9f5c0699009d8ef6eea0078c2a6a19cc
oonib/handlers.py
oonib/handlers.py
import types from cyclone import escape from cyclone import web class OONIBHandler(web.RequestHandler): def write_error(self, status_code, exception=None, **kw): self.set_status(status_code) if exception: self.write({'error': exception.log_message}) def write(self, chunk): """ This is a monkey patch to RequestHandler to allow us to serialize also json list objects. """ if isinstance(chunk, types.ListType): chunk = escape.json_encode(chunk) web.RequestHandler.write(self, chunk) self.set_header("Content-Type", "application/json") else: web.RequestHandler.write(self, chunk)
import types from cyclone import escape from cyclone import web class OONIBHandler(web.RequestHandler): def write_error(self, status_code, exception=None, **kw): self.set_status(status_code) if hasattr(exception, 'log_message'): self.write({'error': exception.log_message}) else: self.write({'error': 'error'}) def write(self, chunk): """ This is a monkey patch to RequestHandler to allow us to serialize also json list objects. """ if isinstance(chunk, types.ListType): chunk = escape.json_encode(chunk) web.RequestHandler.write(self, chunk) self.set_header("Content-Type", "application/json") else: web.RequestHandler.write(self, chunk)
Handle writing exceptions that don't have log_exception attribute
Handle writing exceptions that don't have log_exception attribute
Python
bsd-2-clause
dstufft/ooni-backend,dstufft/ooni-backend
8c90485e5cab6294a38cfc9332eda6fe8ca15483
project/config.py
project/config.py
import os config = {} system_mongo_host = os.environ.get('MONGODB_PORT_27017_TCP_ADDR') system_elastic_host = os.environ.get('ELASTIC_PORT_9300_TCP_ADDR') config['HOST'] = '' config['PORT'] = 5000 config['MONGODB_HOST'] = system_mongo_host if system_mongo_host else 'localhost' config['MONGODB_PORT'] = 27017 config['ELASTIC_HOST'] = system_elastic_host if system_elastic_host else 'localhost' config['ELASTIC_PORT'] = 9200 config['ACCEPTED_ORIGINS'] = ['http://104.236.77.225', 'http://localhost:3000']
import os config = {} system_mongo_host = os.environ.get('MONGODB_PORT_27017_TCP_ADDR') system_elastic_host = os.environ.get('ELASTIC_PORT_9300_TCP_ADDR') config['HOST'] = '' config['PORT'] = 5000 config['MONGODB_HOST'] = system_mongo_host if system_mongo_host else 'localhost' config['MONGODB_PORT'] = 27017 config['ELASTIC_HOST'] = system_elastic_host if system_elastic_host else 'localhost' config['ELASTIC_PORT'] = 9200 config['ACCEPTED_ORIGINS'] = ['http://beta.founderati.io', 'http://beta.thehookemup.com', 'http://104.236.77.225', 'http://localhost:3000']
Add two new domains to whitelist for CORS.
Add two new domains to whitelist for CORS.
Python
apache-2.0
AustinStoneProjects/Founderati-Server,AustinStoneProjects/Founderati-Server
616bd7c5ff8ba5fe5dd190a459b93980613a3ad4
myuw_mobile/restclients/dao_implementation/hfs.py
myuw_mobile/restclients/dao_implementation/hfs.py
from os.path import dirname from restclients.dao_implementation.mock import get_mockdata_url from restclients.dao_implementation.live import get_con_pool, get_live_url class File(object): """ This implementation returns mock/static content. Use this DAO with this configuration: RESTCLIENTS_HFS_DAO_CLASS = 'myuw_mobile.restclients.dao_implementation.hfs.File' """ def getURL(self, url, headers): """ Return the url for accessing the mock data in local file :param url: in the format of "hfs/servlet/hfservices?sn=<student number>" """ return get_mockdata_url("hfs", "file", url, headers, dir_base=dirname(__file__)) class Live(object): """ This DAO provides real data. Access is restricted to localhost. """ pool = None def getURL(self, url, headers): """ Return the absolute url for accessing live data :param url: in the format of "hfs/servlet/hfservices?sn=<student number>" """ host = 'http://localhost/' if Live.pool == None: Live.pool = get_con_pool(host, None, None) return get_live_url (Live.pool, 'GET', host, url, headers=headers)
from os.path import dirname from restclients.dao_implementation.mock import get_mockdata_url from restclients.dao_implementation.live import get_con_pool, get_live_url import logging from myuw_mobile.logger.logback import log_info class File(object): """ This implementation returns mock/static content. Use this DAO with this configuration: RESTCLIENTS_HFS_DAO_CLASS = 'myuw_mobile.restclients.dao_implementation.hfs.File' """ def getURL(self, url, headers): """ Return the url for accessing the mock data in local file :param url: in the format of "hfs/servlet/hfservices?sn=<student number>" """ return get_mockdata_url("hfs", "file", url, headers, dir_base=dirname(__file__)) class Live(object): """ This DAO provides real data. Access is restricted to localhost. """ logger = logging.getLogger('myuw_mobile.restclients.dao_implementation.hfs.Live') pool = None def getURL(self, url, headers): """ Return the absolute url for accessing live data :param url: in the format of "hfs/servlet/hfservices?sn=<student number>" """ host = 'http://localhost:80/' if Live.pool is None: Live.pool = get_con_pool(host, None, None, socket_timeout=5.0, max_pool_size=5) log_info(Live.logger, Live.pool) return get_live_url (Live.pool, 'GET', host, url, headers=headers)
Fix bug: must specify the port number.
Fix bug: must specify the port number.
Python
apache-2.0
uw-it-aca/myuw,uw-it-aca/myuw,fanglinfang/myuw,uw-it-aca/myuw,fanglinfang/myuw,fanglinfang/myuw,uw-it-aca/myuw
882175afea0e2c35e2b223e15feb195a005f7d42
common/config.py
common/config.py
# # Poet Configurations # # client authentication token AUTH = 'b9c39a336bb97a9c9bda2b82bdaacff3' # directory to save output files to ARCHIVE_DIR = 'archive' # # The below configs let you bake in the server IP and beacon interval # into the final executable so it can simply be executed without supplying # command line arguments. # # server IP # # if this is None, it *must* be specified as a command line argument # when client is executed # # SERVER_IP = '1.2.3.4' # example SERVER_IP = None # client beacon interval # # if this is None, it *may* be specified as a command line argument, # otherwise, it will take the default value # # BEACON_INTERVAL = 300 # example BEACON_INTERVAL = None
# # Poet Configurations # # default client authentication token. change this to whatever you want! AUTH = 'b9c39a336bb97a9c9bda2b82bdaacff3' # directory to save output files to ARCHIVE_DIR = 'archive' # # The below configs let you bake in the server IP and beacon interval # into the final executable so it can simply be executed without supplying # command line arguments. # # server IP # # if this is None, it *must* be specified as a command line argument # when client is executed # # SERVER_IP = '1.2.3.4' # example SERVER_IP = None # client beacon interval # # if this is None, it *may* be specified as a command line argument, # otherwise, it will take the default value # # BEACON_INTERVAL = 300 # example BEACON_INTERVAL = None
Add comment about changing auth token
Add comment about changing auth token
Python
mit
mossberg/poet
0e99654d606038098d45fb83cc40405742e43ae8
readthedocs/builds/filters.py
readthedocs/builds/filters.py
from django.utils.translation import ugettext_lazy as _ import django_filters from builds import constants from builds.models import Build, Version ANY_REPO = ( ('', _('Any')), ) BUILD_TYPES = ANY_REPO + constants.BUILD_TYPES class VersionFilter(django_filters.FilterSet): project = django_filters.CharFilter(name='project__name', lookup_type="icontains") slug= django_filters.CharFilter(label=_("Slug"), name='slug', lookup_type='icontains') class Meta: model = Version fields = ['project', 'slug'] class BuildFilter(django_filters.FilterSet): date = django_filters.DateRangeFilter(label=_("Build Date"), name="date") type = django_filters.ChoiceFilter(label=_("Build Type"), choices=BUILD_TYPES) class Meta: model = Build fields = ['type', 'date', 'version', 'success']
from django.utils.translation import ugettext_lazy as _ import django_filters from builds import constants from builds.models import Build, Version ANY_REPO = ( ('', _('Any')), ) BUILD_TYPES = ANY_REPO + constants.BUILD_TYPES class VersionFilter(django_filters.FilterSet): project = django_filters.CharFilter(name='project__name', lookup_type="icontains") slug= django_filters.CharFilter(label=_("Slug"), name='slug', lookup_type='icontains') class Meta: model = Version fields = ['project', 'slug'] class BuildFilter(django_filters.FilterSet): date = django_filters.DateRangeFilter(label=_("Build Date"), name="date") type = django_filters.ChoiceFilter(label=_("Build Type"), choices=BUILD_TYPES) class Meta: model = Build fields = ['type', 'date', 'success']
Remove version from Build filter.
Remove version from Build filter.
Python
mit
agjohnson/readthedocs.org,fujita-shintaro/readthedocs.org,GovReady/readthedocs.org,nyergler/pythonslides,Tazer/readthedocs.org,techtonik/readthedocs.org,takluyver/readthedocs.org,nyergler/pythonslides,GovReady/readthedocs.org,nikolas/readthedocs.org,gjtorikian/readthedocs.org,cgourlay/readthedocs.org,d0ugal/readthedocs.org,sid-kap/readthedocs.org,CedarLogic/readthedocs.org,gjtorikian/readthedocs.org,sils1297/readthedocs.org,singingwolfboy/readthedocs.org,kdkeyser/readthedocs.org,kenshinthebattosai/readthedocs.org,VishvajitP/readthedocs.org,hach-que/readthedocs.org,laplaceliu/readthedocs.org,KamranMackey/readthedocs.org,techtonik/readthedocs.org,kdkeyser/readthedocs.org,tddv/readthedocs.org,sils1297/readthedocs.org,fujita-shintaro/readthedocs.org,sunnyzwh/readthedocs.org,wijerasa/readthedocs.org,attakei/readthedocs-oauth,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,kenshinthebattosai/readthedocs.org,Tazer/readthedocs.org,atsuyim/readthedocs.org,KamranMackey/readthedocs.org,laplaceliu/readthedocs.org,pombredanne/readthedocs.org,CedarLogic/readthedocs.org,soulshake/readthedocs.org,cgourlay/readthedocs.org,d0ugal/readthedocs.org,emawind84/readthedocs.org,soulshake/readthedocs.org,espdev/readthedocs.org,kenwang76/readthedocs.org,espdev/readthedocs.org,raven47git/readthedocs.org,SteveViss/readthedocs.org,mhils/readthedocs.org,Carreau/readthedocs.org,laplaceliu/readthedocs.org,KamranMackey/readthedocs.org,sid-kap/readthedocs.org,soulshake/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,titiushko/readthedocs.org,mixxorz/wagtail,nutztherookie/wagtail,rsalmaso/wagtail,torchbox/wagtail,chrxr/wagtail,gasman/wagtail,zerolab/wagtail,Toshakins/wagtail,takeflight/wagtail,FlipperPA/readthedocs.org,mhils/readthedocs.org,Tazer/readthedocs.org,sils1297/readthedocs.org,cgourlay/readthedocs.org,royalwang/readthedocs.org,sid-kap/readthedocs.org,emawind84/readthedocs.org,mrshoki/readthedocs.org,pombredanne/readthedocs.org,SteveViss/readthedocs.org,attakei/readthedocs-oauth,ojii/readthedocs.org,davidfischer/readthedocs.org,istresearch/readthedocs.org,hach-que/readthedocs.org,raven47git/readthedocs.org,fujita-shintaro/readthedocs.org,titiushko/readthedocs.org,LukasBoersma/readthedocs.org,singingwolfboy/readthedocs.org,royalwang/readthedocs.org,atsuyim/readthedocs.org,safwanrahman/readthedocs.org,VishvajitP/readthedocs.org,wanghaven/readthedocs.org,davidfischer/readthedocs.org,GovReady/readthedocs.org,clarkperkins/readthedocs.org,royalwang/readthedocs.org,atsuyim/readthedocs.org,dirn/readthedocs.org,sunnyzwh/readthedocs.org,stevepiercy/readthedocs.org,kdkeyser/readthedocs.org,gjtorikian/readthedocs.org,fujita-shintaro/readthedocs.org,sunnyzwh/readthedocs.org,wijerasa/readthedocs.org,attakei/readthedocs-oauth,michaelmcandrew/readthedocs.org,davidfischer/readthedocs.org,kenshinthebattosai/readthedocs.org,Tazer/readthedocs.org,atsuyim/readthedocs.org,KamranMackey/readthedocs.org,laplaceliu/readthedocs.org,pombredanne/readthedocs.org,CedarLogic/readthedocs.org,soulshake/readthedocs.org,cgourlay/readthedocs.org,d0ugal/readthedocs.org,emawind84/readthedocs.org,soulshake/readthedocs.org,espdev/readthedocs.org,kenwang76/readthedocs.org,espdev/readthedocs.org,raven47git/readthedocs.org,SteveViss/readthedocs.org,mhils/readthedocs.org,Carreau/readthedocs.org,laplaceliu/readthedocs.org,KamranMackey/readthedocs.org,sid-kap/readthedocs.org,soulshake/readthedocs.org,safwanrahman/readthedocs.org,espdev/readthedocs.org,gjtorikian/readthedocs.org,titiushko/readthedocs.org
0b7636422c632172dfc68ea2a5f21ec649248c8c
nimp/commands/vs_build.py
nimp/commands/vs_build.py
# -*- coding: utf-8 -*- from nimp.commands._command import * from nimp.utilities.build import * #------------------------------------------------------------------------------- class VsBuildCommand(Command): def __init__(self): Command.__init__(self, 'vs-build', 'Builds a Visual Studio project') #--------------------------------------------------------------------------- def configure_arguments(self, env, parser): parser.add_argument('solution', help = 'Solution file', metavar = '<FILE>') parser.add_argument('project', help = 'Project', metavar = '<FILE>', default = 'None') parser.add_argument('--target', help = 'Target', metavar = '<TARGET>', default = 'Build') parser.add_argument('-c', '--configuration', help = 'configuration to build', metavar = '<configuration>', default = 'release') parser.add_argument('-p', '--platform', help = 'platform to build', metavar = '<platform>', default = 'Win64') parser.add_argument('--vs-version', help = 'VS version to use', metavar = '<VERSION>', default = '12') return True #--------------------------------------------------------------------------- def run(self, env): return vsbuild(env.solution, env.platform, env.configuration, env.project, env.vs_version, env.target)
# -*- coding: utf-8 -*- from nimp.commands._command import * from nimp.utilities.build import * #------------------------------------------------------------------------------- class VsBuildCommand(Command): def __init__(self): Command.__init__(self, 'vs-build', 'Builds a Visual Studio project') #--------------------------------------------------------------------------- def configure_arguments(self, env, parser): parser.add_argument('solution', help = 'Solution file', metavar = '<FILE>') parser.add_argument('project', help = 'Project', metavar = '<FILE>', default = 'None') parser.add_argument('--target', help = 'Target', metavar = '<TARGET>', default = 'Build') parser.add_argument('-c', '--vs-configuration', help = 'configuration to build', metavar = '<vs-configuration>', default = 'release') parser.add_argument('-p', '--vs-platform', help = 'platform to build', metavar = '<vs-platform>', default = 'Win64') parser.add_argument('--vs-version', help = 'VS version to use', metavar = '<VERSION>', default = '12') return True #--------------------------------------------------------------------------- def run(self, env): return vsbuild(env.solution, env.vs_platform, env.vs_configuration, env.project, env.vs_version, env.target)
Use separate variable names for Visual Studio config/platform.
Use separate variable names for Visual Studio config/platform.
Python
mit
dontnod/nimp
84b01f0015163dc016293162f1525be76329e602
pythonforandroid/recipes/cryptography/__init__.py
pythonforandroid/recipes/cryptography/__init__.py
from pythonforandroid.recipe import CompiledComponentsPythonRecipe, Recipe class CryptographyRecipe(CompiledComponentsPythonRecipe): name = 'cryptography' version = '2.4.2' url = 'https://github.com/pyca/cryptography/archive/{version}.tar.gz' depends = ['openssl', 'idna', 'asn1crypto', 'six', 'setuptools', 'enum34', 'ipaddress', 'cffi'] call_hostpython_via_targetpython = False def get_recipe_env(self, arch): env = super(CryptographyRecipe, self).get_recipe_env(arch) openssl_recipe = Recipe.get_recipe('openssl', self.ctx) env['CFLAGS'] += openssl_recipe.include_flags(arch) env['LDFLAGS'] += openssl_recipe.link_flags(arch) return env recipe = CryptographyRecipe()
from pythonforandroid.recipe import CompiledComponentsPythonRecipe, Recipe class CryptographyRecipe(CompiledComponentsPythonRecipe): name = 'cryptography' version = '2.4.2' url = 'https://github.com/pyca/cryptography/archive/{version}.tar.gz' depends = ['openssl', 'idna', 'asn1crypto', 'six', 'setuptools', 'enum34', 'ipaddress', 'cffi'] call_hostpython_via_targetpython = False def get_recipe_env(self, arch): env = super(CryptographyRecipe, self).get_recipe_env(arch) openssl_recipe = Recipe.get_recipe('openssl', self.ctx) env['CFLAGS'] += openssl_recipe.include_flags(arch) env['LDFLAGS'] += openssl_recipe.link_dirs_flags(arch) env['LIBS'] = openssl_recipe.link_libs_flags() return env recipe = CryptographyRecipe()
Move libraries from LDFLAGS to LIBS for cryptography recipe
Move libraries from LDFLAGS to LIBS for cryptography recipe Because this is how you are supposed to do it, you must use LDFLAGS for linker flags and LDLIBS (or the equivalent LOADLIBES) for the libraries
Python
mit
kronenpj/python-for-android,rnixx/python-for-android,PKRoma/python-for-android,germn/python-for-android,PKRoma/python-for-android,kronenpj/python-for-android,rnixx/python-for-android,germn/python-for-android,rnixx/python-for-android,kivy/python-for-android,PKRoma/python-for-android,rnixx/python-for-android,germn/python-for-android,kronenpj/python-for-android,kivy/python-for-android,germn/python-for-android,kivy/python-for-android,kronenpj/python-for-android,PKRoma/python-for-android,kivy/python-for-android,germn/python-for-android,rnixx/python-for-android,kivy/python-for-android,germn/python-for-android,kronenpj/python-for-android,PKRoma/python-for-android,rnixx/python-for-android
72301067306d6baf4aab0315a769c75dd585b46d
pi_setup/boot_config.py
pi_setup/boot_config.py
#!/usr/bin/env python from utils import file_templates from utils.validation import is_valid_gpu_mem def main(): gpu_mem = 0 while gpu_mem == 0: user_input = raw_input("Enter GPU memory in MB (16/32/64/128/256): ") if is_valid_gpu_mem(user_input): gpu_mem = user_input else: print("Acceptable memory values are: 16/32/64/128/256") update_file('/boot/config.txt', gpu_mem) def update_file(path, gpu_mem): data = { 'gpu_mem': gpu_mem } template_name = path.split('/')[-1] new_file_data = file_templates.build(template_name, data) with open(path, 'w') as f: f.write(new_file_data) if __name__ == '__main__': main()
#!/usr/bin/env python from utils import file_templates from utils.validation import is_valid_gpu_mem def main(): user_input = raw_input("Want to change the GPU memory split? (Y/N): ") if user_input == 'Y': gpu_mem = 0 while gpu_mem == 0: mem_split = raw_input("Enter GPU memory in MB (16/32/64/128/256): ") if is_valid_gpu_mem(mem_split): gpu_mem = mem_split else: print("Acceptable memory values are: 16/32/64/128/256") update_file('/boot/config.txt', gpu_mem) else: print("Skipping GPU memory split...") def update_file(path, gpu_mem): data = { 'gpu_mem': gpu_mem } template_name = path.split('/')[-1] new_file_data = file_templates.build(template_name, data) with open(path, 'w') as f: f.write(new_file_data) if __name__ == '__main__': main()
Make GPU mem split optional
Make GPU mem split optional
Python
mit
projectweekend/Pi-Setup,projectweekend/Pi-Setup
2fe5f960f4998a0337bceabd7db930ac5d5a4fd1
qipipe/qiprofile/helpers.py
qipipe/qiprofile/helpers.py
import re from datetime import datetime TRAILING_NUM_REGEX = re.compile("(\d+)$") """A regular expression to extract the trailing number from a string.""" DATE_REGEX = re.compile("(0?\d|1[12])/(0?\d|[12]\d|3[12])/((19|20)?\d\d)$") class DateError(Exception): pass def trailing_number(s): """ :param s: the input string :return: the trailing number in the string, or None if there is none """ match = TRAILING_NUM_REGEX.search(s) if match: return int(match.group(1)) def default_parser(attribute): """ Retuns the default parser, determined as follows: * If the attribute ends in ``date``, then a MM/DD/YYYY datetime parser :param attribute: the row attribute :return: the function or lambda value parser, or None if none """ if attribute.endswith('date'): return _parse_date def _parse_date(s): """ :param s: the input date string :return: the parsed datetime :rtype: datetime """ match = DATE_REGEX.match(s) if not match: raise DateError("Date is not in a supported format: %s" % s) m, d, y = map(int, match.groups()[:3]) if y < 20: y += 2000 elif y < 100: y += 1900 return datetime(y, m, d)
import re from datetime import datetime TRAILING_NUM_REGEX = re.compile("(\d+)$") """A regular expression to extract the trailing number from a string.""" DATE_REGEX = re.compile("(0?\d|1[12])/(0?\d|[12]\d|3[12])/((19|20)?\d\d)$") class DateError(Exception): pass def trailing_number(s): """ :param s: the input string :return: the trailing number in the string, or None if there is none """ match = TRAILING_NUM_REGEX.search(s) if match: return int(match.group(1)) def default_parser(attribute): """ Retuns the default parser, determined as follows: * If the attribute ends in ``date``, then a MM/DD/YYYY datetime parser :param attribute: the row attribute :return: the value parser function, or None if none """ if attribute.endswith('date'): return _parse_date def _parse_date(s): """ :param s: the input date string :return: the parsed datetime :rtype: datetime """ match = DATE_REGEX.match(s) if not match: raise DateError("Date is not in a supported format: %s" % s) m, d, y = map(int, match.groups()[:3]) if y < 20: y += 2000 elif y < 100: y += 1900 return datetime(y, m, d)
Change lambda to function in doc.
Change lambda to function in doc.
Python
bsd-2-clause
ohsu-qin/qipipe
2f360d9986c13adaaf670b80b27dad995823b849
bandstructure/system/tightbindingsystem.py
bandstructure/system/tightbindingsystem.py
import numpy as np from .system import System class TightBindingSystem(System): def setDefaultParams(self): self.params.setdefault('t', 1) # nearest neighbor tunneling strength self.params.setdefault('t2', 0) # next-nearest neighbor .. def tunnelingRate(self, dr): t = self.get("t") t2 = self.get("t2") # Nearest neighbors: # Only with newest numpy version: # nn = np.linalg.norm(dr, axis=3) == 1 # TODO! get the real nearest neighbor distance # nnn = np.linalg.norm(dr, axis=3) == 2 # TODO! nn = np.sqrt(np.sum(dr ** 2, axis=3)) == 1 # TODO! get the real nearest neighbor distance nnn = np.sqrt(np.sum(dr ** 2, axis=3)) == 2 # TODO # Orbital matrix m = np.array([[1, 0], [0, -1]]) # m = np.array([-t]) return t * m * nn[:, :, :, None, None] + t2 * m * nnn[:, :, :, None, None]
import numpy as np from .system import System class TightBindingSystem(System): def setDefaultParams(self): self.params.setdefault('t', 1) # nearest neighbor tunneling strength self.params.setdefault('t2', 0) # next-nearest neighbor .. def tunnelingRate(self, dr): t = self.get("t") t2 = self.get("t2") # Orbital matrix m = np.array([[1, 0], [0, -1]]) # m = np.array([-t]) nn = dr.getNeighborsMask(1) nnn = dr.getNeighborsMask(2) return t * m * nn[:, :, :, None, None] + t2 * m * nnn[:, :, :, None, None]
Use new functions for getting (next) nearest neighbors
Use new functions for getting (next) nearest neighbors
Python
mit
sharkdp/bandstructure,sharkdp/bandstructure
611f95b0c72e436ebf056329349216625c61e133
wagtail/tests/testapp/migrations/0009_defaultstreampage.py
wagtail/tests/testapp/migrations/0009_defaultstreampage.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-21 11:37 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import wagtail.wagtailcore.blocks import wagtail.wagtailcore.fields import wagtail.wagtailimages.blocks class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0030_index_on_pagerevision_created_at'), ('tests', '0008_inlinestreampage_inlinestreampagesection'), ] operations = [ migrations.CreateModel( name='DefaultStreamPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('body', wagtail.wagtailcore.fields.StreamField((('text', wagtail.wagtailcore.blocks.CharBlock()), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())), default='')), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), ]
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2016-09-21 11:37 from __future__ import unicode_literals from django.db import migrations, models import django.db.models.deletion import wagtail.wagtailcore.blocks import wagtail.wagtailcore.fields import wagtail.wagtailimages.blocks class Migration(migrations.Migration): dependencies = [ ('wagtailcore', '0029_unicode_slugfield_dj19'), ('tests', '0008_inlinestreampage_inlinestreampagesection'), ] operations = [ migrations.CreateModel( name='DefaultStreamPage', fields=[ ('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')), ('body', wagtail.wagtailcore.fields.StreamField((('text', wagtail.wagtailcore.blocks.CharBlock()), ('rich_text', wagtail.wagtailcore.blocks.RichTextBlock()), ('image', wagtail.wagtailimages.blocks.ImageChooserBlock())), default='')), ], options={ 'abstract': False, }, bases=('wagtailcore.page',), ), ]
Adjust testapp migration dependency to be valid on 1.6.x
Adjust testapp migration dependency to be valid on 1.6.x
Python
bsd-3-clause
mixxorz/wagtail,nutztherookie/wagtail,rsalmaso/wagtail,torchbox/wagtail,chrxr/wagtail,gasman/wagtail,zerolab/wagtail,Toshakins/wagtail,takeflight/wagtail,FlipperPA/wagtail,iansprice/wagtail,takeflight/wagtail,wagtail/wagtail,nilnvoid/wagtail,nealtodd/wagtail,FlipperPA/wagtail,nilnvoid/wagtail,chrxr/wagtail,takeflight/wagtail,wagtail/wagtail,iansprice/wagtail,iansprice/wagtail,thenewguy/wagtail,nimasmi/wagtail,gasman/wagtail,Toshakins/wagtail,torchbox/wagtail,nealtodd/wagtail,mixxorz/wagtail,wagtail/wagtail,mixxorz/wagtail,nimasmi/wagtail,jnns/wagtail,nutztherookie/wagtail,timorieber/wagtail,kaedroho/wagtail,rsalmaso/wagtail,kaedroho/wagtail,mixxorz/wagtail,chrxr/wagtail,FlipperPA/wagtail,nealtodd/wagtail,rsalmaso/wagtail,torchbox/wagtail,nilnvoid/wagtail,jnns/wagtail,nimasmi/wagtail,zerolab/wagtail,gasman/wagtail,mikedingjan/wagtail,Toshakins/wagtail,rsalmaso/wagtail,timorieber/wagtail,jnns/wagtail,nealtodd/wagtail,iansprice/wagtail,torchbox/wagtail,nimasmi/wagtail,mixxorz/wagtail,mikedingjan/wagtail,nutztherookie/wagtail,kaedroho/wagtail,Toshakins/wagtail,zerolab/wagtail,kaedroho/wagtail,thenewguy/wagtail,thenewguy/wagtail,timorieber/wagtail,chrxr/wagtail,mikedingjan/wagtail,gasman/wagtail,rsalmaso/wagtail,zerolab/wagtail,gasman/wagtail,nutztherookie/wagtail,wagtail/wagtail,zerolab/wagtail,kaedroho/wagtail,wagtail/wagtail,FlipperPA/wagtail,jnns/wagtail,nilnvoid/wagtail,mikedingjan/wagtail,timorieber/wagtail,thenewguy/wagtail,takeflight/wagtail,thenewguy/wagtail
c5946e378147f6d4d42c7a3e531388e6203f29e4
fantasyStocks/static/stockCleaner.py
fantasyStocks/static/stockCleaner.py
import json with open("stocks.json") as f:
from pprint import pprint import json import re REGEXP = re.compile("(?P<symbol>[A-Z]{1,4}).*") with open("stocks.json") as f: l = json.loads(f.read()) out = [] for i in l: if not "^" in i["symbol"]: out.append(i) with open("newStocks.json", "w") as w: w.write(json.dumps(out))
Write script to remove duplicates from stocks.json
Write script to remove duplicates from stocks.json
Python
apache-2.0
ddsnowboard/FantasyStocks,ddsnowboard/FantasyStocks,ddsnowboard/FantasyStocks
d1c3f195e455b926429aadf84cfd9fc51db2802f
fluent_contents/tests/test_models.py
fluent_contents/tests/test_models.py
from django.contrib.contenttypes.models import ContentType from fluent_contents.models import ContentItem from fluent_contents.tests.utils import AppTestCase class ModelTests(AppTestCase): """ Testing the data model. """ def test_stale_model_str(self): """ No matter what, the ContentItem.__str__() should work. This would break the admin delete screen otherwise. """ c = ContentType() c.save() a = ContentItem(polymorphic_ctype=c) self.assertEqual(str(a), "'(type deleted) 0' in 'None None'")
import django from django.contrib.contenttypes.models import ContentType from fluent_contents.models import ContentItem from fluent_contents.tests.utils import AppTestCase class ModelTests(AppTestCase): """ Testing the data model. """ def test_stale_model_str(self): """ No matter what, the ContentItem.__str__() should work. This would break the admin delete screen otherwise. """ c = ContentType() if django.VERSION >= (1, 8): c.save() a = ContentItem(polymorphic_ctype=c) self.assertEqual(str(a), "'(type deleted) 0' in 'None None'")
Improve tests for older Django versions
Improve tests for older Django versions
Python
apache-2.0
edoburu/django-fluent-contents,edoburu/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,django-fluent/django-fluent-contents,edoburu/django-fluent-contents
2c8b60569d20a350b33f3c5e8ba00bdc3d9bbee4
ask_sweden/lambda_function.py
ask_sweden/lambda_function.py
import logging logger = logging.getLogger() logger.setLevel(logging.INFO) from ask import alexa def lambda_handler(request_obj, context=None): return alexa.route_request(request_obj) @alexa.default def default_handler(request): logger.info('default_handler') return alexa.respond('There were 42 accidents in 2016.') @alexa.request("LaunchRequest") def launch_request_handler(request): logger.info('launch_request_handler') return alexa.create_response(message='You can ask me about car accidents.') @alexa.request("SessionEndedRequest") def session_ended_request_handler(request): logger.info('session_ended_request_handler') return alexa.create_response(message="Goodbye!") @alexa.intent('AMAZON.CancelIntent') def cancel_intent_handler(request): logger.info('cancel_intent_handler') return alexa.create_response(message='ok', end_session=True) @alexa.intent('AMAZON.HelpIntent') def help_intent_handler(request): logger.info('help_intent_handler') return alexa.create_response(message='You can ask me about car accidents.') @alexa.intent('AMAZON.StopIntent') def stop_intent_handler(request): logger.info('stop_intent_handler') return alexa.create_response(message='ok', end_session=True)
import logging logger = logging.getLogger() logger.setLevel(logging.INFO) from ask import alexa def lambda_handler(request_obj, context=None): return alexa.route_request(request_obj) @alexa.default def default_handler(request): logger.info('default_handler') return alexa.respond('There were 42 accidents in 2016.') @alexa.request("LaunchRequest") def launch_request_handler(request): logger.info('launch_request_handler') return alexa.respond('You can ask me about car accidents.') @alexa.request("SessionEndedRequest") def session_ended_request_handler(request): logger.info('session_ended_request_handler') return alexa.respond('Goodbye.') @alexa.intent('AMAZON.CancelIntent') def cancel_intent_handler(request): logger.info('cancel_intent_handler') return alexa.respond('Okay.', end_session=True) @alexa.intent('AMAZON.HelpIntent') def help_intent_handler(request): logger.info('help_intent_handler') return alexa.respond('You can ask me about car accidents.') @alexa.intent('AMAZON.StopIntent') def stop_intent_handler(request): logger.info('stop_intent_handler') return alexa.respond('Okay.', end_session=True)
Use respond instead of create_response
Use respond instead of create_response
Python
mit
geoaxis/ask-sweden,geoaxis/ask-sweden
aa82f91d220e8985c7f6dc68433ad65e70a71d15
froide/foirequest/tests/test_mail.py
froide/foirequest/tests/test_mail.py
# -*- coding: utf-8 -*- from __future__ import with_statement from django.test import TestCase from foirequest.tasks import _process_mail from foirequest.models import FoiRequest class MailTest(TestCase): fixtures = ['publicbodies.json', "foirequest.json"] def test_working(self): with file("foirequest/tests/test_mail_01.txt") as f: _process_mail(f.read()) request = FoiRequest.objects.get_by_secret_mail("s.wehrmeyer+axb4afh@fragdenstaat.de") messages = request.foimessage_set.all() self.assertEqual(len(messages), 2) def test_working_with_attachment(self): with file("foirequest/tests/test_mail_02.txt") as f: _process_mail(f.read()) request = FoiRequest.objects.get_by_secret_mail("s.wehrmeyer+axb4afh@fragdenstaat.de") messages = request.foimessage_set.all() self.assertEqual(len(messages), 2) self.assertEqual(messages[1].subject, u"Fwd: Informationsfreiheitsgesetz des Bundes, Antragsvordruck für Open Data") self.assertEqual(len(message[1].attachments), 1)
# -*- coding: utf-8 -*- from __future__ import with_statement from django.test import TestCase from foirequest.tasks import _process_mail from foirequest.models import FoiRequest class MailTest(TestCase): fixtures = ['publicbodies.json', "foirequest.json"] def test_working(self): with file("foirequest/tests/test_mail_01.txt") as f: _process_mail(f.read()) request = FoiRequest.objects.get_by_secret_mail("s.wehrmeyer+axb4afh@fragdenstaat.de") messages = request.foimessage_set.all() self.assertEqual(len(messages), 2) def test_working_with_attachment(self): with file("foirequest/tests/test_mail_02.txt") as f: _process_mail(f.read()) request = FoiRequest.objects.get_by_secret_mail("s.wehrmeyer+axb4afh@fragdenstaat.de") messages = request.foimessage_set.all() self.assertEqual(len(messages), 2) self.assertEqual(messages[1].subject, u"Fwd: Informationsfreiheitsgesetz des Bundes, Antragsvordruck für Open Data") self.assertEqual(len(messages[1].attachments), 1) self.assertEqual(messages[1].attachments[0].name, u"TI - IFG-Antrag, Vordruck.docx")
Test for attachment in mail test
Test for attachment in mail test
Python
mit
catcosmo/froide,okfse/froide,fin/froide,stefanw/froide,catcosmo/froide,fin/froide,LilithWittmann/froide,LilithWittmann/froide,okfse/froide,LilithWittmann/froide,ryankanno/froide,stefanw/froide,LilithWittmann/froide,catcosmo/froide,catcosmo/froide,ryankanno/froide,CodeforHawaii/froide,okfse/froide,ryankanno/froide,ryankanno/froide,CodeforHawaii/froide,okfse/froide,ryankanno/froide,stefanw/froide,okfse/froide,CodeforHawaii/froide,stefanw/froide,CodeforHawaii/froide,CodeforHawaii/froide,LilithWittmann/froide,fin/froide,stefanw/froide,catcosmo/froide,fin/froide
20f0d90f5c64322864ad5fda4b4c9314e6c1cb11
run.py
run.py
#!/usr/bin/env python # coding=utf-8 import sys from kitchen.text.converters import getwriter from utils.log import getLogger, open_log, close_log from utils.misc import output_exception from system.factory_manager import Manager sys.stdout = getwriter('utf-8')(sys.stdout) sys.stderr = getwriter('utf-8')(sys.stderr) open_log("output.log") logger = getLogger("System") logger.info("Starting up..") try: manager = Manager() except Exception: logger.critical("Runtime error - process cannot continue!") output_exception(logger) finally: close_log("output.log") try: raw_input("Press enter to exit.") except: pass
#!/usr/bin/env python # coding=utf-8 import os import sys from kitchen.text.converters import getwriter from utils.log import getLogger, open_log, close_log from utils.misc import output_exception from system.factory_manager import Manager sys.stdout = getwriter('utf-8')(sys.stdout) sys.stderr = getwriter('utf-8')(sys.stderr) if not os.path.exists("logs"): os.mkdir("logs") open_log("output.log") logger = getLogger("System") logger.info("Starting up..") try: manager = Manager() except Exception: logger.critical("Runtime error - process cannot continue!") output_exception(logger) finally: close_log("output.log") try: raw_input("Press enter to exit.") except: pass
Create logs folder if it doesn't exist (to prevent errors)
Create logs folder if it doesn't exist (to prevent errors)
Python
artistic-2.0
UltrosBot/Ultros,UltrosBot/Ultros
80215a593c2fdcf0a0ae8b1c61c4342faffd6dac
run.py
run.py
import bb2gh import time config_yaml = 'config.yaml' for issue_id in range(1, 500): while True: try: bb2gh.migrate(config_yaml, verbose=True, issue_ids=[issue_id]) except Exception as inst: print 'issue_id',issue_id print type(inst) print inst.data['message'] print 'waiting for 60 seconds' print time.sleep(60) else: break
import bb2gh import time config_yaml = 'config.yaml' for issue_id in range(190, 500): while True: try: bb2gh.migrate(config_yaml, verbose=True, issue_ids=[issue_id]) except Exception as inst: print 'issue_id',issue_id print type(inst) print inst print 'waiting for 60 seconds' print time.sleep(60) else: break
Fix bug, 'message' key throwing error.
Fix bug, 'message' key throwing error.
Python
mit
wd15/bb2gh
ba983dea1e20409d403a86d62c300ea3d257b58a
parserscripts/phage.py
parserscripts/phage.py
import re class Phage: supported_databases = { # European Nucleotide Archive phage database "ENA": r"^gi\|[0-9]+\|ref\|([^\|]+)\|\ ([^,]+)[^$]*$", # National Center for Biotechnology Information phage database "NCBI": r"^ENA\|([^\|]+)\|[^\ ]+\ ([^,]+)[^$]*$", # Actinobacteriophage Database "AD": r"^([^\ ]+)\ [^,]*,[^,]*,\ Cluster\ ([^$]+)$" } def __init__(self, raw_text, phage_finder): self.raw = raw_text.strip() self.refseq = None self.name = None self.db = None self._parse_phage(raw_text, phage_finder) def _parse_phage(self, raw_text, phage_finder): for db, regex in Phage.supported_databases.items(): match = re.search(regex, raw_text) if match is not None: if db is not "AD": self.name = match.group(2) self.refseq = match.group(1) else: short_name = match.group(1) cluster = match.group(2) self.name = "Mycobacteriophage " + short_name self.refseq = phage_finder.find_by_phage(short_name, cluster) self.db = db
import re class Phage: SUPPORTED_DATABASES = { # European Nucleotide Archive phage database "ENA": r"^gi\|[0-9]+\|ref\|([^\|]+)\|\ ([^,]+)[^$]*$", # National Center for Biotechnology Information phage database "NCBI": r"^ENA\|([^\|]+)\|[^\ ]+\ ([^,]+)[^$]*$", # Actinobacteriophage Database "AD": r"^([^\ ]+)\ [^,]*,[^,]*,\ Cluster\ ([^$]+)$" } def __init__(self, raw_text, phage_finder): self.raw = raw_text.strip() self.refseq = None self.name = None self.db = None self._parse_phage(raw_text, phage_finder) def _parse_phage(self, raw_text, phage_finder): for db, regex in Phage.SUPPORTED_DATABASES.items(): match = re.search(regex, raw_text) if match is not None: if db is not "AD": self.name = match.group(2) self.refseq = match.group(1) else: short_name = match.group(1) cluster = match.group(2) self.name = "Mycobacteriophage " + short_name self.refseq = phage_finder.find_by_phage(short_name, cluster) self.db = db
Rename to follow constant naming conventions
Rename to follow constant naming conventions
Python
mit
mbonsma/phageParser,mbonsma/phageParser,phageParser/phageParser,mbonsma/phageParser,phageParser/phageParser,goyalsid/phageParser,goyalsid/phageParser,phageParser/phageParser,phageParser/phageParser,mbonsma/phageParser,goyalsid/phageParser
8c05cb85c47db892dd13abbd91b3948c09b9a954
statsmodels/tools/__init__.py
statsmodels/tools/__init__.py
from tools import add_constant, categorical from datautils import Dataset from statsmodels import NoseWrapper as Tester test = Tester().test
from tools import add_constant, categorical from statsmodels import NoseWrapper as Tester test = Tester().test
Remove import of moved file
REF: Remove import of moved file
Python
bsd-3-clause
josef-pkt/statsmodels,adammenges/statsmodels,saketkc/statsmodels,DonBeo/statsmodels,edhuckle/statsmodels,saketkc/statsmodels,wkfwkf/statsmodels,wzbozon/statsmodels,huongttlan/statsmodels,kiyoto/statsmodels,astocko/statsmodels,musically-ut/statsmodels,bsipocz/statsmodels,wwf5067/statsmodels,jstoxrocky/statsmodels,cbmoore/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,bzero/statsmodels,yl565/statsmodels,ChadFulton/statsmodels,nguyentu1602/statsmodels,saketkc/statsmodels,astocko/statsmodels,DonBeo/statsmodels,edhuckle/statsmodels,jseabold/statsmodels,bert9bert/statsmodels,bashtage/statsmodels,nvoron23/statsmodels,adammenges/statsmodels,hainm/statsmodels,bashtage/statsmodels,Averroes/statsmodels,gef756/statsmodels,detrout/debian-statsmodels,yarikoptic/pystatsmodels,bavardage/statsmodels,wzbozon/statsmodels,YihaoLu/statsmodels,bavardage/statsmodels,wwf5067/statsmodels,bsipocz/statsmodels,edhuckle/statsmodels,statsmodels/statsmodels,nvoron23/statsmodels,adammenges/statsmodels,wwf5067/statsmodels,yl565/statsmodels,alekz112/statsmodels,waynenilsen/statsmodels,bert9bert/statsmodels,detrout/debian-statsmodels,alekz112/statsmodels,wzbozon/statsmodels,jseabold/statsmodels,rgommers/statsmodels,ChadFulton/statsmodels,bashtage/statsmodels,YihaoLu/statsmodels,ChadFulton/statsmodels,bavardage/statsmodels,musically-ut/statsmodels,nguyentu1602/statsmodels,bsipocz/statsmodels,waynenilsen/statsmodels,nvoron23/statsmodels,huongttlan/statsmodels,bashtage/statsmodels,Averroes/statsmodels,adammenges/statsmodels,hlin117/statsmodels,wwf5067/statsmodels,gef756/statsmodels,cbmoore/statsmodels,hainm/statsmodels,wdurhamh/statsmodels,wdurhamh/statsmodels,josef-pkt/statsmodels,phobson/statsmodels,alekz112/statsmodels,bsipocz/statsmodels,saketkc/statsmodels,wdurhamh/statsmodels,musically-ut/statsmodels,kiyoto/statsmodels,phobson/statsmodels,wdurhamh/statsmodels,gef756/statsmodels,kiyoto/statsmodels,statsmodels/statsmodels,rgommers/statsmodels,josef-pkt/statsmodels,hainm/statsmodels,edhuckle/statsmodels,hlin117/statsmodels,gef756/statsmodels,YihaoLu/statsmodels,statsmodels/statsmodels,hlin117/statsmodels,bert9bert/statsmodels,edhuckle/statsmodels,hlin117/statsmodels,jstoxrocky/statsmodels,bzero/statsmodels,yarikoptic/pystatsmodels,yl565/statsmodels,saketkc/statsmodels,kiyoto/statsmodels,huongttlan/statsmodels,Averroes/statsmodels,nvoron23/statsmodels,astocko/statsmodels,wzbozon/statsmodels,bzero/statsmodels,ChadFulton/statsmodels,bert9bert/statsmodels,yl565/statsmodels,bashtage/statsmodels,jseabold/statsmodels,kiyoto/statsmodels,phobson/statsmodels,bzero/statsmodels,cbmoore/statsmodels,musically-ut/statsmodels,ChadFulton/statsmodels,DonBeo/statsmodels,yarikoptic/pystatsmodels,nguyentu1602/statsmodels,jstoxrocky/statsmodels,alekz112/statsmodels,adammenges/statsmodels,bavardage/statsmodels,wkfwkf/statsmodels,josef-pkt/statsmodels,bashtage/statsmodels,jstoxrocky/statsmodels,yl565/statsmodels,bashtage/statsmodels,astocko/statsmodels,nguyentu1602/statsmodels,wkfwkf/statsmodels,Averroes/statsmodels,nvoron23/statsmodels,DonBeo/statsmodels,wzbozon/statsmodels,rgommers/statsmodels,YihaoLu/statsmodels,rgommers/statsmodels,ChadFulton/statsmodels,josef-pkt/statsmodels,bavardage/statsmodels,DonBeo/statsmodels,bert9bert/statsmodels,statsmodels/statsmodels,hainm/statsmodels,statsmodels/statsmodels,jseabold/statsmodels,wkfwkf/statsmodels,cbmoore/statsmodels,waynenilsen/statsmodels,detrout/debian-statsmodels,phobson/statsmodels,wdurhamh/statsmodels
b090c7ae0f5407562e3adc818d2f65ccd4ea7e02
src/arc_utilities/listener.py
src/arc_utilities/listener.py
from copy import deepcopy from threading import Lock import rospy from arc_utilities.ros_helpers import wait_for class Listener: def __init__(self, topic_name, topic_type, wait_for_data=False): """ Listener is a wrapper around a subscriber where the callback simply records the latest msg. Listener does not consume the message (for consuming behavior, use the standard ros callback pattern) Listener does not check timestamps of message headers Parameters: topic_name (str): name of topic to subscribe to topic_type (msg_type): type of message received on topic wait_for_data (bool): block constructor until a message has been received """ self.data = None self.lock = Lock() self.topic_name = topic_name self.subscriber = rospy.Subscriber(topic_name, topic_type, self.callback) self.get(wait_for_data) def callback(self, msg): with self.lock: self.data = msg def get(self, block_until_data=True): """ Returns the latest msg from the subscribed topic Parameters: block_until_data (bool): block if no message has been received yet. Guarantees a msg is returned (not None) """ wait_for(lambda: not (block_until_data and self.data is None), 10, f"Listener({self.topic_name})") with self.lock: return deepcopy(self.data)
from copy import deepcopy from threading import Lock import rospy from arc_utilities.ros_helpers import wait_for class Listener: def __init__(self, topic_name, topic_type, wait_for_data=False, callback=None): """ Listener is a wrapper around a subscriber where the callback simply records the latest msg. Listener does not consume the message (for consuming behavior, use the standard ros callback pattern) Listener does not check timestamps of message headers Parameters: topic_name (str): name of topic to subscribe to topic_type (msg_type): type of message received on topic wait_for_data (bool): block constructor until a message has been received callback (function taking msg_type): optional callback to be called on the data as we receive it """ self.data = None self.lock = Lock() self.topic_name = topic_name self.subscriber = rospy.Subscriber(topic_name, topic_type, self.callback) self.custom_callback = callback self.get(wait_for_data) def callback(self, msg): with self.lock: self.data = msg if self.custom_callback is not None: self.custom_callback(self.data) def get(self, block_until_data=True): """ Returns the latest msg from the subscribed topic Parameters: block_until_data (bool): block if no message has been received yet. Guarantees a msg is returned (not None) """ wait_for(lambda: not (block_until_data and self.data is None), 10, f"Listener({self.topic_name})") with self.lock: return deepcopy(self.data)
Allow optional callbacks for Listeners
Allow optional callbacks for Listeners
Python
bsd-2-clause
WPI-ARC/arc_utilities,UM-ARM-Lab/arc_utilities,UM-ARM-Lab/arc_utilities,UM-ARM-Lab/arc_utilities,WPI-ARC/arc_utilities,WPI-ARC/arc_utilities
7fc62edee40ecedc49b0529e17ac04e4d7bf6865
door/models.py
door/models.py
from django.db import models from django.utils import timezone class DoorStatus(models.Model): datetime = models.DateTimeField() status = models.BooleanField(default=False) name = models.CharField(max_length=20) def __str__(self): return self.name @staticmethod def get_door_by_name(name): # Creates the object if it does not exist try: door = DoorStatus.objects.get(name=name) return door except DoorStatus.DoesNotExist: door = DoorStatus.objects.create(name=name, datetime=timezone.now()) return door class OpenData(models.Model): opened = models.DateTimeField() closed = models.DateTimeField() def __str__(self): return str(self.opened)
from django.db import models from django.utils import timezone class DoorStatus(models.Model): datetime = models.DateTimeField() status = models.BooleanField(default=False) name = models.CharField(max_length=20) def __str__(self): return self.name @staticmethod def get_door_by_name(name): # Creates the object if it does not exist try: door = DoorStatus.objects.get(name=name) return door except DoorStatus.DoesNotExist: door = DoorStatus.objects.create(name=name, datetime=timezone.now()) return door class Meta: verbose_name_plural = "Door Statuses" class OpenData(models.Model): opened = models.DateTimeField() closed = models.DateTimeField() def __str__(self): return str(self.opened)
Change plural name of DoorStatus model
Change plural name of DoorStatus model
Python
mit
hackerspace-ntnu/website,hackerspace-ntnu/website,hackerspace-ntnu/website
4e727b52828122c37b8c398f16ad914898968e83
examples/rmg/minimal/input.py
examples/rmg/minimal/input.py
# Data sources database( thermoLibraries = ['primaryThermoLibrary'], reactionLibraries = [], seedMechanisms = [], kineticsDepositories = ['training'], kineticsFamilies = ['!Intra_Disproportionation','!Substitution_O'], kineticsEstimator = 'rate rules', ) # List of species species( label='ethane', reactive=True, structure=SMILES("CC"), ) # Reaction systems simpleReactor( temperature=(1350,'K'), pressure=(1.0,'bar'), initialMoleFractions={ "ethane": 1.0, }, terminationConversion={ 'ethane': 0.9, }, terminationTime=(1e6,'s'), ) solvation( solvent='water' ) simulator( atol=1e-16, rtol=1e-8, ) model( toleranceKeepInEdge=0.0, toleranceMoveToCore=0.1, toleranceInterruptSimulation=0.1, maximumEdgeSpecies=100000 ) options( units='si', saveRestartPeriod=None, drawMolecules=False, generatePlots=False, )
# Data sources database( thermoLibraries = ['primaryThermoLibrary'], reactionLibraries = [], seedMechanisms = [], kineticsDepositories = ['training'], kineticsFamilies = ['!Intra_Disproportionation','!Substitution_O'], kineticsEstimator = 'rate rules', ) # List of species species( label='ethane', reactive=True, structure=SMILES("CC"), ) # Reaction systems simpleReactor( temperature=(1350,'K'), pressure=(1.0,'bar'), initialMoleFractions={ "ethane": 1.0, }, terminationConversion={ 'ethane': 0.9, }, terminationTime=(1e6,'s'), ) simulator( atol=1e-16, rtol=1e-8, ) model( toleranceKeepInEdge=0.0, toleranceMoveToCore=0.1, toleranceInterruptSimulation=0.1, maximumEdgeSpecies=100000 ) options( units='si', saveRestartPeriod=None, drawMolecules=False, generatePlots=False, )
Remove solvent(water) from minimal example.
Remove solvent(water) from minimal example. Minimal should be just that - minimal. This hides issue #165
Python
mit
enochd/RMG-Py,nickvandewiele/RMG-Py,faribas/RMG-Py,comocheng/RMG-Py,nyee/RMG-Py,chatelak/RMG-Py,pierrelb/RMG-Py,faribas/RMG-Py,comocheng/RMG-Py,pierrelb/RMG-Py,nyee/RMG-Py,enochd/RMG-Py,nickvandewiele/RMG-Py,KEHANG/RMG-Py,chatelak/RMG-Py,KEHANG/RMG-Py
f8b8f3a223f195704f8cc9753963fbe82f1e4674
feincms/content/rss/models.py
feincms/content/rss/models.py
from datetime import datetime from django.db import models from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from django.template.loader import render_to_string import feedparser class RSSContent(models.Model): link = models.URLField(_('link')) rendered_content = models.TextField(_('Pre-rendered content'), blank=True, editable=False) last_updated = models.DateTimeField(_('Last updated'), blank=True, null=True) class Meta: abstract = True def render(self, **kwargs): return mark_safe(self.rendered_content) #u'<div class="rsscontent"> RSS: <a href="'+self.link+'">'+self.link+'</a></div') def cache_content(self): print u"Getting RSS feed at %s" % (self.link,) feed = feedparser.parse(self.link) print u"Pre-rendering content" self.rendered_content = render_to_string('rsscontent.html', { 'feed': feed}) self.last_updated = datetime.now() self.save()
from datetime import datetime from django.db import models from django.utils.safestring import mark_safe from django.utils.translation import ugettext_lazy as _ from django.template.loader import render_to_string import feedparser class RSSContent(models.Model): title = models.CharField(help_text=_('The rss field is updated several times a day. A change in the title will only be visible on the home page after the next feed update.'), max_length=50) link = models.URLField(_('link')) rendered_content = models.TextField(_('Pre-rendered content'), blank=True, editable=False) last_updated = models.DateTimeField(_('Last updated'), blank=True, null=True) class Meta: abstract = True def render(self, **kwargs): return mark_safe(self.rendered_content) #u'<div class="rsscontent"> RSS: <a href="'+self.link+'">'+self.link+'</a></div') def cache_content(self): print u"Getting RSS feed at %s" % (self.link,) feed = feedparser.parse(self.link) print u"Pre-rendering content" self.rendered_content = render_to_string('rsscontent.html', { 'title':self.title, 'feed': feed}) self.last_updated = datetime.now() self.save()
Add a title field to the RSSContent
Add a title field to the RSSContent
Python
bsd-3-clause
hgrimelid/feincms,joshuajonah/feincms,michaelkuty/feincms,matthiask/feincms2-content,nickburlett/feincms,michaelkuty/feincms,nickburlett/feincms,matthiask/django-content-editor,michaelkuty/feincms,nickburlett/feincms,hgrimelid/feincms,feincms/feincms,feincms/feincms,joshuajonah/feincms,matthiask/django-content-editor,michaelkuty/feincms,mjl/feincms,matthiask/feincms2-content,matthiask/feincms2-content,mjl/feincms,nickburlett/feincms,matthiask/django-content-editor,joshuajonah/feincms,hgrimelid/feincms,pjdelport/feincms,feincms/feincms,joshuajonah/feincms,pjdelport/feincms,pjdelport/feincms,mjl/feincms,matthiask/django-content-editor
fae3e55b1c472cd314676431a34fe6e160418626
tests/test_command_line.py
tests/test_command_line.py
#!/usr/bin/python # -*- coding: utf-8 -*- import os import subprocess class TestCommandLine(object): def setup(self): """Set up the environment by moving to the demos directory.""" os.chdir("demos") def teardown(self): os.chdir("..") def add(self, *args): self.db.add_all(args) self.db.commit() def test_dallinger_help(self): output = subprocess.check_output("dallinger", shell=True) assert("Usage: dallinger [OPTIONS] COMMAND [ARGS]" in output)
#!/usr/bin/python # -*- coding: utf-8 -*- import os import subprocess from dallinger.command_line import heroku_id class TestCommandLine(object): def setup(self): """Set up the environment by moving to the demos directory.""" os.chdir("demos") def teardown(self): os.chdir("..") def add(self, *args): self.db.add_all(args) self.db.commit() def test_dallinger_help(self): output = subprocess.check_output("dallinger", shell=True) assert("Usage: dallinger [OPTIONS] COMMAND [ARGS]" in output) def test_heroku_app_id(self): id = "8fbe62f5-2e33-4274-8aeb-40fc3dd621a0" assert(len(heroku_id(id)) < 30)
Test for Heroku app name length
Test for Heroku app name length
Python
mit
jcpeterson/Dallinger,Dallinger/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger,jcpeterson/Dallinger,Dallinger/Dallinger
ccc6c983411f951ef3906d55d6a0946c7ef93c75
app/brief_utils.py
app/brief_utils.py
from flask import abort from .models import Service from .validation import get_validation_errors from .service_utils import filter_services def validate_brief_data(brief, enforce_required=True, required_fields=None): errs = get_validation_errors( 'briefs-{}-{}'.format(brief.framework.slug, brief.lot.slug), brief.data, enforce_required=enforce_required, required_fields=required_fields ) if errs: abort(400, errs) def is_supplier_eligible_for_brief(supplier, brief): services = filter_services( framework_slugs=[brief.framework.slug], statuses=["published"], lot_slug=brief.lot.slug, location=brief.data["location"], role=brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None ) services = services.filter(Service.supplier_id == supplier.supplier_id) return services.count() > 0
from flask import abort from .models import Service from .validation import get_validation_errors from .service_utils import filter_services def validate_brief_data(brief, enforce_required=True, required_fields=None): errs = get_validation_errors( 'briefs-{}-{}'.format(brief.framework.slug, brief.lot.slug), brief.data, enforce_required=enforce_required, required_fields=required_fields ) criteria_weighting_keys = ['technicalWeighting', 'culturalWeighting', 'priceWeighting'] # Only check total if all weightings are set if all(key in brief.data for key in criteria_weighting_keys): criteria_weightings = sum(brief.data[key] for key in criteria_weighting_keys) if criteria_weightings != 100: for key in set(criteria_weighting_keys) - set(errs): errs[key] = 'total_should_be_100' if errs: abort(400, errs) def is_supplier_eligible_for_brief(supplier, brief): services = filter_services( framework_slugs=[brief.framework.slug], statuses=["published"], lot_slug=brief.lot.slug, location=brief.data["location"], role=brief.data["specialistRole"] if brief.lot.slug == "digital-specialists" else None ) services = services.filter(Service.supplier_id == supplier.supplier_id) return services.count() > 0
Add criteria weighting 100% total validation
Add criteria weighting 100% total validation

Checks the criteria weighting sum if all criteria fields are set. This relies on all three fields being required. If the fields don't add up to a 100 an error is added for each field that doesn't have any other validation errors.
Python
mit
alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api
e378902b85bf865e0b020bd4afe0e12d593a95a8
github-keys-check.py
github-keys-check.py
#!/usr/bin/python3

import urllib.request
import argparse
import pwd
import sys


def key_for_user(user):
    url = 'https://github.com/%s.keys' % user
    with urllib.request.urlopen(url) as f:
        return f.read().decode('utf-8')


def validate_user(username, min_uid):
    """
    Validates that a given username is:
    1. A valid, existing user
    2. Has uid > min_uid
    """
    user = pwd.getpwnam(username)
    return user.pw_uid > min_uid


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('username')
    parser.add_argument(
        '--min-uid',
        type=int,
        default=999,
        help='uid must be > this to be allowed ssh access. \
              Helps keep system users non-sshable'
    )
    args = parser.parse_args()

    if validate_user(args.username, args.min_uid):
        print(key_for_user(args.username))
    else:
        print("Not a valid user")
        sys.exit(1)
#!/usr/bin/python3

import urllib.request
import argparse
import pwd
import grp
import sys


def key_for_user(user):
    url = 'https://github.com/%s.keys' % user
    with urllib.request.urlopen(url) as f:
        return f.read().decode('utf-8')


def validate_user(username, min_uid, in_group):
    """
    Validates that a given username is:
    1. A valid, existing user
    2. Is a member of the group in_group
    3. Has uid > min_uid
    """
    user = pwd.getpwnam(username)
    if in_group is None or username in grp.getgrnam(in_group).gr_mem:
        return user.pw_uid > min_uid


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('username')
    parser.add_argument(
        '--min-uid',
        type=int,
        default=999,
        help='uid must be > this to be allowed ssh access. \
              Helps keep system users non-sshable'
    )
    parser.add_argument(
        '--in-group',
        default=None,
        help='Only users in this group can login via github keys'
    )
    args = parser.parse_args()

    if validate_user(args.username, args.min_uid, args.in_group):
        print(key_for_user(args.username))
    else:
        print("Not a valid user")
        sys.exit(1)
Add --in-group parameter to validate users
Add --in-group parameter to validate users

Allows github login only for users in a certain group. This can be used to whitelist users who are allowed to ssh in
Python
apache-2.0
yuvipanda/github-ssh-auth
d5eccc801634f1b841fbc31de545e530b6d4bd54
startup.py
startup.py
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from collections import Counter

import pandas as pd

XLS_NAME = 'startup.xls'
SHEET_NAME = 'STARTUP_15092014'
COL_NAME = 'nat.giuridica'


def main():
    xls = pd.ExcelFile(XLS_NAME)
    sheet = xls.parse(SHEET_NAME, index_col=None)
    for k,v in Counter(sheet[COL_NAME]).most_common():
        print "%4d\t%s" % (v, k)


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# -*- encoding: utf-8 -*-

from collections import Counter

import pandas as pd

XLS_NAME = 'startup.xls'
SHEET_NAME = 'STARTUP_15092014'


def main():
    xls = pd.ExcelFile(XLS_NAME)
    sheet = xls.parse(SHEET_NAME, index_col=None, convert_float=False)

    data = [el for el in sheet['nat.giuridica']]
    for k,v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)

    print

    data = [el for el in sheet['classe di valore della produzione ultimo anno (1)'] if el in ['A', 'B', 'C', 'D', 'E']]
    for k,v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)

    print

    data = [el for el in sheet['classe di addetti ultimo anno (2)'] if el in ['A', 'B', 'C', 'D', 'E']]
    for k,v in Counter(data).most_common():
        print "%4d\t%s" % (v, k)


if __name__ == '__main__':
    main()
Add pretty output for two more fields.
Add pretty output for two more fields.
Python
mit
jacquerie/italian-startups-report
34db881007bf0dad3b7e870d36ab4e4a68b0fd3d
emcee/run_emcee.py
emcee/run_emcee.py
#! /usr/bin/python

from os.path import abspath, join as pathjoin
from shutil import copy
from subprocess import call
from tempfile import mkdtemp

install_dir = mkdtemp(prefix='emcee.')
games_dir = abspath(pathjoin('..', 'games'))
libraries_dir = abspath('libraries')
infra_dir = abspath('infra')

print 'Installing emcee in %s' % (install_dir,)
print 'Games in %s' % (games_dir,)
print 'Libraries in %s' % (libraries_dir,)
print 'Infrastructure files in %s' % (infra_dir,)

copy('emcee.py', install_dir)
copy(pathjoin('libraries', 'pubsub.py'), install_dir)

call([pathjoin(install_dir, 'emcee.py'), games_dir, libraries_dir, infra_dir],
     cwd=install_dir)
#! /usr/bin/python

from os.path import abspath, join as pathjoin
from shutil import copy
from subprocess import Popen
from tempfile import mkdtemp

install_dir = mkdtemp(prefix='emcee.')
games_dir = abspath(pathjoin('..', 'games'))
libraries_dir = abspath('libraries')
infra_dir = abspath('infra')

print 'Installing emcee in %s' % (install_dir,)
print 'Games in %s' % (games_dir,)
print 'Libraries in %s' % (libraries_dir,)
print 'Infrastructure files in %s' % (infra_dir,)

copy('emcee.py', install_dir)
copy('pubsub_ws.py', install_dir)
copy('pubsub_ws_doc.html', install_dir)
copy(pathjoin('libraries', 'pubsub.py'), install_dir)

processes = []
processes.append(Popen([pathjoin(install_dir, 'emcee.py'), games_dir, libraries_dir, infra_dir],
                       cwd=install_dir))
processes.append(Popen([pathjoin(install_dir, 'pubsub_ws.py'), infra_dir],
                       cwd=install_dir))

print 'Now running'
raw_input('Press Enter to stop')
map(lambda(p): p.kill(), processes)
Deploy pubsub_ws along with emcee
Deploy pubsub_ws along with emcee
Python
mit
douglassquirrel/alexandra,douglassquirrel/alexandra,douglassquirrel/alexandra
d5a2a11d23b9f5393b0b39ca2f90978276311f52
app/slot/routes.py
app/slot/routes.py
from app import app
from app.slot import controller as con
import config
from auth import requires_auth

from flask import render_template
from flask.ext.login import login_required


@app.route('/dashboard')
# @requires_auth
@login_required
def index():
    return con.index()


@app.route('/new', methods=['GET', 'POST'])
@requires_auth
def render_new_procedure_form():
    return con.render_new_procedure_form()


@app.route('/sms', methods=['POST'])
@requires_auth
def receive_sms():
    return con.receive_sms()
from app import app
from app.slot import controller as con
import config
from auth import requires_auth

from flask import render_template
from flask.ext.login import login_required


@app.route('/')
@app.route('/dashboard')
@login_required
def index():
    return con.index()


@app.route('/new', methods=['GET', 'POST'])
@requires_auth
def render_new_procedure_form():
    return con.render_new_procedure_form()


@app.route('/sms', methods=['POST'])
@requires_auth
def receive_sms():
    return con.receive_sms()
Add / route to index. Remove old requires_auth decorator.
Add / route to index. Remove old requires_auth decorator.
Python
mit
nhshd-slot/SLOT,nhshd-slot/SLOT,nhshd-slot/SLOT
ecc816295154a3756e87349b4cff397ebd17b95f
sipa/base.py
sipa/base.py
# -*- coding: utf-8 -*-

"""
Basic utilities for the Flask app

These are basic utilities necessary for the Flask app
which are disjoint from any blueprint.
"""

from flask import request, session
from flask_login import AnonymousUserMixin, LoginManager
from werkzeug.routing import IntegerConverter as BaseIntegerConverter

from sipa.model import backends

login_manager = LoginManager()


class IntegerConverter(BaseIntegerConverter):
    """IntegerConverter supporting negative values

    This is a Modification of the standard IntegerConverter
    which does not support negative values.  See the
    corresponding `werkzeug documentation
    <http://werkzeug.pocoo.org/docs/0.10/routing/#werkzeug.routing.IntegerConverter>`_.
    """
    regex = r'-?\d+'


@login_manager.user_loader
def load_user(username):
    """Loads a User object from/into the session at every request
    """
    if request.blueprint == "documents" or request.endpoint == "static":
        return AnonymousUserMixin()

    dormitory = backends.get_dormitory(session.get('dormitory', None))
    if dormitory:
        return dormitory.datasource.user_class.get(username)
    else:
        return AnonymousUserMixin()
# -*- coding: utf-8 -*-

"""
Basic utilities for the Flask app

These are basic utilities necessary for the Flask app
which are disjoint from any blueprint.
"""

from flask import request, session
from flask_login import AnonymousUserMixin, LoginManager
from flask_babel import gettext
from werkzeug.routing import IntegerConverter as BaseIntegerConverter

from sipa.model import backends

login_manager = LoginManager()
login_manager.login_view = "generic.login"
login_manager.localize_callback = gettext
login_manager.login_message = "Bitte melde Dich an, um die Seite zu sehen."


class IntegerConverter(BaseIntegerConverter):
    """IntegerConverter supporting negative values

    This is a Modification of the standard IntegerConverter
    which does not support negative values.  See the
    corresponding `werkzeug documentation
    <http://werkzeug.pocoo.org/docs/0.10/routing/#werkzeug.routing.IntegerConverter>`_.
    """
    regex = r'-?\d+'


@login_manager.user_loader
def load_user(username):
    """Loads a User object from/into the session at every request
    """
    if request.blueprint == "documents" or request.endpoint == "static":
        return AnonymousUserMixin()

    dormitory = backends.get_dormitory(session.get('dormitory', None))
    if dormitory:
        return dormitory.datasource.user_class.get(username)
    else:
        return AnonymousUserMixin()
Set up flask to handle login redirects.
Set up flask to handle login redirects.

Fix #147.
Python
mit
lukasjuhrich/sipa,agdsn/sipa,agdsn/sipa,lukasjuhrich/sipa,lukasjuhrich/sipa,agdsn/sipa,agdsn/sipa,lukasjuhrich/sipa,MarauderXtreme/sipa,MarauderXtreme/sipa,MarauderXtreme/sipa
4bf614e072a603f4b46038e2f59459c305844553
ReversiTest.py
ReversiTest.py
import unittest

import reversi


class TestUM(unittest.TestCase):

    def setUp(self):
        self.board = reversi.ReversiBoard().set_default_board()

    def tearDown(self):
        self.board = None

    def test_up(self):
        tuple = (4, 3)
        result = self.board.up(tuple)
        self.assertEqual(result, (4, 2))

    def test_up_right(self):
        self.assertEqual(self.board.up_right((2, 2)), (3, 1))

    def test_right(self):
        self.assertEqual(self.board.right((2, 2)), (3, 2))

    def test_out_of_bounds(self):
        self.assertIsNone(self.board.right((7, 0)))


if __name__ == '__main__':
    unittest.main()
#!/usr/bin/python

import unittest

import reversi


class ReversiTest(unittest.TestCase):

    def setUp(self):
        self.board = reversi.ReversiBoard().set_default_board()

    def tearDown(self):
        self.board = None

    def test_up(self):
        tuple = (4, 3)
        result = self.board.up(tuple)
        self.assertEqual(result, (4, 2))

    def test_up_right(self):
        self.assertEqual(self.board.up_right((2, 2)), (3, 1))

    def test_right(self):
        self.assertEqual(self.board.right((2, 2)), (3, 2))

    def test_out_of_bounds(self):
        self.assertIsNone(self.board.right((7, 0)))


if __name__ == '__main__':
    unittest.main()
Update class name in the unit tester
Update class name in the unit tester
Python
mit
dmitrym0/reversi-py
38603c8b35c15c134a0499ac92a7c1f7dee4f526
send_test_data.py
send_test_data.py
#!/usr/bin/env python

import requests
import datetime
import time
import json
import random
from random import choice

random.seed(datetime.datetime.now())

names = ("vehicle_speed", "fuel_consumed_since_restart", "latitude",
         "longitude")

while True:
    data = {"records": [
        {"timestamp": time.time() * 1000,
         "name": choice(names),
         "value": random.randint(0, 100)}
    ]}
    print "Sending %s" % data
    headers = {'content-type': 'application/json'}
    r = requests.post('http://localhost:5000/records', data=json.dumps(data),
                      headers=headers)
    print r
    time.sleep(.1)
#!/usr/bin/env python

import requests
import datetime
import time
import json
import sys

from util import massage_record

names = ("vehicle_speed", "fuel_consumed_since_restart", "latitude",
         "longitude")


def send_records(records):
    data = {"records": records}
    print "Sending %s" % data
    headers = {'content-type': 'application/json'}
    r = requests.post('http://localhost:5000/records', data=json.dumps(data),
                      headers=headers)
    print r
    time.sleep(1)


while True:
    filename = sys.argv[1]
    try:
        records = []
        with open(filename, 'r') as trace_file:
            for line in trace_file:
                timestamp, record = line.split(':', 1)
                record = massage_record(json.loads(record), float(timestamp))
                records.append(record)
                if len(records) == 25:
                    send_records(records)
                    records = []
    except IOError:
        print("No active trace file found at %s" % filename)
Send test data from actual trace files.
Send test data from actual trace files.
Python
bsd-3-clause
openxc/web-logging-example,openxc/web-logging-example
b9ac30b0e428038986de64e069954ee340b991a9
integration/group.py
integration/group.py
from spec import Spec, eq_

from fabric import ThreadingGroup as Group


class Group_(Spec):

    def simple_command_on_multiple_hosts(self):
        """
        Run command on localhost...twice!
        """
        group = Group('localhost', 'localhost')
        result = group.run('echo foo', hide=True)
        # NOTE: currently, the result will only be 1 object, because both of
        # them will end up as the same key. Derp.
        eq_(result[group[0]].stdout, "foo\n")
from spec import Spec, eq_

from fabric import ThreadingGroup as Group


class Group_(Spec):

    def simple_command(self):
        group = Group('localhost', '127.0.0.1')
        result = group.run('echo foo', hide=True)
        eq_(
            [x.stdout.strip() for x in result.values()],
            ['foo', 'foo'],
        )
Tidy up existing integration test
Tidy up existing integration test
Python
bsd-2-clause
fabric/fabric
58a96c65f1e9868fb607cd3ce56dbf60905f62a7
autoencoder/api.py
autoencoder/api.py
from .io import preprocess
from .train import train
from .network import mlp


def autoencode(count_matrix, kfold=None, dimreduce=True, reconstruct=True,
               mask=None, type='normal', activation='relu', testset=False,
               learning_rate=1e-2, hidden_size=(256,64,256), l2_coef=0.,
               epochs=200, batch_size=32, **kwargs):

    x = preprocess(count_matrix, kfold=kfold, mask=mask, testset=testset)

    net = mlp(x['shape'][1],
              hidden_size=hidden_size,
              l2_coef=l2_coef,
              activation=activation,
              masking=(mask is not None),
              loss_type=type)
    model, encoder, decoder, loss, extras = net['model'], net['encoder'], \
                                            net['decoder'], net['loss'], \
                                            net['extra_models']

    losses = train(x, model, loss,
                   learning_rate=learning_rate,
                   epochs=epochs,
                   batch_size=batch_size,
                   **kwargs)

    ret = {'model': model,
           'encoder': encoder,
           'decoder': decoder,
           'extra_models': extras,
           'losses': losses}

    if dimreduce:
        ret['reduced'] = encoder.predict(count_matrix)
    if reconstruct:
        ret['reconstructed'] = model.predict(count_matrix)

    return ret
from .io import preprocess
from .train import train
from .network import mlp


def autoencode(count_matrix, kfold=None, dimreduce=True, reconstruct=True,
               mask=None, type='normal', activation='relu', testset=False,
               learning_rate=1e-2, hidden_size=(256,64,256), l2_coef=0.,
               epochs=200, batch_size=32, optimizer=None, **kwargs):

    x = preprocess(count_matrix, kfold=kfold, mask=mask, testset=testset)

    net = mlp(x['shape'][1],
              hidden_size=hidden_size,
              l2_coef=l2_coef,
              activation=activation,
              masking=(mask is not None),
              loss_type=type)
    model, encoder, decoder, loss, extras = net['model'], net['encoder'], \
                                            net['decoder'], net['loss'], \
                                            net['extra_models']

    losses = train(x, model, loss,
                   learning_rate=learning_rate,
                   epochs=epochs,
                   batch_size=batch_size,
                   optimizer=optimizer,
                   **kwargs)

    ret = {'model': model,
           'encoder': encoder,
           'decoder': decoder,
           'extra_models': extras,
           'losses': losses}

    if dimreduce:
        ret['reduced'] = encoder.predict(count_matrix)
    if reconstruct:
        ret['reconstructed'] = model.predict(count_matrix)

    return ret
Add optimizer to the API
Add optimizer to the API

Former-commit-id: 3e06c976ad6a7d4409817fb0fa1472237bfa28b7
Python
apache-2.0
theislab/dca,theislab/dca,theislab/dca
1ed040f9d64e12adf964e9f86cc1e18bd8d21593
scripts/rename.py
scripts/rename.py
import logging

from scripts.util import documents
from scrapi import settings
from scrapi.linter import RawDocument
from scrapi.processing.elasticsearch import es
from scrapi.tasks import normalize, process_normalized, process_raw

logger = logging.getLogger(__name__)


def rename(source, target, dry=True):
    assert source != target, "Can't rename {} to {}, names are the same".format(source, target)

    count = 0
    exceptions = []

    for doc in documents(source):
        count += 1
        try:
            raw = RawDocument({
                'doc': doc.doc,
                'docID': doc.docID,
                'source': target,
                'filetype': doc.filetype,
                'timestamps': doc.timestamps,
                'versions': doc.versions
            })

            if not dry:
                process_raw(raw)
                process_normalized(normalize(raw, raw['source']), raw)
                logger.info('Processed document from {} with id {}'.format(source, raw['docID']))
        except Exception as e:
            logger.exception(e)
            exceptions.append(e)
        else:
            if not dry:
                doc.delete()
                es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404])
                logger.info('Deleted document from {} with id {}'.format(source, raw['docID']))

    if dry:
        logger.info('Dry run complete')

    for ex in exceptions:
        logger.exception(e)

    logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
import logging

from scripts.util import documents
from scrapi import settings
from scrapi.linter import RawDocument
from scrapi.processing.elasticsearch import es
from scrapi.tasks import normalize, process_normalized, process_raw

logger = logging.getLogger(__name__)


def rename(source, target, dry=True):
    assert source != target, "Can't rename {} to {}, names are the same".format(source, target)

    count = 0
    exceptions = []

    for doc in documents(source):
        count += 1
        try:
            raw = RawDocument({
                'doc': doc.doc,
                'docID': doc.docID,
                'source': target,
                'filetype': doc.filetype,
                'timestamps': doc.timestamps,
                'versions': doc.versions
            })

            if not dry:
                process_raw(raw)
                process_normalized(normalize(raw, raw['source']), raw)
                logger.info('Processed document from {} with id {}'.format(source, raw['docID']))
        except Exception as e:
            logger.exception(e)
            exceptions.append(e)
        else:
            if not dry:
                # doc.delete()
                es.delete(index=settings.ELASTIC_INDEX, doc_type=source, id=raw['docID'], ignore=[404])
                es.delete(index='share_v1', doc_type=source, id=raw['docID'], ignore=[404])
                logger.info('Deleted document from {} with id {}'.format(source, raw['docID']))

    if dry:
        logger.info('Dry run complete')

    for ex in exceptions:
        logger.exception(e)

    logger.info('{} documents processed, with {} exceptions'.format(count, len(exceptions)))
Stop cassandra from deleting documents, delete documents from old index as well
Stop cassandra from deleting documents, delete documents from old index as well
Python
apache-2.0
erinspace/scrapi,mehanig/scrapi,alexgarciac/scrapi,felliott/scrapi,fabianvf/scrapi,icereval/scrapi,jeffreyliu3230/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,mehanig/scrapi,CenterForOpenScience/scrapi,ostwald/scrapi,fabianvf/scrapi,felliott/scrapi
473121ce5a3caa20576d02c79669408fd4177a43
features/steps/interactive.py
features/steps/interactive.py
import time, pexpect, re

import nose.tools as nt
import subprocess as spr

PROMPT = "root@\w+:[^\r]+"
UP_ARROW = "\x1b[A"


def type(process, input_):
    process.send(input_.encode())
    process.expect(PROMPT)
    # Remove the typed input from the returned standard out
    return re.sub(re.escape(input_.strip()), '', process.before).strip()


@when(u'I run the interactive command')
def step_impl(context):
    process = pexpect.spawn(context.text)
    time.sleep(0.5)
    type(process, UP_ARROW)

    class Output(object):
        pass

    context.output = Output()
    context.output.stderr = ""
    context.output.stdout = ""
    context.process = process


@when(u'I type')
def step_impl(context):
    cmd = context.text.strip() + "\n"
    context.output.stdout = type(context.process, cmd)


@when(u'I exit the shell')
def step_impl(context):
    context.process.send("exit\n")
import time, pexpect, re

import nose.tools as nt
import subprocess as spr

PROMPT = "root@\w+:[^\r]+"
ENTER = "\n"


def type(process, input_):
    process.send(input_.encode())
    process.expect(PROMPT)
    # Remove the typed input from the returned standard out
    return re.sub(re.escape(input_.strip()), '', process.before).strip()


@when(u'I run the interactive command')
def step_impl(context):
    process = pexpect.spawn(context.text)
    time.sleep(0.5)
    type(process, ENTER)

    class Output(object):
        pass

    context.output = Output()
    context.output.stderr = ""
    context.output.stdout = ""
    context.process = process


@when(u'I type')
def step_impl(context):
    cmd = context.text.strip() + "\n"
    context.output.stdout = type(context.process, cmd)


@when(u'I exit the shell')
def step_impl(context):
    context.process.send("exit\n")
Use "\n" to fix waiting for prompt in feature tests on CI
Use "\n" to fix waiting for prompt in feature tests on CI
Python
mit
michaelbarton/command-line-interface,pbelmann/command-line-interface,bioboxes/command-line-interface,pbelmann/command-line-interface,michaelbarton/command-line-interface,bioboxes/command-line-interface
624276b80b6d69b788b2f48691941cd89847237b
software/Pi/ui.py
software/Pi/ui.py
""" Handles LED output for the Raspberry Pi 3 Image tracking software. Imported using 'import ui' Version: 5/06/17 Dependencies: RPi.GPIO Note: will only work on a Raspberry Pi! """ import RPi.GPIO as gpio import time ledPin = 16 #GPIO23 #Set up RPi GPIO def setup(): gpio.setmode(gpio.BOARD) gpio.setup(ledPin, gpio.OUT) def blink(n): for i in range(0, n): gpio.output(ledPin, True) time.sleep(0.5) gpio.output(ledPin, False) time.sleep(0.5)
""" Handles LED output for the Raspberry Pi 3 Image tracking software. Imported using 'import ui' Version: 5/06/17 Dependencies: RPi.GPIO Note: will only work on a Raspberry Pi! """ import RPi.GPIO as gpio import time ledPin = 16 #GPIO23 #Set up RPi GPIO def setup(): gpio.setmode(gpio.BOARD) gpio.setwarnings(False) gpio.setup(ledPin, gpio.OUT) def blink(n): for i in range(0, n): gpio.output(ledPin, True) time.sleep(0.5) gpio.output(ledPin, False) time.sleep(0.5)
Disable warnings for GPIO channels...
Disable warnings for GPIO channels...
Python
mit
AdlerFarHorizons/eclipse-tracking,AdlerFarHorizons/eclipse-tracking,AdlerFarHorizons/eclipse-tracking,AdlerFarHorizons/eclipse-tracking
c266fbd7a3478d582dc0d6c88fc5e3d8b7a8f62f
survey/views/survey_result.py
survey/views/survey_result.py
# -*- coding: utf-8 -*-

import datetime
import os

from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404

from survey.management.survey2csv import Survey2CSV
from survey.models import Survey


def serve_result_csv(request, pk):
    survey = get_object_or_404(Survey, pk=pk)
    try:
        latest_answer = survey.latest_answer_date()
        csv_modification_time = os.path.getmtime(Survey2CSV.file_name(survey))
        csv_time = datetime.datetime.fromtimestamp(csv_modification_time)
        csv_time = csv_time.replace(tzinfo=latest_answer.tzinfo)
        if latest_answer > csv_time:
            # If the file was generated before the last answer, generate it.
            Survey2CSV.generate_file(survey)
    except OSError:
        # If the file do not exist, generate it.
        Survey2CSV.generate_file(survey)
    with open(Survey2CSV.file_name(survey), 'r') as f:
        response = HttpResponse(f.read(), content_type='text/csv')
        response['mimetype='] = 'application/force-download'
        cd = u'attachment; filename="{}.csv"'.format(survey.name)
        response['Content-Disposition'] = cd
        return response
# -*- coding: utf-8 -*-

import datetime
import os

from django.http.response import HttpResponse
from django.shortcuts import get_object_or_404

from survey.management.survey2csv import Survey2CSV
from survey.models import Survey


def serve_result_csv(request, pk):
    survey = get_object_or_404(Survey, pk=pk)
    try:
        latest_answer = survey.latest_answer_date()
        csv_modification_time = os.path.getmtime(Survey2CSV.file_name(survey))
        csv_time = datetime.datetime.fromtimestamp(csv_modification_time)
        csv_time = csv_time.replace(tzinfo=latest_answer.tzinfo)
        if latest_answer > csv_time:
            # If the file was generated before the last answer, generate it.
            Survey2CSV.generate_file(survey)
    except OSError:
        # If the file do not exist, generate it.
        Survey2CSV.generate_file(survey)
    with open(Survey2CSV.file_name(survey), 'r') as f:
        response = HttpResponse(f.read(), content_type='text/csv')
        cd = u'attachment; filename="{}.csv"'.format(survey.name)
        response['Content-Disposition'] = cd
        return response
Fix - Apache error AH02429
Fix - Apache error AH02429

Response header name 'mimetype=' contains invalid characters, aborting request
Python
agpl-3.0
Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey,Pierre-Sassoulas/django-survey
9a121f309ded039f770339d51b43d0933a98d982
app/main/views.py
app/main/views.py
from flask import render_template, current_app, flash, redirect, url_for

from . import main
from forms import ContactForm
from ..email import send_email


@main.route('/')
def index():
    return render_template('index.html')


@main.route('/about')
def about():
    return render_template('about.html')


@main.route('/menu')
def menu():
    return render_template('menu.html')


@main.route('/hours-and-directions')
def hours():
    return render_template('hours-and-directions.html')


@main.route('/contact', methods=['GET', 'POST'])
def contact():
    contact_form = ContactForm()
    if contact_form.validate_on_submit():
        name = contact_form.name.data
        email = contact_form.email.data
        phone = contact_form.phone.data
        message = contact_form.message.data
        send_email(current_app.config['MAIL_USERNAME'], 'Robata Grill Inquiry',
                   'mail/message', name=name, email=email, phone=phone, message=message)
        flash('Your message has been sent. We will be in contact with you shortly.')
        return redirect(url_for('main.contact'))
    return render_template('contact.html', contact_form = contact_form)


@main.route('/imageScroll')
def imageScroll():
    return render_template('imageScroll.html')
from flask import render_template, current_app, flash, redirect, url_for, send_from_directory

from . import main
from forms import ContactForm
from ..email import send_email


@main.route('/<path:filename>')
def static_from_root(filename):
    return send_from_directory(current_app.static_folder, filename)


@main.route('/')
def index():
    return render_template('index.html')


@main.route('/about')
def about():
    return render_template('about.html')


@main.route('/menu')
def menu():
    return render_template('menu.html')


@main.route('/hours-and-directions')
def hours():
    return render_template('hours-and-directions.html')


@main.route('/contact', methods=['GET', 'POST'])
def contact():
    contact_form = ContactForm()
    if contact_form.validate_on_submit():
        name = contact_form.name.data
        email = contact_form.email.data
        phone = contact_form.phone.data
        message = contact_form.message.data
        send_email(current_app.config['MAIL_USERNAME'], 'Robata Grill Inquiry',
                   'mail/message', name=name, email=email, phone=phone, message=message)
        flash('Your message has been sent. We will be in contact with you shortly.')
        return redirect(url_for('main.contact'))
    return render_template('contact.html', contact_form = contact_form)


@main.route('/imageScroll')
def imageScroll():
    return render_template('imageScroll.html')
Add additional view for sitemap.xml
Add additional view for sitemap.xml
Python
mit
jordandietch/workforsushi,jordandietch/workforsushi,jordandietch/workforsushi,jordandietch/workforsushi