commit stringlengths 40 40 | old_file stringlengths 4 236 | new_file stringlengths 4 236 | old_contents stringlengths 1 3.26k | new_contents stringlengths 16 4.43k | subject stringlengths 16 624 | message stringlengths 17 3.29k | lang stringclasses 5
values | license stringclasses 13
values | repos stringlengths 5 91.5k |
|---|---|---|---|---|---|---|---|---|---|
1b338aa716311b4c91281993e35e9beda376735a | addons/osfstorage/settings/defaults.py | addons/osfstorage/settings/defaults.py | # encoding: utf-8
import importlib
import os
import logging
from website import settings
logger = logging.getLogger(__name__)
WATERBUTLER_CREDENTIALS = {
'storage': {}
}
WATERBUTLER_SETTINGS = {
'storage': {
'provider': 'filesystem',
'folder': os.path.join(settings.BASE_PATH, 'osfstoragecache'),
}
}
WATERBUTLER_RESOURCE = 'folder'
DISK_SAVING_MODE = settings.DISK_SAVING_MODE
try:
importlib.import_module('.{}'.format(settings.MIGRATION_ENV))
except:
logger.warn('No migration settings loaded for OSFStorage, falling back to local dev.')
| # encoding: utf-8
import importlib
import os
import logging
from website import settings
logger = logging.getLogger(__name__)
WATERBUTLER_CREDENTIALS = {
'storage': {}
}
WATERBUTLER_SETTINGS = {
'storage': {
'provider': 'filesystem',
'folder': os.path.join(settings.BASE_PATH, 'osfstoragecache'),
}
}
WATERBUTLER_RESOURCE = 'folder'
DISK_SAVING_MODE = settings.DISK_SAVING_MODE
try:
mod = importlib.import_module('.{}'.format(settings.MIGRATION_ENV), package='addons.osfstorage.settings')
globals().update({k: getattr(mod, k) for k in dir(mod)})
except Exception as ex:
logger.warn('No migration settings loaded for OSFStorage, falling back to local dev. {}'.format(ex))
| Make environmental override of osfstorage settings work | Make environmental override of osfstorage settings work
Signed-off-by: Chris Wisecarver <5fccdd17c1f7bcc7e393d2cb5e2fad37705ca69f@cos.io>
| Python | apache-2.0 | CenterForOpenScience/osf.io,binoculars/osf.io,monikagrabowska/osf.io,caneruguz/osf.io,TomBaxter/osf.io,chrisseto/osf.io,monikagrabowska/osf.io,sloria/osf.io,crcresearch/osf.io,erinspace/osf.io,caseyrollins/osf.io,Nesiehr/osf.io,TomBaxter/osf.io,leb2dg/osf.io,pattisdr/osf.io,baylee-d/osf.io,adlius/osf.io,felliott/osf.io,mfraezz/osf.io,chrisseto/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,CenterForOpenScience/osf.io,cslzchen/osf.io,acshi/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,icereval/osf.io,brianjgeiger/osf.io,chennan47/osf.io,Nesiehr/osf.io,hmoco/osf.io,monikagrabowska/osf.io,felliott/osf.io,baylee-d/osf.io,leb2dg/osf.io,leb2dg/osf.io,felliott/osf.io,mfraezz/osf.io,caseyrollins/osf.io,caneruguz/osf.io,sloria/osf.io,monikagrabowska/osf.io,aaxelb/osf.io,acshi/osf.io,aaxelb/osf.io,cwisecarver/osf.io,adlius/osf.io,mattclark/osf.io,aaxelb/osf.io,saradbowman/osf.io,HalcyonChimera/osf.io,hmoco/osf.io,brianjgeiger/osf.io,pattisdr/osf.io,laurenrevere/osf.io,adlius/osf.io,Johnetordoff/osf.io,cslzchen/osf.io,Johnetordoff/osf.io,cwisecarver/osf.io,saradbowman/osf.io,TomBaxter/osf.io,hmoco/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,icereval/osf.io,chrisseto/osf.io,leb2dg/osf.io,Johnetordoff/osf.io,binoculars/osf.io,hmoco/osf.io,crcresearch/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,icereval/osf.io,mfraezz/osf.io,baylee-d/osf.io,pattisdr/osf.io,laurenrevere/osf.io,acshi/osf.io,chennan47/osf.io,binoculars/osf.io,cwisecarver/osf.io,brianjgeiger/osf.io,mfraezz/osf.io,chennan47/osf.io,chrisseto/osf.io,caseyrollins/osf.io,adlius/osf.io,felliott/osf.io,Johnetordoff/osf.io,Nesiehr/osf.io,erinspace/osf.io,HalcyonChimera/osf.io,CenterForOpenScience/osf.io,acshi/osf.io,cslzchen/osf.io,caneruguz/osf.io,aaxelb/osf.io,laurenrevere/osf.io,erinspace/osf.io,caneruguz/osf.io,sloria/osf.io,crcresearch/osf.io |
5a45a312eebe9e432b066b99d914b49a2adb920c | openfaas/yaml2json/function/handler.py | openfaas/yaml2json/function/handler.py | # Author: Milos Buncic
# Date: 2017/10/14
# Description: Convert YAML to JSON and vice versa (OpenFaaS function)
import os
import sys
import json
import yaml
def handle(data, **parms):
def yaml2json(ydata):
"""
Convert YAML to JSON (output: JSON)
"""
try:
d = yaml.load(ydata, Loader=yaml.BaseLoader)
except Exception as e:
d = {'error': '{}'.format(e)}
return json.dumps(d)
def json2yaml(jdata):
"""
Convert JSON to YAML (output: YAML)
"""
try:
d = json.loads(jdata)
except Exception as e:
d = {'error': '{}'.format(e)}
return yaml.dump(d, default_flow_style=False)
if parms.get('reverse') == 'true':
print(json2yaml(data))
else:
print(yaml2json(data))
| # Author: Milos Buncic
# Date: 2017/10/14
# Description: Convert YAML to JSON and vice versa (OpenFaaS function)
import os
import sys
import json
import yaml
def yaml2json(data):
"""
Convert YAML to JSON (output: JSON)
"""
try:
d = yaml.load(data, Loader=yaml.BaseLoader)
except Exception as e:
d = {'error': '{}'.format(e)}
return json.dumps(d)
def json2yaml(data):
"""
Convert JSON to YAML (output: YAML)
"""
try:
d = json.loads(data)
except Exception as e:
d = {'error': '{}'.format(e)}
return yaml.dump(d, default_flow_style=False)
def handle(data, **parms):
if parms.get('reverse') == 'true':
print(json2yaml(data))
else:
print(yaml2json(data))
| Make handle function to behave similar as main function | Make handle function to behave similar as main function
| Python | mit | psyhomb/serverless |
bd9d08870ec3db09c41c825029c6a513ecc4d1c7 | packs/asserts/actions/object_equals.py | packs/asserts/actions/object_equals.py | import pprint
import sys
from st2actions.runners.pythonrunner import Action
__all__ = [
'AssertObjectEquals'
]
class AssertObjectEquals(Action):
def run(self, object, expected):
ret = cmp(object, expected)
if ret == 0:
sys.stdout.write('EQUAL.')
else:
pprint.pprint('Input: \n%s' % object, stream=sys.stderr)
pprint.pprint('Expected: \n%s' % expected, stream=sys.stderr)
raise ValueError('Objects not equal. Input: %s, Expected: %s.' % (object, expected))
| import pprint
import sys
from st2actions.runners.pythonrunner import Action
__all__ = [
'AssertObjectEquals'
]
def cmp(x, y):
return (x > y) - (x < y)
class AssertObjectEquals(Action):
def run(self, object, expected):
ret = cmp(object, expected)
if ret == 0:
sys.stdout.write('EQUAL.')
else:
pprint.pprint('Input: \n%s' % object, stream=sys.stderr)
pprint.pprint('Expected: \n%s' % expected, stream=sys.stderr)
raise ValueError('Objects not equal. Input: %s, Expected: %s.' % (object, expected))
| Make action python 3 compatible | Make action python 3 compatible
| Python | apache-2.0 | StackStorm/st2tests,StackStorm/st2tests,StackStorm/st2tests |
22412b3f46451177286f8fc58509a69bb2d95731 | numpy/testing/setup.py | numpy/testing/setup.py | #!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
config.add_subpackage('_private')
config.add_subpackage('tests')
config.add_data_files('*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer="NumPy Developers",
maintainer_email="numpy-dev@numpy.org",
description="NumPy test module",
url="https://www.numpy.org",
license="NumPy License (BSD Style)",
configuration=configuration,
)
| #!/usr/bin/env python3
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('testing', parent_package, top_path)
config.add_subpackage('_private')
config.add_subpackage('tests')
config.add_data_files('*.pyi')
config.add_data_files('_private/*.pyi')
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer="NumPy Developers",
maintainer_email="numpy-dev@numpy.org",
description="NumPy test module",
url="https://www.numpy.org",
license="NumPy License (BSD Style)",
configuration=configuration,
)
| Add support for *.pyi data-files to `np.testing._private` | BLD: Add support for *.pyi data-files to `np.testing._private`
| Python | bsd-3-clause | rgommers/numpy,numpy/numpy,endolith/numpy,pdebuyl/numpy,simongibbons/numpy,simongibbons/numpy,mhvk/numpy,seberg/numpy,mattip/numpy,jakirkham/numpy,jakirkham/numpy,mattip/numpy,mhvk/numpy,charris/numpy,seberg/numpy,anntzer/numpy,rgommers/numpy,jakirkham/numpy,mhvk/numpy,charris/numpy,anntzer/numpy,pdebuyl/numpy,rgommers/numpy,anntzer/numpy,charris/numpy,jakirkham/numpy,mattip/numpy,endolith/numpy,endolith/numpy,simongibbons/numpy,endolith/numpy,numpy/numpy,mhvk/numpy,charris/numpy,mattip/numpy,jakirkham/numpy,numpy/numpy,simongibbons/numpy,pdebuyl/numpy,simongibbons/numpy,seberg/numpy,anntzer/numpy,mhvk/numpy,rgommers/numpy,numpy/numpy,pdebuyl/numpy,seberg/numpy |
3c2663d4c8ca523d072b6e82bf872f412aba9321 | mrgeo-python/src/main/python/pymrgeo/rastermapop.py | mrgeo-python/src/main/python/pymrgeo/rastermapop.py | import copy
import json
from py4j.java_gateway import JavaClass, java_import
from pymrgeo.instance import is_instance_of as iio
class RasterMapOp(object):
mapop = None
gateway = None
context = None
job = None
def __init__(self, gateway=None, context=None, mapop=None, job=None):
self.gateway = gateway
self.context = context
self.mapop = mapop
self.job = job
@staticmethod
def nan():
return float('nan')
def clone(self):
return copy.copy(self)
def is_instance_of(self, java_object, java_class):
return iio(self.gateway, java_object, java_class)
def metadata(self):
if self.mapop is None:
return None
jvm = self.gateway.jvm
java_import(jvm, "org.mrgeo.mapalgebra.raster.RasterMapOp")
java_import(jvm, "org.mrgeo.image.MrsPyramidMetadata")
meta = self.mapop.metadata().getOrElse(None)
if meta is None:
return None
java_import(jvm, "com.fasterxml.jackson.databind.ObjectMapper")
mapper = jvm.com.fasterxml.jackson.databind.ObjectMapper()
jsonstr = mapper.writeValueAsString(meta)
# print(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(meta))
return json.loads(jsonstr)
| import copy
import json
from py4j.java_gateway import java_import
from pymrgeo.instance import is_instance_of as iio
class RasterMapOp(object):
mapop = None
gateway = None
context = None
job = None
def __init__(self, gateway=None, context=None, mapop=None, job=None):
self.gateway = gateway
self.context = context
self.mapop = mapop
self.job = job
@staticmethod
def nan():
return float('nan')
def clone(self):
return copy.copy(self)
def is_instance_of(self, java_object, java_class):
return iio(self.gateway, java_object, java_class)
def metadata(self):
if self.mapop is None:
return None
jvm = self.gateway.jvm
java_import(jvm, "org.mrgeo.mapalgebra.raster.RasterMapOp")
java_import(jvm, "org.mrgeo.image.MrsPyramidMetadata")
if self.mapop.metadata().isEmpty():
return None
meta = self.mapop.metadata().get()
java_import(jvm, "com.fasterxml.jackson.databind.ObjectMapper")
mapper = jvm.com.fasterxml.jackson.databind.ObjectMapper()
jsonstr = mapper.writeValueAsString(meta)
# print(mapper.writerWithDefaultPrettyPrinter().writeValueAsString(meta))
return json.loads(jsonstr)
| Implement empty metadata a little differently | Implement empty metadata a little differently
| Python | apache-2.0 | ngageoint/mrgeo,ngageoint/mrgeo,ngageoint/mrgeo |
a78f56e5c4dedc4148ff3503a05705a8d343b638 | qmpy/web/views/analysis/calculation.py | qmpy/web/views/analysis/calculation.py | from django.shortcuts import render_to_response
from django.template import RequestContext
import os.path
from qmpy.models import Calculation
from ..tools import get_globals
from bokeh.embed import components
def calculation_view(request, calculation_id):
calculation = Calculation.objects.get(pk=calculation_id)
data = get_globals()
data['calculation'] = calculation
data['stdout'] = ''
data['stderr'] = ''
if os.path.exists(calculation.path+'/stdout.txt'):
data['stdout'] = open(calculation.path+'/stdout.txt').read()
if os.path.exists(calculation.path+'/stderr.txt'):
data['stderr'] = open(calculation.path+'/stderr.txt').read()
#if not calculation.dos is None:
# data['dos'] = calculation.dos.plot.get_flot_script()
if not calculation.dos is None:
script, div = components(calculation.dos.bokeh_plot)
data['dos'] = script
data['dosdiv'] = div
## Get exact INCAR settings from INCAR file
data['incar'] = ''.join(calculation.read_incar())
return render_to_response('analysis/calculation.html',
data, RequestContext(request))
| from django.shortcuts import render_to_response
from django.template import RequestContext
import os
from qmpy.models import Calculation
from ..tools import get_globals
from bokeh.embed import components
def calculation_view(request, calculation_id):
calculation = Calculation.objects.get(pk=calculation_id)
data = get_globals()
data['calculation'] = calculation
data['stdout'] = ''
data['stderr'] = ''
if os.path.exists(os.path.join(calculation.path, 'stdout.txt')):
with open(os.path.join(calculation.path, 'stdout.txt')) as fr:
data['stdout'] = fr.read()
if os.path.exists(os.path.join(calculation.path, 'stderr.txt')):
with open(os.path.join(calculation.path, 'stderr.txt')) as fr:
data['stderr'] = fr.read()
try:
data['incar'] = ''.join(calculation.read_incar())
except FileNotFoundError:
data['incar'] = 'Could not read INCAR'
if not calculation.dos is None:
script, div = components(calculation.dos.bokeh_plot)
data['dos'] = script
data['dosdiv'] = div
return render_to_response('analysis/calculation.html',
data, RequestContext(request))
| Handle missing INCAR files gracefully | Handle missing INCAR files gracefully
| Python | mit | wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy,wolverton-research-group/qmpy |
eb453010915f6700edd1baa0febcc634deec81dc | src/viewsapp/views.py | src/viewsapp/views.py | from decorator_plus import (
require_form_methods, require_safe_methods)
from django.shortcuts import (
get_object_or_404, redirect, render)
from .forms import ExampleForm
from .models import ExampleModel
@require_safe_methods
def model_detail(request, *args, **kwargs):
request_slug = kwargs.get('slug')
example_obj = get_object_or_404(
ExampleModel, slug=request_slug)
return render(
request,
'viewsapp/detail.html',
{'object': example_obj})
@require_form_methods
def model_create(request, *args, **kwargs):
if request.method == 'POST':
form = ExampleForm(request.POST)
if form.is_valid():
new_obj = form.save()
return redirect(new_obj)
else:
form = ExampleForm()
return render(
request,
'viewsapp/form.html',
{'form': form})
| from decorator_plus import require_http_methods
from django.shortcuts import (
get_object_or_404, redirect, render)
from .forms import ExampleForm
from .models import ExampleModel
@require_http_methods(['GET'])
def model_detail(request, *args, **kwargs):
request_slug = kwargs.get('slug')
example_obj = get_object_or_404(
ExampleModel, slug=request_slug)
return render(
request,
'viewsapp/detail.html',
{'object': example_obj})
@require_http_methods(['GET', 'POST'])
def model_create(request, *args, **kwargs):
if request.method == 'POST':
form = ExampleForm(request.POST)
if form.is_valid():
new_obj = form.save()
return redirect(new_obj)
else:
form = ExampleForm()
return render(
request,
'viewsapp/form.html',
{'form': form})
| Switch to using require_http_methods decorator. | Switch to using require_http_methods decorator.
| Python | bsd-2-clause | jambonrose/djangocon2015-views,jambonrose/djangocon2015-views |
94e44e18832311bc063830c0a6bfe23e04e40a8d | srsly/_msgpack_api.py | srsly/_msgpack_api.py | # coding: utf8
from __future__ import unicode_literals
import gc
from . import msgpack
from .util import force_path
def msgpack_dumps(data):
return msgpack.dumps(data, use_bin_type=True)
def msgpack_loads(data):
# msgpack-python docs suggest disabling gc before unpacking large messages
gc.disable()
msg = msgpack.loads(data, raw=False)
gc.enable()
return msg
def read_msgpack(location):
file_path = force_path(location)
with file_path.open("rb") as f:
gc.disable()
msg = msgpack.load(f, raw=False)
gc.enable()
return msg
def write_msgpack(location, data):
file_path = force_path(location, require_exists=False)
with file_path.open("wb") as f:
msgpack.dump(data, f, use_bin_type=True)
| # coding: utf8
from __future__ import unicode_literals
import gc
from . import msgpack
from .util import force_path
def msgpack_dumps(data):
return msgpack.dumps(data, use_bin_type=True)
def msgpack_loads(data, use_list=True):
# msgpack-python docs suggest disabling gc before unpacking large messages
gc.disable()
msg = msgpack.loads(data, raw=False, use_list=use_list)
gc.enable()
return msg
def read_msgpack(location):
file_path = force_path(location)
with file_path.open("rb") as f:
gc.disable()
msg = msgpack.load(f, raw=False)
gc.enable()
return msg
def write_msgpack(location, data):
file_path = force_path(location, require_exists=False)
with file_path.open("wb") as f:
msgpack.dump(data, f, use_bin_type=True)
| Allow passing use_list to msgpack_loads | Allow passing use_list to msgpack_loads
| Python | mit | explosion/srsly,explosion/srsly,explosion/srsly,explosion/srsly |
e78b3f53150a5f1c170b860f8719e982cf1c6f9e | integration/main.py | integration/main.py | import os
import sys
from spec import Spec, skip
from invoke import run
class Integration(Spec):
def setup(self):
from tessera.application import db
# Ensure we have a clean db target.
self.dbpath = db.engine.url.database
msg = "You seem to have a db in the default location ({0}) - please (re)move it before running tests to avoid collisions."
assert not os.path.exists(self.dbpath), msg.format(self.dbpath)
def teardown(self):
# Teardown only runs if setup completed, so the below will not nuke
# pre-existing dbs that cause setup's check to fail.
if os.path.exists(self.dbpath):
os.remove(self.dbpath)
def is_importable(self):
import tessera
assert tessera.app
assert tessera.db
def can_initdb(self):
from tessera.application import db
from tessera.model.database import Dashboard
# Make sure we can create and look at the DB
db.create_all()
assert len(Dashboard.query.all()) == 0
| import os
import sys
from spec import Spec, skip, eq_
from invoke import run
class Integration(Spec):
def setup(self):
from tessera.application import db
# Ensure we have a clean db target.
self.dbpath = db.engine.url.database
msg = "You seem to have a db in the default location ({0}) - please (re)move it before running tests to avoid collisions."
assert not os.path.exists(self.dbpath), msg.format(self.dbpath)
def teardown(self):
from tessera.application import db
# Teardown only runs if setup completed, so the below will not nuke
# pre-existing dbs that cause setup's check to fail.
if os.path.exists(self.dbpath):
os.remove(self.dbpath)
# Ensure no cached session crap
db.session.close_all()
def is_importable(self):
import tessera
assert tessera.app
assert tessera.db
def can_initdb(self):
from tessera.application import db
from tessera.model.database import Dashboard
# Make sure we can create and look at the DB
db.create_all()
eq_(len(Dashboard.query.all()), 0)
def can_import_fixtures(self):
from tessera.application import db
from tessera.importer.json import JsonImporter
from tessera.model.database import Dashboard
db.create_all()
path = os.path.abspath(os.path.join(
os.path.dirname(__file__), '..', 'demo', 'demo-gallery.json'
))
JsonImporter.import_file(path)
eq_(len(Dashboard.query.all()), 1)
| Fix state bleed, add fixture import test | Fix state bleed, add fixture import test
| Python | apache-2.0 | section-io/tessera,urbanairship/tessera,tessera-metrics/tessera,jmptrader/tessera,section-io/tessera,urbanairship/tessera,aalpern/tessera,section-io/tessera,jmptrader/tessera,filippog/tessera,aalpern/tessera,aalpern/tessera,Slach/tessera,Slach/tessera,urbanairship/tessera,urbanairship/tessera,Slach/tessera,jmptrader/tessera,jmptrader/tessera,tessera-metrics/tessera,aalpern/tessera,section-io/tessera,urbanairship/tessera,tessera-metrics/tessera,jmptrader/tessera,tessera-metrics/tessera,Slach/tessera,filippog/tessera,filippog/tessera,aalpern/tessera,tessera-metrics/tessera |
0ad29aa9945448236500b221fc489c1627e6693b | api.py | api.py | from collections import namedtuple
import requests
QUERY_TEMPLATE = '?token={}&domain={}'
BASE_URL = 'https://pddimp.yandex.ru/api2/'
Connection = namedtuple('Connection', ['auth', 'domain'])
def list_emails(connection):
url = '{}admin/email/list'.format(BASE_URL) + QUERY_TEMPLATE.format(*connection)
ret = requests.get(url)
return ret.json()
| import json
import random
import string
from collections import namedtuple
import requests
QUERY_TEMPLATE = '?token={}&domain={}'
BASE_URL = 'https://pddimp.yandex.ru/api2/'
Connection = namedtuple('Connection', ['auth', 'domain'])
class YandexException(Exception):
pass
rndstr = lambda: ''.join(random.sample(string.ascii_letters + string.hexdigits, 17))
def _check_error(ret_json):
if ret_json.get('success') == 'error':
raise YandexException(ret_json['error'])
def list_emails(connection):
url = '{}admin/email/list'.format(BASE_URL) + QUERY_TEMPLATE.format(*connection)
ret = requests.get(url).json()
_check_error(ret)
return ret
def create_email(connection, email, password=None):
if not password:
password = rndstr()
url = '{}admin/email/add'.format(BASE_URL) + QUERY_TEMPLATE.format(*connection)
url += '&login={}&password={}'.format(email, password)
ret = requests.post(url).json()
_check_error(ret)
return ret, password
def delete_email(connection, email=None, uid=None):
if not email and uid:
raise YandexException('Must specify email or uid')
url = '{}admin/email/del'.format(BASE_URL) + QUERY_TEMPLATE.format(*connection)
if email:
url += '&login={}'.format(email)
else:
url += '&uid={}'.format(uid)
ret = requests.post(url).json()
_check_error(ret)
return ret
| Add create email and delete email methods | Add create email and delete email methods
| Python | mit | chrisseto/dinosaurs.sexy,chrisseto/dinosaurs.sexy |
cbddfe308f4e0da728974777f10b245a966520b6 | summarize/__init__.py | summarize/__init__.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import combinations
from operator import itemgetter
from distance import jaccard
from networkx import Graph, pagerank
from nltk import tokenize
from .utils import get_stopwords, get_words
def summarize(text, sentence_count=5, language='english'):
stopwords = get_stopwords(language)
sentence_list = tokenize.sent_tokenize(text, language)
wordsets = [get_words(sentence, stopwords) for sentence in sentence_list]
graph = Graph()
pairs = combinations(enumerate(filter(None, wordsets)), 2)
for (index_a, words_a), (index_b, words_b) in pairs:
similarity = 1 - jaccard(words_a, words_b)
if similarity > 0:
graph.add_edge(index_a, index_b, weight=similarity)
ranked_sentence_indexes = pagerank(graph).items()
sentences_by_rank = sorted(
ranked_sentence_indexes, key=itemgetter(1), reverse=True)
best_sentences = map(itemgetter(0), sentences_by_rank[:sentence_count])
best_sentences_in_order = sorted(best_sentences)
return ' '.join(sentence_list[index] for index in best_sentences_in_order)
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from itertools import combinations
from operator import itemgetter
from distance import jaccard
from networkx import Graph, pagerank
from nltk import tokenize
from .utils import get_stopwords, get_words
def summarize(text, sentence_count=5, language='english'):
stopwords = get_stopwords(language)
sentence_list = tokenize.sent_tokenize(text, language)
wordsets = [get_words(sentence, stopwords) for sentence in sentence_list]
graph = Graph()
pairs = combinations(enumerate(wordsets), 2)
for (index_a, words_a), (index_b, words_b) in pairs:
if words_a and words_b:
similarity = 1 - jaccard(words_a, words_b)
if similarity > 0:
graph.add_edge(index_a, index_b, weight=similarity)
ranked_sentence_indexes = pagerank(graph).items()
sentences_by_rank = sorted(
ranked_sentence_indexes, key=itemgetter(1), reverse=True)
best_sentences = map(itemgetter(0), sentences_by_rank[:sentence_count])
best_sentences_in_order = sorted(best_sentences)
return ' '.join(sentence_list[index] for index in best_sentences_in_order)
| Fix sentence index shifting with empty sentences | Fix sentence index shifting with empty sentences
| Python | mit | despawnerer/summarize |
dc68813d5f555a01f1bdd2511d9d2de820369573 | conditional/blueprints/spring_evals.py | conditional/blueprints/spring_evals.py | from flask import Blueprint
from flask import render_template
from flask import request
spring_evals_bp = Blueprint('spring_evals_bp', __name__)
@spring_evals_bp.route('/spring_evals/')
def display_spring_evals():
# get user data
user_name = request.headers.get('x-webauth-user')
members = [
{
'name': "Liam Middlebrook",
'uid': 'loothelion',
'committee_meetings': 24,
'house_meetings_missed': [{'date': "aprial fools fayas ads", 'reason': "I was playing videogames"}],
'major_project': 'open_container',
'major_project_passed': True,
'social_events': "",
'comments': "please don't fail me",
'result': 'Pending'
},
{
'name': "Julien Eid",
'uid': 'jeid',
'committee_meetings': 69,
'house_meetings_missed': [],
'major_project': 'wii-u shit',
'major_project_passed': True,
'social_events': "Manipulation and Opportunism",
'comments': "imdabes",
'result': 'Passed'
}
]
# return names in 'first last (username)' format
return render_template('spring_evals.html',
username = user_name,
members = members)
| from flask import Blueprint
from flask import render_template
from flask import request
spring_evals_bp = Blueprint('spring_evals_bp', __name__)
@spring_evals_bp.route('/spring_evals/')
def display_spring_evals():
# get user data
user_name = request.headers.get('x-webauth-user')
members = [
{
'name': "Liam Middlebrook",
'uid': 'loothelion',
'committee_meetings': 24,
'house_meetings_missed': [{'date': "aprial fools fayas ads", 'reason': "I was playing videogames"}],
'major_projects': [
{
'name': "open container",
'status': "Passed",
'description': "Riding With A Flask"
}],
'major_projects_len': 1,
'major_project_passed': True,
'social_events': "",
'comments': "please don't fail me",
'result': 'Pending'
},
{
'name': "Julien Eid",
'uid': 'jeid',
'committee_meetings': 69,
'house_meetings_missed': [],
'major_projects': [
{
'name': "wii-u shit",
'status': "Failed",
'description': "Rot 3 Encryption"
}],
'major_projects_len': 1,
'major_project_passed': False,
'social_events': "Manipulation and Opportunism",
'comments': "imdabes",
'result': 'Passed'
}
]
# return names in 'first last (username)' format
return render_template('spring_evals.html',
username = user_name,
members = members)
| Restructure major projects for spring evals 👷 | Restructure major projects for spring evals 👷
| Python | mit | RamZallan/conditional,ComputerScienceHouse/conditional,RamZallan/conditional,ComputerScienceHouse/conditional,RamZallan/conditional,ComputerScienceHouse/conditional |
3ffea173c206d4c8a58953eceb34d80fb66d609b | utils/tasks.py | utils/tasks.py | from celery.task import task
from utils import send_templated_email
@task
def send_templated_email_async(to, subject, body_template, body_dict,
from_email=None, ct="html", fail_silently=False):
return send_templated_email(to,subject, body_template, body_dict,
from_email=None, ct="html", fail_silently=False)
| from celery.task import task
from utils import send_templated_email
@task
def send_templated_email_async(to, subject, body_template, body_dict,
from_email=None, ct="html", fail_silently=False, check_user_preference=True):
return send_templated_email(to,subject, body_template, body_dict,
from_email=None, ct="html", fail_silently=False)
| Allow check_user_preferences for email to be passed from async templated email send | Allow check_user_preferences for email to be passed from async templated email send
| Python | agpl-3.0 | ReachingOut/unisubs,norayr/unisubs,wevoice/wesub,eloquence/unisubs,ujdhesa/unisubs,ujdhesa/unisubs,ReachingOut/unisubs,pculture/unisubs,eloquence/unisubs,wevoice/wesub,ReachingOut/unisubs,ofer43211/unisubs,ofer43211/unisubs,ofer43211/unisubs,wevoice/wesub,ujdhesa/unisubs,pculture/unisubs,wevoice/wesub,ujdhesa/unisubs,norayr/unisubs,ReachingOut/unisubs,pculture/unisubs,pculture/unisubs,norayr/unisubs,eloquence/unisubs,eloquence/unisubs,ofer43211/unisubs,norayr/unisubs |
45963022a39f8c0f3d57199017adc78b39005d6a | openspending/lib/unicode_dict_reader.py | openspending/lib/unicode_dict_reader.py | # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
class EmptyCSVError(Exception):
pass
def UnicodeDictReader(file_or_str, encoding='utf8', **kwargs):
import csv
def decode(s, encoding):
if s is None:
return None
return s.decode(encoding)
csv_reader = csv.DictReader(file_or_str, **kwargs)
if not csv_reader.fieldnames:
raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
keymap = dict((k, k.decode(encoding)) for k in csv_reader.fieldnames)
for row in csv_reader:
yield dict((keymap[k], decode(v, encoding)) for k, v in row.iteritems())
| # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
import csv
class EmptyCSVError(Exception):
pass
class UnicodeDictReader(object):
def __init__(self, file_or_str, encoding='utf8', **kwargs):
self.encoding = encoding
self.reader = csv.DictReader(file_or_str, **kwargs)
if not self.reader.fieldnames:
raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
self.keymap = dict((k, k.decode(encoding)) for k in self.reader.fieldnames)
def __iter__(self):
return (self._decode_row(row) for row in self.reader)
def _decode_row(self, row):
return dict(
(self.keymap[k], self._decode_str(v)) for k, v in row.iteritems()
)
def _decode_str(self, s):
if s is None:
return None
return s.decode(self.encoding)
| Convert UnicodeDictReader to an iterator class, so that EmptyCSVError will get thrown on instantiation, rather than on iteration. | Convert UnicodeDictReader to an iterator class, so that EmptyCSVError will get thrown on instantiation, rather than on iteration.
| Python | agpl-3.0 | pudo/spendb,nathanhilbert/FPA_Core,USStateDept/FPA_Core,openspending/spendb,CivicVision/datahub,nathanhilbert/FPA_Core,CivicVision/datahub,spendb/spendb,johnjohndoe/spendb,johnjohndoe/spendb,spendb/spendb,USStateDept/FPA_Core,nathanhilbert/FPA_Core,pudo/spendb,openspending/spendb,johnjohndoe/spendb,openspending/spendb,pudo/spendb,spendb/spendb,CivicVision/datahub,USStateDept/FPA_Core |
db935a152efc8ab730491fc860db6d4c9cf65c5f | test/test_packages.py | test/test_packages.py | import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atom"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-ce"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("gnupg2"),
("gnupg-agent"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("pinta"),
("python"),
("python-pip"),
("scrot"),
("software-properties-common"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(Package, name):
assert Package(name).is_installed
| import pytest
@pytest.mark.parametrize("name", [
("apt-file"),
("apt-transport-https"),
("atom"),
("blktrace"),
("ca-certificates"),
("chromium-browser"),
("cron"),
("curl"),
("diod"),
("docker-ce"),
("fonts-font-awesome"),
("git"),
("gnupg"),
("gnupg2"),
("gnupg-agent"),
("handbrake"),
("handbrake-cli"),
("haveged"),
("htop"),
("i3"),
("iotop"),
("language-pack-en-base"),
("laptop-mode-tools"),
("nfs-common"),
("ntop"),
("ntp"),
("openssh-client"),
("openssh-server"),
("openssh-sftp-server"),
("openssl"),
("pinta"),
("python"),
("python-pip"),
("scrot"),
("software-properties-common"),
("suckless-tools"),
("sysstat"),
("tree"),
("vagrant"),
("vim"),
("virtualbox"),
("vlc"),
("wget"),
("whois"),
("x264"),
("xfce4-terminal"),
("xfonts-terminus"),
("xinit"),
])
def test_packages(host, name):
pkg = host.package(name)
assert pkg.is_installed
| Change test function as existing method deprecated | Change test function as existing method deprecated
| Python | mit | wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build,wicksy/laptop-build |
7d621db3618db90679461550fb0c952417616402 | bot.py | bot.py | import asyncio
import configparser
import discord
from discord.ext import commands
class Bot(commands.Bot):
def __init__(self, config_filepath, command_prefix):
super().__init__(command_prefix)
self._load_config_data(config_filepath)
def _load_config_data(self, filepath):
config = configparser.ConfigParser()
config.read(filepath)
self.email = config['LOGIN']['email']
self.password = config['LOGIN']['password']
self.owner_ID = config['OWNER']['id']
self.twitch_ID = config['TWITCH']['client_id']
self.carbon_key = config['CARBON']['key']
def run(self):
pass | import asyncio
import configparser
import discord
from discord.ext import commands
class Bot(commands.Bot):
def __init__(self, config_filepath, command_prefix):
super().__init__(command_prefix)
self._load_config_data(config_filepath)
def _load_config_data(self, filepath):
config = configparser.ConfigParser()
config.read(filepath)
self.email = config['LOGIN']['email']
self.password = config['LOGIN']['password']
self.owner_ID = config['OWNER']['id']
self.twitch_ID = config['TWITCH']['client_id']
self.carbon_key = config['CARBON']['key']
async def on_ready(self):
print("Logged in as {}".format(self.user.name))
print("User ID: {}".format(self.user.id))
print("Library: {} - {}".format(discord.__title__, discord.__version__))
def run(self):
try:
self.loop.run_until_complete(self.start(self.email, self.password))
except KeyboardInterrupt:
self.loop.run_until_complete(self.logout())
finally:
self.loop.close() | Add Bot.run and an on_ready message | Add Bot.run and an on_ready message
| Python | mit | MagiChau/ZonBot |
ee2ed250fdf42d0e4616f0e783ff1ace0a201514 | scripts/spat_conlisk_univariate_fig2.py | scripts/spat_conlisk_univariate_fig2.py | """
Purpose: to recreate the univariate pdfs of Conlisk et al. (2007)
in figure 2. This provides a test that the univariate pdfs are
working correctly
"""
import numpy as np
import mete
import csv
n0 = 617
c = 256
sing_pdfs = np.zeros((4, 7))
psi = [0.01, 0.25, 0.5, 0.75]
for i in range(0, len(psi)):
sing_pdfs[i, : ] = [mete.single_prob(n, n0, psi[i], c) for n in range(0, 7)]
writer = open('./data/conlisk_data_fig2a.csv', 'wb')
datawriter = csv.writer(writer)
for i in range(0, np.shape(sing_pdfs)[0]):
datawriter.writerow(sing_pdfs[i, ])
writer.close()
| """
Purpose: to recreate the univariate pdfs of Conlisk et al. (2007)
in figure 2. This provides a test that the univariate pdfs are
working correctly
"""
import numpy as np
import mete
import matplotlib.pyplot as plt
n0 = 617
c = 256
sing_pdfs = np.zeros((4, 8))
psi = [0.01, 0.25, 0.5, 0.75]
for i in range(0, len(psi)):
sing_pdfs[i, : ] = [mete.single_prob(n, n0, psi[i], c) for n in range(0, 8)]
n = range(0, 8)
plt.plot(n, sing_pdfs[0, :], color='black', linewidth=1)
plt.plot(n, sing_pdfs[1, :], color='black', linewidth=1, ls='--')
plt.plot(n, sing_pdfs[2, :], color='black', linewidth=2)
plt.plot(n, sing_pdfs[3, :], color='lightgrey', linewidth=2)
plt.axis([0, 7, 0, 0.9])
plt.xlabel('n')
plt.ylabel('P(n)')
plt.legend(['psi = 0.01', 'psi = 0.25', 'psi = 0.50','psi = 0.75'])
plt.savefig('../figs/conlisk_univaritate_fig2a.pdf')
| Create a pdf of the univariate pdfs instead of writing to a .csv file. | Create a pdf of the univariate pdfs instead of writing to a .csv file.
| Python | mit | weecology/mete-spatial,weecology/mete-spatial,weecology/mete-spatial,weecology/mete-spatial |
54e715f26ed62e62e8794d8084110091c8db580b | oauth_provider/utils.py | oauth_provider/utils.py | import oauth.oauth as oauth
from django.conf import settings
from django.http import HttpResponse
from stores import DataStore
OAUTH_REALM_KEY_NAME = 'OAUTH_REALM_KEY_NAME'
def initialize_server_request(request):
"""Shortcut for initialization."""
oauth_request = oauth.OAuthRequest.from_request(request.method,
request.build_absolute_uri(),
headers=request.META,
parameters=dict(request.REQUEST.items()),
query_string=request.environ.get('QUERY_STRING', ''))
if oauth_request:
oauth_server = oauth.OAuthServer(DataStore(oauth_request))
oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
else:
oauth_server = None
return oauth_server, oauth_request
def send_oauth_error(err=None):
"""Shortcut for sending an error."""
# send a 401 error
response = HttpResponse(err.message.encode('utf-8'), mimetype="text/plain")
response.status_code = 401
# return the authenticate header
realm = getattr(settings, OAUTH_REALM_KEY_NAME, '')
header = oauth.build_authenticate_header(realm=realm)
for k, v in header.iteritems():
response[k] = v
return response
| import oauth.oauth as oauth
from django.conf import settings
from django.http import HttpResponse
from stores import DataStore
OAUTH_REALM_KEY_NAME = 'OAUTH_REALM_KEY_NAME'
def initialize_server_request(request):
"""Shortcut for initialization."""
# Django converts Authorization header in HTTP_AUTHORIZATION
# Warning: it doesn't happen in tests but it's useful, do not remove!
auth_header = {}
if 'Authorization' in request.META:
auth_header = {'Authorization': request.META['Authorization']}
elif 'HTTP_AUTHORIZATION' in request.META:
auth_header = {'Authorization': request.META['HTTP_AUTHORIZATION']}
oauth_request = oauth.OAuthRequest.from_request(request.method,
request.build_absolute_uri(),
headers=auth_header,
parameters=dict(request.REQUEST.items()),
query_string=request.environ.get('QUERY_STRING', ''))
if oauth_request:
oauth_server = oauth.OAuthServer(DataStore(oauth_request))
oauth_server.add_signature_method(oauth.OAuthSignatureMethod_PLAINTEXT())
oauth_server.add_signature_method(oauth.OAuthSignatureMethod_HMAC_SHA1())
else:
oauth_server = None
return oauth_server, oauth_request
def send_oauth_error(err=None):
"""Shortcut for sending an error."""
# send a 401 error
response = HttpResponse(err.message.encode('utf-8'), mimetype="text/plain")
response.status_code = 401
# return the authenticate header
realm = getattr(settings, OAUTH_REALM_KEY_NAME, '')
header = oauth.build_authenticate_header(realm=realm)
for k, v in header.iteritems():
response[k] = v
return response
| Fix a bug introduced in the latest revision, testing auth header in initialize_server_request now, thanks Chris McMichael for the report and patch | Fix a bug introduced in the latest revision, testing auth header in initialize_server_request now, thanks Chris McMichael for the report and patch
| Python | bsd-3-clause | lukegb/django-oauth-plus,amrox/django-oauth-plus |
73fe535416dbf744752d745f0186d5406bd15d8c | test/client/local_recognizer_test.py | test/client/local_recognizer_test.py | import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
self.recognizer = RecognizerLoop.create_mycroft_recognizer(16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
| import unittest
import os
from speech_recognition import WavFile
from mycroft.client.speech.listener import RecognizerLoop
__author__ = 'seanfitz'
DATA_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), "data")
class LocalRecognizerTest(unittest.TestCase):
def setUp(self):
rl = RecognizerLoop()
self.recognizer = RecognizerLoop.create_mycroft_recognizer(rl,
16000,
"en-us")
def testRecognizerWrapper(self):
source = WavFile(os.path.join(DATA_DIR, "hey_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
source = WavFile(os.path.join(DATA_DIR, "mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
def testRecognitionInLongerUtterance(self):
source = WavFile(os.path.join(DATA_DIR, "weather_mycroft.wav"))
with source as audio:
hyp = self.recognizer.transcribe(audio.stream.read())
assert self.recognizer.key_phrase in hyp.hypstr.lower()
| Fix init of local recognizer | Fix init of local recognizer
| Python | apache-2.0 | linuxipho/mycroft-core,aatchison/mycroft-core,MycroftAI/mycroft-core,aatchison/mycroft-core,forslund/mycroft-core,linuxipho/mycroft-core,Dark5ide/mycroft-core,Dark5ide/mycroft-core,MycroftAI/mycroft-core,forslund/mycroft-core |
eb06e85c7dcb93febe22d20cd7e3e694939449ba | tests/test_xelatex.py | tests/test_xelatex.py | from latex.build import LatexMkBuilder
def test_xelatex():
min_latex = r"""
\documentclass{article}
\begin{document}
Hello, world!
\end{document}
"""
builder = LatexMkBuilder(variant='xelatex')
pdf = builder.build_pdf(min_latex)
assert pdf
| from latex.build import LatexMkBuilder
def test_xelatex():
# the example below should not compile on pdflatex, but on xelatex
min_latex = r"""
\documentclass[12pt]{article}
\usepackage{fontspec}
\setmainfont{Times New Roman}
\title{Sample font document}
\author{Hubert Farnsworth}
\date{this month, 2014}
\begin{document}
\maketitle
This an \textit{example} of document compiled
with \textbf{xelatex} compiler. LuaLaTeX should
work fine also.
\end{document}
"""
builder = LatexMkBuilder(variant='xelatex')
pdf = builder.build_pdf(min_latex)
assert pdf
| Make XeLaTeX test harder (to actually test xelatex being used). | Make XeLaTeX test harder (to actually test xelatex being used).
| Python | bsd-3-clause | mbr/latex |
d95d71f996483bdde4f0b27d9d9c023aef706c65 | freebasics/tests/test_env_variables.py | freebasics/tests/test_env_variables.py | from django.test import TestCase, RequestFactory
from molo.core.tests.base import MoloTestCaseMixin
from freebasics.views import HomeView
from freebasics.templatetags import freebasics_tags
class EnvTestCase(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
def test_block_ordering(self):
with self.settings(BLOCK_POSITION_BANNER=4,
BLOCK_POSITION_LATEST=3,
BLOCK_POSITION_QUESTIONS=2,
BLOCK_POSITION_SECTIONS=1):
factory = RequestFactory()
request = factory.get('/')
request.site = self.site
home = HomeView()
home.request = request
context = home.get_context_data()
self.assertEquals(context['blocks'][0], (
'blocks/sections.html', 1))
self.assertEquals(context['blocks'][1], (
'blocks/questions.html', 2))
self.assertEquals(context['blocks'][2], ('blocks/latest.html', 3))
self.assertEquals(context['blocks'][3], ('blocks/banners.html', 4))
def test_css_vars(self):
with self.settings(CUSTOM_CSS_BLOCK_TEXT_TRANSFORM="lowercase",
CUSTOM_CSS_ACCENT_2="red"):
styles = freebasics_tags.custom_css(context='')
self.assertEquals(styles['accent_2'], 'red')
self.assertEquals(styles['text_transform'], 'lowercase')
| from django.test import TestCase, RequestFactory
from molo.core.tests.base import MoloTestCaseMixin
from freebasics.views import HomeView
from freebasics.templatetags import freebasics_tags
class EnvTestCase(TestCase, MoloTestCaseMixin):
def setUp(self):
self.mk_main()
def test_block_ordering(self):
with self.settings(BLOCK_POSITION_BANNER=4,
BLOCK_POSITION_LATEST=3,
BLOCK_POSITION_QUESTIONS=2,
BLOCK_POSITION_SECTIONS=1):
factory = RequestFactory()
request = factory.get('/')
request.site = self.site
home = HomeView()
home.request = request
context = home.get_context_data()
self.assertEquals(context['blocks'][0], (
'blocks/sections.html', 1))
self.assertEquals(context['blocks'][1], (
'blocks/questions.html', 2))
self.assertEquals(context['blocks'][2], ('blocks/latest.html', 3))
self.assertEquals(context['blocks'][3], ('blocks/banners.html', 4))
def test_css_vars(self):
with self.settings(CUSTOM_CSS_BLOCK_TEXT_TRANSFORM="lowercase",
CUSTOM_CSS_ACCENT_2="red"):
styles = freebasics_tags.custom_css(context='')
self.assertEquals(styles['accent_2'], 'red')
self.assertEquals(styles['text_transform'], 'lowercase')
def test_custom_css(self):
response = self.client.get('/')
self.assertContains(response, '.fb-body .base-bcolor')
self.assertContains(response, '.fb-body .block-heading')
self.assertContains(response, '.section-nav__items')
| Add test for custom css | Add test for custom css
| Python | bsd-2-clause | praekelt/molo-freebasics,praekelt/molo-freebasics,praekelt/molo-freebasics,praekelt/molo-freebasics |
59c66d0e172b23ea7106a70866871d20bbcabe5b | timezones/__init__.py | timezones/__init__.py |
import pytz
TIMEZONE_CHOICES = zip(pytz.all_timezones, pytz.all_timezones)
|
import pytz
TIMEZONE_CHOICES = zip(pytz.common_timezones, pytz.common_timezones)
| Use common timezones and not all timezones pytz provides. | Use common timezones and not all timezones pytz provides.
git-svn-id: 13ea2b4cf383b32e0a7498d153ee882d068671f7@13 86ebb30f-654e-0410-bc0d-7bf82786d749
| Python | bsd-2-clause | mfogel/django-timezone-field,brosner/django-timezones |
8bd738972cebd27b068250bd52db8aacea6c7876 | src/condor_tests/ornithology/plugin.py | src/condor_tests/ornithology/plugin.py | import pytest
from .scripts import SCRIPTS
# This module is loaded as a "plugin" by pytest by a setting in conftest.py
# Any fixtures defined here will be globally available in tests,
# as if they were defined in conftest.py itself.
@pytest.fixture(scope="session")
def path_to_sleep():
return SCRIPTS["sleep"]
| import sys
import pytest
from .scripts import SCRIPTS
# This module is loaded as a "plugin" by pytest by a setting in conftest.py
# Any fixtures defined here will be globally available in tests,
# as if they were defined in conftest.py itself.
@pytest.fixture(scope="session")
def path_to_sleep():
return SCRIPTS["sleep"]
@pytest.fixture(scope="session")
def path_to_python():
return sys.executable
| Add path_to_python fixture to make writing multiplatform job scripts easier. | Add path_to_python fixture to make writing multiplatform job scripts easier.
| Python | apache-2.0 | htcondor/htcondor,htcondor/htcondor,htcondor/htcondor,htcondor/htcondor,htcondor/htcondor,htcondor/htcondor,htcondor/htcondor,htcondor/htcondor |
775170d69862aaff63231b669639a872596ed2cd | test_interpreter.py | test_interpreter.py | import unittest
import brainfuck
test_cases = [("++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.", "Hello World!\n")]
class InterpreterTestCase(unittest.TestCase):
def setUp(self):
self.interpreter = brainfuck.BrainfuckInterpreter()
def runTest(self):
for case in test_cases:
self.assertEqual(case[1], self.interpreter.eval(case[0]))
| import unittest
import brainfuck
hello_case = ("++++++++[>++++[>++>+++>+++>+<<<<-]>+>+>->>+[<]<-]>>.>---.+++++++..+++.>>.<-.<.+++.------.--------.>>+.>++.", "Hello World!\n")
class InterpreterTestCase(unittest.TestCase):
def setUp(self):
self.interpreter = brainfuck.BrainfuckInterpreter()
def test_hello_world(self):
self.assertEqual(hello_case[1], self.interpreter.eval(hello_case[0]))
def test_missing_parenthesis(self):
self.assertRaises(SyntaxError, self.interpreter.eval, '[++]+]')
| Add unittest for missing parenthesis | Add unittest for missing parenthesis
| Python | bsd-3-clause | handrake/brainfuck |
131454ffaec010442c17f748365ab491668d947f | plumeria/plugins/bing_images.py | plumeria/plugins/bing_images.py | from aiohttp import BasicAuth
from plumeria import config, scoped_config
from plumeria.command import commands, CommandError
from plumeria.config.common import nsfw
from plumeria.message import Response
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
SEARCH_URL = "https://api.datamarket.azure.com/Bing/Search/v1/Image"
api_key = config.create("bing", "key",
fallback="unset",
comment="An API key from Bing")
@commands.register("image", "images", "i", category="Search")
@rate_limit()
async def image(message):
"""
Search Bing for an image and returns a URL to that image.
Example::
/image socially awkward penguin
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get(SEARCH_URL, params=[
('$format', 'json'),
('$top', '10'),
('Adult', "'Off'" if scoped_config.get(nsfw, message.channel) else "'Strict'"),
('Query', "'{}'".format(q)),
], auth=BasicAuth("", password=api_key()))
data = r.json()['d']
if len(data['results']):
return Response(data['results'][0]['MediaUrl'])
else:
raise CommandError("no results found")
| import random
from aiohttp import BasicAuth
from plumeria import config, scoped_config
from plumeria.command import commands, CommandError
from plumeria.config.common import nsfw
from plumeria.message import Response
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
SEARCH_URL = "https://api.datamarket.azure.com/Bing/Search/v1/Image"
api_key = config.create("bing", "key",
fallback="unset",
comment="An API key from Bing")
@commands.register("image", "images", "i", category="Search")
@rate_limit()
async def image(message):
"""
Search Bing for an image and returns a URL to that image.
Example::
/image socially awkward penguin
"""
q = message.content.strip()
if not q:
raise CommandError("Search term required!")
r = await http.get(SEARCH_URL, params=[
('$format', 'json'),
('$top', '20'),
('Adult', "'Off'" if scoped_config.get(nsfw, message.channel) else "'Strict'"),
('Query', "'{}'".format(q)),
], auth=BasicAuth("", password=api_key()))
data = r.json()['d']
if len(data['results']):
return Response(random.choice(data['results'])['MediaUrl'])
else:
raise CommandError("no results found")
| Make the Bing images search pick a random top 20 image. | Make the Bing images search pick a random top 20 image.
| Python | mit | sk89q/Plumeria,sk89q/Plumeria,sk89q/Plumeria |
f7caec9d0c0058b5d760992172b434b461f70d90 | molecule/default/tests/test_default.py | molecule/default/tests/test_default.py | import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_docker_service_enabled(host):
service = host.service('docker')
assert service.is_enabled
def test_docker_service_running(host):
service = host.service('docker')
assert service.is_running
@pytest.mark.parametrize('socket_def', [
# listening on localhost, for tcp sockets on port 1883 (MQQT)
('tcp://127.0.0.1:1883'),
# all IPv4 tcp sockets on port 8883 (MQQTS)
('tcp://8883'),
])
def test_listening_sockets(host, socket_def):
socket = host.socket(socket_def)
assert socket.is_listening
# Tests to write:
# - using the localhost listener:
# - verify that a message can be published
# - verify that a published message can be subscribed to
| import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_docker_service_enabled(host):
service = host.service('docker')
assert service.is_enabled
def test_docker_service_running(host):
service = host.service('docker')
assert service.is_running
@pytest.mark.parametrize('socket_def', [
# all IPv4 tcp sockets on port 8883 (MQQTS)
('tcp://8883'),
])
def test_listening_sockets(host, socket_def):
socket = host.socket(socket_def)
assert socket.is_listening
# Tests to write:
# - using the localhost listener:
# - verify that a message can be published
# - verify that a published message can be subscribed to
| Remove test enforcing listening on port 1883 | Remove test enforcing listening on port 1883
| Python | mit | triplepoint/ansible-mosquitto |
3dc06581d07a204a3044e3a78deb84950a6ebf74 | mtp_transaction_uploader/api_client.py | mtp_transaction_uploader/api_client.py | from urllib.parse import urljoin
from oauthlib.oauth2 import LegacyApplicationClient
from requests_oauthlib import OAuth2Session
import slumber
from . import settings
REQUEST_TOKEN_URL = urljoin(settings.API_URL, '/oauth2/token/')
def get_authenticated_connection():
"""
Returns:
an authenticated slumber connection
"""
session = OAuth2Session(
client=LegacyApplicationClient(
client_id=settings.API_CLIENT_ID
)
)
session.fetch_token(
token_url=REQUEST_TOKEN_URL,
username=settings.API_USERNAME,
password=settings.API_PASSWORD,
client_id=settings.API_CLIENT_ID,
client_secret=settings.API_CLIENT_SECRET
)
return slumber.API(
base_url=settings.API_URL, session=session
)
| from urllib.parse import urljoin
from oauthlib.oauth2 import LegacyApplicationClient
from requests.auth import HTTPBasicAuth
from requests_oauthlib import OAuth2Session
import slumber
from . import settings
REQUEST_TOKEN_URL = urljoin(settings.API_URL, '/oauth2/token/')
def get_authenticated_connection():
"""
Returns:
an authenticated slumber connection
"""
session = OAuth2Session(
client=LegacyApplicationClient(
client_id=settings.API_CLIENT_ID
)
)
session.fetch_token(
token_url=REQUEST_TOKEN_URL,
username=settings.API_USERNAME,
password=settings.API_PASSWORD,
auth=HTTPBasicAuth(settings.API_CLIENT_ID, settings.API_CLIENT_SECRET)
)
return slumber.API(
base_url=settings.API_URL, session=session
)
| Use HTTPBasicAuth when connecting to the API | Use HTTPBasicAuth when connecting to the API
| Python | mit | ministryofjustice/money-to-prisoners-transaction-uploader |
e6933a46086f83913de307fb8803ccbfd3c55114 | mysite/mysite/tests/test_middleware.py | mysite/mysite/tests/test_middleware.py | from django.contrib.auth.models import User
from django.test import TestCase
from DjangoLibrary.middleware import FactoryBoyMiddleware
from mock import Mock
import json
class TestFactoryBoyMiddleware(TestCase):
def setUp(self):
self.middleware = FactoryBoyMiddleware()
self.request = Mock()
self.request.session = {}
def test_process_request_creates_object(self):
self.request.configure_mock(
**{
'GET': {
'FACTORY_BOY_MODEL_PATH': 'mysite.tests.factories.UserFactory', # noqa
'FACTORY_BOY_ARGS': ''
}
}
)
response = self.middleware.process_request(self.request)
self.assertEqual(201, response.status_code)
self.assertEqual(
'johndoe',
json.loads(response.content).get('username')
)
self.assertEqual(1, len(User.objects.values()))
self.assertEqual('johndoe', User.objects.values()[0]['username'])
| from django.contrib.auth.models import User
from django.test import TestCase
from DjangoLibrary.middleware import FactoryBoyMiddleware
from mock import Mock
import json
class TestFactoryBoyMiddleware(TestCase):
def setUp(self):
self.middleware = FactoryBoyMiddleware()
self.request = Mock()
self.request.session = {}
def test_process_request_creates_object(self):
self.request.configure_mock(
**{
'GET': {
'FACTORY_BOY_MODEL_PATH': 'mysite.tests.factories.UserFactory', # noqa
'FACTORY_BOY_ARGS': ''
}
}
)
response = self.middleware.process_request(self.request)
self.assertEqual(201, response.status_code)
self.assertEqual(
'johndoe',
json.loads(response.content.decode('utf-8')).get('username')
)
self.assertEqual(1, len(User.objects.values()))
self.assertEqual('johndoe', User.objects.values()[0]['username'])
| Fix integration tests for Python 3. | Fix integration tests for Python 3.
| Python | apache-2.0 | kitconcept/robotframework-djangolibrary |
e68c38428c055f7c001011c6cc325593d2a26a81 | pyFxTrader/strategy/__init__.py | pyFxTrader/strategy/__init__.py | # -*- coding: utf-8 -*-
class Strategy(object):
TIMEFRAMES = [] # e.g. ['M30', 'H2']
def __init__(self, instrument):
self.instrument = instrument
if not self.TIMEFRAMES:
raise ValueError('Please define TIMEFRAMES variable.')
def start(self):
"""Called on strategy start."""
raise NotImplementedError()
def new_bar(self, instrument, cur_index):
"""Called on every bar of every instrument that client is subscribed on."""
raise NotImplementedError()
def execute(self, engine, instruments, cur_index):
"""Called on after all indicators have been updated for this bar's index"""
raise NotImplementedError()
def end(self, engine):
"""Called on strategy stop."""
raise NotImplementedError()
| # -*- coding: utf-8 -*-
from collections import deque
from logbook import Logger
log = Logger('pyFxTrader')
class Strategy(object):
TIMEFRAMES = [] # e.g. ['M30', 'H2']
BUFFER_SIZE = 500
feeds = {}
def __init__(self, instrument):
self.instrument = instrument
if not self.TIMEFRAMES:
raise ValueError('Please define TIMEFRAMES variable.')
for tf in self.TIMEFRAMES:
self.feeds[tf] = deque(maxlen=self.BUFFER_SIZE)
log.info('Initialized %s feed for %s' % (tf, self.instrument))
def start(self):
"""Called on strategy start."""
raise NotImplementedError()
def new_bar(self, instrument, cur_index):
"""Called on every bar of every instrument that client is subscribed on."""
raise NotImplementedError()
def execute(self, engine, instruments, cur_index):
"""Called on after all indicators have been updated for this bar's index"""
raise NotImplementedError()
def end(self, engine):
"""Called on strategy stop."""
raise NotImplementedError()
| Add default BUFFER_SIZE for feeds | Add default BUFFER_SIZE for feeds
| Python | mit | jmelett/pyfx,jmelett/pyFxTrader,jmelett/pyfx |
73d9049dea55ddfa32e4cb09f969b6ff083fee2c | tests/redisdl_test.py | tests/redisdl_test.py | import redisdl
import unittest
import json
import os.path
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis()
for key in self.r.keys('*'):
self.r.delete(key)
def test_roundtrip(self):
path = os.path.join(os.path.dirname(__file__), 'fixtures', 'dump.json')
with open(path) as f:
dump = f.read()
redisdl.loads(dump)
redump = redisdl.dumps()
expected = json.loads(dump)
actual = json.loads(redump)
self.assertEqual(expected, actual)
| import redisdl
import unittest
import json
import os.path
class RedisdlTest(unittest.TestCase):
def setUp(self):
import redis
self.r = redis.Redis()
for key in self.r.keys('*'):
self.r.delete(key)
def test_roundtrip(self):
path = os.path.join(os.path.dirname(__file__), 'fixtures', 'dump.json')
with open(path) as f:
dump = f.read()
redisdl.loads(dump)
redump = redisdl.dumps()
expected = json.loads(dump)
actual = json.loads(redump)
self.assertEqual(expected, actual)
def test_dump_string_value(self):
self.r.set('key', 'value')
dump = redisdl.dumps()
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': 'value'}}
self.assertEqual(expected, actual)
def test_dump_unicode_value(self):
self.r.set('key', u"\u041c\u043e\u0441\u043a\u0432\u0430")
dump = redisdl.dumps()
actual = json.loads(dump)
expected = {'key': {'type': 'string', 'value': u"\u041c\u043e\u0441\u043a\u0432\u0430"}}
self.assertEqual(expected, actual)
| Add tests for dumping string and unicode values | Add tests for dumping string and unicode values
| Python | bsd-2-clause | p/redis-dump-load,p/redis-dump-load,hyunchel/redis-dump-load,hyunchel/redis-dump-load |
23a0db627060afc3e1563d298c733edd8bb106a1 | src/ConfigLoader.py | src/ConfigLoader.py | import json
import sys
def load_config_file(out=sys.stdout):
default_filepath = "../resources/config/default-config.json"
user_filepath = "../resources/config/user-config.json"
try:
default_json = read_json(default_filepath)
user_json = read_json(user_filepath)
for property in user_json:
default_json[property] = user_json[property]
except FileNotFoundError as e:
out.write("Cannot find file: " + e.filename)
else:
out.write("Read styling config JSON correctly.")
return default_json
def read_json(filepath):
config_string = ''
with open(filepath) as f:
for line in f:
line = line.lstrip()
if not line.startswith("//"):
config_string += line
config_json = json.loads(config_string)
return config_json
if __name__ == "__main__":
load_config_file() | import json
import sys
def load_config_file(out=sys.stdout):
if sys.argv[0].endswith('nosetests'):
default_filepath = "./resources/config/default-config.json"
user_filepath = "./resources/config/user-config.json"
else:
default_filepath = "../resources/config/default-config.json"
user_filepath = "../resources/config/user-config.json"
try:
default_json = read_json(default_filepath)
user_json = read_json(user_filepath)
for property in user_json:
default_json[property] = user_json[property]
except FileNotFoundError as e:
out.write("Cannot find file: " + e.filename)
else:
out.write("Read styling config JSON correctly.")
return default_json
def read_json(filepath):
config_string = ''
with open(filepath) as f:
for line in f:
line = line.lstrip()
if not line.startswith("//"):
config_string += line
config_json = json.loads(config_string)
return config_json
if __name__ == "__main__":
load_config_file() | Fix nosetests for config file loading | Fix nosetests for config file loading
| Python | bsd-3-clause | sky-uk/bslint |
4fb39abc5afef5b0ca87e5c3b40e3dc9c9c0b2ef | tests/functions_tests/test_accuracy.py | tests/functions_tests/test_accuracy.py | import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer.testing import attr
if cuda.available:
cuda.init()
class TestAccuracy(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 3)).astype(numpy.float32)
self.t = numpy.random.randint(3, size=(10,)).astype(numpy.int32)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
y = chainer.functions.accuracy(x, t)
count = 0
for i in six.moves.range(self.t.size):
pred = self.x[i].argmax()
if pred == self.t[i]:
count += 1
expected = float(count) / self.t.size
gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
| import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer.testing import attr
if cuda.available:
cuda.init()
class TestAccuracy(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, (10, 3)).astype(numpy.float32)
self.t = numpy.random.randint(3, size=(10,)).astype(numpy.int32)
def check_forward(self, x_data, t_data):
x = chainer.Variable(x_data)
t = chainer.Variable(t_data)
y = chainer.functions.accuracy(x, t)
self.assertEqual((), y.data.shape)
count = 0
for i in six.moves.range(self.t.size):
pred = self.x[i].argmax()
if pred == self.t[i]:
count += 1
expected = float(count) / self.t.size
gradient_check.assert_allclose(expected, cuda.to_cpu(y.data))
def test_forward_cpu(self):
self.check_forward(self.x, self.t)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))
| Add test fot shape of result of accuracy function | Add test fot shape of result of accuracy function
| Python | mit | elviswf/chainer,1986ks/chainer,cupy/cupy,ktnyt/chainer,hvy/chainer,tigerneil/chainer,rezoo/chainer,sou81821/chainer,tereka114/chainer,hvy/chainer,wavelets/chainer,jfsantos/chainer,chainer/chainer,ikasumi/chainer,okuta/chainer,aonotas/chainer,ysekky/chainer,kashif/chainer,ktnyt/chainer,niboshi/chainer,pfnet/chainer,ytoyama/yans_chainer_hackathon,cupy/cupy,jnishi/chainer,muupan/chainer,AlpacaDB/chainer,cemoody/chainer,chainer/chainer,anaruse/chainer,wkentaro/chainer,ronekko/chainer,kiyukuta/chainer,masia02/chainer,truongdq/chainer,kikusu/chainer,umitanuki/chainer,hidenori-t/chainer,cupy/cupy,t-abe/chainer,keisuke-umezawa/chainer,jnishi/chainer,jnishi/chainer,keisuke-umezawa/chainer,truongdq/chainer,jnishi/chainer,AlpacaDB/chainer,niboshi/chainer,laysakura/chainer,niboshi/chainer,okuta/chainer,benob/chainer,hvy/chainer,t-abe/chainer,keisuke-umezawa/chainer,ktnyt/chainer,kikusu/chainer,kuwa32/chainer,chainer/chainer,delta2323/chainer,wkentaro/chainer,woodshop/complex-chainer,tscohen/chainer,muupan/chainer,ktnyt/chainer,woodshop/chainer,wkentaro/chainer,bayerj/chainer,okuta/chainer,minhpqn/chainer,Kaisuke5/chainer,cupy/cupy,sinhrks/chainer,benob/chainer,okuta/chainer,wkentaro/chainer,hvy/chainer,tkerola/chainer,chainer/chainer,keisuke-umezawa/chainer,niboshi/chainer,sinhrks/chainer,yanweifu/chainer |
7d6e6325e0baf5f4994350a7dd52856db0cb6c8a | src/apps/processing/ala/management/commands/ala_import.py | src/apps/processing/ala/management/commands/ala_import.py | import logging
from django.core.management.base import BaseCommand
from apps.processing.ala.util import util
from dateutil.parser import parse
from datetime import date, timedelta
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Import data from ALA stations, optionally you can pass date. Otherwise it will fetch yesterday data'
def add_arguments(self, parser):
parser.add_argument('date', nargs='?', type=parse, default=None)
def handle(self, *args, **options):
stations = util.get_or_create_stations()
day = options['date']
if day is None:
day = date.today() - timedelta(2)
logger.info(
'Importing observations of {} ALA stations from {}.'.format(
len(stations),
day
)
)
try:
for station in stations:
util.load(station, day)
util.create_avgs(station, day)
except Exception as e:
self.stdout.write(self.style.ERROR(e))
| import logging
from django.core.management.base import BaseCommand
from apps.processing.ala.util import util
from dateutil.parser import parse
from dateutil import relativedelta
from datetime import date, timedelta
logger = logging.getLogger(__name__)
def parse_date_range(date_str):
if len(date_str) == 4:
day_from = parse(date_str).replace(day=1, month=1)
day_to = day_from + relativedelta.relativedelta(years=1)
elif len(date_str) == 7:
day_from = parse(date_str).replace(day=1)
day_to = day_from + relativedelta.relativedelta(months=1)
else:
day_from = parse(date_str)
day_to = day_from + timedelta(1)
return [day_from, day_to]
class Command(BaseCommand):
help = 'Import data from ALA stations. Optionally you can pass date, ' \
'otherwise it will fetch the day before yesterday data.'
def add_arguments(self, parser):
parser.add_argument('date_range', nargs='?', type=parse_date_range,
default=[None, None])
def handle(self, *args, **options):
stations = util.get_or_create_stations()
day_from, day_to = options['date_range']
if day_from is None:
day_from = date.today() - timedelta(2)
day_to = day_from + timedelta(1)
day = day_from
while(day < day_to):
logger.info(
'Importing observations of {} ALA stations from {}.'.format(
len(stations),
day
)
)
for station in stations:
util.load(station, day)
util.create_avgs(station, day)
day += timedelta(1)
| Enable ALA import by month or year | Enable ALA import by month or year
| Python | bsd-3-clause | gis4dis/poster,gis4dis/poster,gis4dis/poster |
830a767aa42cfafc22342ff71c23419c86845fe8 | src/Classes/SubClass/SubClass.py | src/Classes/SubClass/SubClass.py | class defaultDict(dict):
def __init__(self, default=None):
dict.__init__(self)
self.default = default
def __getitem__(self, key):
try:
return dict.__getitem____(self, key)
except KeyError:
return self.default | class defaultDict(dict):
def __init__(self, default=None):
dict.__init__(self)
self.default = default
def __getitem__(self, key):
if(key in self):
return dict.__getitem__(self, key)
else:
return self.default
def get(self, key, *args):
if not args:
args = (self.default,)
return dict.get(self, key, *args)
def merge(self, other):
for key in other:
if key not in self:
self[key] = other[key]
if __name__ == '__main__':
'''
Built-in Dict has a issue.
You want solve the problem,
when dict doesn't declare variable that you call the variable will throw the "KeyError"
'''
print defaultDict
print type(defaultDict)
d = defaultDict(default = 5) #Init
print d
print type(d) # Get Type
print d.__class__ # Get Class
print type(d) is d.__class__
d[1] = 3
print d
print d[0]
d.merge({1:300, 2:400})
print d
print "-----"
built_dict = dict()
built_dict[1] = 3
print built_dict
print built_dict[0]
| Add get and merge method. Add Main Function to use class. | [Add] Add get and merge method.
Add Main Function to use class.
| Python | mit | williamHuang5468/ExpertPython |
ad2b8bce02c7b7b7ab477fc7fccc1b38fb60e63a | turbustat/statistics/input_base.py | turbustat/statistics/input_base.py | # Licensed under an MIT open source license - see LICENSE
from astropy.io.fits import PrimaryHDU
from spectral_cube import SpectralCube
from spectral_cube.lower_dimensional_structures import LowerDimensionalObject
def input_data(data):
'''
Accept a variety of input data forms and return those expected by the
various statistics.
'''
if isinstance(data, PrimaryHDU):
return (data.data, data.header)
elif isinstance(data, SpectralCube):
return (data.filled_data[:].value, data.header)
elif isinstance(data, LowerDimensionalObject):
return (data.value, data.header)
elif isinstance(data, tuple) or isinstance(data, list):
if len(data) != 2:
raise TypeError("Must have two items: data and the header.")
return data
else:
raise TypeError("Input data is not of an accepted form:"
" astropy.io.fits.PrimaryHDU, SpectralCube,"
" spectral_cube.LowerDimensionalObject or a tuple or"
" list containing the data and header, in that order.")
| # Licensed under an MIT open source license - see LICENSE
from astropy.io.fits import PrimaryHDU
from spectral_cube import SpectralCube
from spectral_cube.lower_dimensional_structures import LowerDimensionalObject
import numpy as np
def input_data(data, no_header=False):
'''
Accept a variety of input data forms and return those expected by the
various statistics.
Parameters
----------
data : astropy.io.fits.PrimaryHDU, SpectralCube,
spectral_cube.LowerDimensionalObject, np.ndarray or a tuple/list
with the data and the header
Data to be used with a given statistic or distance metric. no_header
must be enabled when passing only an array in.
no_header : bool, optional
When enabled, returns only the data without the header.
Returns
-------
ouput_data : tuple or np.ndarray
A tuple containing the data and the header. Or an array when no_header
is enabled.
'''
if isinstance(data, PrimaryHDU):
output_data = (data.data, data.header)
elif isinstance(data, SpectralCube):
output_data = (data.filled_data[:].value, data.header)
elif isinstance(data, LowerDimensionalObject):
output_data = (data.value, data.header)
elif isinstance(data, tuple) or isinstance(data, list):
if len(data) != 2:
raise TypeError("Must have two items: data and the header.")
output_data = data
elif isinstance(data, np.ndarray):
if not no_header:
raise TypeError("no_header must be enabled when giving data"
" without a header.")
output_data = (data, )
else:
raise TypeError("Input data is not of an accepted form:"
" astropy.io.fits.PrimaryHDU, SpectralCube,"
" spectral_cube.LowerDimensionalObject or a tuple or"
" list containing the data and header, in that order.")
if no_header:
return output_data[0]
return output_data
| Allow for the case when a header isn't needed | Allow for the case when a header isn't needed
| Python | mit | Astroua/TurbuStat,e-koch/TurbuStat |
2ef4362be90e2314b69a2ff17ccb5d25ef8905fd | rackspace/database/database_service.py | rackspace/database/database_service.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import service_filter
class DatabaseService(service_filter.ServiceFilter):
"""The database service."""
valid_versions = [service_filter.ValidVersion('v1', path='v1.0')]
def __init__(self, version=None):
"""Create a database service."""
super(DatabaseService, self).__init__(service_type="rax:database",
service_name="cloudDatabases",
version=version)
| # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from openstack import service_filter
class DatabaseService(service_filter.ServiceFilter):
"""The database service."""
valid_versions = [service_filter.ValidVersion('v1', path='v1.0')]
def __init__(self, version=None):
"""Create a database service."""
if not version:
version = "v1"
super(DatabaseService, self).__init__(service_type="rax:database",
service_name="cloudDatabases",
version=version)
| Set default version for cloud databases. | Set default version for cloud databases.
| Python | apache-2.0 | rackerlabs/rackspace-sdk-plugin,briancurtin/rackspace-sdk-plugin |
a307bddf490b6ad16739593d4ca51693b9a340fe | regulations/templatetags/in_context.py | regulations/templatetags/in_context.py | from django import template
register = template.Library()
class InContextNode(template.Node):
def __init__(self, nodelist, subcontext_names):
self.nodelist = nodelist
self.subcontext_names = subcontext_names
def render(self, context):
new_context = {}
for field in self.subcontext_names:
value = context.get(field, {})
if isinstance(value, dict):
new_context.update(context.get(field, {}))
else:
new_context[field] = value
return self.nodelist.render(template.Context(new_context))
@register.tag('begincontext')
def in_context(parser, token):
"""
Replaces the context (inside of this block) for easy (and safe) inclusion
of sub-content.
For example, if the context is {'name': 'Kitty', 'sub': {'size': 5}}
1: {{ name }} {{ size }}
{% begincontext sub %}
2: {{ name }} {{ size }}
{% endcontext %}
3: {{ name }} {{ size }}
Will print
1: Kitty
2: 5
3: Kitty
Arguments which are not dictionaries will 'cascade' into the inner
context.
"""
nodelist = parser.parse(('endcontext',))
parser.delete_first_token()
return InContextNode(nodelist, token.split_contents()[1:])
| from django import template
register = template.Library()
class InContextNode(template.Node):
def __init__(self, nodelist, subcontext_names):
self.nodelist = nodelist
self.subcontext_names = subcontext_names
def render(self, context):
new_context = {}
for field in self.subcontext_names:
value = context.get(field, {})
if isinstance(value, dict):
new_context.update(context.get(field, {}))
else:
new_context[field] = value
new_context = context.new(new_context)
return self.nodelist.render(new_context)
@register.tag('begincontext')
def in_context(parser, token):
"""
Replaces the context (inside of this block) for easy (and safe) inclusion
of sub-content.
For example, if the context is {'name': 'Kitty', 'sub': {'size': 5}}
1: {{ name }} {{ size }}
{% begincontext sub %}
2: {{ name }} {{ size }}
{% endcontext %}
3: {{ name }} {{ size }}
Will print
1: Kitty
2: 5
3: Kitty
Arguments which are not dictionaries will 'cascade' into the inner
context.
"""
nodelist = parser.parse(('endcontext',))
parser.delete_first_token()
return InContextNode(nodelist, token.split_contents()[1:])
| Fix custom template tag to work with django 1.8 | Fix custom template tag to work with django 1.8
| Python | cc0-1.0 | 18F/regulations-site,jeremiak/regulations-site,18F/regulations-site,18F/regulations-site,eregs/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,tadhg-ohiggins/regulations-site,tadhg-ohiggins/regulations-site,18F/regulations-site,jeremiak/regulations-site,jeremiak/regulations-site,tadhg-ohiggins/regulations-site,eregs/regulations-site,jeremiak/regulations-site |
4a6f76857a626dd756675a4fe1dd3660cf63d8b7 | alg_fibonacci.py | alg_fibonacci.py | """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import print_function
import time
def fibonacci(n):
"""Get nth number of Fibonacci series by recursion."""
if n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
def main():
n = 13
print('{}th number of Fibonacci series: {}'
.format(n, fibonacci(n)))
if __name__ == '__main__':
main()
| """Fibonacci series:
0, 1, 1, 2, 3, 5, 8,...
- Fib(0) = 0
- Fib(1) = 1
- Fib(n) = Fib(n - 1) + Fib(n - 2)
"""
from __future__ import print_function
def fibonacci(n):
"""Get nth number of Fibonacci series by recursion."""
if n == 0:
return 0
elif n == 1 or n == 2:
return 1
else:
return fibonacci(n - 1) + fibonacci(n - 2)
def main():
import time
n = 13
print('{}th number of Fibonacci series: {}'
.format(n, fibonacci(n)))
if __name__ == '__main__':
main()
| Move module time to main() | Move module time to main()
| Python | bsd-2-clause | bowen0701/algorithms_data_structures |
ea5fd30a583016b4dc858848e28168ada74deca3 | blaze/py3help.py | blaze/py3help.py | import sys
import itertools
PY3 = sys.version_info[:2] >= (3,0)
if PY3:
def dict_iteritems(d):
return d.items().__iter__()
xrange = range
_inttypes = (int,)
_strtypes = (str,)
unicode = str
imap = map
basestring = str
import urllib
urlparse = urllib.parse
else:
import __builtin__
def dict_iteritems(d):
return d.iteritems()
xrange = __builtin__.xrange
unicode = __builtin__.unicode
basestring = __builtin__.basestring
_strtypes = (str, unicode)
_inttypes = (int, long)
imap = itertools.imap
import urlparse
| import sys
import itertools
PY3 = sys.version_info[:2] >= (3,0)
if PY3:
def dict_iteritems(d):
return d.items().__iter__()
xrange = range
_inttypes = (int,)
_strtypes = (str,)
unicode = str
imap = map
basestring = str
import urllib.parse as urlparse
else:
import __builtin__
def dict_iteritems(d):
return d.iteritems()
xrange = __builtin__.xrange
unicode = __builtin__.unicode
basestring = __builtin__.basestring
_strtypes = (str, unicode)
_inttypes = (int, long)
imap = itertools.imap
import urlparse
| Check in fix for py3k and urlparse. | Check in fix for py3k and urlparse.
| Python | bsd-3-clause | scls19fr/blaze,markflorisson/blaze-core,ChinaQuants/blaze,jdmcbr/blaze,nkhuyu/blaze,cpcloud/blaze,LiaoPan/blaze,FrancescAlted/blaze,cowlicks/blaze,FrancescAlted/blaze,mwiebe/blaze,markflorisson/blaze-core,mwiebe/blaze,ContinuumIO/blaze,FrancescAlted/blaze,aterrel/blaze,AbhiAgarwal/blaze,jdmcbr/blaze,aterrel/blaze,ChinaQuants/blaze,mwiebe/blaze,mrocklin/blaze,jcrist/blaze,AbhiAgarwal/blaze,mwiebe/blaze,nkhuyu/blaze,alexmojaki/blaze,AbhiAgarwal/blaze,markflorisson/blaze-core,cowlicks/blaze,dwillmer/blaze,cpcloud/blaze,mrocklin/blaze,xlhtc007/blaze,maxalbert/blaze,alexmojaki/blaze,aterrel/blaze,caseyclements/blaze,FrancescAlted/blaze,caseyclements/blaze,xlhtc007/blaze,dwillmer/blaze,ContinuumIO/blaze,markflorisson/blaze-core,AbhiAgarwal/blaze,scls19fr/blaze,maxalbert/blaze,jcrist/blaze,LiaoPan/blaze |
05b2848849553172873600ffd6344fc2b1f12d8e | example/__init__.py | example/__init__.py | from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ex'
def get_metadata(self):
return {
'name': 'Example',
'legislature_name': 'Example Legislature',
'legislature_url': 'http://example.com',
'terms': [{
'name': '2013-2014',
'sessions': ['2013'],
'start_year': 2013,
'end_year': 2014
}],
'provides': ['people'],
'parties': [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
],
'session_details': {
'2013': {'_scraped_name': '2013'}
},
'feature_flags': [],
}
def get_scraper(self, term, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| from pupa.scrape import Jurisdiction
from .people import PersonScraper
class Example(Jurisdiction):
jurisdiction_id = 'ocd-jurisdiction/country:us/state:ex/place:example'
def get_metadata(self):
return {
'name': 'Example',
'legislature_name': 'Example Legislature',
'legislature_url': 'http://example.com',
'terms': [{
'name': '2013-2014',
'sessions': ['2013'],
'start_year': 2013,
'end_year': 2014
}],
'provides': ['people'],
'parties': [
{'name': 'Independent' },
{'name': 'Green' },
{'name': 'Bull-Moose'}
],
'session_details': {
'2013': {'_scraped_name': '2013'}
},
'feature_flags': [],
}
def get_scraper(self, term, session, scraper_type):
if scraper_type == 'people':
return PersonScraper
def scrape_session_list(self):
return ['2013']
| Substitute a more realistic jurisdiction_id | Substitute a more realistic jurisdiction_id
| Python | bsd-3-clause | datamade/pupa,mileswwatkins/pupa,rshorey/pupa,opencivicdata/pupa,mileswwatkins/pupa,influence-usa/pupa,datamade/pupa,influence-usa/pupa,rshorey/pupa,opencivicdata/pupa |
3298fff0ded49c21897a7387a7f3093c351ae04f | scripts/run_psql.py | scripts/run_psql.py | #!/usr/bin/env python
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from acoustid.script import run_script
import subprocess
def main(script, opts, args):
subprocess.call(['psql'] + script.config.database.create_psql_args())
run_script(main)
| #!/usr/bin/env python
# Copyright (C) 2011 Lukas Lalinsky
# Distributed under the MIT license, see the LICENSE file for details.
from acoustid.script import run_script
import os
def main(script, opts, args):
os.execlp('psql', 'psql', *script.config.database.create_psql_args())
run_script(main)
| Use os.exelp to launch psql | Use os.exelp to launch psql
| Python | mit | lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server,lalinsky/acoustid-server |
e10f2b85775412dd60f11deea6e7a7f8b84edfbe | buysafe/urls.py | buysafe/urls.py | from django.conf.urls import patterns
urlpatterns = patterns(
'buysafe.views',
(r'^entry/(?P<order_id>\d+)/$', 'entry'),
(r'^start/$', 'start'),
(r'^success/(?P<payment_type>[01])/$', 'success'),
(r'^fail/(?P<payment_type>[01])/$', 'fail'),
(r'^check/(?P<payment_type>[01])/$', 'check')
)
| from django.conf.urls import patterns, url
urlpatterns = patterns(
'buysafe.views',
url(r'^entry/(?P<order_id>\d+)/$', 'entry', name='buysafe_pay'),
(r'^start/$', 'start'),
(r'^success/(?P<payment_type>[01])/$', 'success'),
(r'^fail/(?P<payment_type>[01])/$', 'fail'),
(r'^check/(?P<payment_type>[01])/$', 'check')
)
| Add name for entry view URL | Add name for entry view URL
| Python | bsd-3-clause | uranusjr/django-buysafe |
202a9b2ad72ffe4ab5981f9a4a886e0a36808eb6 | tests/sentry/utils/json/tests.py | tests/sentry/utils/json/tests.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import uuid
from sentry.utils import json
from sentry.testutils import TestCase
class JSONTest(TestCase):
def test_uuid(self):
res = uuid.uuid4()
self.assertEquals(json.dumps(res), '"%s"' % res.hex)
def test_datetime(self):
res = datetime.datetime(day=1, month=1, year=2011, hour=1, minute=1, second=1)
self.assertEquals(json.dumps(res), '"2011-01-01T01:01:01.000000Z"')
def test_set(self):
res = set(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_frozenset(self):
res = frozenset(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_escape(self):
res = '<script>alert(1);</script>'
assert json.dumps(res) == '"<script>alert(1);</script>"'
assert json.dumps(res, escape=True) == '"<script>alert(1);<\/script>"'
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
import datetime
import uuid
from sentry.utils import json
from sentry.testutils import TestCase
class JSONTest(TestCase):
def test_uuid(self):
res = uuid.uuid4()
self.assertEquals(json.dumps(res), '"%s"' % res.hex)
def test_datetime(self):
res = datetime.datetime(day=1, month=1, year=2011, hour=1, minute=1, second=1)
self.assertEquals(json.dumps(res), '"2011-01-01T01:01:01.000000Z"')
def test_set(self):
res = set(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_frozenset(self):
res = frozenset(['foo', 'bar'])
self.assertEquals(json.dumps(res), '["foo","bar"]')
def test_escape(self):
res = '<script>alert(1);</script>'
assert json.dumps(res) == '"<script>alert(1);</script>"'
assert json.dumps(res, escape=True) == '"<script>alert(1);<\/script>"'
def test_inf(self):
res = float('inf')
self.assertEquals(json.dumps(res), 'null')
| Add test for handling Infinity in json | Add test for handling Infinity in json
| Python | bsd-3-clause | mitsuhiko/sentry,ifduyue/sentry,looker/sentry,mvaled/sentry,gencer/sentry,mvaled/sentry,looker/sentry,zenefits/sentry,korealerts1/sentry,zenefits/sentry,Kryz/sentry,kevinlondon/sentry,alexm92/sentry,JamesMura/sentry,mvaled/sentry,kevinlondon/sentry,looker/sentry,beeftornado/sentry,BuildingLink/sentry,ifduyue/sentry,felixbuenemann/sentry,looker/sentry,JamesMura/sentry,ngonzalvez/sentry,zenefits/sentry,gencer/sentry,jean/sentry,fotinakis/sentry,gencer/sentry,daevaorn/sentry,Kryz/sentry,jean/sentry,korealerts1/sentry,BuildingLink/sentry,fotinakis/sentry,imankulov/sentry,daevaorn/sentry,felixbuenemann/sentry,BayanGroup/sentry,beeftornado/sentry,alexm92/sentry,nicholasserra/sentry,kevinlondon/sentry,mitsuhiko/sentry,JackDanger/sentry,felixbuenemann/sentry,mvaled/sentry,ngonzalvez/sentry,Natim/sentry,BuildingLink/sentry,nicholasserra/sentry,JackDanger/sentry,JamesMura/sentry,daevaorn/sentry,JamesMura/sentry,mvaled/sentry,imankulov/sentry,fotinakis/sentry,jean/sentry,BuildingLink/sentry,Natim/sentry,BayanGroup/sentry,BuildingLink/sentry,mvaled/sentry,ifduyue/sentry,imankulov/sentry,JackDanger/sentry,gencer/sentry,daevaorn/sentry,ifduyue/sentry,gencer/sentry,fotinakis/sentry,jean/sentry,BayanGroup/sentry,zenefits/sentry,Kryz/sentry,ngonzalvez/sentry,korealerts1/sentry,zenefits/sentry,Natim/sentry,ifduyue/sentry,jean/sentry,looker/sentry,alexm92/sentry,beeftornado/sentry,nicholasserra/sentry,JamesMura/sentry |
27fec389928c93ba0efe4ca1e4c5b34c3b73fa2c | snippet/__init__.py | snippet/__init__.py | """
"cms.plugins.snippet" (from djangocms) clone to extend it with some facilities
""" | """
"cms.plugins.snippet" (from djangocms) clone to extend it with some facilities
"""
__version__ = '2.3.6.1'
| Add a version base on djangocms version from where the code was cloned | Add a version base on djangocms version from where the code was cloned
| Python | bsd-3-clause | emencia/emencia-cms-snippet,emencia/emencia-cms-snippet,emencia/emencia-cms-snippet |
de22e547c57be633cb32d51e93a6efe9c9e90293 | src/helper_threading.py | src/helper_threading.py | import threading
try:
import prctl
def set_thread_name(name): prctl.set_name(name)
def _thread_name_hack(self):
set_thread_name(self.name)
threading.Thread.__bootstrap_original__(self)
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
threading.Thread._Thread__bootstrap = _thread_name_hack
except ImportError:
log('WARN: prctl module is not installed. You will not be able to see thread names')
def set_thread_name(name): pass
class StoppableThread(object):
def initStop(self):
self.stop = threading.Event()
self._stopped = False
def stopThread(self):
self._stopped = True
self.stop.set()
| import threading
try:
import prctl
def set_thread_name(name): prctl.set_name(name)
def _thread_name_hack(self):
set_thread_name(self.name)
threading.Thread.__bootstrap_original__(self)
threading.Thread.__bootstrap_original__ = threading.Thread._Thread__bootstrap
threading.Thread._Thread__bootstrap = _thread_name_hack
except ImportError:
def set_thread_name(name): pass
class StoppableThread(object):
def initStop(self):
self.stop = threading.Event()
self._stopped = False
def stopThread(self):
self._stopped = True
self.stop.set()
| Remove buggy log message if prctl is missing | Remove buggy log message if prctl is missing
- it's not that important that you need to be informed of it, and
importing logging may cause cyclic dependencies/other problems
| Python | mit | hb9kns/PyBitmessage,hb9kns/PyBitmessage,hb9kns/PyBitmessage,hb9kns/PyBitmessage |
a25920e3549933e4c6c158cc9490f3eaa883ec66 | Lib/test/test_heapq.py | Lib/test/test_heapq.py | """Unittests for heapq."""
from test.test_support import verify, vereq, verbose, TestFailed
from heapq import heappush, heappop
import random
def check_invariant(heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
parentpos = ((pos+1) >> 1) - 1
if parentpos >= 0:
verify(heap[parentpos] <= item)
def test_main():
# 1) Push 100 random numbers and pop them off, verifying all's OK.
heap = []
data = []
check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
heappush(heap, item)
check_invariant(heap)
results = []
while heap:
item = heappop(heap)
check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
vereq(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
check_invariant(results)
# 3) Naive "N-best" algorithm
heap = []
for item in data:
heappush(heap, item)
if len(heap) > 10:
heappop(heap)
heap.sort()
vereq(heap, data_sorted[-10:])
# Make user happy
if verbose:
print "All OK"
if __name__ == "__main__":
test_main()
| """Unittests for heapq."""
from test.test_support import verify, vereq, verbose, TestFailed
from heapq import heappush, heappop
import random
def check_invariant(heap):
# Check the heap invariant.
for pos, item in enumerate(heap):
if pos: # pos 0 has no parent
parentpos = (pos-1) >> 1
verify(heap[parentpos] <= item)
def test_main():
# 1) Push 100 random numbers and pop them off, verifying all's OK.
heap = []
data = []
check_invariant(heap)
for i in range(256):
item = random.random()
data.append(item)
heappush(heap, item)
check_invariant(heap)
results = []
while heap:
item = heappop(heap)
check_invariant(heap)
results.append(item)
data_sorted = data[:]
data_sorted.sort()
vereq(data_sorted, results)
# 2) Check that the invariant holds for a sorted array
check_invariant(results)
# 3) Naive "N-best" algorithm
heap = []
for item in data:
heappush(heap, item)
if len(heap) > 10:
heappop(heap)
heap.sort()
vereq(heap, data_sorted[-10:])
# Make user happy
if verbose:
print "All OK"
if __name__ == "__main__":
test_main()
| Use the same child->parent "formula" used by heapq.py. | check_invariant(): Use the same child->parent "formula" used by heapq.py.
| Python | mit | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator |
b12b4f47d3cd701506380c914e45b9958490392c | functional_tests.py | functional_tests.py | from selenium import webdriver
browser = webdriver.Firefox()
browser.get('http://localhost:8000')
assert 'Django' in browser.title
| from selenium import webdriver
import unittest
class NewVisitorTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_can_start_a_list_and_retrieve_it_later(self):
# Edith has heard about a cool new onlien to-do app. She goes
# to check out its homepage
self.browser.get('http://localhost:8000')
# She notices the page title and header mention to-do lists
self.assertIn('To-Do', self.browser.title)
self.fail('Finish the test!')
# She is invited to enter a to-do item straight away
# Edith has heard about a cool new online to-do app. She goes
# to check out its homepage
# She notices the page title and header mention to-do lists
# She is invited to enter a to-do item straight away
# She types "Buy peacock feathers" into a text box (Edith's hobby
# is tying fly-fishing lures)
# When she hits enter, the page updates, and now the page lists
# "1: Buy peacock feathers" as an itme in a to-do list
# There is still a text box inviting her to add another item. She
# enters "Use peacock feathers to make a fly" (Edith is very methodical)
# The page updates again, and now shows both updates on her list
# Edith wonders whether the site will remember her list. Then she sees
# that the site has generated a unique URL for her -- there is some
# explanatory text to that effect.
# She visits that URL - her to-do list is still there.
# Satisfied, she goes back to sleep
if __name__ == '__main__':
unittest.main(warnings='ignore')
| Add FT specs and use unittest. | Add FT specs and use unittest.
| Python | mit | ejpreciado/superlists,ejpreciado/superlists,ejpreciado/superlists |
d98e33d28105c8180b591e3097af83e42599bfc5 | django_local_apps/management/commands/docker_exec.py | django_local_apps/management/commands/docker_exec.py | import logging
import docker
from djangoautoconf.cmd_handler_base.msg_process_cmd_base import DjangoCmdBase
log = logging.getLogger()
class DockerExecutor(DjangoCmdBase):
def add_arguments(self, parser):
# Positional arguments
"""
:param: in the args it could be: /usr/local/bin/python /home/richard/codes/django-dev-server/manage.py help
NO need to add '"' as "/usr/local/bin/python /home/richard/codes/django-dev-server/manage.py help"
:return:
"""
parser.add_argument('--container_id', nargs=1)
parser.add_argument('--work_dir', nargs='?', default=None)
parser.add_argument('path_and_params', nargs='+')
def msg_loop(self):
print(self.options["path_and_params"])
client = docker.from_env()
container = client.containers.get(self.options["container_id"][0])
print(container.exec_run(" ".join(self.options["path_and_params"]), workdir=self.options["work_dir"]))
Command = DockerExecutor
| import logging
import docker
from djangoautoconf.cmd_handler_base.msg_process_cmd_base import DjangoCmdBase
log = logging.getLogger()
class DockerExecutor(DjangoCmdBase):
def add_arguments(self, parser):
# Positional arguments
"""
:param: in the args it could be: /usr/local/bin/python /home/richard/codes/django-dev-server/manage.py help
NO need to add '"' as "/usr/local/bin/python /home/richard/codes/django-dev-server/manage.py help"
:return:
"""
# for using with chronograph, do not use nargs param, because chronograph seems do not support passing
# array, but using nargs will generate a list for the parameters
parser.add_argument('-c', '--container_id')
parser.add_argument('-w', '--work_dir', default=None)
parser.add_argument('path_and_params', nargs='+')
def msg_loop(self):
print(self.options["container_id"])
print(self.options["work_dir"])
print(self.options["path_and_params"])
client = docker.from_env()
container = client.containers.get(self.options["container_id"])
print(container.exec_run(" ".join(self.options["path_and_params"]), workdir=(self.options["work_dir"])))
Command = DockerExecutor
| Remove the usage of nargs to avoid different behaviors between chronograph and command line. | Remove the usage of nargs to avoid different behaviors between chronograph and command line.
| Python | bsd-3-clause | weijia/django-local-apps,weijia/django-local-apps |
36e7af17c8d7c4bdb9cfd20387c470697e1bac96 | po/build-babel.py | po/build-babel.py | import subprocess
from pathlib import Path
po_path: Path = Path(__file__).resolve().parent
def run_babel(command: str, input: Path, output_file: Path, locale: str):
subprocess.run(
[
"pybabel",
command,
f"--input={str(input)}",
f"--output-file={str(output_file)}",
f"--locale={str(locale)}",
"--domain=gaphor",
]
)
def update_po_files():
pot_path = po_path / "gaphor.pot"
for path in (path for path in po_path.iterdir() if path.suffix == ".po"):
run_babel("update", pot_path, path, path.stem)
def compile_mo_files():
for path in (path for path in po_path.iterdir() if path.suffix == ".po"):
mo_path = po_path.parent / "locale" / path.stem / "LC_MESSAGES" / "gaphor.mo"
mo_path.parent.mkdir(parents=True, exist_ok=True)
run_babel("compile", path, mo_path, path.stem)
if __name__ == "__main__":
update_po_files()
compile_mo_files()
| import subprocess
from pathlib import Path
po_path: Path = Path(__file__).resolve().parent
def run_babel(command: str, input: Path, output_file: Path, locale: str):
subprocess.run(
[
"pybabel",
command,
f"--input={input}",
f"--output-file={output_file}",
f"--locale={locale}",
"--domain=gaphor",
]
)
def update_po_files():
pot_path = po_path / "gaphor.pot"
for path in (path for path in po_path.iterdir() if path.suffix == ".po"):
run_babel("update", pot_path, path, path.stem)
def compile_mo_files():
for path in (path for path in po_path.iterdir() if path.suffix == ".po"):
mo_path = po_path.parent / "locale" / path.stem / "LC_MESSAGES" / "gaphor.mo"
mo_path.parent.mkdir(parents=True, exist_ok=True)
run_babel("compile", path, mo_path, path.stem)
if __name__ == "__main__":
update_po_files()
compile_mo_files()
| Simplify f-string to remove cast | Simplify f-string to remove cast
| Python | lgpl-2.1 | amolenaar/gaphor,amolenaar/gaphor |
7cee7edabad08b01ceda0ed8f2798ebf47c87e95 | src/pycontw2016/urls.py | src/pycontw2016/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from core.views import flat_page
from users.views import user_dashboard
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
url(r'^dashboard/$', user_dashboard, name='user_dashboard'),
url(r'^accounts/', include('users.urls')),
url(r'^proposals/', include('proposals.urls')),
url(r'^admin/', include(admin.site.urls)),
]
if settings.URL_PREFIX:
urlpatterns = [
url(r'^{prefix}'.format(prefix=settings.URL_PREFIX),
include(urlpatterns)),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Catch-all URL pattern must be put last.
urlpatterns += [
url(r'^(?P<path>.+)/$', flat_page, name='page'),
]
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView
from core.views import flat_page
from users.views import user_dashboard
urlpatterns = [
url(r'^$', TemplateView.as_view(template_name="index.html"), name='index'),
url(r'^dashboard/$', user_dashboard, name='user_dashboard'),
url(r'^accounts/', include('users.urls')),
url(r'^proposals/', include('proposals.urls')),
url(r'^admin/', include(admin.site.urls)),
]
if settings.URL_PREFIX:
urlpatterns = [
url(r'^{prefix}'.format(prefix=settings.URL_PREFIX),
include(urlpatterns)),
]
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
# Catch-all URL pattern must be put last.
if settings.URL_PREFIX:
urlpatterns += [
url(r'^{prefix}(?P<path>.+)/$'.format(prefix=settings.URL_PREFIX),
flat_page, name='page'),
]
else:
urlpatterns += [url(r'^(?P<path>.+)/$', flat_page, name='page')]
| Fix catch-all URLs with prefix | Fix catch-all URLs with prefix
| Python | mit | uranusjr/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,pycontw/pycontw2016,uranusjr/pycontw2016,pycontw/pycontw2016,uranusjr/pycontw2016,uranusjr/pycontw2016 |
e2cbd73218e6f5cf5b86718f6adebd92e9fee2a3 | geotrek/trekking/tests/test_filters.py | geotrek/trekking/tests/test_filters.py | from geotrek.land.tests.test_filters import LandFiltersTest
from geotrek.trekking.filters import TrekFilterSet
from geotrek.trekking.factories import TrekFactory
class TrekFilterLandTest(LandFiltersTest):
filterclass = TrekFilterSet
def test_land_filters_are_well_setup(self):
filterset = TrekFilterSet()
self.assertIn('work', filterset.filters)
def create_pair_of_distinct_path(self):
useless_path, seek_path = super(TrekFilterLandTest, self).create_pair_of_distinct_path()
self.create_pair_of_distinct_topologies(TrekFactory, useless_path, seek_path)
return useless_path, seek_path
| # Make sure land filters are set up when testing
from geotrek.land.filters import * # NOQA
from geotrek.land.tests.test_filters import LandFiltersTest
from geotrek.trekking.filters import TrekFilterSet
from geotrek.trekking.factories import TrekFactory
class TrekFilterLandTest(LandFiltersTest):
filterclass = TrekFilterSet
def test_land_filters_are_well_setup(self):
filterset = TrekFilterSet()
self.assertIn('work', filterset.filters)
def create_pair_of_distinct_path(self):
useless_path, seek_path = super(TrekFilterLandTest, self).create_pair_of_distinct_path()
self.create_pair_of_distinct_topologies(TrekFactory, useless_path, seek_path)
return useless_path, seek_path
| Make sure land filters are set up when testing | Make sure land filters are set up when testing
| Python | bsd-2-clause | GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,mabhub/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,Anaethelion/Geotrek,makinacorpus/Geotrek,makinacorpus/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek |
c1b0cfe9fdfbacf71ffbba45b4a8f7efe3fe36a7 | docker/dev_wrong_port_warning.py | docker/dev_wrong_port_warning.py | #!/usr/bin/env python
"""
Per kobotoolbox/kobo-docker#301, we have changed the uWSGI port to 8001. This
provides a helpful message to anyone still trying to use port 8000
"""
import BaseHTTPServer
import sys
class Handler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(503)
self.end_headers()
self.wfile.write(
'Your development environment is trying to connect to the KoBoCAT '
'container on port 8000 instead of 8001. Please change this. See '
'https://github.com/kobotoolbox/kobo-docker/issues/301 '
'for more details.'
)
server_address = ('', int(sys.argv[1]))
httpd = BaseHTTPServer.HTTPServer(server_address, Handler)
httpd.serve_forever()
| #!/usr/bin/env python
"""
Per kobotoolbox/kobo-docker#301, we have changed the uWSGI port to 8001. This
provides a helpful message to anyone still trying to use port 8000
"""
import sys
from http.server import BaseHTTPRequestHandler, HTTPServer
class Handler(BaseHTTPRequestHandler):
def do_GET(self):
self.send_response(503)
self.end_headers()
self.wfile.write(
b'Your development environment is trying to connect to the KoBoCAT '
b'container on port 8000 instead of 8001. Please change this. See '
b'https://github.com/kobotoolbox/kobo-docker/issues/301 '
b'for more details.'
)
server_address = ('', int(sys.argv[1]))
httpd = HTTPServer(server_address, Handler)
httpd.serve_forever()
| Update wront port script to Python3 | Update wront port script to Python3
| Python | bsd-2-clause | kobotoolbox/kobocat,kobotoolbox/kobocat,kobotoolbox/kobocat,kobotoolbox/kobocat |
fee10efeae410a0bc51842877ef8ffb5fe8b97af | src/file_dialogs.py | src/file_dialogs.py | #!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import message_loop
def open_file():
message_loop.init_main_loop()
if message_loop.is_gtk:
raise Exception("not implemented")
else:
import wx
wc = "JSON files (*.json)|*.json|All files (*.*)|*.*"
fd = wx.FileDialog(None, "Open trace file...", style=wx.FD_OPEN, wildcard=wc)
res = fd.ShowModal()
if res != wx.ID_OK:
return None
return fd.GetPath()
| #!/usr/bin/env python
# Copyright 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import message_loop
def open_file():
message_loop.init_main_loop()
if message_loop.is_gtk:
import gtk
dlg = gtk.FileChooserDialog(title=None,action=gtk.FILE_CHOOSER_ACTION_SAVE,
buttons=(gtk.STOCK_CANCEL,gtk.RESPONSE_CANCEL,gtk.STOCK_OPEN,gtk.RESPONSE_OK))
flt = gtk.FileFilter()
flt.set_name("JSON files")
flt.add_pattern("*.json");
dlg.add_filter(flt)
flt = gtk.FileFilter()
flt.set_name("All files")
flt.add_pattern("*.*");
dlg.add_filter(flt)
resp = dlg.run()
if resp == gtk.RESPONSE_CANCEL:
dlg.destroy()
return None
f = dlg.get_filename()
dlg.destroy()
return f
elif message_loop.is_wx:
import wx
wc = "JSON files (*.json)|*.json|All files (*.*)|*.*"
fd = wx.FileDialog(None, "Open trace file...", style=wx.FD_OPEN, wildcard=wc)
res = fd.ShowModal()
if res != wx.ID_OK:
return None
return fd.GetPath()
else:
raise Exception("Not implemented.")
| Add gtk implementation of open_file | Add gtk implementation of open_file
| Python | apache-2.0 | natduca/trace_event_viewer,natduca/trace_event_viewer,natduca/trace_event_viewer |
6104b111b4ceaec894018b77cbea4a0de31400d4 | chainer/trainer/extensions/_snapshot.py | chainer/trainer/extensions/_snapshot.py | from chainer.serializers import npz
from chainer.trainer import extension
def snapshot(savefun=npz.save_npz,
filename='snapshot_iter_{.updater.iteration}'):
"""Return a trainer extension to take snapshots of the trainer.
This extension serializes the trainer object and saves it to the output
directory. It is used to support resuming the training loop from the saved
state.
This extension is called once for each epoch by default.
.. note::
This extension first writes the serialized object to a temporary file
and then rename it to the target file name. Thus, if the program stops
right before the renaming, the temporary file might be left in the
output directory.
Args:
savefun: Function to save the trainer. It takes two arguments: the
output file path and the trainer object.
filename (str): Name of the file into which the trainer is serialized.
It can be a format string, where the trainer object is passed to
the :meth:`str.format` method.
"""
@extension.make_extension(trigger=(1, 'epoch'))
def ext(trainer):
fname = filename.format(trainer)
fd, tmppath = tempfile.mkstemp(prefix=fname, dir=trainer.out)
try:
savefun(tmppath, trainer)
finally:
os.close(fd)
os.rename(tmppath, os.path.join(trainer.out, fname))
return ext
| from chainer.serializers import npz
from chainer.trainer import extension
def snapshot(savefun=npz.save_npz,
filename='snapshot_iter_{.updater.iteration}'):
"""Return a trainer extension to take snapshots of the trainer.
This extension serializes the trainer object and saves it to the output
directory. It is used to support resuming the training loop from the saved
state.
This extension is called once for each epoch by default.
.. note::
This extension first writes the serialized object to a temporary file
and then rename it to the target file name. Thus, if the program stops
right before the renaming, the temporary file might be left in the
output directory.
Args:
savefun: Function to save the trainer. It takes two arguments: the
output file path and the trainer object.
filename (str): Name of the file into which the trainer is serialized.
It can be a format string, where the trainer object is passed to
the :meth:`str.format` method.
"""
@extension.make_extension(name='snapshot', trigger=(1, 'epoch'))
def ext(trainer):
fname = filename.format(trainer)
fd, tmppath = tempfile.mkstemp(prefix=fname, dir=trainer.out)
try:
savefun(tmppath, trainer)
finally:
os.close(fd)
os.rename(tmppath, os.path.join(trainer.out, fname))
return ext
| Add name to the snapshot extension | Add name to the snapshot extension
| Python | mit | hvy/chainer,jnishi/chainer,ktnyt/chainer,chainer/chainer,cupy/cupy,hvy/chainer,ktnyt/chainer,cupy/cupy,wkentaro/chainer,ysekky/chainer,kikusu/chainer,pfnet/chainer,jnishi/chainer,okuta/chainer,keisuke-umezawa/chainer,ktnyt/chainer,okuta/chainer,niboshi/chainer,niboshi/chainer,cupy/cupy,rezoo/chainer,chainer/chainer,hvy/chainer,delta2323/chainer,chainer/chainer,aonotas/chainer,wkentaro/chainer,keisuke-umezawa/chainer,tkerola/chainer,niboshi/chainer,niboshi/chainer,kashif/chainer,chainer/chainer,jnishi/chainer,keisuke-umezawa/chainer,kikusu/chainer,cupy/cupy,keisuke-umezawa/chainer,anaruse/chainer,okuta/chainer,wkentaro/chainer,jnishi/chainer,wkentaro/chainer,ronekko/chainer,ktnyt/chainer,kiyukuta/chainer,hvy/chainer,okuta/chainer |
ab6b61d8d0b91ebc2d0b1b8cbd526cfbb6a45a42 | chipy_org/libs/social_auth_pipelines.py | chipy_org/libs/social_auth_pipelines.py | from django.contrib.auth import get_user_model
from django.utils.translation import ugettext
from social_auth.exceptions import AuthAlreadyAssociated
from social_auth.backends.pipeline.associate import associate_by_email as super_associate_by_email
def associate_by_email(*args, **kwargs):
"""Check if a user with this email already exists. If they do, don't create an account."""
backend = kwargs['backend']
if backend.name in ['google-oauth2', 'github']:
# We provide and exception here for users upgrading.
return super_associate_by_email(*args, **kwargs)
email = kwargs['details'].get('email')
if email:
User = get_user_model()
if User.objects.filter(email=email).exists():
msg = ugettext('This email is already in use. First login with your other account and '
'under the top right menu click add account.')
raise AuthAlreadyAssociated(backend, msg % {
'provider': backend.name
})
| from django.contrib.auth import get_user_model
from django.utils.translation import ugettext
from social_auth.exceptions import AuthAlreadyAssociated
from social_auth.backends.pipeline.associate import associate_by_email as super_associate_by_email
def associate_by_email(*args, **kwargs):
"""Check if a user with this email already exists. If they do, don't create an account."""
backend = kwargs['backend']
if backend.name in ['google-oauth2', 'github'] or kwargs.get('user'):
# We provide and exception here for users upgrading.
return super_associate_by_email(*args, **kwargs)
email = kwargs['details'].get('email')
if email:
User = get_user_model()
if User.objects.filter(email=email).exists():
msg = ugettext('This email is already in use. First login with your other account and '
'under the top right menu click add account.')
raise AuthAlreadyAssociated(backend, msg % {
'provider': backend.name
})
| Check for a user from previously in the pipeline before checking for duplicate user. | Check for a user from previously in the pipeline before checking for duplicate user.
| Python | mit | chicagopython/chipy.org,tanyaschlusser/chipy.org,agfor/chipy.org,bharathelangovan/chipy.org,tanyaschlusser/chipy.org,chicagopython/chipy.org,brianray/chipy.org,chicagopython/chipy.org,tanyaschlusser/chipy.org,chicagopython/chipy.org,bharathelangovan/chipy.org,brianray/chipy.org,agfor/chipy.org,bharathelangovan/chipy.org,brianray/chipy.org,agfor/chipy.org |
62a04170e41d599d653e94afe0844576e175c314 | settings_example.py | settings_example.py | import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| import os
import re
from imap import EmailCheckError, EmailServer
from postgresql import DatabaseServer
CSV_FOLDER = os.getcwd()
# Values come from `EMAIL_SUBJECT_RE`.
CSV_NAME_FORMAT = '{year}-{month}-{day}T{hour}{minute}.csv'
# Restrict emails by sender.
EMAIL_FROM = 'sender@example.com'
# Restrict emails by subject.
EMAIL_SUBJECT_RE = re.compile(''.join([
r'(?P<year>\d{4})',
r'(?P<month>\d{2})',
r'(?P<day>\d{2})',
r'(?P<hour>\d{2})',
r'(?P<minute>\d{2})',
r'\.csv',
]))
LOGGING_FORMAT = '''
- file: %(pathname)s
level: %(levelname)s
line: %(lineno)s
message: |
%(message)s
time: %(asctime)s
'''.strip()
# Values come from `EMAIL_SUBJECT_RE`.
TABLE_NAME_FORMAT = 'data_{year}{month}'
def get_database_client():
con = 'my_username/my_password@database.example.com:5432/my_database'
return DatabaseServer(con)
def get_email_client():
return EmailServer('mail.example.com', 'my_username', 'my_password')
| Add comments on variable settings | Add comments on variable settings
| Python | mit | AustralianAntarcticDataCentre/save_emails_to_files,AustralianAntarcticDataCentre/save_emails_to_files |
d03b385b5d23c321ee1d4bd2020be1452e8c1cab | pika/__init__.py | pika/__init__.py | # ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
__version__ = '0.9.13p1'
from pika.connection import ConnectionParameters
from pika.connection import URLParameters
from pika.credentials import PlainCredentials
from pika.spec import BasicProperties
from pika.adapters.base_connection import BaseConnection
from pika.adapters.asyncore_connection import AsyncoreConnection
from pika.adapters.blocking_connection import BlockingConnection
from pika.adapters.select_connection import SelectConnection
# Python 2.4 support: add struct.unpack_from if it's missing.
try:
import struct
getattr(struct, "unpack_from")
except AttributeError:
def _unpack_from(fmt, buf, offset=0):
slice = buffer(buf, offset, struct.calcsize(fmt))
return struct.unpack(fmt, slice)
struct.unpack_from = _unpack_from
| # ***** BEGIN LICENSE BLOCK *****
#
# For copyright and licensing please refer to COPYING.
#
# ***** END LICENSE BLOCK *****
__version__ = '0.9.13p2'
from pika.connection import ConnectionParameters
from pika.connection import URLParameters
from pika.credentials import PlainCredentials
from pika.spec import BasicProperties
from pika.adapters.base_connection import BaseConnection
from pika.adapters.asyncore_connection import AsyncoreConnection
from pika.adapters.blocking_connection import BlockingConnection
from pika.adapters.select_connection import SelectConnection
| Remove Python 2.4 support monkey patch and bump rev | Remove Python 2.4 support monkey patch and bump rev
| Python | bsd-3-clause | reddec/pika,skftn/pika,Tarsbot/pika,shinji-s/pika,jstnlef/pika,zixiliuyue/pika,fkarb/pika-python3,renshawbay/pika-python3,vrtsystems/pika,Zephor5/pika,pika/pika,vitaly-krugl/pika,knowsis/pika,hugoxia/pika,benjamin9999/pika |
cea891a2de493ce211ccf13f4bf0c487f945985d | test_audio_files.py | test_audio_files.py | import fore.apikeys
import fore.mixer
import fore.database
import pyechonest.track
for file in fore.database.get_many_mp3(status='all'):
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
track.track_from_filename('audio/'+file.filename, force_upload=True)
| import fore.apikeys
import fore.mixer
import fore.database
import pyechonest.track
for file in fore.database.get_many_mp3(status='all'):
print("Name: {} Length: {}".format(file.filename, file.track_details['length']))
track = track.track_from_filename('audio/'+file.filename, force_upload=True)
print(track.id)
| Print out the new track id. | Print out the new track id.
| Python | artistic-2.0 | Rosuav/appension,MikeiLL/appension,MikeiLL/appension,MikeiLL/appension,Rosuav/appension,MikeiLL/appension,Rosuav/appension,Rosuav/appension |
550b48c0d1b464a782d875e30da140c742cd4b3e | tests/test_bayes.py | tests/test_bayes.py | from unittest import TestCase
from itertools import repeat, imap, izip, cycle
from spicedham.bayes import Bayes
from spicedham import Spicedham
class TestBayes(TestCase):
def test_classify(self):
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
alphabet = map(chr, range(97, 123))
for letter in alphabet:
p = b.classify(letter)
self.assertEqual(p, 0.5)
def _training(self, bayes):
alphabet = map(chr, range(97, 123))
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
bayes.train(message, is_spam)
def test_train(self):
alphabet = map(chr, range(97, 123))
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
for letter in alphabet:
result = sh.backend.get_key(b.__class__.__name__, letter)
self.assertEqual(result, {'numTotal': 2, 'numSpam': 1})
self.assertGreaterEqual(result['numTotal'], result['numSpam'])
| from unittest import TestCase
from itertools import repeat, imap, izip, cycle
from spicedham.bayes import Bayes
from spicedham import Spicedham
class TestBayes(TestCase):
def test_classify(self):
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
alphabet = map(chr, range(97, 123))
for letter in alphabet:
p = b.classify(letter)
self.assertEqual(p, 0.5)
def _training(self, bayes):
alphabet = map(chr, range(97, 123))
reversed_alphabet = reversed(alphabet)
messagePairs = izip(alphabet, reversed_alphabet)
for message, is_spam in izip(messagePairs, cycle((True, False))):
bayes.train(message, is_spam)
def test_train(self):
alphabet = map(chr, range(97, 123))
sh = Spicedham()
b = Bayes(sh.config, sh.backend)
b.backend.reset()
self._training(b)
for letter in alphabet:
result = sh.backend.get_key(b.__class__.__name__, letter)
self.assertEqual(result, {'numTotal': 2, 'numSpam': 1})
self.assertTrue(result['numTotal'] >= result['numSpam'])
| Replace assertGreaterEqual with assertTrue(a >= b) | Replace assertGreaterEqual with assertTrue(a >= b)
| Python | mpl-2.0 | mozilla/spicedham,mozilla/spicedham |
2fdf35f8a9bf7a6249bc92236952655314a47080 | swapify/__init__.py | swapify/__init__.py | # -*- encoding: utf-8 -*-
__version__ = VERSION = '0.0.0' | # -*- encoding: utf-8 -*-
__author__ = 'Sebastian Vetter'
__version__ = VERSION = '0.0.0'
__license__ = 'MIT'
| Add author and lincense variables | Add author and lincense variables
| Python | mit | elbaschid/swapify |
40897be402bd05ed5fb53e116f03d2d954720245 | astrobin_apps_platesolving/utils.py | astrobin_apps_platesolving/utils.py | # Python
import urllib2
# Django
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
def getFromStorage(image, alias):
url = image.thumbnail(alias)
if "://" in url:
url = url.split('://')[1]
else:
url = settings.BASE_URL + url
url = 'http://' + urllib2.quote(url.encode('utf-8'))
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request(url, None, headers)
img = NamedTemporaryFile(delete = True)
img.write(urllib2.urlopen(req).read())
img.flush()
img.seek(0)
return File(img)
| # Python
import urllib2
# Django
from django.conf import settings
from django.core.files import File
from django.core.files.temp import NamedTemporaryFile
def getFromStorage(image, alias):
def encoded(path):
return urllib2.quote(path.encode('utf-8'))
url = image.thumbnail(alias)
if "://" in url:
# We are getting the full path and must only encode the part after the protocol
# (we assume that the hostname is ASCII)
protocol, path = url.split("://")
url = protocol + encoded(path)
else:
url = settings.BASE_URL + encoded(url)
headers = { 'User-Agent' : 'Mozilla/5.0' }
req = urllib2.Request(url, None, headers)
img = NamedTemporaryFile(delete = True)
img.write(urllib2.urlopen(req).read())
img.flush()
img.seek(0)
return File(img)
| Fix plate-solving on local development mode | Fix plate-solving on local development mode
| Python | agpl-3.0 | astrobin/astrobin,astrobin/astrobin,astrobin/astrobin,astrobin/astrobin |
8fe8717b4e2afe6329d2dd25210371df3eab2b4f | test/test_stdlib.py | test/test_stdlib.py | # -*- coding: utf-8 -*-
"""
Tests for the standard library PEP 543 shim.
"""
import pep543.stdlib
from .backend_tests import SimpleNegotiation
class TestSimpleNegotiationStdlib(SimpleNegotiation):
BACKEND = pep543.stdlib.STDLIB_BACKEND
| # -*- coding: utf-8 -*-
"""
Tests for the standard library PEP 543 shim.
"""
import pep543
import pep543.stdlib
import pytest
from .backend_tests import SimpleNegotiation
CONTEXTS = (
pep543.stdlib.STDLIB_BACKEND.client_context,
pep543.stdlib.STDLIB_BACKEND.server_context
)
def assert_wrap_fails(context, exception):
"""
A convenient helper that calls wrap_buffers with the appropriate number of
arugments and asserts that it raises the appropriate error.
"""
if isinstance(context, pep543.stdlib.STDLIB_BACKEND.client_context):
with pytest.raises(exception):
context.wrap_buffers(server_hostname=None)
else:
with pytest.raises(exception):
context.wrap_buffers()
class TestSimpleNegotiationStdlib(SimpleNegotiation):
BACKEND = pep543.stdlib.STDLIB_BACKEND
class TestStdlibErrorHandling(object):
"""
Validate that the stdlib backend can do sensible error handling in specific
situations that it cannot handle.
"""
@pytest.mark.parametrize(
'lowest,highest', (
(object(), None), (None, object()), (object(), object())
)
)
@pytest.mark.parametrize('context', CONTEXTS)
def test_bad_values_for_versions_client(self, lowest, highest, context):
"""
Using TLSConfiguration objects with a bad value for their minimum
version raises a TLSError with Client contexts.
"""
config = pep543.TLSConfiguration(
validate_certificates=False,
lowest_supported_version=lowest,
highest_supported_version=highest
)
ctx = context(config)
assert_wrap_fails(ctx, pep543.TLSError)
| Test that we reject bad TLS versions | Test that we reject bad TLS versions
| Python | mit | python-hyper/pep543 |
04608636f6e4fc004458560499338af4b871cddb | asyncio_irc/message.py | asyncio_irc/message.py | from .utils import to_bytes
class Message:
"""A message recieved from the IRC network."""
def __init__(self, raw_message):
self.raw = raw_message
self.prefix, self.command, self.params, self.suffix = self._elements()
def _elements(self):
"""
Split the raw message into it's component parts.
Adapted from http://stackoverflow.com/a/930706/400691
"""
message = self.raw.strip()
prefix = b''
# Odd slicing required for bytes to avoid getting int instead of char
# http://stackoverflow.com/q/28249597/400691
if message[0:1] == b':':
prefix, message = message[1:].split(b' ', 1)
suffix = b''
if b' :' in message:
message, suffix = message.split(b' :', 1)
command, *params = message.split()
params = list(filter(None, params))
return prefix, command, params, suffix
def message_bytes(command, prefix=b'', params=None, suffix=b''):
command = to_bytes(command)
prefix = to_bytes(prefix)
params = list(map(to_bytes, params or []))
suffix = to_bytes(suffix)
message = command
if prefix:
message = b':' + prefix + b' ' + message
if params:
params = b' '.join(params)
message = message + b' ' + params
if suffix:
message = message + b' :' + suffix
return message
| from .utils import to_bytes
class Message(bytes):
"""A message recieved from the IRC network."""
def __init__(self, raw_message_bytes_ignored):
super().__init__()
self.prefix, self.command, self.params, self.suffix = self._elements()
def _elements(self):
"""
Split the raw message into it's component parts.
Adapted from http://stackoverflow.com/a/930706/400691
"""
message = self.strip()
prefix = b''
# Odd slicing required for bytes to avoid getting int instead of char
# http://stackoverflow.com/q/28249597/400691
if message[0:1] == b':':
prefix, message = message[1:].split(b' ', 1)
suffix = b''
if b' :' in message:
message, suffix = message.split(b' :', 1)
command, *params = message.split()
params = list(filter(None, params))
return prefix, command, params, suffix
def message_bytes(command, prefix=b'', params=None, suffix=b''):
command = to_bytes(command)
prefix = to_bytes(prefix)
params = list(map(to_bytes, params or []))
suffix = to_bytes(suffix)
message = command
if prefix:
message = b':' + prefix + b' ' + message
if params:
params = b' '.join(params)
message = message + b' ' + params
if suffix:
message = message + b' :' + suffix
return message
| Make Message a subclass of bytes | Make Message a subclass of bytes
| Python | bsd-2-clause | meshy/framewirc |
729a5c2e1276f9789733fc46fb7f48d0ac7e3178 | src/slackmoji.py | src/slackmoji.py | # pylint: disable = C0103, C0111
# Standard Library
import json
import mimetypes
from os import makedirs
from subprocess import check_output
# Third Party
import requests
def list_emojis(domain, token):
script = ['bin/list_emojis.sh {0} {1}'.format(domain, token)]
response = check_output(script, shell=True)
print "\nGot list of emojis from %s Slack domain!" % domain
return response
def download_emojis(emojis, folder):
count = 0
makedirs(folder)
emojis = json.loads(emojis)["emoji"]
print "\nDownloading emojis from list..."
for name, url in emojis.iteritems():
if 'alias' in url:
continue
else:
res = requests.get(url)
cont = res.headers['content-type']
ext = mimetypes.guess_extension(cont)
if ext == '.jpe':
ext = '.jpg'
output = folder + '/' + name + ext
with open(output, 'wb') as fd:
for chunk in res.iter_content(chunk_size=1024):
fd.write(chunk)
count = count + 1
print "\nFinished downloading %s emojis!" % count
| # pylint: disable = C0103, C0111
# Standard Library
import json
import mimetypes
from os import makedirs
# Third Party
import requests
def list_emojis(domain, token):
url = r'https://%s.slack.com/api/emoji.list' % domain
data = [('token', token)]
response = requests.post(url, data=data)
print "\nGot list of emojis from %s Slack domain!" % domain
emojis = json.loads(response.text)["emoji"]
return emojis
def download_emojis(emojis, folder):
count = 0
makedirs(folder)
print "\nDownloading emojis to %s folder..." % folder
for name, url in emojis.iteritems():
if 'alias' in url:
continue
else:
res = requests.get(url)
cont = res.headers['content-type']
ext = mimetypes.guess_extension(cont)
if ext == '.jpe':
ext = '.jpg'
output = folder + '/' + name + ext
with open(output, 'wb') as fd:
for chunk in res.iter_content(chunk_size=1024):
fd.write(chunk)
count = count + 1
print "\nFinished downloading %s emojis!" % count
| Use requests to fetch emoji list | Use requests to fetch emoji list
| Python | mit | le1ia/slackmoji |
48b7880fec255c7a021361211e56980be2bd4c6b | project/creditor/management/commands/addrecurring.py | project/creditor/management/commands/addrecurring.py | # -*- coding: utf-8 -*-
from creditor.models import RecurringTransaction
from django.core.management.base import BaseCommand, CommandError
class Command(BaseCommand):
help = 'Gets all RecurringTransactions and runs conditional_add_transaction()'
def handle(self, *args, **options):
for t in RecurringTransaction.objects.all():
ret = t.conditional_add_transaction()
if ret:
if options['verbosity'] > 1:
print("Created transaction %s" % ret)
| # -*- coding: utf-8 -*-
import datetime
import itertools
import dateutil.parser
from creditor.models import RecurringTransaction
from django.core.management.base import BaseCommand, CommandError
from django.utils import timezone
from asylum.utils import datetime_proxy, months
class Command(BaseCommand):
    """Materialise recurring transactions for every month since a given date."""

    help = 'Gets all RecurringTransactions and runs conditional_add_transaction()'

    def add_arguments(self, parser):
        # Optional positional date string; defaults to yesterday at midnight.
        parser.add_argument('since', type=str, nargs='?', default=datetime_proxy(), help='Run for each month since the date, defaults to yesterday midnight')

    def handle(self, *args, **options):
        verbosity = options['verbosity']
        since_parsed = timezone.make_aware(dateutil.parser.parse(options['since']))
        if verbosity > 2:
            print("Processing since %s" % since_parsed.isoformat())
        for recurring in RecurringTransaction.objects.all():
            if verbosity > 2:
                print("Processing: %s" % recurring)
            # Run the conditional add once per month between `since` and now.
            for month in months(since_parsed, timezone.now()):
                if verbosity > 2:
                    print(" month %s" % month.isoformat())
                created = recurring.conditional_add_transaction(month)
                if created and verbosity > 1:
                    print("Created transaction %s" % created)
| Add "since" parameter to this command | Add "since" parameter to this command
Fixes #25
| Python | mit | HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum,HelsinkiHacklab/asylum |
ccfe12391050d598ec32861ed146b66f4e907943 | noodles/entities.py | noodles/entities.py | """
Find all company names in a piece of text
extractor = EntityExtractor()
entities = extractor.entities_from_text(text)
> ['acme incorporated', 'fubar limited', ...]
TODO:
- work out a standard form to normalize company names to
- import company names into the massive regex
"""
import re
# Ordered (pattern, replacement) normalisation rules. Note the patterns are
# regular expressions, so '.' in 'ltd.' matches any character.
norm_reqs = (
    ('ltd.', 'limited'),
    (' bv ', 'b.v.'),
)


def normalize(text):
    """Lower-case *text* and apply every substitution rule in norm_reqs."""
    normalized = text.lower()
    for pattern, replacement in norm_reqs:
        normalized = re.sub(pattern, replacement, normalized)
    return normalized
def get_company_names():
    """Return the (currently hard-coded) list of known company names."""
    names = ['ABC Inc', 'Joost Ltd.']
    return names
class EntityExtractor(object):
    """Finds known company names in free text using one combined regex."""

    def __init__(self):
        # Build a single alternation over every normalized company name.
        patterns = [re.escape(normalize(name)) for name in get_company_names()]
        self.regex = re.compile('(' + '|'.join(patterns) + ')')

    def entities_from_text(self, text):
        """Return every company-name match found in *text*."""
        return self.regex.findall(normalize(text))
| """
Find all company names in a piece of text
extractor = EntityExtractor()
entities = extractor.entities_from_text(text)
> ['acme incorporated', 'fubar limited', ...]
"""
COMPANY_SOURCE_FILE = '/tmp/companies_dev.csv'
import re
import csv
norm_reqs = (
('ltd.', 'limited'),
(' bv ', 'b.v.'),
)
def normalize(text):
text = text.lower()
for (rgx, replacement) in norm_reqs:
text = re.sub(rgx, replacement, text)
return text
def get_company_names():
    """Load and normalize the company names from COMPANY_SOURCE_FILE.

    Uses the first CSV column of every row. The file handle is now closed
    deterministically via a context manager; the original opened the file
    inline and relied on garbage collection to close it.
    """
    with open(COMPANY_SOURCE_FILE, 'r') as source:
        names = [normalize(row[0]) for row in csv.reader(source)]
    return names
class EntityExtractor(object):
    """Finds known company names in free text using one combined regex."""

    def __init__(self):
        # Build a single alternation over every normalized company name.
        patterns = [re.escape(normalize(name)) for name in get_company_names()]
        self.regex = re.compile('(' + '|'.join(patterns) + ')')

    def entities_from_text(self, text):
        """Return every company-name match found in *text*."""
        return self.regex.findall(normalize(text))
| Use big company list for regex entity extraction | Use big company list for regex entity extraction
| Python | mit | uf6/noodles,uf6/noodles |
93e2ff0dd32a72efa90222988d4289c70bb55b98 | c2corg_api/models/common/fields_book.py | c2corg_api/models/common/fields_book.py | DEFAULT_FIELDS = [
'locales.title',
'locales.summary',
'locales.description',
'locales.lang',
'author',
'editor',
'activities',
'url',
'isbn',
'book_types',
'publication_date',
'langs',
'nb_pages'
]
DEFAULT_REQUIRED = [
'locales',
'locales.title',
'book_types'
]
LISTING_FIELDS = [
'locales',
'locales.title',
'activities',
'author',
'quality',
'book_types'
]
fields_book = {
'fields': DEFAULT_FIELDS,
'required': DEFAULT_REQUIRED,
'listing': LISTING_FIELDS
}
# Fields exposed on the full (detail) representation of a book document.
DEFAULT_FIELDS = [
'locales.title',
'locales.summary',
'locales.description',
'locales.lang',
'author',
'editor',
'activities',
'url',
'isbn',
'book_types',
'publication_date',
'langs',
'nb_pages'
]
# Fields that must be present when creating or updating a book.
DEFAULT_REQUIRED = [
'locales',
'locales.title',
'book_types'
]
# Compact field set returned by listing/search endpoints; includes the
# summary so listings can show a short description.
LISTING_FIELDS = [
'locales',
'locales.title',
'locales.summary',
'activities',
'author',
'quality',
'book_types'
]
# Aggregate configuration consumed by the generic document machinery.
fields_book = {
'fields': DEFAULT_FIELDS,
'required': DEFAULT_REQUIRED,
'listing': LISTING_FIELDS
}
| Add summary to book listing | Add summary to book listing
| Python | agpl-3.0 | c2corg/v6_api,c2corg/v6_api,c2corg/v6_api |
38845ecae177635b98a2e074355227f0c9f9834d | cartoframes/viz/widgets/__init__.py | cartoframes/viz/widgets/__init__.py | """
Widget functions to generate widgets faster.
"""
from __future__ import absolute_import
from .animation_widget import animation_widget
from .category_widget import category_widget
from .default_widget import default_widget
from .formula_widget import formula_widget
from .histogram_widget import histogram_widget
from .time_series_widget import time_series_widget
def _inspect(widget):
import inspect
lines = inspect.getsource(widget)
print(lines)
__all__ = [
'animation_widget',
'category_widget',
'default_widget',
'formula_widget',
'histogram_widget',
'time_series_widget',
]
| """
Widget functions to generate widgets faster.
"""
from __future__ import absolute_import
from .animation_widget import animation_widget
from .category_widget import category_widget
from .default_widget import default_widget
from .formula_widget import formula_widget
from .histogram_widget import histogram_widget
from .time_series_widget import time_series_widget
from ..widget import Widget
from ..widget_list import WidgetList
def _inspect(widget):
import inspect
lines = inspect.getsource(widget)
print(lines)
__all__ = [
'Widget',
'WidgetList',
'animation_widget',
'category_widget',
'default_widget',
'formula_widget',
'histogram_widget',
'time_series_widget',
]
| Add Widget and WidgetList to namespace | Add Widget and WidgetList to namespace
| Python | bsd-3-clause | CartoDB/cartoframes,CartoDB/cartoframes |
3640a5b1976ea6c5b21617cda11324d73071fb9f | trimwhitespace.py | trimwhitespace.py | """Remove trailing whitespace from files in current path and sub directories."""
import os, glob
def scanpath(path):
for filepath in glob.glob(os.path.join(path, '*')):
if os.path.isdir(filepath):
scanpath(filepath)
else:
trimwhitespace(filepath)
def trimwhitespace(filepath):
    """Strip trailing whitespace from every line of *filepath*.

    The file is rewritten (via overwritefile) only when at least one
    line actually changed.
    """
    with open(filepath, 'r') as handle:
        lines = handle.readlines()
    cleaned = [line.rstrip() + '\n' for line in lines]
    if cleaned != lines:
        overwritefile(filepath, ''.join(cleaned))
def overwritefile(filepath, contents):
    """Replace the contents of *filepath* with *contents* (text mode).

    Uses a context manager so the handle is closed even if write()
    raises; the original leaked the handle on error.
    """
    with open(filepath, 'w') as handle:
        handle.write(contents)
if __name__ == '__main__':
scanpath('.')
| """Remove trailing whitespace from files in current path and sub directories."""
import os, glob
def scanpath(path):
    """Recursively walk *path*, trimming whitespace in every regular file."""
    for entry in glob.glob(os.path.join(path, '*')):
        if os.path.isdir(entry):
            scanpath(entry)
        else:
            trimwhitespace(entry)
def trimwhitespace(filepath):
handle = open(filepath, 'rb')
stripped = ''
flag = False
for line in handle.readlines():
stripped_line = line.rstrip() + '\n'
if line != stripped_line:
flag = True
stripped += stripped_line
handle.close()
if flag:
print('FAIL: %s' % filepath)
overwritefile(filepath, stripped)
else:
print('OK: %s' % filepath)
def overwritefile(filepath, contents):
    """Replace the contents of *filepath* with *contents*.

    Binary mode ('wb') so Unix line endings are written verbatim on
    Windows. Uses a context manager so the handle is closed even if
    write() raises; the original leaked the handle on error.
    """
    with open(filepath, 'wb') as handle:
        handle.write(contents)
if __name__ == '__main__':
scanpath('.')
| Fix for whitespace.py so that Windows will save Unix EOLs. | Fix for whitespace.py so that Windows will save Unix EOLs.
| Python | apache-2.0 | GoogleCloudPlatform/datastore-ndb-python,GoogleCloudPlatform/datastore-ndb-python |
d31d2a73127a79566651e644d105cbe2063a6e2a | webapp/publish.py | webapp/publish.py | from cloudly.pubsub import Pusher
from cloudly.tweets import Tweets
from cloudly.twitterstream import Streamer
from webapp import config
class Publisher(Pusher):
def publish(self, tweets, event):
"""Keep only relevant fields from the given tweets."""
stripped = []
for tweet in tweets:
stripped.append({
'coordinates': tweet['coordinates'],
})
super(Publisher, self).publish(stripped, event)
def processor(tweet):
return True
def start():
# This trick of importing the current module is for RQ workers to
# correctly unpickle the `processor` function.
from webapp import publish
pubsub = publish.Publisher.open(config.pubsub_channel)
streamer = Streamer(publish.processor, pubsub=pubsub, is_queuing=True,
cache_length=100)
tweets = Tweets()
streamer.run(tweets.with_coordinates())
if __name__ == "__main__":
start()
| from cloudly.pubsub import Pusher
from cloudly.tweets import Tweets, StreamManager, keep
from webapp import config
pubsub = Pusher.open(config.pubsub_channel)
def processor(tweets):
    """Publish the coordinates of each tweet; return the batch size."""
    stripped = keep(['coordinates'], tweets)
    pubsub.publish(stripped, "tweets")
    return len(tweets)
def start():
streamer = StreamManager('locate', processor, is_queuing=False)
tweets = Tweets()
streamer.run(tweets.with_coordinates())
if __name__ == "__main__":
start()
| Fix for the new cloudly APi. | Fix for the new cloudly APi.
| Python | mit | hdemers/webapp-template,hdemers/webapp-template,hdemers/webapp-template |
c1f5b9e3bfa96762fdbe9f4ca54b3851b38294da | test/__init__.py | test/__init__.py | import glob, os.path, sys
# Add path to hiredis.so load path
path = glob.glob("build/lib*/hiredis/*.so")[0]
sys.path.insert(0, os.path.dirname(path))
from unittest import *
from . import reader
def tests():
suite = TestSuite()
suite.addTest(makeSuite(reader.ReaderTest))
return suite
| import glob, os.path, sys
version = sys.version.split(" ")[0]
# BUG FIX: slicing version[0:3] breaks for two-digit minor versions such
# as "3.10" (it would yield "3.1"); join the first two dotted components
# instead to get "major.minor".
majorminor = ".".join(version.split(".")[:2])

# Add path to hiredis.so load path
path = glob.glob("build/lib*-%s/hiredis/*.so" % majorminor)[0]
sys.path.insert(0, os.path.dirname(path))
from unittest import *
from . import reader
def tests():
suite = TestSuite()
suite.addTest(makeSuite(reader.ReaderTest))
return suite
| Add versioned path for dynamic library lookup | Add versioned path for dynamic library lookup
| Python | bsd-3-clause | badboy/hiredis-py-win,charsyam/hiredis-py,badboy/hiredis-py-win,redis/hiredis-py,charsyam/hiredis-py,badboy/hiredis-py-win,redis/hiredis-py |
3e30737c98a4a9e890d362ba4cbbb2315163bc29 | cfgov/v1/__init__.py | cfgov/v1/__init__.py | from __future__ import absolute_import # Python 2 only
from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from wagtail.wagtailcore.templatetags import wagtailcore_tags
from wagtail.wagtailadmin.templatetags import wagtailuserbar
from jinja2 import Environment
from compressor.contrib.jinja2ext import CompressorExtension
def environment(**options):
options.setdefault('extensions', []).append(CompressorExtension)
env = Environment(**options)
env.globals.update({
'static': staticfiles_storage.url,
'reverse': reverse,
'wagtailuserbar': wagtailuserbar.wagtailuserbar
})
env.filters.update({
'slugify': slugify,
'richtext': wagtailcore_tags.richtext
})
return env
| from django.contrib.staticfiles.storage import staticfiles_storage
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from wagtail.wagtailcore.templatetags import wagtailcore_tags
from wagtail.wagtailadmin.templatetags import wagtailuserbar
from jinja2 import Environment
from compressor.contrib.jinja2ext import CompressorExtension
def environment(**options):
    """Build the Jinja2 environment used by Django's template backend."""
    # Always enable django-compressor's Jinja2 extension.
    options.setdefault('extensions', []).append(CompressorExtension)
    env = Environment(**options)
    # Expose Django/Wagtail helpers as template globals and filters.
    env.globals['static'] = staticfiles_storage.url
    env.globals['reverse'] = reverse
    env.globals['wagtailuserbar'] = wagtailuserbar.wagtailuserbar
    env.filters['slugify'] = slugify
    env.filters['richtext'] = wagtailcore_tags.richtext
    return env
| Remove unused Python 2 support | Remove unused Python 2 support
| Python | cc0-1.0 | kave/cfgov-refresh,kave/cfgov-refresh,kave/cfgov-refresh,kave/cfgov-refresh |
f20c0b650da2354f3008c7a0933eb32a1409fbf0 | openacademy/model/openacademy_session.py | openacademy/model/openacademy_session.py | # -*- coding: utf-8 -*-
from openerp import fields, models
class Session(models.Model):
_name = 'openacademy.session'
name = fields.Char(required=True)
start_date = fields.Date()
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
instructor_id = fields.Many2one('res.partner', string="Instructor")
course_id = fields.Many2one('openacademy.course',
ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
| # -*- coding: utf-8 -*-
from openerp import fields, models
class Session(models.Model):
# An OpenAcademy training session: a course taught by an instructor to attendees.
_name = 'openacademy.session'
# Human-readable session title.
name = fields.Char(required=True)
start_date = fields.Date()
# Session length in days, two decimal places.
duration = fields.Float(digits=(6, 2), help="Duration in days")
seats = fields.Integer(string="Number of seats")
# Instructor selection is limited to partners flagged as instructor OR
# belonging to a partner category whose name matches "Teacher"
# (the leading '|' makes the next two domain terms an OR).
instructor_id = fields.Many2one('res.partner', string="Instructor",
domain=['|',
("instructor","=",True),
("category_id.name", "ilike", "Teacher")
])
# Deleting the course cascades to its sessions.
course_id = fields.Many2one('openacademy.course',
ondelete='cascade', string="Course", required=True)
attendee_ids = fields.Many2many('res.partner', string="Attendees")
| Add domain or and ilike | [REF] openacademy: Add domain or and ilike
| Python | apache-2.0 | KarenKawaii/openacademy-project |
410207e4c0a091e7b4eca9cedd08f381095f50a9 | pypeerassets/card_parsers.py | pypeerassets/card_parsers.py | '''parse cards according to deck issue mode'''
from .pautils import exponent_to_amount
def none_parser(cards):
'''parser for NONE [0] issue mode'''
return None
def custom_parser(cards, parser=None):
'''parser for CUSTOM [1] issue mode,
please provide your custom parser as argument'''
if not parser:
return cards
else:
return parser(cards)
def once_parser(cards):
'''parser for ONCE [2] issue mode'''
return [next(i for i in cards if i.type == "CardIssue")]
def multi_parser(cards):
'''parser for MULTI [4] issue mode'''
return cards
def mono_parser(cards):
'''parser for MONO [8] issue mode'''
return [i for i in cards if i.type == "CardIssue"
and exponent_to_amount(i.amount[0], i.number_of_decimals) == 1]
def unflushable_parser(cards):
'''parser for UNFLUSHABLE [16] issue mode'''
return [i for i in cards if i.type == "CardIssue"]
parsers = {
0: none_parser,
1: custom_parser,
2: once_parser,
4: multi_parser,
8: mono_parser,
16: unflushable_parser
}
| '''parse cards according to deck issue mode'''
from .pautils import exponent_to_amount
def none_parser(cards):
    '''parser for NONE [0] issue mode'''
    # NONE decks never validate any card transaction, whatever the input.
    return None
def custom_parser(cards, parser=None):
    '''parser for CUSTOM [1] issue mode,
    please provide your custom parser as argument'''
    # With no parser supplied the cards pass through untouched.
    return cards if not parser else parser(cards)
def once_parser(cards):
    '''parser for ONCE [2] issue mode'''
    # Only the first CardIssue is valid; later issues are ignored.
    issues = (card for card in cards if card.type == "CardIssue")
    return [next(issues)]
def multi_parser(cards):
    '''parser for MULTI [4] issue mode'''
    # MULTI decks accept every card as-is; no filtering required.
    return cards
def mono_parser(cards):
    '''parser for MONO [8] issue mode'''
    # Keep only CardIssue transactions whose decoded amount is exactly 1.
    accepted = []
    for card in cards:
        if card.type != "CardIssue":
            continue
        if exponent_to_amount(card.amount[0], card.number_of_decimals) == 1:
            accepted.append(card)
    return accepted
def unflushable_parser(cards):
    '''parser for UNFLUSHABLE [16] issue mode'''
    # Only issuance transactions count; transfers are ignored.
    return [card for card in cards if card.type == "CardIssue"]
# Dispatch table: issue-mode name -> parser function.
parsers = {
    'NONE': none_parser,
    # BUG FIX: 'CUSTOM' was mapped to none_parser; the CUSTOM issue mode
    # must dispatch to custom_parser, as the previous numeric map
    # (1: custom_parser) did.
    'CUSTOM': custom_parser,
    'ONCE': once_parser,
    'MULTI': multi_parser,
    'MONO': mono_parser,
    'UNFLUSHABLE': unflushable_parser
}
| Revert "change from string to int" | Revert "change from string to int"
This reverts commit a89f8f251ee95609ec4c1ea412b0aa7ec4603252.
| Python | bsd-3-clause | PeerAssets/pypeerassets |
b8f33283014854bfa2d92896e2061bf17de00836 | pygypsy/__init__.py | pygypsy/__init__.py | """pygypsy
Based on Hueng et all (2009)
Huang, S., Meng, S. X., & Yang, Y. (2009). A growth and yield projection system
(GYPSY) for natural and post-harvest stands in Alberta. Forestry Division,
Alberta Sustainable Resource Development, 25.
Important Acronyms:
aw = white aspen
sb = black spruce
sw = white spruce
pl = logdepole pine
bhage = Breast Height Age
tage = Total age
si_<xx> = estimated site index for species <xx>
y2bh = years until breast height age can be measured
"""
import os
import matplotlib
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
# Force matplotlib to not use any Xwindows backend so that headless docker works
matplotlib.use('Agg')
| """pygypsy
Based on Hueng et all (2009)
Huang, S., Meng, S. X., & Yang, Y. (2009). A growth and yield projection system
(GYPSY) for natural and post-harvest stands in Alberta. Forestry Division,
Alberta Sustainable Resource Development, 25.
Important Acronyms:
aw = white aspen
sb = black spruce
sw = white spruce
pl = logdepole pine
bhage = Breast Height Age
tage = Total age
si_<xx> = estimated site index for species <xx>
y2bh = years until breast height age can be measured
"""
import os
import matplotlib
from ._version import get_versions
import pygypsy.basal_area_increment
__version__ = get_versions()['version']
del get_versions
# Force matplotlib to not use any Xwindows backend so that headless docker works
matplotlib.use('Agg')
__all__ = ['basal_area_increment']
| Revert "Try not importing basal_area_increment to init" | Revert "Try not importing basal_area_increment to init"
This reverts commit 30f9d5367085285a3100d7ec5d6a9e07734c5ccd.
| Python | mit | tesera/pygypsy,tesera/pygypsy |
72ce164a461987f7b9d35ac9a2b3a36386b7f8c9 | ui/Interactor.py | ui/Interactor.py | """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
"""
Interactor
"""
def __init__(self):
super(Interactor, self).__init__()
def AddObserver(self, obj, eventName, callbackFunction):
"""
Creates a callback and stores the callback so that later
on the callbacks can be properly cleaned up.
"""
if not hasattr(self, "_callbacks"):
self._callbacks = []
callback = obj.AddObserver(eventName, callbackFunction)
self._callbacks.append((obj, callback))
def cleanUpCallbacks(self):
"""
Cleans up the vtkCallBacks
"""
if not hasattr(self, "_callbacks"):
return
for obj, callback in self._callbacks:
obj.RemoveObserver(callback)
self._callbacks = []
| """
Interactor
This class can be used to simply managing callback resources.
Callbacks are often used by interactors with vtk and callbacks
are hard to keep track of.
Use multiple inheritance to inherit from this class to get access
to the convenience methods.
Observers for vtk events can be added through
AddObserver(obj, eventName, callbackFunction) and when it is time
to clean up, just call cleanUpCallbacks().
:Authors:
Berend Klein Haneveld
"""
class Interactor(object):
    """
    Mixin that tracks vtk observer registrations so that they can all
    be removed again with a single cleanUpCallbacks() call.
    """

    def __init__(self):
        super(Interactor, self).__init__()

    def AddObserver(self, obj, eventName, callbackFunction, priority=None):
        """
        Register callbackFunction for eventName on obj, remembering the
        returned observer tag for later cleanup. An optional priority is
        forwarded to vtk when given.
        """
        if not hasattr(self, "_callbacks"):
            self._callbacks = []
        if priority is None:
            tag = obj.AddObserver(eventName, callbackFunction)
        else:
            tag = obj.AddObserver(eventName, callbackFunction, priority)
        self._callbacks.append((obj, tag))

    def cleanUpCallbacks(self):
        """
        Remove every observer previously registered through AddObserver.
        """
        if not hasattr(self, "_callbacks"):
            return
        for obj, tag in self._callbacks:
            obj.RemoveObserver(tag)
        self._callbacks = []
| Add possibility of passing priority for adding an observer | Add possibility of passing priority for adding an observer
| Python | mit | berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop |
e85fcd553756eab32cedca214c9b8b86ff48f8b8 | app/forms/vacancy.py | app/forms/vacancy.py | from flask_wtf import Form
from flask_babel import lazy_gettext as _ # noqa
from wtforms import StringField, SubmitField, TextAreaField, \
DateField, SelectField
from wtforms.validators import InputRequired
class VacancyForm(Form):
title = StringField(_('Title'), validators=[InputRequired(
message=_('A title is required.'))])
description = TextAreaField(_('Description'), validators=[InputRequired(
message=_('A description is required.'))])
start_date = DateField(_('Start date'), validators=[InputRequired(
message=_('Start date is required.'))])
end_date = DateField(_('End date'), validators=[InputRequired(
message=_('End date is required.'))])
contract_of_service = SelectField(_('Contract'),
choices=[('voltijd', _('Voltijd')),
('deeltijd', _('Deeltijd')),
('bijbaan', _('Bijbaan')),
('stage', _('Stage'))])
workload = StringField(_('Workload'), validators=[InputRequired(
message=_('Workload is required.'))])
company_id = SelectField(_('Company'), coerce=int)
submit = SubmitField(_('Submit'))
| from flask_wtf import Form
from flask_babel import lazy_gettext as _ # noqa
from wtforms import StringField, SubmitField, TextAreaField, \
DateField, SelectField
from wtforms.validators import InputRequired
class VacancyForm(Form):
# Form for creating/editing a job vacancy. Labels and validation
# messages are wrapped in _() for translation.
title = StringField(_('Title'), validators=[InputRequired(
message=_('A title is required.'))])
description = TextAreaField(_('Description'), validators=[InputRequired(
message=_('A description is required.'))])
start_date = DateField(_('Start date'), validators=[InputRequired(
message=_('Start date is required.'))])
end_date = DateField(_('End date'), validators=[InputRequired(
message=_('End date is required.'))])
# Contract type choices keep their Dutch values as stored keys.
contract_of_service = SelectField(_('Contract'),
choices=[('voltijd', _('Voltijd')),
('deeltijd', _('Deeltijd')),
('bijbaan', _('Bijbaan')),
('stage', _('Stage'))])
# Workload is optional (no InputRequired validator).
workload = StringField(_('Workload'))
# Choices are populated by the view; coerce posted values to int ids.
company_id = SelectField(_('Company'), coerce=int)
submit = SubmitField(_('Submit'))
| Make workload optional when editing vacancies | Make workload optional when editing vacancies
| Python | mit | viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct,viaict/viaduct |
2627c2c5ea6fdbff9d9979765deee8740a11c7c9 | backend/globaleaks/handlers/__init__.py | backend/globaleaks/handlers/__init__.py | # -*- encoding: utf-8 -*-
#
# In here you will find all the handlers that are tasked with handling the
# requests specified in the API.
#
# From these handlers we will be instantiating models objects that will take care
# of our business logic and the generation of the output to be sent to GLClient.
#
# In here we are also tasked with doing the validation of the user supplied
# input. This must be done before instantiating any models object.
# Validation of input may be done with the functions inside of
# globaleaks.rest.
#
# See base.BaseHandler for details on the handlers.
__all__ = ['admin',
'base',
'css',
'files',
'node',
'receiver',
'rtip',
'submission',
'wbtip']
| # -*- encoding: utf-8 -*-
#
# In here you will find all the handlers that are tasked with handling the
# requests specified in the API.
#
# From these handlers we will be instantiating models objects that will take care
# of our business logic and the generation of the output to be sent to GLClient.
#
# In here we are also tasked with doing the validation of the user supplied
# input. This must be done before instantiating any models object.
# Validation of input may be done with the functions inside of
# globaleaks.rest.
#
# See base.BaseHandler for details on the handlers.
__all__ = ['admin',
'base',
'files',
'node',
'receiver',
'rtip',
'submission',
'wbtip']
| Fix module packaging thanks to landscape.io hint | Fix module packaging thanks to landscape.io hint
| Python | agpl-3.0 | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks |
6728bf1fecef3ac1aea9489e7c74333ad0a3b552 | datalogger/__main__.py | datalogger/__main__.py | import sys
from PyQt5.QtWidgets import QApplication
from datalogger.bin.workspace import Workspace
from datalogger.analysis_window_testing import AnalysisWindow
def full():
app = 0
app = QApplication(sys.argv)
# Create the window
w = AnalysisWindow()
w.CurrentWorkspace = Workspace()
# Load the workspace
#CurrentWorkspace.load("//cued-fs/users/general/tab53/ts-home/Documents/urop/Logger 2017/cued_datalogger/tests/test_workspace.wsp")
w.addon_widget.discover_addons(w.CurrentWorkspace.path + "addons/")
# Run the program
w.show()
sys.exit(app.exec_())
if __name__ == '__main__':
full()
| import sys
from PyQt5.QtWidgets import QApplication
from datalogger.bin.workspace import Workspace
from datalogger.analysis_window_testing import AnalysisWindow
def full():
    """Create the analysis window with a fresh workspace and run the app."""
    app = QApplication(sys.argv)
    # Create the window and give it an empty workspace.
    window = AnalysisWindow()
    window.CurrentWorkspace = Workspace()
    # NOTE: workspace loading and addon auto-discovery are currently
    # disabled (previously commented out).
    window.show()
    sys.exit(app.exec_())
if __name__ == '__main__':
full()
| Comment out audo-discovery of addons | Comment out audo-discovery of addons
| Python | bsd-3-clause | torebutlin/cued_datalogger |
a5faeb16a599b4260f32741846607dd05f10bc53 | myproject/myproject/project_settings.py | myproject/myproject/project_settings.py | # Project Settings - Settings that don't exist in settings.py that you want to
# add (e.g. USE_THOUSAND_SEPARATOR, GRAPPELLI_ADMIN_TITLE, CELERYBEAT_SCHEDULER,
# CELERYD_PREFETCH_MULTIPLIER, etc.)
#USE_THOUSAND_SEPARATOR = True
#GRAPPELLI_ADMIN_TITLE = ''
#import djcelery
#djcelery.setup_loader()
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
#CELERYD_PREFETCH_MULTIPLIER = 1
import sys
if 'runserver' in sys.argv:
from local_settings import *
else:
from production_settings import *
| # Project Settings - Settings that don't exist in settings.py that you want to
# add (e.g. USE_THOUSAND_SEPARATOR, GRAPPELLI_ADMIN_TITLE, CELERYBEAT_SCHEDULER,
# CELERYD_PREFETCH_MULTIPLIER, etc.)
#USE_THOUSAND_SEPARATOR = True
#GRAPPELLI_ADMIN_TITLE = ''
#import djcelery
#djcelery.setup_loader()
#CELERYBEAT_SCHEDULER = 'djcelery.schedulers.DatabaseScheduler'
#CELERYD_PREFETCH_MULTIPLIER = 1
import os
import sys

# BUG FIX: the original membership test compared the lower-cased *string*
# value of PRODUCTION against True (which can never match a string) and
# omitted the common "true" spelling. Only string literals belong in the
# accepted set.
if 'PRODUCTION' in os.environ and \
        os.environ['PRODUCTION'].lower() in ('true', 'y', 'yes', '1'):
    from production_settings import *
elif 'runserver' in sys.argv:
    from local_settings import *
else:
    from production_settings import *
| Add ability to specify which settings are used via `export PRODUCTION=0' | Add ability to specify which settings are used via `export PRODUCTION=0'
| Python | unlicense | django-settings/django-settings |
794596fd6f55806eecca1c54e155533590108eee | openspending/lib/unicode_dict_reader.py | openspending/lib/unicode_dict_reader.py | # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
import csv
class EmptyCSVError(Exception):
pass
class UnicodeDictReader(object):
def __init__(self, file_or_str, encoding='utf8', **kwargs):
self.encoding = encoding
self.reader = csv.DictReader(file_or_str, **kwargs)
if not self.reader.fieldnames:
raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
self.keymap = dict((k, k.decode(encoding)) for k in self.reader.fieldnames)
def __iter__(self):
return (self._decode_row(row) for row in self.reader)
def _decode_row(self, row):
return dict(
(self.keymap[k], self._decode_str(v)) for k, v in row.iteritems()
)
def _decode_str(self, s):
if s is None:
return None
return s.decode(self.encoding)
| # work around python2's csv.py's difficulty with utf8
# partly cribbed from http://stackoverflow.com/questions/5478659/python-module-like-csv-dictreader-with-full-utf8-support
import csv
class EmptyCSVError(Exception):
pass
class UnicodeDictReader(object):
    """csv.DictReader wrapper that decodes every key and value to unicode.

    Works around Python 2's csv module, which yields byte strings.
    Raises EmptyCSVError when the input has no header row.
    """

    def __init__(self, fp, encoding='utf8', **kwargs):
        self.encoding = encoding
        self.reader = csv.DictReader(fp, **kwargs)
        if not self.reader.fieldnames:
            raise EmptyCSVError("No fieldnames in CSV reader: empty file?")
        # Decode the header once up front; row values are decoded lazily.
        self.keymap = dict((k, k.decode(encoding)) for k in self.reader.fieldnames)

    def __iter__(self):
        for row in self.reader:
            yield self._decode_row(row)

    def _decode_row(self, row):
        decoded = {}
        for key, value in row.iteritems():
            decoded[self.keymap[key]] = self._decode_str(value)
        return decoded

    def _decode_str(self, s):
        return None if s is None else s.decode(self.encoding)
| Rename misleading parameter name: UnicodeDictReader should have the same interface as csv.DictReader | Rename misleading parameter name: UnicodeDictReader should have the same interface as csv.DictReader
| Python | agpl-3.0 | CivicVision/datahub,nathanhilbert/FPA_Core,USStateDept/FPA_Core,johnjohndoe/spendb,openspending/spendb,spendb/spendb,CivicVision/datahub,pudo/spendb,CivicVision/datahub,nathanhilbert/FPA_Core,pudo/spendb,openspending/spendb,johnjohndoe/spendb,spendb/spendb,nathanhilbert/FPA_Core,openspending/spendb,johnjohndoe/spendb,spendb/spendb,USStateDept/FPA_Core,pudo/spendb,USStateDept/FPA_Core |
207d4c71fbc40dd30c0099769d6f12fcb63f826e | tests/test_utils.py | tests/test_utils.py | import subprocess
from pytest import mark
from pytest_benchmark.utils import clonefunc, get_commit_info
f1 = lambda a: a
def f2(a):
return a
@mark.parametrize('f', [f1, f2])
def test_clonefunc(f):
assert clonefunc(f)(1) == f(1)
assert clonefunc(f)(1) == f(1)
def test_clonefunc_not_function():
assert clonefunc(1) == 1
@mark.parametrize('scm', ['git', 'hg'])
def test_get_commit_info(scm, testdir):
subprocess.check_call([scm, 'init', '.'])
if scm == 'git':
subprocess.check_call('git config user.email you@example.com'.split())
subprocess.check_call('git config user.name you'.split())
testdir.makepyfile('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
out = get_commit_info()
assert out.get('dirty') == False
assert 'id' in out
testdir.makepyfile('sadf')
out = get_commit_info()
assert out.get('dirty') == True
assert 'id' in out
| import subprocess
from pytest import mark
from pytest_benchmark.utils import clonefunc, get_commit_info
pytest_plugins = 'pytester',
f1 = lambda a: a
def f2(a):
return a
@mark.parametrize('f', [f1, f2])
def test_clonefunc(f):
assert clonefunc(f)(1) == f(1)
assert clonefunc(f)(1) == f(1)
def test_clonefunc_not_function():
assert clonefunc(1) == 1
@mark.parametrize('scm', ['git', 'hg'])
def test_get_commit_info(scm, testdir):
subprocess.check_call([scm, 'init', '.'])
if scm == 'git':
subprocess.check_call('git config user.email you@example.com'.split())
subprocess.check_call('git config user.name you'.split())
else:
testdir.tmpdir.join('.hg', 'hgrc').write("""
[ui]
username = you <you@example.com>
""")
testdir.makepyfile('asdf')
subprocess.check_call([scm, 'add', 'test_get_commit_info.py'])
subprocess.check_call([scm, 'commit', '-m', 'asdf'])
out = get_commit_info()
assert out.get('dirty') == False
assert 'id' in out
testdir.makepyfile('sadf')
out = get_commit_info()
assert out.get('dirty') == True
assert 'id' in out
| Add missing username conf for mercurial. | Add missing username conf for mercurial.
| Python | bsd-2-clause | thedrow/pytest-benchmark,SectorLabs/pytest-benchmark,ionelmc/pytest-benchmark,aldanor/pytest-benchmark |
967cf8774a5033f310ca69e7ad86fc79b2628882 | infrastructure/aws/trigger-provision.py | infrastructure/aws/trigger-provision.py | # trigger-provision.py <indexer-provision.sh | web-server-provision.sh>
import boto3
from datetime import datetime, timedelta
import sys
import os.path
provisioners = sys.argv[1:]
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
script = ''
for provisioner in provisioners:
script += open(provisioner).read() + '\n'
user_data = '''#!/usr/bin/env bash
cat > ~ubuntu/provision.sh <<"FINAL"
{script}
FINAL
chmod +x ~ubuntu/provision.sh
sudo -i -u ubuntu ~ubuntu/provision.sh
'''.format(script=script)
# ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160815 (ami-f701cb97)
image_id = 'ami-f701cb97'
launch_spec = {
'ImageId': image_id,
'KeyName': 'Main Key Pair',
'SecurityGroups': ['indexer'],
'UserData': user_data,
'InstanceType': 'c3.2xlarge',
'BlockDeviceMappings': []
}
client.run_instances(MinCount=1, MaxCount=1, **launch_spec)
| # trigger-provision.py <indexer-provision.sh | web-server-provision.sh>
import boto3
from datetime import datetime, timedelta
import sys
import os.path
provisioners = sys.argv[1:]
ec2 = boto3.resource('ec2')
client = boto3.client('ec2')
script = ''
for provisioner in provisioners:
script += open(provisioner).read() + '\n'
user_data = '''#!/usr/bin/env bash
cat > ~ubuntu/provision.sh <<"FINAL"
{script}
FINAL
chmod +x ~ubuntu/provision.sh
sudo -i -u ubuntu ~ubuntu/provision.sh
'''.format(script=script)
# ubuntu/images/hvm-ssd/ubuntu-xenial-16.04-amd64-server-20160815 (ami-f701cb97)
image_id = 'ami-f701cb97'
launch_spec = {
'ImageId': image_id,
'KeyName': 'Main Key Pair',
'SecurityGroups': ['indexer'],
'UserData': user_data,
'InstanceType': 'c3.2xlarge',
'BlockDeviceMappings': [],
'TagSpecifications': [{
'ResourceType': 'instance',
'Tags': [{
'Key': 'provisioner',
'Value': sys.argv[1],
}],
}],
}
client.run_instances(MinCount=1, MaxCount=1, **launch_spec)
| Tag provisioning instances to make them easier to identify | Tag provisioning instances to make them easier to identify
| Python | mpl-2.0 | bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox,bill-mccloskey/searchfox |
2f31af1ea22e0bfefe2042f8d2ba5ad6ce365116 | RG/drivers/s3/secrets.py | RG/drivers/s3/secrets.py | #!/usr/bin/python
SECRETS = {
"AWS_ACCESS_KEY_ID": "AKIAI2TRFQV2HZIHUD4A",
"AWS_SECRET_ACCESS_KEY": "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"
}
| #!/usr/bin/python
SECRETS = {
"AWS_ACCESS_KEY_ID": "XXX",
"AWS_SECRET_ACCESS_KEY": "XXX"
}
| Clear S3 API key from code | Clear S3 API key from code
| Python | apache-2.0 | jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,jcnelson/syndicate,iychoi/syndicate,jcnelson/syndicate,iychoi/syndicate |
fbe7b34c575e30114c54587952c9aa919bc28d81 | south/introspection_plugins/__init__.py | south/introspection_plugins/__init__.py | # This module contains built-in introspector plugins for various common
# Django apps.
# These imports trigger the lower-down files
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
| # This module contains built-in introspector plugins for various common
# Django apps.
# These imports trigger the lower-down files
import south.introspection_plugins.geodjango
import south.introspection_plugins.django_tagging
import south.introspection_plugins.django_taggit
import south.introspection_plugins.django_objectpermissions
import south.introspection_plugins.annoying_autoonetoone
| Add import of django-annoying patch | Add import of django-annoying patch
| Python | apache-2.0 | theatlantic/django-south,theatlantic/django-south |
9ff63d002293da44871307960d5b439b5e6ba48f | app/commands/help.py | app/commands/help.py | def proc(command, message):
return {
"data": {
"status": "ok",
"html": """
<p>
Hi! I can control your Raspberry Pi. Send me the commands <b>in bold</b> to make me do stuff.<br><br>
📷 camera controls<br>
<b>camera photo</b>: I will take a photo and send it back<br>
💡 light controls<br>
<b>lights on <i>color</i></b>: I will shine with the specified <i>color</i> (red, green, blue)<br>
<b>lights off</b>: I will stop shining!<br><br>
⚙ subscription to events<br>
<b>event subscribe security</b>: if I detect motion, I'll send you a photo<br>
<b>event unsubscribe security</b>: I will stop sending photos<br>
</p>
"""
},
"response_required": True
}
| def proc(command, message):
return {
"data": {
"status": "ok",
"html": """
<p>
Hi! I can control your Raspberry Pi. Send me the commands <b>in bold</b> to make me do stuff.<br><br>
📷 camera controls<br>
<b>camera photo</b>: I will take a photo and send it back<br>
⚙ subscription to events<br>
<b>event subscribe security</b>: if I detect motion, I'll send you a photo<br>
<b>event unsubscribe security</b>: I will stop sending photos<br>
</p>
"""
},
"response_required": True
}
| Remove 'lights' command for a while | Remove 'lights' command for a while
| Python | mit | alwye/spark-pi,alwye/spark-pi |
0a6b43f2202cb63aad18c119d7d46916b4d54873 | examples/site-api.py | examples/site-api.py | #!/usr/bin/env python
# Sets up a basic site that can allow two browsers to connect to each
# other via WebRTC DataChannels, sending connection events via WebSockets.
from flask import Flask, send_from_directory
from flask_sockets import Sockets
import json
app = Flask(__name__)
sockets = Sockets(app)
channels = {}
@sockets.route('/channel/<name>')
def channel_socket(ws, name):
if name in channels:
channels[name].append(ws)
else:
channels[name] = [ws]
while not ws.closed:
message = ws.receive()
print "Got msg:", message
if message is None:
continue
for other_ws in channels[name]:
if ws is not other_ws:
other_ws.send(message)
channels[name].remove(ws)
for other_ws in channels[name]:
other_ws.send(json.dumps({"type": "client_disconnected", "msg": {}}))
@app.route('/static/<path:path>')
def send_static(path):
return app.send_from_directory('static', path)
@app.route('/index.html')
def serve_site():
return app.send_static_file("index.html")
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
server.serve_forever()
| #!/usr/bin/env python
# Sets up a basic site that can allow two browsers to connect to each
# other via WebRTC DataChannels, sending connection events via WebSockets.
from flask import Flask, send_from_directory
from flask_sockets import Sockets
import json
app = Flask(__name__)
sockets = Sockets(app)
channels = {}
@sockets.route('/channel/<name>')
def channel_socket(ws, name):
if name in channels:
channels[name].append(ws)
else:
channels[name] = [ws]
while not ws.closed:
message = ws.receive()
print "Got msg:", message
if message is None:
continue
for other_ws in channels[name]:
if ws is not other_ws:
other_ws.send(message)
channels[name].remove(ws)
for other_ws in channels[name]:
other_ws.send(json.dumps({"type": "client_disconnected", "msg": {}}))
@app.route('/static/<path:path>')
def send_static(path):
return app.send_from_directory('static', path)
@app.route('/')
def serve_site():
return app.send_static_file("index.html")
if __name__ == "__main__":
from gevent import pywsgi
from geventwebsocket.handler import WebSocketHandler
server = pywsgi.WSGIServer(('', 5000), app, handler_class=WebSocketHandler)
server.serve_forever()
| Make it easier to load the initial page | Make it easier to load the initial page
| Python | bsd-3-clause | chadnickbok/librtcdcpp,chadnickbok/librtcdcpp |
dae9d7d67aaf2ab8d39b232d243d860d9597bbd2 | django_excel_tools/exceptions.py | django_excel_tools/exceptions.py | class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
| class BaseExcelError(Exception):
def __init__(self, message):
super(BaseExcelError, self).__init__()
self.message = message
class ValidationError(BaseExcelError):
pass
class ColumnNotEqualError(BaseExcelError):
pass
class FieldNotExist(BaseExcelError):
pass
class SerializerConfigError(BaseExcelError):
pass
| Add error when serializer setup has error | Add error when serializer setup has error
| Python | mit | NorakGithub/django-excel-tools |
2e6f0934c67baf27cdf3930d48d6b733995e413f | benchmark/_interfaces.py | benchmark/_interfaces.py | # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Interfaces for the benchmarking results server.
"""
from zope.interface import Interface
class IBackend(Interface):
"""
A backend for storing and querying the results.
"""
def store(result):
"""
Store a single benchmarking result.
:param dict result: The result in the JSON compatible format.
:return: A Deferred that produces an identifier for the stored
result.
"""
def retrieve(id):
"""
Retrieve a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires with the result in the JSON format.
"""
def query(filter, limit):
"""
Retrieve previously stored results that match the given filter.
The returned results will have the same values as specified in the
filter for the fields that are specified in the filter.
:param dict filter: The filter in the JSON compatible format.
:param int limit: The number of the *latest* results to return.
:return: A Deferred that fires with a list of the results
in the JSON compatible format.
"""
def delete(id):
"""
Delete a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires when the result is removed.
"""
| # Copyright ClusterHQ Inc. See LICENSE file for details.
"""
Interfaces for the benchmarking results server.
"""
from zope.interface import Interface
class IBackend(Interface):
"""
A backend for storing and querying the results.
"""
def store(result):
"""
Store a single benchmarking result.
:param dict result: The result in the JSON compatible format.
:return: A Deferred that produces an identifier for the stored
result.
"""
def retrieve(id):
"""
Retrieve a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires with the result in the JSON format.
"""
def query(filter, limit):
"""
Retrieve previously stored results that match the given filter.
The returned results will have the same values as specified in the
filter for the fields that are specified in the filter.
:param dict filter: The filter in the JSON compatible format.
:param int limit: The number of the results to return. The
results are sorted by their timestamp in descending order.
:return: A Deferred that fires with a list of the results
in the JSON compatible format.
"""
def delete(id):
"""
Delete a previously stored result by its identifier.
:param id: The identifier of the result.
:return: A Deferred that fires when the result is removed.
"""
| Make the query docstring a bit clearer | Make the query docstring a bit clearer
| Python | apache-2.0 | ClusterHQ/benchmark-server,ClusterHQ/benchmark-server |
0039eefbfa546f24b3f10031e664341d60e4055c | ranger/commands.py | ranger/commands.py | from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
| from ranger.api.commands import Command
class fzf_select(Command):
"""
:fzf_select
Find a file using fzf.
With a prefix argument select only directories.
See: https://github.com/junegunn/fzf
"""
def execute(self):
import subprocess
import os.path
if self.quantifier:
# match only directories
command="fd -t d --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -type d -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
else:
# match files and directories
command="fd --hidden | fzf +m --preview 'cat {}'"
# command="find -L . \( -path '*/\.*' -o -fstype 'dev' -o -fstype 'proc' \) -prune \
# -o -print 2> /dev/null | sed 1d | cut -b3- | fzf +m"
fzf = self.fm.execute_command(command, universal_newlines=True, stdout=subprocess.PIPE)
stdout, stderr = fzf.communicate()
if fzf.returncode == 0:
fzf_file = os.path.abspath(stdout.rstrip('\n'))
if os.path.isdir(fzf_file):
self.fm.cd(fzf_file)
else:
self.fm.select_file(fzf_file)
| Use previews in ranger fzf | Use previews in ranger fzf
| Python | mit | darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles,darthdeus/dotfiles |
f5e8bfaf5c4f7a2131fbe0ffd0f8d14a316b907e | camoco/Exceptions.py | camoco/Exceptions.py | # Exception abstract class
class CamocoError(Exception):
pass
class CamocoExistsError(CamocoError):
'''
You tried to create a camoco object which already exists
under the same name,type combination.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'You are trying to create a Camoco based object'
'That already exists' + message.format(*args)
)
class CamocoGeneNameError(CamocoError):
'''
Gene names must be beautiful snowflakes.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = 'Gene names must be unique:' + message.format(args)
class CamocoAccessionNameError(CamocoError):
'''
Accession names must be Unique.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'Accession names must be unique:' + message.format(args)
)
class CamocoZeroWindowError(CamocoError):
def __init__(self,expr,message,*args):
self.expr = expr
self.message = (
'Operation requiring window, but window is 0:' + \
message.format(args)
)
| # Exception abstract class
class CamocoError(Exception):
pass
class CamocoExistsError(CamocoError):
'''
You tried to create a camoco object which already exists
under the same name,type combination.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'You are trying to create a Camoco based object'
'That already exists' + message.format(*args)
)
class CamocoGeneNameError(CamocoError):
'''
Gene names must be beautiful snowflakes.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = 'Gene names must be unique:' + message.format(args)
class CamocoAccessionNameError(CamocoError):
'''
Accession names must be Unique.
'''
def __init__(self,expr,message='',*args):
self.expr = expr
self.message = (
'Accession names must be unique:' + message.format(args)
)
class CamocoZeroWindowError(CamocoError):
def __init__(self,expr,message,*args):
self.expr = expr
self.message = (
'Operation requiring window, but window is 0:' + \
message.format(args)
)
class CamocoInteractive(CamocoError):
def __init__(self,expr=None,message='',*args):
self.expr = expr
self.message = 'Camoco interactive ipython session.'
| Add exception for cli command line to run interactively. | Add exception for cli command line to run interactively.
| Python | mit | schae234/Camoco,schae234/Camoco |
5da928fd9b08aeb0028b71535413159da18393b4 | comics/sets/forms.py | comics/sets/forms.py | import datetime
from django import forms
from django.template.defaultfilters import slugify
from comics.core.models import Comic
from comics.sets.models import Set
class NewSetForm(forms.ModelForm):
class Meta:
model = Set
fields = ('name',)
def save(self, commit=True):
set = super(NewSetForm, self).save(commit=False)
set.name = slugify(set.name)
set.last_modified = datetime.datetime.now()
set.last_loaded = datetime.datetime.now()
if commit:
set.save()
return set
class EditSetForm(forms.ModelForm):
comics = forms.ModelMultipleChoiceField(
Comic.objects.all(),
required=False,
widget=forms.CheckboxSelectMultiple)
add_new_comics = forms.BooleanField(
label='Automatically add new comics to the set', required=False)
hide_empty_comics = forms.BooleanField(
label='Hide comics without matching releases from view', required=False)
class Meta:
model = Set
fields = ('comics', 'add_new_comics', 'hide_empty_comics')
def save(self, commit=True):
comics_set = super(EditSetForm, self).save(commit=False)
comics_set.last_modified = datetime.datetime.now()
if commit:
comics_set.save()
self.save_m2m()
return comics_set
| import datetime
from django import forms
from django.template.defaultfilters import slugify
from comics.core.models import Comic
from comics.sets.models import Set
class NewSetForm(forms.ModelForm):
class Meta:
model = Set
fields = ('name',)
def save(self, commit=True):
set = super(NewSetForm, self).save(commit=False)
set.name = slugify(set.name)
set.last_modified = datetime.datetime.now()
set.last_loaded = datetime.datetime.now()
if commit:
set.save()
return set
class EditSetForm(forms.ModelForm):
comics = forms.ModelMultipleChoiceField(
Comic.objects.filter(active=True),
required=False,
widget=forms.CheckboxSelectMultiple)
add_new_comics = forms.BooleanField(
label='Automatically add new comics to the set', required=False)
hide_empty_comics = forms.BooleanField(
label='Hide comics without matching releases from view', required=False)
class Meta:
model = Set
fields = ('comics', 'add_new_comics', 'hide_empty_comics')
def save(self, commit=True):
comics_set = super(EditSetForm, self).save(commit=False)
comics_set.last_modified = datetime.datetime.now()
if commit:
comics_set.save()
self.save_m2m()
return comics_set
| Exclude inactive comics from sets editing, effectively throwing them out of the set when saved | Exclude inactive comics from sets editing, effectively throwing them out of the set when saved
| Python | agpl-3.0 | datagutten/comics,klette/comics,jodal/comics,datagutten/comics,jodal/comics,klette/comics,jodal/comics,datagutten/comics,jodal/comics,klette/comics,datagutten/comics |
5425e10a39ce66d3c50e730d1e7b7878e8e88eb0 | dmoj/executors/PHP7.py | dmoj/executors/PHP7.py | from .php_executor import PHPExecutor
from dmoj.judgeenv import env
class Executor(PHPExecutor):
name = 'PHP7'
command = env['runtime'].get('php7')
fs = ['.*\.so', '/etc/localtime$', '.*\.ini$']
initialize = Executor.initialize
| from .php_executor import PHPExecutor
from dmoj.judgeenv import env
class Executor(PHPExecutor):
name = 'PHP7'
command = env['runtime'].get('php7')
fs = ['.*\.so', '/etc/localtime$', '.*\.ini$', '/dev/urandom$']
initialize = Executor.initialize
| Allow /dev/urandom for PHP 7 | Allow /dev/urandom for PHP 7 | Python | agpl-3.0 | DMOJ/judge,DMOJ/judge,DMOJ/judge |
ec8d7b035617f9239a0a52be346d8611cf77cb6f | integration-tests/features/src/utils.py | integration-tests/features/src/utils.py | """Unsorted utility functions used in integration tests."""
import requests
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
| """Unsorted utility functions used in integration tests."""
import requests
import subprocess
def download_file_from_url(url):
"""Download file from the given URL and do basic check of response."""
assert url
response = requests.get(url)
assert response.status_code == 200
assert response.text is not None
return response.text
def split_comma_separated_list(l):
"""Split the list into elements separated by commas."""
return [i.strip() for i in l.split(',')]
def oc_login(url, username, password, tls_verify=True):
"""Wrapper around `oc login`.
:param url: str, OpenShift URL
:param username: str, username
:param password: str, password
:param tls_verify: bool, verify server's certificate?; default: True
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'login', url, '--username', username, '--password', password]
if not tls_verify:
command.extend(['--insecure-skip-tls-verify=true'])
try:
subprocess.check_call(command)
except subprocess.CalledProcessError as e:
# replace password with '***' so somebody will not accidentally leak it in CI logs
e.cmd = [x if x != password else '***' for x in e.cmd]
raise e
def oc_delete_pods(selector, namespace=None):
"""Wrapper around `oc delete`.
Selector determines which pods will be deleted.
More on selectors:
https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors
Note k8s/OpenShift will immediately restart deleted pods,
to match desired number of replicas for given deployment.
The expectation is that the user is already logged in
and has permissions to delete pods.
Example usage:
oc_delete_pods('service=bayesian-pgbouncer'
:param selector: str, selector identifying pods that will be deleted
:param namespace: str, namespace in which `oc delete` command should be executed,
default: currently selected namespace
:return: None on success, raises `subprocess.CalledProcessError` on error
"""
command = ['oc', 'delete', 'pods', '--selector=', selector]
if namespace:
command.extend(['--namespace', namespace])
subprocess.check_call(command)
| Add few oc wrappers for future resiliency testing | Add few oc wrappers for future resiliency testing
Not used anywhere yet.
| Python | apache-2.0 | jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common,tisnik/fabric8-analytics-common,tisnik/fabric8-analytics-common,jpopelka/fabric8-analytics-common |
65266813c6438eeb67002e17dd4cd70a00e84b5d | meta-iotqa/lib/oeqa/runtime/boottime.py | meta-iotqa/lib/oeqa/runtime/boottime.py | #[PROTEXCAT]
#\License: ALL RIGHTS RESERVED
"""System boot time"""
import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.helper import collect_pnp_log
class BootTimeTest(oeRuntimeTest):
def _setup(self):
(status,output) = self.target.copy_to(os.path.join(os.path.dirname(__file__), 'files','systemd-analyze'),"/tmp/systemd-analyze")
self.assertEqual(status, 0, msg="systemd-analyze could not be copied. Output: %s" % output)
(status,output) = self.target.run(" ls -la /tmp/systemd-analyze")
self.assertEqual(status, 0, msg="Failed to find systemd-analyze command")
def test_boot_time(self):
self._setup()
filename=os.path.basename(__file__)
casename=os.path.splitext(filename)[0]
(status,output) = self.target.run("/tmp/systemd-analyze time | awk -F '=' '{print $2}'")
collect_pnp_log(casename, output)
print "\n%s:%s\n" %(casename, output)
self.assertEqual(status, 0, output)
| #[PROTEXCAT]
#\License: ALL RIGHTS RESERVED
"""System boot time"""
import os
from oeqa.oetest import oeRuntimeTest
from oeqa.runtime.helper import collect_pnp_log
class BootTimeTest(oeRuntimeTest):
def _setup(self):
(status, output) = self.target.copy_to(
os.path.join(os.path.dirname(__file__), 'files',
'systemd-analyze'), "/tmp/systemd-analyze")
self.assertEqual(
status,
0,
msg="systemd-analyze could not be copied. Output: %s" %
output)
(status, output) = self.target.run(" ls -la /tmp/systemd-analyze")
self.assertEqual(
status,
0,
msg="Failed to find systemd-analyze command")
def test_boot_time(self):
self._setup()
filename = os.path.basename(__file__)
casename = os.path.splitext(filename)[0]
(status, output) = self.target.run("/tmp/systemd-analyze time"
" | awk -F '=' '{print $2}'")
collect_pnp_log(casename, output)
print "\n%s:%s\n" % (casename, output)
self.assertEqual(status, 0, output)
| Clean the code with autopen8 | Clean the code with autopen8
| Python | mit | daweiwu/meta-iotqa-1,ostroproject/meta-iotqa,ostroproject/meta-iotqa,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1,wanghongjuan/meta-iotqa-1,ostroproject/meta-iotqa,daweiwu/meta-iotqa-1,wanghongjuan/meta-iotqa-1 |
860e16f506d0a601540847fe21e617d8f7fbf882 | malware/pickle_tool.py | malware/pickle_tool.py | import os
import cPickle as pickle
import simplejson as json
def update_pickle(new_cache):
# print('Updated pickle')
pickle.dump(new_cache, open('url_cache.pkl', 'wb'), 2)
def check_pickle():
# print('Checking in pickle')
if not os.path.isfile('url_cache.pkl'):
malicious_url = {}
pickle.dump(malicious_url, open('url_cache.pkl', 'wb'), 2)
cache = pickle.load(open('url_cache.pkl', 'rb'))
return cache
def update_json(data):
with open("url_cache.json", "wb") as fp:
try:
json.dump(data, fp)
except:
print "UnicodeDecodeError: 'utf8' codec can't decode byte 0xb8"
def check_json():
if not os.path.isfile('url_cache.json'):
init = {}
with open("url_cache.json", "wb") as fp:
json.dump(init, fp)
with open("url_cache.json", "rb") as fp:
# cache = json.load(fp)
cache = json.load(fp, "ISO-8859-1")
return cache
| import os
import cPickle as pickle
import simplejson as json
def update_pickle(new_cache):
# print('Updated pickle')
pickle.dump(new_cache, open('url_cache.pkl', 'wb'), 2)
def check_pickle():
# print('Checking in pickle')
if not os.path.isfile('url_cache.pkl'):
malicious_url = {}
pickle.dump(malicious_url, open('url_cache.pkl', 'wb'), 2)
cache = pickle.load(open('url_cache.pkl', 'rb'))
return cache
def update_json(data):
with open("url_cache.json", "wb") as fp:
try:
json.dump(data, fp)
except:
print "UnicodeDecodeError: 'utf8' codec can't decode byte 0xb8"
def check_json():
if not os.path.isfile('url_cache.json'):
init = {}
with open("url_cache.json", "wb") as fp:
json.dump(init, fp)
with open("url_cache.json", "rb") as fp:
# cache = json.load(fp)
cache = json.load(fp, "ISO-8859-1")
return cache
def pickle2json(pkl):
if os.path.isfile(pkl):
cache = pickle.load(open(pkl, 'rb'))
with open('pkl2json.json', 'wb') as fp:
json.dump(cache, fp)
else:
print "No such file"
| Add pickle convert to json method | Add pickle convert to json method
| Python | apache-2.0 | John-Lin/malware,John-Lin/malware |
45c4c1f627f224f36c24acebbec43a17a5c59fcb | nib/plugins/lesscss.py | nib/plugins/lesscss.py | from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import sh
from nib import Processor, resource
@resource('.less')
class LessCSSProcessor(Processor):
def resource(self, resource):
filepath = path.join(self.options['resource_path'],
resource.path + resource.extension)
resource.content = bytearray(str(sh.lessc(filepath)), 'utf-8')
resource.extension = '.css'
return resource
| from __future__ import absolute_import, division, print_function, unicode_literals
from os import path
import sh
from nib import Processor, resource
@resource('.less')
class LessCSSProcessor(Processor):
def resource(self, resource):
filepath = path.join(self.options['resource_path'],
resource.path + resource.extension)
print("Processing: ", filepath)
resource.content = bytearray(str(sh.lessc(filepath)), 'utf-8')
resource.extension = '.css'
return resource
| Print out file being processed, need to do to other modules, add -v flag | Print out file being processed, need to do to other modules, add -v flag
| Python | mit | jreese/nib |
3266ecee8f9a6601d8678a91e8923c6d7137adb3 | oggm/tests/conftest.py | oggm/tests/conftest.py | import pytest
import logging
import multiprocessing as mp
from oggm import cfg, utils
import pickle
logger = logging.getLogger(__name__)
def pytest_configure(config):
if config.pluginmanager.hasplugin('xdist'):
try:
from ilock import ILock
utils.lock = ILock("oggm_xdist_download_lock")
logger.info("ilock locking setup successfully for xdist tests")
except:
logger.warning("could not setup ilock locking for distributed tests")
| import pytest
import logging
import getpass
from oggm import cfg, utils
logger = logging.getLogger(__name__)
def pytest_configure(config):
if config.pluginmanager.hasplugin('xdist'):
try:
from ilock import ILock
utils.lock = ILock("oggm_xdist_download_lock_" + getpass.getuser())
logger.info("ilock locking setup successfully for xdist tests")
except:
logger.warning("could not setup ilock locking for distributed tests")
| Make ilock lock name user specific | Make ilock lock name user specific
| Python | bsd-3-clause | OGGM/oggm,OGGM/oggm,bearecinos/oggm,bearecinos/oggm,juliaeis/oggm,TimoRoth/oggm,anoukvlug/oggm,TimoRoth/oggm,juliaeis/oggm,anoukvlug/oggm |
0ae9b232b82285f2fa275b8ffa5dced6b9377b0e | keyring/credentials.py | keyring/credentials.py | import os
import abc
class Credential(metaclass=abc.ABCMeta):
"""Abstract class to manage credentials"""
@abc.abstractproperty
def username(self):
return None
@abc.abstractproperty
def password(self):
return None
class SimpleCredential(Credential):
"""Simple credentials implementation"""
def __init__(self, username, password):
self._username = username
self._password = password
@property
def username(self):
return self._username
@property
def password(self):
return self._password
class EnvironCredential(Credential):
"""Source credentials from environment variables.
Actual sourcing is deferred until requested.
"""
def __init__(self, user_env_var, pwd_env_var):
self.user_env_var = user_env_var
self.pwd_env_var = pwd_env_var
def _get_env(self, env_var):
"""Helper to read an environment variable"""
value = os.environ.get(env_var)
if not value:
raise ValueError('Missing environment variable:%s' % env_var)
return value
@property
def username(self):
return self._get_env(self.user_env_var)
@property
def password(self):
return self._get_env(self.pwd_env_var)
| import os
import abc
class Credential(metaclass=abc.ABCMeta):
"""Abstract class to manage credentials"""
@abc.abstractproperty
def username(self):
return None
@abc.abstractproperty
def password(self):
return None
class SimpleCredential(Credential):
"""Simple credentials implementation"""
def __init__(self, username, password):
self._username = username
self._password = password
@property
def username(self):
return self._username
@property
def password(self):
return self._password
class EnvironCredential(Credential):
"""Source credentials from environment variables.
Actual sourcing is deferred until requested.
"""
def __init__(self, user_env_var, pwd_env_var):
self.user_env_var = user_env_var
self.pwd_env_var = pwd_env_var
def __eq__(self, other: object) -> bool:
if not isinstance(other, EnvironCredential):
return NotImplemented
return (
self.user_env_var == other.user_env_var
and self.pwd_env_var == other.pwd_env_var
)
def _get_env(self, env_var):
"""Helper to read an environment variable"""
value = os.environ.get(env_var)
if not value:
raise ValueError('Missing environment variable:%s' % env_var)
return value
@property
def username(self):
return self._get_env(self.user_env_var)
@property
def password(self):
return self._get_env(self.pwd_env_var)
| Add equality operator to EnvironCredential | Add equality operator to EnvironCredential
Equality operator is useful for testing EnvironCredential
| Python | mit | jaraco/keyring |
e8c180e65dda3422ab472a6580183c715ef325c3 | easyium/decorator.py | easyium/decorator.py | __author__ = 'karl.gong'
from .exceptions import UnsupportedOperationException
def SupportedBy(*web_driver_types):
def handle_func(func):
def handle_args(*args, **kwargs):
wd_types = []
for wd_type in web_driver_types:
if isinstance(wd_type, list):
wd_types += wd_type
else:
wd_types += [wd_type]
current_web_driver_type = args[0].get_web_driver_type()
if current_web_driver_type not in wd_types:
raise UnsupportedOperationException(
"This operation is not supported by web driver [%s]." % current_web_driver_type)
return func(*args, **kwargs)
return handle_args
return handle_func
| __author__ = 'karl.gong'
from .exceptions import UnsupportedOperationException
def SupportedBy(*web_driver_types):
def handle_func(func):
def handle_args(*args, **kwargs):
wd_types = []
for wd_type in web_driver_types:
if isinstance(wd_type, list):
wd_types += wd_type
else:
wd_types += [wd_type]
current_web_driver_type = args[0].get_web_driver_type()
if current_web_driver_type not in wd_types:
raise UnsupportedOperationException(
"Operation [%s] is not supported by web driver [%s]." % (func.__name__, current_web_driver_type))
return func(*args, **kwargs)
return handle_args
return handle_func
| Update error message of UnsupportedOperationException. | Update error message of UnsupportedOperationException.
| Python | apache-2.0 | KarlGong/easyium-python,KarlGong/easyium |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.