commit
stringlengths 40
40
| old_file
stringlengths 4
236
| new_file
stringlengths 4
236
| old_contents
stringlengths 1
3.26k
| new_contents
stringlengths 16
4.43k
| subject
stringlengths 16
624
| message
stringlengths 17
3.29k
| lang
stringclasses 5
values | license
stringclasses 13
values | repos
stringlengths 5
91.5k
|
|---|---|---|---|---|---|---|---|---|---|
46d64030c8724f016233703922cbc619eef2c179
|
examples/push_pull/architect.py
|
examples/push_pull/architect.py
|
from functions import print_message
from osbrain.core import Proxy
pusher = Proxy('Pusher')
puller = Proxy('Puller')
addr = pusher.bind('push')
puller.connect(addr, print_message)
puller.run()
pusher.send(addr, 'Hello world!')
|
from functions import print_message
from osbrain.core import Proxy
pusher = Proxy('Pusher')
puller = Proxy('Puller')
addr = pusher.bind('PUSH', alias='push')
puller.connect(addr, handler=print_message)
puller.run()
pusher.send('push', 'Hello, world!')
|
Update push_pull example to work with latest changes
|
Update push_pull example to work with latest changes
|
Python
|
apache-2.0
|
opensistemas-hub/osbrain
|
39da25f8d221605012c629b3d08478cfe858df36
|
poradnia/cases/migrations/0024_auto_20150809_2148.py
|
poradnia/cases/migrations/0024_auto_20150809_2148.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Count
def delete_empty(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Case = apps.get_model("cases", "Case")
pks = Case.objects.annotate(record_count=Count('record')).filter(record_count=0).values('id')
Case.objects.filter(pk__in=pks).update(status='2')
class Migration(migrations.Migration):
dependencies = [
('cases', '0023_auto_20150809_2131'),
('records', '0006_auto_20150503_1741'),
]
operations = [
migrations.RunPython(delete_empty)
]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.db.models import Count
def delete_empty(apps, schema_editor):
# We get the model from the versioned app registry;
# if we directly import it, it'll be the wrong version
Case = apps.get_model("cases", "Case")
pks = [x[0] for x in Case.objects.annotate(record_count=Count('record')).filter(record_count=0).values_list('id')]
Case.objects.filter(pk__in=pks).update(status='2')
class Migration(migrations.Migration):
dependencies = [
('cases', '0023_auto_20150809_2131'),
('records', '0006_auto_20150503_1741'),
]
operations = [
migrations.RunPython(delete_empty)
]
|
Fix case migrations for MySQL
|
Fix case migrations for MySQL
|
Python
|
mit
|
watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,watchdogpolska/poradnia,rwakulszowa/poradnia,watchdogpolska/poradnia,watchdogpolska/poradnia.siecobywatelska.pl,rwakulszowa/poradnia,watchdogpolska/poradnia.siecobywatelska.pl
|
286cba2b3e7cf323835acd07f1e3bb510d74bcb2
|
biopsy/tests.py
|
biopsy/tests.py
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.db import models
from biopsy.models import Biopsy
class BiopsyTest(TestCase):
def biopy_test(self):
biopsy = Biopsy(
clinical_information= "clinica",
macroscopic= "macroscopia",
microscopic= "microscopia",
conclusion= "conclusao",
notes= "nota",
footer= "legenda"
)
biopsy.save()
self.assertEquals("clinica",biopsy.clinical_information)
self.assertEquals("macroscopia",biopsy.macroscopic)
self.assertEquals("microscopia",biopsy.microscopic)
self.assertEquals("conclusao",biopsy.conclusion)
self.assertEquals("nota",biopsy.notes)
self.assertEquals("legenda",biopsy.footer)
|
# -*- coding: utf-8 -*-
from django.test import TestCase
from django.db import models
from biopsy.models import Biopsy
class BiopsyTest(TestCase):
def biopy_test(self):
biopsy = Biopsy(
clinical_information= "clinica",
macroscopic= "macroscopia",
microscopic= "microscopia",
conclusion= "conclusao",
notes= "nota",
footer= "legenda",
status = "status",
exam = "exame"
)
biopsy.save()
self.assertEquals("clinica",biopsy.clinical_information)
self.assertEquals("macroscopia",biopsy.macroscopic)
self.assertEquals("microscopia",biopsy.microscopic)
self.assertEquals("conclusao",biopsy.conclusion)
self.assertEquals("nota",biopsy.notes)
self.assertEquals("legenda",biopsy.footer)
self.assertEquals("status",biopsy.status)
self.assertEquals("exame",biopsy.exam)
|
Add status and exam in test Biopsy
|
Add status and exam in test Biopsy
|
Python
|
mit
|
msfernandes/anato-hub,msfernandes/anato-hub,msfernandes/anato-hub,msfernandes/anato-hub
|
d0ec3ee9b974fb6956c32e8dfdd6d20ea4da7cff
|
pwndbg/inthook.py
|
pwndbg/inthook.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This hook is necessary for compatibility with Python2.7 versions of GDB
since they cannot directly cast to integer a gdb.Value object that is
not already an integer type.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import gdb
import pwndbg.typeinfo
if sys.version_info < (3,0):
import __builtin__ as builtins
_int = builtins.int
# We need this class to get isinstance(7, xint) to return True
class IsAnInt(type):
def __instancecheck__(self, other):
return isinstance(other, _int)
class xint(builtins.int):
__metaclass__ = IsAnInt
def __new__(cls, value, *a, **kw):
if isinstance(value, gdb.Value):
if pwndbg.typeinfo.is_pointer(value):
value = value.cast(pwndbg.typeinfo.ulong)
else:
value = value.cast(pwndbg.typeinfo.long)
return _int(_int(value, *a, **kw))
builtins.int = xint
globals()['int'] = xint
# Additionally, we need to compensate for Python2
else:
import builtins
builtins.long = int
globals()['long'] = int
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This hook is necessary for compatibility with Python2.7 versions of GDB
since they cannot directly cast to integer a gdb.Value object that is
not already an integer type.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
import gdb
import pwndbg.typeinfo
if sys.version_info < (3,0):
import __builtin__ as builtins
else:
import builtins
_int = builtins.int
# We need this class to get isinstance(7, xint) to return True
class IsAnInt(type):
def __instancecheck__(self, other):
return isinstance(other, _int)
class xint(builtins.int):
__metaclass__ = IsAnInt
def __new__(cls, value, *a, **kw):
if isinstance(value, gdb.Value):
if pwndbg.typeinfo.is_pointer(value):
value = value.cast(pwndbg.typeinfo.ulong)
else:
value = value.cast(pwndbg.typeinfo.long)
return _int(_int(value, *a, **kw))
builtins.int = xint
globals()['int'] = xint
if sys.version_info >= (3,0):
builtins.long = xint
globals()['long'] = xint
|
Add int hook to Python3
|
Add int hook to Python3
Fixes #120
|
Python
|
mit
|
pwndbg/pwndbg,cebrusfs/217gdb,cebrusfs/217gdb,pwndbg/pwndbg,cebrusfs/217gdb,chubbymaggie/pwndbg,disconnect3d/pwndbg,disconnect3d/pwndbg,pwndbg/pwndbg,0xddaa/pwndbg,zachriggle/pwndbg,0xddaa/pwndbg,disconnect3d/pwndbg,anthraxx/pwndbg,chubbymaggie/pwndbg,0xddaa/pwndbg,cebrusfs/217gdb,anthraxx/pwndbg,zachriggle/pwndbg,anthraxx/pwndbg,pwndbg/pwndbg,anthraxx/pwndbg
|
c8a0279d421c2837e4f7e4ef1eaf2cc9cb94210c
|
scripts/mkstdlibs.py
|
scripts/mkstdlibs.py
|
#!/usr/bin/env python3
from sphinx.ext.intersphinx import fetch_inventory
URL = "https://docs.python.org/{}/objects.inv"
PATH = "isort/stdlibs/py{}.py"
VERSIONS = [("2", "7"), ("3", "5"), ("3", "6"), ("3", "7"), ("3", "8")]
DOCSTRING = """
File contains the standard library of Python {}.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
class FakeConfig:
intersphinx_timeout = None
tls_verify = True
class FakeApp:
srcdir = ""
config = FakeConfig()
for version_info in VERSIONS:
version = ".".join(version_info)
url = URL.format(version)
invdata = fetch_inventory(FakeApp(), "", url)
modules = set()
for module in invdata["py:module"]:
root, *_ = module.split(".")
if root not in ["__future__", "__main__"]:
modules.add(root)
path = PATH.format("".join(version_info))
with open(path, "w") as stdlib_file:
docstring = DOCSTRING.format(version)
stdlib_file.write(f'"""{docstring}"""\n\n')
stdlib_file.write("stdlib = {\n")
for module in sorted(modules):
stdlib_file.write(f' "{module}",\n')
stdlib_file.write("}\n")
|
#!/usr/bin/env python3
from sphinx.ext.intersphinx import fetch_inventory
URL = "https://docs.python.org/{}/objects.inv"
PATH = "isort/stdlibs/py{}.py"
VERSIONS = [("2", "7"), ("3", "5"), ("3", "6"), ("3", "7"), ("3", "8"), ("3", "9")]
DOCSTRING = """
File contains the standard library of Python {}.
DO NOT EDIT. If the standard library changes, a new list should be created
using the mkstdlibs.py script.
"""
class FakeConfig:
intersphinx_timeout = None
tls_verify = True
user_agent = ""
class FakeApp:
srcdir = ""
config = FakeConfig()
for version_info in VERSIONS:
version = ".".join(version_info)
url = URL.format(version)
invdata = fetch_inventory(FakeApp(), "", url)
modules = set()
for module in invdata["py:module"]:
root, *_ = module.split(".")
if root not in ["__future__", "__main__"]:
modules.add(root)
path = PATH.format("".join(version_info))
with open(path, "w") as stdlib_file:
docstring = DOCSTRING.format(version)
stdlib_file.write(f'"""{docstring}"""\n\n')
stdlib_file.write("stdlib = {\n")
for module in sorted(modules):
stdlib_file.write(f' "{module}",\n')
stdlib_file.write("}\n")
|
Update script to include empty user agent
|
Update script to include empty user agent
|
Python
|
mit
|
PyCQA/isort,PyCQA/isort
|
4160fd07d428e26c4de6aee280d948f5044f2c9e
|
kimochi/scripts/initializedb.py
|
kimochi/scripts/initializedb.py
|
import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Base,
User,
Site,
SiteAPIKey,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
DBSession.add(User(email='test@example.com', password='test', admin=True))
DBSession.add(Site(name='asd', key='80d621df066348e5938a469730ae0cab'))
DBSession.add(SiteAPIKey(site_id=1, key='GIKfxIcIHPbM0uX9PrQ1To29Pb2on0pa'))
|
import os
import sys
import transaction
from sqlalchemy import engine_from_config
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models import (
DBSession,
Base,
User,
Site,
SiteAPIKey,
)
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = engine_from_config(settings, 'sqlalchemy.')
DBSession.configure(bind=engine)
Base.metadata.create_all(engine)
with transaction.manager:
user = User(email='test@example.com', password='test', admin=True)
DBSession.add(user)
site = Site(name='asd', key='80d621df066348e5938a469730ae0cab')
DBSession.add(site)
DBSession.add(SiteAPIKey(site_id=1, key='GIKfxIcIHPbM0uX9PrQ1To29Pb2on0pa'))
site.users.append(user)
|
Make sure we add the user to the site as well
|
Make sure we add the user to the site as well
|
Python
|
mit
|
matslindh/kimochi,matslindh/kimochi
|
525bfce19f593cb598669cdf2eec46747a4b6952
|
goodreadsapi.py
|
goodreadsapi.py
|
#!/usr/bin/env python
import re
from xml.parsers.expat import ExpatError
import requests
import xmltodict
from settings import goodreads_api_key
def get_goodreads_ids(comment_msg):
# receives goodreads url
# returns the id using regex
regex = r'goodreads.com/book/show/(\d+)'
return set(re.findall(regex, comment_msg))
def get_book_details_by_id(goodreads_id):
api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
r = requests.get(api_url.format(goodreads_id, goodreads_api_key))
try:
book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
except (TypeError, KeyError, ExpatError):
return False
keys = ['title', 'average_rating', 'ratings_count', 'description',
'num_pages']
book = {}
for k in keys:
book[k] = book_data[k]
if type(book_data['authors']['author']) == list:
authors = [author['name'] for author in book_data['authors']['author']]
authors = ', '.join(authors)
else:
authors = book_data['authors']['author']['name']
book['authors'] = authors
return book
|
#!/usr/bin/env python
import re
from xml.parsers.expat import ExpatError
import requests
import xmltodict
from settings import goodreads_api_key
def get_goodreads_ids(comment_msg):
# receives goodreads url
# returns the id using regex
regex = r'goodreads.com/book/show/(\d+)'
return set(re.findall(regex, comment_msg))
def get_book_details_by_id(goodreads_id):
api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
r = requests.get(api_url.format(goodreads_id, goodreads_api_key))
try:
book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
except (TypeError, KeyError, ExpatError):
return False
keys = ['title', 'average_rating', 'ratings_count', 'description',
'num_pages', 'publication_year']
book = {}
for k in keys:
book[k] = book_data.get(k)
if type(book_data['authors']['author']) == list:
authors = [author['name'] for author in book_data['authors']['author']]
authors = ', '.join(authors)
else:
authors = book_data['authors']['author']['name']
book['authors'] = authors
return book
|
Add `publication_year` to return data
|
Add `publication_year` to return data
|
Python
|
mit
|
avinassh/Reddit-GoodReads-Bot
|
4c2dd9dd6dc0f9ff66a36a114c90897dab8da7e5
|
goodreadsapi.py
|
goodreadsapi.py
|
#!/usr/bin/env python
import re
import requests
import xmltodict
from settings import goodreads_api_key
def get_goodreads_ids(comment_msg):
# receives goodreads url
# returns the id using regex
regex = r'goodreads.com/book/show/(\d+)'
return set(re.findall(regex, comment_msg))
def get_book_details_by_id(goodreads_id):
api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
r = requests.get(api_url.format(goodreads_id, goodreads_api_key))
try:
book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
except (TypeError, KeyError):
return False
keys = ['title', 'average_rating', 'ratings_count', 'description',
'num_pages']
book = {}
for k in keys:
book[k] = book_data[k]
if type(book_data['authors']['author']) == list:
authors = [author['name'] for author in book_data['authors']['author']]
authors = ', '.join(authors)
else:
authors = book_data['authors']['author']['name']
book['authors'] = authors
return book
|
#!/usr/bin/env python
import re
from xml.parsers.expat import ExpatError
import requests
import xmltodict
from settings import goodreads_api_key
def get_goodreads_ids(comment_msg):
# receives goodreads url
# returns the id using regex
regex = r'goodreads.com/book/show/(\d+)'
return set(re.findall(regex, comment_msg))
def get_book_details_by_id(goodreads_id):
api_url = 'http://goodreads.com/book/show/{0}?format=xml&key={1}'
r = requests.get(api_url.format(goodreads_id, goodreads_api_key))
try:
book_data = xmltodict.parse(r.content)['GoodreadsResponse']['book']
except (TypeError, KeyError, ExpatError):
return False
keys = ['title', 'average_rating', 'ratings_count', 'description',
'num_pages']
book = {}
for k in keys:
book[k] = book_data[k]
if type(book_data['authors']['author']) == list:
authors = [author['name'] for author in book_data['authors']['author']]
authors = ', '.join(authors)
else:
authors = book_data['authors']['author']['name']
book['authors'] = authors
return book
|
Update GR API to handle Expat Error
|
Update GR API to handle Expat Error
|
Python
|
mit
|
avinassh/Reddit-GoodReads-Bot
|
7615bfa2a58db373c3e102e7d0205f265d9c4d57
|
dxtbx/tst_dxtbx.py
|
dxtbx/tst_dxtbx.py
|
from boost.python import streambuf
from dxtbx import read_uint16
f = open('/Users/graeme/data/demo/insulin_1_001.img', 'rb')
hdr = f.read(512)
l = read_uint16(streambuf(f), 2304 * 2304)
print sum(l)
|
from boost.python import streambuf
from dxtbx import read_uint16
import sys
from dxtbx.format.Registry import Registry
format = Registry.find(sys.argv[1])
i = format(sys.argv[1])
size = i.get_detector().get_image_size()
f = open(sys.argv[1], 'rb')
hdr = f.read(512)
l = read_uint16(streambuf(f), int(size[0] * size[1]))
print sum(l)
|
Clean up test case: not finished yet though
|
Clean up test case: not finished yet though
|
Python
|
bsd-3-clause
|
dials/dials,dials/dials,dials/dials,dials/dials,dials/dials
|
675c05bd685d550e3c46137f2f52dcdb125cefa0
|
tests/test_speed.py
|
tests/test_speed.py
|
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import glob
import fnmatch
import traceback
import logging
import numpy
import pytest
import lasio
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
stegfn = lambda vers, fn: os.path.join(os.path.dirname(__file__), "examples", vers, fn)
logger = logging.getLogger(__name__)
def test_read_v12_sample_big():
l = lasio.read(stegfn("1.2", "sample_big.las"))
|
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(__file__)))
import glob
import fnmatch
import traceback
import logging
import numpy
import pytest
import lasio
test_dir = os.path.dirname(__file__)
egfn = lambda fn: os.path.join(os.path.dirname(__file__), "examples", fn)
stegfn = lambda vers, fn: os.path.join(os.path.dirname(__file__), "examples", vers, fn)
logger = logging.getLogger(__name__)
def read_file():
las = lasio.read(stegfn("1.2", "sample_big.las"))
def test_read_v12_sample_big(benchmark):
benchmark(read_file)
|
Add benchmark test for the speed of reading a LAS file
|
Add benchmark test for the speed of reading a LAS file
To run it you need to have `pytest-benchmark` installed, and
run the tests using:
```
$ pytest lasio/tests/tests_speed.py
```
To compare two branches, you need to run and store the benchmark from the first branch e.g. master
and then run and compare the benchmark from the second branch. e.g.
```
$ git checkout master
$ mkdir ..\lasio-benchmarks
$ pytest tests/\test_speed.py --benchmark-autosave --benchmark-storage ..\lasio-benchmarks --benchmark-compare
|
Python
|
mit
|
kwinkunks/lasio,kinverarity1/lasio,kinverarity1/las-reader
|
16bd36fe6fdcbd267413eabe1997337165775f28
|
taOonja/game/admin.py
|
taOonja/game/admin.py
|
from django.contrib import admin
# Register your models here.
|
from django.contrib import admin
from game.models import *
class LocationAdmin(admin.ModelAdmin):
model = Location
admin.site.register(Location, LocationAdmin)
class DetailAdmin(admin.ModelAdmin):
model = Detail
admin.site.register(Detail, DetailAdmin)
|
Add models to Admin Panel
|
Add models to Admin Panel
|
Python
|
mit
|
Javid-Izadfar/TaOonja,Javid-Izadfar/TaOonja,Javid-Izadfar/TaOonja
|
f620dde75d65e1175829b524eec00d54e20bb2be
|
tests/test_views.py
|
tests/test_views.py
|
from __future__ import unicode_literals
from djet.testcases import ViewTestCase
from pgallery.views import TaggedPhotoListView
class TaggedPhotoListViewTestCase(ViewTestCase):
view_class = TaggedPhotoListView
def test_tag_in_response(self):
request = self.factory.get()
response = self.view(request, tag='example_tag')
self.assertContains(response, 'example_tag')
|
from __future__ import unicode_literals
from django.contrib.auth.models import AnonymousUser
from djet.testcases import ViewTestCase
from pgallery.views import GalleryListView, TaggedPhotoListView
from .factories import GalleryFactory, UserFactory
class GalleryListViewTestCase(ViewTestCase):
view_class = GalleryListView
def test_draft_invisible(self):
gallery = GalleryFactory(status='draft', title="Draft gallery")
request = self.factory.get(user=AnonymousUser())
response = self.view(request)
self.assertNotContains(response, gallery.title)
def test_draft_visible_for_staff(self):
gallery = GalleryFactory(status='draft', title="Draft gallery")
user = UserFactory(is_staff=True)
request = self.factory.get(user=user)
response = self.view(request)
self.assertContains(response, gallery.title)
class TaggedPhotoListViewTestCase(ViewTestCase):
view_class = TaggedPhotoListView
def test_tag_in_response(self):
request = self.factory.get()
response = self.view(request, tag='example_tag')
self.assertContains(response, 'example_tag')
|
Test drafts visible for staff users only.
|
Test drafts visible for staff users only.
|
Python
|
mit
|
zsiciarz/django-pgallery,zsiciarz/django-pgallery
|
7f1db4023f2310529822d721379b1019aaf320fc
|
tablib/formats/_df.py
|
tablib/formats/_df.py
|
""" Tablib - DataFrame Support.
"""
import sys
if sys.version_info[0] > 2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
from pandas import DataFrame
import tablib
from tablib.compat import unicode
title = 'df'
extensions = ('df', )
def detect(stream):
"""Returns True if given stream is a DataFrame."""
try:
DataFrame(stream)
return True
except ValueError:
return False
def export_set(dset, index=None):
"""Returns DataFrame representation of DataBook."""
dataframe = DataFrame(dset.dict, columns=dset.headers)
return dataframe
def import_set(dset, in_stream):
"""Returns dataset from DataFrame."""
dset.wipe()
dset.dict = in_stream.to_dict(orient='records')
|
""" Tablib - DataFrame Support.
"""
import sys
if sys.version_info[0] > 2:
from io import BytesIO
else:
from cStringIO import StringIO as BytesIO
try:
from pandas import DataFrame
except ImportError:
DataFrame = None
import tablib
from tablib.compat import unicode
title = 'df'
extensions = ('df', )
def detect(stream):
"""Returns True if given stream is a DataFrame."""
if DataFrame is None:
return False
try:
DataFrame(stream)
return True
except ValueError:
return False
def export_set(dset, index=None):
"""Returns DataFrame representation of DataBook."""
if DataFrame is None:
raise NotImplementedError(
'DataFrame Format requires `pandas` to be installed.'
' Try `pip install tablib[pandas]`.')
dataframe = DataFrame(dset.dict, columns=dset.headers)
return dataframe
def import_set(dset, in_stream):
"""Returns dataset from DataFrame."""
dset.wipe()
dset.dict = in_stream.to_dict(orient='records')
|
Raise NotImplementedError if pandas is not installed
|
Raise NotImplementedError if pandas is not installed
|
Python
|
mit
|
kennethreitz/tablib
|
5761364149b3171521cb4f72f591dc5f5cbd77d6
|
temp-sensor02/main.py
|
temp-sensor02/main.py
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import machine
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
url = keys['inputUrl'] + "?private_key=" + keys['privateKey'] + "&temp=" + str(temperature)
#data = {'temp':temperature}
#data['private_key'] = keys['privateKey']
#print (keys['inputUrl'])
#print(keys['privateKey'])
#datajson = ujson.dumps(data)
#print (datajson)
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
from machine import Pin
from ds18x20 import DS18X20
import onewire
import time
import ujson
import urequests
def posttocloud(temperature):
keystext = open("sparkfun_keys.json").read()
keys = ujson.loads(keystext)
params = {}
params['temp'] = temperature
params['private_key'] = keys['privateKey']
#data.sparkfun doesn't support putting data into the POST Body.
#We had to add the data to the query string
#Copied the Dirty hack from
#https://github.com/matze/python-phant/blob/24edb12a449b87700a4f736e43a5415b1d021823/phant/__init__.py
payload_str = "&".join("%s=%s" % (k, v) for k, v in params.items())
url = keys['inputUrl'] + "?" + payload_str
resp = urequests.request("POST", url)
print (resp.text)
while True:
p = Pin(2) # Data Line is on GPIO2 aka D4
ow = onewire.OneWire(p)
ds = DS18X20(ow)
lstrom = ds.scan()
#Assuming we have only 1 device connected
rom = lstrom[0]
ds.convert_temp()
time.sleep_ms(750)
temperature = round(float(ds.read_temp(rom)),1)
#print("Temperature: {:02.1f}".format(temperature))
posttocloud(temperature)
time.sleep(10)
|
Build a query string with params in a dictionary and append it to the URL. Makes the code readale. Remove commented code
|
Build a query string with params in a dictionary and append it to the URL. Makes the code readale. Remove commented code
|
Python
|
mit
|
fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout,fuzzyhandle/esp8266hangout
|
327ba1797045235a420ce095d2cd2cac5257a1e9
|
tutorials/models.py
|
tutorials/models.py
|
from django.db import models
# Create your models here.
class Tutorial(models.Model):
title = models.TextField()
html = models.TextField()
markdown = models.TextField()
|
from django.db import models
from markdownx.models import MarkdownxField
# Create your models here.
class Tutorial(models.Model):
# ToDo: Fields that are out-commented are missing according to the mockup -> datamodel ??
# Category = models.TextField()
title = models.TextField()
html = models.TextField()
markdown = MarkdownxField()
# Level = models.IntegerField()
|
Add missing Fields according to mockup, Add markdownfield
|
Add missing Fields according to mockup, Add markdownfield
|
Python
|
agpl-3.0
|
openego/oeplatform,openego/oeplatform,openego/oeplatform,openego/oeplatform
|
ea164b66cc93d5d7fb1f89a0297ea0a8da926b54
|
server/core/views.py
|
server/core/views.py
|
from django.shortcuts import render
from django.views.decorators.csrf import ensure_csrf_cookie
@ensure_csrf_cookie
def app(request):
return render(request, 'html.html')
|
from django.shortcuts import render
def app(request):
return render(request, 'html.html')
|
Stop inserting the CSRF token into the main app page
|
Stop inserting the CSRF token into the main app page
|
Python
|
mit
|
Techbikers/techbikers,mwillmott/techbikers,mwillmott/techbikers,Techbikers/techbikers,mwillmott/techbikers,Techbikers/techbikers,Techbikers/techbikers,mwillmott/techbikers
|
6c314451e002db3213ff61d1e6935c091b605a8d
|
server/nurly/util.py
|
server/nurly/util.py
|
import traceback
class NurlyResult():
def __init__(self, code='200 OK', head=None, body=''):
self.head = {} if type(head) != dict else head
self.body = body
self.code = code
class NurlyStatus():
ST_IDLE = 0
ST_BUSY = 1
ST_STOP = 2
ST_MAP = {
ST_IDLE: 'IDLE',
ST_BUSY: 'BUSY',
ST_STOP: 'STOP',
}
def __init__(self, proc, pipe):
self.proc = proc
self.pipe = pipe
self.fileno = self.pipe.fileno
self.count = 0
self.state = NurlyStatus.ST_IDLE
@staticmethod
def label(code, short=False):
return NurlyStatus.ST_MAP[code] if not short else NurlyStatus.ST_MAP[code][0]
class NurlyAction():
def __init__(self, func, path='/', verb='GET'):
self.func = func
self.path = path
self.verb = verb
def __call__(self, env, res, parent):
if env['REQUEST_METHOD'] == self.verb and env['PATH_INFO'].startswith(self.path):
try:
self.func(env, res, parent)
except:
res.code = '500 Server Error'
res.body = traceback.format_exc()
return True
return False
|
import traceback
import types
class NurlyResult():
def __init__(self, code='200 OK', head=None, body=''):
self.head = {} if type(head) != dict else head
self.body = body
self.code = code
class NurlyStatus():
ST_IDLE = 0
ST_BUSY = 1
ST_STOP = 2
ST_MAP = {
ST_IDLE: 'IDLE',
ST_BUSY: 'BUSY',
ST_STOP: 'STOP',
}
def __init__(self, proc, pipe):
self.proc = proc
self.pipe = pipe
self.fileno = self.pipe.fileno
self.count = 0
self.state = NurlyStatus.ST_IDLE
@staticmethod
def label(code, short=False):
return NurlyStatus.ST_MAP[code] if not short else NurlyStatus.ST_MAP[code][0]
class NurlyAction():
def __init__(self, func, path='/', verb='GET'):
self.func = func if type(func) is not types.ModuleType else getattr(func, func.__name__.split('.')[-1])
self.path = path
self.verb = verb
def __call__(self, env, res, parent):
if env['REQUEST_METHOD'] == self.verb and env['PATH_INFO'].startswith(self.path):
try:
self.func(env, res, parent)
except:
res.code = '500 Server Error'
res.body = traceback.format_exc()
return True
return False
|
Support using a module as a call back if it has an function attribute by the same name.
|
Support using a module as a call back if it has an function attribute by the same name.
|
Python
|
mit
|
mk23/nurly,mk23/nurly,mk23/nurly,mk23/nurly
|
293d50438fab81e74ab4559df7a4f7aa7cfd8f03
|
etcdocker/container.py
|
etcdocker/container.py
|
import docker
from etcdocker import util
class Container:
def __init__(self, name, params):
self.name = name
self.params = params
def set_or_create_param(self, key, value):
self.params[key] = value
def ensure_running(self, force_restart=False):
# Ensure container is running with specified params
containers = util.get_containers()
found = False
for pc in containers:
if "/%s" % self.name in pc['Names']:
found = True
full_image = "%s:%s" % (
self.params.get('image'), self.params.get('tag'))
if (pc['Status'].startswith('Up') and
pc['Image'] == full_image and
not force_restart):
return
break
client = docker.Client()
# Start our container
if found:
# Shut down old container first
client.stop(self.name, 5)
client.remove_container(self.name)
# Create container with specified args
client.create_container(
image=self.params.get('image'),
detach=True,
volumes_from=self.params.get('volumes_from'),
volumes=self.params.get('volumes'),
name=self.name)
# Start 'er up
client.start(
container=self.name,
port_bindings=self.params.get('ports'),
privileged=self.params.get('privileged'))
|
import ast
import docker
from etcdocker import util
class Container:
def __init__(self, name, params):
self.name = name
self.params = params
def set_or_create_param(self, key, value):
self.params[key] = value
def ensure_running(self, force_restart=False):
# Ensure container is running with specified params
containers = util.get_containers()
found = False
for pc in containers:
if "/%s" % self.name in pc['Names']:
found = True
full_image = "%s:%s" % (
self.params.get('image'), self.params.get('tag'))
if (pc['Status'].startswith('Up') and
pc['Image'] == full_image and
not force_restart):
return
break
client = docker.Client()
# Start our container
if found:
# Shut down old container first
client.stop(self.name, 5)
client.remove_container(self.name)
# Convert our ports into a dict if necessary
ports = ast.literal_eval(self.params.get('ports'))
# Create container with specified args
client.create_container(
image=self.params.get('image'),
detach=True,
volumes_from=self.params.get('volumes_from'),
volumes=self.params.get('volumes'),
ports=ports.keys(),
name=self.name)
# Start 'er up
client.start(
container=self.name,
port_bindings=ports,
privileged=self.params.get('privileged'))
|
Convert port list to dict
|
Convert port list to dict
|
Python
|
mit
|
CloudBrewery/docrane
|
c30181eed55cc1f2af6da4ee8608f4f2052ceb38
|
serverless_helpers/__init__.py
|
serverless_helpers/__init__.py
|
# -*- coding: utf-8 -*-
# MIT Licensed, Copyright (c) 2016 Ryan Scott Brown <sb@ryansb.com>
from dotenv import load_dotenv, get_key, set_key, unset_key
def load_envs(path):
"""Recursively load .env files starting from `path`
Given the path "foo/bar/.env" and a directory structure like:
foo
\---.env
\---bar
\---.env
Values from foo/bar/.env and foo/.env will both be loaded, but values in
foo/bar/.env will take precedence over values from foo/.env
"""
import os
path = os.path.abspath(path)
path, _ = os.path.split(path)
if path == '/':
# bail out when you reach top of the FS
load_dotenv(os.path.join(path, '.env'))
return
# load higher envs first
# closer-to-base environments need higher precedence.
load_envs(path)
load_dotenv(os.path.join(path, '.env'))
|
# -*- coding: utf-8 -*-
# MIT Licensed, Copyright (c) 2016 Ryan Scott Brown <sb@ryansb.com>
from dotenv import load_dotenv, get_key, set_key, unset_key
def load_envs(path):
"""Recursively load .env files starting from `path`
Usage: from your Lambda function, call load_envs with the value __file__ to
give it the current location as a place to start looking for .env files.
import serverless_helpers
serverless_helpers.load_envs(__file__)
Given the path "foo/bar/myfile.py" and a directory structure like:
foo
\---.env
\---bar
\---.env
\---myfile.py
Values from foo/bar/.env and foo/.env will both be loaded, but values in
foo/bar/.env will take precedence over values from foo/.env
"""
import os
path = os.path.abspath(path)
path, _ = os.path.split(path)
if path == '/':
# bail out when you reach top of the FS
load_dotenv(os.path.join(path, '.env'))
return
# load higher envs first
# closer-to-base environments need higher precedence.
load_envs(path)
load_dotenv(os.path.join(path, '.env'))
|
Document calling with __file__ as starting env path
|
Document calling with __file__ as starting env path
|
Python
|
mit
|
serverless/serverless-helpers-py
|
922acafc793b3d32f625fe18cd52b2bfd59a5f96
|
ansible/wsgi.py
|
ansible/wsgi.py
|
from pecan.deploy import deploy
app = deploy('/opt/web/draughtcraft/src/production.py')
from paste.exceptions.errormiddleware import ErrorMiddleware
app = ErrorMiddleware(
app,
error_email=app.conf.error_email,
from_address=app.conf.error_email,
smtp_server=app.conf.error_smtp_server,
smtp_username=app.conf.error_email,
smtp_password=app.conf.error_password,
smtp_use_tls=True
)
|
from pecan import conf
from pecan.deploy import deploy
app = deploy('/opt/web/draughtcraft/src/production.py')
from paste.exceptions.errormiddleware import ErrorMiddleware
app = ErrorMiddleware(
app,
error_email=conf.error_email,
from_address=conf.error_email,
smtp_server=conf.error_smtp_server,
smtp_username=conf.error_email,
smtp_password=conf.error_password,
smtp_use_tls=True
)
|
Fix a bug in the WSGI entrypoint.
|
Fix a bug in the WSGI entrypoint.
|
Python
|
bsd-3-clause
|
ryanpetrello/draughtcraft,ryanpetrello/draughtcraft,ryanpetrello/draughtcraft,ryanpetrello/draughtcraft
|
b78b14214e317d1149b37bcdcf5ba0681212431b
|
rapidsms/contrib/httptester/models.py
|
rapidsms/contrib/httptester/models.py
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.db import models
DIRECTION_CHOICES = (
("I", "Incoming"),
("O", "Outgoing"))
class HttpTesterMessage(models.Model):
direction = models.CharField(max_length=1, choices=DIRECTION_CHOICES)
identity = models.CharField(max_length=100)
text = models.TextField()
|
#!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
from django.db import models
DIRECTION_CHOICES = (
("I", "Incoming"),
("O", "Outgoing"))
class HttpTesterMessage(models.Model):
direction = models.CharField(max_length=1, choices=DIRECTION_CHOICES)
identity = models.CharField(max_length=100)
text = models.TextField()
class Meta(object):
# Ordering by id will order by when they were created, which is
# typically what we want
ordering = ['id']
|
Sort HTTP Tester Message model by id so they'll naturally be displayed in the order they were added. Branch: feature/httptester-update
|
Sort HTTP Tester Message model by id so they'll naturally be displayed in the order they were added.
Branch: feature/httptester-update
|
Python
|
bsd-3-clause
|
eHealthAfrica/rapidsms,peterayeni/rapidsms,ehealthafrica-ci/rapidsms,ehealthafrica-ci/rapidsms,lsgunth/rapidsms,lsgunth/rapidsms,eHealthAfrica/rapidsms,lsgunth/rapidsms,caktus/rapidsms,ehealthafrica-ci/rapidsms,catalpainternational/rapidsms,catalpainternational/rapidsms,peterayeni/rapidsms,catalpainternational/rapidsms,lsgunth/rapidsms,eHealthAfrica/rapidsms,peterayeni/rapidsms,caktus/rapidsms,peterayeni/rapidsms,catalpainternational/rapidsms,caktus/rapidsms
|
92eaa47b70d48874da032a21fbbd924936c0d518
|
code/csv2map.py
|
code/csv2map.py
|
# csv2map.py -- Convert .csv into a .map format
# Description of MAP format: http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#map
#
# jean-daniel.granet@mines-paristech.fr
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description='Convert .csv to .map')
parser.add_argument('csv', help='CSV file to convert')
parser.add_argument('map', help='MAP file to create')
args = parser.parse_args()
# read the csv file and convert it into a MAP file
with open(args.csv, 'r') as fdr:
with open(args.map, 'w') as fdw:
for line in fdr:
line_split = line.split(',')
fdw.write("%s\n" % "\n".join(["%s %s 0 %s" % (line_split[3], line_split[2], line_split[5])]))
fdw.close()
fdr.close()
if __name__ == "__main__":
main()
|
# csv2map.py -- Convert .csv into a .map format
# Description of MAP format: http://pngu.mgh.harvard.edu/~purcell/plink/data.shtml#map
#
# jean-daniel.granet@mines-paristech.fr
import sys
import argparse
def main():
parser = argparse.ArgumentParser(description='Convert .csv to .map')
parser.add_argument('csv', help="""CSV file to convert. Fields are : Index,Illumina_SNP_Name,Alternative_SNP_Name,Chromosome,Build36_Position,Build37_Position,new_rsname,Strand,TopAlleles,ForwardAlleles,DesignAlleles
""")
parser.add_argument('map', help='MAP file to create')
args = parser.parse_args()
# read the csv file and convert it into a MAP file
with open(args.csv, 'r') as fdr:
with open(args.map, 'w') as fdw:
for line in fdr:
line_split = line.split(',')
fdw.write("%s\n" % "\n".join(["%s %s 0 %s" % (line_split[3], line_split[2], line_split[5])]))
fdw.close()
fdr.close()
if __name__ == "__main__":
main()
|
Add info about csv fields
|
Add info about csv fields
|
Python
|
mit
|
chagaz/sfan,chagaz/sfan,chagaz/sfan,chagaz/sfan,chagaz/sfan
|
891a85fc427b16295c6f792d7311eca1e497332e
|
api/__init__.py
|
api/__init__.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL',
default='postgresql://postgres@localhost:5432/loadstone')
db = SQLAlchemy(app)
import api.views
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import getenv
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = getenv('DATABASE_URL', default='sqlite://')
db = SQLAlchemy(app)
import api.views
|
Set default to sqlite memory
|
Set default to sqlite memory
|
Python
|
mit
|
Demotivated/loadstone
|
8c8eb5207fd34ba381b89cb147dd3c38b68cf3ad
|
stocks.py
|
stocks.py
|
#!/usr/bin/env python
def find_points(prices, window):
pivot = None
next_pivot = None
profit = 0
for i, price in enumerate(prices):
if pivot is None or price < prices[pivot]:
pivot = i
next_pivot = max(next_pivot, pivot + 1)
if pivot != i and (next_pivot is None or price < prices[next_pivot]):
next_pivot = i
if i - pivot == window:
pivot = next_pivot
next_pivot = pivot + 1
profit = max(profit, price - prices[pivot])
return profit
def main():
print find_points([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
print find_points([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
print find_points([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main()
|
#!/usr/bin/env python
def find_profit(prices, window):
pivot = None
next_pivot = None
profit = 0
for i, price in enumerate(prices):
if pivot is None or price < prices[pivot]:
pivot = i
next_pivot = max(next_pivot, pivot + 1)
if pivot != i and (next_pivot is None or price < prices[next_pivot]):
next_pivot = i
if i - pivot == window:
pivot = next_pivot
next_pivot += 1
profit = max(profit, price - prices[pivot])
return profit
def main():
print find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
print find_profit([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
print find_profit([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main()
|
Change the name of the function
|
Change the name of the function
|
Python
|
mit
|
jrasky/planetlabs-challenge
|
15a7ced2d0da014e5d5508ed50c045de3cc9e9d2
|
_lib/wordpress_faq_processor.py
|
_lib/wordpress_faq_processor.py
|
import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page': current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_post(post)
def process_post(post):
post['_id'] = post['slug']
names = ['og_title', 'og_image', 'og_desc', 'twtr_text', 'twtr_lang',
'twtr_rel', 'twtr_hash', 'utm_campaign', 'utm_term',
'utm_content', 'faq']
for name in names:
if name in post['custom_fields']:
post[name] = post['custom_fields'][name]
if 'taxonomy_fj_tag' in post:
post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
del post['custom_fields']
return post
|
import sys
import json
import os.path
import requests
def posts_at_url(url):
current_page = 1
max_page = sys.maxint
while current_page <= max_page:
url = os.path.expandvars(url)
resp = requests.get(url, params={'page': current_page, 'count': '-1'})
results = json.loads(resp.content)
current_page += 1
max_page = results['pages']
for p in results['posts']:
yield p
def documents(name, url, **kwargs):
for post in posts_at_url(url):
yield process_post(post)
def process_post(post):
post['_id'] = post['slug']
names = ['og_title', 'og_image', 'og_desc', 'twtr_text', 'twtr_lang',
'twtr_rel', 'twtr_hash', 'utm_campaign', 'utm_term',
'utm_content', 'faq']
for name in names:
if name in post['custom_fields']:
post[name] = post['custom_fields'][name]
if 'taxonomy_fj_tag' in post:
post['tags'] = [tag['title'] for tag in post['taxonomy_fj_tag']]
del post['custom_fields']
return {'_index': 'content',
'_type': 'faq',
'_id': post['slug'],
'_source': post}
|
Change faq processor to bulk index
|
Change faq processor to bulk index
|
Python
|
cc0-1.0
|
imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh,imuchnik/cfgov-refresh
|
f45e182ec206ab08b1bea699033938b562558670
|
test/test_compression.py
|
test/test_compression.py
|
import unittest
import bmemcached
import bz2
class MemcachedTests(unittest.TestCase):
def setUp(self):
self.server = '127.0.0.1:11211'
self.client = bmemcached.Client(self.server, 'user', 'password')
self.bzclient = bmemcached.Client(self.server, 'user', 'password', bz2)
self.data = b'this is test data. ' * 32
def tearDown(self):
self.client.delete(b'test_key')
self.client.delete(b'test_key2')
self.client.disconnect_all()
self.bzclient.disconnect_all()
def testCompressedData(self):
self.client.set(b'test_key', self.data)
self.assertEqual(self.data, self.client.get(b'test_key'))
def testBZ2CompressedData(self):
self.bzclient.set(b'test_key', self.data)
self.assertEqual(self.data, self.bzclient.get(b'test_key'))
def testCompressionMissmatch(self):
self.client.set(b'test_key', self.data)
self.bzclient.set(b'test_key2', self.data)
self.assertEqual(self.client.get(b'test_key'),
self.bzclient.get(b'test_key2'))
self.assertRaises(IOError, self.bzclient.get, b'test_key')
|
import unittest
import bz2
import bmemcached
class MemcachedTests(unittest.TestCase):
def setUp(self):
self.server = '127.0.0.1:11211'
self.client = bmemcached.Client(self.server, 'user', 'password')
self.bzclient = bmemcached.Client(self.server, 'user', 'password',
compression=bz2)
self.data = b'this is test data. ' * 32
def tearDown(self):
self.client.delete(b'test_key')
self.client.delete(b'test_key2')
self.client.disconnect_all()
self.bzclient.disconnect_all()
def testCompressedData(self):
self.client.set(b'test_key', self.data)
self.assertEqual(self.data, self.client.get(b'test_key'))
def testBZ2CompressedData(self):
self.bzclient.set(b'test_key', self.data)
self.assertEqual(self.data, self.bzclient.get(b'test_key'))
def testCompressionMissmatch(self):
self.client.set(b'test_key', self.data)
self.bzclient.set(b'test_key2', self.data)
self.assertEqual(self.client.get(b'test_key'),
self.bzclient.get(b'test_key2'))
self.assertRaises(IOError, self.bzclient.get, b'test_key')
|
Use keyword arguments to avoid accidentally setting timeout
|
Use keyword arguments to avoid accidentally setting timeout
|
Python
|
mit
|
xmonster-tech/python-binary-memcached,jaysonsantos/python-binary-memcached,xmonster-tech/python-binary-memcached,jaysonsantos/python-binary-memcached
|
6bbd81efbd4821a3963a021d8456531f01edfd6c
|
tests/test_rover_instance.py
|
tests/test_rover_instance.py
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
def test_rover_position(self):
assert self.rover.position == (self.rover.x, self.rover.y, self.rover.direction)
|
from unittest import TestCase
from rover import Rover
class TestRover(TestCase):
def setUp(self):
self.rover = Rover()
def test_rover_compass(self):
assert self.rover.compass == ['N', 'E', 'S', 'W']
def test_rover_position(self):
assert self.rover.position == (self.rover.x, self.rover.y, self.rover.direction)
def test_rover_set_position(self):
self.rover.set_position(4, 9, 'W')
assert self.rover.position == (4, 9, 'W')
|
Add failing test for set position method
|
Add failing test for set position method
|
Python
|
mit
|
authentik8/rover
|
31eae0aee3a6ae9fa7abea312ff1ea843a98e853
|
graphene/contrib/django/tests/models.py
|
graphene/contrib/django/tests/models.py
|
from __future__ import absolute_import
from django.db import models
class Pet(models.Model):
name = models.CharField(max_length=30)
class Film(models.Model):
reporters = models.ManyToManyField('Reporter',
related_name='films')
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
pets = models.ManyToManyField('self')
def __str__(self): # __unicode__ on Python 2
return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
def __str__(self): # __unicode__ on Python 2
return self.headline
class Meta:
ordering = ('headline',)
|
from __future__ import absolute_import
from django.db import models
class Pet(models.Model):
name = models.CharField(max_length=30)
class Film(models.Model):
reporters = models.ManyToManyField('Reporter',
related_name='films')
class Reporter(models.Model):
first_name = models.CharField(max_length=30)
last_name = models.CharField(max_length=30)
email = models.EmailField()
pets = models.ManyToManyField('self')
def __str__(self): # __unicode__ on Python 2
return "%s %s" % (self.first_name, self.last_name)
class Article(models.Model):
headline = models.CharField(max_length=100)
pub_date = models.DateField()
reporter = models.ForeignKey(Reporter, related_name='articles')
lang = models.CharField(max_length=2, help_text='Language', choices=[
('es', 'Spanish'),
('en', 'English')
], default='es')
def __str__(self): # __unicode__ on Python 2
return self.headline
class Meta:
ordering = ('headline',)
|
Improve Django field conversion real-life tests
|
Improve Django field conversion real-life tests
|
Python
|
mit
|
graphql-python/graphene,sjhewitt/graphene,Globegitter/graphene,sjhewitt/graphene,Globegitter/graphene,graphql-python/graphene
|
6d1117fbba83b258162cc0f397573e21cd31543e
|
batch_effect.py
|
batch_effect.py
|
#!/usr/bin/env python
import argparse
import csv
import shutil
import subprocess
import sys
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Chain together Inkscape extensions")
parser.add_argument('--id', type=str, action='append', dest='ids', default=[],
help="ID attributes of objects to manipulate. Passed to all extensions.")
parser.add_argument('--csvpath', type=str, required=True,
help="Path to .csv file containing command lines")
parser.add_argument('svgpath', type=str, nargs='?', default='',
help="Path to temporary SVG file to use for input to the first extension")
args = parser.parse_args()
with open(args.csvpath, 'rb') as f:
# Make an argument list of the ids
id_args = []
for id in args.ids:
id_args.extend(('--id', id))
# Take input for the first call from temporary file or stdin
if args.svgpath:
stream = open(args.svgpath)
else:
stream = sys.stdin
# Execute all the calls
for row in csv.reader(f):
# Insert the ids into the call
call = row[:1] + id_args + row[1:]
# Make the call
p = subprocess.Popen(call, stdin=stream, stdout=subprocess.PIPE)
# Close our handle to the input pipe because we no longer need it
stream.close()
# Grab the output pipe for input into the next call
stream = p.stdout
# Send output from last call on stdout
shutil.copyfileobj(stream, sys.stdout)
|
#!/usr/bin/env python
import csv
import optparse
import shutil
import subprocess
import sys
if __name__ == '__main__':
parser = optparse.OptionParser(description="Chain together Inkscape extensions",
usage="%prog [options] svgpath")
parser.add_option('--id', dest='ids', action='append', type=str, default=[],
help="ID attributes of objects to manipulate. Passed to all extensions.")
parser.add_option('--csvpath', dest='csvpath', type=str,
help="Path to .csv file containing command lines")
options, args = parser.parse_args()
with open(options.csvpath, 'rb') as f:
# Make an argument list of the ids
id_args = []
for id in options.ids:
id_args.extend(('--id', id))
# Take input for the first call from temporary file or stdin
if args:
stream = open(args[0])
else:
stream = sys.stdin
# Execute all the calls
for row in csv.reader(f):
# Insert the ids into the call
call = row[:1] + id_args + row[1:]
# Make the call
p = subprocess.Popen(call, stdin=stream, stdout=subprocess.PIPE)
# Close our handle to the input pipe because we no longer need it
stream.close()
# Grab the output pipe for input into the next call
stream = p.stdout
# Send output from last call on stdout
shutil.copyfileobj(stream, sys.stdout)
|
Make compatible with Python <2.7
|
Make compatible with Python <2.7
The argparse module was added in Python 2.7, but the Python bundled
with Inkscape is 2.6. Switching to optparse makes this extension
compatible with the Python bundled with Inkscape.
|
Python
|
mit
|
jturner314/inkscape-batch-effect
|
25ebc324c0af6e1ce74535cc75227071637a7a18
|
areaScraper.py
|
areaScraper.py
|
# Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
from bs4 import BeautifulSoup
import re
fh = open("sites.htm", "r")
soup = BeautifulSoup(fh, "html.parser")
for columnDiv in soup.h1.next_sibling.next_sibling:
for state in columnDiv:
for city in state:
print(city)
#print(soup.text)
print("\n----Done----\n\n")
|
#!/usr/bin/python3.4
# Craigslist City Scraper
# By Marshall Ehlinger
# For sp2015 Systems Analysis and Design
# Returns dictionary of 'city name string' : 'site url'
# for all American cities in states/territories @ CL
from bs4 import BeautifulSoup
import re
def getCities():
fh = open("sites.htm", "r")
soup = BeautifulSoup(fh, "html.parser")
placesDict = {}
for columnDiv in soup.h1.next_sibling.next_sibling:
for state in columnDiv:
for city in state:
m = (re.search('<li><a href="(.+)">(.+)</a>', str(city)))
if m:
placesDict[m.group(2)] = m.group(1)
return(placesDict)
getCities()
|
Complete site scraper for all American cities
|
Complete site scraper for all American cities
areaScraper.py contains the getCities() function, which will
return a dictionary of 'city name string' : 'url string'
for each Craigslist "site", corresponding to American cities,
regions, etc.
|
Python
|
mit
|
MuSystemsAnalysis/craigslist_area_search,MuSystemsAnalysis/craigslist_area_search
|
49a7968e51ce850428936fb2fc66c905ce8b8998
|
head1stpython/Chapter3/sketch.py
|
head1stpython/Chapter3/sketch.py
|
#Import dependencies
#Load OS functions from the standard library
import os
os.chdir('/home/israel/Development/Python_Exercises/python-octo-wookie/head1stpython/Chapter3')
#Change path for the current directory
data = open('sketch.txt')
#Start iteration over the text file
for each_line in data:
try:
(role, line_spoken) = each_line.split(':', 1)
print(role, end = '')
print(' said: ', end = '')
print(line_spoken, end = '')
except:
pass
data.close()
|
#Import dependencies
#Load OS functions from the standard library
import os
#Change path for the current directory
os.chdir('/home/israel/Development/Python_Exercises/python-octo-wookie/head1stpython/Chapter3')
#Check if file exists
if os.path.exists('sketch.txt'):
#Load the text file into 'data' variable
data = open('sketch.txt')
#Start iteration over the text file
for each_line in data:
#We use try/except to handle errors that can occur with bad input
try:
(role, line_spoken) = each_line.split(':', 1)
print(role, end = '')
print(' said: ', end = '')
print(line_spoken, end = '')
except:
pass
#After all the iteration and printing, we close the file
data.close()
#If file does exists, we simply quit and display an error for the user/dev
else:
print('The data file is missing!')
|
Validate if the file exists (if/else)
|
Validate if the file exists (if/else)
|
Python
|
unlicense
|
israelzuniga/python-octo-wookie
|
a15d1df33fece7ddeefcbeb5a8094df2ebccd7c6
|
tests/test_dict_utils.py
|
tests/test_dict_utils.py
|
import unittest
from dict_utils import dict_utils
class DictUtilsTestCase(unittest.TestCase):
def test_dict_search(self):
pass
|
import unittest
from dict_utils import dict_utils
class DictUtilsTestCase(unittest.TestCase):
def test_dict_search_found(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'name')
self.assertEqual(found_value, 'Joe', 'Key not found in the given dict')
def test_dict_search_not_found(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'address')
self.assertNotEquals(found_value, 'London (UK)', 'Key not found in the given dict')
def test_dict_search_different_value(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
found_value = dict_utils.dict_search_value(dict_1, 'name')
self.assertNotEquals(found_value, 'Paul', 'Found value is not different')
def test_compare_assert_dicts_identical(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
def test_compare_assert_dicts_different_same_values(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'level_1': {'level_2': {'name': 'Joe', 'Age': 30}}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
def test_compare_assert_dicts_different_keys_structure_same_values(self):
dict_1 = {'first_level': {'second_level': {'name': 'Joe', 'Age': 30}}}
dict_2 = {'level_1': {'name': 'Joe', 'Age': 30}}
dict_utils.compare_assert_dicts(self, ['name', 'age'], dict_1, dict_2)
|
Add some tests for the implemented methods
|
Add some tests for the implemented methods
|
Python
|
mit
|
glowdigitalmedia/dict-utils
|
07a8ca051b46a04df806647202144bd563d5dc5a
|
tests/locale_utils.py
|
tests/locale_utils.py
|
import subprocess
"""Helper functions, decorators,... for working with locales"""
def get_avail_locales():
return {loc.strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
def requires_locales(locales):
"""A decorator factory to skip tests that require unavailable locales
:param set locales: set of required locales
**Requires the test to have the set of available locales defined as its
``avail_locales`` attribute.**
"""
canon_locales = {loc.replace("UTF-8", "utf8") for loc in locales}
def decorator(test_method):
def decorated(test, *args):
missing = canon_locales - set(test.avail_locales)
if missing:
test.skipTest("requires missing locales: %s" % missing)
else:
return test_method(test, *args)
return decorated
return decorator
|
import subprocess
"""Helper functions, decorators,... for working with locales"""
def get_avail_locales():
return {loc.decode(errors="replace").strip() for loc in subprocess.check_output(["locale", "-a"]).split()}
def requires_locales(locales):
"""A decorator factory to skip tests that require unavailable locales
:param set locales: set of required locales
**Requires the test to have the set of available locales defined as its
``avail_locales`` attribute.**
"""
canon_locales = {loc.replace("UTF-8", "utf8") for loc in locales}
def decorator(test_method):
def decorated(test, *args):
missing = canon_locales - set(test.avail_locales)
if missing:
test.skipTest("requires missing locales: %s" % missing)
else:
return test_method(test, *args)
return decorated
return decorator
|
Fix checking for available locales
|
Fix checking for available locales
"subprocess.check" returns bytes, so we need to decode the lang
codes before comparing them with required languages.
|
Python
|
lgpl-2.1
|
rhinstaller/libbytesize,rhinstaller/libbytesize,rhinstaller/libbytesize
|
e366f6da5673a4c92ffcf65492951e0c6fc886ed
|
tests/test_element.py
|
tests/test_element.py
|
import rml.element
def test_create_element():
e = rml.element.Element('BPM', 6.0)
assert e.get_type() == 'BPM'
assert e.get_length() == 6.0
def test_add_element_to_family():
e = rml.element.Element('dummy', 0.0)
e.add_to_family('fam')
assert 'fam' in e.get_families()
|
import pkg_resources
pkg_resources.require('cothread')
import cothread
import rml.element
def test_create_element():
e = rml.element.Element('BPM', 6.0)
assert e.get_type() == 'BPM'
assert e.get_length() == 6.0
def test_add_element_to_family():
e = rml.element.Element('dummy', 0.0)
e.add_to_family('fam')
assert 'fam' in e.get_families()
def test_get_pv_value():
PV = 'SR22C-DI-EBPM-04:SA:X'
e = rml.element.Element('dummy', 0.0, pv=PV)
result = e.get_pv('x')
assert isinstance(result, float)
|
Test before creating the get_pv() method
|
Test before creating the get_pv() method
|
Python
|
apache-2.0
|
razvanvasile/RML,willrogers/pml,willrogers/pml
|
0b884ed68f2c4b482f9eadbf38adc01f7d869f1a
|
tests/test_exports.py
|
tests/test_exports.py
|
import unittest
import websockets
import websockets.client
import websockets.exceptions
import websockets.legacy.auth
import websockets.legacy.client
import websockets.legacy.protocol
import websockets.legacy.server
import websockets.server
import websockets.typing
import websockets.uri
combined_exports = (
websockets.legacy.auth.__all__
+ websockets.legacy.client.__all__
+ websockets.legacy.protocol.__all__
+ websockets.legacy.server.__all__
+ websockets.client.__all__
+ websockets.exceptions.__all__
+ websockets.server.__all__
+ websockets.typing.__all__
+ websockets.uri.__all__
)
class TestExportsAllSubmodules(unittest.TestCase):
def test_top_level_module_reexports_all_submodule_exports(self):
self.assertEqual(set(combined_exports), set(websockets.__all__))
def test_submodule_exports_are_globally_unique(self):
self.assertEqual(len(set(combined_exports)), len(combined_exports))
|
import unittest
import websockets
import websockets.client
import websockets.exceptions
import websockets.legacy.auth
import websockets.legacy.client
import websockets.legacy.protocol
import websockets.legacy.server
import websockets.server
import websockets.typing
import websockets.uri
combined_exports = (
websockets.legacy.auth.__all__
+ websockets.legacy.client.__all__
+ websockets.legacy.protocol.__all__
+ websockets.legacy.server.__all__
+ websockets.client.__all__
+ websockets.exceptions.__all__
+ websockets.server.__all__
+ websockets.typing.__all__
+ websockets.uri.__all__
)
class ExportsTests(unittest.TestCase):
def test_top_level_module_reexports_all_submodule_exports(self):
self.assertEqual(set(combined_exports), set(websockets.__all__))
def test_submodule_exports_are_globally_unique(self):
self.assertEqual(len(set(combined_exports)), len(combined_exports))
|
Rename test class consistently with others.
|
Rename test class consistently with others.
|
Python
|
bsd-3-clause
|
aaugustin/websockets,aaugustin/websockets,aaugustin/websockets,aaugustin/websockets
|
3bbe539f387697137040f665958e0e0e27e6a420
|
tests/test_session.py
|
tests/test_session.py
|
# Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
uplink_builder_mock.add_hook.assert_called()
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
|
# Local imports
from uplink import session
def test_base_url(uplink_builder_mock):
# Setup
uplink_builder_mock.base_url = "https://api.github.com"
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.base_url == sess.base_url
def test_headers(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.headers["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.headers == {"key": "value"}
def test_params(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.params["key"] = "value"
# Verify
assert uplink_builder_mock.add_hook.called
assert sess.params == {"key": "value"}
def test_auth(uplink_builder_mock):
# Setup
uplink_builder_mock.auth = ("username", "password")
sess = session.Session(uplink_builder_mock)
# Run & Verify
assert uplink_builder_mock.auth == sess.auth
def test_auth_set(uplink_builder_mock):
# Setup
sess = session.Session(uplink_builder_mock)
# Run
sess.auth = ("username", "password")
# Verify
assert ("username", "password") == uplink_builder_mock.auth
|
Fix `assert_called` usage for Python 3.5 build
|
Fix `assert_called` usage for Python 3.5 build
The `assert_called` method seems to invoke a bug caused by a type in the
unittest mock module. (The bug was ultimately tracked and fix here:
https://bugs.python.org/issue24656)
|
Python
|
mit
|
prkumar/uplink
|
cdf60bc0b07c282e75fba747c8adedd165aa0abd
|
index.py
|
index.py
|
#!/usr/bin/env python2.7
from werkzeug.wrappers import Request, Response
from get_html import get_html, choose_lang
@Request.application
def run(request):
lang = choose_lang(request)
if request.url.startswith("https://") or request.args.get("forcenossl") == "true":
html = get_html("launch", lang)
else:
html = get_html("nossl", lang)
return Response(html, mimetype="text/html")
if __name__ == "__main__":
import CGI
CGI.app = run
CGI.run()
|
#!/usr/bin/env python2.7
from werkzeug.wrappers import Request, Response
from get_html import get_html, choose_lang
@Request.application
def run(request):
    """WSGI entry point: serve the launch page, or a no-SSL warning.

    The UI language can be forced with a ?lang=... query parameter;
    otherwise it is negotiated from the request via choose_lang().
    """
    lang = request.args.get("lang") if request.args.get("lang") else choose_lang(request)
    if request.url.startswith("https://") or request.args.get("forcenossl") == "true":
        html = get_html("launch", lang)
    else:
        html = get_html("nossl", lang)
    return Response(html, mimetype="text/html")
if __name__ == "__main__":
    # CGI shim: expose the WSGI app to the CGI runner and start it.
    import CGI
    CGI.app = run
    CGI.run()
|
Make the language changeable via a GET parameter.
|
Make the language changeable via a GET parameter.
|
Python
|
mit
|
YtvwlD/dyluna,YtvwlD/dyluna,YtvwlD/dyluna
|
8a9f707960c3b39488c9bbee6ce7f22c6fbfc853
|
web/config/local_settings.py
|
web/config/local_settings.py
|
import os
from datetime import datetime

# Base location for graphite-web logs.
LOG_DIR = '/var/log/graphite'

# Optional overrides taken from the environment; the host settings are
# comma-separated lists.  Each setting is only defined when the variable
# is present and non-empty.
if os.getenv("CARBONLINK_HOSTS"):
    CARBONLINK_HOSTS = os.getenv("CARBONLINK_HOSTS").split(',')
if os.getenv("CLUSTER_SERVERS"):
    CLUSTER_SERVERS = os.getenv("CLUSTER_SERVERS").split(',')
if os.getenv("MEMCACHE_HOSTS"):
    # BUG FIX: this previously assigned CLUSTER_SERVERS, so setting
    # MEMCACHE_HOSTS silently clobbered the cluster-server list.
    MEMCACHE_HOSTS = os.getenv("MEMCACHE_HOSTS").split(',')
if os.getenv("WHISPER_DIR"):
    WHISPER_DIR = os.getenv("WHISPER_DIR")

# NOTE(review): a timestamp is a weak SECRET_KEY (predictable, and it
# changes on every restart, invalidating sessions) -- consider a
# persistent random value.
SECRET_KEY = str(datetime.now())
|
import os
from datetime import datetime

# Base location for graphite-web logs.
LOG_DIR = '/var/log/graphite'

# Optional environment overrides; the host settings are comma-separated
# lists and are only defined when the variable is present and non-empty.
_carbonlink = os.getenv("CARBONLINK_HOSTS")
if _carbonlink:
    CARBONLINK_HOSTS = _carbonlink.split(',')
_cluster = os.getenv("CLUSTER_SERVERS")
if _cluster:
    CLUSTER_SERVERS = _cluster.split(',')
_memcache = os.getenv("MEMCACHE_HOSTS")
if _memcache:
    MEMCACHE_HOSTS = _memcache.split(',')
_whisper = os.getenv("WHISPER_DIR")
if _whisper:
    WHISPER_DIR = _whisper

# Per-process secret; regenerated on every start.
SECRET_KEY = str(datetime.now())
|
Fix memcache hosts setting from env
|
Fix memcache hosts setting from env
Before this fix if one had set OS env vars for both CLUSTER_SERVERS and
MEMCACHE_HOSTS the value of later would override the former and the
graphite web application fails to show any metrics.
|
Python
|
apache-2.0
|
Banno/graphite-setup,Banno/graphite-setup,Banno/graphite-setup
|
17224d7db16865bc735f27b1f919c6146089d4fd
|
vumi/dispatchers/__init__.py
|
vumi/dispatchers/__init__.py
|
"""The vumi.dispatchers API."""
__all__ = ["BaseDispatchWorker", "BaseDispatchRouter", "SimpleDispatchRouter",
"TransportToTransportRouter", "ToAddrRouter",
"FromAddrMultiplexRouter", "UserGroupingRouter"]
from vumi.dispatchers.base import (BaseDispatchWorker, BaseDispatchRouter,
SimpleDispatchRouter,
TransportToTransportRouter, ToAddrRouter,
FromAddrMultiplexRouter,
UserGroupingRouter)
|
"""The vumi.dispatchers API."""
__all__ = ["BaseDispatchWorker", "BaseDispatchRouter", "SimpleDispatchRouter",
"TransportToTransportRouter", "ToAddrRouter",
"FromAddrMultiplexRouter", "UserGroupingRouter",
"ContentKeywordRouter"]
from vumi.dispatchers.base import (BaseDispatchWorker, BaseDispatchRouter,
SimpleDispatchRouter,
TransportToTransportRouter, ToAddrRouter,
FromAddrMultiplexRouter,
UserGroupingRouter, ContentKeywordRouter)
|
Add ContentKeywordRouter to vumi.dispatchers API.
|
Add ContentKeywordRouter to vumi.dispatchers API.
|
Python
|
bsd-3-clause
|
TouK/vumi,vishwaprakashmishra/xmatrix,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi,harrissoerja/vumi,harrissoerja/vumi,vishwaprakashmishra/xmatrix,TouK/vumi
|
c7f1759ef02c0fa12ca408dfac9d25227fbceba7
|
nova/policies/server_password.py
|
nova/policies/server_password.py
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Single policy rule guarding both GET and DELETE on the
# os-server-password sub-resource; defaults to admin-or-owner.
BASE_POLICY_NAME = 'os_compute_api:os-server-password'
server_password_policies = [
    policy.DocumentedRuleDefault(
        BASE_POLICY_NAME,
        base.RULE_ADMIN_OR_OWNER,
        "Show and clear the encrypted administrative password of a server",
        [
            {
                'method': 'GET',
                'path': '/servers/{server_id}/os-server-password'
            },
            {
                'method': 'DELETE',
                'path': '/servers/{server_id}/os-server-password'
            }
        ]),
]
def list_rules():
    """Return the policy rules for the os-server-password API."""
    return server_password_policies
|
# Copyright 2016 Cloudbase Solutions Srl
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_policy import policy
from nova.policies import base
# Single policy rule guarding both GET and DELETE on the
# os-server-password sub-resource; defaults to admin-or-owner and is
# scoped to both system and project tokens.
BASE_POLICY_NAME = 'os_compute_api:os-server-password'
server_password_policies = [
    policy.DocumentedRuleDefault(
        name=BASE_POLICY_NAME,
        check_str=base.RULE_ADMIN_OR_OWNER,
        description="Show and clear the encrypted administrative "
        "password of a server",
        operations=[
            {
                'method': 'GET',
                'path': '/servers/{server_id}/os-server-password'
            },
            {
                'method': 'DELETE',
                'path': '/servers/{server_id}/os-server-password'
            }
        ],
        scope_types=['system', 'project']),
]
def list_rules():
    """Return the policy rules for the os-server-password API."""
    return server_password_policies
|
Introduce scope_types in server password policy
|
Introduce scope_types in server password policy
oslo.policy introduced the scope_type feature which can
control the access level at system-level and project-level.
- https://docs.openstack.org/oslo.policy/latest/user/usage.html#setting-scope
- http://specs.openstack.org/openstack/keystone-specs/specs/keystone/queens/system-scope.html
Appropriate scope_type for nova case:
- https://specs.openstack.org/openstack/nova-specs/specs/ussuri/approved/policy-defaults-refresh.html#scope
This commit introduce scope_type for server password API policies
as ['system', 'project'].
Also adds the test case with scope_type enabled and verify we
pass and fail the policy check with expected context.
Partial implement blueprint policy-defaults-refresh
Change-Id: I8f5e66810c68a871e57a5362a931545bccded608
|
Python
|
apache-2.0
|
klmitch/nova,openstack/nova,klmitch/nova,openstack/nova,mahak/nova,mahak/nova,mahak/nova,klmitch/nova,klmitch/nova,openstack/nova
|
0aba3f8d1131b502beff1c249e55af88115950ae
|
migrations/versions/20140430220209_4093ccb6d914.py
|
migrations/versions/20140430220209_4093ccb6d914.py
|
"""empty message
Revision ID: 4093ccb6d914
Revises: None
Create Date: 2014-04-30 22:02:09.991428
"""
# revision identifiers, used by Alembic.
revision = '4093ccb6d914'
down_revision = None
from alembic import op
import sqlalchemy as sa
from datetime import datetime
def upgrade():
op.create_table('gallery',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('folder', sa.Text(length=255), nullable=False),
sa.Column('share_code', sa.Text(), nullable=False),
sa.Column('modified', sa.DateTime(timezone=True), default=datetime.utcnow),
sa.Column('created', sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('folder')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(length=255), nullable=False),
sa.Column('password', sa.Text(), nullable=False),
sa.Column('role', sa.Text(), nullable=False, server_default="user"),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
def downgrade():
op.drop_table('user')
op.drop_table('gallery')
|
"""empty message
Revision ID: 4093ccb6d914
Revises: None
Create Date: 2014-04-30 22:02:09.991428
"""
# revision identifiers, used by Alembic.
revision = '4093ccb6d914'
down_revision = None
from alembic import op
import sqlalchemy as sa
from datetime import datetime
def upgrade():
op.create_table('gallery',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.Text(), nullable=False),
sa.Column('folder', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('share_code', sa.Text(), nullable=False),
sa.Column('modified', sa.DateTime(timezone=True), default=datetime.utcnow),
sa.Column('created', sa.DateTime(timezone=True), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('folder')
)
op.create_table('user',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('name', sa.types.VARCHAR(length=255), nullable=False),
sa.Column('password', sa.Text(), nullable=False),
sa.Column('role', sa.Text(), nullable=False, server_default="user"),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('name')
)
def downgrade():
op.drop_table('user')
op.drop_table('gallery')
|
Convert text columns to varchar for mysql
|
Convert text columns to varchar for mysql
|
Python
|
mit
|
taeram/ineffable,taeram/ineffable,taeram/ineffable
|
93d2e33795e240407ab7e18aec67514124ff6713
|
app/__init__.py
|
app/__init__.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
app = Flask(__name__)
def EnvironmentName(environ):
    """Load the config object registered under *environ* in app_config."""
    app.config.from_object(app_config[environ])
# Configure the app for the testing environment at import time.
EnvironmentName('TestingConfig')
databases = SQLAlchemy(app)
# Imported last so the blueprint sees a fully configured app.
from app.v1 import bucketlist
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from instance.config import app_config
app = Flask(__name__)
def EnvironmentName(environ):
    """Load the config object registered under *environ* in app_config."""
    app.config.from_object(app_config[environ])
# Configure the app for the development environment at import time.
EnvironmentName('DevelopmentEnviron')
databases = SQLAlchemy(app)
# Imported last so the blueprint sees a fully configured app.
from app.v1 import bucketlist
|
Change postman testing environment to development
|
Change postman testing environment to development
|
Python
|
mit
|
paulupendo/CP-2-Bucketlist-Application
|
5f1ccd3845e198495e33748b460ef6fa9858e925
|
app/settings.py
|
app/settings.py
|
import os

# Survey-runner configuration: every setting can be overridden via the
# environment and falls back to a development-friendly default.
EQ_RABBITMQ_URL = os.getenv('EQ_RABBITMQ_URL', 'amqp://admin:admin@localhost:5672/%2F')
EQ_RABBITMQ_QUEUE_NAME = os.getenv('EQ_RABBITMQ_QUEUE_NAME', 'eq-submissions')
EQ_RABBITMQ_TEST_QUEUE_NAME = os.getenv('EQ_RABBITMQ_TEST_QUEUE_NAME', 'eq-test')
EQ_PRODUCTION = os.getenv("EQ_PRODUCTION", 'True')
# Key material has no sensible default; None means "not configured".
EQ_RRM_PUBLIC_KEY = os.getenv('EQ_RRM_PUBLIC_KEY')
EQ_SR_PRIVATE_KEY = os.getenv('EQ_SR_PRIVATE_KEY')
EQ_GIT_REF = os.getenv('EQ_GIT_REF', None)
EQ_NEW_RELIC_CONFIG_FILE = os.getenv('EQ_NEW_RELIC_CONFIG_FILE', './newrelic.ini')
# BUG FIX: os.getenv('USER') returns None when USER is unset (common in
# containers/CI), and None + '-local' raised TypeError at import time.
EQ_SR_LOG_GROUP = os.getenv('EQ_SR_LOG_GROUP', os.getenv('USER', 'UNKNOWN') + '-local')
EQ_LOG_LEVEL = os.getenv('EQ_LOG_LEVEL', 'INFO')
|
import os

# Survey-runner configuration: every setting can be overridden via the
# environment and falls back to a development-friendly default.
# _env centralises the lookup so each line reads "name, default".
_env = os.getenv

EQ_RABBITMQ_URL = _env('EQ_RABBITMQ_URL', 'amqp://admin:admin@localhost:5672/%2F')
EQ_RABBITMQ_QUEUE_NAME = _env('EQ_RABBITMQ_QUEUE_NAME', 'eq-submissions')
EQ_RABBITMQ_TEST_QUEUE_NAME = _env('EQ_RABBITMQ_TEST_QUEUE_NAME', 'eq-test')
EQ_PRODUCTION = _env("EQ_PRODUCTION", 'True')
# Key material has no sensible default; None means "not configured".
EQ_RRM_PUBLIC_KEY = _env('EQ_RRM_PUBLIC_KEY')
EQ_SR_PRIVATE_KEY = _env('EQ_SR_PRIVATE_KEY')
EQ_GIT_REF = _env('EQ_GIT_REF', None)
EQ_NEW_RELIC_CONFIG_FILE = _env('EQ_NEW_RELIC_CONFIG_FILE', './newrelic.ini')
# 'UNKNOWN' keeps the concatenation safe when USER is unset.
EQ_SR_LOG_GROUP = _env('EQ_SR_LOG_GROUP', _env('USER', 'UNKNOWN') + '-local')
EQ_LOG_LEVEL = _env('EQ_LOG_LEVEL', 'INFO')
|
Make sure there is a default for LOG Group
|
Make sure there is a default for LOG Group
|
Python
|
mit
|
ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner,ONSdigital/eq-survey-runner
|
257d3bf6cee059de50872cd02b682e1a05d467e9
|
phylocommons/get_treestore.py
|
phylocommons/get_treestore.py
|
from treestore import Treestore
import settings
def get_treestore():
    """Return a Treestore built from the project settings."""
    return Treestore(**settings.TREESTORE_KWARGS)
def uri_from_tree_id(tree_id):
    """Map a bare tree id onto a full tree URI (prefix concatenation)."""
    return (settings.TREE_URI + tree_id)
def tree_id_from_uri(uri):
    """Strip the configured TREE_URI prefix from *uri*, if present."""
    if uri.startswith(settings.TREE_URI):
        uri = uri.replace(settings.TREE_URI, '', 1)
    return uri
|
from treestore import Treestore
import settings
def get_treestore():
    """Return a Treestore built from the project settings."""
    return Treestore(**settings.TREESTORE_KWARGS)
def uri_from_tree_id(tree_id):
    """Map a bare tree id onto a full tree URI via the treestore helper."""
    return Treestore.uri_from_id(tree_id, base_uri=settings.TREE_URI)
def tree_id_from_uri(uri):
    """Strip the TREE_URI prefix and any trailing slash from *uri*."""
    if uri.startswith(settings.TREE_URI):
        uri = uri.replace(settings.TREE_URI, '', 1)
    if uri.endswith('/'): uri = uri.rstrip('/')
    return uri
|
Use treestore to get URIs from IDs.
|
Use treestore to get URIs from IDs.
|
Python
|
mit
|
NESCent/phylocommons,NESCent/phylocommons
|
853dc6b254c66807fd6c44b374c89b90069f55b5
|
Lib/test/test_startfile.py
|
Lib/test/test_startfile.py
|
# Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also that the script actually has run.
import unittest
from test import test_support
import os
class TestCase(unittest.TestCase):
    def test_nonexisting(self):
        # A missing path must raise OSError rather than silently succeed.
        self.assertRaises(OSError, os.startfile, "nonexisting.vbs")
    def test_nonexisting_u(self):
        # Same check with a unicode (Python 2) path.
        self.assertRaises(OSError, os.startfile, u"nonexisting.vbs")
    def test_empty(self):
        # Launch the do-nothing script, with and without an explicit verb.
        empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
        os.startfile(empty)
        os.startfile(empty, "open")
    def test_empty_u(self):
        empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
        os.startfile(unicode(empty, "mbcs"))
        os.startfile(unicode(empty, "mbcs"), "open")
def test_main():
    test_support.run_unittest(TestCase)
if __name__=="__main__":
    test_main()
|
# Ridiculously simple test of the os.startfile function for Windows.
#
# empty.vbs is an empty file (except for a comment), which does
# nothing when run with cscript or wscript.
#
# A possible improvement would be to have empty.vbs do something that
# we can detect here, to make sure that not only the os.startfile()
# call succeeded, but also that the script actually has run.
import unittest
from test import test_support
# BUG FIX: os.path is used below but `os` itself was never imported,
# so test_empty/test_empty_u raised NameError.
import os
# use this form so that the test is skipped when startfile is not available:
from os import startfile
class TestCase(unittest.TestCase):
    def test_nonexisting(self):
        # A missing path must raise OSError rather than silently succeed.
        self.assertRaises(OSError, startfile, "nonexisting.vbs")
    def test_nonexisting_u(self):
        # Same check with a unicode (Python 2) path.
        self.assertRaises(OSError, startfile, u"nonexisting.vbs")
    def test_empty(self):
        # Launch the do-nothing script, with and without an explicit verb.
        empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
        startfile(empty)
        startfile(empty, "open")
    def test_empty_u(self):
        empty = os.path.join(os.path.dirname(__file__), "empty.vbs")
        startfile(unicode(empty, "mbcs"))
        startfile(unicode(empty, "mbcs"), "open")
def test_main():
    test_support.run_unittest(TestCase)
if __name__=="__main__":
    test_main()
|
Change the import statement so that the test is skipped when os.startfile is not present.
|
Change the import statement so that the test is skipped when
os.startfile is not present.
|
Python
|
mit
|
sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator
|
4a5ea880b77e44fa20129e6195cf37d5d72427f3
|
webpay/model/model.py
|
webpay/model/model.py
|
import json
class Model:
    """Wrapper exposing an API response dict's keys as attributes."""
    def __init__(self, client, data, conversion = None):
        # client: owning API client, passed through to converted children.
        # data: raw response dict; each key becomes an attribute.
        # conversion: optional callable mapping a key to a converter
        # (client, value) -> object, or None to store the value as-is.
        self._client = client
        self._data = data
        for k, v in data.items():
            if conversion is None:
                self.__dict__[k] = v
            else:
                conv = conversion(k)
                self.__dict__[k] = v if conv is None else conv(client, v)
    def __str__(self):
        # NOTE(review): relies on self.object coming from `data`; if the
        # response lacks an "object" key this raises AttributeError.
        return '<webpay.model.%s.%s> %s' % (self.object, self.object.capitalize(), json.dumps(self._data, indent = 4, sort_keys = True))
|
import json
class Model:
    """Wrapper exposing an API response dict's keys as attributes."""
    def __init__(self, client, data, conversion = None):
        # client: owning API client, passed through to converted children.
        # data: raw response dict; each key becomes an attribute.
        # conversion: optional callable mapping a key to a converter
        # (client, value) -> object, or None to store the value as-is.
        self._client = client
        self._data = data
        for key, value in data.items():
            converter = conversion(key) if conversion is not None else None
            if converter is None:
                setattr(self, key, value)
            else:
                setattr(self, key, converter(client, value))
    def __str__(self):
        # Render as "<module.Class> {pretty-printed data}".
        cls = type(self)
        body = json.dumps(self._data, indent = 4, sort_keys = True)
        return '<%s.%s> %s' % (cls.__module__, cls.__name__, body)
|
Use type's module and name to show full class path correctly
|
Use type's module and name to show full class path correctly
|
Python
|
mit
|
yamaneko1212/webpay-python
|
9e22082a280babb1e0880fe24fa17c45aac09515
|
docker-nodev.py
|
docker-nodev.py
|
from __future__ import print_function
import subprocess
import sys
# Command templates; {} slots are filled per-invocation.
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
    """Create a nodev container, copy the CWD in, run it, then remove it.

    argv: extra arguments appended to `docker create`.
    The container is always removed, even when copy/start fail.
    """
    container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).strip()
    print('creating container: {container_id}'.format(**locals()))
    try:
        subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
        subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
    finally:
        print('removing container: {container_id}'.format(**locals()))
        subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
    nodev(sys.argv)
|
from __future__ import print_function
import subprocess
import sys
# Command templates; {} slots are filled per-invocation.
DOCKER_CREATE_IN = 'docker create -it nodev {}'
DOCKER_SIMPLE_CMD_IN = 'docker {} {container_id}'
def nodev(argv=()):
    """Create a nodev container, copy the CWD in, run it, then remove it.

    argv: extra arguments appended to `docker create`.
    check_output returns bytes on Python 3, hence the explicit decode.
    The container is always removed, even when copy/start fail.
    """
    container_id = subprocess.check_output(DOCKER_CREATE_IN.format(' '.join(argv)), shell=True).decode('utf-8').strip()
    print('creating container: {container_id}'.format(**locals()))
    try:
        subprocess.check_call('docker cp . {container_id}:/src '.format(**locals()), shell=True)
        subprocess.check_call('docker start -ai {container_id}'.format(**locals()), shell=True)
    finally:
        print('removing container: {container_id}'.format(**locals()))
        subprocess.check_output(DOCKER_SIMPLE_CMD_IN.format('rm -f', **locals()), shell=True)
if __name__ == '__main__':
    # Report docker failures as a message plus exit code 1 instead of a
    # full traceback.
    try:
        nodev(sys.argv)
    except subprocess.CalledProcessError as ex:
        print(ex.args)
        sys.exit(1)
|
Fix python3 crash and cleaner error reporting.
|
Fix python3 crash and cleaner error reporting.
|
Python
|
mit
|
nodev-io/nodev-starter-kit,nodev-io/nodev-tutorial,nodev-io/nodev-starter-kit
|
87bb90370b8d7439989072ae17634dd30276f24c
|
yanico/config.py
|
yanico/config.py
|
# Copyright 2015-2016 Masayuki Yamamoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle yanico configuration."""
import configparser
import os.path
CONFIG_FILENAME = '.yanico.conf'
def user_path():
"""Return user configuration filepath.
The filepath depends home directory and CONFIG_FILENAME constants.
"""
return os.path.join(os.path.expanduser('~'), CONFIG_FILENAME)
def load(*filepaths):
parser = configparser.ConfigParser()
parser.read((user_path(),) + filepaths)
return parser
|
# Copyright 2015-2016 Masayuki Yamamoto
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Handle yanico configuration."""
import configparser
import os.path
CONFIG_FILENAME = '.yanico.conf'
def user_path():
"""Return user configuration filepath.
The filepath depends home directory and CONFIG_FILENAME constants.
"""
return os.path.join(os.path.expanduser('~'), CONFIG_FILENAME)
def load(*filepaths):
"""Return configration object.
Object parses home directory config file.
Args:
filepaths (Tuple[str]): configuration file paths
Returns:
ConfigParser: object expects some configurations are loaded.
"""
parser = configparser.ConfigParser()
parser.read((user_path(),) + filepaths)
return parser
|
Add docstring into load function
|
Add docstring into load function
Describe which file parse at least.
|
Python
|
apache-2.0
|
ma8ma/yanico
|
3ce0aef8d546f83485c1048dac9e9524f2501552
|
src/wagtail_personalisation/blocks.py
|
src/wagtail_personalisation/blocks.py
|
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail_personalisation.adapters import get_segment_adapter
from wagtail_personalisation.models import Segment
def list_segment_choices():
    """Yield (pk, name) choices for every stored Segment."""
    for pk, name in Segment.objects.values_list('pk', 'name'):
        yield pk, name
class PersonalisedStructBlock(blocks.StructBlock):
    """Struct block that allows personalisation per block."""
    segment = blocks.ChoiceBlock(
        choices=list_segment_choices,
        required=False, label=_("Personalisation segment"),
        help_text=_("Only show this content block for users in this segment"))
    def render(self, value, context=None):
        """Only render this content block for users in this segment.
        :param value: The value from the block
        :type value: dict
        :param context: The context containing the request
        :type context: dict
        :returns: The provided block if matched, otherwise an empty string
        :rtype: blocks.StructBlock or empty str
        """
        request = context['request']
        adapter = get_segment_adapter(request)
        user_segments = adapter.get_segments()
        # NOTE(review): value['segment'] is presumably a pk string; the
        # truthiness check only guards the empty (unselected) case, so
        # int() may still raise on a non-numeric value -- verify.
        if value['segment']:
            for segment in user_segments:
                if segment.id == int(value['segment']):
                    return super(PersonalisedStructBlock, self).render(
                        value, context)
        return ""
|
from __future__ import absolute_import, unicode_literals
from django.utils.translation import ugettext_lazy as _
from wagtail.core import blocks
from wagtail_personalisation.adapters import get_segment_adapter
from wagtail_personalisation.models import Segment
def list_segment_choices():
    """Yield segment choices, with -1 as the 'show to everyone' sentinel."""
    yield -1, ("Show to everyone")
    for pk, name in Segment.objects.values_list('pk', 'name'):
        yield pk, name
class PersonalisedStructBlock(blocks.StructBlock):
    """Struct block that allows personalisation per block."""
    segment = blocks.ChoiceBlock(
        choices=list_segment_choices,
        required=False, label=_("Personalisation segment"),
        help_text=_("Only show this content block for users in this segment"))
    def render(self, value, context=None):
        """Only render this content block for users in this segment.
        :param value: The value from the block
        :type value: dict
        :param context: The context containing the request
        :type context: dict
        :returns: The provided block if matched, otherwise an empty string
        :rtype: blocks.StructBlock or empty str
        """
        request = context['request']
        adapter = get_segment_adapter(request)
        user_segments = adapter.get_segments()
        # An unselected/non-numeric choice renders nothing.
        try:
            segment_id = int(value['segment'])
        except (ValueError, TypeError):
            return ''
        # Positive id: render only for visitors in that segment.
        if segment_id > 0:
            for segment in user_segments:
                if segment.id == segment_id:
                    return super(PersonalisedStructBlock, self).render(
                        value, context)
        # -1 sentinel: render for everyone.
        if segment_id == -1:
            return super(PersonalisedStructBlock, self).render(
                value, context)
        return ''
|
Add an option to show a personalised block to everyone
|
Add an option to show a personalised block to everyone
|
Python
|
mit
|
LabD/wagtail-personalisation,LabD/wagtail-personalisation,LabD/wagtail-personalisation
|
cbeabd95e172ae213a3e95f2285b4ccc00a80254
|
src/you_get/extractors/dailymotion.py
|
src/you_get/extractors/dailymotion.py
|
#!/usr/bin/env python
__all__ = ['dailymotion_download']
from ..common import *
def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
    """Downloads Dailymotion videos by URL.

    Picks the first available quality in descending preference order
    (720/480/380/240/auto); prints metadata and, unless info_only,
    downloads the stream.
    """
    html = get_content(url)
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"title"\s*:\s*"(.+?)",')
    real_url = None
    for quality in ['720','480','380','240','auto']:
        # BUG FIX: videos lacking a given quality raised KeyError on
        # info[quality]; skip missing qualities instead of crashing.
        try:
            real_url = info[quality][0]["url"]
            if real_url:
                break
        except KeyError:
            pass
    type, ext, size = url_info(real_url)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')
|
#!/usr/bin/env python
__all__ = ['dailymotion_download']
from ..common import *
def dailymotion_download(url, output_dir = '.', merge = True, info_only = False):
    """Downloads Dailymotion videos by URL.
    """
    html = get_content(url)
    info = json.loads(match1(html, r'qualities":({.+?}),"'))
    title = match1(html, r'"title"\s*:\s*"(.+?)",')
    # Walk qualities in descending preference, skipping ones the video
    # does not offer.
    for quality in ['720','480','380','240','auto']:
        try:
            real_url = info[quality][0]["url"]
            if real_url:
                break
        except KeyError:
            pass
    # NOTE(review): if every quality is missing, real_url is never bound
    # and the next line raises NameError -- consider a real_url = None
    # initialisation plus an explicit error.
    type, ext, size = url_info(real_url)
    print_info(site_info, title, type, size)
    if not info_only:
        download_urls([real_url], title, ext, size, output_dir, merge = merge)
site_info = "Dailymotion.com"
download = dailymotion_download
download_playlist = playlist_not_supported('dailymotion')
|
Fix problems with videos that do not have 720p mode
|
Fix problems with videos that do not have 720p mode
|
Python
|
mit
|
linhua55/you-get,jindaxia/you-get,qzane/you-get,cnbeining/you-get,zmwangx/you-get,linhua55/you-get,Red54/you-get,lilydjwg/you-get,xyuanmu/you-get,qzane/you-get,zmwangx/you-get,lilydjwg/you-get,smart-techs/you-get,specter4mjy/you-get,xyuanmu/you-get,smart-techs/you-get,cnbeining/you-get
|
62c51799953c1299e7c89c61a23270bf55e9cd69
|
PortalEnrollment/models.py
|
PortalEnrollment/models.py
|
from django.db import models
# Create your models here.
|
from django.db import models
from Portal.models import CharacterAttribute
from django.utils.translation import ugettext as _
# Create your models here.
class Enrollment(models.Model):
    """A sign-up (enrollment) for a set of character roles, with a cap."""
    # BUG FIX: ManyToManyField takes the related model as its first
    # positional argument (verbose_name is keyword-only for M2M fields);
    # the original passed _('Role') where the model belongs.
    roles = models.ManyToManyField(CharacterAttribute, verbose_name=_('Role'))
    open = models.BooleanField(_('Open Enrollment'), default=False)
    limit = models.SmallIntegerField(_('Limit'))
    # BUG FIX: upload_to must be relative to MEDIA_ROOT; a leading '/'
    # raises SuspiciousFileOperation on upload.
    background_image = models.ImageField(_('Background image'), upload_to='enrollment/background/', blank=True)
    thumbnail = models.ImageField(_('Thumbnail image'), upload_to='enrollment/thumbnail/', blank=True)
    def reach_limit(self):
        """Placeholder: will report whether sign-ups have reached `limit`."""
        pass
    class Meta:
        verbose_name = _('Enrollment')
        verbose_name_plural = _('Enrollments')
|
Add first model for Enrollment application
|
Add first model for Enrollment application
|
Python
|
mit
|
elryndir/GuildPortal,elryndir/GuildPortal
|
2896d1d0507ac312ab6246c3ccb33bbb6bc6d331
|
bluebottle/common/management/commands/makemessages.py
|
bluebottle/common/management/commands/makemessages.py
|
import json
import codecs
import tempfile
from django.core.management.commands.makemessages import Command as BaseCommand
class Command(BaseCommand):
    """ Extend the makemessages to include some of the fixtures """
    # (app, fixture file) pairs whose 'name' fields should be exposed to
    # gettext extraction.
    fixtures = [
        ('bb_projects', 'project_data.json'),
        ('bb_tasks', 'skills.json'),
        ('geo', 'geo_data.json'),
    ]
    def handle(self, *args, **kwargs):
        # Collect every fixture 'name' value, write them into a temporary
        # .py file full of gettext() calls inside the scanned package, and
        # then run the normal makemessages pass so they are extracted.
        strings = []
        for app, file in self.fixtures:
            with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
                strings += [fixture['fields']['name'].encode('utf-8') for fixture in json.load(fixture_file)]
        with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
            temp.write('\n'.join(['gettext("{}")'.format(string) for string in strings]))
            temp.flush()
            return super(Command, self).handle(*args, **kwargs)
|
import json
import codecs
import tempfile
from django.core.management.commands.makemessages import Command as BaseCommand
class Command(BaseCommand):
    """ Extend the makemessages to include some of the fixtures """
    # (app, fixture file) pairs whose 'name' fields should be exposed to
    # gettext extraction.
    fixtures = [
        ('bb_projects', 'project_data.json'),
        ('bb_tasks', 'skills.json'),
        ('geo', 'geo_data.json'),
    ]
    def handle(self, *args, **kwargs):
        # Write every fixture 'name' value into a temporary .py file of
        # pgettext() calls (context "<app>-fixtures") inside the scanned
        # package, then run the normal makemessages pass to extract them.
        with tempfile.NamedTemporaryFile(dir='bluebottle', suffix='.py') as temp:
            for app, file in self.fixtures:
                with open('bluebottle/{}/fixtures/{}'.format(app, file)) as fixture_file:
                    for string in [
                            fixture['fields']['name'].encode('utf-8')
                            for fixture
                            in json.load(fixture_file)]:
                        temp.write('pgettext("{}-fixtures", "{}")\n'.format(app, string))
            temp.flush()
            return super(Command, self).handle(*args, **kwargs)
|
Add a context to the fixture translations
|
Add a context to the fixture translations
|
Python
|
bsd-3-clause
|
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
|
cea2d67bf2f806c295a6c03894efa5c8bc0644a1
|
steamplaytime/app.py
|
steamplaytime/app.py
|
class App(object):
    """A Steam app plus its recorded playtime history."""
    def __init__(self, ID, name):
        self.ID = ID
        self.name = name
        self.date = []      # timestamp of each playtime record
        self.minutes = []   # cumulative minutes played at each record
        self.last_day = []  # minutes played since the previous record
    def id_str(self):
        # String form of the app id, used to build the SQL below.
        return str(self.ID)
    def get_db_playtime(self, cursor):
        """Fill date/minutes/last_day from the playtime_forever table."""
        # NOTE(review): query built by string concatenation; appid is
        # internal here, but a parameterised query would be safer.
        query = 'SELECT time_of_record, minutes_played, appid FROM ' \
                + 'playtime_forever WHERE appid=\'' + self.id_str() \
                + '\' ORDER BY time_of_record'
        cursor.execute(query)
        previous = 0
        # NOTE(review): the first record diffs against 0, so the entire
        # cumulative total lands in last_day[0] -- the "spike" bug fixed
        # in a later revision.
        for row in cursor:
            self.date.append(row[0])
            self.minutes.append(row[1])
            self.last_day.append(row[1] - previous)
            previous = row[1]
|
class App(object):
    """A Steam app plus its recorded playtime history."""
    def __init__(self, ID, name):
        self.ID = ID
        self.name = name
        self.date = []      # timestamp of each playtime record
        self.minutes = []   # cumulative minutes played at each record
        self.last_day = []  # minutes played since the previous record
    def id_str(self):
        """Return the app id as a string (used to build the SQL below)."""
        return str(self.ID)
    def get_db_playtime(self, cursor):
        """Fill date/minutes/last_day from the playtime_forever table.

        Records are read in time order.  The first record gets a
        last_day of 0 so the initial cumulative total does not show up
        as a one-day spike in the graph.
        """
        # NOTE(review): query built by string concatenation; appid is
        # internal here, but a parameterised query would be safer.
        query = 'SELECT time_of_record, minutes_played, appid FROM ' \
                + 'playtime_forever WHERE appid=\'' + self.id_str() \
                + '\' ORDER BY time_of_record'
        cursor.execute(query)
        previous = 0
        for row in cursor:
            self.date.append(row[0])
            if not self.minutes:
                # First record: no previous record to diff against.
                # BUG FIX: was the Python 2-only literal 0L (a syntax
                # error on Python 3); plain 0 behaves the same.
                self.last_day.append(0)
            else:
                self.last_day.append(row[1] - previous)
            previous = row[1]
            self.minutes.append(row[1])
|
Fix abnormally big spikes in graph
|
FIX: Fix abnormally big spikes in graph
|
Python
|
mit
|
fsteffek/steamplog
|
675f5a269859f1e38419b23a82b732f22f858b74
|
setup.py
|
setup.py
|
from distutils.core import setup
# Packaging metadata for the sqlite_object library.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools.setup is a drop-in replacement.
setup(
    name="sqlite_object",
    version="0.3.3",
    author_email="luke@hospadaruk.org",
    description="sqlite-backed collection objects",
    author="Luke Hospadaruk",
    url="https://github.com/hospadar/sqlite_object",
    packages=["sqlite_object"],
)
|
from distutils.core import setup
# Packaging metadata for the sqlite_object fork.
# NOTE(review): distutils is deprecated and removed in Python 3.12;
# setuptools.setup is a drop-in replacement.
setup(
    name="sqlite_object",
    version="0.3.3",
    author_email="matt@genges.com",
    description="sqlite-backed collection objects",
    author="Matt Stancliff via originally Luke Hospadaruk",
    url="https://github.com/mattsta/sqlite_object",
    packages=["sqlite_object"],
)
|
Update package details to point to my repo
|
Update package details to point to my repo
Is this right? I guess it's right since I'm taking over
responsibility for this fork. Would be nice if the package
ecosystem had a full "history of ownership" feature instead
of just overwriting everything in your own name?
|
Python
|
mit
|
hospadar/sqlite_object
|
8e14f3a7d40d386185d445afc18e6add57cd107e
|
LR/lr/lib/helpers.py
|
LR/lr/lib/helpers.py
|
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
# Import helpers as desired, or define your own, ie:
#from webhelpers.html.tags import checkbox, password
def importModuleFromFile(fullpath):
"""Loads and returns module defined by the file path. Returns None if file could
not be loaded"""
import os
import sys
import logging
log = logging.getLogger(__name__)
sys.path.append(os.path.dirname(fullpath))
module = None
try:
module = __import__(os.path.splitext(os.path.basename(fullpath))[0])
except Exception as ex:
log.exception("Failed to load module:\n"+ex)
finally:
del sys.path[-1]
return module
def convertToISO8601UTC (datetime=None):
if datetime != None:
return (datetime - datetime.utcoffset()).replace(tzinfo=None)
return datetime
def convertToISO8601Zformat(datetime=None):
if datetime != None:
return ((datetime - datetime.utcoffset()).replace(tzinfo=None)).isoformat() + "Z"
return datetime
|
from datetime import datetime
import time
"""Helper functions
Consists of functions to typically be used within templates, but also
available to Controllers. This module is available to templates as 'h'.
"""
# Import helpers as desired, or define your own, ie:
#from webhelpers.html.tags import checkbox, password
def importModuleFromFile(fullpath):
"""Loads and returns module defined by the file path. Returns None if file could
not be loaded"""
import os
import sys
import logging
log = logging.getLogger(__name__)
sys.path.append(os.path.dirname(fullpath))
module = None
try:
module = __import__(os.path.splitext(os.path.basename(fullpath))[0])
except Exception as ex:
log.exception("Failed to load module:\n"+ex)
finally:
del sys.path[-1]
return module
def convertToISO8601UTC(dateTimeArg=None):
    """Convert a naive local-time ``datetime`` to its UTC equivalent.

    Non-datetime values (including ``None``) are passed through unchanged
    so callers can feed optional values straight in.

    NOTE(review): the local->UTC conversion goes through ``time.mktime``
    and therefore depends on the process's local timezone; sub-second
    precision is dropped by ``timetuple()`` — confirm that is acceptable.
    """
    # isinstance() already returns a bool; comparing "== True" was noise.
    if isinstance(dateTimeArg, datetime):
        return datetime.utcfromtimestamp(time.mktime(dateTimeArg.timetuple()))
    return dateTimeArg
def convertToISO8601Zformat(dateTimeArg=None):
    """Render *dateTimeArg* as an ISO 8601 UTC string with a 'Z' suffix.

    Non-datetime values (including ``None``) are returned unchanged.
    """
    if isinstance(dateTimeArg, datetime):
        return convertToISO8601UTC(dateTimeArg).isoformat() + "Z"
    return dateTimeArg
def nowToISO8601Zformat():
    """Return the current time as an ISO 8601 UTC 'Z'-suffixed string."""
    return convertToISO8601Zformat(datetime.now())
|
Add method to return the current time in complete ISO 8601 UTC format
|
Add method to return the current time in complete ISO 8601 UTC format
|
Python
|
apache-2.0
|
jimklo/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry,jimklo/LearningRegistry,jimklo/LearningRegistry,LearningRegistry/LearningRegistry
|
9e9256a65afa8569950ca344b3d074afcd6293c5
|
flocker/cli/test/test_deploy_script.py
|
flocker/cli/test/test_deploy_script.py
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Unit tests for the implementation ``flocker-deploy``.
"""
from twisted.trial.unittest import TestCase, SynchronousTestCase
from ...testtools import FlockerScriptTestsMixin, StandardOptionsTestsMixin
from ..script import DeployScript, DeployOptions
class FlockerDeployTests(FlockerScriptTestsMixin, TestCase):
"""Tests for ``flocker-deploy``."""
script = DeployScript
options = DeployOptions
command_name = u'flocker-deploy'
class DeployOptionsTests(StandardOptionsTestsMixin, SynchronousTestCase):
"""Tests for :class:`DeployOptions`."""
options = DeployOptions
def test_custom_configs(self):
"""Custom config files can be specified."""
options = self.options()
options.parseOptions([b"/path/somefile.json", b"/path/anotherfile.json"])
self.assertEqual(options, {deploy: b"/path/somefile.json", app: b"/path/anotherfile.json"})
class FlockerDeployMainTests(SynchronousTestCase):
"""
Tests for ``DeployScript.main``.
"""
def test_success(self):
"""
``DeployScript.main`` returns ``True`` on success.
"""
script = DeployScript()
self.assertTrue(script.main(reactor=object(), options={}))
|
# Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Unit tests for the implementation ``flocker-deploy``.
"""
from twisted.trial.unittest import TestCase, SynchronousTestCase
from ...testtools import FlockerScriptTestsMixin, StandardOptionsTestsMixin
from ..script import DeployScript, DeployOptions
class FlockerDeployTests(FlockerScriptTestsMixin, TestCase):
"""Tests for ``flocker-deploy``."""
script = DeployScript
options = DeployOptions
command_name = u'flocker-deploy'
class DeployOptionsTests(StandardOptionsTestsMixin, SynchronousTestCase):
"""Tests for :class:`DeployOptions`."""
options = DeployOptions
def test_custom_configs(self):
"""Custom config files can be specified."""
options = self.options()
options.parseOptions([b"/path/somefile.json", b"/path/anotherfile.json"])
self.assertEqual(options, {deploy: b"/path/somefile.json", app: b"/path/anotherfile.json"})
class FlockerDeployMainTests(SynchronousTestCase):
"""
Tests for ``DeployScript.main``.
"""
def test_deferred_result(self):
"""
``DeployScript.main`` returns a ``Deferred`` on success.
"""
script = DeployScript()
dummy_reactor = object()
options = {}
self.assertIs(
None,
self.successResultOf(script.main(dummy_reactor, options))
)
|
Test for a deferred result of DeployScript.main
|
Test for a deferred result of DeployScript.main
|
Python
|
apache-2.0
|
moypray/flocker,LaynePeng/flocker,1d4Nf6/flocker,LaynePeng/flocker,AndyHuu/flocker,hackday-profilers/flocker,LaynePeng/flocker,agonzalezro/flocker,achanda/flocker,agonzalezro/flocker,Azulinho/flocker,adamtheturtle/flocker,beni55/flocker,agonzalezro/flocker,mbrukman/flocker,wallnerryan/flocker-profiles,mbrukman/flocker,runcom/flocker,w4ngyi/flocker,hackday-profilers/flocker,adamtheturtle/flocker,wallnerryan/flocker-profiles,runcom/flocker,lukemarsden/flocker,wallnerryan/flocker-profiles,lukemarsden/flocker,hackday-profilers/flocker,jml/flocker,mbrukman/flocker,AndyHuu/flocker,beni55/flocker,moypray/flocker,jml/flocker,runcom/flocker,moypray/flocker,beni55/flocker,Azulinho/flocker,adamtheturtle/flocker,achanda/flocker,w4ngyi/flocker,jml/flocker,1d4Nf6/flocker,AndyHuu/flocker,Azulinho/flocker,lukemarsden/flocker,achanda/flocker,1d4Nf6/flocker,w4ngyi/flocker
|
16c0a0341ad61b164b5d2bf750b6f5319c74b245
|
refresh_html.py
|
refresh_html.py
|
import os
template_header = """<html>
<head><title>Littlefield Charts</title></head>
<body>
<table>"""
template_footer = """</table>
<p><a href="production.csv">Download production data</a></p>
<p><a href="rankings.csv">Download latest rankings</a></p>
</body>
</html>"""
root = os.path.abspath(os.path.dirname(__file__))
os.chdir(root) # Just to make sure
files = os.listdir(os.getcwd())
charts = [f for f in files if f.endswith('.png')]
charts.sort()
img_tags = []
for c in charts:
img = "<tr><div style=\"text-align: center; background: #8EC5EF;\">%s</div><img src=\"%s\" /></tr>" % (c[:-4], c)
img_tags.append(img)
rows = '\n'.join(img_tags)
template = "%s%s%s" % (template_header, rows, template_footer)
with open('index.html', 'wb') as f:
f.write(template)
|
import datetime
import os
# Timestamp shown on the generated page so viewers know when the data
# was last collected.
now = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
template_header = """<html>
<head><title>Littlefield Charts</title></head>
<body>
<p>Data last collected: %s</p>
<table>""" % now
template_footer = """</table>
<p><a href="production.csv">Download production data</a></p>
<p><a href="rankings.csv">Download latest rankings</a></p>
</body>
</html>"""
# Work from the directory containing this script so relative paths resolve.
root = os.path.abspath(os.path.dirname(__file__))
os.chdir(root)  # Just to make sure
# One table row per chart image, sorted by filename.
charts = sorted(f for f in os.listdir(os.getcwd()) if f.endswith('.png'))
img_tags = []
for c in charts:
    img = "<tr><div style=\"text-align: center; background: #8EC5EF;\">%s</div><img src=\"%s\" /></tr>" % (c[:-4], c)
    img_tags.append(img)
rows = '\n'.join(img_tags)
template = "%s%s%s" % (template_header, rows, template_footer)
# Open in text mode: `template` is a str, and writing a str to a file
# opened with 'wb' raises TypeError on Python 3.
with open('index.html', 'w') as f:
    f.write(template)
|
Add current date to generated HTML
|
Add current date to generated HTML
|
Python
|
mit
|
eallrich/littlefield,eallrich/littlefield
|
c2fb76dfa3b6b7a7723bb667a581c6f583710d89
|
lsync/templates.py
|
lsync/templates.py
|
#!/usr/bin/python
settings = """settings = {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd-status.log",
statusInterval = 5,
pidfile = "/var/run/lsyncd.pid"
}
"""
sync = """sync{
default.rsync,
source="%(source)s",
target="%(target)s",
rsyncOps={"%(flags)s", "-e", "/usr/bin/ssh -i /root/.ssh/id_rsa.lsyncd -o StrictHostKeyChecking=no"}
}
"""
|
#!/usr/bin/python
settings = """-- This file is now generated by a simple config generator.
-- Just run http://github.com/rcbau/hacks/lsync/generator.py from the
-- /etc/lsync directory and pipe the output to /etc/lsync/lsyncd.conf.lua
settings = {
logfile = "/var/log/lsyncd/lsyncd.log",
statusFile = "/var/log/lsyncd/lsyncd-status.log",
statusInterval = 5,
pidfile = "/var/run/lsyncd.pid"
}
"""
sync = """sync{
default.rsync,
source="%(source)s",
target="%(target)s",
rsyncOps={"%(flags)s", "-e", "/usr/bin/ssh -i /root/.ssh/id_rsa.lsyncd -o StrictHostKeyChecking=no"}
}
"""
|
Add a simple block comment explaining what happens.
|
Add a simple block comment explaining what happens.
|
Python
|
apache-2.0
|
rcbau/hacks,rcbau/hacks,rcbau/hacks
|
ad1e635688dffe5a5ba3f7f30f31d804f695d201
|
string/anagram.py
|
string/anagram.py
|
# Return True if two strings are anagrams of one another
def is_anagram(str_one, str_two):
    """Return True when *str_one* and *str_two* are anagrams.

    Comparison is case-insensitive. Fixes two bugs in the previous
    version: ``list(x).sort()`` sorts in place and returns ``None`` (so
    the following ``"".join`` raised TypeError), and the return used the
    undefined lowercase names ``true``/``false``.
    """
    # lower case both strings to account for case insensitivity
    a = str_one.lower()
    b = str_two.lower()
    # Anagrams contain exactly the same multiset of characters, which is
    # equivalent to their sorted character lists being equal.
    return sorted(a) == sorted(b)
|
# Return True if two strings are anagrams of one another
def is_anagram(str_one, str_two):
    """Print and return whether the two strings are anagrams.

    Comparison is case-insensitive. The docstring contract says the
    result is *returned*, but the previous version only printed it (and
    used Python-2-only ``print True`` syntax); the boolean is now also
    returned while keeping the printed output.
    """
    # lower case both strings to account for case insensitivity
    a = str_one.lower()
    b = str_two.lower()
    # Equal sorted character lists <=> anagrams.
    result = sorted(a) == sorted(b)
    print(result)
    return result
# test cases
word_one = "pea"
word_two = "Ape"
is_anagram(word_one, word_two)  # prints/returns True
word_three = "arm"
word_four = "mary"
is_anagram(word_three, word_four)  # prints/returns False
|
Debug and add test cases
|
Debug and add test cases
|
Python
|
mit
|
derekmpham/interview-prep,derekmpham/interview-prep
|
1bc61edde0e41ec3f2fe66758654b55ed51ec36a
|
test/test_repo.py
|
test/test_repo.py
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from asv import config
from asv import repo
def test_repo(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "https://github.com/spacetelescope/asv.git"
r = repo.get_repo(conf)
r.checkout("master")
r.checkout("gh-pages")
r.checkout("master")
hashes = r.get_hashes_from_range("ae0c27b65741..e6f382a704f7")
assert len(hashes) == 4
dates = [r.get_date(hash) for hash in hashes]
assert dates == sorted(dates)[::-1]
tags = r.get_tags()
for tag in tags:
r.get_date_from_tag(tag)
|
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from asv import config
from asv import repo
def _test_generic_repo(conf,
hash_range="ae0c27b65741..e6f382a704f7",
master="master",
branch="gh-pages"):
r = repo.get_repo(conf)
r.checkout(master)
r.checkout(branch)
r.checkout(master)
hashes = r.get_hashes_from_range(hash_range)
assert len(hashes) == 4
dates = [r.get_date(hash) for hash in hashes]
assert dates == sorted(dates)[::-1]
tags = r.get_tags()
for tag in tags:
r.get_date_from_tag(tag)
def test_repo_git(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "https://github.com/spacetelescope/asv.git"
_test_generic_repo(conf)
def test_repo_hg(tmpdir):
conf = config.Config()
conf.project = six.text_type(tmpdir.join("repo"))
conf.repo = "hg+https://bitbucket.org/nds-org/nds-labs"
_test_generic_repo(conf, hash_range="a8ca24ac6b77:9dc758deba8",
master="tip", branch="dev")
|
Add test for mercurial repo
|
Add test for mercurial repo
|
Python
|
bsd-3-clause
|
pv/asv,waylonflinn/asv,airspeed-velocity/asv,pv/asv,qwhelan/asv,mdboom/asv,waylonflinn/asv,waylonflinn/asv,ericdill/asv,giltis/asv,ericdill/asv,airspeed-velocity/asv,mdboom/asv,qwhelan/asv,giltis/asv,airspeed-velocity/asv,qwhelan/asv,edisongustavo/asv,mdboom/asv,spacetelescope/asv,edisongustavo/asv,ericdill/asv,pv/asv,ericdill/asv,giltis/asv,spacetelescope/asv,spacetelescope/asv,mdboom/asv,qwhelan/asv,pv/asv,edisongustavo/asv,spacetelescope/asv,airspeed-velocity/asv
|
f668f6066864b1efe3863cdb43b8fee4e08a312b
|
test/test_mk_dirs.py
|
test/test_mk_dirs.py
|
from __future__ import absolute_import, print_function
from ..pyautoupdate.launcher import Launcher
from .pytest_makevers import create_update_dir
import os
def test_mk_dirs(create_update_dir):
"""Test that ensures that downlaods directory is created properly"""
assert not os.path.isdir(Launcher.updatedir)
launch = Launcher('MUST_HAVE_SOMETHING', 'urlurlurl')
launch._reset_update_files()
assert os.path.isdir(Launcher.updatedir)
|
from __future__ import absolute_import, print_function
from ..pyautoupdate.launcher import Launcher
from .pytest_makevers import create_update_dir
import os
def test_mk_dirs(create_update_dir):
    """Test that ensures that the downloads directory is created properly.

    ``create_update_dir`` is presumably a pytest fixture (imported from
    ``pytest_makevers``) that guarantees a clean starting state — TODO
    confirm against that module.
    """
    # Precondition: the update directory must not exist yet.
    assert not os.path.isdir(Launcher.updatedir)
    # Constructor arguments appear to be placeholders; only
    # _reset_update_files matters for this test.
    launch = Launcher('MUST_HAVE_SOMETHING', 'urlurlurl')
    launch._reset_update_files()
    assert os.path.isdir(Launcher.updatedir)
    # Clean up the created directory so later tests start pristine
    # (the commit note says this should move into the fixture).
    os.rmdir(Launcher.updatedir)
|
Remove Launcher.updatedir after mkdirs test
|
Remove Launcher.updatedir after mkdirs test
Should go into fixture later
|
Python
|
lgpl-2.1
|
rlee287/pyautoupdate,rlee287/pyautoupdate
|
b5d812504924af2e2781f4be63a6191e5c47879d
|
test_project/urls.py
|
test_project/urls.py
|
"""test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
TEST_TEMPLATE = getattr(settings, 'TEST_TEMPLATE', 'test.html')
urlpatterns = [
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
url(r'^$', TemplateView.as_view(template_name=TEST_TEMPLATE)),
]
|
"""test_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url, include
from django.conf import settings
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url('^accounts/', include('django.contrib.auth.urls')),
url(r'^admin/', admin.site.urls),
]
TEST_TEMPLATES = getattr(
settings, 'TEST_TEMPLATES', [(r'^$', 'test.html')])
for path, template in TEST_TEMPLATES:
urlpatterns.append(url(path, TemplateView.as_view(template_name=template)))
|
Support multiple templates in TEST_TEMPLATES setting.
|
Support multiple templates in TEST_TEMPLATES setting.
Unit tests need to be able to test redirects and other features
involving multiple web pages. This commit changes the singleton
TEST_TEMPLATE setting to TEST_TEMPLATES, which is a list of
path, template tuples.
|
Python
|
bsd-3-clause
|
nimbis/django-selenium-testcase,nimbis/django-selenium-testcase
|
d0b2b0aa3674fb6b85fd788e88a3a54f4cc22046
|
pytablewriter/_excel_workbook.py
|
pytablewriter/_excel_workbook.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import xlsxwriter
class ExcelWorkbookXlsx(object):
    """Thin wrapper around an ``xlsxwriter.Workbook``.

    Owns the workbook handle for the lifetime of the instance and closes
    it on ``close()`` or garbage collection.
    """
    @property
    def workbook(self):
        # Underlying xlsxwriter.Workbook, or None after close().
        return self.__workbook
    @property
    def file_path(self):
        # Path the workbook was opened with, or None after close().
        return self.__file_path
    def __init__(self, file_path):
        """Create/open the workbook file at *file_path*."""
        self.open(file_path)
    def __del__(self):
        # Best-effort close when the instance is garbage-collected.
        self.close()
    def open(self, file_path):
        """Open a new workbook at *file_path* (replaces any prior state)."""
        self.__file_path = file_path
        self.__workbook = xlsxwriter.Workbook(file_path)
    def close(self):
        """Flush and close the workbook; safe to call more than once."""
        if self.workbook is None:
            return
        self.__workbook.close()
        self.__clear()
    def add_worksheet(self, worksheet_name):
        """Add and return a worksheet named *worksheet_name*."""
        worksheet = self.__workbook.add_worksheet(worksheet_name)
        return worksheet
    def __clear(self):
        # Drop references so close() is idempotent.
        self.__workbook = None
        self.__file_path = None
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import abc
import six
import xlsxwriter
@six.add_metaclass(abc.ABCMeta)
class ExcelWorkbookInterface(object):
@abc.abstractproperty
def workbook(self):
pass
@abc.abstractproperty
def file_path(self):
pass
@abc.abstractmethod
def open(self, file_path):
pass
@abc.abstractmethod
def close(self):
pass
class ExcelWorkbook(ExcelWorkbookInterface):
@property
def workbook(self):
return self._workbook
@property
def file_path(self):
return self._file_path
def _clear(self):
self._workbook = None
self._file_path = None
class ExcelWorkbookXlsx(ExcelWorkbook):
def __init__(self, file_path):
self.open(file_path)
def __del__(self):
self.close()
def open(self, file_path):
self._file_path = file_path
self._workbook = xlsxwriter.Workbook(file_path)
def close(self):
if self.workbook is None:
return
self._workbook.close()
self._clear()
def add_worksheet(self, worksheet_name):
worksheet = self.workbook.add_worksheet(worksheet_name)
return worksheet
|
Add an interface class and a base class for Excel Workbook
|
Add an interface class and a base class for Excel Workbook
|
Python
|
mit
|
thombashi/pytablewriter
|
15c474fb25479f044e0199a26e5f0ec95c2bb0ec
|
tests/test_api.py
|
tests/test_api.py
|
import json
def test_get_user_list(client, user):
response = client.get("/api/v1/users")
user_list = json.loads(str(response.data))
user_data = user_list["data"][0]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
def test_get_user(client, user):
response = client.get("/api/v1/users/" + str(user.id))
user_data = json.loads(str(response.data))
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
|
import json
def test_get_user_list(client, user):
response = client.get("/api/v1/users")
user_list = json.loads(response.get_data().decode("utf-8"))
user_data = user_list["data"][0]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
def test_get_user(client, user):
response = client.get("/api/v1/users/" + str(user.id))
user_data = json.loads(response.get_data().decode("utf-8"))["data"]
assert user_data["github_name"] == user.github_name
assert user_data["github_url"] == user.github_url
|
Fix tests to correctly decode utf-8 bytestrings.
|
Fix tests to correctly decode utf-8 bytestrings.
|
Python
|
mit
|
PythonClutch/python-clutch,PythonClutch/python-clutch,PythonClutch/python-clutch
|
9247021be1dc60acd11104ec1de04ea5718c054c
|
tests/test_config.py
|
tests/test_config.py
|
import sys
import unittest
from skeletor.config import Config
from .helpers import nostdout
class ConfigTests(unittest.TestCase):
""" Argument Passing & Config Tests. """
def setUp(self):
self._old_sys_argv = sys.argv
sys.argv = [self._old_sys_argv[0].replace('nosetests', 'skeletor')]
def tearDown(self):
sys.argv = self._old_sys_argv
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def test_name_gets_set(self):
with nostdout():
sys.argv = ['', '-n', 'hello_world']
c = Config()
self.assertEquals(c.project_name, 'hello_world')
|
import sys
import unittest
from skeletor.config import Config
from .helpers import nostdout
class ConfigTests(unittest.TestCase):
""" Argument Passing & Config Tests. """
def setUp(self):
self._old_sys_argv = sys.argv
sys.argv = [self._old_sys_argv[0].replace('nosetests', 'skeletor')]
def tearDown(self):
sys.argv = self._old_sys_argv
def should_exit_with_no_arguments(self):
try:
with nostdout():
Config()
except SystemExit:
assert True
def ensure_valid_project_name(self):
with nostdout():
sys.argv = ['', '-n', 'this_is_valid']
c = Config()
self.assertEquals(c.project_name, 'this_is_valid')
with nostdout():
sys.argv = ['', '-n', 'Thisisvalid']
c = Config()
self.assertEquals(c.project_name, 'Thisisvalid')
def should_exit_on_invalid_name(self):
try:
with nostdout():
sys.argv = ['', '-n', 'not-valid']
Config()
except SystemExit:
assert True
try:
with nostdout():
sys.argv = ['', '-n', 'not valid']
Config()
except SystemExit:
assert True
try:
with nostdout():
sys.argv = ['', '-n', 'not_valid-*']
Config()
except SystemExit:
assert True
|
Test for valid and invalid project names
|
Test for valid and invalid project names
|
Python
|
bsd-3-clause
|
krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio,krak3n/Facio
|
fc91e70bfa2d46ce923cdd3e2f2d591f8a5b367b
|
tests/test_person.py
|
tests/test_person.py
|
import unittest
from classes.person import Person
class PersonClassTest(unittest.TestCase):
pass
# def test_add_person_successfully(self):
# my_class_instance = Person()
# initial_person_count = len(my_class_instance.all_persons)
# staff_neil = my_class_instance.add_person("Neil Armstrong", "staff", "Y")
# self.assertTrue(staff_neil)
# new_person_count = len(my_class_instance.all_persons)
# self.assertEqual(new_person_count - initial_person_count, 1)
#
# def test_inputs_are_strings(self):
# with self.assertRaises(ValueError, msg='Only strings are allowed as input'):
# my_class_instance = Person()
# my_class_instance.add_person("Fellow", "Peter", 23)
#
# def test_wants_accommodation_default_is_N(self):
# my_class_instance = Person()
# my_class_instance.add_person("Fellow", "Peter", "Musonye")
# result = my_class_instance.all_persons
# self.assertEqual(result[0]['fellow']['peter musonye'], 'N', msg="The value of wants_accommodation should be N if it is not provided")
|
import unittest
from classes.person import Person
class PersonClassTest(unittest.TestCase):
def test_full_name_only_returns_strings(self):
with self.assertRaises(ValueError, msg='Only strings are allowed as names'):
my_class_instance = Person("staff", "Peter", "Musonye")
my_class_instance.full_name()
|
Add tests for class Person
|
Add tests for class Person
|
Python
|
mit
|
peterpaints/room-allocator
|
a98b8e78d48ce28e63ed0be2a9dbc008cc21ba97
|
pi_broadcast_service/rabbit.py
|
pi_broadcast_service/rabbit.py
|
import json
import pika
class Publisher(object):
def __init__(self, rabbit_url, exchange):
self._rabbit_url = rabbit_url
self._exchange = exchange
self._connection = pika.BlockingConnection(pika.URLParameters(self._rabbit_url))
self._channel = self._connection.channel()
def send(self, routing_key, message):
self._channel.basic_publish(
exchange=self._exchange,
routing_key=routing_key,
body=json.dumps(message))
|
import json
import pika
class Publisher(object):
    """Publish JSON-encoded messages to a RabbitMQ exchange via pika.

    A blocking connection and channel are opened at construction time
    and held until ``stop()`` is called.
    """
    def __init__(self, rabbit_url, exchange):
        self._rabbit_url = rabbit_url
        self._exchange = exchange
        # BlockingConnection gives synchronous publish semantics.
        self._connection = pika.BlockingConnection(pika.URLParameters(self._rabbit_url))
        self._channel = self._connection.channel()
    def send(self, routing_key, message):
        """Serialize *message* as JSON and publish it under *routing_key*."""
        self._channel.basic_publish(
            exchange=self._exchange,
            routing_key=routing_key,
            body=json.dumps(message))
    def stop(self):
        """Close the underlying connection (and with it the channel)."""
        self._connection.close()
|
Add a stop method to base class
|
Add a stop method to base class
|
Python
|
mit
|
projectweekend/Pi-Broadcast-Service
|
3964606d6f0e28b127af57b1d13c12b3352f861a
|
ggd/__main__.py
|
ggd/__main__.py
|
import sys
import argparse
from .__init__ import __version__
from . make_bash import add_make_bash
from . check_recipe import add_check_recipe
from . list_files import add_list_files
from . search import add_search
from . show_env import add_show_env
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog='ggd', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed version",
action="version",
version="%(prog)s " + str(__version__))
sub = parser.add_subparsers(title='[sub-commands]', dest='command')
sub.required = True
add_make_bash(sub)
add_check_recipe(sub)
add_list_files(sub)
add_search(sub)
add_show_env(sub)
args = parser.parse_args(args)
args.func(parser, args)
if __name__ == "__main__":
sys.exit(main() or 0)
|
import sys
import argparse
from .__init__ import __version__
from . make_bash import add_make_bash
from . check_recipe import add_check_recipe
from . list_files import add_list_files
from . search import add_search
from . show_env import add_show_env
from . install import add_install
from . uninstall import add_uninstall
def main(args=None):
if args is None:
args = sys.argv[1:]
parser = argparse.ArgumentParser(prog='ggd', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("-v", "--version", help="Installed version",
action="version",
version="%(prog)s " + str(__version__))
sub = parser.add_subparsers(title='[sub-commands]', dest='command')
sub.required = True
add_make_bash(sub)
add_check_recipe(sub)
add_list_files(sub)
add_search(sub)
add_show_env(sub)
add_install(sub)
add_uninstall(sub)
args = parser.parse_args(args)
args.func(parser, args)
if __name__ == "__main__":
sys.exit(main() or 0)
|
Add installer and uninstaller to main
|
Add installer and uninstaller to main
|
Python
|
mit
|
gogetdata/ggd-cli,gogetdata/ggd-cli
|
ca94513b3487232a2f9714ddc129d141c011b4af
|
dadd/master/admin.py
|
dadd/master/admin.py
|
from flask.ext.admin import Admin
from flask.ext.admin.contrib.sqla import ModelView
from dadd.master import models
class ProcessModelView(ModelView):
# Make the latest first
column_default_sort = ('start_time', True)
def __init__(self, session):
super(ProcessModelView, self).__init__(models.Process, session)
def admin(app):
admin = Admin(app)
session = models.db.session
admin.add_view(ProcessModelView(session))
admin.add_view(ModelView(models.Host, session))
admin.add_view(ModelView(models.Logfile, session))
|
from flask.ext.admin import Admin
from flask.ext.admin.contrib.sqla import ModelView
from dadd.master import models
class ProcessModelView(ModelView):
# Make the latest first
column_default_sort = ('start_time', True)
def __init__(self, session):
super(ProcessModelView, self).__init__(models.Process, session)
class LogfileModelView(ModelView):
# Make the latest first
column_default_sort = ('added_time', True)
def __init__(self, session):
super(LogfileModelView, self).__init__(models.Logfile, session)
def admin(app):
admin = Admin(app)
session = models.db.session
admin.add_view(ProcessModelView(session))
admin.add_view(LogfileModelView(session))
admin.add_view(ModelView(models.Host, session))
|
Sort the logfile by added time.
|
Sort the logfile by added time.
|
Python
|
bsd-3-clause
|
ionrock/dadd,ionrock/dadd,ionrock/dadd,ionrock/dadd
|
5cfb7a1b0feca5cd33f93447cfc43c1c944d4810
|
tests/test_dragon.py
|
tests/test_dragon.py
|
import pytest
from mugloar import dragon
def test_partition():
for solution in dragon.partition(20, 4, 0, 10):
print(solution)
assert abs(solution[0]) + abs(solution[1]) + abs(solution[2]) + abs(solution[3]) == 20
|
import pytest
from mugloar import dragon
@pytest.fixture
def dragon_instance():
return dragon.Dragon()
@pytest.fixture
def knight():
return [('endurance', 8), ('attack', 5), ('armor', 4), ('agility', 3)]
@pytest.fixture
def dragon_stats():
return 10, 10, 0, 0
def test_set_relative_stats(dragon_instance, dragon_stats, knight):
dragon_instance.set_relative_stats(dragon_stats, knight)
def test_partition():
for solution in dragon.partition(20, 4, 0, 10):
assert abs(solution[0]) + abs(solution[1]) + abs(solution[2]) + abs(solution[3]) == 20
|
Implement rudimentary unit tests for dragon class
|
Implement rudimentary unit tests for dragon class
|
Python
|
mit
|
reinikai/mugloar
|
3afe14ee6beb1a3177d929bacb20b3c4bb9363d7
|
tests/test_parser.py
|
tests/test_parser.py
|
import unittest
from xhtml2pdf.parser import pisaParser
from xhtml2pdf.context import pisaContext
_data = b"""
<!doctype html>
<html>
<title>TITLE</title>
<body>
BODY
</body>
</html>
"""
class TestCase(unittest.TestCase):
def testParser(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c, r)
def test_getFile(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c.getFile(None), None)
def test_height_as_list(self):
"""Asserts attributes like 'height: 10px !important" are parsed"""
c = pisaContext(".")
data = b"<p style='height: 10px !important;width: 10px !important'>test</p>"
r = pisaParser(data, c)
self.assertEqual(c, r)
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
|
import unittest
from xhtml2pdf.parser import pisaParser
from xhtml2pdf.context import pisaContext
_data = b"""
<!doctype html>
<html>
<title>TITLE</title>
<body>
BODY
</body>
</html>
"""
class TestCase(unittest.TestCase):
def testParser(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c, r)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_getFile(self):
c = pisaContext(".")
r = pisaParser(_data, c)
self.assertEqual(c.getFile(None), None)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_height_as_list(self):
"""Asserts attributes like 'height: 10px !important" are parsed"""
c = pisaContext(".")
data = b"<p style='height: 10px !important;width: 10px !important'>test</p>"
r = pisaParser(data, c)
self.assertEqual(c, r)
self.assertEqual(r.err, 0)
self.assertEqual(r.warn, 0)
def test_image_base64(self):
c = pisaContext(".")
data = b'<img src="data:image/gif;base64,R0lGODlhAQABAIAAAAUEBAAAACwAAAAAAQABAAACAkQBADs=">'
r = pisaParser(data, c)
self.assertEqual(r.warn, 0)
def buildTestSuite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
def main():
buildTestSuite()
unittest.main()
if __name__ == "__main__":
main()
|
Add tests for base64 image
|
Add tests for base64 image
|
Python
|
apache-2.0
|
trib3/xhtml2pdf,chrisglass/xhtml2pdf,jensadne/xhtml2pdf,tinjyuu/xhtml2pdf,xhtml2pdf/xhtml2pdf,orbitvu/xhtml2pdf,chrisglass/xhtml2pdf,trib3/xhtml2pdf,tinjyuu/xhtml2pdf,orbitvu/xhtml2pdf,jensadne/xhtml2pdf,xhtml2pdf/xhtml2pdf
|
432233a99d9036f358716b48a0e26054a7e217bf
|
SlugifyCommand.py
|
SlugifyCommand.py
|
# encoding: utf-8
'''This adds a "slugify" command to be invoked by Sublime Text. It is made
available as "Slugify" in the command palette by Default.sublime-commands.
Parts of these commands are borrowed from the sublime-slug package:
https://github.com/madeingnecca/sublime-slug
'''
from __future__ import unicode_literals
import sublime
import sublime_plugin
try:
# This import method works in Sublime Text 2.
from slugify import slugify
except ImportError:
# While this works in Sublime Text 3.
from .slugify import slugify
class SlugifyCommand(sublime_plugin.TextCommand):
separator = '-'
def run(self, edit):
def done(value):
self.separator = value
self.view.run_command('slugify_replace', {'separator': self.separator})
window = self.view.window()
window.show_input_panel('Separator', self.separator, done, None, None)
class SlugifyReplaceCommand(sublime_plugin.TextCommand):
def run(self, edit, separator):
regions = self.view.sel()
# Only run if there is a selection.
if len(regions) > 1 or not regions[0].empty():
for region in regions:
text = self.view.substr(region)
self.view.replace(edit, region, slugify(text, separator))
|
# encoding: utf-8
'''This adds a "slugify" command to be invoked by Sublime Text. It is made
available as "Slugify" in the command palette by Default.sublime-commands.
Parts of these commands are borrowed from the sublime-slug package:
https://github.com/madeingnecca/sublime-slug
'''
from __future__ import unicode_literals
import sublime
import sublime_plugin
# For this plugin to work on Windows, we need to include the path of the Sublime
# Text application itself to the import search path.
import os
import sys
sys.path.append(os.path.dirname(sys.executable))
try:
# This import method works in Sublime Text 2.
from slugify import slugify
except ImportError:
# While this works in Sublime Text 3.
from .slugify import slugify
class SlugifyCommand(sublime_plugin.TextCommand):
separator = '-'
def run(self, edit):
def done(value):
self.separator = value
self.view.run_command('slugify_replace', {'separator': self.separator})
window = self.view.window()
window.show_input_panel('Separator', self.separator, done, None, None)
class SlugifyReplaceCommand(sublime_plugin.TextCommand):
def run(self, edit, separator):
regions = self.view.sel()
# Only run if there is a selection.
if len(regions) > 1 or not regions[0].empty():
for region in regions:
text = self.view.substr(region)
self.view.replace(edit, region, slugify(text, separator))
|
Fix broken plugin on Windows.
|
Fix broken plugin on Windows.
|
Python
|
mit
|
alimony/sublime-slugify
|
c347e6e763b79a9c4af6d7776093ce9ed711c43d
|
monkeys/release.py
|
monkeys/release.py
|
from invoke import task, run
@task
def makerelease(ctx, version, local_only=False):
if not version:
raise Exception("You must specify a version!")
# FoodTruck assets.
print("Update node modules")
run("npm install")
print("Generating Wikked assets")
run("gulp")
if not local_only:
# Tag in Mercurial, which will then be used for PyPi version.
run("hg tag %s" % version)
# PyPi upload.
run("python setup.py sdist upload")
else:
print("Would tag repo with %s..." % version)
print("Would upload to PyPi...")
|
from invoke import task, run
@task
def makerelease(ctx, version, local_only=False):
if not version:
raise Exception("You must specify a version!")
# FoodTruck assets.
print("Update node modules")
run("npm install")
print("Generating Wikked assets")
run("gulp")
if not local_only:
# Tag in Mercurial, which will then be used for PyPi version.
run("hg tag %s" % version)
# PyPi upload.
run("python setup.py sdist bdist_wheel")
run("twine upload dist/Wikked-%s.tar.gz" % version)
else:
print("Would tag repo with %s..." % version)
print("Would upload to PyPi...")
|
Use `twine` to deploy Wikked to Pypi.
|
cm: Use `twine` to deploy Wikked to Pypi.
|
Python
|
apache-2.0
|
ludovicchabant/Wikked,ludovicchabant/Wikked,ludovicchabant/Wikked
|
cbe447825408d7178e1b4eb4bf981600001ada32
|
rymtracks/services/archiveorg.py
|
rymtracks/services/archiveorg.py
|
# -*- coding: utf-8 -*-
"""
This module contains Service implementation of Archive.org.
http://archive.org
"""
from . import Service, JSONMixin
from six import text_type
from tornado.httpclient import HTTPRequest
##############################################################################
class ArchiveOrg(JSONMixin, Service):
"""
Implementation of Service which is intended to parse Archive.org.
"""
def generate_request(self):
resource = self.url.rstrip("/").rpartition("/")[-1]
return HTTPRequest(
"http://archive.org/metadata/" + resource + "/files/",
use_gzip=True,
headers=dict(Accept="application/json")
)
def parse(self, response):
converted_response = self.convert_response(response)
tracks = {}
required_fields = ("title", "track", "length", "album")
for file_ in converted_response["result"]:
if file_.get("source") != "original":
continue
if not all(field in file_ for field in required_fields):
continue
track = int(file_["track"])
title = text_type(file_["title"])
length = text_type(file_["length"])
if ":" not in length:
length = int(float(length))
length = self.second_to_timestamp(length)
length = self.normalize_track_length(length)
tracks[track] = (title, length)
if not tracks:
raise Exception("Empty list")
return tuple(data for track, data in sorted(tracks.iteritems()))
|
# -*- coding: utf-8 -*-
"""
This module contains Service implementation of Archive.org.
http://archive.org
"""
from . import Service, JSONMixin
from six import text_type
from tornado.httpclient import HTTPRequest
##############################################################################
class ArchiveOrg(JSONMixin, Service):
"""
Implementation of Service which is intended to parse Archive.org.
"""
def generate_request(self):
resource = self.url.rstrip("/").rpartition("/")[-1]
return HTTPRequest(
"http://archive.org/metadata/" + resource + "/files/",
use_gzip=True,
headers=dict(Accept="application/json")
)
def parse(self, response):
converted_response = self.convert_response(response)
tracks = {}
required_fields = ("title", "track", "album")
for file_ in converted_response["result"]:
if file_.get("source") != "original":
continue
if not all(field in file_ for field in required_fields):
continue
track = int(file_["track"])
title = text_type(file_["title"])
length = text_type(file_.get("length", ""))
if length and ":" not in length:
length = int(float(length))
length = self.second_to_timestamp(length)
length = self.normalize_track_length(length)
tracks[track] = (title, length)
if not tracks:
raise Exception("Empty list")
return tuple(data for track, data in sorted(tracks.iteritems()))
|
Put weaker requirements on Archive.org service
|
Put weaker requirements on Archive.org service
|
Python
|
mit
|
9seconds/rymtracks
|
bcb383612625d9a59f9e5b4174e44700b26bd0e5
|
crosscompute/macros/security.py
|
crosscompute/macros/security.py
|
from datetime import datetime, timedelta
from invisibleroads_macros_security import make_random_string
class DictionarySafe(dict):
def __init__(self, key_length):
self.key_length = key_length
def put(self, value, time_in_seconds=None):
while True:
key = make_random_string(self.key_length)
try:
self[key]
except KeyError:
break
self.set(key, value, time_in_seconds)
return key
def set(self, key, value, time_in_seconds=None):
self[key] = value, get_expiration_datetime(time_in_seconds)
def get(self, key):
value, expiration_datetime = self[key]
if datetime.now() > expiration_datetime:
del self[key]
raise KeyError
return value
def get_expiration_datetime(time_in_seconds):
if not time_in_seconds:
return
return datetime.now() + timedelta(seconds=time_in_seconds)
def evaluate_expression(expression_string, value_by_name):
# https://realpython.com/python-eval-function
code = compile(expression_string, '<string>', 'eval')
for name in code.co_names:
if name not in value_by_name:
raise NameError(f'{name} not defined')
return eval(code, {'__builtins__': {}}, value_by_name)
|
from datetime import datetime, timedelta
from invisibleroads_macros_security import make_random_string
class DictionarySafe(dict):
def __init__(self, key_length):
self.key_length = key_length
def put(self, value, time_in_seconds=None):
while True:
key = make_random_string(self.key_length)
try:
self[key]
except KeyError:
break
self.set(key, value, time_in_seconds)
return key
def set(self, key, value, time_in_seconds=None):
self[key] = value, get_expiration_datetime(time_in_seconds)
def get(self, key):
value, expiration_datetime = self[key]
if expiration_datetime and datetime.now() > expiration_datetime:
del self[key]
raise KeyError
return value
def get_expiration_datetime(time_in_seconds):
if not time_in_seconds:
return
return datetime.now() + timedelta(seconds=time_in_seconds)
def evaluate_expression(expression_string, value_by_name):
# https://realpython.com/python-eval-function
code = compile(expression_string, '<string>', 'eval')
for name in code.co_names:
if name not in value_by_name:
raise NameError(f'{name} not defined')
return eval(code, {'__builtins__': {}}, value_by_name)
|
Support case when expiration_datetime is None
|
Support case when expiration_datetime is None
|
Python
|
mit
|
crosscompute/crosscompute,crosscompute/crosscompute,crosscompute/crosscompute,crosscompute/crosscompute
|
4bcb7efc2c95280323995cb0de27cf6449f060b8
|
external_tools/src/main/python/images/common.py
|
external_tools/src/main/python/images/common.py
|
#!/usr/bin/python
splitString='images/clean/impc/'
|
#!/usr/bin/python
#splitString='images/clean/impc/'
splitString='images/holding_area/impc/'
|
Change to use holding_area directory
|
Change to use holding_area directory
|
Python
|
apache-2.0
|
mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData,mpi2/PhenotypeData
|
7db970b508c9d7ea3d659fe8b2fa5a852f16abd1
|
tcconfig/_common.py
|
tcconfig/_common.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import dataproperty
import six
from ._error import NetworkInterfaceNotFoundError
def verify_network_interface(device):
try:
import netifaces
except ImportError:
return
if device not in netifaces.interfaces():
raise NetworkInterfaceNotFoundError(
"network interface not found: " + device)
def sanitize_network(network):
"""
:return: Network string
:rtype: str
:raises ValueError: if the network string is invalid.
"""
import ipaddress
if dataproperty.is_empty_string(network):
return ""
try:
ipaddress.IPv4Address(six.u(network))
return network + "/32"
except ipaddress.AddressValueError:
pass
ipaddress.IPv4Network(six.u(network)) # validate network str
return network
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import dataproperty
import six
from ._error import NetworkInterfaceNotFoundError
def verify_network_interface(device):
try:
import netifaces
except ImportError:
return
if device not in netifaces.interfaces():
raise NetworkInterfaceNotFoundError(
"network interface not found: " + device)
def sanitize_network(network):
"""
:return: Network string
:rtype: str
:raises ValueError: if the network string is invalid.
"""
import ipaddress
if dataproperty.is_empty_string(network):
return ""
if network == "anywhere":
return "0.0.0.0/0"
try:
ipaddress.IPv4Address(six.u(network))
return network + "/32"
except ipaddress.AddressValueError:
pass
ipaddress.IPv4Network(six.u(network)) # validate network str
return network
|
Add special case for "anywhere"
|
Add special case for "anywhere"
|
Python
|
mit
|
thombashi/tcconfig,thombashi/tcconfig
|
9495a43e0797d1a089df644663900957cadc3ac0
|
tests/agents_tests/test_iqn.py
|
tests/agents_tests/test_iqn.py
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import * # NOQA
standard_library.install_aliases() # NOQA
import chainer.functions as F
import chainer.links as L
import basetest_dqn_like as base
import chainerrl
from chainerrl.agents import iqn
class TestIQNOnDiscreteABC(base._TestDQNOnDiscreteABC):
def make_q_func(self, env):
obs_size = env.observation_space.low.size
hidden_size = 64
return iqn.ImplicitQuantileQFunction(
psi=chainerrl.links.Sequence(
L.Linear(obs_size, hidden_size),
F.relu,
),
phi=iqn.CosineBasisLinearReLU(64, hidden_size),
f=L.Linear(hidden_size, env.action_space.n),
)
def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
return iqn.IQN(
q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
replay_start_size=100, target_update_interval=100)
|
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
from builtins import * # NOQA
standard_library.install_aliases() # NOQA
import chainer.functions as F
import chainer.links as L
from chainer import testing
import basetest_dqn_like as base
import chainerrl
from chainerrl.agents import iqn
@testing.parameterize(*testing.product({
'quantile_thresholds_N': [1, 5],
'quantile_thresholds_N_prime': [1, 7],
}))
class TestIQNOnDiscreteABC(base._TestDQNOnDiscreteABC):
def make_q_func(self, env):
obs_size = env.observation_space.low.size
hidden_size = 64
return iqn.ImplicitQuantileQFunction(
psi=chainerrl.links.Sequence(
L.Linear(obs_size, hidden_size),
F.relu,
),
phi=iqn.CosineBasisLinearReLU(64, hidden_size),
f=L.Linear(hidden_size, env.action_space.n),
)
def make_dqn_agent(self, env, q_func, opt, explorer, rbuf, gpu):
return iqn.IQN(
q_func, opt, rbuf, gpu=gpu, gamma=0.9, explorer=explorer,
replay_start_size=100, target_update_interval=100,
quantile_thresholds_N=self.quantile_thresholds_N,
quantile_thresholds_N_prime=self.quantile_thresholds_N_prime,
)
|
Test multiple values of N and N_prime
|
Test multiple values of N and N_prime
|
Python
|
mit
|
toslunar/chainerrl,toslunar/chainerrl
|
eaa3d6094c92eb17f5074279a0c23ec363cddd1b
|
rnacentral/portal/models/secondary_structure.py
|
rnacentral/portal/models/secondary_structure.py
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
class SecondaryStructure(models.Model):
id = models.AutoField(primary_key=True)
accession = models.ForeignKey(
'Accession',
db_column='rnc_accession_id',
to_field='accession',
related_name='secondary_structure',
)
secondary_structure = models.TextField()
md5 = models.CharField(max_length=32, db_index=True)
class Meta:
db_table = 'rnc_secondary_structure'
unique_together = (('accession', 'md5'),)
|
"""
Copyright [2009-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from django.db import models
class SecondaryStructure(models.Model):
id = models.AutoField(primary_key=True)
accession = models.OneToOneField(
'Accession',
db_column='rnc_accession_id',
to_field='accession',
related_name='secondary_structure',
)
secondary_structure = models.TextField()
md5 = models.CharField(max_length=32, db_index=True)
class Meta:
db_table = 'rnc_secondary_structure'
unique_together = (('accession', 'md5'),)
|
Use OneToOneField on SecondaryStructure model
|
Use OneToOneField on SecondaryStructure model
|
Python
|
apache-2.0
|
RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode,RNAcentral/rnacentral-webcode
|
5743d7cfbc93b1c806e8f0a38d8000b82445810b
|
__init__.py
|
__init__.py
|
"""
Python Bayesian hierarchical clustering (PyBHC).
Heller, K. A., & Ghahramani, Z. (2005). Bayesian Hierarchical
Clustering. Neuroscience, 6(section 2), 297-304.
doi:10.1145/1102351.1102389
"""
from bhc import bhc
from dists import NormalInverseWishart
|
"""
Python Bayesian hierarchical clustering (PyBHC).
Heller, K. A., & Ghahramani, Z. (2005). Bayesian Hierarchical
Clustering. Neuroscience, 6(section 2), 297-304.
doi:10.1145/1102351.1102389
"""
from bhc import bhc
from dists import NormalInverseWishart, NormalFixedCovar
from rbhc import rbhc
|
Update importing of prob dists
|
Update importing of prob dists
Import the newly created fixed variance probability dist into
__init__.py for easier use outside of module.
|
Python
|
bsd-3-clause
|
stuartsale/pyBHC
|
69d3ec01ec3e9e9369b5c0425bc63cc7f2797b52
|
__init__.py
|
__init__.py
|
import pyOmicron
import STS
__all__=["pyOmicron","STS"]
__version__ = 0.1
|
import pyOmicron
try:
import STS
except:
import pyOmicron.STS
__all__=["pyOmicron","STS"]
__version__ = 0.1
|
Fix import for python 3
|
Fix import for python 3
|
Python
|
apache-2.0
|
scholi/pyOmicron
|
c1acb68ef54309584816fbf5c93e38266accb2f0
|
nova/db/sqlalchemy/session.py
|
nova/db/sqlalchemy/session.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from nova import flags
FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _ENGINE
global _MAKER
if not _MAKER:
if not _ENGINE:
_ENGINE = create_engine(FLAGS.sql_connection, echo=False)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
return session
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Session Handling for SQLAlchemy backend
"""
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from nova import flags
FLAGS = flags.FLAGS
_ENGINE = None
_MAKER = None
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _ENGINE
global _MAKER
if not _MAKER:
if not _ENGINE:
_ENGINE = create_engine(FLAGS.sql_connection, pool_recycle=3600, echo=False)
_MAKER = (sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit))
session = _MAKER()
return session
|
Add the pool_recycle setting to enable connection pooling features for the sql engine. The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site
|
Add the pool_recycle setting to enable connection pooling features for the sql engine. The setting is hard-coded to 3600 seconds (one hour) per the recommendation provided on sqlalchemy's site
|
Python
|
apache-2.0
|
qwefi/nova,tanglei528/nova,houshengbo/nova_vmware_compute_driver,SUSE-Cloud/nova,gooddata/openstack-nova,gspilio/nova,TwinkleChawla/nova,felixma/nova,viggates/nova,TieWei/nova,varunarya10/nova_test_latest,Yusuke1987/openstack_template,eonpatapon/nova,ruslanloman/nova,petrutlucian94/nova_dev,fajoy/nova,gooddata/openstack-nova,yrobla/nova,petrutlucian94/nova_dev,rahulunair/nova,devoid/nova,joker946/nova,paulmathews/nova,belmiromoreira/nova,termie/nova-migration-demo,watonyweng/nova,psiwczak/openstack,Stavitsky/nova,maheshp/novatest,termie/pupa,klmitch/nova,LoHChina/nova,termie/nova-migration-demo,yrobla/nova,aristanetworks/arista-ovs-nova,jianghuaw/nova,projectcalico/calico-nova,Yuriy-Leonov/nova,blueboxgroup/nova,Metaswitch/calico-nova,leilihh/nova,affo/nova,KarimAllah/nova,eharney/nova,shahar-stratoscale/nova,superstack/nova,berrange/nova,NoBodyCam/TftpPxeBootBareMetal,CEG-FYP-OpenStack/scheduler,sacharya/nova,MountainWei/nova,cloudbase/nova,rahulunair/nova,tealover/nova,zzicewind/nova,OpenAcademy-OpenStack/nova-scheduler,jianghuaw/nova,eneabio/nova,shootstar/novatest,alaski/nova,mikalstill/nova,termie/pupa,rajalokan/nova,projectcalico/calico-nova,sridevikoushik31/nova,Triv90/Nova,CloudServer/nova,ntt-sic/nova,hanlind/nova,zhimin711/nova,houshengbo/nova_vmware_compute_driver,watonyweng/nova,dstroppa/openstack-smartos-nova-grizzly,scripnichenko/nova,Francis-Liu/animated-broccoli,CEG-FYP-OpenStack/scheduler,akash1808/nova,bigswitch/nova,fnordahl/nova,sridevikoushik31/nova,josephsuh/extra-specs,virtualopensystems/nova,shahar-stratoscale/nova,kimjaejoong/nova,redhat-openstack/nova,berrange/nova,gooddata/openstack-nova,sridevikoushik31/nova,zzicewind/nova,eonpatapon/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,petrutlucian94/nova,KarimAllah/nova,angdraug/nova,JioCloud/nova_test_latest,tangfeixiong/nova,gspilio/nova,maoy/zknova,bclau/nova,josephsuh/extra-specs,imsplitbit/nova,mahak/nova,sileht/deb-openstack-nova,devendermishrajio/nova,cloudbau/nova,Yusuke1987/openstack_tem
plate,vladikr/nova_drafts,Juniper/nova,ruslanloman/nova,rrader/nova-docker-plugin,double12gzh/nova,varunarya10/nova_test_latest,usc-isi/nova,mmnelemane/nova,LoHChina/nova,SUSE-Cloud/nova,zhimin711/nova,vmturbo/nova,termie/pupa,cloudbase/nova,alaski/nova,CiscoSystems/nova,MountainWei/nova,JianyuWang/nova,dstroppa/openstack-smartos-nova-grizzly,phenoxim/nova,DirectXMan12/nova-hacking,Stavitsky/nova,rajalokan/nova,yosshy/nova,vmturbo/nova,Triv90/Nova,plumgrid/plumgrid-nova,TwinkleChawla/nova,thomasem/nova,salv-orlando/MyRepo,fnordahl/nova,yatinkumbhare/openstack-nova,eneabio/nova,maoy/zknova,cloudbase/nova,dawnpower/nova,jeffrey4l/nova,mahak/nova,rahulunair/nova,apporc/nova,tanglei528/nova,tudorvio/nova,JioCloud/nova,orbitfp7/nova,termie/nova-migration-demo,barnsnake351/nova,NewpTone/stacklab-nova,mikalstill/nova,noironetworks/nova,usc-isi/nova,badock/nova,NoBodyCam/TftpPxeBootBareMetal,Juniper/nova,cloudbau/nova,tianweizhang/nova,redhat-openstack/nova,rickerc/nova_audit,mgagne/nova,eharney/nova,petrutlucian94/nova,aristanetworks/arista-ovs-nova,usc-isi/extra-specs,citrix-openstack-build/nova,houshengbo/nova_vmware_compute_driver,josephsuh/extra-specs,vmturbo/nova,JioCloud/nova,zaina/nova,leilihh/nova,luogangyi/bcec-nova,vmturbo/nova,rajalokan/nova,openstack/nova,dstroppa/openstack-smartos-nova-grizzly,cloudbase/nova-virtualbox,rickerc/nova_audit,BeyondTheClouds/nova,Juniper/nova,jianghuaw/nova,nikesh-mahalka/nova,sileht/deb-openstack-nova,alvarolopez/nova,savi-dev/nova,ewindisch/nova,felixma/nova,akash1808/nova,badock/nova,bgxavier/nova,adelina-t/nova,fajoy/nova,tianweizhang/nova,gooddata/openstack-nova,spring-week-topos/nova-week,paulmathews/nova,tealover/nova,KarimAllah/nova,sileht/deb-openstack-nova,plumgrid/plumgrid-nova,Yuriy-Leonov/nova,CiscoSystems/nova,silenceli/nova,sebrandon1/nova,isyippee/nova,belmiromoreira/nova,barnsnake351/nova,anotherjesse/nova,apporc/nova,anotherjesse/nova,jeffrey4l/nova,j-carpentier/nova,saleemjaveds/https-github.com-openstack-nova,sa
leemjaveds/https-github.com-openstack-nova,dawnpower/nova,Juniper/nova,bgxavier/nova,openstack/nova,alexandrucoman/vbox-nova-driver,silenceli/nova,ted-gould/nova,mahak/nova,edulramirez/nova,blueboxgroup/nova,orbitfp7/nova,joker946/nova,kimjaejoong/nova,NewpTone/stacklab-nova,scripnichenko/nova,akash1808/nova_test_latest,NeCTAR-RC/nova,viggates/nova,anotherjesse/nova,paulmathews/nova,luogangyi/bcec-nova,virtualopensystems/nova,ted-gould/nova,bclau/nova,fajoy/nova,JianyuWang/nova,shootstar/novatest,BeyondTheClouds/nova,iuliat/nova,mmnelemane/nova,superstack/nova,vladikr/nova_drafts,BeyondTheClouds/nova,Tehsmash/nova,NeCTAR-RC/nova,angdraug/nova,shail2810/nova,dims/nova,yrobla/nova,gspilio/nova,zaina/nova,sacharya/nova,takeshineshiro/nova,phenoxim/nova,NoBodyCam/TftpPxeBootBareMetal,devoid/nova,hanlind/nova,eneabio/nova,usc-isi/nova,CCI-MOC/nova,ntt-sic/nova,russellb/nova,superstack/nova,sebrandon1/nova,Metaswitch/calico-nova,whitepages/nova,cernops/nova,sebrandon1/nova,openstack/nova,klmitch/nova,leilihh/novaha,russellb/nova,savi-dev/nova,tudorvio/nova,CCI-MOC/nova,maelnor/nova,TieWei/nova,hanlind/nova,DirectXMan12/nova-hacking,sridevikoushik31/openstack,dims/nova,iuliat/nova,devendermishrajio/nova,mandeepdhami/nova,noironetworks/nova,devendermishrajio/nova_test_latest,maelnor/nova,sridevikoushik31/nova,DirectXMan12/nova-hacking,Francis-Liu/animated-broccoli,leilihh/novaha,JioCloud/nova_test_latest,salv-orlando/MyRepo,citrix-openstack-build/nova,jianghuaw/nova,usc-isi/extra-specs,tangfeixiong/nova,klmitch/nova,rrader/nova-docker-plugin,maoy/zknova,OpenAcademy-OpenStack/nova-scheduler,cernops/nova,NewpTone/stacklab-nova,whitepages/nova,imsplitbit/nova,akash1808/nova_test_latest,aristanetworks/arista-ovs-nova,CloudServer/nova,raildo/nova,devendermishrajio/nova_test_latest,cyx1231st/nova,psiwczak/openstack,eayunstack/nova,klmitch/nova,adelina-t/nova,mikalstill/nova,double12gzh/nova,russellb/nova,maheshp/novatest,thomasem/nova,rajalokan/nova,nikesh-mahalka/nova,Tehsmash/n
ova,shail2810/nova,Brocade-OpenSource/OpenStack-DNRM-Nova,bigswitch/nova,salv-orlando/MyRepo,j-carpentier/nova,cyx1231st/nova,isyippee/nova,edulramirez/nova,maheshp/novatest,psiwczak/openstack,mandeepdhami/nova,savi-dev/nova,usc-isi/extra-specs,alexandrucoman/vbox-nova-driver,raildo/nova,yatinkumbhare/openstack-nova,qwefi/nova,mgagne/nova,yosshy/nova,Triv90/Nova,cloudbase/nova-virtualbox,spring-week-topos/nova-week,takeshineshiro/nova,ewindisch/nova,eayunstack/nova,alvarolopez/nova,sridevikoushik31/openstack,affo/nova,cernops/nova,sridevikoushik31/openstack
|
13fec51e6fa3f47d2f3669e789e9d432e092944a
|
celeryconfig.py
|
celeryconfig.py
|
from datetime import timedelta
from private import CELERY_BROKER_URL, CELERY_RESULT_BACKEND
BROKER_URL = CELERY_BROKER_URL
CELERY_RESULT_BACKEND = CELERY_RESULT_BACKEND
CELERY_TIMEZONE = 'UTC'
CELERY_INCLUDE = ['tasks.scraper_task']
CELERYBEAT_SCHEDULE = {
'scrape_users': {
'task': 'tasks.scraper_task.scraper_task',
'schedule': timedelta(minutes=1)
},
}
|
from datetime import timedelta
from private import CELERY_BROKER_URL, CELERY_RESULT_BACKEND
#-------------------------------------------------------------------------------
BROKER_URL = CELERY_BROKER_URL
CELERY_RESULT_BACKEND = CELERY_RESULT_BACKEND
#-------------------------------------------------------------------------------
CELERY_ACCEPT_CONTENT = ['json']
CELERY_TASK_SERIALIZER = 'json'
#-------------------------------------------------------------------------------
CELERY_TIMEZONE = 'UTC'
CELERY_INCLUDE = ['tasks.scraper_task']
CELERYBEAT_SCHEDULE = {
'scrape_users': {
'task': 'tasks.scraper_task.scraper_task',
'schedule': timedelta(minutes=1)
},
}
|
Use json for task serialization
|
Use json for task serialization
|
Python
|
mit
|
Trinovantes/MyAnimeList-Cover-CSS-Generator,Trinovantes/MyAnimeList-Cover-CSS-Generator
|
bdeb1196025c8f982390f0f298fa8b16b1883bce
|
mediaman/management/commands/generate_thumbs.py
|
mediaman/management/commands/generate_thumbs.py
|
from django.core.management.base import BaseCommand
import easy_thumbnails
from mediaman.models import ArtefactRepresentation
import os
class Command(BaseCommand):
help = "Generate thumbnails for Artefact Representations"
def handle(self, *args, **options):
unbuffered = os.fdopen(self.stdout.fileno(), 'w', 0)
self.stdout = unbuffered
ars = ArtefactRepresentation.objects.all()
self.stdout.write("Found %s images\n" % ars.count())
for ar in ars:
# self.stdout.write(str(ar.image) + "\n")
if ar.image.storage.exists(ar.image):
easy_thumbnails.files.generate_all_aliases(
ar.image, include_global=True)
self.stdout.write('.')
else:
self.stdout.write('n')
self.stdout.write("\nProcessed all images\n")
|
from django.core.management.base import BaseCommand
import easy_thumbnails
from mediaman.models import ArtefactRepresentation
import os
#import ImageFile
from PIL import ImageFile
class Command(BaseCommand):
help = "Generate thumbnails for Artefact Representations"
def handle(self, *args, **options):
unbuffered = os.fdopen(self.stdout.fileno(), 'w', 0)
self.stdout = unbuffered
ImageFile.MAXBLOCK = 1024 * 1024 * 10 # default is 64k, fixes "Suspension not allowed here" error from PIL
ars = ArtefactRepresentation.objects.filter(public=True)
self.stdout.write("Found %s public images\n" % ars.count())
for ar in ars:
# self.stdout.write(str(ar.image) + "\n")
if ar.image.storage.exists(ar.image):
easy_thumbnails.files.generate_all_aliases(
ar.image, include_global=True)
self.stdout.write('.')
else:
self.stdout.write('n')
self.stdout.write("\nProcessed all images\n")
|
Update bulk image generation command
|
Update bulk image generation command
|
Python
|
bsd-3-clause
|
uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam,uq-eresearch/uqam
|
07ea0d8ec5c65f0fc94dc29f8b03402c571d3a42
|
qipipe/interfaces/fix_dicom.py
|
qipipe/interfaces/fix_dicom.py
|
import os
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
InputMultiPath, File, Directory, TraitedSpec)
from qipipe.staging.fix_dicom import fix_dicom_headers
class FixDicomInputSpec(BaseInterfaceInputSpec):
collection = traits.Str(desc='The image collection', mandatory=True)
subject = traits.Str(desc='The subject name', mandatory=True)
in_files = InputMultiPath(File(exists=True), desc='The input DICOM files', mandatory=True)
class FixDicomOutputSpec(TraitedSpec):
out_files = traits.List(desc="The modified output files", trait=File, exists=True)
class FixDicom(BaseInterface):
"""The FixDicom interface wraps the :meth:`qipipe.staging.fix_dicom.fix_dicom_headers`
function."""
input_spec = FixDicomInputSpec
output_spec = FixDicomOutputSpec
def _run_interface(self, runtime):
self._out_files = fix_dicom_headers(self.inputs.collection, self.inputs.subject, *self.inputs.in_files)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_files'] = self._out_files
return outputs
|
import os
from nipype.interfaces.base import (BaseInterface, BaseInterfaceInputSpec, traits,
InputMultiPath, File, Directory, TraitedSpec)
from qipipe.staging.fix_dicom import fix_dicom_headers
class FixDicomInputSpec(BaseInterfaceInputSpec):
collection = traits.Str(desc='The image collection', mandatory=True)
subject = traits.Str(desc='The subject name', mandatory=True)
in_file = File(exists=True, desc='The input DICOM file', mandatory=True)
class FixDicomOutputSpec(TraitedSpec):
out_file = File(desc="The modified output file", exists=True)
class FixDicom(BaseInterface):
"""The FixDicom interface wraps the :meth:`qipipe.staging.fix_dicom.fix_dicom_headers`
function."""
input_spec = FixDicomInputSpec
output_spec = FixDicomOutputSpec
def _run_interface(self, runtime):
self._out_file = fix_dicom_headers(self.inputs.collection, self.inputs.subject, self.inputs.in_file)
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
outputs['out_file'] = self._out_file
return outputs
|
Fix only one file at a time.
|
Fix only one file at a time.
|
Python
|
bsd-2-clause
|
ohsu-qin/qipipe
|
ee8f04c2e68eddad48db3907d1d5e4ecc5daa4a4
|
Functions/Conversation.py
|
Functions/Conversation.py
|
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from Function import Function
from GlobalVars import *
import re
class Instantiate(Function):
Help = 'Responds to greetings and such'
def GetResponse(self, message):
if message.Type != 'PRIVMSG':
return
match = re.search("^(?P<greeting>(wa+s+|')?so?u+p|hi(ya)?|hey|hello|'?lo|mornin[g']?|greetings|bonjour|salut|howdy|'?yo|o?hai|mojn|hej|dongs|ahoy( hoy)?|salutations|g'?day|hola|bye|night|herrow)( there)?,?[ ]%s([^a-zA-Z0-9_\|`\[\]\^-]|$)" % CurrentNick,
message.MessageString,
re.IGNORECASE)
if match:
return IRCResponse(ResponseType.Say,
'%s %s' % (match.group('greeting'), message.User.Name),
message.ReplyTo)
|
from IRCMessage import IRCMessage
from IRCResponse import IRCResponse, ResponseType
from Function import Function
from GlobalVars import *
import re
class Instantiate(Function):
Help = 'Responds to greetings and such'
def GetResponse(self, message):
if message.Type != 'PRIVMSG':
return
match = re.search("^(?P<greeting>(wa+s+|')?so?u+p|hi(ya)?|hey|hello|'?lo|(good |g'?)?((mornin|evenin)[g']?|ni(ght|ni))|greetings|bonjour|salut|howdy|'?yo|o?hai|mojn|hej|dongs|ahoy( hoy)?|salutations|g'?day|hola|bye|herrow)( there)?,?[ ]%s([^a-zA-Z0-9_\|`\[\]\^-]|$)" % CurrentNick,
message.MessageString,
re.IGNORECASE)
if match:
return IRCResponse(ResponseType.Say,
'%s %s' % (match.group('greeting'), message.User.Name),
message.ReplyTo)
|
Add more greetings to the needs-multilining regex
|
Add more greetings to the needs-multilining regex
|
Python
|
mit
|
MatthewCox/PyMoronBot,Heufneutje/PyMoronBot,DesertBot/DesertBot
|
8808fe8a4d3a8cf36a91fe69b2d1002eddc534a3
|
py/garage/garage/startups/sql.py
|
py/garage/garage/startups/sql.py
|
"""Template of DbEngineComponent."""
__all__ = [
'make_db_engine_component',
]
import logging
import garage.sql.sqlite
from garage import components
from garage.startups.logging import LoggingComponent
def make_db_engine_component(
*,
package_name,
argument_group,
argument_prefix):
DB_URL = '%s_db_url' % argument_prefix.replace('-', '_')
class DbEngineComponent(components.Component):
require = components.ARGS
provide = components.make_fqname_tuple(package_name, 'engine')
def add_arguments(self, parser):
group = parser.add_argument_group(argument_group)
group.add_argument(
'--%s-db-url' % argument_prefix, required=True,
help="""set database URL""")
def check_arguments(self, parser, args):
db_url = getattr(args, DB_URL)
if not db_url.startswith('sqlite'):
parser.error('only support sqlite at the moment: %s' % db_url)
def make(self, require):
db_url = getattr(require.args, DB_URL)
echo = logging.getLogger().isEnabledFor(LoggingComponent.TRACE)
return garage.sql.sqlite.create_engine(db_url, echo=echo)
# Hack for manipulating call order.
DbEngineComponent.add_arguments.__module__ = package_name
DbEngineComponent.check_arguments.__module__ = package_name
return DbEngineComponent
|
__all__ = [
'make_db_engine_component',
]
import logging
import garage.sql.sqlite
from garage import components
from garage.startups.logging import LoggingComponent
def make_db_engine_component(
*, package_name,
argument_group, argument_prefix,
check_same_thread=False):
"""DbEngineComponent Generator."""
DB_URL = '%s_db_url' % argument_prefix.replace('-', '_')
class DbEngineComponent(components.Component):
require = components.ARGS
provide = components.make_fqname_tuple(package_name, 'engine')
def add_arguments(self, parser):
group = parser.add_argument_group(argument_group)
group.add_argument(
'--%s-db-url' % argument_prefix, required=True,
help="""set database URL""")
def check_arguments(self, parser, args):
db_url = getattr(args, DB_URL)
if not db_url.startswith('sqlite'):
parser.error('only support sqlite at the moment: %s' % db_url)
def make(self, require):
db_url = getattr(require.args, DB_URL)
echo = logging.getLogger().isEnabledFor(LoggingComponent.TRACE)
return garage.sql.sqlite.create_engine(
db_url,
check_same_thread=check_same_thread,
echo=echo,
)
# Hack for manipulating call order
DbEngineComponent.add_arguments.__module__ = package_name
DbEngineComponent.check_arguments.__module__ = package_name
return DbEngineComponent
|
Add check_same_thread argument to make_db_engine_component
|
Add check_same_thread argument to make_db_engine_component
|
Python
|
mit
|
clchiou/garage,clchiou/garage,clchiou/garage,clchiou/garage
|
5124e59cf6bb264da6d58043e068b63647685167
|
accounts/tests.py
|
accounts/tests.py
|
"""accounts app unittests
"""
from django.test import TestCase
from django.contrib.auth import get_user_model
from accounts.models import LoginToken
TEST_EMAIL = 'newvisitor@example.com'
class WelcomePageTest(TestCase):
"""Tests relating to the welcome_page view.
"""
def test_uses_welcome_template(self):
"""The root url should response with the welcome page template.
"""
response = self.client.get('/')
self.assertTemplateUsed(response, 'accounts/welcome.html')
class UserModelTest(TestCase):
"""Tests for passwordless user model.
"""
def test_user_valid_with_only_email(self):
"""Should not raise if the user model is happy with email only.
"""
user = get_user_model()(email=TEST_EMAIL)
user.full_clean()
def test_users_are_authenticated(self):
"""User objects should be authenticated for views/templates.
"""
user = get_user_model()()
self.assertTrue(user.is_authenticated())
class TokenModelTest(TestCase):
"""Tests for login token model.
"""
def test_unique_tokens_generated(self):
"""Two tokens generated should be unique.
"""
token1 = LoginToken(TEST_EMAIL)
token2 = LoginToken(TEST_EMAIL)
self.assertNotEqual(token1, token2)
|
"""accounts app unittests
"""
from time import sleep
from django.contrib.auth import get_user_model
from django.test import TestCase
from accounts.token import LoginTokenGenerator
TEST_EMAIL = 'newvisitor@example.com'
class WelcomePageTest(TestCase):
"""Tests relating to the welcome_page view.
"""
def test_uses_welcome_template(self):
"""The root url should response with the welcome page template.
"""
response = self.client.get('/')
self.assertTemplateUsed(response, 'accounts/welcome.html')
class UserModelTest(TestCase):
"""Tests for passwordless user model.
"""
def test_user_valid_with_only_email(self):
"""Should not raise if the user model is happy with email only.
"""
user = get_user_model()(email=TEST_EMAIL)
user.full_clean()
def test_users_are_authenticated(self):
"""User objects should be authenticated for views/templates.
"""
user = get_user_model()()
self.assertTrue(user.is_authenticated())
class TokenGeneratorTest(TestCase):
"""Tests for login token model.
"""
def setUp(self):
self.signer = LoginTokenGenerator()
def test_unique_tokens_generated(self):
"""Tokens generated one second apart should differ.
"""
token1 = self.signer.create_token(TEST_EMAIL)
sleep(1)
token2 = self.signer.create_token(TEST_EMAIL)
self.assertNotEqual(token1, token2)
|
Update test to not use a db model
|
Update test to not use a db model
|
Python
|
mit
|
randomic/aniauth-tdd,randomic/aniauth-tdd
|
74a5cad21fb726384ab53f2ca9b711cc8298bfb9
|
accounts/tests.py
|
accounts/tests.py
|
"""accounts app unittests
"""
from django.test import TestCase
class WelcomePageTest(TestCase):
def test_uses_welcome_template(self):
response = self.client.get('/')
self.assertTemplateUsed(response, 'accounts/welcome.html')
|
"""accounts app unittests
"""
from django.test import TestCase
class WelcomePageTest(TestCase):
"""Tests relating to the welcome_page view.
"""
def test_uses_welcome_template(self):
"""The root url should response with the welcome page template.
"""
response = self.client.get('/')
self.assertTemplateUsed(response, 'accounts/welcome.html')
|
Add docstrings to unit test
|
Add docstrings to unit test
|
Python
|
mit
|
randomic/aniauth-tdd,randomic/aniauth-tdd
|
268976034ad508c2ef48dec60da40dec57af824f
|
setup.py
|
setup.py
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
name = "DragonCreole",
packages = ["dragoncreole"],
version = "0.1.0",
description = "Optimized parser for creole-like markup language",
author = "Zauber Paracelsus",
author_email = "admin@zauberparacelsus.xyz",
url = "http://github.com/zauberparacelsus/dragoncreole",
download_url = "https://github.com/zauberparacelsus/dragoncreole/tarball/0.1",
keywords = ["parser", "markup", "html"],
install_requires= [
'html2text'
],
classifiers = [
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: HTML"
],
long_description = "",
cmdclass = {"build_ext": build_ext},
ext_modules = [Extension("DragonCreoleC", ["dragoncreole/dragoncreole.py"])]
)
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
setup(
name = "DragonCreole",
packages = ["dragoncreole"],
version = "0.1.0",
description = "Optimized parser for creole-like markup language",
author = "Zauber Paracelsus",
author_email = "admin@zauberparacelsus.xyz",
url = "http://github.com/zauberparacelsus/dragoncreole",
download_url = "https://github.com/zauberparacelsus/dragoncreole/tarball/0.1",
keywords = ["parser", "markup", "html"],
install_requires= [
'html2text',
'cython'
],
classifiers = [
"Programming Language :: Python",
"Development Status :: 4 - Beta",
"Environment :: Other Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup :: HTML"
],
long_description = "",
cmdclass = {"build_ext": build_ext},
ext_modules = [Extension("dragoncreole.DragonCreoleC", ["dragoncreole/dragoncreole.py"])]
)
|
Tweak for building with cython
|
Tweak for building with cython
|
Python
|
mpl-2.0
|
zauberparacelsus/dragoncreole,zauberparacelsus/dragoncreole
|
1f03b2945b4e52ce22a3b9e6143d02d3bd9aef99
|
overtime_calculator/tests/auth_test.py
|
overtime_calculator/tests/auth_test.py
|
import shutil
import pytest
import hug
from overtime_calculator.src import api
from overtime_calculator.src.auth import get_user_folder
def test_register():
user_name = 'test1'
response = hug.test.post(
api,
'/register',
{'username': user_name, 'password': user_name},
)
assert response.data == {'status': 'ok'}
def test_signin():
response = hug.test.post(api, '/signin', {'username': 'test_1', 'password': 'test_1'})
print(response.data)
assert response.data['token'] is not None
def teardown_module():
user_folder = get_user_folder('test1')
shutil.rmtree(str(user_folder), ignore_errors=False)
|
import shutil
import pytest
import hug
from overtime_calculator.src import api
from overtime_calculator.src.auth import get_user_folder
EXISTING_USER = 'test1'
UNREGISTERED_USER = 'test2'
def test_registration_of_new_user():
response = hug.test.post(
api,
'/register',
{'username': EXISTING_USER, 'password': EXISTING_USER},
)
print(response.data) # Will only show if test fails and is run with --verbose (-v)
assert response.data == {'status': 'ok'}
def test_second_registration_of_registered_user():
response = hug.test.post(
api,
'/register',
{'username': EXISTING_USER, 'password': EXISTING_USER},
)
print(response.data) # Will only show if test fails and is run with --verbose (-v)
assert response.data == dict(error='username already in use')
def test_sign_in_of_existing_user():
response = hug.test.post(
api,
'/signin',
{'username': EXISTING_USER, 'password': EXISTING_USER}
)
print(response.data) # Will only show if test fails and is run with --verbose (-v)
assert 'token' in response.data and response.data['token']
def teardown_module():
user_folder = get_user_folder(EXISTING_USER)
shutil.rmtree(str(user_folder), ignore_errors=False)
|
Add test for already registered user
|
Feature: Add test for already registered user
|
Python
|
mit
|
x10an14/overtime-calculator
|
77ee44b0af8a80babf0a88ddd4f53f2f4ad10d2d
|
tests/test_event.py
|
tests/test_event.py
|
import unittest
from evesp.event import Event
class TestEvent(unittest.TestCase):
def setUp(self):
self.evt = Event(foo='bar')
def test_event_creation(self):
self.assertEqual(self.evt.foo, 'bar')
self.assertRaises(AttributeError, getattr, self.evt, 'non_existing')
def test_event_pickle_serialization(self):
ser_evt = self.evt.serialize()
deser_evt = Event.deserialize(ser_evt)
self.assertEqual(deser_evt.foo, 'bar')
self.assertRaises(AttributeError, getattr, deser_evt, 'non_existing')
def test_event_json_serialization(self):
ser_evt = self.evt.to_json()
deser_evt = Event.from_json(ser_evt)
self.assertEqual(deser_evt.foo, 'bar')
self.assertRaises(AttributeError, getattr, deser_evt, 'non_existing')
if __name__ == "__main__":
unittest.main()
# vim:sw=4:ts=4:et:
|
import unittest
from evesp.event import Event
class TestEvent(unittest.TestCase):
def setUp(self):
self.evt = Event(foo='bar')
def test_event_creation(self):
self.assertEqual(self.evt.foo, 'bar')
def test_non_existing_event(self):
self.assertRaises(AttributeError, getattr, self.evt, 'non_existing')
def test_event_pickle_serialization(self):
ser_evt = self.evt.serialize()
deser_evt = Event.deserialize(ser_evt)
self.assertEqual(deser_evt.foo, 'bar')
self.assertRaises(AttributeError, getattr, deser_evt, 'non_existing')
def test_event_json_serialization(self):
ser_evt = self.evt.to_json()
deser_evt = Event.from_json(ser_evt)
self.assertEqual(deser_evt.foo, 'bar')
self.assertRaises(AttributeError, getattr, deser_evt, 'non_existing')
if __name__ == "__main__":
unittest.main()
# vim:sw=4:ts=4:et:
|
Split one test into two tests
|
Split one test into two tests
|
Python
|
apache-2.0
|
BlackLight/evesp
|
a27b03a89af6442dc8e1be3d310a8fc046a98ed4
|
foampy/tests.py
|
foampy/tests.py
|
"""
Tests for foamPy.
"""
from .core import *
from .dictionaries import *
from .types import *
from .foil import *
|
"""Tests for foamPy."""
from .core import *
from .dictionaries import *
from .types import *
from .foil import *
def test_load_all_torque_drag():
"""Test the `load_all_torque_drag` function."""
t, torque, drag = load_all_torque_drag(casedir="test")
assert t.max() == 4.0
|
Add test for loading all torque and drag data
|
Add test for loading all torque and drag data
|
Python
|
mit
|
petebachant/foamPy,petebachant/foamPy,petebachant/foamPy
|
f2d91d2c296e3662a1b656f0fdf5191665ff363b
|
skimage/transform/__init__.py
|
skimage/transform/__init__.py
|
from .hough_transform import *
from .radon_transform import *
from .finite_radon_transform import *
from .integral import *
from ._geometric import (warp, warp_coords, estimate_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
from ._warps import swirl, homography, resize, rotate, rescale
from .pyramids import (pyramid_reduce, pyramid_expand,
pyramid_gaussian, pyramid_laplacian)
|
from .hough_transform import *
from .radon_transform import *
from .finite_radon_transform import *
from .integral import *
from ._geometric import (warp, warp_coords, estimate_transform,
SimilarityTransform, AffineTransform,
ProjectiveTransform, PolynomialTransform,
PiecewiseAffineTransform)
from ._warps import swirl, resize, rotate, rescale
from .pyramids import (pyramid_reduce, pyramid_expand,
pyramid_gaussian, pyramid_laplacian)
|
Remove deprecated import of hompgraphy
|
Remove deprecated import of hompgraphy
|
Python
|
bsd-3-clause
|
youprofit/scikit-image,almarklein/scikit-image,keflavich/scikit-image,pratapvardhan/scikit-image,vighneshbirodkar/scikit-image,almarklein/scikit-image,almarklein/scikit-image,chriscrosscutler/scikit-image,ajaybhat/scikit-image,SamHames/scikit-image,oew1v07/scikit-image,vighneshbirodkar/scikit-image,youprofit/scikit-image,SamHames/scikit-image,robintw/scikit-image,emon10005/scikit-image,emon10005/scikit-image,ClinicalGraphics/scikit-image,Midafi/scikit-image,warmspringwinds/scikit-image,vighneshbirodkar/scikit-image,ofgulban/scikit-image,michaelaye/scikit-image,ofgulban/scikit-image,Britefury/scikit-image,michaelaye/scikit-image,blink1073/scikit-image,paalge/scikit-image,Britefury/scikit-image,keflavich/scikit-image,rjeli/scikit-image,newville/scikit-image,bennlich/scikit-image,SamHames/scikit-image,ajaybhat/scikit-image,michaelpacer/scikit-image,chintak/scikit-image,jwiggins/scikit-image,warmspringwinds/scikit-image,chintak/scikit-image,oew1v07/scikit-image,ClinicalGraphics/scikit-image,dpshelio/scikit-image,SamHames/scikit-image,juliusbierk/scikit-image,Midafi/scikit-image,GaZ3ll3/scikit-image,ofgulban/scikit-image,paalge/scikit-image,chintak/scikit-image,robintw/scikit-image,bsipocz/scikit-image,bsipocz/scikit-image,rjeli/scikit-image,bennlich/scikit-image,juliusbierk/scikit-image,chriscrosscutler/scikit-image,jwiggins/scikit-image,michaelpacer/scikit-image,WarrenWeckesser/scikits-image,Hiyorimi/scikit-image,almarklein/scikit-image,pratapvardhan/scikit-image,Hiyorimi/scikit-image,WarrenWeckesser/scikits-image,chintak/scikit-image,dpshelio/scikit-image,rjeli/scikit-image,newville/scikit-image,GaZ3ll3/scikit-image,paalge/scikit-image,blink1073/scikit-image
|
589534c52ceff1d4aabb8d72b779359ce2032827
|
tests/integration/integration/runner.py
|
tests/integration/integration/runner.py
|
import os
import subprocess
def load_variables_from_env(prefix="XII_INTEGRATION_"):
length = len(prefix)
vars = {}
for var in filter(lambda x: x.startswith(prefix), os.environ):
vars[var[length:]] = os.environ[var]
return vars
def run_xii(deffile, cmd, variables={}, gargs=None, cargs=None):
xii_env = os.environ.copy()
for key, value in variables.items():
print("=> XII_" + key + " defined")
xii_env["XII_" + key] = value
call = ["xii", "--no-parallel", "--deffile", deffile, gargs, cmd, cargs]
print("calling `{}`".format(" ".join(filter(None, call))))
process = subprocess.Popen(call, stdout=subprocess.PIPE, env=xii_env)
for line in process.stdout:
print("> " + line.rstrip(os.linesep))
if process.returncode != 0:
raise RuntimeError("running xii failed")
|
import os
import subprocess
def load_variables_from_env(prefix="XII_INTEGRATION_"):
length = len(prefix)
vars = {}
for var in filter(lambda x: x.startswith(prefix), os.environ):
vars[var[length:]] = os.environ[var]
return vars
def run_xii(deffile, cmd, variables={}, gargs=None, cargs=None):
xii_env = os.environ.copy()
for key, value in variables.items():
print("=> XII_" + key + " defined")
xii_env["XII_" + key] = value
call = ["xii", "--no-parallel", "--deffile", deffile, cmd]
print("calling `{}`".format(" ".join(call)))
process = subprocess.Popen(call, stdout=subprocess.PIPE, env=xii_env)
for line in process.stdout:
print("> " + line.rstrip(os.linesep))
if process.returncode != 0:
raise RuntimeError("running xii failed")
|
Make cargs and gargs truly optional
|
Make cargs and gargs truly optional
|
Python
|
apache-2.0
|
xii/xii,xii/xii
|
705e9ee8ebe1a1c590ccbec8eed9d18abbf8e914
|
tests/similarity/test_new_similarity.py
|
tests/similarity/test_new_similarity.py
|
import unittest
from similarity.nw_similarity import NWAlgorithm
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nw_algorithm(self):
t = NWAlgorithm('abcdefghij', 'dgj')
t.print_matrix()
(a, b) = t.alignments()
print a
print b
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testSimple']
unittest.main()
|
import unittest
from similarity.nw_similarity import NWAlgorithm
class Test(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_nw_algorithm(self):
t = NWAlgorithm('abcdefghij', 'dgj')
t.print_matrix()
(a, b) = t.alignments()
print '---------------'
print a
print b
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testSimple']
unittest.main()
|
Fix incorrect import reference to nw_similarity
|
Fix incorrect import reference to nw_similarity
|
Python
|
mit
|
dpazel/tryinggithub
|
b36f89088ab1270054140a3d3020960f23c9790b
|
aldryn_blog/cms_toolbar.py
|
aldryn_blog/cms_toolbar.py
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from aldryn_blog import request_post_identifier
@toolbar_pool.register
class BlogToolbar(CMSToolbar):
def populate(self):
if not (self.is_current_app and self.request.user.has_perm('aldryn_blog.add_post')):
return
menu = self.toolbar.get_or_create_menu('blog-app', _('Blog'))
menu.add_modal_item(_('Add Blog Post'), reverse('admin:aldryn_blog_post_add') + '?_popup',
close_on_url=reverse('admin:aldryn_blog_post_changelist'))
blog_entry = getattr(self.request, request_post_identifier, None)
if blog_entry and self.request.user.has_perm('aldryn_blog.change_post'):
menu.add_modal_item(_('Edit Blog Post'), reverse('admin:aldryn_blog_post_change', args=(
blog_entry.pk,)) + '?_popup',
close_on_url=reverse('admin:aldryn_blog_post_changelist'), active=True)
|
# -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from cms.toolbar_pool import toolbar_pool
from cms.toolbar_base import CMSToolbar
from aldryn_blog import request_post_identifier
@toolbar_pool.register
class BlogToolbar(CMSToolbar):
def populate(self):
if not (self.is_current_app and self.request.user.has_perm('aldryn_blog.add_post')):
return
menu = self.toolbar.get_or_create_menu('blog-app', _('Blog'))
menu.add_modal_item(_('Add Blog Post'), reverse('admin:aldryn_blog_post_add'),
close_on_url=reverse('admin:aldryn_blog_post_changelist'))
blog_entry = getattr(self.request, request_post_identifier, None)
if blog_entry and self.request.user.has_perm('aldryn_blog.change_post'):
menu.add_modal_item(_('Edit Blog Post'), reverse('admin:aldryn_blog_post_change', args=(
blog_entry.pk,)),
close_on_url=reverse('admin:aldryn_blog_post_changelist'), active=True)
|
Remove '?_popup' from toolbar urls
|
Remove '?_popup' from toolbar urls
|
Python
|
bsd-3-clause
|
aldryn/aldryn-blog,aldryn/aldryn-blog
|
33dd6ab01cea7a2a83d3d9d0c7682f716cbcb8b2
|
molecule/default/tests/test_default.py
|
molecule/default/tests/test_default.py
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_cvmfs_client(host):
"""Test that the CVMFS client is properly installed"""
pkg = host.package('cvmfs')
client = host.file('/usr/bin/cvmfs2')
version = '2.4.3'
assert pkg.is_installed
assert pkg.version.startswith(version)
def test_CODE_RADE_mounted(host):
"""Check that the CODE-RADE repo is mounted"""
assert host.mount_point("/cvmfs/code-rade.africa-grid.org").exists
def test_CODE_RADE_version(host):
"""Check CODE-RADE version"""
cvmfs_version = host.file('/cvmfs/code-rade.africa-grid.org/version')
assert cvmfs_version.exists
assert cvmfs_version.contains('FR3')
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hosts_file(host):
"""Basic checks on the host."""
f = host.file('/etc/hosts')
assert f.exists
assert f.user == 'root'
assert f.group == 'root'
def test_cvmfs_client(host):
"""Test that the CVMFS client is properly installed."""
pkg = host.package('cvmfs')
client = host.file('/usr/bin/cvmfs2')
version = '2.4.3'
assert pkg.is_installed
assert pkg.version.startswith(version)
assert client.exists
def test_CODE_RADE_mounted(host):
"""Check that the CODE-RADE repo is mounted"""
assert host.mount_point("/cvmfs/code-rade.africa-grid.org").exists
def test_CODE_RADE_version(host):
"""Check CODE-RADE version."""
cvmfs_version = host.file('/cvmfs/code-rade.africa-grid.org/version')
assert cvmfs_version.exists
assert cvmfs_version.contains('FR3')
|
Fix lint errors in tests
|
Fix lint errors in tests
|
Python
|
apache-2.0
|
brucellino/cvmfs-client-2.2,brucellino/cvmfs-client-2.2,AAROC/cvmfs-client-2.2,AAROC/cvmfs-client-2.2
|
7930f968830efd40e1fb200ef331f0c4d955db65
|
api/base.py
|
api/base.py
|
from django.contrib.auth.models import User
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from builds.models import Build
from projects.models import Project
class UserResource(ModelResource):
class Meta:
authentication = BasicAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get', 'post', 'put']
queryset = User.objects.all()
fields = ['username', 'first_name',
'last_name', 'last_login',
'id']
filtering = {
"username": ('exact', 'startswith'),
}
class ProjectResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
authentication = BasicAuthentication()
authorization = DjangoAuthorization()
allowed_methods = ['get', 'post', 'put']
queryset = Project.objects.all()
filtering = {
"slug": ('exact', 'startswith'),
}
excludes = ['build_pdf', 'path', 'skip', 'featured']
class BuildResource(ModelResource):
project = fields.ForeignKey(ProjectResource, 'project')
class Meta:
allowed_methods = ['get']
queryset = Build.objects.all()
filtering = {
"project": ALL,
}
|
from django.contrib.auth.models import User
from tastypie.resources import ModelResource
from tastypie import fields
from tastypie.authentication import BasicAuthentication
from tastypie.authorization import DjangoAuthorization, Authorization
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from builds.models import Build
from projects.models import Project
class UserResource(ModelResource):
class Meta:
#authentication = BasicAuthentication()
#authorization = DjangoAuthorization()
#allowed_methods = ['get', 'post', 'put']
allowed_methods = ['get']
queryset = User.objects.all()
fields = ['username', 'first_name',
'last_name', 'last_login',
'id']
filtering = {
"username": ('exact', 'startswith'),
}
class ProjectResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
#authentication = BasicAuthentication()
#authorization = DjangoAuthorization()
allowed_methods = ['get']
queryset = Project.objects.all()
filtering = {
"slug": ('exact', 'startswith'),
}
excludes = ['build_pdf', 'path', 'skip', 'featured']
class BuildResource(ModelResource):
project = fields.ForeignKey(ProjectResource, 'project')
class Meta:
allowed_methods = ['get']
queryset = Build.objects.all()
filtering = {
"project": ALL,
}
|
Make API read-only and publically available.
|
Make API read-only and publically available.
|
Python
|
mit
|
d0ugal/readthedocs.org,atsuyim/readthedocs.org,clarkperkins/readthedocs.org,soulshake/readthedocs.org,agjohnson/readthedocs.org,SteveViss/readthedocs.org,jerel/readthedocs.org,raven47git/readthedocs.org,rtfd/readthedocs.org,johncosta/private-readthedocs.org,ojii/readthedocs.org,Carreau/readthedocs.org,sid-kap/readthedocs.org,espdev/readthedocs.org,wanghaven/readthedocs.org,d0ugal/readthedocs.org,sils1297/readthedocs.org,nikolas/readthedocs.org,alex/readthedocs.org,jerel/readthedocs.org,clarkperkins/readthedocs.org,stevepiercy/readthedocs.org,GovReady/readthedocs.org,LukasBoersma/readthedocs.org,kenwang76/readthedocs.org,tddv/readthedocs.org,mrshoki/readthedocs.org,wanghaven/readthedocs.org,alex/readthedocs.org,wanghaven/readthedocs.org,stevepiercy/readthedocs.org,CedarLogic/readthedocs.org,wijerasa/readthedocs.org,pombredanne/readthedocs.org,techtonik/readthedocs.org,safwanrahman/readthedocs.org,dirn/readthedocs.org,espdev/readthedocs.org,sunnyzwh/readthedocs.org,raven47git/readthedocs.org,royalwang/readthedocs.org,VishvajitP/readthedocs.org,singingwolfboy/readthedocs.org,davidfischer/readthedocs.org,atsuyim/readthedocs.org,safwanrahman/readthedocs.org,techtonik/readthedocs.org,soulshake/readthedocs.org,gjtorikian/readthedocs.org,takluyver/readthedocs.org,dirn/readthedocs.org,singingwolfboy/readthedocs.org,pombredanne/readthedocs.org,asampat3090/readthedocs.org,fujita-shintaro/readthedocs.org,soulshake/readthedocs.org,asampat3090/readthedocs.org,emawind84/readthedocs.org,asampat3090/readthedocs.org,Tazer/readthedocs.org,sils1297/readthedocs.org,laplaceliu/readthedocs.org,espdev/readthedocs.org,VishvajitP/readthedocs.org,laplaceliu/readthedocs.org,raven47git/readthedocs.org,michaelmcandrew/readthedocs.org,hach-que/readthedocs.org,tddv/readthedocs.org,kenshinthebattosai/readthedocs.org,gjtorikian/readthedocs.org,VishvajitP/readthedocs.org,SteveViss/readthedocs.org,jerel/readthedocs.org,mhils/readthedocs.org,hach-que/readthedocs.org,stevepiercy/readthedocs.org,emawind8
4/readthedocs.org,mrshoki/readthedocs.org,nyergler/pythonslides,hach-que/readthedocs.org,emawind84/readthedocs.org,jerel/readthedocs.org,sid-kap/readthedocs.org,kenwang76/readthedocs.org,rtfd/readthedocs.org,singingwolfboy/readthedocs.org,titiushko/readthedocs.org,Tazer/readthedocs.org,SteveViss/readthedocs.org,kdkeyser/readthedocs.org,Carreau/readthedocs.org,hach-que/readthedocs.org,Tazer/readthedocs.org,dirn/readthedocs.org,davidfischer/readthedocs.org,kdkeyser/readthedocs.org,emawind84/readthedocs.org,Tazer/readthedocs.org,attakei/readthedocs-oauth,istresearch/readthedocs.org,istresearch/readthedocs.org,VishvajitP/readthedocs.org,LukasBoersma/readthedocs.org,michaelmcandrew/readthedocs.org,titiushko/readthedocs.org,ojii/readthedocs.org,attakei/readthedocs-oauth,istresearch/readthedocs.org,michaelmcandrew/readthedocs.org,kenshinthebattosai/readthedocs.org,fujita-shintaro/readthedocs.org,nikolas/readthedocs.org,mhils/readthedocs.org,kdkeyser/readthedocs.org,davidfischer/readthedocs.org,clarkperkins/readthedocs.org,mrshoki/readthedocs.org,safwanrahman/readthedocs.org,laplaceliu/readthedocs.org,cgourlay/readthedocs.org,nikolas/readthedocs.org,nyergler/pythonslides,sils1297/readthedocs.org,safwanrahman/readthedocs.org,soulshake/readthedocs.org,LukasBoersma/readthedocs.org,attakei/readthedocs-oauth,laplaceliu/readthedocs.org,CedarLogic/readthedocs.org,wanghaven/readthedocs.org,michaelmcandrew/readthedocs.org,raven47git/readthedocs.org,atsuyim/readthedocs.org,KamranMackey/readthedocs.org,techtonik/readthedocs.org,davidfischer/readthedocs.org,KamranMackey/readthedocs.org,asampat3090/readthedocs.org,tddv/readthedocs.org,takluyver/readthedocs.org,d0ugal/readthedocs.org,dirn/readthedocs.org,ojii/readthedocs.org,johncosta/private-readthedocs.org,wijerasa/readthedocs.org,johncosta/private-readthedocs.org,fujita-shintaro/readthedocs.org,Carreau/readthedocs.org,agjohnson/readthedocs.org,rtfd/readthedocs.org,gjtorikian/readthedocs.org,KamranMackey/readthedocs.org,titiushko/readt
hedocs.org,clarkperkins/readthedocs.org,cgourlay/readthedocs.org,GovReady/readthedocs.org,cgourlay/readthedocs.org,royalwang/readthedocs.org,techtonik/readthedocs.org,attakei/readthedocs-oauth,alex/readthedocs.org,kenwang76/readthedocs.org,LukasBoersma/readthedocs.org,kenshinthebattosai/readthedocs.org,royalwang/readthedocs.org,ojii/readthedocs.org,cgourlay/readthedocs.org,KamranMackey/readthedocs.org,nyergler/pythonslides,espdev/readthedocs.org,atsuyim/readthedocs.org,SteveViss/readthedocs.org,sunnyzwh/readthedocs.org,agjohnson/readthedocs.org,titiushko/readthedocs.org,fujita-shintaro/readthedocs.org,mhils/readthedocs.org,Carreau/readthedocs.org,agjohnson/readthedocs.org,gjtorikian/readthedocs.org,kenwang76/readthedocs.org,mhils/readthedocs.org,wijerasa/readthedocs.org,sunnyzwh/readthedocs.org,kenshinthebattosai/readthedocs.org,alex/readthedocs.org,sid-kap/readthedocs.org,singingwolfboy/readthedocs.org,royalwang/readthedocs.org,mrshoki/readthedocs.org,stevepiercy/readthedocs.org,takluyver/readthedocs.org,nikolas/readthedocs.org,GovReady/readthedocs.org,sils1297/readthedocs.org,takluyver/readthedocs.org,istresearch/readthedocs.org,wijerasa/readthedocs.org,pombredanne/readthedocs.org,rtfd/readthedocs.org,d0ugal/readthedocs.org,GovReady/readthedocs.org,espdev/readthedocs.org,nyergler/pythonslides,sunnyzwh/readthedocs.org,CedarLogic/readthedocs.org,kdkeyser/readthedocs.org,CedarLogic/readthedocs.org,sid-kap/readthedocs.org
|
bd193b0fdb7fec412aed24ad8f4c6353372d634f
|
polling_stations/apps/data_collection/management/commands/import_westberks.py
|
polling_stations/apps/data_collection/management/commands/import_westberks.py
|
"""
Import Wokingham Polling stations
"""
from data_collection.management.commands import BaseShpImporter, import_polling_station_shapefiles
class Command(BaseShpImporter):
"""
Imports the Polling Station data from Wokingham Council
"""
council_id = 'E06000037'
districts_name = 'polling_districts'
stations_name = 'polling_places.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'name': record[2],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[4],
'postcode' : record[5].split(',')[-1],
'address' : "\n".join(record[5].split(',')[:-1]),
}
def import_polling_stations(self):
import_polling_station_shapefiles(self)
|
"""
Import Wokingham Polling stations
"""
from data_collection.management.commands import BaseShpShpImporter
class Command(BaseShpShpImporter):
"""
Imports the Polling Station data from Wokingham Council
"""
council_id = 'E06000037'
districts_name = 'polling_districts'
stations_name = 'polling_places.shp'
def district_record_to_dict(self, record):
return {
'internal_council_id': record[0],
'name': record[2],
}
def station_record_to_dict(self, record):
return {
'internal_council_id': record[4],
'postcode' : record[5].split(',')[-1],
'address' : "\n".join(record[5].split(',')[:-1]),
}
|
Refactor West Berks to use new BaseShpShpImporter
|
Refactor West Berks to use new BaseShpShpImporter
|
Python
|
bsd-3-clause
|
chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,andylolz/UK-Polling-Stations,andylolz/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,andylolz/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.