commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
e312e2c61d6ddba147be73e636b26b14aaf49f60 | use django.conf.settings instead of plain settings | lingcod/kmlapp/tests.py | lingcod/kmlapp/tests.py | """
Unit tests for the KML App
"""
from django.conf import settings
from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.auth.models import *
from lingcod.common import utils
from lingcod.mpa.models import MpaDesignation
Mpa = utils.get_mpa_class()
MpaArray = utils.get_array_class()
user = User.objects.get(username="dummy")
class KMLAppTest(TestCase):
def setUp(self):
g1 = GEOSGeometry('SRID=4326;POLYGON ((-120.42 34.37, -119.64 34.32, -119.63 34.12, -120.44 34.15, -120.42 34.37))')
g2 = GEOSGeometry('SRID=4326;POLYGON ((-121.42 34.37, -120.64 34.32, -120.63 34.12, -121.44 34.15, -121.42 34.37))')
g3 = GEOSGeometry('SRID=4326;POLYGON ((-122.42 34.37, -121.64 34.32, -121.63 34.12, -122.44 34.15, -122.42 34.37))')
g1.transform(settings.GEOMETRY_DB_SRID)
g2.transform(settings.GEOMETRY_DB_SRID)
g3.transform(settings.GEOMETRY_DB_SRID)
smr = MpaDesignation.objects.create(name="Reserve", acronym="R")
smr.save()
mpa1 = Mpa.objects.create( name='Test_MPA_1', user=user, geometry_final=g1)
mpa2 = Mpa.objects.create( name='Test_MPA_2', designation=smr, user=user, geometry_final=g2)
mpa3 = Mpa.objects.create( name='Test_MPA_3', designation=smr, user=user, geometry_final=g3)
mpa1.save()
mpa2.save()
mpa3.save()
array1 = MpaArray.objects.create( name='Test_Array_1', user=user)
array1.save()
array1.add_mpa(mpa1)
array2 = MpaArray.objects.create( name='Test_Array_2', user=user)
array2.save()
array2.add_mpa(mpa2)
def test_dummy_kml_view(self):
"""
Tests that dummy user can retrieve a KML file
"""
response = self.client.get('/kml/dummy/mpa.kml', {})
self.assertEquals(response.status_code, 200)
def test_valid_kml(self):
"""
Tests that dummy kml is valid (requires feedvalidator)
"""
import feedvalidator
from feedvalidator import compatibility
response = self.client.get('/kml/dummy/mpa.kml', {})
events = feedvalidator.validateString(response.content, firstOccurrenceOnly=1)['loggedEvents']
# Three levels of compatibility
# "A" is most basic level
# "AA" mimics online validator
# "AAA" is experimental; these rules WILL change or disappear in future versions
filterFunc = getattr(compatibility, "AA")
# there are a few bugs in feedvalidator, doesn't recognize valid ExtendedData element so we ignore
events = [x for x in filterFunc(events)
if not (isinstance(x,feedvalidator.logging.UndefinedElement) and x.params['element']==u'ExtendedData')]
from feedvalidator.formatter.text_plain import Formatter
output = Formatter(events)
if output:
print "\n".join(output)
raise Exception("Invalid KML")
else:
print "No KML errors or warnings"
self.assertEquals(response.status_code, 200)
| """
Unit tests for the KML App
"""
import settings
from django.test import TestCase
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.auth.models import *
from lingcod.common import utils
from lingcod.mpa.models import MpaDesignation
Mpa = utils.get_mpa_class()
MpaArray = utils.get_array_class()
user = User.objects.get(username="dummy")
class KMLAppTest(TestCase):
def setUp(self):
g1 = GEOSGeometry('SRID=4326;POLYGON ((-120.42 34.37, -119.64 34.32, -119.63 34.12, -120.44 34.15, -120.42 34.37))')
g2 = GEOSGeometry('SRID=4326;POLYGON ((-121.42 34.37, -120.64 34.32, -120.63 34.12, -121.44 34.15, -121.42 34.37))')
g3 = GEOSGeometry('SRID=4326;POLYGON ((-122.42 34.37, -121.64 34.32, -121.63 34.12, -122.44 34.15, -122.42 34.37))')
g1.transform(settings.GEOMETRY_DB_SRID)
g2.transform(settings.GEOMETRY_DB_SRID)
g3.transform(settings.GEOMETRY_DB_SRID)
smr = MpaDesignation.objects.create(name="Reserve", acronym="R")
smr.save()
mpa1 = Mpa.objects.create( name='Test_MPA_1', user=user, geometry_final=g1)
mpa2 = Mpa.objects.create( name='Test_MPA_2', designation=smr, user=user, geometry_final=g2)
mpa3 = Mpa.objects.create( name='Test_MPA_3', designation=smr, user=user, geometry_final=g3)
mpa1.save()
mpa2.save()
mpa3.save()
array1 = MpaArray.objects.create( name='Test_Array_1', user=user)
array1.save()
array1.add_mpa(mpa1)
array2 = MpaArray.objects.create( name='Test_Array_2', user=user)
array2.save()
array2.add_mpa(mpa2)
def test_dummy_kml_view(self):
"""
Tests that dummy user can retrieve a KML file
"""
response = self.client.get('/kml/dummy/mpa.kml', {})
self.assertEquals(response.status_code, 200)
def test_valid_kml(self):
"""
Tests that dummy kml is valid (requires feedvalidator)
"""
import feedvalidator
from feedvalidator import compatibility
response = self.client.get('/kml/dummy/mpa.kml', {})
events = feedvalidator.validateString(response.content, firstOccurrenceOnly=1)['loggedEvents']
# Three levels of compatibility
# "A" is most basic level
# "AA" mimics online validator
# "AAA" is experimental; these rules WILL change or disappear in future versions
filterFunc = getattr(compatibility, "AA")
# there are a few bugs in feedvalidator, doesn't recognize valid ExtendedData element so we ignore
events = [x for x in filterFunc(events)
if not (isinstance(x,feedvalidator.logging.UndefinedElement) and x.params['element']==u'ExtendedData')]
from feedvalidator.formatter.text_plain import Formatter
output = Formatter(events)
if output:
print "\n".join(output)
raise Exception("Invalid KML")
else:
print "No KML errors or warnings"
self.assertEquals(response.status_code, 200)
| Python | 0 |
368d46ba4bec2da22abfba306badf39a3a552e88 | Remove now-unused imports | tests/test_snippets.py | tests/test_snippets.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import pytest
import requests
from bs4 import BeautifulSoup
REQUESTS_TIMEOUT = 20
URL_TEMPLATE = '{}/{}/Firefox/default/default/default/en-US/{}/default/default/default/'
_user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
def _get_redirect(url, user_agent=_user_agent_firefox, locale='en-US'):
headers = {'user-agent': user_agent,
'accept-language': locale}
return requests.get(url, headers=headers, timeout=REQUESTS_TIMEOUT)
def _parse_response(content):
return BeautifulSoup(content, 'html.parser')
@pytest.mark.parametrize(('version'), ['3', '5'], ids=['legacy', 'activitystream'])
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_response_codes(base_url, version, channel):
url = URL_TEMPLATE.format(base_url, version, channel)
r = _get_redirect(url)
assert r.status_code in (requests.codes.ok, requests.codes.no_content)
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_that_snippets_are_well_formed_xml(base_url, channel):
url = URL_TEMPLATE.format(base_url, '3', channel)
r = _get_redirect(url)
try:
print(r.content)
parseString('<div>{}</div>'.format(r.content))
except ExpatError as e:
raise AssertionError('Snippets at {0} do not contain well formed '
'xml: {1}'.format(url, e))
| # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http://mozilla.org/MPL/2.0/.
import json
import re
from xml.dom.minidom import parseString
from xml.parsers.expat import ExpatError
import pytest
import requests
from bs4 import BeautifulSoup
REQUESTS_TIMEOUT = 20
URL_TEMPLATE = '{}/{}/Firefox/default/default/default/en-US/{}/default/default/default/'
_user_agent_firefox = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:10.0.1) Gecko/20100101 Firefox/10.0.1'
def _get_redirect(url, user_agent=_user_agent_firefox, locale='en-US'):
headers = {'user-agent': user_agent,
'accept-language': locale}
return requests.get(url, headers=headers, timeout=REQUESTS_TIMEOUT)
def _parse_response(content):
return BeautifulSoup(content, 'html.parser')
@pytest.mark.parametrize(('version'), ['3', '5'], ids=['legacy', 'activitystream'])
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_response_codes(base_url, version, channel):
url = URL_TEMPLATE.format(base_url, version, channel)
r = _get_redirect(url)
assert r.status_code in (requests.codes.ok, requests.codes.no_content)
@pytest.mark.parametrize(('channel'), ['aurora', 'beta', 'release'])
def test_that_snippets_are_well_formed_xml(base_url, channel):
url = URL_TEMPLATE.format(base_url, '3', channel)
r = _get_redirect(url)
try:
print(r.content)
parseString('<div>{}</div>'.format(r.content))
except ExpatError as e:
raise AssertionError('Snippets at {0} do not contain well formed '
'xml: {1}'.format(url, e))
| Python | 0 |
142ef9b907868f53c696bd4426a7f08b7ef57528 | Change metatdata test | tests/test_tifffile.py | tests/test_tifffile.py | """ Test tifffile plugin functionality.
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
from imageio.core import get_remote_file
import imageio
test_dir = get_test_dir()
def test_tifffile_format():
# Test selection
for name in ['tiff', '.tif']:
format = imageio.formats[name]
assert format.name == 'TIFF'
def test_tifffile_reading_writing():
""" Test reading and saving tiff """
need_internet() # We keep a test image in the imageio-binary repo
im2 = np.ones((10, 10, 3), np.uint8) * 2
filename1 = os.path.join(test_dir, 'test_tiff.tiff')
# One image
imageio.imsave(filename1, im2)
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 1
# Multiple images
imageio.mimsave(filename1, [im2, im2, im2])
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 3, ims[0].shape
# remote multipage rgb file
filename2 = get_remote_file('images/multipage_rgb.tif')
img = imageio.mimread(filename2)
assert len(img) == 2
assert img[0].shape == (3, 10, 10)
# Mixed
W = imageio.save(filename1)
W.set_meta_data({'planarconfig': 'planar'})
assert W.format.name == 'TIFF'
W.append_data(im2)
W.append_data(im2)
W.close()
#
R = imageio.read(filename1)
assert R.format.name == 'TIFF'
ims = list(R) # == [im for im in R]
assert (ims[0] == im2).all()
meta = R.get_meta_data()
assert meta['orientation'] == 'top_left'
# Fail
raises(IndexError, R.get_data, -1)
raises(IndexError, R.get_data, 3)
# Ensure imwrite write works round trip
filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
R = imageio.imread(filename1)
imageio.imwrite(filename3, R)
R2 = imageio.imread(filename3)
assert (R == R2).all()
run_tests_if_main()
| """ Test tifffile plugin functionality.
"""
import os
import numpy as np
from pytest import raises
from imageio.testing import run_tests_if_main, get_test_dir, need_internet
from imageio.core import get_remote_file
import imageio
test_dir = get_test_dir()
def test_tifffile_format():
# Test selection
for name in ['tiff', '.tif']:
format = imageio.formats[name]
assert format.name == 'TIFF'
def test_tifffile_reading_writing():
""" Test reading and saving tiff """
need_internet() # We keep a test image in the imageio-binary repo
im2 = np.ones((10, 10, 3), np.uint8) * 2
filename1 = os.path.join(test_dir, 'test_tiff.tiff')
# One image
imageio.imsave(filename1, im2)
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 1
# Multiple images
imageio.mimsave(filename1, [im2, im2, im2])
im = imageio.imread(filename1)
ims = imageio.mimread(filename1)
assert (im == im2).all()
assert len(ims) == 3, ims[0].shape
# remote multipage rgb file
filename2 = get_remote_file('images/multipage_rgb.tif')
img = imageio.mimread(filename2)
assert len(img) == 2
assert img[0].shape == (3, 10, 10)
# Mixed
W = imageio.save(filename1)
W.set_meta_data({'planarconfig': 'planar'})
assert W.format.name == 'TIFF'
W.append_data(im2)
W.append_data(im2)
W.close()
#
R = imageio.read(filename1)
assert R.format.name == 'TIFF'
ims = list(R) # == [im for im in R]
assert (ims[0] == im2).all()
meta = R.get_meta_data()
print(meta)
assert meta['is_rgb']
# Fail
raises(IndexError, R.get_data, -1)
raises(IndexError, R.get_data, 3)
# Ensure imwrite write works round trip
filename3 = os.path.join(test_dir, 'test_tiff2.tiff')
R = imageio.imread(filename1)
imageio.imwrite(filename3, R)
R2 = imageio.imread(filename3)
assert (R == R2).all()
run_tests_if_main()
| Python | 0 |
8d09e745f24e663cb81ff5be6bc7b643c6c5bd76 | call it pennyblack 0.3.0 | pennyblack/__init__.py | pennyblack/__init__.py | VERSION = (0, 3, 0,)
__version__ = '.'.join(map(str, VERSION))
# Do not use Django settings at module level as recommended
try:
from django.utils.functional import LazyObject
except ImportError:
pass
else:
class LazySettings(LazyObject):
def _setup(self):
from pennyblack import default_settings
self._wrapped = Settings(default_settings)
class Settings(object):
def __init__(self, settings_module):
for setting in dir(settings_module):
if setting == setting.upper():
setattr(self, setting, getattr(settings_module, setting))
settings = LazySettings()
def send_newsletter(newsletter_name, *args, **kwargs):
"""
Gets a newsletter by its name and tries to send it to receiver
"""
from pennyblack.models import Newsletter
newsletter = Newsletter.objects.get_workflow_newsletter_by_name(newsletter_name)
if newsletter:
newsletter.send(*args, **kwargs) | VERSION = (0, 3, 0, 'pre')
__version__ = '.'.join(map(str, VERSION))
# Do not use Django settings at module level as recommended
try:
from django.utils.functional import LazyObject
except ImportError:
pass
else:
class LazySettings(LazyObject):
def _setup(self):
from pennyblack import default_settings
self._wrapped = Settings(default_settings)
class Settings(object):
def __init__(self, settings_module):
for setting in dir(settings_module):
if setting == setting.upper():
setattr(self, setting, getattr(settings_module, setting))
settings = LazySettings()
def send_newsletter(newsletter_name, *args, **kwargs):
"""
Gets a newsletter by its name and tries to send it to receiver
"""
from pennyblack.models import Newsletter
newsletter = Newsletter.objects.get_workflow_newsletter_by_name(newsletter_name)
if newsletter:
newsletter.send(*args, **kwargs) | Python | 0.999594 |
57d5622d205854eafd8babf8dfa1ad45bf05ebcb | Update ipc_lista1.15.py | lista1/ipc_lista1.15.py | lista1/ipc_lista1.15.py | #ipc_lista1.15
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
##Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
#salário bruto.
#quanto pagou ao INSS.
#quanto pagou ao sindicato.
#o salário líquido.
#calcule os descontos e o salário líquido, conforme a tabela abaixo:
#+ Salário Bruto : R$
#- IR (11%) : R$
#- INSS (8%) : R$
#- Sindicato ( 5%) : R$
#= Salário Liquido : R$
#Obs.: Salário Bruto - Descontos = Salário Líquido.
qHora = input("Quanto você ganha por hora: ")
hT = input("Quantas horas você trabalhou: ")
SalBruto = qHora
ir = (11/100.0 * salBruto)
inss = (8/100.0 * SalBruto)
sindicato = (5/100.0 * SalBruto)
vT = ir + sindicato
SalLiq = SalBruto - vT
print "------------------------"
print "Seu salário bruto e: ",SalBruto
print '------------------------"
print "Valor dos impostos"
print "-------------------------"
print "IR: ",ir
print "INSS: ",inss
print"--------------------------"
print"Se salario liquido e: ",SalLiq
| #ipc_lista1.15
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
##Faça um Programa que pergunte quanto você ganha por hora e o número de horas trabalhadas no mês. Calcule e mostre o total do seu salário no referido mês, sabendo-se que são descontados 11% para o Imposto de Renda, 8% para o INSS e 5% para o sindicato, faça um programa que nos dê:
#salário bruto.
#quanto pagou ao INSS.
#quanto pagou ao sindicato.
#o salário líquido.
#calcule os descontos e o salário líquido, conforme a tabela abaixo:
#+ Salário Bruto : R$
#- IR (11%) : R$
#- INSS (8%) : R$
#- Sindicato ( 5%) : R$
#= Salário Liquido : R$
#Obs.: Salário Bruto - Descontos = Salário Líquido.
qHora = input("Quanto você ganha por hora: ")
hT = input("Quantas horas você trabalhou: ")
SalBruto = qHora
ir = (11/100.0 * salBruto)
inss = (8/100.0m* SalBruto)
sindicato = (5/100.0 * SalBruto)
vT = ir + sindicato
SalLiq = SalBruto - vT
print "------------------------"
print "Seu salário bruto e: ",SalBruto
print '------------------------"
print "Valor dos impostos"
print "-------------------------"
print "IR: ",ir
print "INSS: ",inss
print"--------------------------"
print"Se salario liquido e: ",SalLiq
| Python | 0 |
b37432e914b6c6e45803a928f35fbaa8964780aa | test by uuid | tests/unit/test_log.py | tests/unit/test_log.py | import pytest
from raft import log
def mle(index, term, committed=False, msgid='', msg={}):
return dict(index=index, term=term, committed=committed,
msgid=msgid, msg=msg)
def test_le():
# a's term is greater than b's
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 3)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert ra > rb
# terms are equal
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert a <= b
assert b <= a
# terms equal but more commits in b
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4),
4: mle(4, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert rb > ra
def test_dump():
rl = log.RaftLog(None)
dump = {0: {'term': 0, 'msgid': '', 'committed': True,
'acked': [], 'msg': {}, 'index': 0}}
assert rl.dump() == dump
def test_get_max_index_term():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.get_max_index_term() == (1, 2)
le = log.logentry(6, 'abcdefg', {})
rl.add(le)
assert rl.get_max_index_term() == (2, 6)
def test_has_uuid():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.has_uuid('abcd') == True
assert rl.has_uuid('dcba') == False
| import pytest
from raft import log
def mle(index, term, committed=False, msgid='', msg={}):
return dict(index=index, term=term, committed=committed,
msgid=msgid, msg=msg)
def test_le():
# a's term is greater than b's
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 3)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert ra > rb
# terms are equal
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert a <= b
assert b <= a
# terms equal but more commits in b
a = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4)}
b = {1: mle(1, 2),
2: mle(2, 2),
3: mle(3, 4),
4: mle(4, 4)}
ra = log.RaftLog(a)
rb = log.RaftLog(b)
assert rb > ra
def test_dump():
rl = log.RaftLog(None)
dump = {0: {'term': 0, 'msgid': '', 'committed': True,
'acked': [], 'msg': {}, 'index': 0}}
assert rl.dump() == dump
def test_get_max_index_term():
rl = log.RaftLog(None)
le = log.logentry(2, 'abcd', {})
rl.add(le)
assert rl.get_max_index_term() == (1, 2)
le = log.logentry(6, 'abcdefg', {})
rl.add(le)
assert rl.get_max_index_term() == (2, 6)
| Python | 0.000001 |
62d5a4446c4c0a919557dd5f2e95d21c5a8259a8 | Test the optimized set | tests/unit/test_set.py | tests/unit/test_set.py | import pytest
from pypred import OptimizedPredicateSet, PredicateSet, Predicate
class TestPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
class TestOptPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
def test_invalidate(self):
"AST is invalidated when set changes"
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
p3 = Predicate("name is 'Joe'")
s.add(p3)
assert s.ast == None
match = s.evaluate({'name': 'Joe'})
assert match == [p3]
def test_finalize(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = OptimizedPredicateSet([p1, p2])
s.finalize()
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
p3 = Predicate("name is 'Joe'")
with pytest.raises(Exception):
s.add(p3)
| from pypred import PredicateSet, Predicate
class TestPredicateSet(object):
def test_two(self):
p1 = Predicate("name is 'Jack'")
p2 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p2])
match = s.evaluate({'name': 'Jill'})
assert match == [p2]
def test_dup(self):
p1 = Predicate("name is 'Jill'")
s = PredicateSet([p1, p1])
match = s.evaluate({'name': 'Jill'})
assert match == [p1]
| Python | 0.000043 |
cae0764f2cbb8d00de1832079e55b8e4d45f55f2 | Fix for short OTU name when there is a species but no genus or higher | phylotoast/otu_calc.py | phylotoast/otu_calc.py | from __future__ import division
import ast
from collections import defaultdict
from phylotoast import biom_calc as bc
def otu_name(tax):
"""
Determine a simple Genus-species identifier for an OTU, if possible.
If OTU is not identified to the species level, name it as
Unclassified (familly/genus/etc...).
:type tax: list
:param tax: QIIME-style taxonomy identifiers, e.g.
["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
extract_name = lambda lvl: "_".join(lvl.split("_")[2:])
spname = "spp."
for lvl in tax[::-1]:
if len(lvl) <= 3:
continue
if lvl.startswith("s"):
spname = extract_name(lvl)
elif lvl.startswith("g"):
return "{}_{}".format(extract_name(lvl), spname)
else:
if spname != "spp.":
return spname
else:
return "Unclassified_{}".format(extract_name(lvl))
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
with open(core_fp, "rU") as in_f:
return {otu_name(ast.literal_eval(line.split("\t")[1]))
for line in in_f.readlines()[1:]}
def assign_otu_membership(biomfile):
"""
Determines the OTUIDs present in each sample.
:type biomfile: biom.table.Table
:param biomfile: BIOM table object from the biom-format library.
:rtype: dict
:return: Returns a dictionary keyed on Sample ID with sets containing
the IDs of OTUIDs found in each sample.
"""
samples = defaultdict(set)
_ = biomfile.pa()
for sid in biomfile.ids():
for otuid in biomfile.ids("observation"):
if biomfile.get_value_by_ids(otuid, sid) == 1:
samples[sid].add(otuid)
return samples
| from __future__ import division
import ast
from collections import defaultdict
from phylotoast import biom_calc as bc
def otu_name(tax):
"""
Determine a simple Genus-species identifier for an OTU, if possible.
If OTU is not identified to the species level, name it as
Unclassified (familly/genus/etc...).
:type tax: list
:param tax: QIIME-style taxonomy identifiers, e.g.
["k__Bacteria", u"p__Firmicutes", u"c__Bacilli", ...
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
extract_name = lambda lvl: "_".join(lvl.split("_")[2:])
spname = "spp."
for lvl in tax[::-1]:
if len(lvl) <= 3:
continue
if lvl.startswith("s"):
spname = extract_name(lvl)
elif lvl.startswith("g"):
return "{}_{}".format(extract_name(lvl), spname)
else:
return "Unclassified_{}".format(extract_name(lvl))
def load_core_file(core_fp):
"""
For core OTU data file, returns Genus-species identifier for each data
entry.
:type core_fp: str
:param core_fp: A file containing core OTU data.
:rtype: str
:return: Returns genus-species identifier based on identified taxonomical
level.
"""
with open(core_fp, "rU") as in_f:
return {otu_name(ast.literal_eval(line.split("\t")[1]))
for line in in_f.readlines()[1:]}
def assign_otu_membership(biomfile):
"""
Determines the OTUIDs present in each sample.
:type biomfile: biom.table.Table
:param biomfile: BIOM table object from the biom-format library.
:rtype: dict
:return: Returns a dictionary keyed on Sample ID with sets containing
the IDs of OTUIDs found in each sample.
"""
samples = defaultdict(set)
_ = biomfile.pa()
for sid in biomfile.ids():
for otuid in biomfile.ids("observation"):
if biomfile.get_value_by_ids(otuid, sid) == 1:
samples[sid].add(otuid)
return samples
| Python | 0.00004 |
7c4d3fffe62190b8c27317ed83bd5e7110b103ec | Update parser.py | MusicXMLParser/parser.py | MusicXMLParser/parser.py | '''
Takes a musicXML file, and creates a file that can be played by my MusicPlayer arduino library.
Written by Eivind Lie Andreassen, 2016
Licensed under the MIT license.
'''
import xml.dom.minidom
import valueHelper
xmlPath = input("Enter path to MusicXML file: ")
savePath = input("Enter save path of converted file: ")
domTree = xml.dom.minidom.parse(xmlPath)
collection = domTree.documentElement
if(collection.hasAttribute("example")):
print(collection.getAttribute("sample"))
notesXML = collection.getElementsByTagName("note")
notes = []
noteLengths = []
for note in notesXML:
if (len(note.getElementsByTagName("rest"))>0):
noteValue = '0'
else:
noteValue = note.getElementsByTagName("step")[0].childNodes[0].data + note.getElementsByTagName("octave")[0].childNodes[0].data
if len(note.getElementsByTagName("alter")) > 0:
index = valueHelper.noteValues.index(noteValue) + int(note.getElementsByTagName("alter")[0].childNodes[0].data)
if(index < 0):
index = 0
elif(index >= len(valueHelper.noteValues)):
index = len(valueHelper.noteValues) - 1
noteValue = valueHelper.noteValues[index]
if(len(note.getElementsByTagName("type")) == 0):
continue
noteLength = valueHelper.lengthValues[note.getElementsByTagName("type")[0].childNodes[0].data]
if(len(note.getElementsByTagName("dot")) > 1):
noteLength *= 1.75
elif(len(note.getElementsByTagName("dot")) > 0):
noteLength *= 1.5
notes.append(noteValue)
noteLengths.append(noteLength)
output = "NoteNumber: " + str(len(notes)) + "\n\n"
output += "Notes:\n{"
for i in range(len(notes)):
output += notes[i]
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};\n\n"
output += "NoteLengths:\n{"
for i in range(len(notes)):
output += str(noteLengths[i])
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};"
with open(savePath, "w") as file:
file.write(output)
| '''
Takes a musicXML file, and creates a file that can be played by my MusicPlayer arduino library.
Written by Eivind Lie Andreassen, 2016
Licensed under Creative Commons Attribution-ShareAlike 4.0 International. http://creativecommons.org/licenses/by-sa/4.0/
'''
import xml.dom.minidom
import valueHelper
xmlPath = input("Enter path to MusicXML file: ")
savePath = input("Enter save path of converted file: ")
domTree = xml.dom.minidom.parse(xmlPath)
collection = domTree.documentElement
if(collection.hasAttribute("example")):
print(collection.getAttribute("sample"))
notesXML = collection.getElementsByTagName("note")
notes = []
noteLengths = []
for note in notesXML:
if (len(note.getElementsByTagName("rest"))>0):
noteValue = '0'
else:
noteValue = note.getElementsByTagName("step")[0].childNodes[0].data + note.getElementsByTagName("octave")[0].childNodes[0].data
if len(note.getElementsByTagName("alter")) > 0:
index = valueHelper.noteValues.index(noteValue) + int(note.getElementsByTagName("alter")[0].childNodes[0].data)
if(index < 0):
index = 0
elif(index >= len(valueHelper.noteValues)):
index = len(valueHelper.noteValues) - 1
noteValue = valueHelper.noteValues[index]
if(len(note.getElementsByTagName("type")) == 0):
continue
noteLength = valueHelper.lengthValues[note.getElementsByTagName("type")[0].childNodes[0].data]
if(len(note.getElementsByTagName("dot")) > 1):
noteLength *= 1.75
elif(len(note.getElementsByTagName("dot")) > 0):
noteLength *= 1.5
notes.append(noteValue)
noteLengths.append(noteLength)
output = "NoteNumber: " + str(len(notes)) + "\n\n"
output += "Notes:\n{"
for i in range(len(notes)):
output += notes[i]
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};\n\n"
output += "NoteLengths:\n{"
for i in range(len(notes)):
output += str(noteLengths[i])
if(i < len(notes) - 1):
output += ", "
if(i != 0 and i % 10 == 0):
output += "\n"
output += "};"
with open(savePath, "w") as file:
file.write(output) | Python | 0.000001 |
83b83cb3491bd4ccf39e2c6ade72f8f526ea27fe | Increase toolbox reporting | ArcToolbox/Scripts/ExportFolder2PDF.py | ArcToolbox/Scripts/ExportFolder2PDF.py | #Export a folder of maps to PDFs at their Map Document set sizes
#Written using ArcGIS 10 and Python 2.6.5
#by: Guest
# https://gis.stackexchange.com/questions/7147/how-to-batch-export-mxd-to-pdf-files
import arcpy, os
#Read input parameter from user.
path = arcpy.GetParameterAsText(0)
#Write MXD names in folder to txt log file.
writeLog=open(path+"\FileListLog.txt","w")
for fileName in os.listdir(path):
fullPath = os.path.join(path, fileName)
if os.path.isfile(fullPath):
basename, extension = os.path.splitext(fullPath)
if extension == ".mxd":
writeLog.write(fullPath+"\n")
mxd = arcpy.mapping.MapDocument(fullPath)
arcpy.AddMessage('Found: ' + fileName)
del mxd
arcpy.AddMessage("Done")
writeLog.close()
# Set all the parameters as variables here:
data_frame = 'PAGE_LAYOUT'
df_export_width = 1920
df_export_height = 1200
resolution = "300"
image_quality = "BETTER"
colorspace = "RGB"
compress_vectors = "True"
image_compression = "ADAPTIVE"
picture_symbol = 'VECTORIZE_BITMAP'
convert_markers = "False"
embed_fonts = "True"
layers_attributes = "LAYERS_ONLY"
georef_info = "False"
jpeg_compression_quality = 85
exportPath =arcpy.GetParameterAsText(1)
MXDread=open(path+"\FileListLog.txt","r")
for line in MXDread:
#Strip newline from line.
line=line.rstrip('\n')
if os.path.isfile(line):
basename, extension = os.path.splitext(line)
newName=basename.split('\\')[-1]
if extension.lower() == ".mxd":
# arcpy.AddMessage( "Basename:" +newName )
mxd = arcpy.mapping.MapDocument(line)
newPDF=exportPath+"\\"+newName+".pdf"
arcpy.AddMessage( 'Writing: ' + newPDF )
arcpy.mapping.ExportToPDF(mxd,newPDF, data_frame, df_export_width, df_export_height, resolution, image_quality, colorspace, compress_vectors, image_compression, picture_symbol, convert_markers, embed_fonts, layers_attributes, georef_info, jpeg_compression_quality)
arcpy.AddMessage( 'Finished: ' + line)
MXDread.close()
item=path+"\FileListLog.txt"
os.remove(item)
del mxd
arcpy.GetMessages() | #Export a folder of maps to PDFs at their Map Document set sizes
#Written using ArcGIS 10 and Python 2.6.5
#by: Guest
# https://gis.stackexchange.com/questions/7147/how-to-batch-export-mxd-to-pdf-files
import arcpy, os
#Read input parameter from user.
path = arcpy.GetParameterAsText(0)
#Write MXD names in folder to txt log file.
writeLog=open(path+"\FileListLog.txt","w")
for fileName in os.listdir(path):
fullPath = os.path.join(path, fileName)
if os.path.isfile(fullPath):
basename, extension = os.path.splitext(fullPath)
if extension == ".mxd":
writeLog.write(fullPath+"\n")
mxd = arcpy.mapping.MapDocument(fullPath)
print fileName + "\n"
del mxd
print "Done"
writeLog.close()
# Set all the parameters as variables here:
data_frame = 'PAGE_LAYOUT'
df_export_width = 1920
df_export_height = 1200
resolution = "300"
image_quality = "BETTER"
colorspace = "RGB"
compress_vectors = "True"
image_compression = "ADAPTIVE"
picture_symbol = 'VECTORIZE_BITMAP'
convert_markers = "False"
embed_fonts = "True"
layers_attributes = "LAYERS_ONLY"
georef_info = "False"
jpeg_compression_quality = 85
exportPath =arcpy.GetParameterAsText(1)
MXDread=open(path+"\FileListLog.txt","r")
for line in MXDread:
#Strip newline from line.
line=line.rstrip('\n')
if os.path.isfile(line):
basename, extension = os.path.splitext(line)
newName=basename.split('\\')[-1]
if extension.lower() == ".mxd":
print "Basename:" +newName
mxd = arcpy.mapping.MapDocument(line)
newPDF=exportPath+"\\"+newName+".pdf"
print newPDF
arcpy.mapping.ExportToPDF(mxd,newPDF, data_frame, df_export_width, df_export_height, resolution, image_quality, colorspace, compress_vectors, image_compression, picture_symbol, convert_markers, embed_fonts, layers_attributes, georef_info, jpeg_compression_quality)
print line + "Export Done"
MXDread.close()
item=path+"\FileListLog.txt"
os.remove(item)
del mxd | Python | 0 |
f5a3d65d56a1746fac3bd42d38537cca359a968c | improve range-only command detection | ex_command_parser.py | ex_command_parser.py | """a simple 'parser' for :ex commands
"""
from collections import namedtuple
import re
# holds info about an ex command
EX_CMD = namedtuple('ex_command', 'name command forced range args')
EX_RANGE_REGEXP = re.compile(r'^(:?([.$%]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?)(([,;])(:?([.$]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?))?')
EX_ONLY_RANGE_REGEXP = re.compile(r'(?:([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*(?:([,;])([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*)?)|(^[/?].*$)')
EX_COMMANDS = {
('write', 'w'): {'command': 'ex_write_file', 'args': ['file_name']},
('wall', 'wa'): {'command': 'ex_write_all', 'args': []},
('pwd', 'pw'): {'command': 'ex_print_working_dir', 'args': []},
('buffers', 'buffers'): {'command': 'ex_prompt_select_open_file', 'args': []},
('ls', 'ls'): {'command': 'ex_prompt_select_open_file', 'args': []},
('map', 'map'): {'command': 'ex_map', 'args': []},
('abbreviate', 'ab'): {'command': 'ex_abbreviate', 'args': []},
('read', 'r'): {'command': 'ex_read_shell_out', 'args': ['shell_cmd']},
}
def find_command(cmd_name):
names = [x for x in EX_COMMANDS.keys() if x[0].startswith(cmd_name)]
# unknown command name
if not names: return None
# check for matches in known aliases and full names
full_match = [(x, y) for (x, y) in names if cmd_name in (x, y)]
if full_match:
return full_match[0]
else:
# partial match, but not a known alias
return names[0]
def is_only_range(cmd_line):
try:
return EX_ONLY_RANGE_REGEXP.search(cmd_line) and \
EX_RANGE_REGEXP.search(cmd_line).span()[1] == len(cmd_line)
except AttributeError:
return EX_ONLY_RANGE_REGEXP.search(cmd_line)
def get_cmd_line_range(cmd_line):
try:
start, end = EX_RANGE_REGEXP.search(cmd_line).span()
except AttributeError:
return None
return cmd_line[start:end]
def parse_command(cmd):
# strip :
cmd_name = cmd[1:]
# first the odd commands
if is_only_range(cmd_name):
return EX_CMD(name=':',
command='ex_goto',
forced=False,
range=cmd_name,
args=''
)
if cmd_name.startswith('!'):
cmd_name = '!'
args = cmd[2:]
return EX_CMD(name=cmd_name,
command=None,
forced=False,
range=None,
args=args
)
range = get_cmd_line_range(cmd_name)
if range: cmd_name = cmd_name[len(range):]
cmd_name, _, args = cmd_name.partition(' ')
args = re.sub(r' {2,}', ' ', args)
args = args.split(' ')
bang =False
if cmd_name.endswith('!'):
cmd_name = cmd_name[:-1]
bang = True
cmd_data = find_command(cmd_name)
if not cmd_data: return None
cmd_data = EX_COMMANDS[cmd_data]
cmd_args = {}
if cmd_data['args'] and args:
cmd_args = dict(zip(cmd_data['args'], args))
return EX_CMD(name=cmd_name,
command=cmd_data['command'],
forced=bang,
range=range,
args=cmd_args
)
| """a simple 'parser' for :ex commands
"""
from collections import namedtuple
import re
# holds info about an ex command
EX_CMD = namedtuple('ex_command', 'name command forced range args')
EX_RANGE_REGEXP = re.compile(r'^(:?([.$%]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?)(([,;])(:?([.$]|(:?/.*?/|\?.*?\?){1,2}|\d+)([-+]\d+)?))?')
EX_ONLY_RANGE_REGEXP = re.compile(r'(?:([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*(?:([,;])([%$.]|\d+|/.*?(?<!\\)/|\?.*?\?)([-+]\d+)*)?)|(^[/?].*$)')
EX_COMMANDS = {
('write', 'w'): {'command': 'ex_write_file', 'args': ['file_name']},
('wall', 'wa'): {'command': 'ex_write_all', 'args': []},
('pwd', 'pw'): {'command': 'ex_print_working_dir', 'args': []},
('buffers', 'buffers'): {'command': 'ex_prompt_select_open_file', 'args': []},
('ls', 'ls'): {'command': 'ex_prompt_select_open_file', 'args': []},
('map', 'map'): {'command': 'ex_map', 'args': []},
('abbreviate', 'ab'): {'command': 'ex_abbreviate', 'args': []},
('read', 'r'): {'command': 'ex_read_shell_out', 'args': ['shell_cmd']},
}
def find_command(cmd_name):
names = [x for x in EX_COMMANDS.keys() if x[0].startswith(cmd_name)]
# unknown command name
if not names: return None
# check for matches in known aliases and full names
full_match = [(x, y) for (x, y) in names if cmd_name in (x, y)]
if full_match:
return full_match[0]
else:
# partial match, but not a known alias
return names[0]
def is_only_range(cmd_line):
return EX_ONLY_RANGE_REGEXP.search(cmd_line) and \
EX_RANGE_REGEXP.search(cmd_line).span()[1] == len(cmd_line)
def get_cmd_line_range(cmd_line):
try:
start, end = EX_RANGE_REGEXP.search(cmd_line).span()
except AttributeError:
return None
return cmd_line[start:end]
def parse_command(cmd):
# strip :
cmd_name = cmd[1:]
# first the odd commands
if is_only_range(cmd_name):
return EX_CMD(name=':',
command='ex_goto',
forced=False,
range=cmd_name,
args=''
)
if cmd_name.startswith('!'):
cmd_name = '!'
args = cmd[2:]
return EX_CMD(name=cmd_name,
command=None,
forced=False,
range=None,
args=args
)
range = get_cmd_line_range(cmd_name)
if range: cmd_name = cmd_name[len(range):]
cmd_name, _, args = cmd_name.partition(' ')
args = re.sub(r' {2,}', ' ', args)
args = args.split(' ')
bang =False
if cmd_name.endswith('!'):
cmd_name = cmd_name[:-1]
bang = True
cmd_data = find_command(cmd_name)
if not cmd_data: return None
cmd_data = EX_COMMANDS[cmd_data]
cmd_args = {}
if cmd_data['args'] and args:
cmd_args = dict(zip(cmd_data['args'], args))
return EX_CMD(name=cmd_name,
command=cmd_data['command'],
forced=bang,
range=range,
args=cmd_args
)
| Python | 0.000001 |
b614436766e8ee3316936c5718262b35cfae3869 | Add slug field on save | memex_explorer/base/models.py | memex_explorer/base/models.py | from django.db import models
from django.utils.text import slugify
class Project(models.Model):
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=64, unique=True)
description = models.TextField()
icon = models.CharField(max_length=64)
def __str__(self):
return self.name
def save(self, *args, **kwargs):
if not self.id:
# Newly created object, so save to get self.id
super(Project, self).save(*args, **kwargs)
self.slug = '%i-%s' % (
self.id, slugify(self.name)
)
super(Project, self).save(*args, **kwargs)
class DataModel(models.Model):
name = models.CharField(max_length=64)
project = models.ForeignKey(Project)
def __str__(self):
return self.name
class Crawl(models.Model):
name = models.CharField(max_length=64)
slug = models.CharField(max_length=64)
description = models.TextField()
crawler = models.CharField(max_length=64)
status = models.CharField(max_length=64)
config = models.CharField(max_length=64)
seeds_list = models.CharField(max_length=64)
pages_crawled = models.BigIntegerField()
harvest_rate = models.FloatField()
project = models.ForeignKey(Project)
data_model = models.ForeignKey(DataModel)
def __str__(self):
return self.name
class DataSource(models.Model):
name = models.CharField(max_length=64)
data_uri = models.CharField(max_length=200)
description = models.TextField()
project = models.ForeignKey(Project)
crawl = models.ForeignKey(Crawl)
def __str__(self):
return self.name
| from django.db import models
class Project(models.Model):
name = models.CharField(max_length=64)
slug = models.SlugField(max_length=64, unique=True)
description = models.TextField()
icon = models.CharField(max_length=64)
def __str__(self):
return self.name
class DataModel(models.Model):
name = models.CharField(max_length=64)
project = models.ForeignKey(Project)
def __str__(self):
return self.name
class Crawl(models.Model):
name = models.CharField(max_length=64)
slug = models.CharField(max_length=64)
description = models.TextField()
crawler = models.CharField(max_length=64)
status = models.CharField(max_length=64)
config = models.CharField(max_length=64)
seeds_list = models.CharField(max_length=64)
pages_crawled = models.BigIntegerField()
harvest_rate = models.FloatField()
project = models.ForeignKey(Project)
data_model = models.ForeignKey(DataModel)
def __str__(self):
return self.name
class DataSource(models.Model):
name = models.CharField(max_length=64)
data_uri = models.CharField(max_length=200)
description = models.TextField()
project = models.ForeignKey(Project)
crawl = models.ForeignKey(Crawl)
def __str__(self):
return self.name
| Python | 0.000001 |
047db1c64cd5b7ef070f73e1d580e36236ac9613 | Print warning when using deprecated 'python3' module | mesonbuild/modules/python3.py | mesonbuild/modules/python3.py | # Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sysconfig
from .. import mesonlib, dependencies
from . import ExtensionModule
from mesonbuild.modules import ModuleReturnValue
from ..interpreterbase import noKwargs, permittedKwargs, FeatureDeprecated
from ..build import known_shmod_kwargs
class Python3Module(ExtensionModule):
@FeatureDeprecated('python3 module', '0.48.0')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.snippets.add('extension_module')
@permittedKwargs(known_shmod_kwargs)
def extension_module(self, interpreter, state, args, kwargs):
if 'name_prefix' in kwargs:
raise mesonlib.MesonException('Name_prefix is set automatically, specifying it is forbidden.')
if 'name_suffix' in kwargs:
raise mesonlib.MesonException('Name_suffix is set automatically, specifying it is forbidden.')
host_system = state.host_machine.system
if host_system == 'darwin':
# Default suffix is 'dylib' but Python does not use it for extensions.
suffix = 'so'
elif host_system == 'windows':
# On Windows the extension is pyd for some unexplainable reason.
suffix = 'pyd'
else:
suffix = []
kwargs['name_prefix'] = ''
kwargs['name_suffix'] = suffix
return interpreter.func_shared_module(None, args, kwargs)
@noKwargs
def find_python(self, state, args, kwargs):
py3 = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
return ModuleReturnValue(py3, [py3])
@noKwargs
def language_version(self, state, args, kwargs):
return ModuleReturnValue(sysconfig.get_python_version(), [])
@noKwargs
def sysconfig_path(self, state, args, kwargs):
if len(args) != 1:
raise mesonlib.MesonException('sysconfig_path() requires passing the name of path to get.')
path_name = args[0]
valid_names = sysconfig.get_path_names()
if path_name not in valid_names:
raise mesonlib.MesonException('{} is not a valid path name {}.'.format(path_name, valid_names))
# Get a relative path without a prefix, e.g. lib/python3.6/site-packages
path = sysconfig.get_path(path_name, vars={'base': '', 'platbase': '', 'installed_base': ''})[1:]
return ModuleReturnValue(path, [])
def initialize(*args, **kwargs):
return Python3Module(*args, **kwargs)
| # Copyright 2016-2017 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sysconfig
from .. import mesonlib, dependencies
from . import ExtensionModule
from mesonbuild.modules import ModuleReturnValue
from ..interpreterbase import noKwargs, permittedKwargs
from ..build import known_shmod_kwargs
class Python3Module(ExtensionModule):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.snippets.add('extension_module')
@permittedKwargs(known_shmod_kwargs)
def extension_module(self, interpreter, state, args, kwargs):
if 'name_prefix' in kwargs:
raise mesonlib.MesonException('Name_prefix is set automatically, specifying it is forbidden.')
if 'name_suffix' in kwargs:
raise mesonlib.MesonException('Name_suffix is set automatically, specifying it is forbidden.')
host_system = state.host_machine.system
if host_system == 'darwin':
# Default suffix is 'dylib' but Python does not use it for extensions.
suffix = 'so'
elif host_system == 'windows':
# On Windows the extension is pyd for some unexplainable reason.
suffix = 'pyd'
else:
suffix = []
kwargs['name_prefix'] = ''
kwargs['name_suffix'] = suffix
return interpreter.func_shared_module(None, args, kwargs)
@noKwargs
def find_python(self, state, args, kwargs):
py3 = dependencies.ExternalProgram('python3', mesonlib.python_command, silent=True)
return ModuleReturnValue(py3, [py3])
@noKwargs
def language_version(self, state, args, kwargs):
return ModuleReturnValue(sysconfig.get_python_version(), [])
@noKwargs
def sysconfig_path(self, state, args, kwargs):
if len(args) != 1:
raise mesonlib.MesonException('sysconfig_path() requires passing the name of path to get.')
path_name = args[0]
valid_names = sysconfig.get_path_names()
if path_name not in valid_names:
raise mesonlib.MesonException('{} is not a valid path name {}.'.format(path_name, valid_names))
# Get a relative path without a prefix, e.g. lib/python3.6/site-packages
path = sysconfig.get_path(path_name, vars={'base': '', 'platbase': '', 'installed_base': ''})[1:]
return ModuleReturnValue(path, [])
def initialize(*args, **kwargs):
return Python3Module(*args, **kwargs)
| Python | 0.000007 |
1018d6bde32a8d18a2315dafd084826443209ba1 | Update clock.py | examples/clock.py | examples/clock.py | #!/usr/bin/python
import time
import datetime
from Adafruit_LED_Backpack import SevenSegment
# ===========================================================================
# Clock Example
# ===========================================================================
segment = SevenSegment.SevenSegment(address=0x70)
# Initialize the display. Must be called once before using the display.
segment.begin()
print "Press CTRL+Z to exit"
# Continually update the time on a 4 char, 7-segment display
while(True):
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
second = now.second
if hour >= 22 or hour < 7 and minutes == 26:
segment.set_brightness(0)
else:
segment.set_brightness(5)
if hour == 24:
hour = 12
else:
hour = hour % 12
A = int(hour / 10)
if A == 0:
A = ' '
segment.clear()
# Set hours
segment.set_digit(0, A) # Tens
segment.set_digit(1, hour % 10) # Ones
# Set minutes
segment.set_digit(2, int(minute / 10)) # Tens
segment.set_digit(3, minute % 10) # Ones
# Toggle colon
segment.set_colon(second % 2) # Toggle colon at 1Hz
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
segment.write_display()
# Wait a quarter second (less than 1 second to prevent colon blinking getting$
time.sleep(0.25)
| #!/usr/bin/python
import time
import datetime
from Adafruit_LED_Backpack import SevenSegment
# ===========================================================================
# Clock Example
# ===========================================================================
segment = SevenSegment.SevenSegment(address=0x70)
# Initialize the display. Must be called once before using the display.
segment.begin()
print "Press CTRL+Z to exit"
# Continually update the time on a 4 char, 7-segment display
while(True):
now = datetime.datetime.now()
hour = now.hour
minute = now.minute
second = now.second
if hour >= 22 or hour < 7:
segment.set_brightness(0)
else:
segment.set_brightness(5)
if hour == 24:
hour = 12
else:
hour = hour % 12
A = int(hour / 10)
if A == 0:
A = ' '
segment.clear()
# Set hours
segment.set_digit(0, A) # Tens
segment.set_digit(1, hour % 10) # Ones
# Set minutes
segment.set_digit(2, int(minute / 10)) # Tens
segment.set_digit(3, minute % 10) # Ones
# Toggle colon
segment.set_colon(second % 2) # Toggle colon at 1Hz
# Write the display buffer to the hardware. This must be called to
# update the actual display LEDs.
segment.write_display()
# Wait a quarter second (less than 1 second to prevent colon blinking getting$
time.sleep(0.25)
| Python | 0.000002 |
37da8a56f127a871c4133f0ba58921779e9b487c | Update __init__.py | deepdish/io/__init__.py | deepdish/io/__init__.py | from __future__ import division, print_function, absolute_import
from .mnist import load_mnist
from .norb import load_small_norb
from .casia import load_casia
from .cifar import load_cifar_10
try:
import tables
_pytables_ok = True
del tables
except ImportError:
_pytables_ok = False
if _pytables_ok:
from .hdf5io import load, save
else:
def _f(*args, **kwargs):
raise ImportError("You need PyTables for this function")
load = save = _f
__all__ = ['load_mnist', 'load_small_norb', 'load_casia', 'load_cifar_10']
| from __future__ import division, print_function, absolute_import
from .mnist import load_mnist
from .norb import load_small_norb
from .casia import load_casia
from .cifar import load_cifar_10
try:
import tables
_pytables_ok = True
except ImportError:
_pytables_ok = False
del tables
if _pytables_ok:
from .hdf5io import load, save
else:
def _f(*args, **kwargs):
raise ImportError("You need PyTables for this function")
load = save = _f
__all__ = ['load_mnist', 'load_small_norb', 'load_casia', 'load_cifar_10']
| Python | 0 |
0d914a4843e5959c108077e8c5275a1ddd05f617 | Upgrade version number | djaloha/__init__.py | djaloha/__init__.py | # -*- coding: utf-8 -*-
VERSION = (0, 2)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# if VERSION[2]:
# version = '%s.%s' % (version, VERSION[2])
# if VERSION[3] != "final":
# version = '%s%s%s' % (version, VERSION[3], VERSION[4])
return version
__version__ = get_version()
| # -*- coding: utf-8 -*-
VERSION = (0, 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# if VERSION[2]:
# version = '%s.%s' % (version, VERSION[2])
# if VERSION[3] != "final":
# version = '%s%s%s' % (version, VERSION[3], VERSION[4])
return version
__version__ = get_version()
| Python | 0.000001 |
0f5433458be9add6a879e8e490017663714d7664 | fix cron job FailedRunsNotificationCronJob to import get_class routine from new place | django_cron/cron.py | django_cron/cron.py | from django.conf import settings
from django_cron import CronJobBase, Schedule, get_class
from django_cron.models import CronJobLog
from django_common.helper import send_mail
class FailedRunsNotificationCronJob(CronJobBase):
"""
Send email if cron failed to run X times in a row
"""
RUN_EVERY_MINS = 30
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'django_cron.FailedRunsNotificationCronJob'
def do(self):
CRONS_TO_CHECK = map(lambda x: get_class(x), settings.CRON_CLASSES)
EMAILS = [admin[1] for admin in settings.ADMINS]
try:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = settings.FAILED_RUNS_CRONJOB_EMAIL_PREFIX
except:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = ''
for cron in CRONS_TO_CHECK:
try:
min_failures = cron.MIN_NUM_FAILURES
except AttributeError:
min_failures = 10
failures = 0
jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures]
message = ''
for job in jobs:
if not job.is_success:
failures += 1
message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message)
if failures == min_failures:
send_mail(
'%s%s failed %s times in a row!' % (FAILED_RUNS_CRONJOB_EMAIL_PREFIX, cron.code, \
min_failures), message,
settings.DEFAULT_FROM_EMAIL, EMAILS
)
| from django.conf import settings
from django_cron import CronJobBase, Schedule
from django_cron.models import CronJobLog
from django_cron.management.commands.runcrons import get_class
from django_common.helper import send_mail
class FailedRunsNotificationCronJob(CronJobBase):
"""
Send email if cron failed to run X times in a row
"""
RUN_EVERY_MINS = 30
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'django_cron.FailedRunsNotificationCronJob'
def do(self):
CRONS_TO_CHECK = map(lambda x: get_class(x), settings.CRON_CLASSES)
EMAILS = [admin[1] for admin in settings.ADMINS]
try:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = settings.FAILED_RUNS_CRONJOB_EMAIL_PREFIX
except:
FAILED_RUNS_CRONJOB_EMAIL_PREFIX = ''
for cron in CRONS_TO_CHECK:
try:
min_failures = cron.MIN_NUM_FAILURES
except AttributeError:
min_failures = 10
failures = 0
jobs = CronJobLog.objects.filter(code=cron.code).order_by('-end_time')[:min_failures]
message = ''
for job in jobs:
if not job.is_success:
failures += 1
message += 'Job ran at %s : \n\n %s \n\n' % (job.start_time, job.message)
if failures == min_failures:
send_mail(
'%s%s failed %s times in a row!' % (FAILED_RUNS_CRONJOB_EMAIL_PREFIX, cron.code, \
min_failures), message,
settings.DEFAULT_FROM_EMAIL, EMAILS
)
| Python | 0 |
57184440872c8c29906c84a919624e7878f7d75c | fix compat | skitai/backbone/https_server.py | skitai/backbone/https_server.py | #!/usr/bin/env python
from . import http_server
from ..counter import counter
import socket, time
from rs4 import asyncore
import ssl
from skitai import lifetime
import os, sys, errno
import skitai
from errno import EWOULDBLOCK
from aquests.protocols.http2 import H2_PROTOCOLS
from ..handlers import vhost_handler
class https_channel (http_server.http_channel):
ac_out_buffer_size = 65536
ac_in_buffer_size = 65536
def send(self, data):
try:
result = self.socket.send(data)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_WRITE:
return 0
elif why.errno == ssl.SSL_ERROR_ZERO_RETURN:
self.handle_close ()
return 0
else:
raise
if result <= 0:
return 0
else:
self.server.bytes_out.increment(result)
return result
def recv(self, buffer_size = 65535):
try:
result = self.socket.recv(buffer_size)
if result is None:
return b''
elif result == b'':
self.handle_close()
return b''
else:
self.server.bytes_in.increment(len(result))
return result
except MemoryError:
lifetime.shutdown (1, 1.0)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_READ:
try:
raise BlockingIOError
except NameError:
raise socket.error (EWOULDBLOCK)
# closed connection
elif why.errno in (ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_EOF):
self.handle_close ()
return b''
else:
raise
class https_server (http_server.http_server):
CERTINFO = None
def __init__ (self, ip, port, ctx, quic = None, server_logger = None, request_logger = None):
super ().__init__ (ip, port, server_logger, request_logger)
self.ctx = ctx
self.socket = self.ctx.wrap_socket (self.socket, server_side = True)
if quic:
if sys.version_info.major == 3 and sys.version_info.minor < 6:
self.log ('unsupoorted Python version for QUIC, DISABLED', 'error')
else:
from . import http3_server
ctx = http3_server.init_context (*self.CERTINFO)
self.altsvc = http3_server.http3_server (ip, quic, ctx, server_logger, request_logger)
def install_handler (self, handler, back = 1):
super ().install_handler (handler, back)
if self.altsvc and isinstance (handler, vhost_handler.Handler):
self.altsvc.install_handler (handler)
def serve (self, sub_server = None):
self.altsvc and self.altsvc._serve ()
super ().serve (sub_server)
def handle_accept (self):
self.total_clients.inc()
try:
conn, addr = self.accept()
except socket.error:
#self.log_info ('server accept() threw an exception', 'warning')
return
except TypeError:
if os.name == "nt":
self.log_info ('server accept() threw EWOULDBLOCK', 'warning')
return
except:
self.trace()
https_channel (self, conn, addr)
def init_context (certfile, keyfile, pass_phrase):
https_server.CERTINFO = (certfile, keyfile, pass_phrase)
try:
protocol = ssl.PROTOCOL_TLS
except AttributeError:
protocol = ssl.PROTOCOL_SSLv23
ctx = ssl.SSLContext (protocol)
try:
ctx.set_alpn_protocols (H2_PROTOCOLS)
except AttributeError:
ctx.set_npn_protocols (H2_PROTOCOLS)
ctx.load_cert_chain (certfile, keyfile, pass_phrase)
ctx.check_hostname = False
return ctx
| #!/usr/bin/env python
from . import http_server
from ..counter import counter
import socket, time
from rs4 import asyncore
import ssl
from skitai import lifetime
import os, sys, errno
import skitai
from errno import EWOULDBLOCK
from aquests.protocols.http2 import H2_PROTOCOLS
from ..handlers import vhost_handler
class https_channel (http_server.http_channel):
ac_out_buffer_size = 65536
ac_in_buffer_size = 65536
def send(self, data):
try:
result = self.socket.send(data)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_WRITE:
return 0
elif why.errno == ssl.SSL_ERROR_ZERO_RETURN:
self.handle_close ()
return 0
else:
raise
if result <= 0:
return 0
else:
self.server.bytes_out.increment(result)
return result
def recv(self, buffer_size = 65535):
try:
result = self.socket.recv(buffer_size)
if result is None:
return b''
elif result == b'':
self.handle_close()
return b''
else:
self.server.bytes_in.increment(len(result))
return result
except MemoryError:
lifetime.shutdown (1, 1.0)
except ssl.SSLError as why:
if why.errno == ssl.SSL_ERROR_WANT_READ:
try:
raise BlockingIOError
except NameError:
raise socket.error (EWOULDBLOCK)
# closed connection
elif why.errno in (ssl.SSL_ERROR_ZERO_RETURN, ssl.SSL_ERROR_EOF):
self.handle_close ()
return b''
else:
raise
class https_server (http_server.http_server):
CERTINFO = None
def __init__ (self, ip, port, ctx, quic = None, server_logger = None, request_logger = None):
super ().__init__ (ip, port, server_logger, request_logger)
self.ctx = ctx
self.socket = self.ctx.wrap_socket (self.socket, server_side = True)
if quic:
from . import http3_server
ctx = http3_server.init_context (*self.CERTINFO)
self.altsvc = http3_server.http3_server (ip, quic, ctx, server_logger, request_logger)
def install_handler (self, handler, back = 1):
super ().install_handler (handler, back)
if self.altsvc and isinstance (handler, vhost_handler.Handler):
self.altsvc.install_handler (handler)
def serve (self, sub_server = None):
self.altsvc and self.altsvc._serve ()
super ().serve (sub_server)
def handle_accept (self):
self.total_clients.inc()
try:
conn, addr = self.accept()
except socket.error:
#self.log_info ('server accept() threw an exception', 'warning')
return
except TypeError:
if os.name == "nt":
self.log_info ('server accept() threw EWOULDBLOCK', 'warning')
return
except:
self.trace()
https_channel (self, conn, addr)
def init_context (certfile, keyfile, pass_phrase):
https_server.CERTINFO = (certfile, keyfile, pass_phrase)
try:
protocol = ssl.PROTOCOL_TLS
except AttributeError:
protocol = ssl.PROTOCOL_SSLv23
ctx = ssl.SSLContext (protocol)
try:
ctx.set_alpn_protocols (H2_PROTOCOLS)
except AttributeError:
ctx.set_npn_protocols (H2_PROTOCOLS)
ctx.load_cert_chain (certfile, keyfile, pass_phrase)
ctx.check_hostname = False
return ctx
| Python | 0.000001 |
7f3b2b0ab21e4dadffb55da912684eb84ce6da3d | Check if remot git is already on commit | gitric/api.py | gitric/api.py | from __future__ import with_statement
from fabric.state import env
from fabric.api import local, run, abort, task, cd, puts
from fabric.context_managers import settings
@task
def allow_dirty():
'''allow pushing even when the working copy is dirty'''
env.gitric_allow_dirty = True
@task
def force_push():
'''allow pushing even when history will be lost'''
env.gitric_force_push = True
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
'''seed a remote git repository'''
commit = _get_commit(commit)
force = ('gitric_force_push' in env) and '-f' or ''
dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
if dirty_working_copy and 'gitric_allow_dirty' not in env:
abort(
'Working copy is dirty. This check can be overridden by\n'
'importing gitric.api.allow_dirty and adding allow_dirty to your '
'call.')
# initialize the remote repository (idempotent)
run('git init %s' % repo_path)
# finis execution if remote git it's already on commit.
with cd(repo_path):
if run('git rev-parse HEAD') == commit:
puts('Remote already on commit %s' % commit)
return
# silence git complaints about pushes coming in on the current branch
# the pushes only seed the immutable object store and do not modify the
# working copy
run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
repo_path)
# a target doesn't need to keep track of which branch it is on so we always
# push to its "master"
with settings(warn_only=True):
push = local(
'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
env.user, env.host, env.port, repo_path, commit, force))
if push.failed:
abort(
'%s is a non-fast-forward\n'
'push. The seed will abort so you don\'t lose information. '
'If you are doing this\nintentionally import '
'gitric.api.force_push and add it to your call.' % commit)
def git_reset(repo_path, commit=None):
'''checkout a sha1 on a remote git repo'''
commit = _get_commit(commit)
run('cd %s && git reset --hard %s' % (repo_path, commit))
def _get_commit(commit):
if commit is None:
# if no commit is specified we will push HEAD
commit = local('git rev-parse HEAD', capture=True)
return commit
def _is_dirty(commit, ignore_untracked_files):
untracked_files = '--untracked-files=no' if ignore_untracked_files else ''
return local('git status %s --porcelain' % untracked_files, capture=True) != ''
| from __future__ import with_statement
from fabric.state import env
from fabric.api import local, run, abort, task
from fabric.context_managers import settings
@task
def allow_dirty():
'''allow pushing even when the working copy is dirty'''
env.gitric_allow_dirty = True
@task
def force_push():
'''allow pushing even when history will be lost'''
env.gitric_force_push = True
def git_seed(repo_path, commit=None, ignore_untracked_files=False):
'''seed a remote git repository'''
commit = _get_commit(commit)
force = ('gitric_force_push' in env) and '-f' or ''
dirty_working_copy = _is_dirty(commit, ignore_untracked_files)
if dirty_working_copy and 'gitric_allow_dirty' not in env:
abort(
'Working copy is dirty. This check can be overridden by\n'
'importing gitric.api.allow_dirty and adding allow_dirty to your '
'call.')
# initialize the remote repository (idempotent)
run('git init %s' % repo_path)
# silence git complaints about pushes coming in on the current branch
# the pushes only seed the immutable object store and do not modify the
# working copy
run('GIT_DIR=%s/.git git config receive.denyCurrentBranch ignore' %
repo_path)
# a target doesn't need to keep track of which branch it is on so we always
# push to its "master"
with settings(warn_only=True):
push = local(
'git push git+ssh://%s@%s:%s%s %s:refs/heads/master %s' % (
env.user, env.host, env.port, repo_path, commit, force))
if push.failed:
abort(
'%s is a non-fast-forward\n'
'push. The seed will abort so you don\'t lose information. '
'If you are doing this\nintentionally import '
'gitric.api.force_push and add it to your call.' % commit)
def git_reset(repo_path, commit=None):
'''checkout a sha1 on a remote git repo'''
commit = _get_commit(commit)
run('cd %s && git reset --hard %s' % (repo_path, commit))
def _get_commit(commit):
if commit is None:
# if no commit is specified we will push HEAD
commit = local('git rev-parse HEAD', capture=True)
return commit
def _is_dirty(commit, ignore_untracked_files):
untracked_files = '--untracked-files=no' if ignore_untracked_files else ''
return local('git status %s --porcelain' % untracked_files, capture=True) != ''
| Python | 0 |
47e7fcc3b837b459a2800e09ee87c2a6f87cdfba | Update SController.py | skype_controller/SController.py | skype_controller/SController.py | """Import somes important packages"""
import Skype4Py
import config as gbconfig
import json
from common import get_project_path
# Get Skype class instance
SKYPE_OBJ = Skype4Py.Skype()
# Establish the connection from the Skype object to the Skype ddclient.
SKYPE_OBJ.Attach()
# Get all contact from object. This function might not be used in this case
def get_file():
"""Function to get file contains list of skype's contact"""
returndata = {}
try:
root_path = get_project_path()
# print root_path
file_path = "%s/%s" % (root_path, gbconfig.FILE_CONTACT)
filename = open(file_path, 'r')
returndata = json.loads(filename.read())
filename.close()
except Exception as ex:
print 'What the fuck? I could not load your file: %s - %s' % (gbconfig.FILE_CONTACT, ex)
return returndata
def main_function():
"""Runable function"""
get_file()
for contact, message in get_file().iteritems():
SKYPE_OBJ.SendMessage(contact, message)
print "Message has been sent"
if __name__ == "__main__":
main_function()
| """Import somes important packages"""
import Skype4Py
import config as gbconfig
import json
from common import get_project_path
# Get Skype class instance
SKYPE_OBJ = Skype4Py.Skype()
# Establish the connection from the Skype object to the Skype ddclient.
SKYPE_OBJ.Attach()
# Get all contact from object. This function might not be used in this case
"""Function to get file contains list of skype's contact"""
returndata = {}
try:
root_path = get_project_path()
# print root_path
file_path = "%s/%s" % (root_path, gbconfig.FILE_CONTACT)
filename = open(file_path, 'r')
returndata = json.loads(filename.read())
filename.close()
except Exception as ex:
print 'What the fuck? I could not load your file: %s - %s' % (gbconfig.FILE_CONTACT, ex)
return returndata
def main_function():
"""Runable function"""
get_file()
for contact, message in get_file().iteritems():
SKYPE_OBJ.SendMessage(contact, message)
print "Message has been sent"
if __name__ == "__main__":
main_function()
| Python | 0.000001 |
f21e732eada64a18e08524052ec66ce8705d9e9b | make imagemagick env var default to 'convert' instead of None | glc/config.py | glc/config.py | """
glc.config
==========
At the moment this only houses the environmental variable
for the ImageMagick binary. If you don't want to set that,
or can't for some reason, you can replace ``"convert"`` with the
path where the ``convert`` application that comes with it
lives in, if it doesn't happen to be in your PATH.
(c) 2016 LeoV
https://github.com/leovoel/
"""
import os
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", "convert")
| """
glc.config
==========
At the moment this only houses the environmental variable
for the ImageMagick binary. If you don't want to set that,
or can't for some reason, you can replace ``None`` with the
path where the ``convert`` application that comes with it
lives in.
(c) 2016 LeoV
https://github.com/leovoel/
"""
import os
IMAGEMAGICK_BINARY = os.getenv("IMAGEMAGICK_BINARY", None)
| Python | 0.000113 |
d338dc15c57e3aea12de78354da908b1457c5055 | Clean up command more (#30) | earwigbot/commands/lag.py | earwigbot/commands/lag.py | # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012 Ben Kurtovic <ben.kurtovic@verizon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from earwigbot import exceptions
from earwigbot.commands import Command
class Lag(Command):
"""Return the replag for a specific database on the Toolserver."""
name = "lag"
commands = ["lag", "replag", "maxlag"]
def process(self, data):
site = self.get_site(data)
if not site:
return
msg = "\x0302{0}\x0F: Toolserver replag is {1} seconds; database maxlag is {2} seconds."
msg = msg.format(site.name, site.get_replag(), site.get_maxlag())
self.reply(data, msg)
def get_site(self):
if data.kwargs and "project" in data.kwargs and "lang" in data.kwargs:
project, lang = data.kwargs["project"], data.kwargs["lang"]
return self.get_site_from_proj_and_lang(data, project, lang)
if not data.args:
return self.bot.wiki.get_site()
if len(data.args) > 1:
name = " ".join(data.args)
self.reply(data, "unknown site: \x0302{0}\x0F.".format(name))
return
name = data.args[0]
if "." in name:
lang, project = name.split(".")[:2]
elif ":" in name:
project, lang = name.split(":")[:2]
else:
try:
return self.bot.wiki.get_site(name)
except exceptions.SiteNotFoundError:
msg = "unknown site: \x0302{0}\x0F.".format(name)
self.reply(data, msg)
return
return self.get_site_from_proj_and_lang(data, project, lang)
def get_site_from_proj_and_lang(self, data, project, lang):
try:
site = self.bot.wiki.get_site(project=project, lang=lang)
except exceptions.SiteNotFoundError:
try:
site = self.bot.wiki.add_site(project=project, lang=lang)
except exceptions.APIError:
msg = "site \x0302{0}:{1}\x0F not found."
self.reply(data, msg.format(project, lang))
return
return site
| # -*- coding: utf-8 -*-
#
# Copyright (C) 2009-2012 Ben Kurtovic <ben.kurtovic@verizon.net>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from earwigbot import exceptions
from earwigbot.commands import Command
class Lag(Command):
"""Return the replag for a specific database on the Toolserver."""
name = "lag"
commands = ["lag", "replag", "maxlag"]
def process(self, data):
if data.kwargs and "project" in data.kwargs and "lang" in data.kwargs:
project, lang = data.kwargs["project"], data.kwargs["lang"]
site = self.get_site(data, project, lang)
if not site:
return
elif data.args:
if len(data.args) > 1:
name = " ".join(data.args)
self.reply(data, "unknown site: \x0302{0}\x0F.".format(name))
return
name = data.args[0]
if "." in name:
lang, project = name.split(".")[:2]
elif ":" in name:
project, lang = name.split(":")[:2]
else:
try:
site = self.bot.wiki.get_site(name)
except exceptions.SiteNotFoundError:
msg = "unknown site: \x0302{0}\x0F.".format(name)
self.reply(data, msg)
return
site = self.get_site(data, project, lang)
if not site:
return
else:
site = self.bot.wiki.get_site()
msg = "\x0302{0}\x0F: Toolserver replag is {1} seconds; database maxlag is {2} seconds"
msg = msg.format(site.name, site.get_replag(), site.get_maxlag())
self.reply(data, msg)
def get_site(self, data, project, lang):
try:
site = self.bot.wiki.get_site(project=project, lang=lang)
except exceptions.SiteNotFoundError:
try:
site = self.bot.wiki.add_site(project=project, lang=lang)
except exceptions.APIError:
msg = "site \x0302{0}:{1}\x0F not found."
self.reply(data, msg.format(project, lang))
return
return site
| Python | 0.000001 |
09a313a2cd74c391c12761306cb8ae641e9f0d28 | fix logs app prompt | ebcli/controllers/logs.py | ebcli/controllers/logs.py | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text
from ..operations import logsops
from ..objects.exceptions import InvalidOptionsError, NotFoundError
class LogsController(AbstractBaseController):
class Meta:
label = 'logs'
description = strings['logs.info']
usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
arguments = AbstractBaseController.Meta.arguments + [
(['-a', '--all'], dict(
action='store_true', help=flag_text['logs.all'])),
(['-z', '--zip'], dict(
action='store_true', help=flag_text['logs.zip'])),
(['-i', '--instance'], dict(help=flag_text['logs.instance'])),
(['--stream'], dict(action='store_true',
help=flag_text['logs.stream'])),
]
epilog = strings['logs.epilog']
def do_command(self):
app_name = self.get_app_name()
env_name = self.get_env_name()
if self.app.pargs.stream:
try:
return logsops.stream_logs(env_name)
except NotFoundError:
raise NotFoundError(strings['cloudwatch-stream.notsetup'])
all = self.app.pargs.all
instance = self.app.pargs.instance
zip = self.app.pargs.zip
if all and instance:
raise InvalidOptionsError(strings['logs.allandinstance'])
if zip:
info_type = 'bundle'
do_zip = True
elif all:
info_type = 'bundle'
do_zip = False
else:
info_type = 'tail'
do_zip = False
logsops.logs(env_name, info_type, do_zip=do_zip,
instance_id=instance) | # Copyright 2014 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import argparse
from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, flag_text
from ..operations import logsops
from ..objects.exceptions import InvalidOptionsError, NotFoundError
class LogsController(AbstractBaseController):
class Meta:
label = 'logs'
description = strings['logs.info']
usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
arguments = AbstractBaseController.Meta.arguments + [
(['-a', '--all'], dict(
action='store_true', help=flag_text['logs.all'])),
(['-z', '--zip'], dict(
action='store_true', help=flag_text['logs.zip'])),
(['-i', '--instance'], dict(help=flag_text['logs.instance'])),
(['--stream'], dict(action='store_true',
help=flag_text['logs.stream'])),
]
epilog = strings['logs.epilog']
def do_command(self):
env_name = self.get_env_name()
if self.app.pargs.stream:
try:
return logsops.stream_logs(env_name)
except NotFoundError:
raise NotFoundError(strings['cloudwatch-stream.notsetup'])
all = self.app.pargs.all
instance = self.app.pargs.instance
zip = self.app.pargs.zip
if all and instance:
raise InvalidOptionsError(strings['logs.allandinstance'])
if zip:
info_type = 'bundle'
do_zip = True
elif all:
info_type = 'bundle'
do_zip = False
else:
info_type = 'tail'
do_zip = False
logsops.logs(env_name, info_type, do_zip=do_zip,
instance_id=instance) | Python | 0.000001 |
d3cd1778f4ccb1651feb2186ecfdd0c81f86088c | Improve Instruction parsing | instruction.py | instruction.py | class Instruction(object):
def __init__(self, line):
instr = line.split(' ')
self.name = instr[0]
self.ops = []
if len(instr) > 4:
raise Exception('too many operands: {}'.format(line))
# iterate through operands, perform some loose checks, and append
# to self.ops
for each in instr[1:]:
if each.endswith(','):
each = each[:-1]
self.ops.append(each[1:] if each.startswith('$') else each)
| class Instruction(object):
def __init__(self, line):
instr = line.split(' ')
self.name = instr[0]
self.ops = []
if len(instr) > 4:
raise Exception('too many operands: {}'.format(line))
# iterate through operands, perform some loose checks, and append
# to self.ops
for i, each in enumerate(instr[1:]):
if each.endswith(','):
each = each[:-1]
if each.startswith('$'):
self.ops.append(each[1:])
else:
self.ops.append(each)
| Python | 0.000072 |
23c95dcba178b3876bf07bec4b0c4f5c06895181 | add padding to file names | copyfiles.py | copyfiles.py | import hashlib
import os
import random
import shutil
import string
BLOCKSIZE = 65536
class FileCopier(object):
def __init__(self, dest_dir, copy):
self._dest_dir = dest_dir
self._copy = copy
def copy_file(self, in_path, date, subject):
out_dir = self._get_directory_name(date, subject)
out_file = self._get_file_name(in_path, date)
self._create_dir(out_dir)
out_path = os.path.join(out_dir, out_file)
if os.path.exists(out_path):
if self._hash_file(in_path) == self._hash_file(out_path):
print 'File already exists - skipping'
return
else:
basename, extension = os.path.splitext(out_path)
rand_str = ''.join(random.choice(string.lowercase) for i in range(10))
out_file = basename + '-' + rand_str + extension
out_path = os.path.join(out_dir, out_file)
self._copy_file(in_path, out_path)
def _get_directory_name(self, date, subject):
month_dir = '%(y)04d-%(m)02d' % {'y': date.year, 'm': date.month}
subject_dir = ('%(y)04d_%(m)02d_%(d)02d-%(subj)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'subj': subject})
return os.path.join(self._dest_dir, month_dir, subject_dir)
def _get_file_name(self, in_path, date):
file_basename = ('%(y)04d_%(m)02d_%(d)02d-%(hr)s_%(min)s_%(sec)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'hr': date.hour, 'min': date.minute,
'sec': date.second})
extension = os.path.splitext(in_path)[1]
return file_basename + extension
def _create_dir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def _hash_file(self, path):
hasher = hashlib.md5()
with open(path, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def _copy_file(self, in_path, out_path):
print 'Copying file from', in_path, 'to', out_path
if self._copy:
shutil.copy2(in_path, out_path)
else:
os.link(in_path, out_path)
| import hashlib
import os
import random
import shutil
import string
BLOCKSIZE = 65536
class FileCopier(object):
def __init__(self, dest_dir, copy):
self._dest_dir = dest_dir
self._copy = copy
def copy_file(self, in_path, date, subject):
out_dir = self._get_directory_name(date, subject)
out_file = self._get_file_name(in_path, date)
self._create_dir(out_dir)
out_path = os.path.join(out_dir, out_file)
if os.path.exists(out_path):
if self._hash_file(in_path) == self._hash_file(out_path):
print 'File already exists - skipping'
return
else:
basename, extension = os.path.splitext(out_path)
rand_str = ''.join(random.choice(string.lowercase) for i in range(10))
out_file = basename + '-' + rand_str + extension
out_path = os.path.join(out_dir, out_file)
self._copy_file(in_path, out_path)
def _get_directory_name(self, date, subject):
month_dir = '%(y)s-%(m)s' % {'y': date.year, 'm': date.month}
subject_dir = ('%(y)s_%(m)s_%(d)s-%(subj)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'subj': subject})
return os.path.join(self._dest_dir, month_dir, subject_dir)
def _get_file_name(self, in_path, date):
file_basename = ('%(y)s_%(m)s_%(d)s-%(hr)s_%(min)s_%(sec)s' %
{'y': date.year, 'm': date.month, 'd': date.day,
'hr': date.hour, 'min': date.minute,
'sec': date.second})
extension = os.path.splitext(in_path)[1]
return file_basename + extension
def _create_dir(self, path):
if not os.path.exists(path):
os.makedirs(path)
def _hash_file(self, path):
hasher = hashlib.md5()
with open(path, 'rb') as afile:
buf = afile.read(BLOCKSIZE)
while len(buf) > 0:
hasher.update(buf)
buf = afile.read(BLOCKSIZE)
return hasher.hexdigest()
def _copy_file(self, in_path, out_path):
print 'Copying file from', in_path, 'to', out_path
if self._copy:
shutil.copy2(in_path, out_path)
else:
os.link(in_path, out_path)
| Python | 0.000001 |
daa29e745256b164b3375e502444ec247aa0d892 | implement get_default_args for the future repr development | logwrap/func_helpers.py | logwrap/func_helpers.py | # Copyright 2016 Mirantis, Inc.
# Copyright 2016 Alexey Stepanov aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""func_helpers module
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import absolute_import
import collections
import inspect
import sys
# pylint: disable=no-member
def get_arg_names(func):
"""get argument names for function
:param func: Function to extract arguments from
:type func: callable
:return: list of function argument names
:rtype: list
>>> def tst_1():
... pass
>>> get_arg_names(tst_1)
[]
>>> def tst_2(arg):
... pass
>>> get_arg_names(tst_2)
['arg']
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 3):
# pylint: disable=deprecated-method
# noinspection PyDeprecation
spec = inspect.getargspec(func=func)
# pylint: enable=deprecated-method
args = spec.args[:]
if spec.varargs:
args.append(spec.varargs)
if spec.keywords:
args.append(spec.keywords)
return args
return list(inspect.signature(obj=func).parameters.keys())
def get_call_args(func, *positional, **named):
"""get real function call arguments without calling function
:param func: Function to bind arguments
:type func: callable
:type positional: iterable
:type named: dict
:rtype: collections.OrderedDict
>>> def tst(arg, darg=2, *args, **kwargs):
... pass
>>> get_call_args(tst, *(1, ))
OrderedDict([('arg', 1), ('darg', 2), ('args', ()), ('kwargs', {})])
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 5): # apply_defaults is py35 feature
# pylint: disable=deprecated-method
orig_args = inspect.getcallargs(func, *positional, **named)
# pylint: enable=deprecated-method
# Construct OrderedDict as Py3
arguments = collections.OrderedDict(
[(key, orig_args[key]) for key in get_arg_names(func)]
)
return arguments
sig = inspect.signature(func).bind(*positional, **named)
sig.apply_defaults() # after bind we doesn't have defaults
return sig.arguments
def get_default_args(func):
"""Get function defaults from it's signature
:param func: target function
:type func: function
:rtype: collections.OrderedDict
>>> def tst0():pass
>>> get_default_args(tst0)
OrderedDict()
>>> def tst1(a): pass
>>> get_default_args(tst1)
OrderedDict()
>>> def tst2(a, b): pass
>>> get_default_args(tst2)
OrderedDict()
>>> def tst3(a=0): pass
>>> get_default_args(tst3)
OrderedDict([('a', 0)])
>>> def tst4(a, b=1): pass
>>> get_default_args(tst4)
OrderedDict([('b', 1)])
>>> def tst5(a=0, b=1): pass
>>> get_default_args(tst5)
OrderedDict([('a', 0), ('b', 1)])
"""
if sys.version_info[0:2] < (3, 0):
# pylint: disable=deprecated-method
# noinspection PyDeprecation
spec = inspect.getargspec(func)
# pylint: enable=deprecated-method
if not spec.defaults:
return collections.OrderedDict()
collector = []
for val in range(1, len(spec.defaults)+1):
collector.append((spec.args[-val], spec.defaults[-val]))
return collections.OrderedDict(reversed(collector))
sig = inspect.signature(func)
result = collections.OrderedDict(
[
(arg.name, arg.default)
for arg in sig.parameters.values()
if arg.default != inspect.Parameter.empty
]
)
return result
# pylint: enable=no-member
__all__ = ['get_arg_names', 'get_call_args']
| # Copyright 2016 Mirantis, Inc.
# Copyright 2016 Alexey Stepanov aka penguinolog
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""func_helpers module
This is no reason to import this submodule directly, all required methods is
available from the main module.
"""
from __future__ import absolute_import
import collections
import inspect
import sys
# pylint: disable=no-member
def get_arg_names(func):
"""get argument names for function
:param func: Function to extract arguments from
:type func: callable
:return: list of function argument names
:rtype: list
>>> def tst_1():
... pass
>>> get_arg_names(tst_1)
[]
>>> def tst_2(arg):
... pass
>>> get_arg_names(tst_2)
['arg']
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 0):
# pylint: disable=deprecated-method
# noinspection PyDeprecation
spec = inspect.getargspec(func=func)
# pylint: enable=deprecated-method
args = spec.args[:]
if spec.varargs:
args.append(spec.varargs)
if spec.keywords:
args.append(spec.keywords)
return args
return list(inspect.signature(obj=func).parameters.keys())
def get_call_args(func, *positional, **named):
"""get real function call arguments without calling function
:param func: Function to bind arguments
:type func: callable
:type positional: iterable
:type named: dict
:rtype: collections.OrderedDict
>>> def tst(arg, darg=2, *args, **kwargs):
... pass
>>> get_call_args(tst, *(1, ))
OrderedDict([('arg', 1), ('darg', 2), ('args', ()), ('kwargs', {})])
"""
# noinspection PyUnresolvedReferences
if sys.version_info[0:2] < (3, 5): # apply_defaults is py35 feature
# pylint: disable=deprecated-method
orig_args = inspect.getcallargs(func, *positional, **named)
# pylint: enable=deprecated-method
# Construct OrderedDict as Py3
arguments = collections.OrderedDict(
[(key, orig_args[key]) for key in get_arg_names(func)]
)
return arguments
sig = inspect.signature(func).bind(*positional, **named)
sig.apply_defaults() # after bind we doesn't have defaults
return sig.arguments
# pylint: enable=no-member
__all__ = ['get_arg_names', 'get_call_args']
| Python | 0 |
d223ee2988be1eb439ebde1146c28ffc83576a29 | Return empty list on exception | fetch_mentions.py | fetch_mentions.py | import time
import requests
import json
import tweepy
from tweepy.error import TweepError
from raven import Client
from settings import consumer_key, consumer_secret, key, secret, count, pubKey, payload_type, SENTRY_DSN,\
overlap_count, waiting_period, speed_layer_endpoint_url, password, timeout, ssl_verification
client = Client(SENTRY_DSN)
class GetMentions(object):
latest_tweet_id = 0
since_id = 1
api = None
data = {"payloadType": payload_type, "pubKey": pubKey}
overlap_count = overlap_count
headers = {'content-type': 'application/json'}
def authenticate(self):
"""
Authenticate twitter credentials
"""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
self.api = tweepy.API(auth)
def verify_credentials(self):
"""
Return after Verifying twitter credentials
"""
try:
verified = self.api.verify_credentials()
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
return True
return False
else:
return verified
def get_mentions(self):
"""
Fetch mentions from twitter
"""
try:
print "Fetching mentions"
mentions = self.api.mentions_timeline(count=count, since_id=self.since_id)
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
break
print msg
return []
else:
return mentions
def process_mentions(self):
"""
Send the twitter mentions to rest end point server
"""
mentions = self.get_mentions()
for mention in mentions:
self.data.update({"transactionId": mention.id, "transactionSent": mention.created_at.isoformat(),
"transactionData": mention._json})
print "--------- Sending to Rest End point ---------"
try:
resp = requests.post(speed_layer_endpoint_url, auth=(pubKey, password), headers=self.headers, timeout=timeout,
data=json.dumps(self.data), verify=ssl_verification)
except Exception as e:
print 'Failed to post to HTTP endpoint due to "%s"' % e.message
if mention.id > self.latest_tweet_id:
# get highest tweet id so that it could be used as since_id later
self.latest_tweet_id = mention.id
print "Latest mention ID so far: {0}".format(self.latest_tweet_id)
print "Mentions Tweet ID: {0}".format(mention.id)
print "Mentions Tweet: {0}".format(mention.text)
if self.overlap_count:
# Force duplicate tweets
try:
self.since_id = mentions[-1 * (overlap_count + 1)].id
except IndexError:
print "Mentions overlap index out of range"
client.captureException()
self.since_id = self.latest_tweet_id
else:
self.since_id = self.latest_tweet_id
if __name__ == "__main__":
obj = GetMentions()
obj.authenticate()
while obj.verify_credentials():
obj.process_mentions()
print "Waiting for %s seconds" % waiting_period
time.sleep(waiting_period)
else:
print "Verification Failed"
| import time
import requests
import json
import tweepy
from tweepy.error import TweepError
from raven import Client
from settings import consumer_key, consumer_secret, key, secret, count, pubKey, payload_type, SENTRY_DSN,\
overlap_count, waiting_period, speed_layer_endpoint_url, password, timeout, ssl_verification
client = Client(SENTRY_DSN)
class GetMentions(object):
latest_tweet_id = 0
since_id = 1
api = None
data = {"payloadType": payload_type, "pubKey": pubKey}
overlap_count = overlap_count
headers = {'content-type': 'application/json'}
def authenticate(self):
"""
Authenticate twitter credentials
"""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(key, secret)
self.api = tweepy.API(auth)
def verify_credentials(self):
"""
Return after Verifying twitter credentials
"""
try:
verified = self.api.verify_credentials()
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
return True
return False
else:
return verified
def get_mentions(self):
"""
Fetch mentions from twitter
"""
try:
print "Fetching mentions"
mentions = self.api.mentions_timeline(count=count, since_id=self.since_id)
except TweepError, errors:
client.captureException()
for msg in errors.message:
# If Rate limit exceeded, will retry after 15 minutes
if msg['code'] == 88:
print "Sleeping for 15 minutes, Rate limit hit"
time.sleep(15 * 60)
break
return None
else:
return mentions
def process_mentions(self):
"""
Send the twitter mentions to rest end point server
"""
mentions = self.get_mentions()
for mention in mentions:
self.data.update({"transactionId": mention.id, "transactionSent": mention.created_at.isoformat(),
"transactionData": mention._json})
print "--------- Sending to Rest End point ---------"
try:
resp = requests.post(speed_layer_endpoint_url, auth=(pubKey, password), headers=self.headers, timeout=timeout,
data=json.dumps(self.data), verify=ssl_verification)
except Exception as e:
print 'Failed to post to HTTP endpoint due to "%s"' % e.message
if mention.id > self.latest_tweet_id:
# get highest tweet id so that it could be used as since_id later
self.latest_tweet_id = mention.id
print "Latest mention ID so far: {0}".format(self.latest_tweet_id)
print "Mentions Tweet ID: {0}".format(mention.id)
print "Mentions Tweet: {0}".format(mention.text)
if self.overlap_count:
# Force duplicate tweets
try:
self.since_id = mentions[-1 * (overlap_count + 1)].id
except IndexError:
print "Mentions overlap index out of range"
client.captureException()
self.since_id = self.latest_tweet_id
else:
self.since_id = self.latest_tweet_id
if __name__ == "__main__":
obj = GetMentions()
obj.authenticate()
while obj.verify_credentials():
obj.process_mentions()
print "Waiting for %s seconds" % waiting_period
time.sleep(waiting_period)
else:
print "Verification Failed"
| Python | 0.999717 |
8d0a41391fae5c66c296d5dfacc0ac6f82a6b355 | fix gridsearch path | gridsearch.py | gridsearch.py | import time
import itertools as it
from gensim.models import word2vec
from goethe.corpora import Corpus
model_config = {
'size': [200, 300, 400, 500, 600],
'window': [5, 10, 20],
'sg': [0, 1] # Skip-gram or CBOW
}
sample_size = 10000000
epochs = 10
def train_model(config):
size, window, sg = config
sentences = Corpus('../corpora/eval', limit=sample_size)
model = word2vec.Word2Vec(sentences=sentences, size=size, window=window,
iter=epochs, workers=4)
name = 'n{}_size{}_epochs{}_sg{}_window{}'.format(sample_size, size, epochs, sg, window)
return name, model
def minutes(t0):
t1 = time.time()
return int((t1-t0)/60)
if __name__ == '__main__':
parameters = it.product(model_config['size'], model_config['window'],
model_config['sg'])
t0 = time.time()
for p in parameters:
name, model = train_model(p)
model.save('models/' + name + '.model')
print('{}\', saved model: {}'.format(minutes(t0), name))
| import time
import itertools as it
from gensim.models import word2vec
from goethe.corpora import Corpus
model_config = {
'size': [200, 300, 400, 500, 600],
'window': [5, 10, 20],
'sg': [0, 1] # Skip-gram or CBOW
}
sample_size = 10000000
epochs = 10
def train_model(config):
size, window, sg = config
sentences = Corpus('../corpora/eval/eval.tokens.txt', limit=sample_size)
model = word2vec.Word2Vec(sentences=sentences, size=size, window=window,
iter=epochs, workers=4)
name = 'n{}_size{}_epochs{}_sg{}_window{}'.format(sample_size, size, epochs, sg, window)
return name, model
def minutes(t0):
t1 = time.time()
return int((t1-t0)/60)
if __name__ == '__main__':
parameters = it.product(model_config['size'], model_config['window'],
model_config['sg'])
t0 = time.time()
for p in parameters:
name, model = train_model(p)
model.save('models/' + name + '.model')
print('{}\', saved model: {}'.format(minutes(t0), name))
| Python | 0.000001 |
f682300d4a8ab7e13ad0e26d2b37fdf24cdbdce9 | Bump development version | filer/__init__.py | filer/__init__.py | # -*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '1.2.6.rc2' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| # -*- coding: utf-8 -*-
# version string following pep-0396 and pep-0386
__version__ = '1.2.6.rc1' # pragma: nocover
default_app_config = 'filer.apps.FilerConfig'
| Python | 0 |
05c8dffcbfc08bbfd98d0f6a506af245719b3ac8 | FIX dependency | stock_picking_ean128_report/__openerp__.py | stock_picking_ean128_report/__openerp__.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Picking EAN128 Report',
'version': '1.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Picking EAN128 Report
===========================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock_ean128',
'report_aeroo',
],
'data': [
'wizard/stock_print_remit_view.xml',
'report/stock_report.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 ADHOC SA (http://www.adhoc.com.ar)
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Stock Picking EAN128 Report',
'version': '1.0',
'category': 'Warehouse Management',
'sequence': 14,
'summary': '',
'description': """
Stock Picking EAN128 Report
===========================
""",
'author': 'ADHOC SA',
'website': 'www.adhoc.com.ar',
'images': [
],
'depends': [
'stock_ean128',
],
'data': [
'wizard/stock_print_remit_view.xml',
'report/stock_report.xml'
],
'demo': [
],
'test': [
],
'installable': True,
'auto_install': False,
'application': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.000001 |
9608fff230665f5120e6ea98d4ae0efc91a345ef | Revert "Add teuthology git version query/logging" | teuthology/__init__.py | teuthology/__init__.py | from gevent import monkey
monkey.patch_all(
dns=False,
# Don't patch subprocess to avoid http://tracker.ceph.com/issues/14990
subprocess=False,
)
import sys
# Don't write pyc files
sys.dont_write_bytecode = True
from .orchestra import monkey
monkey.patch_all()
import logging
import os
__version__ = '0.1.0'
# If we are running inside a virtualenv, ensure we have its 'bin' directory in
# our PATH. This doesn't happen automatically if scripts are called without
# first activating the virtualenv.
exec_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.split(exec_dir)[-1] == 'bin' and exec_dir not in os.environ['PATH']:
os.environ['PATH'] = ':'.join((exec_dir, os.environ['PATH']))
# We don't need to see log entries for each connection opened
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARN)
# if requests doesn't bundle it, shut it up anyway
logging.getLogger('urllib3.connectionpool').setLevel(
logging.WARN)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s')
log = logging.getLogger(__name__)
def setup_log_file(log_path):
root_logger = logging.getLogger()
handlers = root_logger.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler) and \
handler.stream.name == log_path:
log.debug("Already logging to %s; not adding new handler",
log_path)
return
formatter = logging.Formatter(
fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
handler = logging.FileHandler(filename=log_path)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
| from gevent import monkey
monkey.patch_all(
dns=False,
# Don't patch subprocess to avoid http://tracker.ceph.com/issues/14990
subprocess=False,
)
import sys
# Don't write pyc files
sys.dont_write_bytecode = True
from .orchestra import monkey
monkey.patch_all()
import logging
import os
import subprocess
__version__ = '1.0.0'
# do our best, but if it fails, continue with above
try:
__version__ += '-' + subprocess.check_output(
'git rev-parse --short HEAD'.split(),
cwd=os.path.dirname(os.path.realpath(__file__))
).strip()
except Exception as e:
# before logging; should be unusual
print >>sys.stderr, 'Can\'t get version from git rev-parse', e
# If we are running inside a virtualenv, ensure we have its 'bin' directory in
# our PATH. This doesn't happen automatically if scripts are called without
# first activating the virtualenv.
exec_dir = os.path.abspath(os.path.dirname(sys.argv[0]))
if os.path.split(exec_dir)[-1] == 'bin' and exec_dir not in os.environ['PATH']:
os.environ['PATH'] = ':'.join((exec_dir, os.environ['PATH']))
# We don't need to see log entries for each connection opened
logging.getLogger('requests.packages.urllib3.connectionpool').setLevel(
logging.WARN)
# if requests doesn't bundle it, shut it up anyway
logging.getLogger('urllib3.connectionpool').setLevel(
logging.WARN)
logging.basicConfig(
level=logging.INFO,
format='%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s')
log = logging.getLogger(__name__)
log.info('teuthology version: %s', __version__)
def setup_log_file(log_path):
root_logger = logging.getLogger()
handlers = root_logger.handlers
for handler in handlers:
if isinstance(handler, logging.FileHandler) and \
handler.stream.name == log_path:
log.debug("Already logging to %s; not adding new handler",
log_path)
return
formatter = logging.Formatter(
fmt=u'%(asctime)s.%(msecs)03d %(levelname)s:%(name)s:%(message)s',
datefmt='%Y-%m-%dT%H:%M:%S')
handler = logging.FileHandler(filename=log_path)
handler.setFormatter(formatter)
root_logger.addHandler(handler)
root_logger.log.info('teuthology version: %s', __version__)
| Python | 0 |
26123b15e28975c331b4c29e86bf69f2bee3a2c2 | Add option to get_or_create_inspection to filter inspections with content. | thezombies/tasks/urls.py | thezombies/tasks/urls.py | from __future__ import absolute_import
from django.db import transaction
from django.conf import settings
from celery import shared_task
import requests
from requests.exceptions import (MissingSchema)
from cachecontrol import CacheControl
from .utils import ResultDict, logger
from thezombies.models import URLInspection
REQUEST_TIMEOUT = getattr(settings, 'REQUEST_TIMEOUT', 60)
session = CacheControl(requests.Session(), cache_etags=False)
@shared_task
def check_and_correct_url(url, method='GET'):
"""Check a url for issues, record exceptions, and attempt to correct the url.
:param url: URL to check and correct
:param method: http method to use, as a string. Default is 'GET'
"""
returnval = ResultDict({'initial_url': url})
req = requests.Request(method.upper(), url)
try:
preq = req.prepare()
except MissingSchema as e:
returnval.add_error(e)
new_url = 'http://{}'.format(req.url)
req.url = new_url
try:
preq = req.prepare()
returnval['corrected_url'] = preq.url
except Exception as e:
returnval.add_error(e)
except Exception as e:
returnval.add_error(e)
return returnval
@shared_task
def request_url(url, method='GET'):
"""Task to request a url, a GET request by default. Tracks and returns errors.
Will not raise an Exception, but may return None for response
:param url: URL to request
:param method: http method to use, as a string. Default is 'GET'
"""
resp = None
checker_result = check_and_correct_url(url)
valid_url = checker_result.get('corrected_url', url)
returnval = ResultDict(checker_result)
try:
resp = session.request(method.upper(), valid_url, allow_redirects=True, timeout=REQUEST_TIMEOUT)
except requests.exceptions.Timeout as e:
returnval.add_error(e)
returnval['timeout'] = True
except Exception as e:
returnval.add_error(e)
# a non-None requests.Response will evaluate to False if it carries an HTTPError value
if resp is not None:
try:
resp.raise_for_status()
except Exception as e:
returnval.add_error(e)
returnval['response'] = resp
return returnval
@shared_task
def get_or_create_inspection(url, with_content=False):
"""Task to get the lastest URLInspection or create a new one if none exists.
:param url: The url to retrieve.
"""
latest_dates = URLInspection.objects.datetimes('created_at', 'minute')
recent_inspections = None
if latest_dates:
latest_date = latest_dates.latest()
recent_inspections = URLInspection.objects.filter(requested_url=url,
created_at__day=latest_date.day,
parent_id__isnull=True,
content__isnull=(not with_content))
inspection = None
if recent_inspections and recent_inspections.count() > 0:
inspection = recent_inspections.latest()
else:
logger.info('No stored inspection, fetch url')
fetch_val = request_url(url)
response = fetch_val.get('response', None)
with transaction.atomic():
if response is not None:
inspection = URLInspection.objects.create_from_response(response)
inspection.save()
else:
timeout = fetch_val.get('timeout', False)
inspection = URLInspection.objects.create(requested_url=url, timeout=timeout)
inspection.save()
return ResultDict({'inspection_id': getattr(inspection, 'id', None), 'url': url})
| from __future__ import absolute_import
from django.db import transaction
from django.conf import settings
from celery import shared_task
import requests
from requests.exceptions import (MissingSchema)
from cachecontrol import CacheControl
from .utils import ResultDict, logger
from thezombies.models import URLInspection
REQUEST_TIMEOUT = getattr(settings, 'REQUEST_TIMEOUT', 60)
session = CacheControl(requests.Session(), cache_etags=False)
@shared_task
def check_and_correct_url(url, method='GET'):
"""Check a url for issues, record exceptions, and attempt to correct the url.
:param url: URL to check and correct
:param method: http method to use, as a string. Default is 'GET'
"""
returnval = ResultDict({'initial_url': url})
req = requests.Request(method.upper(), url)
try:
preq = req.prepare()
except MissingSchema as e:
returnval.add_error(e)
new_url = 'http://{}'.format(req.url)
req.url = new_url
try:
preq = req.prepare()
returnval['corrected_url'] = preq.url
except Exception as e:
returnval.add_error(e)
except Exception as e:
returnval.add_error(e)
return returnval
@shared_task
def request_url(url, method='GET'):
"""Task to request a url, a GET request by default. Tracks and returns errors.
Will not raise an Exception, but may return None for response
:param url: URL to request
:param method: http method to use, as a string. Default is 'GET'
"""
resp = None
checker_result = check_and_correct_url(url)
valid_url = checker_result.get('corrected_url', url)
returnval = ResultDict(checker_result)
try:
resp = session.request(method.upper(), valid_url, allow_redirects=True, timeout=REQUEST_TIMEOUT)
except requests.exceptions.Timeout as e:
returnval.add_error(e)
returnval['timeout'] = True
except Exception as e:
returnval.add_error(e)
# a non-None requests.Response will evaluate to False if it carries an HTTPError value
if resp is not None:
try:
resp.raise_for_status()
except Exception as e:
returnval.add_error(e)
returnval['response'] = resp
return returnval
@shared_task
def get_or_create_inspection(url):
"""Task to get the lastest URLInspection or create a new one if none exists.
:param url: The url to retrieve.
"""
latest_dates = URLInspection.objects.datetimes('created_at', 'minute')
recent_inspections = None
if latest_dates:
latest_date = latest_dates.latest()
recent_inspections = URLInspection.objects.filter(requested_url=url, created_at__day=latest_date.day, parent_id__isnull=True)
inspection = None
if recent_inspections and recent_inspections.count() > 0:
inspection = recent_inspections.latest()
else:
logger.info('No stored inspection, fetch url')
fetch_val = request_url(url)
response = fetch_val.get('response', None)
with transaction.atomic():
if response is not None:
inspection = URLInspection.objects.create_from_response(response)
inspection.save()
else:
timeout = fetch_val.get('timeout', False)
inspection = URLInspection.objects.create(requested_url=url, timeout=timeout)
inspection.save()
return ResultDict({'inspection_id': getattr(inspection, 'id', None), 'url': url})
| Python | 0 |
0e4c8fb4965eadf8cf45ff0f6d3406df17015f46 | remove print | timeside/server/tasks.py | timeside/server/tasks.py | from __future__ import absolute_import
import time
import gc
from celery import shared_task
from celery.result import AsyncResult
from celery.result import GroupResult
from .models import Item, Selection, Preset, Experience, Task
from .models import _DONE
from celery.task import chord
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def task_run(task_id):
task = Task.objects.get(uuid=task_id)
results = []
if task.selection:
for item in task.selection.get_all_items():
results.append(experience_run.delay(str(task.experience.uuid), str(item.uuid)))
results_id = [res.id for res in results]
elif task.item:
results.append(experience_run.delay(str(task.experience.uuid), str(task.item.uuid)))
results_id = [res.id for res in results]
task_monitor.delay(task_id, results_id)
@shared_task
def experience_run(exp_id, item_id):
item = Item.objects.get(uuid=item_id)
experience = Experience.objects.get(uuid=exp_id)
item.run(experience)
gc.collect()
@shared_task
def task_monitor(task_id, results_id):
results = [AsyncResult(id) for id in results_id]
while not all([res.ready() for res in results]):
time.sleep(1)
task = Task.objects.get(uuid=task_id)
task.status_setter(_DONE)
| from __future__ import absolute_import
import time
import gc
from celery import shared_task
from celery.result import AsyncResult
from celery.result import GroupResult
from .models import Item, Selection, Preset, Experience, Task
from .models import _DONE
from celery.task import chord
from celery.utils.log import get_task_logger
logger = get_task_logger(__name__)
@shared_task
def task_run(task_id):
print(task_id)
task = Task.objects.get(uuid=task_id)
results = []
if task.selection:
for item in task.selection.get_all_items():
results.append(experience_run.delay(str(task.experience.uuid), str(item.uuid)))
results_id = [res.id for res in results]
elif task.item:
results.append(experience_run.delay(str(task.experience.uuid), str(task.item.uuid)))
results_id = [res.id for res in results]
task_monitor.delay(task_id, results_id)
@shared_task
def experience_run(exp_id, item_id):
item = Item.objects.get(uuid=item_id)
experience = Experience.objects.get(uuid=exp_id)
item.run(experience)
gc.collect()
@shared_task
def task_monitor(task_id, results_id):
results = [AsyncResult(id) for id in results_id]
while not all([res.ready() for res in results]):
time.sleep(1)
task = Task.objects.get(uuid=task_id)
task.status_setter(_DONE)
| Python | 0.000793 |
bcf6470f2e01b81a3373779cb83820ff125754c2 | Fix add item http post | items/views.py | items/views.py | import os
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.views.generic import View
from items.models import Item, UploadForm
from django.contrib.auth.models import User
from django.core import serializers
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
class ItemView(View):
def get(self, req, *args, **kwargs):
items = Item.objects.select_related('owner')
s = serializers.serialize("json", list(items))
return JsonResponse({'items':s}, safe=False)
class OfferView(View):
def get(self, req, *args, **kwargs):
items = Item.objects.select_related('owner')
data = serializers.serialize("json", items)
return JsonResponse(data, safe=False)
# title = CharField(max_length=255, null=True, blank=True)
# description = CharField(max_length=1000, null=True, blank=True)
# auther = CharField(max_length=255, null=True, blank=True)
# size = CharField(max_length=255, null=True, blank=True)
# n_copies = DecimalField(max_digits=10, decimal_places=2, null=True)
# code = CharField(max_length=255, null=True, blank=True)
# type = CharField(max_length=4, choices=TYPES, default='O')
# price = DecimalField(max_digits=10, decimal_places=2, null=True)
# fpath = CharField(max_length=1000, null=True, blank=True)
# created = DateTimeField(auto_now_add=True)
# updated = DateTimeField(auto_now=True)
# owner = ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
@method_decorator(csrf_exempt, name='dispatch')
class UploadView(View):
def post(self, req, *args, **kwargs):
uForm = UploadForm(req.POST, req.FILES)
#if uForm.is_valid():
item = Item()
item.title = uForm.data["title"]
item.description = uForm.data["description"]
#item.code = uForm.data["code"]
item.dimension = uForm.data["dimension"]
item.author = uForm.data["author"]
item.year = uForm.data["year"]
item.type = uForm.data["type"]
item.source = uForm.data["source"]
item.style = uForm.data["style"]
item.price = uForm.data["price"]
item.currency = uForm.data["currency"]
item.n_copies = uForm.data["n_copies"]
item.fpath = uForm.data["fpath"]
item.created = uForm.data["created"]
item.updated = uForm.data["updated"]
owner_id = uForm.data["owner_id"]
item.owner = User.objects.get(id=1)#owner_id)
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
folder = os.path.join("sample", item.owner.username)
filename = req.FILES['file'].name
# create the folder if it doesn't exist.
try:
#os.mkdir(os.path.join(BASE_DIR, 'sample'))
os.mkdir(os.path.join(BASE_DIR, folder))
except:
pass
# # save the uploaded file inside that folder.
# full_filename = BASE_DIR + folder + filename
# fout = open(full_filename, 'wb+')
# file_content = ContentFile( req.FILES['file'].read() )
# try:
# # Iterate through the chunks.
# for chunk in file_content.chunks():
# fout.write(chunk)
# fout.close()
# except:
# pass
# item.fpath = os.path.join(folder, filename)
# item.save()
return JsonResponse({'saved': True}, safe=False)
#return JsonResponse({'saved': False}, safe=False) | import os
from django.shortcuts import render
from django.http import JsonResponse, HttpResponse
from django.views.generic import View
from items.models import Item, UploadForm
from django.contrib.auth.models import User
from django.core import serializers
from django import forms
from django.views.decorators.csrf import csrf_exempt
from django.utils.decorators import method_decorator
class ItemView(View):
def get(self, req, *args, **kwargs):
items = Item.objects.select_related('owner')
s = serializers.serialize("json", list(items))
return JsonResponse({'items':s}, safe=False)
class OfferView(View):
def get(self, req, *args, **kwargs):
items = Item.objects.select_related('owner')
data = serializers.serialize("json", items)
return JsonResponse(data, safe=False)
# title = CharField(max_length=255, null=True, blank=True)
# description = CharField(max_length=1000, null=True, blank=True)
# auther = CharField(max_length=255, null=True, blank=True)
# size = CharField(max_length=255, null=True, blank=True)
# n_copies = DecimalField(max_digits=10, decimal_places=2, null=True)
# code = CharField(max_length=255, null=True, blank=True)
# type = CharField(max_length=4, choices=TYPES, default='O')
# price = DecimalField(max_digits=10, decimal_places=2, null=True)
# fpath = CharField(max_length=1000, null=True, blank=True)
# created = DateTimeField(auto_now_add=True)
# updated = DateTimeField(auto_now=True)
# owner = ForeignKey(User, null=True, blank=True, on_delete=models.CASCADE)
@method_decorator(csrf_exempt, name='dispatch')
class UploadView(View):
def post(self, req, *args, **kwargs):
uForm = UploadForm(req.POST, req.FILES)
#if uForm.is_valid():
item = Item()
# item.title = uForm.cleaned_data["title"]
# item.description = uForm.cleaned_data["description"]
# item.author = uForm.cleaned_data["author"]
# item.size = uForm.cleaned_data["size"]
# item.n_copies = uForm.cleaned_data["n_copies"]
# item.type = uForm.cleaned_data["type"]
# item.price = uForm.cleaned_data["price"]
# owner_id = uForm.cleaned_data["owner_id"]
# item.owner = User.objects.get(id=owner_id)
# BASE_DIR = os.path.dirname(os.path.dirname(__file__)).replace("\\", "/")
# folder = "/sample/" + uForm.cleaned_data["username"] + "/"
# filename = req.FILES['file'].name
# # create the folder if it doesn't exist.
# try:
# os.mkdir(BASE_DIR + '/sample/')
# os.mkdir(BASE_DIR + folder)
# except:
# pass
# # save the uploaded file inside that folder.
# full_filename = BASE_DIR + folder + filename
# fout = open(full_filename, 'wb+')
# file_content = ContentFile( req.FILES['file'].read() )
# try:
# # Iterate through the chunks.
# for chunk in file_content.chunks():
# fout.write(chunk)
# fout.close()
# except:
# pass
# item.fpath = os.path.join(folder, filename)
# item.save()
return JsonResponse({'saved': True}, safe=False)
#return JsonResponse({'saved': False}, safe=False) | Python | 0 |
0138eacf0d518b86e819a70000b7b527434a6b35 | Change les arguments passés à celery pour gérer la sérialisation JSON. | libretto/signals.py | libretto/signals.py | # coding: utf-8
from __future__ import unicode_literals
from celery_haystack.signals import CelerySignalProcessor
from django.contrib.admin.models import LogEntry
from django.contrib.sessions.models import Session
from reversion.models import Version, Revision
from .tasks import auto_invalidate
class CeleryAutoInvalidator(CelerySignalProcessor):
def enqueue(self, action, instance, sender, **kwargs):
if sender in (LogEntry, Session, Revision, Version):
return
auto_invalidate.delay(action, instance.__class__, instance.pk)
| # coding: utf-8
from __future__ import unicode_literals
from celery_haystack.signals import CelerySignalProcessor
from django.contrib.admin.models import LogEntry
from reversion.models import Version, Revision
from .tasks import auto_invalidate
class CeleryAutoInvalidator(CelerySignalProcessor):
def enqueue(self, action, instance, sender, **kwargs):
if sender in (LogEntry, Revision, Version):
return
auto_invalidate.delay(action, instance)
| Python | 0 |
eb102bb8550d59b34373f1806633a6079f7064a8 | Make sure that all requests for static files are correctly hidden from output | devserver/utils/http.py | devserver/utils/http.py | from django.conf import settings
from django.core.servers.basehttp import WSGIRequestHandler
from django.db import connection
from devserver.utils.time import ms_from_timedelta
from datetime import datetime
class SlimWSGIRequestHandler(WSGIRequestHandler):
"""
Hides all requests that originate from either ``STATIC_URL`` or ``MEDIA_URL``
as well as any request originating with a prefix included in
``DEVSERVER_IGNORED_PREFIXES``.
"""
def handle(self, *args, **kwargs):
self._start_request = datetime.now()
return WSGIRequestHandler.handle(self, *args, **kwargs)
def log_message(self, format, *args):
duration = datetime.now() - self._start_request
env = self.get_environ()
for url in (settings.STATIC_URL, settings.MEDIA_URL):
if self.path.startswith(url):
return
elif url.startswith('http:'):
if ('http://%s%s' % (env['HTTP_HOST'], self.path)).startswith(url):
return
for path in getattr(settings, 'DEVSERVER_IGNORED_PREFIXES', []):
if self.path.startswith(path):
return
format += " (time: %.2fs; sql: %dms (%dq))"
args = list(args) + [
ms_from_timedelta(duration) / 1000,
sum(float(c.get('time', 0)) for c in connection.queries) * 1000,
len(connection.queries),
]
return WSGIRequestHandler.log_message(self, format, *args) | from django.conf import settings
from django.core.servers.basehttp import WSGIRequestHandler
from django.db import connection
from devserver.utils.time import ms_from_timedelta
from datetime import datetime
class SlimWSGIRequestHandler(WSGIRequestHandler):
"""
Hides all requests that originate from ```MEDIA_URL`` as well as any
request originating with a prefix included in ``DEVSERVER_IGNORED_PREFIXES``.
"""
def handle(self, *args, **kwargs):
self._start_request = datetime.now()
return WSGIRequestHandler.handle(self, *args, **kwargs)
def log_message(self, format, *args):
duration = datetime.now() - self._start_request
env = self.get_environ()
if settings.MEDIA_URL.startswith('http:'):
if ('http://%s%s' % (env['HTTP_HOST'], self.path)).startswith(settings.MEDIA_URL):
return
# if self.path.startswith(settings.MEDIA_URL):
# return
for path in getattr(settings, 'DEVSERVER_IGNORED_PREFIXES', []):
if self.path.startswith(path):
return
format += " (time: %.2fs; sql: %dms (%dq))"
args = list(args) + [
ms_from_timedelta(duration) / 1000,
sum(float(c.get('time', 0)) for c in connection.queries) * 1000,
len(connection.queries),
]
return WSGIRequestHandler.log_message(self, format, *args) | Python | 0.000001 |
866e5fee6d39da9eb1a4893f12b1fe7aafbdbefd | update a comment about None gradient | examples/train_mlp.py | examples/train_mlp.py | ##############
#
# This example demonstrates the basics of using `equinox.jitf` and `equinox.gradf`.
#
# Here we'll use them to facilitate training a simple MLP: to automatically take gradients and jit with respect to
# all the jnp.arrays constituting the parameters. (But not with respect to anything else, like the choice of activation
# function -- as that isn't something we can differentiate/JIT anyway!)
#
#############
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import optax
import equinox as eqx
# Toy data
def get_data(dataset_size, *, key):
x = jrandom.normal(key, (dataset_size, 1))
y = 5 * x - 2
return x, y
# Simple dataloader
def dataloader(arrays, key, batch_size):
dataset_size = arrays[0].shape[0]
assert all(array.shape[0] == dataset_size for array in arrays)
indices = jnp.arange(dataset_size)
while True:
perm = jrandom.permutation(key, indices)
(key,) = jrandom.split(key, 1)
start = 0
end = batch_size
while end < dataset_size:
batch_perm = perm[start:end]
yield tuple(array[batch_perm] for array in arrays)
start = end
end = start + batch_size
def main(
dataset_size=10000,
batch_size=256,
learning_rate=3e-3,
steps=1000,
width_size=8,
depth=1,
seed=5678,
):
data_key, loader_key, model_key = jrandom.split(jrandom.PRNGKey(seed), 3)
data = get_data(dataset_size, key=data_key)
data = dataloader(data, batch_size=batch_size, key=loader_key)
# We happen to be using an Equinox model here, but that *is not important*.
# `equinox.jitf` and `equinox.gradf` will work just fine on any PyTree you like.
# (Here, `model` is actually a PyTree -- have a look at the `build_model.py` example for more on that.)
model = eqx.nn.MLP(
in_size=1, out_size=1, width_size=depth, depth=depth, key=model_key
)
# `jitf` and `value_and_grad_f` are thin wrappers around the usual `jax` functions; they just flatten the
# input PyTrees and filter them according to their filter functions. In this case we're asking to only
# JIT/optimise with respect to arrays of floating point numbers, i.e. the parameters of our model. (So that
# for example we will statically JIT-compile with respect to any boolean flags.)
@ft.partial(eqx.jitf, filter_fn=eqx.is_inexact_array)
@ft.partial(eqx.value_and_grad_f, filter_fn=eqx.is_inexact_array)
def loss(model, x, y):
pred_y = jax.vmap(model)(x)
return jnp.mean((y - pred_y) ** 2)
optim = optax.sgd(learning_rate)
opt_state = optim.init(model)
for step, (x, y) in zip(range(steps), data):
value, grads = loss(model, x, y)
updates, opt_state = optim.update(grads, opt_state)
# Essentially equivalent to optax.apply_updates, it just doesn't try to update anything with a `None` gradient.
model = eqx.apply_updates(model, updates)
print(step, value)
return value # Final loss
if __name__ == "__main__":
main()
| ##############
#
# This example demonstrates the basics of using `equinox.jitf` and `equinox.gradf`.
#
# Here we'll use them to facilitate training a simple MLP: to automatically take gradients and jit with respect to
# all the jnp.arrays constituting the parameters. (But not with respect to anything else, like the choice of activation
# function -- as that isn't something we can differentiate/JIT anyway!)
#
#############
import functools as ft
import jax
import jax.numpy as jnp
import jax.random as jrandom
import optax
import equinox as eqx
# Toy data
def get_data(dataset_size, *, key):
x = jrandom.normal(key, (dataset_size, 1))
y = 5 * x - 2
return x, y
# Simple dataloader
def dataloader(arrays, key, batch_size):
dataset_size = arrays[0].shape[0]
assert all(array.shape[0] == dataset_size for array in arrays)
indices = jnp.arange(dataset_size)
while True:
perm = jrandom.permutation(key, indices)
(key,) = jrandom.split(key, 1)
start = 0
end = batch_size
while end < dataset_size:
batch_perm = perm[start:end]
yield tuple(array[batch_perm] for array in arrays)
start = end
end = start + batch_size
def main(
    dataset_size=10000,
    batch_size=256,
    learning_rate=3e-3,
    steps=1000,
    width_size=8,
    depth=1,
    seed=5678,
):
    """Train a small MLP on the toy y = 5x - 2 data and return the final loss."""
    data_key, loader_key, model_key = jrandom.split(jrandom.PRNGKey(seed), 3)
    data = get_data(dataset_size, key=data_key)
    data = dataloader(data, batch_size=batch_size, key=loader_key)
    # We happen to be using an Equinox model here, but that *is not important*.
    # `equinox.jitf` and `equinox.gradf` will work just fine on any PyTree you like.
    # (Here, `model` is actually a PyTree -- have a look at the `build_model.py` example for more on that.)
    # BUG FIX: `width_size` used to be passed as `width_size=depth`, which
    # silently ignored the `width_size` argument; forward it correctly.
    model = eqx.nn.MLP(
        in_size=1, out_size=1, width_size=width_size, depth=depth, key=model_key
    )
    # `jitf` and `value_and_grad_f` are thin wrappers around the usual `jax` functions; they just flatten the
    # input PyTrees and filter them according to their filter functions. In this case we're asking to only
    # JIT/optimise with respect to arrays of floating point numbers, i.e. the parameters of our model. (So that
    # for example we will statically JIT-compile with respect to any boolean flags.)
    @ft.partial(eqx.jitf, filter_fn=eqx.is_inexact_array)
    @ft.partial(eqx.value_and_grad_f, filter_fn=eqx.is_inexact_array)
    def loss(model, x, y):
        # Mean squared error over the batch.
        pred_y = jax.vmap(model)(x)
        return jnp.mean((y - pred_y) ** 2)
    optim = optax.sgd(learning_rate)
    opt_state = optim.init(model)
    for step, (x, y) in zip(range(steps), data):
        value, grads = loss(model, x, y)
        updates, opt_state = optim.update(grads, opt_state)
        # Essentially equivalent to optax.apply_updates, it just doesn't try to update anything with a zero gradient.
        # Anything we filtered out above will have a zero gradient. But in general some of the things we filtered out
        # might not even be jnp.arrays, and might not even have a notion of addition. We don't want to try adding zero
        # to arbitrary Python objects.
        model = eqx.apply_updates(model, updates)
        print(step, value)
    return value  # Final loss
# Run the training example when executed directly as a script.
if __name__ == "__main__":
    main()
| Python | 0 |
c053d6d46e6c1102b712649c8d91841d57b32ca7 | add todo | buncuts/utils.py | buncuts/utils.py | # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import codecs
import re
default_delimeter = "。!?▲"
default_quote_dict = {"「": "」", "『": "』"}
def split_into_sentences(text=sys.stdin, sentence_delim=default_delimeter,
                         quote_dict=default_quote_dict,
                         output=sys.stdout, append=False,
                         is_dir=False, limit=float('inf'),
                         echo=False):
    """Read *text* line by line and write it to *output* split into sentences.

    Sentence boundaries are delegated to chunk_splitter().  *text* and
    *output* may be file paths or the default stdin/stdout streams; path
    arguments are opened with the hard-coded Shift-JIS ('sjis') codec.

    NOTE(review): `limit` and `echo` are accepted but not implemented yet
    (see the TODOs below), and `count` is computed but never used or
    returned -- presumably groundwork for the `limit` option; confirm.
    """
    # TODO: implement the echo and limit option.
    # TODO: implement encoding option.
    # TODO: implement chunk_size option.
    count = 0
    # create a list from chars in a string
    sentence_delim = list(sentence_delim)
    # Append to or truncate the output file.
    if append:
        mode = 'a'
    else:
        mode = 'w'
    # A path argument is opened as Shift-JIS; the default is the raw stream.
    if text is not sys.stdin:
        text_file = codecs.open(text, 'r', 'sjis')
    else:
        text_file = text
    if output is not sys.stdout:
        if is_dir:
            # Treat *output* as a directory: mirror the input file's name.
            path = os.path.join(output, os.path.basename(text))
            output_file = codecs.open(path, mode, 'sjis')
        else:
            output_file = codecs.open(output, mode, 'sjis')
    else:
        output_file = output
    for line in text_file:
        # [todo] - skip empty line.
        count += 1
        # strip half/full width spaces
        # strip() somehow don't work very well.
        # use re instead.
        line = re.sub(r"^[ \n]+|[ ]+$", "", line)
        line_splitted, count_added = chunk_splitter(line,
                                     sentence_delim, quote_dict)
        output_file.write(line_splitted)
        count += count_added
    # close files
    # NOTE(review): files are not closed if an exception occurs above;
    # consider try/finally or context managers.
    if text is not sys.stdin:
        text_file.close()
    if output is not sys.stdout:
        output_file.close()
def chunk_splitter(chunk,
                   sentence_delim=None,
                   quote_dict=None):
    """Split *chunk* into sentences by inserting newlines after delimiters.

    Delimiters found inside a quotation (as defined by *quote_dict*, a
    mapping of opening to closing quote characters) do not end a sentence.

    Reads in a chunk, returns the splitted string and a count as a tuple:
    (result, count)
    """
    # Late-bound defaults preserve the old behaviour of the module-level
    # default arguments while staying safe to override per call.
    if sentence_delim is None:
        sentence_delim = default_delimeter
    if quote_dict is None:
        quote_dict = default_quote_dict
    pieces = []  # build with list + join instead of O(n^2) string concat
    count = 0
    length = len(chunk)
    outside_quote = True
    current_close_quote = ""
    # `enumerate` replaces the Python-2-only `xrange` index loop and works
    # identically on Python 2 and 3.
    for i, char in enumerate(chunk):
        pieces.append(char)
        # TODO: Should use a FILO to avoid multiple embeded quotations.
        if outside_quote:
            if char in quote_dict:
                outside_quote = False
                current_close_quote = quote_dict[char]
            elif char in sentence_delim:
                count += 1
                # Add a newline after a sentence delimiter unless one
                # immediately follows in the input.
                if i == length - 1 or chunk[i + 1] != '\n':
                    pieces.append('\n')
        elif char == current_close_quote:
            outside_quote = True
    return ''.join(pieces), count
| # coding: utf-8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import sys
import codecs
import re
default_delimeter = "。!?▲"
default_quote_dict = {"「": "」", "『": "』"}
def split_into_sentences(text=sys.stdin, sentence_delim=default_delimeter,
quote_dict=default_quote_dict,
output=sys.stdout, append=False,
is_dir=False, limit=float('inf'),
echo=False):
# TODO: implement the echo and limit option.
# TODO: implement encoding option.
count = 0
# create a list from chars in a string
sentence_delim = list(sentence_delim)
if append:
mode = 'a'
else:
mode = 'w'
if text is not sys.stdin:
text_file = codecs.open(text, 'r', 'sjis')
else:
text_file = text
if output is not sys.stdout:
if is_dir:
path = os.path.join(output, os.path.basename(text))
output_file = codecs.open(path, mode, 'sjis')
else:
output_file = codecs.open(output, mode, 'sjis')
else:
output_file = output
for line in text_file:
# [todo] - skip empty line.
count += 1
# strip half/full width spaces
# strip() somehow don't work very well.
# use re instead.
line = re.sub(r"^[ \n]+|[ ]+$", "", line)
line_splitted, count_added = chunk_splitter(line,
sentence_delim, quote_dict)
output_file.write(line_splitted)
count += count_added
# close files
if text is not sys.stdin:
text_file.close()
if output is not sys.stdout:
output_file.close()
def chunk_splitter(chunk,
sentence_delim=default_delimeter,
quote_dict=default_quote_dict):
"""Chunk splitter.
Reads in a chunk, returns the splitted string and a count as a tuple:
(result, count)
"""
result = ""
count = 0
length = len(chunk)
outside_quote = True
quote_chars = quote_dict.keys()
current_quote = ""
current_close_quote = ""
for i in xrange(length):
char = chunk[i]
result = ''.join((result, char)) # append char to the result
# TODO: Should use a FILO to avoid multiple embeded quotations.
if outside_quote:
if char in quote_chars:
outside_quote = False
current_quote = char
current_close_quote = quote_dict[current_quote]
elif char in sentence_delim:
count += 1
# add a newline after a sentence delimeter.
if i < length - 1 and chunk[i+1] != '\n':
result = ''.join((result, '\n'))
elif i == length - 1:
result = ''.join((result, '\n'))
elif char == current_close_quote:
outside_quote = True
return result, count
| Python | 0 |
649d4b7fb22c92fa116fb574c1e4a07578ca6faa | Create methods for getting revisions. | forum/models.py | forum/models.py | from django.db import models
import django.contrib.auth.models as auth
class User(auth.User):
    """Model for representing users.

    It has few fields that aren't in the standard authentication user
    table, and are needed for the forum to work, like footers.
    """
    # NOTE(review): subclassing the concrete ``auth.User`` model gives
    # Django multi-table inheritance (an extra joined table) -- confirm
    # this is intended rather than a profile / custom user model.
    # Optional public name shown instead of the login ``username``.
    display_name = models.CharField(max_length=30, null=True)
    # Optional signature text for the user's posts.
    footer = models.TextField(null=True)
    def __str__(self):
        """Show display name or user name."""
        return self.display_name or self.username
class Thread(models.Model):
    """Model for representing threads."""
    # Title shown in thread listings.
    title = models.CharField(max_length=100)
    # View counter, incremented elsewhere.
    views = models.PositiveIntegerField(default=0)
    # Sticky threads are pinned in listings.
    sticky = models.BooleanField(default=False)
    # Closed threads accept no further posts.
    closed = models.BooleanField(default=False)
    def __str__(self):
        """Show thread title."""
        return self.title
class Post(models.Model):
    """Model for representing posts.

    Actual posts are stored in PostRevision, this only stores the
    thread number. The first created revision contains the author
    of post and date of its creation. The last revision contains actual
    text post.
    """
    thread = models.ForeignKey(Thread)
    def first_revision(self):
        # Oldest revision (PostRevision.Meta orders by date_created),
        # i.e. the original author and creation date.
        return self.postrevision_set.first()
    def last_revision(self):
        # Newest revision, i.e. the text currently in effect.
        return self.postrevision_set.last()
class PostRevision(models.Model):
    """Model for representing post revisions.

    The first revision for given post contains its author and date to
    show to the user. The last revision shows the date it was created
    on.
    """
    post = models.ForeignKey(Post)
    # Author of this particular revision (the editor, not necessarily the
    # original poster).
    author = models.ForeignKey(User)
    # NOTE(review): auto_now=True refreshes this timestamp on *every*
    # save; for a fixed creation date auto_now_add=True is the usual
    # choice -- confirm which behaviour is intended.
    date_created = models.DateTimeField(auto_now=True)
    text = models.TextField()
    class Meta:
        # Chronological order, so .first()/.last() on the related set
        # return the oldest/newest revision.
        ordering = ['date_created']
| from django.db import models
import django.contrib.auth.models as auth
class User(auth.User):
"""Model for representing users.
It has few fields that aren't in the standard authentication user
table, and are needed for the forum to work, like footers.
"""
display_name = models.CharField(max_length=30, null=True)
footer = models.TextField(null=True)
def __str__(self):
"""Show display name or user name."""
return self.display_name or self.username
class Thread(models.Model):
"""Model for representing threads."""
title = models.CharField(max_length=100)
views = models.PositiveIntegerField(default=0)
sticky = models.BooleanField(default=False)
closed = models.BooleanField(default=False)
def __str__(self):
"""Show thread title."""
return self.title
class Post(models.Model):
"""Model for representing posts.
Actual posts are stored in PostRevision, this only stores the
thread number. The first created revision contains the author
of post and date of its creation. The last revision contains actual
text post.
"""
thread = models.ForeignKey(Thread)
class PostRevision(models.Model):
"""Model for representing post revisions.
The first revision for given post contains its author and date to
show to the user. The last revision shows the date it was created
on.
"""
post = models.ForeignKey(Post)
author = models.ForeignKey(User)
date_created = models.DateTimeField(auto_now=True)
text = models.TextField()
class Meta:
ordering = ['date_created']
| Python | 0 |
65eb9bcd58d78fd80fabd03a26be73335f1a1122 | Update ci nose config to run with four processes | goldstone/settings/ci.py | goldstone/settings/ci.py | """Settings for accessing a distributed docker instance."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .docker import * # pylint: disable=W0614,W0401
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('GS_DEBUG', True))
TEMPLATE_DEBUG = bool(os.environ.get('GS_TEMPLATE_DEBUG', True))
# Collect static files into ./static relative to the working directory.
STATIC_ROOT = os.path.join(os.getcwd(), 'static')
INSTALLED_APPS += (
    'django_nose',
)
# Run the test suite through nose so CI gets xunit and coverage reports.
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = (
    '--verbosity=2',
    '--detailed-errors',
    '--with-xunit',
    '--xunit-file=/reports/nosetests.xml',
    '--with-coverage',
    '--cover-package=goldstone',
    # Distribute tests across four worker processes.
    # NOTE(review): nose's multiprocess plugin historically conflicts with
    # the coverage plugin -- confirm coverage data is still collected.
    '--processes=4',
)
| """Settings for accessing a distributed docker instance."""
# Copyright 2015 Solinea, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from .docker import * # pylint: disable=W0614,W0401
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(os.environ.get('GS_DEBUG', True))
TEMPLATE_DEBUG = bool(os.environ.get('GS_TEMPLATE_DEBUG', True))
STATIC_ROOT = os.path.join(os.getcwd(), 'static')
INSTALLED_APPS += (
'django_nose',
)
TEST_RUNNER = 'django_nose.NoseTestSuiteRunner'
NOSE_ARGS = (
'--verbosity=2',
'--detailed-errors',
'--with-xunit',
'--xunit-file=/reports/nosetests.xml',
'--with-coverage',
'--cover-package=goldstone',
)
| Python | 0 |
39c16a3552ef882441b26a4c6defc57d9ea42010 | return JSON rather than request.response objects | mondo.py | mondo.py | import requests
class MondoClient():
    """Thin wrapper around the Mondo (now Monzo) HTTP API using `requests`.

    Read endpoints return the decoded JSON body; the feed/webhook POST
    helpers return the raw `requests` response object, kept as-is for
    backward compatibility with existing callers.
    """

    # Default API root; overridable per instance through the constructor.
    url = 'https://production-api.gmon.io/'

    def __init__(self, url=None):
        # Only shadow the class-level default when a URL is supplied.
        if url is not None:
            self.url = url

    def token(self, client_id, client_secret, username, password):
        """Acquire an access token (OAuth2 password grant)."""
        payload = {'grant_type': 'password', 'client_id': client_id,
                   'client_secret': client_secret, 'username': username,
                   'password': password}
        r = requests.post(self.url + '/oauth2/token', payload)
        return r.json()

    def refresh_token(self, client_id, client_secret, refresh_token):
        """Refresh a previously acquired token."""
        payload = {'grant_type': 'refresh_token', 'client_id': client_id,
                   'client_secret': client_secret,
                   'refresh_token': refresh_token}
        r = requests.post(self.url + '/oauth2/token', payload)
        return r.json()

    def transaction(self, id, access_token, merchant=True):
        """Get details about a single transaction.

        When *merchant* is true the merchant object is expanded inline.
        """
        headers = {'Authorization': 'Bearer ' + access_token}
        params = {}
        if merchant:
            params['expand[]'] = 'merchant'
        r = requests.get(self.url + '/transactions/' + id,
                         params=params, headers=headers)
        return r.json()

    def transactions(self, access_token, account_id, limit=100,
                     since=None, before=None):
        """List transactions for *account_id*, optionally windowed in time."""
        headers = {'Authorization': 'Bearer ' + access_token}
        params = {'limit': limit, "account_id": account_id}
        if since is not None:
            params['since'] = since
        if before is not None:
            params['before'] = before
        r = requests.get(self.url + '/transactions',
                         params=params, headers=headers)
        return r.json()

    def authenticate(self, access_token, client_id, user_id):
        """Return information about the bearer of *access_token*.

        NOTE(review): `client_id` and `user_id` are unused; they are kept
        only so existing callers keep working.
        """
        headers = {'Authorization': 'Bearer ' + str(access_token)}
        r = requests.get(self.url + '/ping/whoami', headers=headers)
        return r.json()

    def accounts(self, access_token):
        """Return detailed information about the customer's accounts."""
        headers = {'Authorization': 'Bearer ' + access_token}
        r = requests.get(self.url + '/accounts', headers=headers)
        return r.json()

    def create_feed_item(self, access_token, account_id, title, image_url,
                         background_color='#FCF1EE', body_color='#FCF1EE',
                         title_color='#333', body=''):
        """Publish a new "basic" feed entry; returns the raw response."""
        headers = {'Authorization': 'Bearer ' + access_token}
        payload = {
            "account_id": account_id,
            "type": "basic",
            "params[title]": title,
            "params[image_url]": image_url,
            "params[background_color]": background_color,
            "params[body_color]": body_color,
            "params[title_color]": title_color,
            "params[body]": body
        }
        r = requests.post(self.url + '/feed', data=payload, headers=headers)
        return r

    def register_webhook(self, access_token, account_id, url):
        """Register a webhook for *account_id*; returns the raw response.

        BUG FIX: this used to POST to '/feed' (copy-paste from
        create_feed_item); per the Monzo API, webhooks are registered via
        the '/webhooks' endpoint.
        """
        headers = {'Authorization': 'Bearer ' + access_token}
        payload = {"account_id": account_id, "url": url}
        r = requests.post(self.url + '/webhooks', data=payload,
                          headers=headers)
        return r
| import requests
class MondoClient():
url = 'https://production-api.gmon.io/'
def __init__(self, url = None):
if url != None:
self.url = url
def token(self, client_id, client_secret, username, password):
"""
Acquiring an access token
"""
payload = {'grant_type': 'password', 'client_id': client_id, 'client_secret': client_secret, 'username': username, 'password': password }
r = requests.post(self.url + '/oauth2/token', payload)
return r
def refresh_token(self, client_id, client_secret, refresh_token):
"""
Refreshing a proviously acquired token
"""
payload = {'grant_type': 'refresh_token', 'client_id': client_id, 'client_secret': client_secret, 'refresh_token': refresh_token }
r = requests.post(self.url + '/oauth2/token', payload)
return r
def transaction(self, id, access_token, merchant = True):
"""
Getting details about a transaction
"""
headers = {'Authorization': 'Bearer ' + access_token}
params = {}
if merchant:
params['expand[]'] = 'merchant'
r = requests.get(self.url + '/transactions/' + id, params=params, headers=headers)
return r
def transactions(self, access_token, account_id, limit = 100, since = None, before = None):
"""
List transactions
"""
headers = {'Authorization': 'Bearer ' + access_token}
params = {'limit': limit, "account_id": account_id}
if since != None:
params['since'] = since
if before != None:
params['before'] = before
r = requests.get(self.url + '/transactions', params=params, headers=headers)
return r
def authenticate(self, access_token, client_id, user_id):
"""
authenticate user
"""
headers = {'Authorization': 'Bearer ' + str(access_token)}
r = requests.get(self.url + '/ping/whoami', headers=headers)
return r
def accounts(self, access_token):
"""
detailed information about customer's accounts
"""
headers = {'Authorization': 'Bearer ' + access_token}
r = requests.get(self.url + '/accounts', headers=headers)
return r
def create_feed_item(self, access_token, account_id, title, image_url, background_color = '#FCF1EE', body_color = '#FCF1EE', title_color = '#333', body = ''):
"""
publish a new feed entry
"""
headers = {'Authorization': 'Bearer ' + access_token}
payload = {
"account_id": account_id,
"type": "basic",
"params[title]": title,
"params[image_url]": image_url,
"params[background_color]": background_color,
"params[body_color]": body_color,
"params[title_color]": title_color,
"params[body]": body
}
r = requests.post(self.url + '/feed', data=payload, headers=headers)
return r
def register_webhook(self, access_token, account_id, url):
"""
registering a webhook
"""
headers = {'Authorization': 'Bearer ' + access_token}
payload = {"account_id": account_id, "url": url}
r = requests.post(self.url + '/feed', data=payload, headers=headers)
return r
| Python | 0.000019 |
0fe56f804a7ed4958f33a534e30e3b6c79526ea6 | Update SimplyPremiumCom.py | module/plugins/accounts/SimplyPremiumCom.py | module/plugins/accounts/SimplyPremiumCom.py | # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
    __name__ = "SimplyPremiumCom"
    __type__ = "account"
    __version__ = "0.05"

    __description__ = """Simply-Premium.com account plugin"""
    __license__ = "GPLv3"
    __authors__ = [("EvolutionClip", "evolutionclip@live.de")]

    def loadAccountInfo(self, user, req):
        """Query the Simply-Premium API for premium status, expiry and traffic."""
        validuntil = -1
        trafficleft = None

        json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
        self.logDebug("JSON data: " + json_data)
        json_data = json_loads(json_data)

        result = json_data['result']

        # The API reports vip == 0 for free accounts.
        if result.get('vip') == 0:
            return {"premium": False}

        if result.get('timeend'):
            validuntil = float(result['timeend'])

        # BUG FIX: the old code tested for the 'traffic' key but then read
        # 'remain_traffic', which could raise KeyError; test the key that
        # is actually read.
        if result.get('remain_traffic'):
            trafficleft = float(result['remain_traffic']) / 1024  #@TODO: Remove `/ 1024` in 0.4.10

        return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}

    def login(self, user, data, req):
        """Log in with an API key (empty/'0' password) or with credentials."""
        req.cj.setCookie("simply-premium.com", "lang", "EN")

        if data['password'] == '' or data['password'] == '0':
            post_data = {"key": user}
        else:
            post_data = {"login_name": user, "login_pass": data['password']}

        html = req.load("http://www.simply-premium.com/login.php", post=post_data, decode=True)

        # A successful login page contains a logout link.
        if 'logout' not in html:
            self.wrongPassword()
| # -*- coding: utf-8 -*-
from module.common.json_layer import json_loads
from module.plugins.Account import Account
class SimplyPremiumCom(Account):
__name__ = "SimplyPremiumCom"
__type__ = "account"
__version__ = "0.03"
__description__ = """Simply-Premium.com account plugin"""
__license__ = "GPLv3"
__authors__ = [("EvolutionClip", "evolutionclip@live.de")]
def loadAccountInfo(self, user, req):
validuntil = -1
trafficleft = None
json_data = req.load('http://www.simply-premium.com/api/user.php?format=json')
self.logDebug("JSON data: " + json_data)
json_data = json_loads(json_data)
if 'vip' in json_data['result'] and json_data['result']['vip'] == 0:
return {"premium": False}
if 'timeend' in json_data['result'] and json_data['result']['timeend']:
validuntil = float(json_data['result']['timeend'])
if 'traffic' in json_data['result'] and json_data['result']['traffic']:
trafficleft = float(json_data['result']['traffic']) / 1024 #@TODO: Remove `/ 1024` in 0.4.10
return {"premium": True, "validuntil": validuntil, "trafficleft": trafficleft}
def login(self, user, data, req):
req.cj.setCookie("simply-premium.com", "lang", "EN")
if data['password'] == '' or data['password'] == '0':
post_data = {"key": user}
else:
post_data = {"login_name": user, "login_pass": data['password']}
html = req.load("http://www.simply-premium.com/login.php", post=post_data, decode=True)
if 'logout' not in html:
self.wrongPassword()
| Python | 0 |
a58de69cd0b93f1967a9b56812b1972a3ab9e5d1 | Update main.py | WikiQA_CNN+Feat/main.py | WikiQA_CNN+Feat/main.py | """
** deeplean-ai.com **
** dl-lab **
created by :: GauravBh1010tt
"""
import numpy as np
from dl_text import *
import model
import wiki_utils as wk
from dl_text.metrics import eval_metric
glove_fname = 'D:/workspace/Trec_QA-master/data/Glove/glove.6B.50d.txt'
################### DEFINING MODEL ###################
lrmodel = model.cnn
model_name = lrmodel.func_name
################### DEFINING HYPERPARAMETERS ###################
dimx = 60
dimy = 60
dimft = 44
batch_size = 50
vocab_size = 8000
embedding_dim = 50
nb_filter = 120,
filter_length = (50,4)
depth = 1
nb_epoch = 3
ques, ans, label_train, train_len, test_len, wordVec_model, res_fname, pred_fname, feat_train, feat_test = wk.load_wiki(model_name, glove_fname)
data_l , data_r, embedding_matrix = dl.process_data(ques, ans,
wordVec_model,dimx=dimx,
dimy=dimy,vocab_size=vocab_size,
embedding_dim=embedding_dim)
X_train_l,X_test_l,X_dev_l,X_train_r,X_test_r,X_dev_r = wk.prepare_train_test(data_l,data_r,
train_len,test_len)
if model_name == 'cnn_ft':
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, dimft=dimft, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n',model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r,feat_train],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname, feat_test=feat_test)
else:
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n', model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname)
print 'MAP : ',map_val,' MRR : ',mrr_val
| import numpy as np
from dl_text import *
import model
import wiki_utils as wk
from dl_text.metrics import eval_metric
glove_fname = 'D:/workspace/Trec_QA-master/data/Glove/glove.6B.50d.txt'
################### DEFINING MODEL ###################
lrmodel = model.cnn
model_name = lrmodel.func_name
################### DEFINING HYPERPARAMETERS ###################
dimx = 60
dimy = 60
dimft = 44
batch_size = 50
vocab_size = 8000
embedding_dim = 50
nb_filter = 120,
filter_length = (50,4)
depth = 1
nb_epoch = 3
ques, ans, label_train, train_len, test_len, wordVec_model, res_fname, pred_fname, feat_train, feat_test = wk.load_wiki(model_name, glove_fname)
data_l , data_r, embedding_matrix = dl.process_data(ques, ans,
wordVec_model,dimx=dimx,
dimy=dimy,vocab_size=vocab_size,
embedding_dim=embedding_dim)
X_train_l,X_test_l,X_dev_l,X_train_r,X_test_r,X_dev_r = wk.prepare_train_test(data_l,data_r,
train_len,test_len)
if model_name == 'cnn_ft':
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, dimft=dimft, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n',model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r,feat_train],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname, feat_test=feat_test)
else:
lrmodel = lrmodel(embedding_matrix, dimx=dimx, dimy=dimy, nb_filter = 120,
embedding_dim = 50, filter_length = (50,4), vocab_size = 8000, depth = 1)
print '\n', model_name,'model built \n'
lrmodel.fit([X_train_l, X_train_r],label_train,batch_size=batch_size,nb_epoch=nb_epoch,verbose=2)
map_val, mrr_val = eval_metric(lrmodel, X_test_l, X_test_r, res_fname, pred_fname)
print 'MAP : ',map_val,' MRR : ',mrr_val
| Python | 0.000001 |
187e8237f9ba56dc517b2ad6e58be3e8031fa9df | Update __init__.py | examples/__init__.py | examples/__init__.py | # coding: utf-8
import sys
sys.path.append("./examples")
| # coding: utf-8
# In[1]:
import sys
sys.path.append("./examples")#add the examples as a module
| Python | 0.000072 |
9ee0ad7dfad15e3d933b4d1c3fab508d99480748 | Fix example. | examples/faithful.py | examples/faithful.py | import numpy as np
import matplotlib.pyplot as plt
from gmm.algorithm import GMM
# Read in dataset from file
# Read in dataset from file
with open('faithful.txt', 'rt') as f:
    data = []
    for row in f:
        cols = row.strip('\r\n').split(' ')
        # `np.float` was removed in NumPy 1.20; the builtin `float` is the
        # documented replacement (the alias always meant the same type).
        data.append(np.fromiter(map(lambda x: float(x), cols), float))
    data = np.array(data)
# Initialize GMM algorithm: two components with hand-picked starting means.
means = np.array([np.array([4.0, 80], float), np.array([2.0, 55], float)])
covariances = np.array([np.identity(2), np.identity(2)])
# Equal priors; 0.5 instead of 1/2 also avoids Python-2 integer division.
mixing_probs = np.array([0.5, 0.5], float)
gmm_model = GMM(means, covariances, mixing_probs)
# Fit GMM to the data
gmm_model.fit(data)
# Cluster data
labelled = gmm_model.cluster(data)
# Plot clustered data with the location of Gaussian mixtures
plt.figure()
# Plot contours of Gaussian mixtures
for mean, cov in zip(gmm_model.means, gmm_model.covariances):
    # Create grid covering +/- 3 standard deviations around each mean.
    mean_x = mean[0]
    std_x = np.sqrt(cov[0][0])
    mean_y = mean[1]
    std_y = np.sqrt(cov[1][1])
    x = np.linspace(mean_x - 3*std_x, mean_x + 3*std_x, 100)
    y = np.linspace(mean_y - 3*std_y, mean_y + 3*std_y, 100)
    X, Y = np.meshgrid(x, y)
    # Tabulate pdf values
    Z = np.empty(X.shape, float)
    for i in np.arange(X.shape[0]):
        for j in np.arange(X.shape[1]):
            v = np.array([X[i][j], Y[i][j]])
            Z[i][j] = gmm_model.multivariate_normal_pdf(v, mean, cov)
    # Plot contours
    plt.contour(X, Y, Z)
# Plot features assigned to each Gaussian mixture
markers = ['o', '+']
colors = ['r', 'b']
for d, l in zip(data, labelled):
    plt.scatter(d[0], d[1], color=colors[l], marker=markers[l])
plt.savefig('scatter_plot.pdf')
| import numpy as np
import matplotlib.pyplot as plt
from gmm.algorithm import GMM
# Read in dataset from file
with open('faithful.txt', 'rt') as f:
data = []
for row in f:
cols = row.strip('\r\n').split(' ')
data.append(np.fromiter(map(lambda x: float(x), cols), np.float))
data = np.array(data)
# Initialize GMM algorithm
means = np.array([np.array([4.0, 80], np.float), np.array([2.0, 55], np.float)])
covariances = np.array([np.identity(3), np.identity(2)])
mixing_probs = np.array([1/2, 1/2], np.float)
gmm_model = GMM(means, covariances, mixing_probs)
# Fit GMM to the data
gmm_model.fit(data)
# Cluster data
labelled = gmm_model.cluster(data)
# Plot clustered data with the location of Gaussian mixtures
plt.figure()
# Plot contours of Gaussian mixtures
for mean, cov in zip(gmm_model.means, gmm_model.covariances):
# Create grid
mean_x = mean[0]
std_x = np.sqrt(cov[0][0])
mean_y = mean[1]
std_y = np.sqrt(cov[1][1])
x = np.linspace(mean_x - 3*std_x, mean_x + 3*std_x, 100)
y = np.linspace(mean_y - 3*std_y, mean_y + 3*std_y, 100)
X, Y = np.meshgrid(x, y)
# Tabulate pdf values
Z = np.empty(X.shape, np.float)
for i in np.arange(X.shape[0]):
for j in np.arange(X.shape[1]):
v = np.array([X[i][j], Y[i][j]])
Z[i][j] = gmm_model.multivariate_normal_pdf(v, mean, cov)
# Plot contours
plt.contour(X, Y, Z)
# Plot features assigned to each Gaussian mixture
markers = ['o', '+']
colors = ['r', 'b']
for d, l in zip(data, labelled):
plt.scatter(d[0], d[1], color=colors[l], marker=markers[l])
plt.savefig('scatter_plot.pdf')
| Python | 0.000004 |
dc03a20265c2fc611c7b2027e76d01a495ef2e7e | fix typo | examples/ssd/eval.py | examples/ssd/eval.py | from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import VOCDetectionDataset
from chainercv.datasets import voc_detection_label_names
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
def main():
    """Evaluate a pretrained SSD model on the VOC2007 detection test set.

    Prints a running FPS figure while predicting, then mean average
    precision (computed with the VOC2007 metric) and per-class AP.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model', choices=('ssd300', 'ssd512'), default='ssd300')
    # GPU id; a negative value keeps everything on the CPU.
    parser.add_argument('--gpu', type=int, default=-1)
    parser.add_argument('--batchsize', type=int, default=32)
    args = parser.parse_args()
    if args.model == 'ssd300':
        model = SSD300(pretrained_model='voc0712')
    elif args.model == 'ssd512':
        model = SSD512(pretrained_model='voc0712')
    if args.gpu >= 0:
        chainer.cuda.get_device(args.gpu).use()
        model.to_gpu()
    # Switch thresholds/NMS settings to the benchmark ("evaluate") preset.
    model.use_preset('evaluate')
    # Difficult annotations are kept and passed through so that
    # eval_detection_voc can handle them instead of treating them as
    # ordinary ground truth.
    dataset = VOCDetectionDataset(
        year='2007', split='test', use_difficult=True, return_difficult=True)
    iterator = iterators.SerialIterator(
        dataset, args.batchsize, repeat=False, shuffle=False)
    start_time = time.time()
    pred_bboxes = list()
    pred_labels = list()
    pred_scores = list()
    gt_bboxes = list()
    gt_labels = list()
    gt_difficults = list()
    # Drain the iterator manually; with repeat=False it signals exhaustion
    # by raising StopIteration.
    while True:
        try:
            batch = next(iterator)
        except StopIteration:
            break
        imgs, bboxes, labels, difficults = zip(*batch)
        gt_bboxes.extend(bboxes)
        gt_labels.extend(labels)
        gt_difficults.extend(difficults)
        bboxes, labels, scores = model.predict(imgs)
        pred_bboxes.extend(bboxes)
        pred_labels.extend(labels)
        pred_scores.extend(scores)
        # Progress line rewritten in place (carriage return, no newline).
        fps = len(gt_bboxes) / (time.time() - start_time)
        sys.stdout.write(
            '\r{:d} of {:d} images, {:.2f} FPS'.format(
                len(gt_bboxes), len(dataset), fps))
        sys.stdout.flush()
    eval_ = eval_detection_voc(
        pred_bboxes, pred_labels, pred_scores,
        gt_bboxes, gt_labels, gt_difficults,
        use_07_metric=True)
    print()
    print('mAP: {:f}'.format(eval_['map']))
    # Per-class AP; classes missing from the result dict print a dash.
    for l, name in enumerate(voc_detection_label_names):
        if l in eval_:
            print('{:s}: {:f}'.format(name, eval_[l]))
        else:
            print('{:s}: -'.format(name))
if __name__ == '__main__':
    main()
| from __future__ import division
import argparse
import sys
import time
import chainer
from chainer import iterators
from chainercv.datasets import VOCDetectionDataset
from chainercv.datasets import voc_detection_label_names
from chainercv.evaluations import eval_detection_voc
from chainercv.links import SSD300
from chainercv.links import SSD512
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
'--model', choices=('ssd300', 'ssd512'), default='ssd300')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
if args.model == 'ssd300':
model = SSD300(pretrained_model='voc0712')
elif args.model == 'ssd512':
model = SSD512(pretrained_model='voc0712')
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
model.use_preset('evaluate')
dataset = VOCDetectionDataset(
year='2007', split='test', use_difficult=True, return_difficult=True)
iterator = iterators.SerialIterator(
dataset, args.batchsize, repeat=False, shuffle=False)
start_time = time.time()
pred_bboxes = list()
pred_labels = list()
pred_scores = list()
gt_bboxes = list()
gt_labels = list()
gt_difficults = list()
while True:
try:
batch = next(iterator)
except StopIteration:
break
imgs, bboxes, labels, difficults = zip(*batch)
gt_bboxes.extend(bboxes)
gt_labels.extend(labels)
gt_difficults.extend(difficults)
bboxes, labels, scores = model.predict(imgs)
pred_bboxes.extend(bboxes)
pred_labels.extend(labels)
pred_scores.extend(scores)
fps = len(gt_bboxes) / (time.time() - start_time)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
len(gt_bboxes), len(dataset), fps))
sys.stdout.flush()
eval_ = eval_detection_voc(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_difficults,
use_07_metric=True)
print()
print('mAP: {:f}'.format(eval_['map']))
for l, name in enumerate(voc_detection_label_names):
if l in eval_:
print('{:s}: {;f}'.format(name, eval_[l]))
else:
print('{:s}: -'.format(name))
if __name__ == '__main__':
main()
| Python | 0.999991 |
a27f561ff24b41f215bdb3e33cdcdfcc4c43bf93 | fix imports, crashes with TypeError, now | examples/testknn2.py | examples/testknn2.py | # -*- coding: utf-8 -*-
"""
SVM for Wind Power Prediction (SAES, global timeout)
================================
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
#from sklearn.neighbors import KNeighborsRegressor
from windml.datasets.nrel import NREL
from windml.mapping.power_mapping import PowerMapping
import math
from wdknn import KNN
from metaopt.core.returnspec.util.decorator import minimize
from metaopt.core.paramspec.util import param
from metaopt.core.optimize.optimize import optimize
# First Party
feature_window, horizon = 1, 3
train_step, test_step = 50,50 #only every n data points
park_id=NREL.park_id['lancaster']
windpark = NREL().get_windpark_nearest(park_id, 3, 2004, 2005)
target = windpark.get_target()
mapping = PowerMapping()
X = mapping.get_features_park(windpark, feature_window, horizon)
y = mapping.get_labels_turbine(target, feature_window, horizon)
train_to, test_to = int(math.floor(len(X) * 0.5)), len(X)
X_train=X[:train_to:train_step]
y_train=y[:train_to:train_step]
X_test=X[train_to:test_to:test_step]
y_test=y[train_to:test_to:test_step]
@minimize("Score")
#@param.float("C", interval=[1, 1000], step=1.0)
#@param.float("C_exp", interval=[0, 5], step=1)
#@param.float("gamma", interval=[0.0001, 1.0], step=0.00001)
#@param.float("gamma_exp", interval=[-5, 0], step=1)
@param.float("a",interval=[5,100.0],step=50)
@param.float("b",interval=[5,100.0],step=50)
@param.float("c",interval=[5,100.0],step=50)
@param.float("d",interval=[5,100.0],step=50)
def f(a,b,c,d):
clf = KNN(n_neighbors=5, weights=[a,b,c,d])
clf.fit(X_train, y_train)
return clf.score(X_test, y_test)
def main():
from metaopt.optimizer.saes import SAESOptimizer
from metaopt.concurrent.invoker.dualthread import DualThreadInvoker
from metaopt.concurrent.invoker.pluggable import PluggableInvoker
from metaopt.plugin.print.status import StatusPrintPlugin
from metaopt.plugin.visualization.landscape import VisualizeLandscapePlugin
from metaopt.plugin.visualization.best_fitness import VisualizeBestFitnessPlugin
timeout = 20
optimizer = SAESOptimizer(mu=5, lamb=5)
#optimizer = GridSearchOptimizer()
visualize_landscape_plugin = VisualizeLandscapePlugin()
visualize_best_fitness_plugin = VisualizeBestFitnessPlugin()
plugins = [
StatusPrintPlugin(),
visualize_landscape_plugin,
visualize_best_fitness_plugin
]
#invoker = PluggableInvoker(invoker=DualThreadInvoker(),plugins=plugins)
#optimum = custom_optimize(invoker=invoker,f=f, timeout=timeout, optimizer=optimizer)
optimum = optimize(f=f, timeout=timeout, optimizer=optimizer,
plugins=plugins)
print("The optimal parameters are %s." % str(optimum))
# visualize_landscape_plugin.show_surface_plot()
# visualize_landscape_plugin.show_image_plot()
#
visualize_best_fitness_plugin.show_fitness_invocations_plot()
# visualize_best_fitness_plugin.show_fitness_time_plot()
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
"""
SVM for Wind Power Prediction (SAES, global timeout)
================================
"""
# Future
from __future__ import absolute_import, division, print_function, \
unicode_literals, with_statement
# Third Party
#from sklearn.neighbors import KNeighborsRegressor
from windml.datasets.nrel import NREL
from windml.mapping.power_mapping import PowerMapping
import math
from wdknn import KNN
# First Party
from metaopt.core.param.util import param
from metaopt.core.returns.util.decorator import maximize
from metaopt.core.returns.util.decorator import minimize
feature_window, horizon = 1, 3
train_step, test_step = 50,50 #only every n data points
park_id=NREL.park_id['lancaster']
windpark = NREL().get_windpark_nearest(park_id, 3, 2004, 2005)
target = windpark.get_target()
mapping = PowerMapping()
X = mapping.get_features_park(windpark, feature_window, horizon)
y = mapping.get_labels_turbine(target, feature_window, horizon)
train_to, test_to = int(math.floor(len(X) * 0.5)), len(X)
X_train=X[:train_to:train_step]
y_train=y[:train_to:train_step]
X_test=X[train_to:test_to:test_step]
y_test=y[train_to:test_to:test_step]
@minimize("Score")
#@param.float("C", interval=[1, 1000], step=1.0)
#@param.float("C_exp", interval=[0, 5], step=1)
#@param.float("gamma", interval=[0.0001, 1.0], step=0.00001)
#@param.float("gamma_exp", interval=[-5, 0], step=1)
@param.float("a",interval=[5,100.0],step=50)
@param.float("b",interval=[5,100.0],step=50)
@param.float("c",interval=[5,100.0],step=50)
@param.float("d",interval=[5,100.0],step=50)
def f(a,b,c,d):
clf = KNN(n_neighbors=5, weights=[a,b,c,d])
clf.fit(X_train, y_train)
return clf.score(X_test, y_test)
def main():
from metaopt.core.main import optimize
from metaopt.core.main import custom_optimize
from metaopt.optimizer.saes import SAESOptimizer
from metaopt.optimizer.gridsearch import GridSearchOptimizer
from metaopt.invoker.dualthread import DualThreadInvoker
from metaopt.invoker.pluggable import PluggableInvoker
from metaopt.plugins.print import PrintPlugin
from metaopt.plugins.visualize import VisualizeLandscapePlugin
from metaopt.plugins.visualize import VisualizeBestFitnessPlugin
timeout = 20
optimizer = SAESOptimizer(mu=5, lamb=5)
#optimizer = GridSearchOptimizer()
visualize_landscape_plugin = VisualizeLandscapePlugin()
visualize_best_fitness_plugin = VisualizeBestFitnessPlugin()
plugins = [
PrintPlugin(),
visualize_landscape_plugin,
visualize_best_fitness_plugin
]
#invoker = PluggableInvoker(invoker=DualThreadInvoker(),plugins=plugins)
#optimum = custom_optimize(invoker=invoker,f=f, timeout=timeout, optimizer=optimizer)
optimum = optimize(f=f, timeout=timeout, optimizer=optimizer,
plugins=plugins)
print("The optimal parameters are %s." % str(optimum))
# visualize_landscape_plugin.show_surface_plot()
# visualize_landscape_plugin.show_image_plot()
#
visualize_best_fitness_plugin.show_fitness_invocations_plot()
# visualize_best_fitness_plugin.show_fitness_time_plot()
if __name__ == '__main__':
main()
| Python | 0 |
528b10713d98e2603fad62c3fb252464c08896f0 | make '/mc_trks' absolute path to avoid confusion | examples/tonphdf5.py | examples/tonphdf5.py | #!/usr/bin/env python
"""
Converts hits in a Jpp-ROOT file to HDF5.
"""
from km3pipe.pumps.aanet import AanetPump
from km3pipe import Pipeline, Module
import sys
import pandas as pd
import h5py
if len(sys.argv) < 3:
sys.exit('Usage: {0} FILENAME.root OUTPUTFILENAME.h5'.format(sys.argv[0]))
FILEPATH = sys.argv[1]
OUTPUTFILEPATH = sys.argv[2]
class HDF5Sink(Module):
def __init__(self, **context):
super(self.__class__, self).__init__(**context)
self.filename = self.get('filename') or 'dump.h5'
self.hits = {}
self.mc_tracks = {}
self.index = 0
print("Processing {0}...".format(self.filename))
def process(self, blob):
try:
self._add_hits(blob['Hits'])
except KeyError:
print("No hits found. Skipping...")
try:
self._add_mc_tracks(blob['MCTracks'])
except KeyError:
print("No MC tracks found. Skipping...")
self.index += 1
return blob
def _add_hits(self, hits):
for hit in hits:
self.hits.setdefault('event_id', []).append(self.index)
self.hits.setdefault('id', []).append(hit.id)
self.hits.setdefault('pmt_id', []).append(hit.pmt_id)
self.hits.setdefault('time', []).append(hit.t)
self.hits.setdefault('tot', []).append(ord(hit.tot))
self.hits.setdefault('triggered', []).append(bool(hit.trig))
self.hits.setdefault('dom_id', []).append(hit.dom_id)
self.hits.setdefault('channel_id', []).append(ord(hit.channel_id))
def _add_mc_tracks(self, mc_tracks):
for mc_track in mc_tracks:
self.mc_tracks.setdefault('event_id', []).append(self.index)
self.mc_tracks.setdefault('id', []).append(mc_track.id)
self.mc_tracks.setdefault('x', []).append(mc_track.pos.x)
self.mc_tracks.setdefault('y', []).append(mc_track.pos.y)
self.mc_tracks.setdefault('z', []).append(mc_track.pos.z)
self.mc_tracks.setdefault('dx', []).append(mc_track.dir.x)
self.mc_tracks.setdefault('dy', []).append(mc_track.dir.y)
self.mc_tracks.setdefault('dz', []).append(mc_track.dir.z)
self.mc_tracks.setdefault('time', []).append(mc_track.t)
self.mc_tracks.setdefault('energy', []).append(mc_track.E)
self.mc_tracks.setdefault('type', []).append(mc_track.type)
def finish(self):
h5 = h5py.File(self.filename, 'w')
if self.hits:
df = pd.DataFrame(self.hits)
rec = df.to_records(index=False)
h5.create_dataset('/hits', data=rec)
print("Finished writing hits in {0}".format(self.filename))
if self.mc_tracks:
df = pd.DataFrame(self.mc_tracks)
rec = df.to_records(index=False)
h5.create_dataset('/mc_tracks', data=rec)
print("Finished writing MC tracks in {0}".format(self.filename))
h5.close()
pipe = Pipeline()
pipe.attach(AanetPump, filename=FILEPATH)
pipe.attach(HDF5Sink, filename=OUTPUTFILEPATH)
pipe.drain()
| #!/usr/bin/env python
"""
Converts hits in a Jpp-ROOT file to HDF5.
"""
from km3pipe.pumps.aanet import AanetPump
from km3pipe import Pipeline, Module
import sys
import pandas as pd
import h5py
if len(sys.argv) < 3:
sys.exit('Usage: {0} FILENAME.root OUTPUTFILENAME.h5'.format(sys.argv[0]))
FILEPATH = sys.argv[1]
OUTPUTFILEPATH = sys.argv[2]
class HDF5Sink(Module):
def __init__(self, **context):
super(self.__class__, self).__init__(**context)
self.filename = self.get('filename') or 'dump.h5'
self.hits = {}
self.mc_tracks = {}
self.index = 0
print("Processing {0}...".format(self.filename))
def process(self, blob):
try:
self._add_hits(blob['Hits'])
except KeyError:
print("No hits found. Skipping...")
try:
self._add_mc_tracks(blob['MCTracks'])
except KeyError:
print("No MC tracks found. Skipping...")
self.index += 1
return blob
def _add_hits(self, hits):
for hit in hits:
self.hits.setdefault('event_id', []).append(self.index)
self.hits.setdefault('id', []).append(hit.id)
self.hits.setdefault('pmt_id', []).append(hit.pmt_id)
self.hits.setdefault('time', []).append(hit.t)
self.hits.setdefault('tot', []).append(ord(hit.tot))
self.hits.setdefault('triggered', []).append(bool(hit.trig))
self.hits.setdefault('dom_id', []).append(hit.dom_id)
self.hits.setdefault('channel_id', []).append(ord(hit.channel_id))
def _add_mc_tracks(self, mc_tracks):
for mc_track in mc_tracks:
self.mc_tracks.setdefault('event_id', []).append(self.index)
self.mc_tracks.setdefault('id', []).append(mc_track.id)
self.mc_tracks.setdefault('x', []).append(mc_track.pos.x)
self.mc_tracks.setdefault('y', []).append(mc_track.pos.y)
self.mc_tracks.setdefault('z', []).append(mc_track.pos.z)
self.mc_tracks.setdefault('dx', []).append(mc_track.dir.x)
self.mc_tracks.setdefault('dy', []).append(mc_track.dir.y)
self.mc_tracks.setdefault('dz', []).append(mc_track.dir.z)
self.mc_tracks.setdefault('time', []).append(mc_track.t)
self.mc_tracks.setdefault('energy', []).append(mc_track.E)
self.mc_tracks.setdefault('type', []).append(mc_track.type)
def finish(self):
h5 = h5py.File(self.filename, 'w')
if self.hits:
df = pd.DataFrame(self.hits)
rec = df.to_records(index=False)
h5.create_dataset('/hits', data=rec)
print("Finished writing hits in {0}".format(self.filename))
if self.mc_tracks:
df = pd.DataFrame(self.mc_tracks)
rec = df.to_records(index=False)
h5.create_dataset('mc_tracks', data=rec)
print("Finished writing MC tracks in {0}".format(self.filename))
h5.close()
pipe = Pipeline()
pipe.attach(AanetPump, filename=FILEPATH)
pipe.attach(HDF5Sink, filename=OUTPUTFILEPATH)
pipe.drain()
| Python | 0.000004 |
2c09c700b524a7272436feff19c4128ca3211725 | Update word2vec.py | examples/word2vec.py | examples/word2vec.py | import copy
import gensim
import logging
import pyndri
import sys
logging.basicConfig(level=logging.INFO)
if len(sys.argv) <= 1:
logging.error('Usage: python %s <path-to-indri-index>'.format(sys.argv[0]))
sys.exit(0)
logging.info('Initializing word2vec.')
word2vec_init = gensim.models.Word2Vec(
size=300, # Embedding size
window=5, # One-sided window size
sg=True, # Skip-gram.
min_count=5, # Minimum word frequency.
sample=1e-3, # Sub-sample threshold.
hs=False, # Hierarchical softmax.
negative=10, # Number of negative examples.
iter=1, # Number of iterations.
workers=8, # Number of workers.
)
with pyndri.open(sys.argv[1]) as index:
logging.info('Loading vocabulary.')
dictionary = pyndri.extract_dictionary(index)
sentences = pyndri.compat.IndriSentences(index, dictionary)
logging.info('Constructing word2vec vocabulary.')
# Build vocab.
word2vec_init.build_vocab(sentences, trim_rule=None)
models = [word2vec_init]
for epoch in range(1, 5 + 1):
logging.info('Epoch %d', epoch)
model = copy.deepcopy(models[-1])
model.train(sentences)
models.append(model)
logging.info('Trained models: %s', models)
| import copy
import gensim
import logging
import pyndri
import sys
logging.basicConfig(level=logging.INFO)
if len(sys.argv) <= 1:
logging.error('Usage: python %s <path-to-indri-index>'.format(sys.argv[0]))
sys.exit(0)
logging.info('Initializing word2vec.')
word2vec_init = gensim.models.Word2Vec(
size=300, # Embedding size
window=5, # One-sided window size
sg=True, # Skip-gram.
min_count=5, # Minimum word frequency.
sample=1e-3, # Sub-sample treshold.
hs=False, # Hierarchical softmax.
negative=10, # Number of negative examples.
iter=1, # Number of iterations.
workers=8, # Number of workers.
)
with pyndri.open(sys.argv[1]) as index:
logging.info('Loading vocabulary.')
dictionary = pyndri.extract_dictionary(index)
sentences = pyndri.compat.IndriSentences(index, dictionary)
logging.info('Constructing word2vec vocabulary.')
# Build vocab.
word2vec_init.build_vocab(sentences, trim_rule=None)
models = [word2vec_init]
for epoch in range(1, 5 + 1):
logging.info('Epoch %d', epoch)
model = copy.deepcopy(models[-1])
model.train(sentences)
models.append(model)
logging.info('Trained models: %s', models)
| Python | 0.000014 |
2ece6032b2344e3cf6304a757714d0ecf5015324 | split drive controller to allow other pwm interface eg raspy juice | fishpi/vehicle/test_drive.py | fishpi/vehicle/test_drive.py | #!/usr/bin/python
#
# FishPi - An autonomous drop in the ocean
#
# Simple test of PWM motor and servo drive
#
import raspberrypi
from time import sleep
from drive_controller import AdafruitDriveController
if __name__ == "__main__":
print "testing drive controller..."
drive = AdafruitDriveController(debug=True, i2c_bus=raspberrypi.i2c_bus())
print "run full ahead for 5 sec..."
drive.set_throttle(1.0)
sleep(5)
print "run 50% ahead for 5 sec..."
drive.set_throttle(0.5)
sleep(5)
print "run 0% for 5 sec..."
drive.set_throttle(0.0)
sleep(5)
print "run 50% reverse for 5 sec"
drive.set_throttle(-0.5)
sleep(5)
print "run full reverse for 5 sec"
drive.set_throttle(-1.0)
sleep(5)
print "and back to neutral..."
drive.set_throttle(0.0)
sleep(5)
print "check out of bounds errors"
try:
drive.set_throttle(15.0)
except ValueError:
print "caught 15"
try:
drive.set_throttle(-10.0)
except ValueError:
print "caught -10"
# test steering
print "steer hard to port for 5 sec"
drive.set_heading(-0.785398)
sleep(5)
print "steer to port for 5 sec"
drive.set_heading(-0.3927)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
print "steer to starboard for 5 sec"
drive.set_heading(0.3927)
sleep(5)
print "steer hard to starboard for 5 sec"
drive.set_heading(0.785398)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
| #!/usr/bin/python
#
# FishPi - An autonomous drop in the ocean
#
# Simple test of PWM motor and servo drive
#
import raspberrypi
from time import sleep
from drive_controller import DriveController
if __name__ == "__main__":
print "testing drive controller..."
drive = DriveController(debug=True, i2c_bus=raspberrypi.i2c_bus())
print "run full ahead for 5 sec..."
drive.set_throttle(1.0)
sleep(5)
print "run 50% ahead for 5 sec..."
drive.set_throttle(0.5)
sleep(5)
print "run 0% for 5 sec..."
drive.set_throttle(0.0)
sleep(5)
print "run 50% reverse for 5 sec"
drive.set_throttle(-0.5)
sleep(5)
print "run full reverse for 5 sec"
drive.set_throttle(-1.0)
sleep(5)
print "and back to neutral..."
drive.set_throttle(0.0)
sleep(5)
print "check out of bounds errors"
try:
drive.set_throttle(15.0)
except ValueError:
print "caught 15"
try:
drive.set_throttle(-10.0)
except ValueError:
print "caught -10"
# test steering
print "steer hard to port for 5 sec"
drive.set_heading(-0.785398)
sleep(5)
print "steer to port for 5 sec"
drive.set_heading(-0.3927)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
print "steer to starboard for 5 sec"
drive.set_heading(0.3927)
sleep(5)
print "steer hard to starboard for 5 sec"
drive.set_heading(0.785398)
sleep(5)
print "and back to neutral..."
drive.set_heading(0.0)
sleep(5)
| Python | 0 |
3566c7689f59715f2d58886f52d3dd0c00a0ce4e | change manage.py | maili-develop/manage.py | maili-develop/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
if 'test' in sys.argv:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "test.settings")
else:
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maili.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "maili.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| Python | 0.000002 |
cf79339061669bace0c97ca3e3452b27b77ad8da | Fix region not being passed to JAAS | conjureup/controllers/bootstrap/common.py | conjureup/controllers/bootstrap/common.py | from pathlib import Path
from conjureup import events, juju
from conjureup.app_config import app
from conjureup.models.step import StepModel
from conjureup.telemetry import track_event
class BaseBootstrapController:
msg_cb = NotImplementedError()
def is_existing_controller(self):
controllers = juju.get_controllers()['controllers']
return app.provider.controller in controllers
async def run(self):
await app.provider.configure_tools()
if app.is_jaas or self.is_existing_controller():
await self.do_add_model()
else:
await self.do_bootstrap()
async def do_add_model(self):
self.emit('Creating Juju model.')
cloud_with_region = app.provider.cloud
if app.provider.region:
cloud_with_region = '/'.join([app.provider.cloud,
app.provider.region])
await juju.add_model(app.provider.model,
app.provider.controller,
cloud_with_region,
app.provider.credential)
self.emit('Juju model created.')
events.Bootstrapped.set()
async def do_bootstrap(self):
await self.pre_bootstrap()
self.emit('Bootstrapping Juju controller.')
track_event("Juju Bootstrap", "Started", "")
cloud_with_region = app.provider.cloud
if app.provider.region:
cloud_with_region = '/'.join([app.provider.cloud,
app.provider.region])
success = await juju.bootstrap(app.provider.controller,
cloud_with_region,
app.provider.model,
credential=app.provider.credential)
if not success:
log_file = '{}-bootstrap.err'.format(app.provider.controller)
log_file = Path(app.config['spell-dir']) / log_file
err_log = log_file.read_text('utf8').splitlines()
app.log.error("Error bootstrapping controller: "
"{}".format(err_log))
app.sentry.context.merge({'extra': {'err_log': err_log[-400:]}})
raise Exception('Unable to bootstrap (cloud type: {})'.format(
app.provider.cloud_type))
self.emit('Bootstrap complete.')
track_event("Juju Bootstrap", "Done", "")
await juju.login() # login to the newly created (default) model
step = StepModel({},
filename='00_post-bootstrap',
name='post-bootstrap')
await step.run(self.msg_cb, 'Juju Post-Bootstrap')
events.Bootstrapped.set()
async def pre_bootstrap(self):
""" runs pre bootstrap script if exists
"""
step = StepModel({},
filename='00_pre-bootstrap',
name='pre-bootstrap')
await step.run(self.msg_cb)
def emit(self, msg):
app.log.info(msg)
self.msg_cb(msg)
| from pathlib import Path
from conjureup import events, juju
from conjureup.app_config import app
from conjureup.models.step import StepModel
from conjureup.telemetry import track_event
class BaseBootstrapController:
msg_cb = NotImplementedError()
def is_existing_controller(self):
controllers = juju.get_controllers()['controllers']
return app.provider.controller in controllers
async def run(self):
await app.provider.configure_tools()
if app.is_jaas or self.is_existing_controller():
await self.do_add_model()
else:
await self.do_bootstrap()
async def do_add_model(self):
self.emit('Creating Juju model.')
await juju.add_model(app.provider.model,
app.provider.controller,
app.provider.cloud,
app.provider.credential)
self.emit('Juju model created.')
events.Bootstrapped.set()
async def do_bootstrap(self):
await self.pre_bootstrap()
self.emit('Bootstrapping Juju controller.')
track_event("Juju Bootstrap", "Started", "")
cloud_with_region = app.provider.cloud
if app.provider.region:
cloud_with_region = '/'.join([app.provider.cloud,
app.provider.region])
success = await juju.bootstrap(app.provider.controller,
cloud_with_region,
app.provider.model,
credential=app.provider.credential)
if not success:
log_file = '{}-bootstrap.err'.format(app.provider.controller)
log_file = Path(app.config['spell-dir']) / log_file
err_log = log_file.read_text('utf8').splitlines()
app.log.error("Error bootstrapping controller: "
"{}".format(err_log))
app.sentry.context.merge({'extra': {'err_log': err_log[-400:]}})
raise Exception('Unable to bootstrap (cloud type: {})'.format(
app.provider.cloud_type))
self.emit('Bootstrap complete.')
track_event("Juju Bootstrap", "Done", "")
await juju.login() # login to the newly created (default) model
step = StepModel({},
filename='00_post-bootstrap',
name='post-bootstrap')
await step.run(self.msg_cb, 'Juju Post-Bootstrap')
events.Bootstrapped.set()
async def pre_bootstrap(self):
""" runs pre bootstrap script if exists
"""
step = StepModel({},
filename='00_pre-bootstrap',
name='pre-bootstrap')
await step.run(self.msg_cb)
def emit(self, msg):
app.log.info(msg)
self.msg_cb(msg)
| Python | 0 |
ed267933edf8b6e2e2b63d11ece7457943fe9646 | Add documentation for powerline.lib.shell.run_cmd | powerline/lib/shell.py | powerline/lib/shell.py | # vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, division, print_function
from subprocess import Popen, PIPE
from locale import getlocale, getdefaultlocale, LC_MESSAGES
def _get_shell_encoding():
return getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8'
def run_cmd(pl, cmd, stdin=None):
'''Run command and return its stdout, stripped
If running command fails returns None and logs failure to ``pl`` argument.
:param PowerlineLogger pl:
Logger used to log failures.
:param list cmd:
Command which will be run.
:param str stdin:
String passed to command. May be None.
'''
try:
p = Popen(cmd, stdout=PIPE, stdin=PIPE)
except OSError as e:
pl.exception('Could not execute command ({0}): {1}', e, cmd)
return None
else:
stdout, err = p.communicate(stdin)
stdout = stdout.decode(_get_shell_encoding())
return stdout.strip()
def asrun(pl, ascript):
'''Run the given AppleScript and return the standard output and error.'''
return run_cmd(pl, ['osascript', '-'], ascript)
def readlines(cmd, cwd):
'''Run command and read its output, line by line
:param list cmd:
Command which will be run.
:param str cwd:
Working directory of the command which will be run.
'''
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd)
encoding = _get_shell_encoding()
p.stderr.close()
with p.stdout:
for line in p.stdout:
yield line[:-1].decode(encoding)
| # vim:fileencoding=utf-8:noet
from __future__ import absolute_import, unicode_literals, division, print_function
from subprocess import Popen, PIPE
from locale import getlocale, getdefaultlocale, LC_MESSAGES
def _get_shell_encoding():
return getlocale(LC_MESSAGES)[1] or getdefaultlocale()[1] or 'utf-8'
def run_cmd(pl, cmd, stdin=None):
try:
p = Popen(cmd, stdout=PIPE, stdin=PIPE)
except OSError as e:
pl.exception('Could not execute command ({0}): {1}', e, cmd)
return None
else:
stdout, err = p.communicate(stdin)
stdout = stdout.decode(_get_shell_encoding())
return stdout.strip()
def asrun(pl, ascript):
'''Run the given AppleScript and return the standard output and error.'''
return run_cmd(pl, ['osascript', '-'], ascript)
def readlines(cmd, cwd):
'''Run command and read its output, line by line
:param list cmd:
Command which will be run.
:param str cwd:
Working directory of the command which will be run.
'''
p = Popen(cmd, shell=False, stdout=PIPE, stderr=PIPE, cwd=cwd)
encoding = _get_shell_encoding()
p.stderr.close()
with p.stdout:
for line in p.stdout:
yield line[:-1].decode(encoding)
| Python | 0.000001 |
be8fd3f10dbfd8e2099a046340a8e51758e60bd5 | Add signout view so we can signout (by entering url manually) | makerbase/views/auth.py | makerbase/views/auth.py | import json
from urllib import urlencode
from urlparse import parse_qs, urlsplit, urlunsplit
from flask import redirect, request, url_for
from flaskext.login import LoginManager, login_user, logout_user
import requests
from makerbase import app
from makerbase.models import User
login_manager = LoginManager()
login_manager.setup_app(app, add_context_processor=True)
login_manager.user_loader(User.get)
@app.route('/signin/github')
def signin_github():
urlparts = urlsplit(request.base_url)
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'redirect_url': urlunsplit((urlparts.scheme, urlparts.netloc, url_for('complete_github'), None, None)),
'scope': '',
}
redirect_url = 'https://github.com/login/oauth/authorize?%s' % urlencode(params)
return redirect(redirect_url)
@app.route('/signout')
def signout():
logout_user()
return redirect(url_for('home'))
@app.route('/complete/github')
def complete_github():
try:
code = request.args.get('code')
except KeyError:
raise # TODO
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'client_secret': app.config['GITHUB_SECRET'],
'code': code,
}
token_resp = requests.post('https://github.com/login/oauth/access_token', data=params)
token_params = parse_qs(token_resp.content)
access_token = token_params['access_token']
user_resp = requests.get('https://api.github.com/user', data={'access_token': access_token})
github_user = json.loads(user_resp.content)
userid = u"github:%s" % github_user['login']
user = User.get(userid)
if user is None:
user = User(userid)
user.name = github_user['name']
user.avatar_url = github_user['avatar_url']
user.profile_url = github_user['html_url']
user.save()
login_user(user)
return redirect(url_for('home'))
| import json
from urllib import urlencode
from urlparse import parse_qs, urlsplit, urlunsplit
from flask import redirect, request, url_for
from flaskext.login import LoginManager, login_user
import requests
from makerbase import app
from makerbase.models import User
login_manager = LoginManager()
login_manager.setup_app(app, add_context_processor=True)
login_manager.user_loader(User.get)
@app.route('/signin/github')
def signin_github():
urlparts = urlsplit(request.base_url)
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'redirect_url': urlunsplit((urlparts.scheme, urlparts.netloc, url_for('complete_github'), None, None)),
'scope': '',
}
redirect_url = 'https://github.com/login/oauth/authorize?%s' % urlencode(params)
return redirect(redirect_url)
@app.route('/complete/github')
def complete_github():
try:
code = request.args.get('code')
except KeyError:
raise # TODO
params = {
'client_id': app.config['GITHUB_CLIENT_ID'],
'client_secret': app.config['GITHUB_SECRET'],
'code': code,
}
token_resp = requests.post('https://github.com/login/oauth/access_token', data=params)
token_params = parse_qs(token_resp.content)
access_token = token_params['access_token']
user_resp = requests.get('https://api.github.com/user', data={'access_token': access_token})
github_user = json.loads(user_resp.content)
userid = u"github:%s" % github_user['login']
user = User.get(userid)
if user is None:
user = User(userid)
user.name = github_user['name']
user.avatar_url = github_user['avatar_url']
user.profile_url = github_user['html_url']
user.save()
login_user(user)
return redirect(url_for('home'))
| Python | 0 |
df27f7ad62eebf29b42bfa9b2bce7d73739c4a8e | Fix minor template modernization bug | enhydris/conf/settings.py | enhydris/conf/settings.py | # Enhydris settings for {{ project_name }} project.
#
# Generated by 'enhydris-admin newinstance' using Enhydris {{ enhydris_version }}
# and Django {{ django_version }}.
#
# For more information on this file, see
# http://enhydris.readthedocs.org/en/{{ enhydris_docs_version }}/general/install.html#settings-reference
from enhydris.settings.base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# SECURITY_WARNING: Keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# List the domains through which this instance will be accessible. Enhydris
# will refuse to serve other domains.
ALLOWED_HOSTS = ['example.com']
# Administrators who will be notified whenever attention is needed.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Whenever the system emails users, this is the address from which the emails
# will appear to have been sent.
DEFAULT_FROM_EMAIL = 'noreply@example.com'
# Whenever the system emails administrators, this is the address from which the
# emails will appear to have been sent.
SERVER_EMAIL = DEFAULT_FROM_EMAIL
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'enhydris_db',
'USER': 'enhydris_user',
'PASSWORD': 'topsecret',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Default system timezone, such as 'Europe/Athens'.
TIME_ZONE = 'UTC'
# You should probably leave this as it is. See the installation instructions if
# in doubt.
SITE_ID = 1
# Where static files will be stored. By "static" files we mean css, javascript,
# and images of the skin. These files will be copied there at installation
# time when executing "python manage.py collectstatic".
STATIC_ROOT = '/var/cache/enhydris/static/'
# Where media files will be stored. "media" files are static files uploaded
# by users, e.g. images and videos of stations. The web server must be
# configured to map the URL /enhydris-media/ to that directory. See the
# installation instructions for more information.
MEDIA_ROOT = '/var/lib/enhydris/media/'
# The web server must be configured to map the URLs below to the directories
# specified by STATIC_ROOT and MEDIA_ROOT above. Enhydris will use the settings
# below to create appropriate links.
STATIC_URL = '/enhydris-static/'
MEDIA_URL = '/enhydris-media/'
# Mail server settings; used whenever the system needs to email users or
# admins.
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.example.com'
EMAIL_HOST_USER = 'emailuser'
EMAIL_HOST_PASSWORD = 'topsecret'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# The above are only the settings that are absolutely essential. Check the
# installation instructions for more settings that you can set.
| # Enhydris settings for {{ project_name }} project.
#
# Generated by 'enhydris-admin newinstance' using Enhydris {{ enhydris_version }}
# and Django {{ django_version }}.
#
# For more information on this file, see
# http://enhydris.readthedocs.org/en/{{ enhydris_docs_version }}/general/install.html#settings-reference
from enhydris.settings.base import *
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SECURITY_WARNING: Keep the secret key used in production secret!
SECRET_KEY = '{{ secret_key }}'
# List the domains through which this instance will be accessible. Enhydris
# will refuse to serve other domains.
ALLOWED_HOSTS = ['example.com']
# Administrators who will be notified whenever attention is needed.
ADMINS = (
# ('Your Name', 'your_email@domain.com'),
)
MANAGERS = ADMINS
# Whenever the system emails users, this is the address from which the emails
# will appear to have been sent.
DEFAULT_FROM_EMAIL = 'noreply@example.com'
# Whenever the system emails administrators, this is the address from which the
# emails will appear to have been sent.
SERVER_EMAIL = DEFAULT_FROM_EMAIL
DATABASES = {
'default': {
'ENGINE': 'django.contrib.gis.db.backends.postgis',
'NAME': 'enhydris_db',
'USER': 'enhydris_user',
'PASSWORD': 'topsecret',
'HOST': 'localhost',
'PORT': '5432',
}
}
# Default system timezone, such as 'Europe/Athens'.
TIME_ZONE = 'UTC'
# You should probably leave this as it is. See the installation instructions if
# in doubt.
SITE_ID = 1
# Where static files will be stored. By "static" files we mean css, javascript,
# and images of the skin. These files will be copied there at installation
# time when executing "python manage.py collectstatic".
STATIC_ROOT = '/var/cache/enhydris/static/'
# Where media files will be stored. "media" files are static files uploaded
# by users, e.g. images and videos of stations. The web server must be
# configured to map the URL /enhydris-media/ to that directory. See the
# installation instructions for more information.
MEDIA_ROOT = '/var/lib/enhydris/media/'
# The web server must be configured to map the URLs below to the directories
# specified by STATIC_ROOT and MEDIA_ROOT above. Enhydris will use the settings
# below to create appropriate links.
STATIC_URL = '/enhydris-static/'
MEDIA_URL = '/enhydris-media/'
# Mail server settings; used whenever the system needs to email users or
# admins.
EMAIL_USE_TLS = True
EMAIL_PORT = 587
EMAIL_HOST = 'smtp.example.com'
EMAIL_HOST_USER = 'emailuser'
EMAIL_HOST_PASSWORD = 'topsecret'
if DEBUG:
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# The above are only the settings that are absolutely essential. Check the
# installation instructions for more settings that you can set.
| Python | 0 |
4464919c5114193179490c151844fd771bfd880b | fix setup without flask installed | flask_ecstatic.py | flask_ecstatic.py | """Serves static files with optional directory index.
Files in static folder are automatically served on static URL by Flask.
See http://flask.pocoo.org/docs/0.10/api/#application-object.
It's recommended to specify static folder and URL path directly on Flask application object,
unless you need additional static folders, or have multiple route handlers for the URL path,
e.g. when serving static files on root URL ('') for any path unmatched with previous routes.
"""
__all__ = 'add'.split()
__version__ = '0.1.1'
import os
def add(app, url = None, path = None, endpoint=None, index='index.html', send_from_directory=None):
    """Adds static files endpoint with optional directory index.

    The flask import is deferred (or injected via *send_from_directory*) so
    importing this module does not require flask to be installed.
    """
    if not send_from_directory:
        from flask import send_from_directory
    url = url or app.static_url_path or ''
    path = os.path.abspath(path or app.static_folder or '.')
    endpoint = endpoint or 'static_' + os.path.basename(path)

    if path == app.static_folder:
        # Flask serves its own static folder automatically; only verify that
        # the requested URL matches the one Flask uses.
        if url != app.static_url_path:
            raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'
                             ' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))
    else:
        def static_files(filename):
            return send_from_directory(path, filename)
        app.add_url_rule(url + '/<path:filename>', endpoint, static_files)

    if index:
        def static_index():
            return send_from_directory(path, index)
        # Serve the index document on the directory URL with a trailing slash.
        app.add_url_rule(url + '/', endpoint + '_index', static_index)
        if url:
            # Also serve it on the bare URL (no trailing slash).
            app.add_url_rule(url, endpoint + '_index_bare', static_index)
| """Serves static files with optional directory index.
Files in static folder are automatically served on static URL by Flask.
See http://flask.pocoo.org/docs/0.10/api/#application-object.
It's recommended to specify static folder and URL path directly on Flask application object,
unless you need additional static folders, or have multiple route handlers for the URL path,
e.g. when serving static files on root URL ('') for any path unmatched with previous routes.
"""
__all__ = 'add'.split()
__version__ = '0.1.1'
import os
from flask import send_from_directory
def add(app, url = None, path = None, endpoint=None, index='index.html'):
    """Adds static files endpoint with optional directory index."""
    # Default to the app's static URL/folder, falling back to cwd.
    url = url or app.static_url_path or ''
    path = os.path.abspath(path or app.static_folder or '.')
    endpoint = endpoint or 'static_' + os.path.basename(path)

    if path == app.static_folder:
        # Flask already serves its own static folder; only verify the URL.
        if url != app.static_url_path:
            raise ValueError('Files in `{}` path are automatically served on `{}` URL by Flask.'
                ' Use different path for serving them at `{}` URL'.format(path, app.static_url_path, url))
    else:
        @app.route(url + '/<path:filename>', endpoint = endpoint)
        def static_files(filename):
            return send_from_directory(path, filename)

    # Optionally serve an index document on the directory URL(s).
    if index:
        @app.route(url + '/', endpoint = endpoint + '_index')
        def static_index():
            return send_from_directory(path, index)
        if url:
            # Also respond on the bare URL (no trailing slash).
            @app.route(url, endpoint = endpoint + '_index_bare')
            def static_index_bare():
                return send_from_directory(path, index)
| Python | 0.000001 |
b83e371f37477b5eaf552ac78383e3f0ac94bc21 | Change in presseurop RSS feed | modules/presseurop/backend.py | modules/presseurop/backend.py | # -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
"backend for http://www.presseurop.eu"
from weboob.capabilities.messages import ICapMessages, Thread
from weboob.tools.capabilities.messages.GenericBackend import GenericNewspaperBackend
from weboob.tools.backend import BackendConfig
from weboob.tools.value import Value
from .browser import NewspaperPresseuropBrowser
from .tools import rssid, url2id
from weboob.tools.newsfeed import Newsfeed
class NewspaperPresseuropBackend(GenericNewspaperBackend, ICapMessages):
    """Weboob backend exposing Presseurop RSS articles as message threads."""

    MAINTAINER = 'Florent Fourcot'
    EMAIL = 'weboob@flo.fourcot.fr'
    VERSION = '0.d'
    LICENSE = 'AGPLv3+'
    STORAGE = {'seen': {}}
    NAME = 'presseurop'
    DESCRIPTION = u'Presseurop website'
    BROWSER = NewspaperPresseuropBrowser
    RSSID = staticmethod(rssid)
    URL2ID = staticmethod(url2id)
    RSSSIZE = 140
    CONFIG = BackendConfig(Value('lang', label='Lang of articles',
                           choices={'fr': 'fr', 'de': 'de', 'en': 'en', 'cs': 'cs', 'es': 'es', 'it': 'it', 'nl': 'nl', 'pl': 'pl', 'pt': 'pt', 'ro': 'ro'}, default='fr'))

    def __init__(self, *args, **kwargs):
        GenericNewspaperBackend.__init__(self, *args, **kwargs)
        # The feed URL is language-specific.
        lang = self.config['lang'].get()
        self.RSS_FEED = 'http://www.presseurop.eu/%s/rss.xml' % lang

    def iter_threads(self):
        """Yield one Thread per entry of the configured RSS feed."""
        feed = Newsfeed(self.RSS_FEED, self.RSSID)
        for entry in feed.iter_entries():
            thread = Thread(entry.link)
            thread.title = entry.title
            thread.date = entry.datetime
            yield thread
| # -*- coding: utf-8 -*-
# Copyright(C) 2012 Florent Fourcot
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
"backend for http://www.presseurop.eu"
from weboob.capabilities.messages import ICapMessages, Thread
from weboob.tools.capabilities.messages.GenericBackend import GenericNewspaperBackend
from weboob.tools.backend import BackendConfig
from weboob.tools.value import Value
from .browser import NewspaperPresseuropBrowser
from .tools import rssid, url2id
from weboob.tools.newsfeed import Newsfeed
class NewspaperPresseuropBackend(GenericNewspaperBackend, ICapMessages):
    # Weboob backend exposing Presseurop RSS articles as message threads.
    MAINTAINER = 'Florent Fourcot'
    EMAIL = 'weboob@flo.fourcot.fr'
    VERSION = '0.d'
    LICENSE = 'AGPLv3+'
    STORAGE = {'seen': {}}
    NAME = 'presseurop'
    DESCRIPTION = u'Presseurop website'
    BROWSER = NewspaperPresseuropBrowser
    RSSID = staticmethod(rssid)
    URL2ID = staticmethod(url2id)
    RSSSIZE = 50
    CONFIG = BackendConfig(Value('lang', label='Lang of articles',
                           choices={'fr': 'fr', 'de': 'de', 'en': 'en', 'cs': 'cs', 'es': 'es', 'it': 'it', 'nl': 'nl', 'pl': 'pl', 'pt': 'pt', 'ro': 'ro'}, default='fr'))

    def __init__(self, *args, **kwargs):
        GenericNewspaperBackend.__init__(self, *args, **kwargs)
        # The feed URL is language-specific.
        self.RSS_FEED = 'http://www.presseurop.eu/%s/rss.xml' % (self.config['lang'].get())

    def iter_threads(self):
        # Yield one Thread per entry of the configured RSS feed.
        for article in Newsfeed(self.RSS_FEED, self.RSSID).iter_entries():
            thread = Thread(article.link)
            thread.title = article.title
            thread.date = article.datetime
            yield(thread)
| Python | 0 |
a3ae8a7ece6bc75437b9848cf43335690250c128 | exit http server gracefully | presstatic/__main__.py | presstatic/__main__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import SimpleHTTPServer
import SocketServer
from clint.textui import colored, puts, indent
from presstatic import help
from presstatic.builders import SiteBuilder
from presstatic.storage import s3
def http_server(host, port, dir):
    """Serve *dir* over HTTP at host:port until interrupted, then shut down.

    Blocks in serve_forever(); a KeyboardInterrupt (Ctrl-C) exits gracefully
    and closes the listening socket. Note: chdirs into *dir* as a side effect.
    """
    os.chdir(dir)
    handler_cls = SimpleHTTPServer.SimpleHTTPRequestHandler
    server = SocketServer.TCPServer((host, int(port)), handler_cls)

    with indent(4, quote='>>'):
        puts(colored.green("Serving {path}".format(path=dir)))
        puts(colored.yellow("@ {host}:{port} ".format(host=host, port=port)))

    try:
        server.serve_forever()
    except KeyboardInterrupt:
        pass
    server.server_close()
def main():
    """Parse CLI arguments, build the site, then serve it or deploy it."""
    cli_parser = argparse.ArgumentParser(prog='presstatic')
    cli_parser.add_argument('-output',
                            help="relative directory for the generated files.",
                            default='public')
    cli_parser.add_argument('-http',
                            metavar='HOST:PORT',
                            help="creates an HTTP Server with <directory> as root dir.")
    cli_parser.add_argument('-s3',
                            help="deploy on the specified S3 bucket.",
                            metavar='bucket')
    cli_parser.add_argument('directory',
                            help='directory containing the static website.')
    cli_args = cli_parser.parse_args()

    builder = SiteBuilder(cli_args.directory, output=cli_args.output)
    builder.build()

    if cli_args.http:
        host, port = cli_args.http.split(':')
        # Serve the generated output, not the source directory.
        http_server(host, port, os.path.join(cli_args.directory, cli_args.output))
    elif cli_args.s3:
        s3.S3Storage(cli_args.s3).store(builder.output_path)
        puts(help.s3_setup(bucket=cli_args.s3))
# Script entry point.
if __name__ == '__main__':
    main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import argparse
import SimpleHTTPServer
import SocketServer
from clint.textui import colored, puts, indent
from presstatic import help
from presstatic.builders import SiteBuilder
from presstatic.storage import s3
def http_server_on_dir(host, port, dir):
    """Serve *dir* over HTTP at host:port until interrupted.

    Fix: previously a Ctrl-C dumped a KeyboardInterrupt traceback and the
    listening socket was never closed. The interrupt is now caught so the
    server exits gracefully and releases the socket.
    """
    Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
    os.chdir(dir)
    httpd = SocketServer.TCPServer((host, int(port)), Handler)

    with indent(4, quote='>>'):
        puts(colored.green("Serving {path}".format(path=dir)))
        puts(colored.yellow("@ {host}:{port} ".format(host=host, port=port)))

    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    httpd.server_close()
def main():
    # Parse CLI arguments, build the static site, then either serve it over
    # HTTP or deploy it to the given S3 bucket.
    cli_parser = argparse.ArgumentParser(prog='presstatic')
    cli_parser.add_argument('-output',
                            help="relative directory for the generated files.",
                            default='public')
    cli_parser.add_argument('-http',
                            metavar='HOST:PORT',
                            help="creates an HTTP Server with <directory> as root dir.")
    cli_parser.add_argument('-s3',
                            help="deploy on the specified S3 bucket.",
                            metavar='bucket')
    cli_parser.add_argument('directory',
                            help='directory containing the static website.')
    cli_args = cli_parser.parse_args()

    site_builder = SiteBuilder(cli_args.directory, output=cli_args.output)
    site_builder.build()

    if cli_args.http:
        host, port = cli_args.http.split(':')
        # Serve the generated output, not the source directory.
        root_dir = os.path.join(cli_args.directory, cli_args.output)
        http_server_on_dir(host, port, root_dir)
    elif cli_args.s3:
        s3.S3Storage(cli_args.s3).store(site_builder.output_path)
        puts(help.s3_setup(bucket=cli_args.s3))
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 0.000001 |
c272b597a7ec2a1a5596daecce144001ae8a7d99 | fix simulate device | fluxghost/websocket/touch.py | fluxghost/websocket/touch.py |
from uuid import UUID
import logging
import json
from fluxclient.upnp.task import UpnpTask
from .base import WebSocketBase
logger = logging.getLogger("WS.DISCOVER")
class WebsocketTouch(WebSocketBase):
    """Websocket endpoint that authenticates ("touches") a device by UUID and
    replies with a JSON status message."""

    def __init__(self, *args):
        WebSocketBase.__init__(self, *args)

    def on_text_message(self, message):
        # Expected payload: {"uuid": <32 hex chars>, "password": <optional>}.
        try:
            request = json.loads(message)
            self.touch_device(UUID(hex=request["uuid"]), request.get("password"))
        except Exception:
            logger.exception("Touch error")
            self.close()

    def _run_auth(self, task, password=None):
        # Retry up to 3 times when the remote times out; re-raise otherwise.
        retries_left = 3
        while True:
            try:
                if password:
                    return task.auth_with_password(password)
                return task.auth_without_password()
            except RuntimeError as e:
                if e.args[0] == "TIMEOUT" and retries_left > 0:
                    logger.warn("Remote no response, retry")
                    retries_left -= 1
                else:
                    raise

    def touch_device(self, uuid, password=None):
        try:
            if uuid.hex == "0" * 32:
                # Simulated device: answer immediately, never touch hardware.
                self.send_text(json.dumps({
                    "serial": "SIMULATE00",
                    "name": "Simulate Device",
                    "has_response": True,
                    "reachable": True,
                    "auth": True
                }))
                return

            task = UpnpTask(uuid, lookup_timeout=30.0)
            resp = self._run_auth(task, password)
            self.send_text(json.dumps({
                "uuid": uuid.hex,
                "serial": task.serial,
                "name": task.name,
                "has_response": resp is not None,
                "reachable": True,
                "auth": resp and resp.get("status") == "ok"
            }))
        except RuntimeError as err:
            logger.debug("Error: %s" % err)
            self.send_text(json.dumps({
                "uuid": uuid.hex,
                "has_response": False,
                "reachable": False,
                "auth": False
            }))
|
from uuid import UUID
import logging
import json
from fluxclient.upnp.task import UpnpTask
from .base import WebSocketBase
logger = logging.getLogger("WS.DISCOVER")
class WebsocketTouch(WebSocketBase):
    """Websocket endpoint that authenticates ("touches") a device by UUID and
    replies with a JSON status message."""

    def __init__(self, *args):
        WebSocketBase.__init__(self, *args)

    def on_text_message(self, message):
        # Expected payload: {"uuid": <32 hex chars>, "password": <optional>}.
        try:
            payload = json.loads(message)
            uuid = UUID(hex=payload["uuid"])
            password = payload.get("password")
            self.touch_device(uuid, password)
        except Exception:
            logger.exception("Touch error")
            self.close()

    def _run_auth(self, task, password=None):
        # Retry up to 3 times when the remote times out; re-raise otherwise.
        ttl = 3
        while True:
            try:
                if password:
                    return task.auth_with_password(password)
                else:
                    return task.auth_without_password()
            except RuntimeError as e:
                if e.args[0] == "TIMEOUT" and ttl > 0:
                    logger.warn("Remote no response, retry")
                    ttl -= 1
                else:
                    raise

    def touch_device(self, uuid, password=None):
        try:
            if uuid.hex == "0" * 32:
                self.send_text(json.dumps({
                    "serial": "SIMULATE00",
                    "name": "Simulate Device",
                    "has_response": True,
                    "reachable": True,
                    "auth": True
                }))
                # Bug fix: stop here -- previously execution fell through and
                # tried to reach a real device with the all-zero UUID.
                return

            task = UpnpTask(uuid, lookup_timeout=30.0)
            resp = self._run_auth(task, password)
            self.send_text(json.dumps({
                "uuid": uuid.hex,
                "serial": task.serial,
                "name": task.name,
                "has_response": resp is not None,
                "reachable": True,
                "auth": resp and resp.get("status") == "ok"
            }))
        except RuntimeError as err:
            logger.debug("Error: %s" % err)
            self.send_text(json.dumps({
                "uuid": uuid.hex,
                "has_response": False,
                "reachable": False,
                "auth": False
            }))
| Python | 0.000002 |
4bc9217f15ee394332ab54efdb96ded056825c2b | Add todo to handle shuffling of tracks. | mopidy_pandora/doubleclick.py | mopidy_pandora/doubleclick.py | import logging
import time
from mopidy.internal import encoding
from pandora.errors import PandoraException
from mopidy_pandora.library import PandoraUri
logger = logging.getLogger(__name__)
class DoubleClickHandler(object):
    """Maps pause/resume/skip "double click" gestures onto Pandora API calls
    such as thumbs up/down, sleep and bookmarks."""

    def __init__(self, config, client):
        self.on_pause_resume_click = config["on_pause_resume_click"]
        self.on_pause_next_click = config["on_pause_next_click"]
        self.on_pause_previous_click = config["on_pause_previous_click"]
        self.double_click_interval = config['double_click_interval']
        self.client = client
        self._click_time = 0

    def set_click_time(self, click_time=None):
        # Record a click timestamp; None means "now".
        self._click_time = time.time() if click_time is None else click_time

    def get_click_time(self):
        return self._click_time

    def is_double_click(self):
        # A double click means a prior click was recorded within the
        # configured interval. On a miss the timer is reset.
        within_interval = (self._click_time > 0 and
                           time.time() - self._click_time < float(self.double_click_interval))
        if not within_interval:
            self._click_time = 0
        return within_interval

    def on_change_track(self, active_track_uri, new_track_uri):
        from mopidy_pandora.uri import PandoraUri

        if not self.is_double_click():
            return False

        if active_track_uri is not None:
            new_index = int(PandoraUri.parse(new_track_uri).index)
            active_index = int(PandoraUri.parse(active_track_uri).index)

            # TODO: the order of the tracks will no longer be sequential if
            # the user has 'shuffled' the tracklist. Need a better approach
            # for determining whether 'next' or 'previous' was clicked.
            if new_index > active_index or (new_index == 0 and active_index == 2):
                return self.process_click(self.on_pause_next_click, active_track_uri)
            if new_index < active_index or new_index == active_index:
                return self.process_click(self.on_pause_previous_click, active_track_uri)

        return False

    def on_resume_click(self, track_uri, time_position):
        # Only counts if playback had progressed past the very beginning.
        if not self.is_double_click() or time_position == 0:
            return False
        return self.process_click(self.on_pause_resume_click, track_uri)

    def process_click(self, method, track_uri):
        """Reset the click timer and invoke the configured event handler."""
        self.set_click_time(0)
        uri = PandoraUri.parse(track_uri)
        logger.info("Triggering event '%s' for song: %s", method, uri.name)
        handler = getattr(self, method)
        try:
            handler(uri.token)
        except PandoraException as e:
            logger.error('Error calling event: %s', encoding.locale_decode(e))
            return False
        return True

    def thumbs_up(self, track_token):
        return self.client.add_feedback(track_token, True)

    def thumbs_down(self, track_token):
        return self.client.add_feedback(track_token, False)

    def sleep(self, track_token):
        return self.client.sleep_song(track_token)

    def add_artist_bookmark(self, track_token):
        return self.client.add_artist_bookmark(track_token)

    def add_song_bookmark(self, track_token):
        return self.client.add_song_bookmark(track_token)
| import logging
import time
from mopidy.internal import encoding
from pandora.errors import PandoraException
from mopidy_pandora.library import PandoraUri
logger = logging.getLogger(__name__)
class DoubleClickHandler(object):
    """Maps pause/resume/skip "double click" gestures onto Pandora API calls
    such as thumbs up/down, sleep and bookmarks."""

    def __init__(self, config, client):
        self.on_pause_resume_click = config["on_pause_resume_click"]
        self.on_pause_next_click = config["on_pause_next_click"]
        self.on_pause_previous_click = config["on_pause_previous_click"]
        self.double_click_interval = config['double_click_interval']
        self.client = client
        self._click_time = 0

    def set_click_time(self, click_time=None):
        # Record a click timestamp; None means "now".
        if click_time is None:
            self._click_time = time.time()
        else:
            self._click_time = click_time

    def get_click_time(self):
        return self._click_time

    def is_double_click(self):
        # True if a previous click was recorded within the configured
        # interval; on a miss the click timer is reset.
        double_clicked = self._click_time > 0 and time.time() - self._click_time < float(self.double_click_interval)

        if double_clicked is False:
            self._click_time = 0

        return double_clicked

    def on_change_track(self, active_track_uri, new_track_uri):
        from mopidy_pandora.uri import PandoraUri

        if not self.is_double_click():
            return False

        if active_track_uri is not None:
            new_track_index = int(PandoraUri.parse(new_track_uri).index)
            active_track_index = int(PandoraUri.parse(active_track_uri).index)

            # NOTE(review): the `new == 0 and active == 2` wrap-around check
            # assumes a sequential tracklist; it looks wrong when the
            # tracklist is shuffled -- confirm before relying on it.
            if new_track_index > active_track_index or new_track_index == 0 and active_track_index == 2:
                return self.process_click(self.on_pause_next_click, active_track_uri)

            elif new_track_index < active_track_index or new_track_index == active_track_index:
                return self.process_click(self.on_pause_previous_click, active_track_uri)

        return False

    def on_resume_click(self, track_uri, time_position):
        # Only counts if playback had progressed past the very beginning.
        if not self.is_double_click() or time_position == 0:
            return False

        return self.process_click(self.on_pause_resume_click, track_uri)

    def process_click(self, method, track_uri):
        # Reset the click timer and dispatch to the configured handler method.
        self.set_click_time(0)

        uri = PandoraUri.parse(track_uri)
        logger.info("Triggering event '%s' for song: %s", method, uri.name)

        func = getattr(self, method)
        try:
            func(uri.token)
        except PandoraException as e:
            logger.error('Error calling event: %s', encoding.locale_decode(e))
            return False
        return True

    def thumbs_up(self, track_token):
        return self.client.add_feedback(track_token, True)

    def thumbs_down(self, track_token):
        return self.client.add_feedback(track_token, False)

    def sleep(self, track_token):
        return self.client.sleep_song(track_token)

    def add_artist_bookmark(self, track_token):
        return self.client.add_artist_bookmark(track_token)

    def add_song_bookmark(self, track_token):
        return self.client.add_song_bookmark(track_token)
| Python | 0 |
d4f083d0cc1096152d7d5cd13c63e82f1a58a298 | handle testrunner exits as PR failures | spawner.py | spawner.py | #!/usr/bin/env python3
# Parse the YAML file, start the testrunners in parallel,
# and wait for them.
import os
import sys
import traceback
import subprocess
import utils.parser as parser
import utils.ghupdate as ghupdate
def main():
    "Main entry point."
    try:
        n_suites = parse_suites()
    except SyntaxError as e:
        # Print the error locally for feedback, but exit nicely and report
        # the YAML problem as a GitHub commit status instead of crashing.
        traceback.print_exc()
        msg = e.msg
        if e.__cause__ is not None:
            msg = msg + ": " + e.__cause__.msg
        update_gh('error', msg)
    else:
        spawn_testrunners(n_suites)
        count_failures(n_suites)
def parse_suites():
    """Parse the repo's .redhat-ci.yml and flush each suite to disk.

    Each testsuite is written to state/suite-<idx>/parsed. Returns the
    number of testsuites found.

    Fix: the old `return idx + 1` raised UnboundLocalError when the file
    defined zero suites; a counter is now used instead.
    """
    yml_file = os.path.join('checkouts',
                            os.environ['github_repo'],
                            '.redhat-ci.yml')

    # this should have been checked already
    assert os.path.isfile(yml_file)

    n_suites = 0
    for idx, suite in enumerate(parser.load_suites(yml_file)):
        suite_dir = 'state/suite-%d/parsed' % idx
        parser.flush_suite(suite, suite_dir)
        n_suites = idx + 1

    # return the number of testsuites
    return n_suites
def spawn_testrunners(n):
    """Start one testrunner subprocess per suite and wait for all of them.

    Raises Exception if any runner exits nonzero.
    """
    testrunner = os.path.join(sys.path[0], "testrunner")
    runners = [subprocess.Popen([testrunner, str(i)]) for i in range(n)]

    # We don't implement any fail fast here, so just do a naive wait to
    # collect them all before inspecting the exit codes.
    exit_codes = [runner.wait() for runner in runners]

    # NB: When we say 'failed' here, we're talking about infrastructure
    # failure. Bad PR code should never cause rc != 0.
    if any(rc != 0 for rc in exit_codes):
        raise Exception("at least one runner failed")
def count_failures(n):
    """Write state/failures with the number of failed suites.

    It's helpful to have an easy global way to figure out if any of the
    suites failed, e.g. for integration in Jenkins. A suite counts as failed
    when its rc file holds a nonzero exit code, or when the rc file is
    missing entirely (the runner exited nicely but a semantic error in the
    YAML -- bad Docker image, bad ostree revision, etc. -- prevented a run).
    """
    failed = 0
    for i in range(n):
        rc_path = "state/suite-%d/rc" % i
        if not os.path.isfile(rc_path):
            failed += 1
            continue
        with open(rc_path) as f:
            if int(f.read().strip()) != 0:
                failed += 1

    with open("state/failures", "w") as f:
        f.write("%d" % failed)
def update_gh(state, description):
    """Post a 'Red Hat CI' commit status to GitHub for the tested commit."""
    status = {'repo': os.environ['github_repo'],
              'commit': os.environ['github_commit'],
              'token': os.environ['github_token'],
              'state': state,
              'context': 'Red Hat CI',
              'description': description}
    ghupdate.send(**status)

    # If we tested a merge commit, also update the status of the original
    # head sha recorded in state/sha.
    if os.path.isfile('state/is_merge_sha'):
        with open('state/sha') as f:
            status['commit'] = f.read()
        ghupdate.send(**status)
# Script entry point; use main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| #!/usr/bin/env python3
# Parse the YAML file, start the testrunners in parallel,
# and wait for them.
import os
import sys
import traceback
import subprocess
import utils.parser as parser
import utils.ghupdate as ghupdate
def main():
    "Main entry point."
    try:
        n = parse_suites()
    except SyntaxError as e:
        # print the error to give feedback, but exit nicely
        traceback.print_exc()
        msg = e.msg
        if e.__cause__ is not None:
            msg += ": " + e.__cause__.msg
        # Report the YAML problem as a GitHub commit status.
        update_gh('error', msg)
    else:
        spawn_testrunners(n)
        count_failures(n)
def parse_suites():
    # Parse the repo's .redhat-ci.yml and flush each testsuite's parsed form
    # to state/suite-<idx>/parsed.
    yml_file = os.path.join('checkouts',
                            os.environ['github_repo'],
                            '.redhat-ci.yml')

    # this should have been checked already
    assert os.path.isfile(yml_file)

    for idx, suite in enumerate(parser.load_suites(yml_file)):
        suite_dir = 'state/suite-%d/parsed' % idx
        parser.flush_suite(suite, suite_dir)

    # return the number of testsuites
    # NOTE(review): raises UnboundLocalError if the file defines no suites --
    # confirm load_suites() always yields at least one.
    return idx + 1
def spawn_testrunners(n):
    # Launch one testrunner subprocess per suite and wait for all of them.
    testrunner = os.path.join(sys.path[0], "testrunner")

    runners = []
    for i in range(n):
        p = subprocess.Popen([testrunner, str(i)])
        runners.append(p)

    # We don't implement any fail fast here, so just do a
    # naive wait to collect them all.
    failed = False
    for runner in runners:
        if runner.wait() != 0:
            failed = True

    # NB: When we say 'failed' here, we're talking about
    # infrastructure failure. Bad PR code should never cause
    # rc != 0.
    if failed:
        raise Exception("at least one runner failed")
def count_failures(n):
    """Write state/failures with the number of failed suites.

    It's helpful to have an easy global way to figure out if any of the
    suites failed, e.g. for integration in Jenkins.

    Fix: a missing rc file (e.g. a semantic error in the YAML prevented the
    suite from running at all) previously crashed this function with a
    FileNotFoundError; it now counts as a failure instead.
    """
    failed = 0
    for i in range(n):
        rc_file = "state/suite-%d/rc" % i
        if not os.path.isfile(rc_file):
            failed += 1
        else:
            with open(rc_file) as f:
                if int(f.read().strip()) != 0:
                    failed += 1
    with open("state/failures", "w") as f:
        f.write("%d" % failed)
def update_gh(state, description):
    # Post a 'Red Hat CI' commit status to GitHub for the tested commit.
    args = {'repo': os.environ['github_repo'],
            'commit': os.environ['github_commit'],
            'token': os.environ['github_token'],
            'state': state,
            'context': 'Red Hat CI',
            'description': description}

    ghupdate.send(**args)

    # If we tested a merge commit, also update the status of the original
    # head sha recorded in state/sha.
    if os.path.isfile('state/is_merge_sha'):
        with open('state/sha') as f:
            args['commit'] = f.read()
        ghupdate.send(**args)
# Script entry point; use main()'s return value as the process exit status.
if __name__ == '__main__':
    sys.exit(main())
| Python | 0 |
9381657d8ad2e96c2537c89fad7a4b3d8d8ddff5 | Add gpu parameter to htcondor_at_vispa example. | examples/htcondor_at_vispa/analysis/framework.py | examples/htcondor_at_vispa/analysis/framework.py | # -*- coding: utf-8 -*-
"""
Law example tasks to demonstrate HTCondor workflows at VISPA.
In this file, some really basic tasks are defined that can be inherited by
other tasks to receive the same features. This is usually called "framework"
and only needs to be defined once per user / group / etc.
"""
import os
import luigi
import law
import law.contrib.htcondor
class Task(law.Task):
    """
    Base task that we use to force a version parameter on all inheriting tasks, and that provides
    some convenience methods to create local file and directory targets at the default data path.
    """

    version = luigi.Parameter()

    def store_parts(self):
        # Path fragments that locate this task's outputs on disk.
        return (self.__class__.__name__, self.version)

    def local_path(self, *path):
        # ANALYSIS_DATA_PATH is defined in setup.sh
        base = os.getenv("ANALYSIS_DATA_PATH")
        return os.path.join(base, *(self.store_parts() + path))

    def local_target(self, *path):
        return law.LocalFileTarget(self.local_path(*path))
class HTCondorWorkflow(law.contrib.htcondor.HTCondorWorkflow):
    """
    Batch systems are typically very heterogeneous by design, and so is HTCondor. Law does not aim
    to "magically" adapt to all possible HTCondor setups which would certainly end in a mess.
    Therefore we have to configure the base HTCondor workflow in law.contrib.htcondor to work with
    the VISPA environment. In most cases, like in this example, only a minimal amount of
    configuration is required.
    """

    # Fix: the keyword was misspelled "desription"; luigi's Parameter does
    # not accept that keyword, so the help text was never applied correctly.
    htcondor_gpus = luigi.IntParameter(default=law.NO_INT, significant=False, description="number "
        "of GPUs to request on the VISPA cluster, leave empty to use only CPUs")

    def htcondor_output_directory(self):
        # the directory where submission meta data should be stored
        return law.LocalDirectoryTarget(self.local_path())

    def htcondor_create_job_file_factory(self):
        # tell the factory, which is responsible for creating our job files,
        # that the files are not temporary, i.e., it should not delete them after submission
        factory = super(HTCondorWorkflow, self).htcondor_create_job_file_factory()
        factory.is_tmp = False
        return factory

    def htcondor_bootstrap_file(self):
        # each job can define a bootstrap file that is executed prior to the actual job
        # in order to setup software and environment variables
        return law.util.rel_path(__file__, "bootstrap.sh")

    def htcondor_job_config(self, config, job_num, branches):
        # render_data is rendered into all files sent with a job
        config.render_variables["analysis_path"] = os.getenv("ANALYSIS_PATH")

        # tell the job config if GPUs are requested
        if not law.is_no_param(self.htcondor_gpus):
            config.custom_config = [("request_gpus", self.htcondor_gpus)]

        return config
| # -*- coding: utf-8 -*-
"""
Law example tasks to demonstrate HTCondor workflows at VISPA.
In this file, some really basic tasks are defined that can be inherited by
other tasks to receive the same features. This is usually called "framework"
and only needs to be defined once per user / group / etc.
"""
import os
import luigi
import law
import law.contrib.htcondor
class Task(law.Task):
    """
    Base task that we use to force a version parameter on all inheriting tasks, and that provides
    some convenience methods to create local file and directory targets at the default data path.
    """

    version = luigi.Parameter()

    def store_parts(self):
        # Path fragments that locate this task's outputs on disk.
        return (self.__class__.__name__, self.version)

    def local_path(self, *path):
        # ANALYSIS_DATA_PATH is defined in setup.sh
        parts = (os.getenv("ANALYSIS_DATA_PATH"),) + self.store_parts() + path
        return os.path.join(*parts)

    def local_target(self, *path):
        # File target below ANALYSIS_DATA_PATH/<TaskName>/<version>/.
        return law.LocalFileTarget(self.local_path(*path))
class HTCondorWorkflow(law.contrib.htcondor.HTCondorWorkflow):
    """
    Batch systems are typically very heterogeneous by design, and so is HTCondor. Law does not aim
    to "magically" adapt to all possible HTCondor setups which would certainly end in a mess.
    Therefore we have to configure the base HTCondor workflow in law.contrib.htcondor to work with
    the VISPA environment. In most cases, like in this example, only a minimal amount of
    configuration is required.
    """

    def htcondor_output_directory(self):
        # the directory where submission meta data should be stored
        return law.LocalDirectoryTarget(self.local_path())

    def htcondor_create_job_file_factory(self):
        # tell the factory, which is responsible for creating our job files,
        # that the files are not temporary, i.e., it should not delete them after submission
        factory = super(HTCondorWorkflow, self).htcondor_create_job_file_factory()
        factory.is_tmp = False
        return factory

    def htcondor_bootstrap_file(self):
        # each job can define a bootstrap file that is executed prior to the actual job
        # in order to setup software and environment variables
        return law.util.rel_path(__file__, "bootstrap.sh")

    def htcondor_job_config(self, config, job_num, branches):
        # render_data is rendered into all files sent with a job
        config.render_variables["analysis_path"] = os.getenv("ANALYSIS_PATH")
        return config
| Python | 0 |
a5de78328cc2e63c901c409fb6c7b376abf921e2 | reorganize code the object oriented way | src/scopus.py | src/scopus.py | from itertools import chain
import requests
_HEADERS = {'Accept': 'application/json'}
class ScopusResponse(object):
    """Wrapper around a Scopus search JSON payload with pagination helpers."""

    def __init__(self, data):
        self._data = data

    def __getitem__(self, key):
        # Shortcut into the 'search-results' envelope of the payload.
        return self._data['search-results'][key]

    def __iter__(self):
        # Yield this page, then every following page until exhausted.
        page = self
        while page is not None:
            yield page
            page = page.next()

    def entries(self):
        if self.is_empty():
            return []
        return self['entry']

    def is_empty(self):
        return int(self['opensearch:totalResults']) == 0

    def next_page_url(self):
        # The 'next' link is absent on the last page.
        try:
            matches = (l['@href'] for l in self['link'] if l['@ref'] == 'next')
            return next(matches)
        except:
            return None

    def next(self):
        url = self.next_page_url()
        if not url:
            return None
        resp = requests.get(url, headers=_HEADERS)
        return ScopusResponse(resp.json())

    def all_entries(self):
        """Collect the entries of every page into one flat list."""
        result = []
        for page in self:
            result.extend(page.entries())
        return result
class ScopusClient(object):
    """Thin client for the Scopus author and document search endpoints."""

    _AUTHOR_API = 'http://api.elsevier.com/content/search/author'
    _SCOPUS_API = 'http://api.elsevier.com/content/search/scopus'

    def __init__(self, apiKey):
        self.apiKey = apiKey

    def _api(self, endpoint, query):
        # All endpoints share the same key/query parameter scheme.
        params = {'apiKey': self.apiKey, 'query': query}
        resp = requests.get(endpoint, headers=_HEADERS, params=params)
        return ScopusResponse(resp.json())

    def authorSearch(self, query):
        return self._api(self._AUTHOR_API, query)

    def scopusSearch(self, query):
        return self._api(self._SCOPUS_API, query)

    def get_scopus_entries(self, same_author_ids):
        """All document entries authored by any of the given author ids."""
        query = ' OR '.join('AU-ID(%s)' % ID for ID in same_author_ids)
        return self.scopusSearch(query).all_entries()

    def get_authors(self, first, last, affil, subjabbr):
        """Author-search entries matching the given name/affiliation/subject."""
        query = ('authlast(%s) AND authfirst(%s) AND affil(%s) AND subjabbr(%s)'
                 % (last, first, affil, subjabbr))
        return self.authorSearch(query).all_entries()
| import requests
class ScopusClient(object):
    # Minimal Scopus search client; the *Search methods return raw responses,
    # while the get_* helpers return fully-materialized entry lists.
    _AUTHOR_API = 'http://api.elsevier.com/content/search/author'
    _SCOPUS_API = 'http://api.elsevier.com/content/search/scopus'
    _HEADERS = {'Accept': 'application/json'}

    def __init__(self, apiKey):
        self.apiKey = apiKey

    def _api(self, endpoint, query, parse=False):
        # GET the endpoint with the API key; return decoded JSON when
        # parse=True, otherwise the raw requests response object.
        resp = requests.get(endpoint, headers=self._HEADERS,
                            params={'apiKey': self.apiKey, 'query': query})
        return resp.json() if parse else resp

    def _is_empty(self, resp):
        return int(resp['search-results']['opensearch:totalResults']) == 0

    def _all_pages(self, resp):
        # Generator over result pages, following the 'next' link until gone.
        # NOTE(review): the bare `except: break` also swallows network and
        # decoding errors, not only the missing-'next'-link case -- confirm
        # this best-effort behaviour is intended.
        if not self._is_empty(resp):
            while True:
                yield resp
                try:
                    url = next(li['@href']
                               for li in resp['search-results']['link']
                               if li['@ref'] == 'next')
                    resp = requests.get(url, headers=self._HEADERS).json()
                except: break

    def authorSearch(self, query, parse=False):
        return self._api(self._AUTHOR_API, query, parse)

    def scopusSearch(self, query, parse=False):
        return self._api(self._SCOPUS_API, query, parse)

    def get_scopus_entries(self, same_author_ids):
        # All document entries authored by any of the given Scopus author ids.
        query = ' OR '.join('AU-ID(%s)' % ID for ID in same_author_ids)
        resp = self.scopusSearch(query, parse=True)
        return [entry for page in self._all_pages(resp)
                for entry in page['search-results']['entry']]

    def get_authors(self, first, last, affil, subjabbr):
        # First page of author-search results for the given name/affiliation.
        query = ('authlast(%s) AND authfirst(%s) AND affil(%s) AND subjabbr(%s)'
                 % (last, first, affil, subjabbr))
        resp = self.authorSearch(query, parse=True)
        return [] if self._is_empty(resp) else resp['search-results']['entry']
| Python | 0.000153 |
28601b95720474a4f8701d6cb784ac6ec3618d49 | add support for wheel DL proxy (#14010) | tools/download-wheels.py | tools/download-wheels.py | #!/usr/bin/env python
"""
Download SciPy wheels from Anaconda staging area.
"""
import sys
import os
import re
import shutil
import argparse
import urllib
import urllib3
from bs4 import BeautifulSoup
__version__ = '0.1'
# Edit these for other projects.
STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/scipy'
PREFIX = 'scipy'
def http_manager():
    """
    Return a urllib3 request manager, honouring the proxy settings from the
    environment when available.

    Prefers a scheme-specific proxy — ``https`` first (the staging site is
    served over HTTPS; the original only checked ``http``), then ``http`` —
    falling back to the catch-all ``all`` entry, and finally to a direct,
    certificate-verified connection pool.
    """
    proxy_dict = urllib.request.getproxies()
    proxy = (proxy_dict.get('https')
             or proxy_dict.get('http')
             or proxy_dict.get('all'))
    if proxy:
        return urllib3.ProxyManager(proxy)
    return urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
def get_wheel_names(version):
    """Return the wheel file names for a release from the Anaconda index.

    Fetches the multibuild-wheels-staging file listing and collects every
    text node matching ``<PREFIX>-<version>-*.whl``.

    Parameters
    ----------
    version : str
        The release version. For instance, "1.5.0".
    """
    pattern = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$")
    listing = http_manager().request('GET', f"{STAGING_URL}/files")
    page = BeautifulSoup(listing.data, 'html.parser')
    return page.findAll(text=pattern)
def download_wheels(version, wheelhouse):
    """Download release wheels.

    The release wheels for the given SciPy version are downloaded
    into the given directory.

    Parameters
    ----------
    version : str
        The release version. For instance, "1.5.0".
    wheelhouse : str
        Directory in which to download the wheels.
    """
    http = http_manager()
    wheel_names = get_wheel_names(version)
    for count, name in enumerate(wheel_names, start=1):
        source_url = f"{STAGING_URL}/{version}/download/{name}"
        target_path = os.path.join(wheelhouse, name)
        with open(target_path, 'wb') as out:
            with http.request('GET', source_url, preload_content=False) as resp:
                print(f"{count:<4}{name}")
                shutil.copyfileobj(resp, out)
    print(f"\nTotal files downloaded: {len(wheel_names)}")
if __name__ == '__main__':
    # Command-line entry point: parse arguments, validate the destination
    # directory, then fetch every wheel for the requested release.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "version",
        help="SciPy version to download.")
    parser.add_argument(
        "-w", "--wheelhouse",
        default=os.path.join(os.getcwd(), "release", "installers"),
        help="Directory in which to store downloaded wheels\n"
             "[defaults to <cwd>/release/installers]")
    args = parser.parse_args()
    # Expand "~" so a user-supplied path works regardless of shell quoting.
    wheelhouse = os.path.expanduser(args.wheelhouse)
    if not os.path.isdir(wheelhouse):
        # Fail early rather than erroring mid-download.
        raise RuntimeError(
            f"{wheelhouse} wheelhouse directory is not present."
            " Perhaps you need to use the '-w' flag to specify one.")
    download_wheels(args.version, wheelhouse)
| #!/usr/bin/env python
"""
Download SciPy wheels from Anaconda staging area.
"""
import sys
import os
import re
import shutil
import argparse
import urllib3
from bs4 import BeautifulSoup
__version__ = '0.1'
# Edit these for other projects.
STAGING_URL = 'https://anaconda.org/multibuild-wheels-staging/scipy'
PREFIX = 'scipy'
def get_wheel_names(version):
""" Get wheel names from Anaconda HTML directory.
This looks in the Anaconda multibuild-wheels-staging page and
parses the HTML to get all the wheel names for a release version.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
tmpl = re.compile(rf"^.*{PREFIX}-{version}-.*\.whl$")
index_url = f"{STAGING_URL}/files"
index_html = http.request('GET', index_url)
soup = BeautifulSoup(index_html.data, 'html.parser')
return soup.findAll(text=tmpl)
def download_wheels(version, wheelhouse):
"""Download release wheels.
The release wheels for the given SciPy version are downloaded
into the given directory.
Parameters
----------
version : str
The release version. For instance, "1.5.0".
wheelhouse : str
Directory in which to download the wheels.
"""
http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED')
wheel_names = get_wheel_names(version)
for i, wheel_name in enumerate(wheel_names):
wheel_url = f"{STAGING_URL}/{version}/download/{wheel_name}"
wheel_path = os.path.join(wheelhouse, wheel_name)
with open(wheel_path, 'wb') as f:
with http.request('GET', wheel_url, preload_content=False,) as r:
print(f"{i + 1:<4}{wheel_name}")
shutil.copyfileobj(r, f)
print(f"\nTotal files downloaded: {len(wheel_names)}")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"version",
help="SciPy version to download.")
parser.add_argument(
"-w", "--wheelhouse",
default=os.path.join(os.getcwd(), "release", "installers"),
help="Directory in which to store downloaded wheels\n"
"[defaults to <cwd>/release/installers]")
args = parser.parse_args()
wheelhouse = os.path.expanduser(args.wheelhouse)
if not os.path.isdir(wheelhouse):
raise RuntimeError(
f"{wheelhouse} wheelhouse directory is not present."
" Perhaps you need to use the '-w' flag to specify one.")
download_wheels(args.version, wheelhouse)
| Python | 0 |
c43bd7f829ce79577421374ba6c00b74adca05aa | add test for using glob matching in a directory tree in file iterator | src/test/file_iterator_tests.py | src/test/file_iterator_tests.py | import nose
from nose.tools import *
from unittest import TestCase
import os
from shutil import rmtree
from tempfile import mkdtemp
from file_iterator import FileIterator
class FileIteratorTests(TestCase):
    """Tests for FileIterator: directory-tree walking and glob filtering.

    Each test runs against a throwaway temporary directory created in
    setUp and removed in tearDown.
    """

    def setUp(self):
        self.directory = mkdtemp('-gb-file-iterator-tests')
        self.file_iterator = FileIterator(self.directory)

    def tearDown(self):
        rmtree(self.directory)

    def test_iterate_files_in_single_directory(self):
        self._create_file(self.directory, 'file1')
        self._create_file(self.directory, 'file2')
        s = set(self.file_iterator.files())
        eq_(set(self._prepend_dir(['file1', 'file2'])), s)

    def test_iterate_files_in_directory_tree(self):
        # Build a three-level tree with one file per directory.
        self._create_file(self.directory, 'file0')
        dir1 = self._create_dir(self.directory, 'dir1')
        self._create_file(dir1, 'file1')
        dir2 = self._create_dir(dir1, 'dir2')
        self._create_file(dir2, 'file2')
        dir3 = self._create_dir(self.directory, 'dir3')
        self._create_file(dir3, 'file3')
        expected = set()
        expected.add(os.path.join(self.directory, 'file0'))
        expected.add(os.path.join(dir1, 'file1'))
        expected.add(os.path.join(dir2, 'file2'))
        expected.add(os.path.join(dir3, 'file3'))
        s = set(self.file_iterator.files())
        eq_(expected, s)

    def test_match_files_by_glob(self):
        self.file_iterator.set_glob("*.java")
        self._create_file(self.directory, 'file.txt')
        self._create_file(self.directory, 'file.java')
        s = set(self.file_iterator.files())
        eq_(set([os.path.join(self.directory, 'file.java')]), s)

    def test_match_files_in_directory_tree_by_glob(self):
        # The glob must apply at every depth, not just the top level.
        self.file_iterator.set_glob("*.java")
        self._create_file(self.directory, 'file0.java')
        dir1 = self._create_dir(self.directory, 'dir1')
        self._create_file(dir1, 'file1.c')
        dir2 = self._create_dir(dir1, 'dir2')
        self._create_file(dir2, 'file2.java')
        dir3 = self._create_dir(self.directory, 'dir3')
        self._create_file(dir3, 'file3.py')
        expected = set()
        expected.add(os.path.join(self.directory, 'file0.java'))
        expected.add(os.path.join(dir2, 'file2.java'))
        s = set(self.file_iterator.files())
        eq_(expected, s)

    @nottest
    def _create_file(self, directory, filename):
        """Create *filename* in *directory*, using the name as its content."""
        filepath = os.path.join(directory, filename)
        with open(filepath, 'w') as f:
            # The ``with`` block closes the file; the original also called
            # f.close() explicitly inside it, which was redundant.
            f.write(filename)

    @nottest
    def _create_dir(self, directory, dirname):
        """Create and return a subdirectory of *directory*."""
        dirpath = os.path.join(directory, dirname)
        os.mkdir(dirpath)
        return dirpath

    @nottest
    def _prepend_dir(self, files, directory=None):
        """Join each name in *files* onto *directory* (default: the test dir)."""
        if directory is None:
            directory = self.directory
        return [os.path.join(directory, f) for f in files]
| import nose
from nose.tools import *
from unittest import TestCase
import os
from shutil import rmtree
from tempfile import mkdtemp
from file_iterator import FileIterator
class FileIteratorTests(TestCase):
def setUp(self):
self.directory = mkdtemp('-gb-file-iterator-tests')
self.file_iterator = FileIterator(self.directory)
def tearDown(self):
rmtree(self.directory)
def test_iterate_files_in_single_directory(self):
self._create_file(self.directory, 'file1')
self._create_file(self.directory, 'file2')
s = set(self.file_iterator.files())
eq_(set(self._prepend_dir(['file1', 'file2'])), s)
def test_iterate_files_in_directory_tree(self):
self._create_file(self.directory, 'file0')
dir1 = self._create_dir(self.directory, 'dir1')
self._create_file(dir1, 'file1')
dir2 = self._create_dir(dir1, 'dir2')
self._create_file(dir2, 'file2')
dir3 = self._create_dir(self.directory, 'dir3')
self._create_file(dir3, 'file3')
expected = set()
expected.add(os.path.join(self.directory, 'file0'))
expected.add(os.path.join(dir1, 'file1'))
expected.add(os.path.join(dir2, 'file2'))
expected.add(os.path.join(dir3, 'file3'))
s = set(self.file_iterator.files())
eq_(expected, s)
def test_match_files_by_glob(self):
self.file_iterator.set_glob("*.java")
self._create_file(self.directory, 'file.txt')
self._create_file(self.directory, 'file.java')
s = set(self.file_iterator.files())
eq_(set([os.path.join(self.directory, 'file.java')]), s)
@nottest
def _create_file(self, directory, filename):
filepath = os.path.join(directory, filename)
with open(filepath, 'w') as f:
f.write(filename)
f.close()
@nottest
def _create_dir(self, directory, dirname):
dirpath = os.path.join(directory, dirname)
os.mkdir(dirpath)
return dirpath
@nottest
def _prepend_dir(self, files, directory=None):
if directory is None:
directory = self.directory
return [os.path.join(directory, f) for f in files]
| Python | 0 |
9e03e2c83328b5dc1b291dcd43c6fca1a7df2d74 | Implement thread safe object for devkitstatuses | src/server.py | src/server.py | import logging
import asyncio
import flask
import threading
import time
import aiocoap.resource as resource
import aiocoap
app = flask.Flask(__name__,static_folder="../static",static_url_path="/static",template_folder="../templates")
class Nordicnode():
    """Thread-safe state record for a single Nordic devkit.

    All mutators take ``self.lock`` so the Flask thread and the CoAP event
    loop can update a node without racing each other.
    """

    def __init__(self, led="0,0,0,0", active=False, address=None, lastactive=0, name=None):
        self.lock = threading.Lock()
        self.led = led
        self.active = active
        self.address = address
        self.lastactive = lastactive
        self.name = name

    def updateled(self, led):
        with self.lock:
            logging.debug('Acquired a lock')
            self.led = led
        logging.debug('Released a lock')

    def updatestatus(self, active):
        with self.lock:
            self.active = active

    def updateaddress(self, address):
        with self.lock:
            self.address = address

    def updatelastactive(self, lastactive):
        with self.lock:
            self.lastactive = lastactive


# One node per kit, keyed by zero-padded kit id "01".."20".
# Bug fixes: the original ``for i in enumerate(range(1, 20))`` iterated
# (index, value) tuples, so keys came out as "(0, 1)" etc.; it also covered
# only 19 kits while main() registers resources for 20.
DEVICES = {}
for i in range(1, 21):
    DEVICES[str(i).zfill(2)] = Nordicnode(name=str(i).zfill(2))


@app.route("/")
def hello():
    # Bug fix: the route decorator previously sat above the Nordicnode class
    # definition and registered the class as the "/" view; it belongs on the
    # view function.
    return flask.render_template("index.html", name="index")
class LedResource(resource.Resource):
    """CoAP resource controlling a kit's LED state.

    PUT stores the raw payload, mirrors it into ``DEVICES[kit].led`` and
    echoes it back; GET returns the last payload received.
    """

    def __init__(self, kit):
        # Bug fix: the original called super(BlockResource, ...), naming a
        # class that no longer exists in this module (NameError at startup).
        super(LedResource, self).__init__()
        self.kit = kit
        # Last payload seen; a GET before any PUT returns this default
        # (previously ``self.content`` was never initialised and GET raised
        # AttributeError).
        self.content = b""

    @asyncio.coroutine
    def render_get(self, req):
        return aiocoap.Message(code=aiocoap.CONTENT, payload=self.content)

    def render_put(self, req):
        print("Got payload: %s" % req.payload)
        self.content = req.payload
        DEVICES[self.kit].updateled(req.payload.decode('ascii'))
        # Echo back messages
        return aiocoap.Message(code=aiocoap.CHANGED, payload=req.payload)
class LastSeenResource(resource.Resource):
    """CoAP liveness resource: a PUT records when and from where a kit was
    last heard, then echoes the payload back."""

    def __init__(self, kit):
        # Bug fix: the original called super(BlockResource, ...), naming a
        # class that no longer exists in this module (NameError at startup).
        super(LastSeenResource, self).__init__()
        self.kit = kit
        # Last payload seen; a GET before any PUT returns this default
        # (previously ``self.content`` was never initialised).
        self.content = b""

    @asyncio.coroutine
    def render_get(self, req):
        return aiocoap.Message(code=aiocoap.CONTENT, payload=self.content)

    def render_put(self, req):
        print("Got payload: %s" % req.payload)
        self.content = req.payload
        DEVICES[self.kit].updatelastactive(time.time())
        # req.remote is an (address, port) pair; record the sender's address.
        DEVICES[self.kit].updateaddress(req.remote[0])
        # Echo back messages
        return aiocoap.Message(code=aiocoap.CHANGED, payload=req.payload)
def main():
    """Register the per-kit CoAP resources, start the Flask UI in a thread
    and run the CoAP server on the asyncio event loop forever."""
    root = resource.Site()
    # Kits are addressed by zero-padded id "01".."20", matching DEVICES.
    # Bug fix: the original iterated ``enumerate(range(1, 21))``, so ``kit``
    # was an (index, value) tuple and the resource names came out as
    # "(0, 1)" etc. instead of "01".."20".
    for i in range(1, 21):
        kit = str(i).zfill(2)
        root.add_resource((kit, 'button'), LedResource(kit))
        # Bug fix: the original registered ``BlockResource`` here, a class
        # that is not defined in this module; liveness PUTs are handled by
        # LastSeenResource.
        root.add_resource((kit, 'i_am_alive'), LastSeenResource(kit))
    websrv = threading.Thread(
        target=(lambda: app.run(debug=True, port=25565, use_reloader=False)),
        name="Flask-server")
    websrv.start()
    # ensure_future replaces asyncio.async, which is a syntax error on
    # Python 3.7+ (``async`` became a keyword).
    asyncio.ensure_future(aiocoap.Context.create_server_context(root))
    asyncio.get_event_loop().run_forever()


logging.basicConfig(level=logging.INFO)
logging.getLogger("coap-server").setLevel(logging.DEBUG)

if __name__ == "__main__":
    main()
| import logging
import asyncio
import flask
import threading
import aiocoap.resource as resource
import aiocoap
app = flask.Flask(__name__,static_folder="../static",static_url_path="/static",template_folder="../templates")
@app.route("/")
def hello():
return flask.render_template("index.html", name="index")
class BlockResource(resource.Resource):
"""
We will test some of aiocoap here.
Enjoy.
"""
def __init__(self):
super(BlockResource, self).__init__()
self.content = ("test-content: yes please").encode("ascii")
@asyncio.coroutine
def render_get(self, req):
resp = aiocoap.Message(code=aiocoap.CONTENT,payload=self.content)
return resp
def render_put(self,req):
print("Got payload: %s" % req.payload)
self.content = req.payload
"""
Echo back messages
"""
return aiocoap.Message(code=aiocoap.CHANGED,payload=req.payload)
def main():
root = resource.Site()
root.add_resource(('block',),BlockResource())
websrv = threading.Thread(target=(lambda: app.run(debug=True, port=25565, use_reloader=False)), name="Flask-server")
websrv.start()
asyncio.async(aiocoap.Context.create_server_context(root))
asyncio.get_event_loop().run_forever()
logging.basicConfig(level=logging.INFO)
logging.getLogger("coap-server").setLevel(logging.DEBUG)
if __name__ == "__main__":
main()
| Python | 0 |
0bf9c011ca36df2d72dfd3b8fc59f6320e837e43 | Update __init__.py | dmoj/cptbox/__init__.py | dmoj/cptbox/__init__.py | from collections import defaultdict
import sys

from dmoj.cptbox.sandbox import SecurePopen, PIPE
from dmoj.cptbox.handlers import DISALLOW, ALLOW
from dmoj.cptbox.chroot import CHROOTSecurity
from dmoj.cptbox.syscalls import SYSCALL_COUNT

# On Python 2, alias the lazy xrange so building the policy does not
# materialise a list of every syscall number.  Bug fix: ``sys`` was used
# here without being imported, so importing this package raised NameError.
if sys.version_info.major == 2:
    range = xrange


class NullSecurity(defaultdict):
    """Permissive sandbox policy: every known syscall is mapped to ALLOW."""

    def __init__(self):
        for i in range(SYSCALL_COUNT):
            self[i] = ALLOW
| from collections import defaultdict
from dmoj.cptbox.sandbox import SecurePopen, PIPE
from dmoj.cptbox.handlers import DISALLOW, ALLOW
from dmoj.cptbox.chroot import CHROOTSecurity
from dmoj.cptbox.syscalls import SYSCALL_COUNT
class NullSecurity(defaultdict):
def __init__(self):
for i in xrange(SYSCALL_COUNT):
self[i] = ALLOW
| Python | 0.000072 |
ca050969d1267d13986a0494a86f7fd50616937e | Remove old code. | process/scripts/run.py | process/scripts/run.py | """
Document processing script. Does NER for persons, dates, etc.
Usage:
process.py <dbname>
process.py -h | --help | --version
Options:
-h --help Show this screen
--version Version number
"""
import logging
from docopt import docopt
from iepy.db import connect, DocumentManager
from iepy.preprocess import PreProcessPipeline
from process.settings import pipeline_steps
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('process')
    arguments = docopt(__doc__, version=0.1)
    connect(arguments['<dbname>'])
    # Run every configured preprocessing step over all stored documents.
    documents = DocumentManager()
    PreProcessPipeline(pipeline_steps, documents).process_everything()
| """
Document processing script. Does NER for persons, dates, etc.
Usage:
process.py <dbname>
process.py -h | --help | --version
Options:
-h --help Show this screen
--version Version number
"""
import logging
from docopt import docopt
from iepy.db import connect, DocumentManager
from iepy.preprocess import PreProcessPipeline
from process.settings import pipeline_steps
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger('process')
opts = docopt(__doc__, version=0.1)
connect(opts['<dbname>'])
docs = DocumentManager()
"""set_custom_entity_kinds(zip(map(lambda x: x.lower(), CUSTOM_ENTITIES),
CUSTOM_ENTITIES))"""
pipeline = PreProcessPipeline(pipeline_steps, docs)
pipeline.process_everything()
| Python | 0.000045 |
9bc1c77250d338ca93ff33d2832c2b3117b3550e | Use the raw domain for Netflix, because there's only one now | services/netflix.py | services/netflix.py | import urlparse
import foauth.providers
from oauthlib.oauth1.rfc5849 import SIGNATURE_TYPE_QUERY
class Netflix(foauth.providers.OAuth1):
    """OAuth1 provider definition for the Netflix API."""

    # General info about the provider
    provider_url = 'https://www.netflix.com/'
    favicon_url = 'https://netflix.hs.llnwd.net/e1/en_US/icons/nficon.ico'
    docs_url = 'http://developer.netflix.com/docs'
    category = 'Movies/TV'

    # URLs to interact with the API
    request_token_url = 'http://api-public.netflix.com/oauth/request_token'
    authorize_url = 'https://api-user.netflix.com/oauth/login'
    access_token_url = 'http://api-public.netflix.com/oauth/access_token'
    api_domain = 'api-public.netflix.com'

    available_permissions = [
        (None, 'read and manage your queue'),
    ]
    # NOTE(review): https/signature_type are interpreted by the foauth base
    # class -- presumably plain-HTTP endpoints with the OAuth signature in
    # the query string; confirm against foauth.providers.OAuth1.
    https = False
    signature_type = SIGNATURE_TYPE_QUERY

    def get_authorize_params(self, *args, **kwargs):
        """Extend the base authorize params with oauth_consumer_key.

        The Netflix login page appears to require the consumer key alongside
        the request token (assumption based on this override existing --
        verify against the provider docs).
        """
        params = super(Netflix, self).get_authorize_params(*args, **kwargs)
        params['oauth_consumer_key'] = self.client_id
        return params

    def get_user_id(self, key):
        """Return the Netflix user id for the credentials in *key*.

        /users/current responds with a link to the real user resource;
        follow that link and read ``user_id`` from the second response.
        """
        r = self.api(key, self.api_domain, u'/users/current',
                     params={'output': 'json'})
        redirect = r.json[u'resource'][u'link'][u'href']
        parts = urlparse.urlparse(redirect)
        r = self.api(key, parts.netloc, parts.path,
                     params={'output': 'json'})
        return r.json[u'user'][u'user_id']
| import urlparse
import foauth.providers
from oauthlib.oauth1.rfc5849 import SIGNATURE_TYPE_QUERY
class Netflix(foauth.providers.OAuth1):
# General info about the provider
provider_url = 'https://www.netflix.com/'
favicon_url = 'https://netflix.hs.llnwd.net/e1/en_US/icons/nficon.ico'
docs_url = 'http://developer.netflix.com/docs'
category = 'Movies/TV'
# URLs to interact with the API
request_token_url = 'http://api-public.netflix.com/oauth/request_token'
authorize_url = 'https://api-user.netflix.com/oauth/login'
access_token_url = 'http://api-public.netflix.com/oauth/access_token'
api_domain = 'api-public.netflix.com'
available_permissions = [
(None, 'read and manage your queue'),
]
https = False
signature_type = SIGNATURE_TYPE_QUERY
def get_authorize_params(self, *args, **kwargs):
params = super(Netflix, self).get_authorize_params(*args, **kwargs)
params['oauth_consumer_key'] = self.client_id
return params
def get_user_id(self, key):
r = self.api(key, self.api_domains[0], u'/users/current',
params={'output': 'json'})
redirect = r.json[u'resource'][u'link'][u'href']
parts = urlparse.urlparse(redirect)
r = self.api(key, parts.netloc, parts.path,
params={'output': 'json'})
return r.json[u'user'][u'user_id']
| Python | 0.000001 |
87cc7ccb8626d9e236954cce0667167f5f9742be | fix prometheus test | restclients_core/tests/test_prometheus.py | restclients_core/tests/test_prometheus.py | from restclients_core.tests.dao_implementation.test_live import TDAO
from unittest import TestCase, skipUnless
from prometheus_client import generate_latest, REGISTRY
import os
@skipUnless("RUN_LIVE_TESTS" in os.environ, "RUN_LIVE_TESTS=1 to run tests")
class TestPrometheusObservations(TestCase):
    """Verify that issuing a live request records the expected prometheus
    histogram buckets and counters."""

    def test_prometheus_observation(self):
        # The response itself is irrelevant; issuing the request is what
        # produces the prometheus observations asserted on below.
        TDAO().getURL('/ok', {})
        metrics = generate_latest(REGISTRY).decode('utf-8')
        # assertRegex replaces assertRegexpMatches, deprecated since
        # Python 3.2; the trailing ``metrics`` argument is the failure
        # message, preserved from the original.
        self.assertRegex(
            metrics,
            r'.*\nrestclient_request_duration_seconds_bucket{le="0.005",'
            r'service="backend_test"} [1-9].*', metrics)
        self.assertRegex(
            metrics,
            r'.*\nrestclient_response_status_code_bucket{le="200.0",'
            r'service="backend_test"} [1-9].*', metrics)
        self.assertIn('restclient_request_timeout_total counter', metrics)
        self.assertIn('restclient_request_ssl_error_total counter', metrics)
| from restclients_core.tests.dao_implementation.test_backend import TDAO
from unittest import TestCase
from prometheus_client import generate_latest, REGISTRY
class TestPrometheusObservations(TestCase):
def test_prometheus_observation(self):
metrics = generate_latest(REGISTRY).decode('utf-8')
self.assertRegexpMatches(
metrics,
r'.*\nrestclient_request_duration_seconds_bucket{le="0.005",'
r'service="backend_test"} [1-9].*', metrics)
self.assertRegexpMatches(
metrics,
r'.*\nrestclient_response_status_code_bucket{le="200.0",'
r'service="backend_test"} [1-9].*', metrics)
self.assertIn('restclient_request_timeout_total counter', metrics)
self.assertIn('restclient_request_ssl_error_total counter', metrics)
| Python | 0.000018 |
2b1cb336168e8ac72a768b03852ebe5a0c02a8a9 | Remove unused import | corehq/apps/users/tests/fixture_status.py | corehq/apps/users/tests/fixture_status.py | from datetime import datetime
from django.test import TestCase
from django.contrib.auth.models import User
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.domain.models import Domain
from corehq.apps.fixtures.models import UserFixtureStatus, UserFixtureType
class TestFixtureStatus(TestCase):
    """Tests for per-user fixture status tracking and location updates."""

    def setUp(self):
        # Start from a clean slate: remove any leftover couch and SQL users.
        all_users = CouchUser.all()
        for user in all_users:
            user.delete()
        User.objects.all().delete()
        self.username = "joe@my-domain.commcarehq.org"
        password = "password"
        self.domain = Domain(name='my-domain')
        self.domain.save()
        self.couch_user = CommCareUser.create(self.domain.name, self.username, password)
        self.couch_user.save()

    def tearDown(self):
        UserFixtureStatus.objects.all().delete()

    def test_get_statuses(self):
        no_status = {UserFixtureType.CHOICES[0][0]: UserFixtureStatus.DEFAULT_LAST_MODIFIED}
        self.assertEqual(self.couch_user.get_fixture_statuses(), no_status)

        now = datetime.utcnow()
        UserFixtureStatus(
            user_id=self.couch_user._id,
            fixture_type=UserFixtureType.CHOICES[0][0],
            last_modified=now,
        ).save()
        expected_status = {UserFixtureType.CHOICES[0][0]: now}
        self.assertEqual(self.couch_user.get_fixture_statuses(), expected_status)

    def test_get_status(self):
        now = datetime.utcnow()
        couch_user = CommCareUser.get_by_username(self.username)
        UserFixtureStatus(
            user_id=self.couch_user._id,
            fixture_type=UserFixtureType.CHOICES[0][0],
            last_modified=now,
        ).save()
        # An unknown fixture type falls back to the default timestamp.
        self.assertEqual(self.couch_user.fixture_status("fake_status"), UserFixtureStatus.DEFAULT_LAST_MODIFIED)
        self.assertEqual(couch_user.fixture_status(UserFixtureType.CHOICES[0][0]), now)

    def test_update_status_set_location(self):
        # Bug fix: MagicMock was still used here after its module-level
        # import ("from mock import MagicMock, patch") was removed as
        # "unused", which made these tests raise NameError.  Import the
        # stdlib equivalent locally (on Python 2, the external ``mock``
        # backport would be required instead).
        from unittest.mock import MagicMock
        fake_location = MagicMock()
        fake_location._id = "the_depths_of_khazad_dum"
        self.assertEqual(UserFixtureStatus.objects.all().count(), 0)
        self.couch_user.set_location(fake_location)
        self.assertEqual(UserFixtureStatus.objects.all().count(), 1)

        user_fixture_status = UserFixtureStatus.objects.first()
        self.assertEqual(user_fixture_status.user_id, self.couch_user._id)
        self.assertEqual(user_fixture_status.fixture_type, UserFixtureType.LOCATION)

    def test_update_status_unset_location(self):
        from unittest.mock import MagicMock  # see test_update_status_set_location
        fake_location = MagicMock()
        fake_location._id = "the_mines_of_moria"
        self.couch_user.set_location(fake_location)
        previously_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified

        self.couch_user.unset_location()
        new_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
        self.assertTrue(new_updated_time > previously_updated_time)

    def test_update_status_reset_location(self):
        from unittest.mock import MagicMock  # see test_update_status_set_location
        fake_location = MagicMock()
        fake_location._id = "misty_mountains"
        self.couch_user.set_location(fake_location)
        previously_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified

        fake_location._id = "lonely_mountain"
        self.couch_user.set_location(fake_location)
        new_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
        self.assertTrue(new_updated_time > previously_updated_time)
| from datetime import datetime
from django.test import TestCase
from mock import MagicMock, patch
from django.contrib.auth.models import User
from corehq.apps.users.models import CouchUser, CommCareUser
from corehq.apps.domain.models import Domain
from corehq.apps.fixtures.models import UserFixtureStatus, UserFixtureType
class TestFixtureStatus(TestCase):
def setUp(self):
all_users = CouchUser.all()
for user in all_users:
user.delete()
User.objects.all().delete()
self.username = "joe@my-domain.commcarehq.org"
password = "password"
self.domain = Domain(name='my-domain')
self.domain.save()
self.couch_user = CommCareUser.create(self.domain.name, self.username, password)
self.couch_user.save()
def tearDown(self):
UserFixtureStatus.objects.all().delete()
def test_get_statuses(self):
no_status = {UserFixtureType.CHOICES[0][0]: UserFixtureStatus.DEFAULT_LAST_MODIFIED}
self.assertEqual(self.couch_user.get_fixture_statuses(), no_status)
now = datetime.utcnow()
UserFixtureStatus(
user_id=self.couch_user._id,
fixture_type=UserFixtureType.CHOICES[0][0],
last_modified=now,
).save()
expected_status = {UserFixtureType.CHOICES[0][0]: now}
self.assertEqual(self.couch_user.get_fixture_statuses(), expected_status)
def test_get_status(self):
now = datetime.utcnow()
couch_user = CommCareUser.get_by_username(self.username)
UserFixtureStatus(
user_id=self.couch_user._id,
fixture_type=UserFixtureType.CHOICES[0][0],
last_modified=now,
).save()
self.assertEqual(self.couch_user.fixture_status("fake_status"), UserFixtureStatus.DEFAULT_LAST_MODIFIED)
self.assertEqual(couch_user.fixture_status(UserFixtureType.CHOICES[0][0]), now)
def test_update_status_set_location(self):
fake_location = MagicMock()
fake_location._id = "the_depths_of_khazad_dum"
self.assertEqual(UserFixtureStatus.objects.all().count(), 0)
self.couch_user.set_location(fake_location)
self.assertEqual(UserFixtureStatus.objects.all().count(), 1)
user_fixture_status = UserFixtureStatus.objects.first()
self.assertEqual(user_fixture_status.user_id, self.couch_user._id)
self.assertEqual(user_fixture_status.fixture_type, UserFixtureType.LOCATION)
def test_update_status_unset_location(self):
fake_location = MagicMock()
fake_location._id = "the_mines_of_moria"
self.couch_user.set_location(fake_location)
previously_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
self.couch_user.unset_location()
new_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
self.assertTrue(new_updated_time > previously_updated_time)
def test_update_status_reset_location(self):
fake_location = MagicMock()
fake_location._id = "misty_mountains"
self.couch_user.set_location(fake_location)
previously_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
fake_location._id = "lonely_mountain"
self.couch_user.set_location(fake_location)
new_updated_time = UserFixtureStatus.objects.get(user_id=self.couch_user._id).last_modified
self.assertTrue(new_updated_time > previously_updated_time)
| Python | 0.000001 |
dcd7b8e990ff0f3d29807be78d7d3967c6ad46c4 | Add trailing newline | gen_tmpfiles.py | gen_tmpfiles.py | #!/usr/bin/python
'''Scan an existing directory tree and record installed directories.
During build a number of directories under /var are created in the stateful
partition. We want to make sure that those are always there so create a record
of them using systemd's tempfiles config format so they are recreated during
boot if they go missing for any reason.
'''
import optparse
import os
import stat
import sys
import pwd
import grp
def main():
keep = set()
parser = optparse.OptionParser(description=__doc__)
parser.add_option('--root', help='Remove root prefix from output')
parser.add_option('--output', help='Write output to the given file')
opts, args = parser.parse_args()
if opts.root:
opts.root = os.path.abspath(opts.root)
for path in args:
path = os.path.abspath(path)
if opts.root:
assert path.startswith(opts.root)
for dirpath, dirnames, filenames in os.walk(path):
if any(f.startswith('.keep') for f in filenames):
keep.add(dirpath)
# Add all parent directories too
for path in frozenset(keep):
split = []
for pathbit in path.split('/'):
split.append(pathbit)
joined = '/'.join(split)
if not joined:
continue
if opts.root and not joined.startswith(opts.root):
continue
if opts.root == joined:
continue
keep.add(joined)
config = []
for path in sorted(keep):
if opts.root:
assert path.startswith(opts.root)
stripped = path[len(opts.root):]
assert len(stripped) > 1
else:
stripped = path
info = os.stat(path)
assert stat.S_ISDIR(info.st_mode)
mode = stat.S_IMODE(info.st_mode)
try:
owner = pwd.getpwuid(info.st_uid).pw_name
except KeyError:
owner = str(info.st_uid)
try:
group = grp.getgrgid(info.st_gid).gr_name
except KeyError:
group = str(info.st_gid)
config.append('d %-22s %04o %-10s %-10s - -'
% (stripped, mode, owner, group))
if opts.output:
fd = open(opts.output, 'w')
fd.write('\n'.join(config)+'\n')
fd.close()
else:
print '\n'.join(config)
if __name__ == '__main__':
main()
| #!/usr/bin/python
'''Scan an existing directory tree and record installed directories.
During build a number of directories under /var are created in the stateful
partition. We want to make sure that those are always there so create a record
of them using systemd's tempfiles config format so they are recreated during
boot if they go missing for any reason.
'''
import optparse
import os
import stat
import sys
import pwd
import grp
def main():
keep = set()
parser = optparse.OptionParser(description=__doc__)
parser.add_option('--root', help='Remove root prefix from output')
parser.add_option('--output', help='Write output to the given file')
opts, args = parser.parse_args()
if opts.root:
opts.root = os.path.abspath(opts.root)
for path in args:
path = os.path.abspath(path)
if opts.root:
assert path.startswith(opts.root)
for dirpath, dirnames, filenames in os.walk(path):
if any(f.startswith('.keep') for f in filenames):
keep.add(dirpath)
# Add all parent directories too
for path in frozenset(keep):
split = []
for pathbit in path.split('/'):
split.append(pathbit)
joined = '/'.join(split)
if not joined:
continue
if opts.root and not joined.startswith(opts.root):
continue
if opts.root == joined:
continue
keep.add(joined)
config = []
for path in sorted(keep):
if opts.root:
assert path.startswith(opts.root)
stripped = path[len(opts.root):]
assert len(stripped) > 1
else:
stripped = path
info = os.stat(path)
assert stat.S_ISDIR(info.st_mode)
mode = stat.S_IMODE(info.st_mode)
try:
owner = pwd.getpwuid(info.st_uid).pw_name
except KeyError:
owner = str(info.st_uid)
try:
group = grp.getgrgid(info.st_gid).gr_name
except KeyError:
group = str(info.st_gid)
config.append('d %-22s %04o %-10s %-10s - -'
% (stripped, mode, owner, group))
if opts.output:
fd = open(opts.output, 'w')
fd.write('\n'.join(config))
fd.close()
else:
print '\n'.join(config)
if __name__ == '__main__':
main()
| Python | 0.000001 |
738cc42fc0ee8e06a53842c27dd4a392403bea49 | remove asgi warning | fiduswriter/manage.py | fiduswriter/manage.py | #!/usr/bin/env python3
import os
import sys
from importlib import import_module
os.environ["DJANGO_ALLOW_ASYNC_UNSAFE"] = "true"
if "COVERAGE_PROCESS_START" in os.environ:
import coverage
coverage.process_startup()
SRC_PATH = os.path.dirname(os.path.realpath(__file__))
os.environ.setdefault("SRC_PATH", SRC_PATH)
def inner(default_project_path):
    """Configure Django by layering settings, then dispatch the command line.

    Settings are applied in three layers, each overriding the previous one:
    Django's ``global_settings``, the bundled ``base.settings``, and an
    optional project ``configuration`` module.  ``--pythonpath`` and
    ``--settings`` are peeled off manually so the project directory and
    settings module are known before Django is configured.
    """
    sys.path.append(SRC_PATH)
    sys_argv = sys.argv
    PROJECT_PATH = False
    while "--pythonpath" in sys_argv:
        index = sys_argv.index("--pythonpath")
        PROJECT_PATH = os.path.join(os.getcwd(), sys_argv[index + 1])
        sys.path.insert(0, PROJECT_PATH)
        # We prevent the pythonpath to be handled later on by removing it
        # from sys_argv
        sys_argv = sys_argv[:index] + sys_argv[index + 2 :]
    if not PROJECT_PATH:
        PROJECT_PATH = default_project_path
        sys.path.insert(0, PROJECT_PATH)
    os.environ.setdefault("PROJECT_PATH", PROJECT_PATH)
    if "--settings" in sys_argv:
        index = sys_argv.index("--settings")
        SETTINGS_MODULE = sys_argv[index + 1]
    else:
        SETTINGS_MODULE = "configuration"
    mod = False
    # There are three levels of settings, each overiding the previous one:
    # global_settings.py, settings.py and configuration.py
    from django.conf import global_settings as CONFIGURATION
    from base import settings as SETTINGS

    SETTINGS_PATHS = [SETTINGS.__file__]
    for setting in dir(SETTINGS):
        setting_value = getattr(SETTINGS, setting)
        setattr(CONFIGURATION, setting, setting_value)
    try:
        mod = import_module(SETTINGS_MODULE)
    except ModuleNotFoundError:
        # No project configuration module: run on base settings alone.
        SETTINGS_MODULE = None
    if mod:
        SETTINGS_PATHS.append(mod.__file__)
        # Only upper-case names are treated as settings, per Django
        # convention.
        for setting in dir(mod):
            if setting.isupper():
                setattr(CONFIGURATION, setting, getattr(mod, setting))
    INSTALLED_APPS = CONFIGURATION.BASE_INSTALLED_APPS + list(
        CONFIGURATION.INSTALLED_APPS
    )
    for app in CONFIGURATION.REMOVED_APPS:
        INSTALLED_APPS.remove(app)
    from django.conf import settings

    settings.configure(
        CONFIGURATION,
        SETTINGS_MODULE=SETTINGS_MODULE,
        SETTINGS_PATHS=SETTINGS_PATHS,
        INSTALLED_APPS=INSTALLED_APPS,
        MIDDLEWARE=(
            CONFIGURATION.BASE_MIDDLEWARE + list(CONFIGURATION.MIDDLEWARE)
        ),
    )
    os.environ["TZ"] = settings.TIME_ZONE
    # Bug fix: guard the index access -- invoking the script with no
    # subcommand previously raised IndexError here instead of letting
    # Django print its usage message.
    if len(sys_argv) > 1 and sys_argv[1] in ["version", "--version"]:
        from base import get_version

        sys.stdout.write(get_version() + "\n")
        return
    from django.core.management import execute_from_command_line

    execute_from_command_line(sys_argv)
def entry():
os.environ.setdefault("NO_COMPILEMESSAGES", "true")
inner(os.getcwd())
if __name__ == "__main__":
inner(SRC_PATH)
| #!/usr/bin/env python3
import os
import sys
from importlib import import_module
if "COVERAGE_PROCESS_START" in os.environ:
import coverage
coverage.process_startup()
SRC_PATH = os.path.dirname(os.path.realpath(__file__))
os.environ.setdefault("SRC_PATH", SRC_PATH)
def inner(default_project_path):
sys.path.append(SRC_PATH)
sys_argv = sys.argv
PROJECT_PATH = False
while "--pythonpath" in sys_argv:
index = sys_argv.index("--pythonpath")
PROJECT_PATH = os.path.join(os.getcwd(), sys_argv[index + 1])
sys.path.insert(0, PROJECT_PATH)
# We prevent the pythonpath to be handled later on by removing it from
# sys_argv
sys_argv = sys_argv[:index] + sys_argv[index + 2 :]
if not PROJECT_PATH:
PROJECT_PATH = default_project_path
sys.path.insert(0, PROJECT_PATH)
os.environ.setdefault("PROJECT_PATH", PROJECT_PATH)
if "--settings" in sys_argv:
index = sys_argv.index("--settings")
SETTINGS_MODULE = sys_argv[index + 1]
else:
SETTINGS_MODULE = "configuration"
mod = False
# There are three levels of settings, each overiding the previous one:
# global_settings.py, settings.py and configuration.py
from django.conf import global_settings as CONFIGURATION
from base import settings as SETTINGS
SETTINGS_PATHS = [SETTINGS.__file__]
for setting in dir(SETTINGS):
setting_value = getattr(SETTINGS, setting)
setattr(CONFIGURATION, setting, setting_value)
try:
mod = import_module(SETTINGS_MODULE)
except ModuleNotFoundError:
SETTINGS_MODULE = None
if mod:
SETTINGS_PATHS.append(mod.__file__)
for setting in dir(mod):
if setting.isupper():
setattr(CONFIGURATION, setting, getattr(mod, setting))
INSTALLED_APPS = CONFIGURATION.BASE_INSTALLED_APPS + list(
CONFIGURATION.INSTALLED_APPS
)
for app in CONFIGURATION.REMOVED_APPS:
INSTALLED_APPS.remove(app)
from django.conf import settings
settings.configure(
CONFIGURATION,
SETTINGS_MODULE=SETTINGS_MODULE,
SETTINGS_PATHS=SETTINGS_PATHS,
INSTALLED_APPS=INSTALLED_APPS,
MIDDLEWARE=(
CONFIGURATION.BASE_MIDDLEWARE + list(CONFIGURATION.MIDDLEWARE)
),
)
os.environ["TZ"] = settings.TIME_ZONE
if sys_argv[1] in ["version", "--version"]:
from base import get_version
sys.stdout.write(get_version() + "\n")
return
from django.core.management import execute_from_command_line
execute_from_command_line(sys_argv)
def entry():
os.environ.setdefault("NO_COMPILEMESSAGES", "true")
inner(os.getcwd())
if __name__ == "__main__":
inner(SRC_PATH)
| Python | 0.000006 |
a363edd761e2c99bae6d4492d0ca44a404e5d904 | Avoid py36 error when printing unicode chars in a stream | neutronclient/tests/unit/test_exceptions.py | neutronclient/tests/unit/test_exceptions.py | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
import mock
from oslo_utils import encodeutils
import six
import testtools
from neutronclient._i18n import _
from neutronclient.common import exceptions
class TestExceptions(testtools.TestCase):
def test_exception_print_with_unicode(self):
class TestException(exceptions.NeutronException):
message = _('Exception with %(reason)s')
multibyte_unicode_string = u'\uff21\uff22\uff23'
e = TestException(reason=multibyte_unicode_string)
with mock.patch.object(sys, 'stdout') as mock_stdout:
print(e)
exc_str = 'Exception with %s' % multibyte_unicode_string
mock_stdout.assert_has_calls([mock.call.write(exc_str)])
def test_exception_message_with_encoded_unicode(self):
class TestException(exceptions.NeutronException):
message = _('Exception with %(reason)s')
multibyte_string = u'\uff21\uff22\uff23'
multibyte_binary = encodeutils.safe_encode(multibyte_string)
e = TestException(reason=multibyte_binary)
self.assertEqual('Exception with %s' % multibyte_string,
six.text_type(e))
| # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo_utils import encodeutils
import six
import testtools
from neutronclient._i18n import _
from neutronclient.common import exceptions
class TestExceptions(testtools.TestCase):
def test_exception_print_with_unicode(self):
class TestException(exceptions.NeutronException):
message = _('Exception with %(reason)s')
multibyte_unicode_string = u'\uff21\uff22\uff23'
e = TestException(reason=multibyte_unicode_string)
fixture = fixtures.StringStream('stdout')
self.useFixture(fixture)
with fixtures.MonkeyPatch('sys.stdout', fixture.stream):
print(e)
self.assertEqual('Exception with %s' % multibyte_unicode_string,
fixture.getDetails().get('stdout').as_text())
def test_exception_message_with_encoded_unicode(self):
class TestException(exceptions.NeutronException):
message = _('Exception with %(reason)s')
multibyte_string = u'\uff21\uff22\uff23'
multibyte_binary = encodeutils.safe_encode(multibyte_string)
e = TestException(reason=multibyte_binary)
self.assertEqual('Exception with %s' % multibyte_string,
six.text_type(e))
| Python | 0.999789 |
62404979e69ccd129f8382e8a2fce5329c577a78 | fix version | experimentator/__version__.py | experimentator/__version__.py | __version__ = '0.2.0dev5'
| __version__ = '0.2.0.dev5'
| Python | 0.000001 |
b0e3b4fcd3d85989ebe72fa805cfab4576867835 | Fix how node-caffe detects if GPU is supported on a machine | node-caffe/binding.gyp | node-caffe/binding.gyp | {
'targets': [
{
'target_name': 'caffe',
'sources': [
'src/caffe.cpp'
],
'variables': {
'has_gpu': '<!(if which nvidia-smi; then echo true; else echo false; fi)',
},
'include_dirs': [
'<!(node -e "require(\'nan\')")',
'<!(echo $CAFFE_ROOT/include)',
'/usr/local/cuda/include/',
'<!(pwd)/caffe/src/',
'<!(pwd)/caffe/include/',
'<!(pwd)/caffe/build/include/',
],
'libraries': [
'-L<!(echo "$CAFFE_ROOT/lib")',
'-L<!(pwd)/caffe/build/lib',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib ]; then echo "-L/usr/local/cuda/lib"; fi)',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib64 ]; then echo "-L/usr/local/cuda/lib64"; fi)',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda ]; then echo "-lcudart"; fi)',
'-lcaffe',
],
'ldflags': [
'-Wl,-rpath,<!(echo $CAFFE_ROOT/lib)',
'-Wl,-rpath,<!(pwd)/caffe/build/lib',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib ]; then echo "-Wl,-rpath,/usr/local/cuda/lib"; fi)',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib64 ]; then echo "-Wl,-rpath,/usr/local/cuda/lib64"; fi)',
],
'cflags_cc': [
'-std=c++11',
'-fexceptions',
'-Wno-ignored-qualifiers',
],
'defines': [
'<!(if [ "$has_gpu" = true ]; then echo "HAVE_CUDA"; else echo "CPU_ONLY"; fi)',
],
'conditions': [
['OS=="win"', {
'dependencies': [
]
}],
['OS=="mac"', {
'defines': [
'<!(if [ "$has_gpu" = true ]; then echo "HAVE_CUDA"; else echo "CPU_ONLY"; fi)',
],
'include_dirs': [
'<!(brew --prefix openblas)' + '/include',
'<!(brew --prefix boost 2> /dev/null)' + '/include',
'<!(brew --prefix gflags)' + '/include',
'<!(brew --prefix glog)' + '/include',
'<!(brew --prefix protobuf)' + '/include',
],
'libraries': [
'<!(brew --prefix boost 2> /dev/null)' + '/lib/libopencv_core.dylib',
'-lboost_system',
'-lboost_thread-mt',
],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.11',
'OTHER_LDFLAGS': [
'-Wl,-rpath,<!(echo $CAFFE_ROOT/lib)',
'-Wl,-rpath,<!(pwd)/caffe/build/lib',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib ]; then echo "-Wl,-rpath,/usr/local/cuda/lib"; fi)',
'<!(if [ "$has_gpu" = true ] && [ -d /usr/local/cuda/lib64 ]; then echo "-Wl,-rpath,/usr/local/cuda/lib64"; fi)',
],
},
'libraries': [
],
}]
]
}
]
}
| {
'targets': [
{
'target_name': 'caffe',
'sources': [
'src/caffe.cpp'
],
'include_dirs': [
'<!(node -e "require(\'nan\')")',
'<!(echo $CAFFE_ROOT/include)',
'/usr/local/cuda/include/',
'<!(pwd)/caffe/src/',
'<!(pwd)/caffe/include/',
'<!(pwd)/caffe/build/include/',
],
'libraries': [
'-L<!(echo "$CAFFE_ROOT/lib")',
'-L<!(pwd)/caffe/build/lib',
'<!(if [ -d /usr/local/cuda/lib ]; then echo "-L/usr/local/cuda/lib"; fi)',
'<!(if [ -d /usr/local/cuda/lib64 ]; then echo "-L/usr/local/cuda/lib64"; fi)',
'<!(if [ -d /usr/local/cuda ]; then echo "-lcudart"; fi)',
'-lcaffe',
],
'ldflags': [
'-Wl,-rpath,<!(echo $CAFFE_ROOT/lib)',
'-Wl,-rpath,<!(pwd)/caffe/build/lib',
'<!(if [ -d /usr/local/cuda/lib ]; then echo "-Wl,-rpath,/usr/local/cuda/lib"; fi)',
'<!(if [ -d /usr/local/cuda/lib64 ]; then echo "-Wl,-rpath,/usr/local/cuda/lib64"; fi)',
],
'cflags_cc': [
'-std=c++11',
'-fexceptions',
'-Wno-ignored-qualifiers',
],
'defines': [
'<!(if [ -d /usr/local/cuda ]; then echo "HAVE_CUDA"; else echo "CPU_ONLY"; fi)',
],
'dependencies': [
],
'conditions': [
['OS=="win"', {
'dependencies': [
]
}],
['OS=="mac"', {
'defines': [
'<!(if [ -d /usr/local/cuda ]; then echo "HAVE_CUDA"; else echo "CPU_ONLY"; fi)',
],
'include_dirs': [
'<!(brew --prefix openblas)' + '/include',
'<!(brew --prefix boost 2> /dev/null)' + '/include',
'<!(brew --prefix gflags)' + '/include',
'<!(brew --prefix glog)' + '/include',
'<!(brew --prefix protobuf)' + '/include',
],
'libraries': [
'<!(brew --prefix boost 2> /dev/null)' + '/lib/libopencv_core.dylib',
'-lboost_system',
'-lboost_thread-mt',
],
'xcode_settings': {
'GCC_ENABLE_CPP_EXCEPTIONS': 'YES',
'GCC_ENABLE_CPP_RTTI': 'YES',
'MACOSX_DEPLOYMENT_TARGET': '10.11',
'OTHER_LDFLAGS': [
'-Wl,-rpath,<!(echo $CAFFE_ROOT/lib)',
'-Wl,-rpath,<!(pwd)/caffe/build/lib',
'<!(if [ -d /usr/local/cuda/lib ]; then echo "-Wl,-rpath,/usr/local/cuda/lib"; fi)',
'<!(if [ -d /usr/local/cuda/lib64 ]; then echo "-Wl,-rpath,/usr/local/cuda/lib64"; fi)',
],
'OTHER_CFLAGS': [
]
},
'libraries': [
],
}]
]
}
]
}
| Python | 0.000082 |
3471381b5894536142a9460410e0efe5e8b1b294 | make doc regex more lenient | flask_website/docs.py | flask_website/docs.py | # -*- coding: utf-8 -*-
import os
import re
from flask import url_for, Markup
from flask_website import app
from flask_website.search import Indexable
_doc_body_re = re.compile(r'''(?smx)
<title>(.*?)</title>.*?
<div.*?class=".*?body.*?>(.*)
<div.*?class=".*?sphinxsidebar
''')
class DocumentationPage(Indexable):
search_document_kind = 'documentation'
def __init__(self, slug):
self.slug = slug
fn = os.path.join(app.config['DOCUMENTATION_PATH'],
slug, 'index.html')
with open(fn) as f:
contents = f.read().decode('utf-8')
title, text = _doc_body_re.search(contents).groups()
self.title = Markup(title).striptags().split(u'—')[0].strip()
self.text = Markup(text).striptags().strip().replace(u'¶', u'')
def get_search_document(self):
return dict(
id=unicode(self.slug),
title=self.title,
keywords=[],
content=self.text
)
@property
def url(self):
return url_for('docs.show', page=self.slug)
@classmethod
def describe_search_result(cls, result):
rv = cls(result['id'])
return Markup(result.highlights('content', text=rv.text)) or None
@classmethod
def iter_pages(cls):
base_folder = os.path.abspath(app.config['DOCUMENTATION_PATH'])
for dirpath, dirnames, filenames in os.walk(base_folder):
if 'index.html' in filenames:
slug = dirpath[len(base_folder) + 1:]
# skip the index page. useless
if slug:
yield DocumentationPage(slug)
| # -*- coding: utf-8 -*-
import os
import re
from flask import url_for, Markup
from flask_website import app
from flask_website.search import Indexable
_doc_body_re = re.compile(r'''(?smx)
<title>(.*?)</title>.*?
<div\s+class="body">(.*?)<div\s+class="sphinxsidebar">
''')
class DocumentationPage(Indexable):
search_document_kind = 'documentation'
def __init__(self, slug):
self.slug = slug
fn = os.path.join(app.config['DOCUMENTATION_PATH'],
slug, 'index.html')
with open(fn) as f:
contents = f.read().decode('utf-8')
title, text = _doc_body_re.search(contents).groups()
self.title = Markup(title).striptags().split(u'—')[0].strip()
self.text = Markup(text).striptags().strip().replace(u'¶', u'')
def get_search_document(self):
return dict(
id=unicode(self.slug),
title=self.title,
keywords=[],
content=self.text
)
@property
def url(self):
return url_for('docs.show', page=self.slug)
@classmethod
def describe_search_result(cls, result):
rv = cls(result['id'])
return Markup(result.highlights('content', text=rv.text)) or None
@classmethod
def iter_pages(cls):
base_folder = os.path.abspath(app.config['DOCUMENTATION_PATH'])
for dirpath, dirnames, filenames in os.walk(base_folder):
if 'index.html' in filenames:
slug = dirpath[len(base_folder) + 1:]
# skip the index page. useless
if slug:
yield DocumentationPage(slug)
| Python | 0.000692 |
43403cfca2ca217c03f4aaffdddd7a4d03a74520 | Change string formatting | getMolecules.py | getMolecules.py | #!/usr/bin/env python
# Copyright (c) 2016 Ryan Collins <rcollins@chgr.mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Estimate original molecule sizes and coordinates from 10X linked-read WGS barcodes
"""
import argparse
from collections import defaultdict, Counter, namedtuple
import pysam
def get_gemcode_regions(ibam, dist):
"""
Estimates molecule coordinates from colinear reads with overlapping 10X barcodes
Parameters
----------
ibam : pysam.AlignmentFile
Input 10X bam
dist : int
Partitioning distance (bp) for separating reads with overlapping
barcodes into independent fragments
Yields
------
region : namedtuple
chr, start, end, barcode, readcount
"""
#Create namedtuples for storing read coordinate and molecule info
coords = namedtuple('coords', ['chr', 'pos'])
molecule = namedtuple('molecule', ['chr', 'start', 'end', 'barcode', 'readcount'])
#Create defaultdict for storing gemcode tuples
#Key is gemcodes, value is coords namedtuple
gemcodes = defaultdict(list)
#Iterate over reads in bamfile
for read in ibam:
#Save 10X barcode for read as gem
gem = read.get_tag('RX')
#If barcode has been seen previously and new read is either from different
#contig or is colinear but beyond dist, yield old barcode as interval
#before adding new read to list
if gem in gemcodes and (gemcodes[gem][-1].chr != read.reference_name or
read.reference_start - gemcodes[gem][-1].pos > dist):
yield molecule(gemcodes[gem][0].chr,
min([pos for chr, pos in gemcodes[gem]]),
max([pos for chr, pos in gemcodes[gem]]),
gem, len(gemcodes[gem]))
gemcodes[gem] = [coords(read.reference_name, read.reference_start)]
else:
#Else just add read to preexisting dictionary
gemcodes[gem].append(coords(read.reference_name, read.reference_start))
#Write out all remaining molecules at end of bam
for gem in gemcodes:
yield molecule(gemcodes[gem][0].chr, min([pos for chr, pos in gemcodes[gem]]), max([pos for chr, pos in gemcodes[gem]]), gem, len(gemcodes[gem]))
#Run
def main():
#Add arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('ibam', type=pysam.AlignmentFile,
help='Input bam')
parser.add_argument('outfile', help='Output bed file')
parser.add_argument('-d', '--dist', type=int, default=50000,
help='Molecule partitioning distance in bp (default: 50000)')
args = parser.parse_args()
#Open outfile
fout = open(args.outfile, 'w')
#Get gemcode regions
for bed in get_gemcode_regions(args.ibam, args.dist):
#Turn molecule object into string
bed_str = '{0}\t{1}\t{2}\t{3}\t{4}'.format(bed.chr, bed.start, bed.end, bed.barcode, bed.readcount)
#Write to file
fout.write(bed_str + '\n')
#Close outfile
f.close()
if __name__ == '__main__':
main() | #!/usr/bin/env python
# Copyright (c) 2016 Ryan Collins <rcollins@chgr.mgh.harvard.edu>
# Distributed under terms of the MIT license.
"""
Estimate original molecule sizes and coordinates from 10X linked-read WGS barcodes
"""
import argparse
from collections import defaultdict, Counter, namedtuple
import pysam
def get_gemcode_regions(ibam, dist):
"""
Estimates molecule coordinates from colinear reads with overlapping 10X barcodes
Parameters
----------
ibam : pysam.AlignmentFile
Input 10X bam
dist : int
Partitioning distance (bp) for separating reads with overlapping
barcodes into independent fragments
Yields
------
region : namedtuple
chr, start, end, barcode, readcount
"""
#Create namedtuples for storing read coordinate and molecule info
coords = namedtuple('coords', ['chr', 'pos'])
molecule = namedtuple('molecule', ['chr', 'start', 'end', 'barcode', 'readcount'])
#Create defaultdict for storing gemcode tuples
#Key is gemcodes, value is coords namedtuple
gemcodes = defaultdict(list)
#Iterate over reads in bamfile
for read in ibam:
#Save 10X barcode for read as gem
gem = read.get_tag('RX')
#If barcode has been seen previously and new read is either from different
#contig or is colinear but beyond dist, yield old barcode as interval
#before adding new read to list
if gem in gemcodes and (gemcodes[gem][-1].chr != read.reference_name or
read.reference_start - gemcodes[gem][-1].pos > dist):
yield molecule(gemcodes[gem][0].chr,
min([pos for chr, pos in gemcodes[gem]]),
max([pos for chr, pos in gemcodes[gem]]),
gem, len(gemcodes[gem]))
gemcodes[gem] = [coords(read.reference_name, read.reference_start)]
else:
#Else just add read to preexisting dictionary
gemcodes[gem].append(coords(read.reference_name, read.reference_start))
#Write out all remaining molecules at end of bam
for gem in gemcodes:
yield molecule(gemcodes[gem][0].chr, min([pos for chr, pos in gemcodes[gem]]), max([pos for chr, pos in gemcodes[gem]]), gem, len(gemcodes[gem]))
#Run
def main():
#Add arguments
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('ibam', type=pysam.AlignmentFile,
help='Input bam')
parser.add_argument('outfile', help='Output bed file')
parser.add_argument('-d', '--dist', type=int, default=50000,
help='Molecule partitioning distance in bp (default: 50000)')
args = parser.parse_args()
#Open outfile
fout = open(args.outfile, 'w')
#Get gemcode regions
for bed in get_gemcode_regions(args.ibam, args.dist):
#Turn molecule object into string
bed_str = '%s\t%s\t%s\t%s\t%s' % bed.chr, bed.start, bed.end, bed.barcode, bed.readcount
#Write to file
fout.write(bed_str + '\n')
#Close outfile
f.close()
if __name__ == '__main__':
main() | Python | 0.000007 |
98e899d5e76f8bee5c288bffe78ccf943665693c | Fix white space to be 4 spaces | get_cve_data.py | get_cve_data.py | # Script to import CVE data from NIST https://nvd.nist.gov/vuln/data-feeds#JSON_FEED
# https://static.nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-modified.json.gz
import gzip
import json
import pycurl
from io import BytesIO
NVD_CVE_ENDPOINT = 'https://static.nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-modified.json.gz'
DATA_FILE = './data.json.gz'
with open(DATA_FILE, 'wb') as fd:
c = pycurl.Curl()
c.setopt(c.URL, NVD_CVE_ENDPOINT)
c.setopt(c.WRITEDATA, fd)
c.perform()
if c.getinfo(c.RESPONSE_CODE) != 200:
print('data request failed')
exit(1)
# Close Curl
c.close()
# Test: Ensure we received a valid gzip file
# Extract gzip and load JSON to in-memory
with gzip.open(DATA_FILE, 'rt') as fd:
data = json.load(fd)
# Read in-memory JSON structure keys
# CVE_data_version
# CVE_data_format
# CVE_Items
# CVE_data_timestamp
# CVE_data_numberOfCVEs
# CVE_data_type
print('CVE_data_version:', data['CVE_data_version'])
print('CVE_data_format:', data['CVE_data_format'])
print('CVE_data_timestamp:', data['CVE_data_timestamp'])
print('CVE_data_numberOfCVEs:', data['CVE_data_numberOfCVEs'])
print('CVE_data_type:', data['CVE_data_type'])
count = 0
for item in data['CVE_Items']:
if 'cve' in item:
if 'CVE_data_meta' in item['cve']:
if 'ID' in item['cve']['CVE_data_meta']:
#print(item['cve']['CVE_data_meta']['ID'])
count = count + 1
| # Script to import CVE data from NIST https://nvd.nist.gov/vuln/data-feeds#JSON_FEED
# https://static.nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-modified.json.gz
import gzip
import json
import pycurl
from io import BytesIO
NVD_CVE_ENDPOINT = 'https://static.nvd.nist.gov/feeds/json/cve/1.0/nvdcve-1.0-modified.json.gz'
DATA_FILE = './data.json.gz'
with open(DATA_FILE, 'wb') as fd:
c = pycurl.Curl()
c.setopt(c.URL, NVD_CVE_ENDPOINT)
c.setopt(c.WRITEDATA, fd)
c.perform()
if c.getinfo(c.RESPONSE_CODE) != 200:
print('data request failed')
exit(1)
# Close Curl
c.close()
# Test: Ensure we received a valid gzip file
# Extract gzip and load JSON to in-memory
with gzip.open(DATA_FILE, 'rt') as fd:
data = json.load(fd)
# Read in-memory JSON structure keys
# CVE_data_version
# CVE_data_format
# CVE_Items
# CVE_data_timestamp
# CVE_data_numberOfCVEs
# CVE_data_type
print('CVE_data_version:', data['CVE_data_version'])
print('CVE_data_format:', data['CVE_data_format'])
print('CVE_data_timestamp:', data['CVE_data_timestamp'])
print('CVE_data_numberOfCVEs:', data['CVE_data_numberOfCVEs'])
print('CVE_data_type:', data['CVE_data_type'])
count = 0
for item in data['CVE_Items']:
if 'cve' in item:
if 'CVE_data_meta' in item['cve']:
if 'ID' in item['cve']['CVE_data_meta']:
#print(item['cve']['CVE_data_meta']['ID'])
count = count + 1
| Python | 0.999999 |
5e3a4c84d31fe63a67fdb2e64c5edc1ffe8d24ab | Tweak return message | flexget/api/cached.py | flexget/api/cached.py | from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flask.helpers import send_file
from flask_restplus import inputs
from flexget.api import api, APIResource, APIError, BadRequest
from flexget.utils.tools import cached_resource
from requests import RequestException
cached_api = api.namespace('cached', description='Cache remote resources')
cached_parser = api.parser()
cached_parser.add_argument('url', required=True, help='URL to cache')
cached_parser.add_argument('force', type=inputs.boolean, default=False, help='Force fetching remote resource')
@cached_api.route('/')
@api.doc(description='Returns a cached copy of the requested resource, matching its mime type')
class CachedResource(APIResource):
@api.response(200, description='Cached resource')
@api.response(BadRequest)
@api.response(APIError)
@api.doc(parser=cached_parser)
def get(self, session=None):
""" Cache remote resources """
args = cached_parser.parse_args()
url = args.get('url')
force = args.get('force')
try:
file_path, mime_type = cached_resource(url, force)
except RequestException as e:
raise BadRequest('Request Error: {}'.format(e.args[0]))
except OSError as e:
raise APIError('Error: {}'.format(e.args[0]))
return send_file(file_path, mimetype=mime_type)
| from __future__ import unicode_literals, division, absolute_import
from builtins import * # pylint: disable=unused-import, redefined-builtin
from flask.helpers import send_file
from flask_restplus import inputs
from flexget.api import api, APIResource, APIError, BadRequest
from flexget.utils.tools import cached_resource
from requests import RequestException
cached_api = api.namespace('cached', description='Cache remote resources')
cached_parser = api.parser()
cached_parser.add_argument('url', required=True, help='URL to cache')
cached_parser.add_argument('force', type=inputs.boolean, default=False, help='Force fetching remote resource')
@cached_api.route('/')
@api.doc(description='Returns a cached copy of the requested resource, matching its mime type')
class CachedResource(APIResource):
@api.response(200, description='Return file')
@api.response(BadRequest)
@api.response(APIError)
@api.doc(parser=cached_parser)
def get(self, session=None):
""" Cache remote resources """
args = cached_parser.parse_args()
url = args.get('url')
force = args.get('force')
try:
file_path, mime_type = cached_resource(url, force)
except RequestException as e:
raise BadRequest('Request Error: {}'.format(e.args[0]))
except OSError as e:
raise APIError('Error: {}'.format(e.args[0]))
return send_file(file_path, mimetype=mime_type)
| Python | 0 |
16e38be365c939f7e74fb321280e88370606a6c3 | Fix daemonizing on python 3.4+ | flexget/task_queue.py | flexget/task_queue.py | from __future__ import unicode_literals, division, absolute_import
from builtins import *
import logging
import queue
import sys
import threading
import time
from sqlalchemy.exc import ProgrammingError, OperationalError
from flexget.task import TaskAbort
log = logging.getLogger('task_queue')
class TaskQueue(object):
"""
Task processing thread.
Only executes one task at a time, if more are requested they are queued up and run in turn.
"""
def __init__(self):
self.run_queue = queue.PriorityQueue()
self._shutdown_now = False
self._shutdown_when_finished = False
self.current_task = None
# We don't override `threading.Thread` because debugging this seems unsafe with pydevd.
# Overriding __len__(self) seems to cause a debugger deadlock.
self._thread = threading.Thread(target=self.run, name='task_queue')
self._thread.daemon = True
def start(self):
self._thread.start()
def run(self):
while not self._shutdown_now:
# Grab the first job from the run queue and do it
try:
self.current_task = self.run_queue.get(timeout=0.5)
except queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
try:
self.current_task.execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (self.current_task.name, e))
except (ProgrammingError, OperationalError):
log.critical('Database error while running a task. Attempting to recover.')
self.current_task.manager.crash_report()
except Exception:
log.critical('BUG: Unhandled exception during task queue run loop.')
self.current_task.manager.crash_report()
finally:
self.run_queue.task_done()
self.current_task = None
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning('task queue shut down with %s tasks remaining in the queue to run.' % remaining_jobs)
else:
log.debug('task queue shut down')
def is_alive(self):
return self._thread.is_alive()
def put(self, task):
"""Adds a task to be executed to the queue."""
self.run_queue.put(task)
def __len__(self):
return self.run_queue.qsize()
def shutdown(self, finish_queue=True):
"""
Request shutdown.
:param bool finish_queue: Should all tasks be finished before ending thread.
"""
log.debug('task queue shutdown requested')
if finish_queue:
self._shutdown_when_finished = True
if self.run_queue.qsize():
log.verbose('There are %s tasks to execute. Shutdown will commence when they have completed.' %
self.run_queue.qsize())
else:
self._shutdown_now = True
def wait(self):
"""
Waits for the thread to exit.
Allows abortion of task queue with ctrl-c
"""
if sys.version_info >= (3, 4):
# Due to python bug, Thread.is_alive doesn't seem to work properly under our conditions on python 3.4+
# http://bugs.python.org/issue26793
# TODO: Is it important to have the clean abortion? Do we need to find a better way?
self._thread.join()
return
try:
while self._thread.is_alive():
time.sleep(0.5)
except KeyboardInterrupt:
log.error('Got ctrl-c, shutting down after running task (if any) completes')
self.shutdown(finish_queue=False)
# We still wait to finish cleanly, pressing ctrl-c again will abort
while self._thread.is_alive():
time.sleep(0.5)
| from __future__ import unicode_literals, division, absolute_import
from builtins import *
import logging
import queue
import threading
import time
from sqlalchemy.exc import ProgrammingError, OperationalError
from flexget.task import TaskAbort
log = logging.getLogger('task_queue')
class TaskQueue(object):
"""
Task processing thread.
Only executes one task at a time, if more are requested they are queued up and run in turn.
"""
def __init__(self):
self.run_queue = queue.PriorityQueue()
self._shutdown_now = False
self._shutdown_when_finished = False
self.current_task = None
# We don't override `threading.Thread` because debugging this seems unsafe with pydevd.
# Overriding __len__(self) seems to cause a debugger deadlock.
self._thread = threading.Thread(target=self.run, name='task_queue')
self._thread.daemon = True
def start(self):
self._thread.start()
def run(self):
while not self._shutdown_now:
# Grab the first job from the run queue and do it
try:
self.current_task = self.run_queue.get(timeout=0.5)
except queue.Empty:
if self._shutdown_when_finished:
self._shutdown_now = True
continue
try:
self.current_task.execute()
except TaskAbort as e:
log.debug('task %s aborted: %r' % (self.current_task.name, e))
except (ProgrammingError, OperationalError):
log.critical('Database error while running a task. Attempting to recover.')
self.current_task.manager.crash_report()
except Exception:
log.critical('BUG: Unhandled exception during task queue run loop.')
self.current_task.manager.crash_report()
finally:
self.run_queue.task_done()
self.current_task = None
remaining_jobs = self.run_queue.qsize()
if remaining_jobs:
log.warning('task queue shut down with %s tasks remaining in the queue to run.' % remaining_jobs)
else:
log.debug('task queue shut down')
def is_alive(self):
return self._thread.is_alive()
def put(self, task):
"""Adds a task to be executed to the queue."""
self.run_queue.put(task)
def __len__(self):
return self.run_queue.qsize()
def shutdown(self, finish_queue=True):
"""
Request shutdown.
:param bool finish_queue: Should all tasks be finished before ending thread.
"""
log.debug('task queue shutdown requested')
if finish_queue:
self._shutdown_when_finished = True
if self.run_queue.qsize():
log.verbose('There are %s tasks to execute. Shutdown will commence when they have completed.' %
self.run_queue.qsize())
else:
self._shutdown_now = True
def wait(self):
"""
Waits for the thread to exit.
Allows abortion of task queue with ctrl-c
"""
try:
while self._thread.is_alive():
time.sleep(0.5)
except KeyboardInterrupt:
log.error('Got ctrl-c, shutting down after running task (if any) completes')
self.shutdown(finish_queue=False)
# We still wait to finish cleanly, pressing ctrl-c again will abort
while self._thread.is_alive():
time.sleep(0.5)
| Python | 0 |
9df0c21bcefdeda4ca65ee4126ed965a6d099416 | Fix line length on import statement | website/website/wagtail_hooks.py | website/website/wagtail_hooks.py | from wagtail.contrib.modeladmin.options import ModelAdmin,\
modeladmin_register, ThumbnailMixin
from website.models import Logo
class LogoAdmin(ThumbnailMixin, ModelAdmin):
model = Logo
menu_icon = 'picture'
menu_order = 1000
list_display = ('admin_thumb', 'category', 'link')
add_to_settings_menu = True
thumb_image_field_name = 'logo'
thumb_image_filter_spec = 'fill-128x128'
modeladmin_register(LogoAdmin)
| from wagtail.contrib.modeladmin.options import ModelAdmin, modeladmin_register, \
ThumbnailMixin
from website.models import Logo
class LogoAdmin(ThumbnailMixin, ModelAdmin):
model = Logo
menu_icon = 'picture'
menu_order = 1000
list_display = ('admin_thumb', 'category', 'link')
add_to_settings_menu = True
thumb_image_field_name = 'logo'
thumb_image_filter_spec = 'fill-128x128'
modeladmin_register(LogoAdmin)
| Python | 0.00979 |
d4d69ed62cbd726c92de9382136175e0fbdbb8af | Remove super-stale file paths | opencog/nlp/anaphora/agents/testingAgent.py | opencog/nlp/anaphora/agents/testingAgent.py |
from __future__ import print_function
from pprint import pprint
# from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
from agents.hobbs import HobbsAgent
from agents.dumpAgent import dumpAgent
from opencog.scheme_wrapper import load_scm,scheme_eval_h, scheme_eval, __init__
__author__ = 'Hujie Wang'
'''
This agent is purely for testing purposes, which can be used to test hobbsAgent in a standalone atomspace environment.
'''
atomspace = AtomSpace()
# __init__(atomspace)
scheme_eval(atomspace, "(use-modules (opencog) (opencog exec))")
scheme_eval(atomspace, "(use-modules (opencog nlp))")
#status2 = load_scm(atomspace, "opencog/nlp/anaphora/tests/atomspace.scm")
#init=initAgent()
#init.run(atomspace)
dump=dumpAgent()
dump.run(atomspace)
hobbsAgent = HobbsAgent()
hobbsAgent.run(atomspace)
|
from __future__ import print_function
from pprint import pprint
# from pln.examples.deduction import deduction_agent
from opencog.atomspace import types, AtomSpace, TruthValue
from agents.hobbs import HobbsAgent
from agents.dumpAgent import dumpAgent
from opencog.scheme_wrapper import load_scm,scheme_eval_h, scheme_eval, __init__
__author__ = 'Hujie Wang'
'''
This agent is purely for testing purposes, which can be used to test hobbsAgent in a standalone atomspace environment.
'''
atomspace = AtomSpace()
# __init__(atomspace)
scheme_eval(atomspace, "(use-modules (opencog) (opencog exec))")
scheme_eval(atomspace, "(use-modules (opencog nlp))")
data=["opencog/scm/config.scm",
"opencog/scm/core_types.scm",
"spacetime/spacetime_types.scm",
"opencog/nlp/types/nlp_types.scm",
"opencog/attention/attention_types.scm",
"opencog/embodiment/embodiment_types.scm",
"opencog/scm/apply.scm",
"opencog/scm/file-utils.scm",
"opencog/scm/persistence.scm",
#"opencog/scm/repl-shell.scm",
"opencog/scm/utilities.scm",
"opencog/scm/av-tv.scm",
"opencog/nlp/scm/type-definitions.scm",
"opencog/nlp/scm/config.scm",
"opencog/nlp/scm/file-utils.scm",
"opencog/nlp/scm/nlp-utils.scm",
"opencog/nlp/scm/disjunct-list.scm",
"opencog/nlp/scm/processing-utils.scm",
"opencog/nlp/anaphora/tests/atomspace.log"
]
#status2 = load_scm(atomspace, "opencog/nlp/anaphora/tests/atomspace.scm")
# for item in data:
# load_scm(atomspace, item)
#init=initAgent()
#init.run(atomspace)
dump=dumpAgent()
dump.run(atomspace)
hobbsAgent = HobbsAgent()
hobbsAgent.run(atomspace)
| Python | 0.000002 |
f585a7cdf4ecbecfe240873a3b4b8e7d4376e69c | Move fallback URL pattern to the end of the list so it gets matched last. | campus02/urls.py | campus02/urls.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.static import serve
# Core URL patterns; more specific prefixes are listed first.
urlpatterns = [
    url(r'^', include('django.contrib.auth.urls')),
    url(r'^admin/', include(admin.site.urls)),
    url(r'^web/', include('campus02.web.urls', namespace='web')),
]
if settings.DEBUG:
    # Development-only routes: the debug toolbar plus local serving of
    # media and static files (in production the web server handles these).
    import debug_toolbar
    urlpatterns.append(
        url(
            r'^__debug__/',
            include(debug_toolbar.urls)
        )
    )
    urlpatterns.append(
        url(
            r'^media/(?P<path>.*)$',
            serve,
            {
                'document_root': settings.MEDIA_ROOT,
            }
        )
    )
    urlpatterns.append(
        url(
            r'^static/(?P<path>.*)$',
            serve,
            {
                'document_root': settings.STATIC_ROOT,
            }
        ),
    )
# Catch-all fallback; appended after any DEBUG-only routes so it is
# matched last, only when nothing else matches.
urlpatterns.append(
    url(r'^', include('campus02.base.urls', namespace='base'))
)
| #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.views.static import serve
urlpatterns = [
url(r'^', include('django.contrib.auth.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^web/', include('campus02.web.urls', namespace='web')),
url(r'^', include('campus02.base.urls', namespace='base')),
]
if settings.DEBUG:
import debug_toolbar
urlpatterns.append(
url(
r'^__debug__/',
include(debug_toolbar.urls)
)
)
urlpatterns.append(
url(
r'^media/(?P<path>.*)$',
serve,
{
'document_root': settings.MEDIA_ROOT,
}
)
)
urlpatterns.append(
url(
r'^static/(?P<path>.*)$',
serve,
{
'document_root': settings.STATIC_ROOT,
}
),
)
| Python | 0 |
c05a93d8993e14fc8521dd01a13a930e85fcb882 | fix code style in `test_db_util` (#9594) | datadog_checks_base/tests/test_db_util.py | datadog_checks_base/tests/test_db_util.py | # -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import time
from datadog_checks.base.utils.db.utils import ConstantRateLimiter, RateLimitingTTLCache
def test_constant_rate_limiter():
    """The limiter should permit roughly rate_limit sleeps per second."""
    rate_hz = 8
    duration_s = 0.5
    limiter = ConstantRateLimiter(rate_hz)
    deadline = time.time() + duration_s
    iterations = 0
    while time.time() < deadline:
        limiter.sleep()
        iterations += 1
    # Allow one iteration of slack either way for scheduling jitter.
    expected = rate_hz * duration_s
    assert expected - 1 <= iterations <= expected + 1
def test_ratelimiting_ttl_cache():
    """Exercise fill-up, rejection, and TTL-based eviction of the cache."""
    ttl = 0.1
    cache = RateLimitingTTLCache(maxsize=5, ttl=ttl)
    for key in range(5):
        assert cache.acquire(key), "cache is empty so the first set of keys should pass"
    for key in range(5, 10):
        assert not cache.acquire(key), "cache is full so we do not expect any more new keys to pass"
    for key in range(5):
        assert not cache.acquire(key), "none of the first set of keys should pass because they're still under TTL"
    assert len(cache) == 5, "cache should be at the max size"
    # Sleep past the TTL so every entry expires, then verify admission resumes.
    time.sleep(ttl * 2)
    assert len(cache) == 0, "cache should be empty after the TTL has kicked in"
    for key in range(5, 10):
        assert cache.acquire(key), "cache should be empty again so these keys should go in OK"
| # -*- coding: utf-8 -*-
# (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import math
import time
from datadog_checks.base.utils.db.utils import RateLimitingTTLCache, ConstantRateLimiter
def test_constant_rate_limiter():
rate_limit = 8
test_duration_s = 0.5
ratelimiter = ConstantRateLimiter(rate_limit)
start = time.time()
sleep_count = 0
while time.time() - start < test_duration_s:
ratelimiter.sleep()
sleep_count += 1
max_expected_count = rate_limit * test_duration_s
assert max_expected_count - 1 <= sleep_count <= max_expected_count + 1
def test_ratelimiting_ttl_cache():
ttl = 0.1
cache = RateLimitingTTLCache(maxsize=5, ttl=ttl)
for i in range(5):
assert cache.acquire(i), "cache is empty so the first set of keys should pass"
for i in range(5, 10):
assert not cache.acquire(i), "cache is full so we do not expect any more new keys to pass"
for i in range(5):
assert not cache.acquire(i), "none of the first set of keys should pass because they're still under TTL"
assert len(cache) == 5, "cache should be at the max size"
time.sleep(ttl * 2)
assert len(cache) == 0, "cache should be empty after the TTL has kicked in"
for i in range(5, 10):
assert cache.acquire(i), "cache should be empty again so these keys should go in OK"
| Python | 0 |
a9a9b60d9994bc4780e74ddd01df833a85247b95 | Change state serialiser to pass hex color values. | suddendev/game/state_encoder.py | suddendev/game/state_encoder.py | #!/usr/bin/python3
import json
def encodeState(game):
    """Serialize the whole game state object to a JSON string for clients."""
    return json.dumps(game, cls=StateEncoder)
def clamp(x):
    """Clamp *x* into the 0-255 range used for 8-bit color channels."""
    if x < 0:
        return 0
    if x > 255:
        return 255
    return x
class StateEncoder(json.JSONEncoder):
    """JSON encoder that flattens the live game state into plain dicts."""

    def default(self, o):
        # json.dumps hands us the top-level state object; everything below it
        # is converted to primitives explicitly, so no further hooks fire.
        return self.serializeState(o)

    def serializeState(self, state):
        """Top-level snapshot of the game sent to clients."""
        return {
            'players': self.serializePlayers(state.players),
            'enemies': self.serializeEnemies(state.enemies),
            'powerups': self.serializePowerups(state.powerups),
            'core': self.serializeCore(state.core)
        }

    def serializeEntity(self, entity):
        """Fields common to every in-game entity."""
        return {
            'tag': entity.tag,
            'pos': self.serializeVector(entity.pos),
            'vel': self.serializeVector(entity.vel),
            'size': entity.size,
            'healthMax': entity.healthMax,
            'health': entity.health
        }

    def serializePlayers(self, players):
        """Entity fields plus the player-only name and color."""
        serialized = []
        for player in players:
            entry = self.serializeEntity(player)
            entry['name'] = player.name
            #entry['ammo'] = player.ammo
            entry['color'] = self.serializeColor(player.color)
            serialized.append(entry)
        return serialized

    def serializeVector(self, pos):
        """2D vector as an {'x': ..., 'y': ...} mapping."""
        return {'x': pos.x, 'y': pos.y}

    def serializeColor(self, color):
        # CSS-style "#rrggbb" string, each channel clamped to 0-255.
        return "#%02x%02x%02x" % (clamp(color.r), clamp(color.g), clamp(color.b))

    #TODO
    #Duplication, will extend if enemies or powerups start to differ
    def serializeEnemies(self, enemies):
        return [self.serializeEntity(enemy) for enemy in enemies]

    def serializePowerups(self, powerups):
        return [self.serializeEntity(powerup) for powerup in powerups]

    def serializeCore(self, core):
        return self.serializeEntity(core)
| #!/usr/bin/python3
import json
def encodeState(game):
return json.dumps(game, cls=StateEncoder)
class StateEncoder(json.JSONEncoder):
def default(self, o):
return self.serializeState(o)
def serializeState(self, state):
return {
'players': self.serializePlayers(state.players),
'enemies': self.serializeEnemies(state.enemies),
'powerups': self.serializePowerups(state.powerups),
'core': self.serializeCore(state.core)
}
def serializeEntity(self, entity):
return {
'tag': entity.tag,
'pos': self.serializeVector(entity.pos),
'vel': self.serializeVector(entity.vel),
'size': entity.size,
'healthMax': entity.healthMax,
'health': entity.health
}
def serializePlayers(self, players):
result = []
for p in players:
json = self.serializeEntity(p)
json['name'] = p.name;
#json['ammo'] = p.ammo;
json['color'] = self.serializeColor(p.color);
result.append(json)
return result
def serializeVector(self, pos):
return {'x' : pos.x, 'y': pos.y}
def serializeColor(self, color):
return {
'r': color.r,
'g': color.g,
'b': color.b
}
#TODO
#Duplication, will extend if enemies or powerups start to differ
def serializeEnemies(self, enemies):
result = []
for e in enemies:
json = self.serializeEntity(e)
result.append(json)
return result
def serializePowerups(self, powerups):
result = []
for p in powerups:
json = self.serializeEntity(p)
result.append(json)
return result
def serializeCore(self, core):
return self.serializeEntity(core)
| Python | 0 |
3e5c500eed84ca24ffc31e802e061833a5a43225 | use unicode names in test | suvit_system/tests/test_node.py | suvit_system/tests/test_node.py | # -*- coding: utf-8 -*-
from openerp.tests.common import TransactionCase
class TestNode(TransactionCase):
    # Integration test for the suvit.system.node hierarchy model.
    def test_node_crud(self):
        """Exercise create/write/unlink plus ordering and duplication of nodes."""
        Node = self.env['suvit.system.node']
        root = Node.create({'name': u'Root1'})
        ch1 = Node.create({'name': u'Ch1', 'parent_id': root.id})
        ch2 = Node.create({'name': u'Ch3', 'parent_id': root.id})
        ch3 = Node.create({'name': u'Ch2', 'parent_id': root.id})
        # Rename ch2 and push it to the end of the sibling ordering.
        ch2.write({'name': u'Ch3-last', 'sequence': 1000})
        self.assertEqual(len(root.child_ids), 3)
        # Children appear sorted — presumably by sequence then name; TODO confirm
        # the model's _order against this expectation.
        self.assertEqual(root.child_ids.mapped('name'), [u'Ch1', u'Ch2', u'Ch3-last'])
        ch3.unlink()
        # create duplicate to ch2
        ch2.action_duplicate()
        # add children for ch2
        ch2_1 = Node.create({'name': u'Ch2-1', 'parent_id': ch2.id})
        ch2_2 = Node.create({'name': u'Ch2-2', 'parent_id': ch2.id})
| # -*- coding: utf-8 -*-
from openerp.tests.common import TransactionCase
class TestNode(TransactionCase):
def test_node_crud(self):
Node = self.env['suvit.system.node']
root = Node.create({'name': 'Root1'})
ch1 = Node.create({'name': 'Ch1', 'parent_id': root.id})
ch2 = Node.create({'name': 'Ch3', 'parent_id': root.id})
ch3 = Node.create({'name': 'Ch2', 'parent_id': root.id})
ch2.write({'name': 'Ch3-last', 'sequence': 1000})
self.assertEqual(len(root.child_ids), 3)
self.assertEqual(root.child_ids.mapped('name'), ['Ch1', 'Ch2', 'Ch3-last'])
ch3.unlink()
# create duplicate to ch2
ch2.action_duplicate()
# add children for ch2
ch2_1 = Node.create({'name': 'Ch2-1', 'parent_id': ch2.id})
ch2_2 = Node.create({'name': 'Ch2-2', 'parent_id': ch2.id})
| Python | 0.000026 |
c300e70bb65da3678f27c4ba01b22f9a3d4fc717 | Use spi_serial for tools | tools/serial_rf_spy.py | tools/serial_rf_spy.py | #!/usr/bin/env python
import os
import serial
import time
class SerialRfSpy:
  # Driver for the RFSpy/RileyLink radio bridge over UART or SPI.
  # NOTE: Python 2 code (print statements, str/bytes mixing).
  # Command opcodes understood by the firmware.
  CMD_GET_STATE = 1
  CMD_GET_VERSION = 2
  CMD_GET_PACKET = 3
  CMD_SEND_PACKET = 4
  CMD_SEND_AND_LISTEN = 5
  CMD_UPDATE_REGISTER = 6
  CMD_RESET = 7
  def __init__(self, serial_port, rtscts=None):
    # Hardware flow control defaults to the RFSPY_RTSCTS env var (on).
    if not rtscts:
      rtscts = int(os.getenv('RFSPY_RTSCTS', 1))
    # A port name containing 'spi' selects the SPI transport instead of UART.
    if serial_port.find('spi') >= 0:
      import spi_serial
      self.ser = spi_serial.SpiSerial()
    else:
      self.ser = serial.Serial(serial_port, 19200, rtscts=rtscts, timeout=1)
    # Accumulates raw bytes read from the device until a full response arrives.
    self.buf = bytearray()
  def do_command(self, command, param=""):
    # Convenience wrapper: send a command and block for its reply.
    self.send_command(command, param)
    return self.get_response()
  def send_command(self, command, param=""):
    # Wire format: one opcode byte followed by an optional payload.
    self.ser.write(chr(command))
    if len(param) > 0:
      self.ser.write(param)
  def get_response(self, timeout=0):
    # Poll the port until a zero byte (end-of-packet marker) is seen and
    # return the packet body without the terminator.  timeout=0 waits forever;
    # otherwise an empty bytearray is returned on expiry.
    start = time.time()
    while 1:
      bytesToRead = self.ser.inWaiting()
      if bytesToRead > 0:
        self.buf.extend(self.ser.read(bytesToRead))
      eop = self.buf.find(b'\x00',0)
      if eop >= 0:
        r = self.buf[:eop]
        del self.buf[:(eop+1)]
        return r
      if (timeout > 0) and (start + timeout < time.time()):
        return bytearray()
      time.sleep(0.005)
  def sync(self):
    # Handshake: retry GET_STATE until the device answers "OK", then fetch
    # and print the firmware version string.
    while 1:
      self.send_command(self.CMD_GET_STATE)
      data = self.get_response(1)
      if data == "OK":
        print "RileyLink " + data
        break
      print "retry", len(data), str(data).encode('hex')
    while 1:
      self.send_command(self.CMD_GET_VERSION)
      data = self.get_response(1)
      if len(data) >= 3:
        print "Version: " + data
        break
      print "retry", len(data), str(data).encode('hex')
| #!/usr/bin/env python
import os
import serial
import time
class SerialRfSpy:
CMD_GET_STATE = 1
CMD_GET_VERSION = 2
CMD_GET_PACKET = 3
CMD_SEND_PACKET = 4
CMD_SEND_AND_LISTEN = 5
CMD_UPDATE_REGISTER = 6
CMD_RESET = 7
def __init__(self, serial_port, rtscts=None):
if not rtscts:
rtscts = int(os.getenv('RFSPY_RTSCTS', 1))
self.ser = serial.Serial(serial_port, 19200, rtscts=rtscts, timeout=1)
self.buf = bytearray()
def do_command(self, command, param=""):
self.send_command(command, param)
return self.get_response()
def send_command(self, command, param=""):
self.ser.write(chr(command))
if len(param) > 0:
self.ser.write(param)
def get_response(self, timeout=0):
start = time.time()
while 1:
bytesToRead = self.ser.inWaiting()
if bytesToRead > 0:
self.buf.extend(self.ser.read(bytesToRead))
eop = self.buf.find(b'\x00',0)
if eop >= 0:
r = self.buf[:eop]
del self.buf[:(eop+1)]
return r
if (timeout > 0) and (start + timeout < time.time()):
return bytearray()
time.sleep(0.005)
def sync(self):
while 1:
self.send_command(self.CMD_GET_STATE)
data = self.get_response(1)
if data == "OK":
print "RileyLink " + data
break
print "retry", len(data), str(data).encode('hex')
while 1:
self.send_command(self.CMD_GET_VERSION)
data = self.get_response(1)
if len(data) >= 3:
print "Version: " + data
break
print "retry", len(data), str(data).encode('hex')
| Python | 0.000001 |
da3f842107e5f8062013b7a6412da2cb18592f5f | make the dt statement into an assertion | examples/basic_example.py | examples/basic_example.py | """
Basic Example
=============
Here we demonstrate both the coordinate-based slicing notatation, which is
unique to pySpecData,
as well the way in which the axis coordinates for a Fourier transform are
handled automatically.
The case considered here is that of an NMR FID that has been acquired with a
wider spectral width than the signal of interest, and with a longer acquisition
time.
"""
from pylab import *
from pyspecdata import *
fig, (ax_time, ax_freq) = subplots(2,1)
t = nddata(r_[0:0.5:1e-3], 't2') # 1 kHz SW
fake_data = exp(1j*2*pi*100*t-10*t*pi) # 10 Hz wide line at 100 Hz offset
fake_data.add_noise(0.3).set_units('t2','s')
plot(fake_data, ax=ax_time, alpha=0.2, label='raw data')
ax_freq.set_title("time domain")
fake_data.ft('t2', shift=True)
assert fake_data.get_ft_prop('t2','dt') == 1e-3
print("note that the original dwell time is",fake_data.get_ft_prop('t2','dt'),
"and the original frequency resolution is",fake_data.get_ft_prop('t2','df'))
plot(fake_data, ax=ax_freq, alpha=0.2, label='raw data')
ax_time.set_title("frequency domain")
fig.tight_layout()
fake_data = fake_data['t2':(-200,200)] # filter the data in the frequency domain by slicing from -200 to 200 Hz
plot(fake_data, '--', ax=ax_freq, alpha=0.2, label='after frequency slice')
fake_data.ift('t2') # now, move back into the time domain, where it will have less noise, and with less points
plot(fake_data, ax=ax_time, alpha=0.5, label='after frequency slice')
# in the time domain, we can either slice simply:
truncated_data = fake_data['t2':(0,0.2)]
plot(truncated_data, ax=ax_time, alpha=0.5, label='after time slice')
# or we can implement a matched filter:
fake_data *= 2*exp(-10*pi*fake_data.fromaxis('t2'))
plot(fake_data, ax=ax_time, alpha=0.5, label='apodized')
truncated_data.ft('t2') # note that after we ft the first time, it "remembers"
# whether or not we have frequency shifted, and will
# use the existing start point in the frequency domain
fake_data.ft('t2')
plot(truncated_data, ax=ax_freq, alpha=0.5, label='after time slice')
plot(fake_data, ax=ax_freq, alpha=0.5, label='after apodization')
print("note that the new dwell time of the truncated data is",truncated_data.get_ft_prop('t2','dt'),
"and the new frequency resolution is",truncated_data.get_ft_prop('t2','df'))
# finally, show that we can zero fill
truncated_data.ift('t2')
truncated_data.ft('t2', pad=256)
plot(truncated_data, ax=ax_freq, alpha=0.5, label='after time slice and zero filling')
truncated_data.ift('t2')
plot(truncated_data, '--', ax=ax_time, alpha=0.5, label='after time slice and zero filling')
ax_time.legend(**dict(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.))
ax_freq.legend(**dict(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.))
fig.tight_layout()
show()
| """
Basic Example
=============
Here we demonstrate both the coordinate-based slicing notatation, which is
unique to pySpecData,
as well the way in which the axis coordinates for a Fourier transform are
handled automatically.
The case considered here is that of an NMR FID that has been acquired with a
wider spectral width than the signal of interest, and with a longer acquisition
time.
"""
from pylab import *
from pyspecdata import *
fig, (ax_time, ax_freq) = subplots(2,1)
t = nddata(r_[0:0.5:1e-3], 't2') # 1 kHz SW
fake_data = exp(1j*2*pi*100*t-10*t*pi) # 10 Hz wide line at 100 Hz offset
fake_data.add_noise(0.3).set_units('t2','s')
plot(fake_data, ax=ax_time, alpha=0.2, label='raw data')
ax_freq.set_title("time domain")
fake_data.ft('t2', shift=True)
print("note that the original dwell time is",fake_data.get_ft_prop('t2','dt'),
"and the original frequency resolution is",fake_data.get_ft_prop('t2','df'))
plot(fake_data, ax=ax_freq, alpha=0.2, label='raw data')
ax_time.set_title("frequency domain")
fig.tight_layout()
fake_data = fake_data['t2':(-200,200)] # filter the data in the frequency domain by slicing from -200 to 200 Hz
plot(fake_data, '--', ax=ax_freq, alpha=0.2, label='after frequency slice')
fake_data.ift('t2') # now, move back into the time domain, where it will have less noise, and with less points
plot(fake_data, ax=ax_time, alpha=0.5, label='after frequency slice')
# in the time domain, we can either slice simply:
truncated_data = fake_data['t2':(0,0.2)]
plot(truncated_data, ax=ax_time, alpha=0.5, label='after time slice')
# or we can implement a matched filter:
fake_data *= 2*exp(-10*pi*fake_data.fromaxis('t2'))
plot(fake_data, ax=ax_time, alpha=0.5, label='apodized')
truncated_data.ft('t2') # note that after we ft the first time, it "remembers"
# whether or not we have frequency shifted, and will
# use the existing start point in the frequency domain
fake_data.ft('t2')
plot(truncated_data, ax=ax_freq, alpha=0.5, label='after time slice')
plot(fake_data, ax=ax_freq, alpha=0.5, label='after apodization')
print("note that the new dwell time of the truncated data is",truncated_data.get_ft_prop('t2','dt'),
"and the new frequency resolution is",truncated_data.get_ft_prop('t2','df'))
# finally, show that we can zero fill
truncated_data.ift('t2')
truncated_data.ft('t2', pad=256)
plot(truncated_data, ax=ax_freq, alpha=0.5, label='after time slice and zero filling')
truncated_data.ift('t2')
plot(truncated_data, '--', ax=ax_time, alpha=0.5, label='after time slice and zero filling')
ax_time.legend(**dict(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.))
ax_freq.legend(**dict(bbox_to_anchor=(1.05,1), loc=2, borderaxespad=0.))
fig.tight_layout()
show()
| Python | 0.999999 |
73029223f54dc6c793c190e70a135d0bd3faa50b | Update download script | download_corpora.py | download_corpora.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Downloads the necessary NLTK models and corpora.'''
from text.packages import nltk
REQUIRED_CORPORA = [
'brown',
'punkt',
'conll2000',
'maxent_treebank_pos_tagger',
]
def main():
    """Download every corpus/model listed in REQUIRED_CORPORA via NLTK."""
    for corpus in REQUIRED_CORPORA:
        print('Downloading "{0}"'.format(corpus))
        nltk.download(corpus)
    print("Finished.")
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''Downloads the necessary NLTK models and corpora.'''
from nltk import download
REQUIRED_CORPORA = [
'brown',
'punkt',
'conll2000',
'maxent_treebank_pos_tagger',
]
def main():
for each in REQUIRED_CORPORA:
print(('Downloading "{0}"'.format(each)))
download(each)
print("Finished.")
if __name__ == '__main__':
main()
| Python | 0 |
c818007491fe10101a73beeb7adfa59f9aadf3c6 | use private key instead of public | osf/management/commands/file_view_counts.py | osf/management/commands/file_view_counts.py | from __future__ import unicode_literals
import logging
import django
django.setup()
import json
import requests
import urllib
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import BaseFileNode, PageCounter
from website import settings
from keen import KeenClient
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def set_file_view_counts(state, *args, **kwargs):
    """Copy per-file page view counts from Keen analytics into PageCounter.

    ``state`` is accepted for migration-style call compatibility but is unused.
    Makes one Keen HTTP request per active osfstorage file.
    """
    # get all osfstorage files which is_deleted == False
    files = BaseFileNode.resolve_class('osfstorage', BaseFileNode.FILE).active.all()
    for file_node in files:
        # for each file get the file view counts from keen
        query = [{'property_name': 'page.info.path', 'operator': 'eq', 'property_value': file_node._id}]
        query = urllib.quote(json.dumps(query))
        # Count pageviews for this file over the last 14 days using the
        # private project credentials.
        url = 'https://api.keen.io/3.0/projects/{}/queries/count' \
              '?api_key={}&event_collection=pageviews&timezone=UTC&timeframe' \
              '=this_14_days&filters={}'.format(settings.KEEN['private']['project_id'], settings.KEEN['private']['read_key'], query)
        resp = requests.get(url)
        file_view_count = int(resp.json()['result'])
        # update the pagecounter for file view counts
        PageCounter.set_basic_counters('view:{0}:{1}'.format(file_node.node._id, file_node._id), file_view_count)
        logger.info('File ID {0}: has inputed "{1}" view counts'.format(file_node._id, file_view_count))
    logger.info('File view counts migration from keen completed.')
class Command(BaseCommand):
    """
    Backfill PageCounter file view counts from Keen analytics.

    Pass --dry to run the migration inside a transaction that is rolled back.
    """

    def add_arguments(self, parser):
        super(Command, self).add_arguments(parser)
        parser.add_argument(
            '--dry',
            action='store_true',
            dest='dry_run',
            help='Run migration and roll back changes to db',
        )

    def handle(self, *args, **options):
        dry_run = options.get('dry_run', False)
        if not dry_run:
            # Persist log output to a file for real (non-dry) runs.
            script_utils.add_file_logger(logger, __file__)
        with transaction.atomic():
            # Fix: this was previously a bare name reference
            # (`set_file_view_counts`) that never called the function, so the
            # command was a no-op.  The function ignores its `state` argument,
            # so None is passed.
            set_file_view_counts(None)
            if dry_run:
                raise RuntimeError('Dry run, transaction rolled back.')
| from __future__ import unicode_literals
import logging
import django
django.setup()
import json
import requests
import urllib
from django.core.management.base import BaseCommand
from django.db import transaction
from osf.models import BaseFileNode, PageCounter
from website import settings
from keen import KeenClient
from scripts import utils as script_utils
logger = logging.getLogger(__name__)
def set_file_view_counts(state, *args, **kwargs):
# get all osfstorage files which is_deleted == False
files = BaseFileNode.resolve_class('osfstorage', BaseFileNode.FILE).active.all()
for file_node in files:
# for each file get the file view counts from keen
client = KeenClient(
project_id=settings.KEEN['public']['project_id'],
read_key=settings.KEEN['public']['read_key'],
)
node_pageviews = client.count(
event_collection='pageviews',
timeframe='this_7_days',
group_by='node.id',
filters=[
{
'property_name': 'node.id',
'operator': 'exists',
'property_value': True
}
]
)
query = [{'property_name': 'page.info.path', 'operator': 'eq', 'property_value': file_node._id}]
query = urllib.quote(json.dumps(query))
url = 'https://api.keen.io/3.0/projects/{}/queries/count' \
'?api_key={}&event_collection=pageviews&timezone=UTC&timeframe' \
'=this_14_days&filters={}'.format(settings.KEEN['public']['project_id'], settings.KEEN['public']['read_key'], query)
resp = requests.get(url)
file_view_count = int(resp.json()['result'])
# udpate the pagecounter for file view counts
PageCounter.set_basic_counters('view:{0}:{1}'.format(file_node.node._id, file_node._id), file_view_count)
logger.info('File ID {0}: has inputed "{1}" view counts'.format(file_node._id, file_view_count))
logger.info('File view counts migration from keen completed.')
class Command(BaseCommand):
"""
Backfill Retraction.date_retracted with `RETRACTION_APPROVED` log date.
"""
def add_arguments(self, parser):
super(Command, self).add_arguments(parser)
parser.add_argument(
'--dry',
action='store_true',
dest='dry_run',
help='Run migration and roll back changes to db',
)
def handle(self, *args, **options):
dry_run = options.get('dry_run', False)
if not dry_run:
script_utils.add_file_logger(logger, __file__)
with transaction.atomic():
set_file_view_counts
if dry_run:
raise RuntimeError('Dry run, transaction rolled back.')
| Python | 0.000001 |
02490615e82be65520a8d608cff467dcf1bd7b96 | add picture commands help messages | extensions/picture/picture.py | extensions/picture/picture.py | from discord.ext import commands
import discord
import os
class Picture():
    """Cog exposing commands that post meme images bundled with this module."""

    def __init__(self, bot):
        self.bot = bot

    async def _post_image(self, filename):
        """Upload *filename* (stored next to this module) to the channel.

        Falls back to an error message when the bot lacks upload permission.
        """
        # Fix: the originals called os.path.join with a single '+'-built
        # argument; join the directory and filename properly instead.
        fp = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
        with open(fp, 'rb') as f:
            try:
                await self.bot.upload(f)
            except discord.Forbidden:
                await self.bot.say("Error: bot does not have permission to upload pictures.")

    # Each command delegates to _post_image; previously the upload logic was
    # copy-pasted four times.
    @commands.command(help="Awoo!")
    async def awoo(self):
        await self._post_image('awoo.jpg')

    @commands.command(help="Shows a baka.")
    async def baka(self):
        await self._post_image('baka.jpg')

    @commands.command(help="Summons the anti-bully ranger.")
    async def nobully(self):
        await self._post_image('nobully.jpg')

    @commands.command(help="Sad things are sad.")
    async def sad(self):
        await self._post_image('sad.jpg')
def setup(bot):
    # discord.py extension entry point: register the Picture cog with the bot.
    bot.add_cog(Picture(bot))
import discord
import os
class Picture():
def __init__(self, bot):
self.bot = bot
@commands.command()
async def awoo(self):
fp = os.path.join(os.path.dirname(os.path.realpath(__file__)) + '/awoo.jpg')
with open(fp, 'rb') as f:
try:
await self.bot.upload(f)
except discord.Forbidden:
await self.bot.say("Error: bot does not have permission to upload pictures.")
@commands.command()
async def baka(self):
fp = os.path.join(os.path.dirname(os.path.realpath(__file__)) + '/baka.jpg')
with open(fp, 'rb') as f:
try:
await self.bot.upload(f)
except discord.Forbidden:
await self.bot.say("Error: bot does not have permission to upload pictures.")
@commands.command()
async def nobully(self):
fp = os.path.join(os.path.dirname(os.path.realpath(__file__)) + '/nobully.jpg')
with open(fp, 'rb') as f:
try:
await self.bot.upload(f)
except discord.Forbidden:
await self.bot.say("Error: bot does not have permission to upload pictures.")
@commands.command()
async def sad(self):
fp = os.path.join(os.path.dirname(os.path.realpath(__file__)) + '/sad.jpg')
with open(fp, 'rb') as f:
try:
await self.bot.upload(f)
except discord.Forbidden:
await self.bot.say("Error: bot does not have permission to upload pictures.")
def setup(bot):
bot.add_cog(Picture(bot)) | Python | 0.000001 |
6148e7cccc50023955ef7c3d86709c5197ae78e7 | Make output more understandable | doc/python/cryptobox.py | doc/python/cryptobox.py | #!/usr/bin/python
import sys
import botan
def main(args = None):
    # Demo of botan's cryptobox: encrypt a file with a password, show that a
    # wrong password raises, then decrypt with the correct one.
    # NOTE: Python 2 code (print statements, `except Exception, e`).
    if args is None:
        args = sys.argv
    if len(args) != 3:
        raise Exception("Usage: <password> <input>");
    password = args[1]
    input = ''.join(open(args[2]).readlines())
    rng = botan.RandomNumberGenerator()
    ciphertext = botan.cryptobox_encrypt(input, password, rng)
    print ciphertext
    plaintext = ''
    try:
        # Deliberately corrupt the password to demonstrate decryption failure.
        plaintext = botan.cryptobox_decrypt(ciphertext, password + 'FAIL')
    except Exception, e:
        print "Good news: bad password caused exception: "
        print e
    plaintext = botan.cryptobox_decrypt(ciphertext, password)
    print "Original input was: "
    print plaintext
if __name__ == '__main__':
sys.exit(main())
| #!/usr/bin/python
import sys
import botan
def main(args = None):
if args is None:
args = sys.argv
if len(args) != 3:
raise Exception("Bad usage")
password = args[1]
input = ''.join(open(args[2]).readlines())
rng = botan.RandomNumberGenerator()
ciphertext = botan.cryptobox_encrypt(input, password, rng)
print ciphertext
plaintext = ''
try:
plaintext = botan.cryptobox_decrypt(ciphertext, password + 'FAIL')
except Exception, e:
print "Oops -- ", e
plaintext = botan.cryptobox_decrypt(ciphertext, password)
print plaintext
if __name__ == '__main__':
sys.exit(main())
| Python | 1 |
4dc6b726e5f685d01229bc6438b99f060fb63380 | Add content type and accept header checking, probably too strict. | src/webapp.py | src/webapp.py | #!/usr/bin/env python
import os
import json
import uuid
import tornado.ioloop
import tornado.web
import tornado.options
tornado.options.define('cookie_secret', default='sssecccc', help='Change this to a real secret')
tornado.options.define('favicon', default='static/favicon.ico', help='Path to favicon.ico')
tornado.options.define('static_path', default='static/', help='Path static items')
tornado.options.define('port', default=8888, help='Port to run webservice')
tornado.options.define('config', default='server.conf', help='Config file location')
JSON_CONTENT = 'application/vnd.api+json'
class MainHandler(tornado.web.RequestHandler):
    # Trivial landing-page handler.
    def get(self):
        self.write("Hello, world")
    def post(self):
        pass
cache = {}
class ProductHandler(tornado.web.RequestHandler):
    # Serves (and lazily caches) the per-shop product inventory as JSON:API.
    def get(self, shopid):
        # Build the inventory on first request for this shop and keep the
        # serialized JSON in the module-level cache keyed by shop id.
        if shopid not in cache:
            inventory = {'pizzas':
                [ {'id': 1, 'name': 'Pizza lagano', 'price': 45},
                  {'id': 2, 'name': 'Pizza vegan', 'price': 50},
                  {'id': 3, 'name': 'Pizza %s' % shopid, 'price': 55},
                ],
                'toppings': [
                    {'id': 1, 'name': 'garlic', 'price': 1},
                    {'id': 2, 'name': 'extra cheese', 'price': 5},
                    {'id': 3, 'name': 'pepperoni', 'price': 2}
                ],
                'sizes': [
                    {'id': 28, 'name': '28 cm', 'price': -5},
                    {'id': 32, 'name': '32 cm', 'price': 0},
                    {'id': 36, 'name': '36 cm', 'price': 5}
                ]
            }
            cache[shopid] = json.dumps(inventory)
        self.set_header('Content-Type', JSON_CONTENT)
        # Tag first-time visitors with a unique user cookie.
        if not self.get_cookie('user'):
            self.set_cookie('user', str(uuid.uuid1()))
        self.write(cache[shopid])
    def post(self, shopid):
        # Only shops already seen by GET (i.e. cached) accept posts.
        if shopid not in cache:
            raise tornado.web.HTTPError(404)
        # Require both Content-Type and Accept to equal the JSON:API media
        # type exactly, else reply 406.  NOTE(review): exact-match on Accept
        # is stricter than HTTP content negotiation — clients sending "*/*"
        # or media-type parameters get rejected; confirm this is intended.
        if self._check_header('Content-Type') and self._check_header('Accept'):
            self.set_header('Content-Type', JSON_CONTENT)
            self.write(shopid)
        else:
            raise tornado.web.HTTPError(406)
    def _check_header(self, key, value=None):
        # Case-insensitive exact comparison of a request header against
        # `value`, defaulting to the JSON:API content type.
        return key in self.request.headers and self.request.headers.get(key).lower() == (value or JSON_CONTENT).lower()
def main():
    """Parse options, build the Tornado application and serve until stopped."""
    # Command line is parsed first so a custom --config path can take effect below.
    tornado.options.parse_command_line()
    options = tornado.options.options
    if os.path.exists(options.config):
        # Config-file values are applied on top of the command-line ones.
        tornado.options.parse_config_file(options.config)
    handlers = [
        (r'/api/products/([^/]+)/', ProductHandler),
        (r'/', MainHandler),
    ]
    settings = {
        # static_path is resolved relative to this file's parent directory.
        'static_path': os.path.join(os.path.dirname(__file__), '..', options.static_path),
        'cookie_secret': options.cookie_secret,
        'login_url': '/login',
        'xsrf_cookies': False,
        'autoreload': True
    }
    application = tornado.web.Application(handlers, **settings)
    application.listen(options.port)
    # Blocks here until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| #!/usr/bin/env python
import os
import json
import uuid
import tornado.ioloop
import tornado.web
import tornado.options
tornado.options.define('cookie_secret', default='sssecccc', help='Change this to a real secret')
tornado.options.define('favicon', default='static/favicon.ico', help='Path to favicon.ico')
tornado.options.define('static_path', default='static/', help='Path static items')
tornado.options.define('port', default=8888, help='Port to run webservice')
tornado.options.define('config', default='server.conf', help='Config file location')
class MainHandler(tornado.web.RequestHandler):
    """Root endpoint: plain-text greeting on GET; POST is a no-op."""
    def get(self):
        self.write("Hello, world")
    def post(self):
        pass
cache = {}
class ProductHandler(tornado.web.RequestHandler):
    """Per-shop product catalogue; serialized inventories live in the module-level `cache`."""
    def get(self, shopid):
        """Return the demo inventory for `shopid`, building and caching its JSON on first use."""
        if shopid not in cache:
            inventory = {'pizzas':
                [ {'id': 1, 'name': 'Pizza lagano', 'price': 45},
                  {'id': 2, 'name': 'Pizza vegan', 'price': 50},
                  {'id': 3, 'name': 'Pizza %s' % shopid, 'price': 55},
                ],
                'toppings': [
                    {'id': 1, 'name': 'garlic', 'price': 1},
                    {'id': 2, 'name': 'extra cheese', 'price': 5},
                    {'id': 3, 'name': 'pepperoni', 'price': 2}
                ],
                'sizes': [
                    {'id': 28, 'name': '28 cm', 'price': -5},
                    {'id': 32, 'name': '32 cm', 'price': 0},
                    {'id': 36, 'name': '36 cm', 'price': 5}
                ]
            }
            cache[shopid] = json.dumps(inventory)
        self.set_header('Content-Type', 'application/vnd.api+json')
        if not self.get_cookie('user'):
            # Tag first-time visitors with a random session identifier.
            self.set_cookie('user', str(uuid.uuid1()))
        self.write(cache[shopid])
    def post(self, shopid):
        """Echo `shopid` back; 404 for shops never fetched via GET."""
        if shopid not in cache:
            raise tornado.web.HTTPError(404)
        self.set_header('Content-Type', 'application/vnd.api+json')
        self.write(shopid)
def main():
    """Parse options, build the Tornado application and serve until stopped."""
    # Command line is parsed first so a custom --config path can take effect below.
    tornado.options.parse_command_line()
    options = tornado.options.options
    if os.path.exists(options.config):
        # Config-file values are applied on top of the command-line ones.
        tornado.options.parse_config_file(options.config)
    handlers = [
        (r'/api/products/([^/]+)/', ProductHandler),
        (r'/', MainHandler),
    ]
    settings = {
        # static_path is resolved relative to this file's parent directory.
        'static_path': os.path.join(os.path.dirname(__file__), '..', options.static_path),
        'cookie_secret': options.cookie_secret,
        'login_url': '/login',
        'xsrf_cookies': False,
        'autoreload': True
    }
    application = tornado.web.Application(handlers, **settings)
    application.listen(options.port)
    # Blocks here until the IOLoop is stopped.
    tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| Python | 0 |
6174580057cc1c54539f9f05bc09c6a80df1fe4b | Fix issue following rebase | app/questionnaire/create_questionnaire_manager.py | app/questionnaire/create_questionnaire_manager.py | from app.validation.validator import Validator
from app.routing.routing_engine import RoutingEngine
from app.navigation.navigator import Navigator
from app.questionnaire.questionnaire_manager import QuestionnaireManager
from app.main import errors
from app.utilities.factory import factory
from app.metadata.metadata_store import MetaDataStore
from flask_login import current_user
import logging
from app.main.views.root import load_and_parse_schema
logger = logging.getLogger(__name__)
def create_questionnaire_manager():
    """Build a QuestionnaireManager for the survey named by the current
    user's metadata, or an error page when its schema cannot be loaded."""
    metadata = MetaDataStore.get_instance(current_user)
    eq_id = metadata.get_eq_id()
    form_type = metadata.get_form_type()
    logger.debug("Requested questionnaire %s for form type %s", eq_id, form_type)
    schema = load_and_parse_schema(eq_id, form_type)
    if not schema:
        # Unknown eq_id/form_type combination -> render the 404 page.
        return errors.page_not_found()
    # Collaborators come from the application factory or are built from the schema.
    response_store = factory.create("response-store")
    validation_store = factory.create("validation-store")
    validator = Validator(schema, validation_store, response_store)
    routing_engine = RoutingEngine(schema, response_store)
    navigation_history = factory.create("navigation-history")
    navigator = Navigator(schema, navigation_history)
    return QuestionnaireManager(schema, response_store, validator,
                                validation_store, routing_engine, navigator,
                                navigation_history, metadata)
def create_test_survey_manager():
    """Build a QuestionnaireManager wired to the built-in test survey schema."""
    metadata = MetaDataStore.get_instance(current_user)
    logger.debug("Requested test questionnaire")
    # The test schema ships with the application, so no schema lookup/parse step.
    from app.test_survey.test_survey import test_questionnaire as schema
    # load the response store
    response_store = factory.create("response-store")
    # load the validation store
    validation_store = factory.create("validation-store")
    # Create the validator
    validator = Validator(schema, validation_store, response_store)
    # Create the routing engine
    routing_engine = RoutingEngine(schema, response_store)
    # load the navigation history
    navigation_history = factory.create("navigation-history")
    # create the navigator
    navigator = Navigator(schema, navigation_history)
    # if navigator.get_current_location() == "introduction" or navigator.get_current_location() is None:
    #     navigator.go_to('questionnaire')
    # instantiate the questionnaire manager
    questionnaire_manager = QuestionnaireManager(schema,
                                                 response_store,
                                                 validator,
                                                 validation_store,
                                                 routing_engine,
                                                 navigator,
                                                 navigation_history,
                                                 metadata)
    return questionnaire_manager
| from app.validation.validator import Validator
from app.routing.routing_engine import RoutingEngine
from app.navigation.navigator import Navigator
from app.questionnaire.questionnaire_manager import QuestionnaireManager
from app.main import errors
from app.utilities.factory import factory
from app.metadata.metadata_store import MetaDataStore
from flask_login import current_user
import logging
from app.main.views.root import load_and_parse_schema
logger = logging.getLogger(__name__)
def create_questionnaire_manager():
    """Build a QuestionnaireManager for the survey named by the current
    user's metadata, or an error page when its schema cannot be loaded."""
    metadata = MetaDataStore.get_instance(current_user)
    eq_id = metadata.get_eq_id()
    form_type = metadata.get_form_type()
    logger.debug("Requested questionnaire %s for form type %s", eq_id, form_type)
    schema = load_and_parse_schema(eq_id, form_type)
    if not schema:
        # Unknown eq_id/form_type combination -> render the 404 page.
        return errors.page_not_found()
    # load the response store
    response_store = factory.create("response-store")
    # load the validation store
    validation_store = factory.create("validation-store")
    # Create the validator
    validator = Validator(schema, validation_store, response_store)
    # Create the routing engine
    routing_engine = RoutingEngine(schema, response_store)
    # load the navigation history
    navigation_history = factory.create("navigation-history")
    # create the navigator
    navigator = Navigator(schema, navigation_history)
    # if navigator.get_current_location() == "introduction" or navigator.get_current_location() is None:
    #     navigator.go_to('questionnaire')
    # instantiate the questionnaire manager
    questionnaire_manager = QuestionnaireManager(schema,
                                                 response_store,
                                                 validator,
                                                 validation_store,
                                                 routing_engine,
                                                 navigator,
                                                 navigation_history,
                                                 metadata)
    return questionnaire_manager
def create_test_survey_manager():
    """Build a QuestionnaireManager wired to the built-in test survey schema.

    Fix: obtain metadata via MetaDataStore.get_instance(current_user) -- the
    factory key "metadata-store" was inconsistent with the sibling
    create_questionnaire_manager, which passes a MetaDataStore instance into
    QuestionnaireManager.
    """
    metadata = MetaDataStore.get_instance(current_user)
    logger.debug("Requested test questionnaire")
    # The test schema ships with the application, so no schema lookup/parse step.
    from app.test_survey.test_survey import test_questionnaire as schema
    # load the response store
    response_store = factory.create("response-store")
    # load the validation store
    validation_store = factory.create("validation-store")
    # Create the validator
    validator = Validator(schema, validation_store, response_store)
    # Create the routing engine
    routing_engine = RoutingEngine(schema, response_store)
    # load the navigation history
    navigation_history = factory.create("navigation-history")
    # create the navigator
    navigator = Navigator(schema, navigation_history)
    # instantiate the questionnaire manager
    questionnaire_manager = QuestionnaireManager(schema,
                                                 response_store,
                                                 validator,
                                                 validation_store,
                                                 routing_engine,
                                                 navigator,
                                                 navigation_history,
                                                 metadata)
    return questionnaire_manager
| Python | 0.000001 |
b7b78a9282e2a4ee776020f83812e3abd437467b | Corrige erro de QuerySet immutable | djangosige/apps/cadastro/views/cliente.py | djangosige/apps/cadastro/views/cliente.py | # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse_lazy
from djangosige.apps.cadastro.forms import ClienteForm
from djangosige.apps.cadastro.models import Cliente
from .base import AdicionarPessoaView, PessoasListView, EditarPessoaView
class AdicionarClienteView(AdicionarPessoaView):
    """Create a new Cliente via a prefixed ClienteForm."""
    template_name = "cadastro/pessoa_add.html"
    success_url = reverse_lazy('cadastro:listaclientesview')
    success_message = "Cliente <b>%(nome_razao_social)s </b>adicionado com sucesso."
    permission_codename = 'add_cliente'
    def get_context_data(self, **kwargs):
        """Expose the page title, back-link URL and person kind to the template."""
        context = super(AdicionarClienteView, self).get_context_data(**kwargs)
        context['title_complete'] = 'CADASTRAR CLIENTE'
        context['return_url'] = reverse_lazy('cadastro:listaclientesview')
        context['tipo_pessoa'] = 'cliente'
        return context
    def get(self, request, *args, **kwargs):
        """Render an empty, prefixed ClienteForm.

        NOTE: the base class accepts the form as an extra positional argument
        (nonstandard Django CBV signature) -- see AdicionarPessoaView.
        """
        form = ClienteForm(prefix='cliente_form')
        return super(AdicionarClienteView, self).get(request, form, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Validate and save the new Cliente."""
        # Work on a mutable copy: request.POST is an immutable QueryDict.
        req_post = request.POST.copy()
        # Strip '.' from the credit limit (presumably pt-BR thousands
        # separators, e.g. "1.234,56") so decimal parsing succeeds -- confirm.
        req_post['cliente_form-limite_de_credito'] = req_post['cliente_form-limite_de_credito'].replace(
            '.', '')
        request.POST = req_post
        form = ClienteForm(request.POST, request.FILES,
                           prefix='cliente_form', request=request)
        return super(AdicionarClienteView, self).post(request, form, *args, **kwargs)
class ClientesListView(PessoasListView):
    """Listing of every registered Cliente."""

    template_name = 'cadastro/pessoa_list.html'
    model = Cliente
    context_object_name = 'all_clientes'
    success_url = reverse_lazy('cadastro:listaclientesview')
    permission_codename = 'view_cliente'

    def get_context_data(self, **kwargs):
        """Expose the page title, the "add" URL and the person kind to the template."""
        ctx = super(ClientesListView, self).get_context_data(**kwargs)
        ctx.update(
            title_complete='CLIENTES CADASTRADOS',
            add_url=reverse_lazy('cadastro:addclienteview'),
            tipo_pessoa='cliente',
        )
        return ctx
class EditarClienteView(EditarPessoaView):
    """Edit an existing Cliente; mirrors AdicionarClienteView's form handling."""
    form_class = ClienteForm
    model = Cliente
    template_name = "cadastro/pessoa_edit.html"
    success_url = reverse_lazy('cadastro:listaclientesview')
    success_message = "Cliente <b>%(nome_razao_social)s </b>editado com sucesso."
    permission_codename = 'change_cliente'
    def get_context_data(self, **kwargs):
        """Expose the back-link URL and person kind to the template."""
        context = super(EditarClienteView, self).get_context_data(**kwargs)
        context['return_url'] = reverse_lazy('cadastro:listaclientesview')
        context['tipo_pessoa'] = 'cliente'
        return context
    def get(self, request, *args, **kwargs):
        """Render the edit form bound to the requested Cliente."""
        self.object = self.get_object()
        form_class = self.get_form_class()
        # Prefix set on the class so rendered field names match the POST keys.
        form_class.prefix = "cliente_form"
        form = self.get_form(form_class)
        return super(EditarClienteView, self).get(request, form, *args, **kwargs)
    def post(self, request, *args, **kwargs):
        """Validate and save the edited Cliente."""
        # Work on a mutable copy: request.POST is an immutable QueryDict.
        req_post = request.POST.copy()
        # Strip '.' from the credit limit (presumably pt-BR thousands
        # separators) so decimal parsing succeeds -- confirm with the form.
        req_post['cliente_form-limite_de_credito'] = req_post['cliente_form-limite_de_credito'].replace(
            '.', '')
        request.POST = req_post
        self.object = self.get_object()
        form_class = self.get_form_class()
        form = form_class(request.POST, request.FILES,
                          prefix='cliente_form', instance=self.object, request=request)
        return super(EditarClienteView, self).post(request, form, *args, **kwargs)
| # -*- coding: utf-8 -*-
from django.core.urlresolvers import reverse_lazy
from djangosige.apps.cadastro.forms import ClienteForm
from djangosige.apps.cadastro.models import Cliente
from .base import AdicionarPessoaView, PessoasListView, EditarPessoaView
class AdicionarClienteView(AdicionarPessoaView):
    """Create a new Cliente via a prefixed ClienteForm.

    Fix: request.POST is an immutable QueryDict; flipping the private
    `_mutable` attribute is unsupported API. Work on a mutable `.copy()` and
    rebind it instead.
    """
    template_name = "cadastro/pessoa_add.html"
    success_url = reverse_lazy('cadastro:listaclientesview')
    success_message = "Cliente <b>%(nome_razao_social)s </b>adicionado com sucesso."
    permission_codename = 'add_cliente'

    def get_context_data(self, **kwargs):
        """Expose the page title, back-link URL and person kind to the template."""
        context = super(AdicionarClienteView, self).get_context_data(**kwargs)
        context['title_complete'] = 'CADASTRAR CLIENTE'
        context['return_url'] = reverse_lazy('cadastro:listaclientesview')
        context['tipo_pessoa'] = 'cliente'
        return context

    def get(self, request, *args, **kwargs):
        """Render an empty, prefixed ClienteForm."""
        form = ClienteForm(prefix='cliente_form')
        return super(AdicionarClienteView, self).get(request, form, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Validate and save; strips '.' separators from the credit limit first."""
        req_post = request.POST.copy()  # mutable copy of the immutable QueryDict
        req_post['cliente_form-limite_de_credito'] = req_post['cliente_form-limite_de_credito'].replace(
            '.', '')
        request.POST = req_post
        form = ClienteForm(request.POST, request.FILES,
                           prefix='cliente_form', request=request)
        return super(AdicionarClienteView, self).post(request, form, *args, **kwargs)
class ClientesListView(PessoasListView):
    """Listing of every registered Cliente."""
    template_name = 'cadastro/pessoa_list.html'
    model = Cliente
    context_object_name = 'all_clientes'
    success_url = reverse_lazy('cadastro:listaclientesview')
    permission_codename = 'view_cliente'
    def get_context_data(self, **kwargs):
        """Expose the page title, the "add" URL and the person kind to the template."""
        context = super(ClientesListView, self).get_context_data(**kwargs)
        context['title_complete'] = 'CLIENTES CADASTRADOS'
        context['add_url'] = reverse_lazy('cadastro:addclienteview')
        context['tipo_pessoa'] = 'cliente'
        return context
class EditarClienteView(EditarPessoaView):
    """Edit an existing Cliente.

    Fix: the old post() assigned into request.POST directly; a submitted
    request's QueryDict is immutable, so that assignment raises
    "This QueryDict instance is immutable". Work on a mutable `.copy()` and
    rebind it instead.
    """
    form_class = ClienteForm
    model = Cliente
    template_name = "cadastro/pessoa_edit.html"
    success_url = reverse_lazy('cadastro:listaclientesview')
    success_message = "Cliente <b>%(nome_razao_social)s </b>editado com sucesso."
    permission_codename = 'change_cliente'

    def get_context_data(self, **kwargs):
        """Expose the back-link URL and person kind to the template."""
        context = super(EditarClienteView, self).get_context_data(**kwargs)
        context['return_url'] = reverse_lazy('cadastro:listaclientesview')
        context['tipo_pessoa'] = 'cliente'
        return context

    def get(self, request, *args, **kwargs):
        """Render the edit form bound to the requested Cliente."""
        self.object = self.get_object()
        form_class = self.get_form_class()
        # Prefix set on the class so rendered field names match the POST keys.
        form_class.prefix = "cliente_form"
        form = self.get_form(form_class)
        return super(EditarClienteView, self).get(request, form, *args, **kwargs)

    def post(self, request, *args, **kwargs):
        """Validate and save; strips '.' separators from the credit limit first."""
        req_post = request.POST.copy()  # mutable copy of the immutable QueryDict
        req_post['cliente_form-limite_de_credito'] = req_post['cliente_form-limite_de_credito'].replace(
            '.', '')
        request.POST = req_post
        self.object = self.get_object()
        form_class = self.get_form_class()
        form = form_class(request.POST, request.FILES,
                          prefix='cliente_form', instance=self.object, request=request)
        return super(EditarClienteView, self).post(request, form, *args, **kwargs)
| Python | 0.000001 |
a2a26ed8b216c4a40db7a384571d05c690e25cef | Update test expectation | examples/run-example-8.py | examples/run-example-8.py | #!/usr/bin/python
import os, sys, inspect
DIR = os.path.dirname(os.path.realpath(__file__))
NORM_EXPECT='Require Coq.omega.Omega.\nRequire Top.A.\nRequire Top.B.\nRequire Top.C.\nRequire Top.D.\n\nImport Top.D.\n\nFail Check A.mA.axA.\n'
GET_EXPECT = {
'Coq.omega.Omega': None,
'Top.A': 'Module Export Top_DOT_A.\nModule Export Top.\nModule A.\nImport Coq.omega.Omega.\nModule mA.\n Section secA.\n Axiom axA : Set.\n End secA.\nEnd mA.\n\nModule mA2.\nEnd mA2.\n\nEnd A.\n\nEnd Top.\n\nEnd Top_DOT_A.\n',
'Top.B': 'Module Export Top_DOT_B.\nModule Export Top.\nModule B.\nImport Top.A.\n\nEnd B.\n\nEnd Top.\n\nEnd Top_DOT_B.\n',
'Top.C': 'Module Export Top_DOT_C.\nModule Export Top.\nModule C.\nImport Top.A.\n\nEnd C.\n\nEnd Top.\n\nEnd Top_DOT_C.\n',
'Top.D': 'Module Export Top_DOT_D.\nModule Export Top.\nModule D.\nImport Top.C Top.B.\nImport Top.A.\n\nEnd D.\n\nEnd Top.\n\nEnd Top_DOT_D.\n'
}
def trace(frame, event, arg):
    """sys.settrace-compatible hook that echoes each event as a tuple."""
    record = (frame, event, arg)
    print(record)
if __name__ == '__main__':
    # Make the repository root importable so replace_imports resolves.
    # from http://stackoverflow.com/a/6098238/377022
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0], "..")))
    if cmd_subfolder not in sys.path:
        sys.path.insert(0, cmd_subfolder)
    from replace_imports import normalize_requires, get_required_contents
    # Run from inside the example directory so relative .v paths resolve.
    os.chdir(DIR)
    os.chdir('example_8')
    NORM_FOUND = normalize_requires("example_8.v")
    print('if %s != %s:' % (repr(NORM_FOUND), repr(NORM_EXPECT)))
    if NORM_FOUND != NORM_EXPECT:
        print('sys.exit(1)')
        sys.exit(1)
    # Check each library's extracted contents against the expected text;
    # a None expectation means the library has no retrievable contents.
    for libname, expect in GET_EXPECT.items():
        print('libname: ' + libname)
        try:
            got = get_required_contents(libname)
        except IOError:
            got = None
        print('if %s != %s:' % (repr(got), repr(expect)))
        if got != expect:
            print('sys.exit(1)')
            sys.exit(1)
    print('sys.exit(0)')
    sys.exit(0)
| #!/usr/bin/python
import os, sys, inspect
DIR = os.path.dirname(os.path.realpath(__file__))
NORM_EXPECT='Require Coq.omega.Omega.\nRequire Top.A.\nRequire Top.B.\nRequire Top.C.\nRequire Top.D.\n\nImport Top.D.\n'
GET_EXPECT = {
'Coq.omega.Omega': None,
'Top.A': 'Module Export Top_DOT_A.\nModule Export Top.\nModule A.\nImport Coq.omega.Omega.\nModule mA.\n Section secA.\n Axiom axA : Set.\n End secA.\nEnd mA.\n\nModule mA2.\nEnd mA2.\n\nEnd A.\n\nEnd Top.\n\nEnd Top_DOT_A.\n',
'Top.B': 'Module Export Top_DOT_B.\nModule Export Top.\nModule B.\nImport Top.A.\n\nEnd B.\n\nEnd Top.\n\nEnd Top_DOT_B.\n',
'Top.C': 'Module Export Top_DOT_C.\nModule Export Top.\nModule C.\nImport Top.A.\n\nEnd C.\n\nEnd Top.\n\nEnd Top_DOT_C.\n',
'Top.D': 'Module Export Top_DOT_D.\nModule Export Top.\nModule D.\nImport Top.C Top.B.\nImport Top.A.\n\nEnd D.\n\nEnd Top.\n\nEnd Top_DOT_D.\n'
}
def trace(frame, event, arg):
    """sys.settrace-compatible hook that echoes each event as a tuple."""
    print((frame, event, arg))
if __name__ == '__main__':
    # Make the repository root importable so replace_imports resolves.
    # from http://stackoverflow.com/a/6098238/377022
    cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0], "..")))
    if cmd_subfolder not in sys.path:
        sys.path.insert(0, cmd_subfolder)
    from replace_imports import normalize_requires, get_required_contents
    # Run from inside the example directory so relative .v paths resolve.
    os.chdir(DIR)
    os.chdir('example_8')
    NORM_FOUND = normalize_requires("example_8.v")
    print('if %s != %s:' % (repr(NORM_FOUND), repr(NORM_EXPECT)))
    if NORM_FOUND != NORM_EXPECT:
        print('sys.exit(1)')
        sys.exit(1)
    # Check each library's extracted contents against the expected text;
    # a None expectation means the library has no retrievable contents.
    for libname, expect in GET_EXPECT.items():
        print('libname: ' + libname)
        try:
            got = get_required_contents(libname)
        except IOError:
            got = None
        print('if %s != %s:' % (repr(got), repr(expect)))
        if got != expect:
            print('sys.exit(1)')
            sys.exit(1)
    print('sys.exit(0)')
    sys.exit(0)
| Python | 0.000001 |
48a89b4cdb2e084f4b349e9ca12a91daba9e0734 | Fix build with Python 3 | spyne/test/transport/test_msgpack.py | spyne/test/transport/test_msgpack.py |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import msgpack
from spyne import Application, ServiceBase, rpc
from spyne.model import Unicode
from spyne.protocol.msgpack import MessagePackDocument
from twisted.trial import unittest
class TestMessagePackServer(unittest.TestCase):
    """Round-trip tests for the Spyne MessagePack server over a fake Twisted transport.

    Fix: assertEquals is a deprecated unittest alias (removed in Python 3.12);
    use assertEqual.
    """

    def gen_prot(self, app):
        """Wrap `app` in a TwistedMessagePackProtocol bound to an in-memory transport."""
        from spyne.server.twisted.msgpack import TwistedMessagePackProtocol
        from twisted.test.proto_helpers import StringTransportWithDisconnection
        from spyne.server.msgpack import MessagePackServerBase

        prot = TwistedMessagePackProtocol(MessagePackServerBase(app))
        transport = StringTransportWithDisconnection()
        prot.makeConnection(transport)
        transport.protocol = prot
        return prot

    def test_roundtrip(self):
        """A synchronous call is answered with [0, packed-return-value]."""
        v = "yaaay!"

        class SomeService(ServiceBase):
            @rpc(Unicode, _returns=Unicode)
            def yay(ctx, u):
                return u

        app = Application([SomeService], 'tns',
                          in_protocol=MessagePackDocument(),
                          out_protocol=MessagePackDocument())

        prot = self.gen_prot(app)
        request = msgpack.packb({'yay': [v]})
        prot.dataReceived(msgpack.packb([1, request]))
        val = prot.transport.value()
        print(repr(val))
        val = msgpack.unpackb(val)
        print(repr(val))
        self.assertEqual(val, [0, msgpack.packb(v)])

    def test_roundtrip_deferred(self):
        """A Deferred return value is serialized once it fires."""
        from twisted.internet import reactor
        from twisted.internet.task import deferLater

        v = "yaaay!"
        p_ctx = []

        class SomeService(ServiceBase):
            @rpc(Unicode, _returns=Unicode)
            def yay(ctx, u):
                def _cb():
                    return u
                # Keep the ctx so the test can hook the out_object Deferred.
                p_ctx.append(ctx)
                return deferLater(reactor, 0.1, _cb)

        app = Application([SomeService], 'tns',
                          in_protocol=MessagePackDocument(),
                          out_protocol=MessagePackDocument())

        prot = self.gen_prot(app)
        request = msgpack.packb({'yay': [v]})

        def _ccb(_):
            val = prot.transport.value()
            print(repr(val))
            val = msgpack.unpackb(val)
            print(repr(val))
            self.assertEqual(val, [0, msgpack.packb(v)])

        prot.dataReceived(msgpack.packb([1, request]))
        return p_ctx[0].out_object[0].addCallback(_ccb)
|
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
import msgpack
from spyne import Application, ServiceBase, rpc
from spyne.model import Unicode
from spyne.protocol.msgpack import MessagePackDocument
from twisted.trial import unittest
class TestMessagePackServer(unittest.TestCase):
    """Round-trip tests for the Spyne MessagePack server over a fake Twisted transport.

    Fixes: Python-2-only `print repr(val)` statements are a SyntaxError under
    Python 3 -- use the print() function; assertEquals is a deprecated
    unittest alias -- use assertEqual.
    """

    def gen_prot(self, app):
        """Wrap `app` in a TwistedMessagePackProtocol bound to an in-memory transport."""
        from spyne.server.twisted.msgpack import TwistedMessagePackProtocol
        from twisted.test.proto_helpers import StringTransportWithDisconnection
        from spyne.server.msgpack import MessagePackServerBase

        prot = TwistedMessagePackProtocol(MessagePackServerBase(app))
        transport = StringTransportWithDisconnection()
        prot.makeConnection(transport)
        transport.protocol = prot
        return prot

    def test_roundtrip(self):
        """A synchronous call is answered with [0, packed-return-value]."""
        v = "yaaay!"

        class SomeService(ServiceBase):
            @rpc(Unicode, _returns=Unicode)
            def yay(ctx, u):
                return u

        app = Application([SomeService], 'tns',
                          in_protocol=MessagePackDocument(),
                          out_protocol=MessagePackDocument())

        prot = self.gen_prot(app)
        request = msgpack.packb({'yay': [v]})
        prot.dataReceived(msgpack.packb([1, request]))
        val = prot.transport.value()
        print(repr(val))
        val = msgpack.unpackb(val)
        print(repr(val))
        self.assertEqual(val, [0, msgpack.packb(v)])

    def test_roundtrip_deferred(self):
        """A Deferred return value is serialized once it fires."""
        from twisted.internet import reactor
        from twisted.internet.task import deferLater

        v = "yaaay!"
        p_ctx = []

        class SomeService(ServiceBase):
            @rpc(Unicode, _returns=Unicode)
            def yay(ctx, u):
                def _cb():
                    return u
                # Keep the ctx so the test can hook the out_object Deferred.
                p_ctx.append(ctx)
                return deferLater(reactor, 0.1, _cb)

        app = Application([SomeService], 'tns',
                          in_protocol=MessagePackDocument(),
                          out_protocol=MessagePackDocument())

        prot = self.gen_prot(app)
        request = msgpack.packb({'yay': [v]})

        def _ccb(_):
            val = prot.transport.value()
            print(repr(val))
            val = msgpack.unpackb(val)
            print(repr(val))
            self.assertEqual(val, [0, msgpack.packb(v)])

        prot.dataReceived(msgpack.packb([1, request]))
        return p_ctx[0].out_object[0].addCallback(_ccb)
| Python | 0.000001 |
6cdc05abb5777656684d18cfb215aa7dbcb10c43 | Create indexes if not created | scripts/install/create_tables_cloudsql.py | scripts/install/create_tables_cloudsql.py | #! /usr/bin/env python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "engine", "src"))
import MySQLdb
import time
from juliabox.db import JBoxUserV2, JBoxDynConfig, JBoxSessionProps, JBoxInstanceProps, JBPluginDB, JBoxAPISpec, JBoxUserProfile
from juliabox.jbox_util import JBoxCfg
# import any plugins that contribute tables
import juliabox.plugins.course_homework
import juliabox.plugins.usage_accounting
def table_exists(table_name):
    """Return True when `table_name` exists, probed via a zero-row SELECT
    on the module-level cursor `c`.

    Fix: catch Exception instead of a bare `except:`, which would also
    swallow KeyboardInterrupt/SystemExit.
    """
    try:
        c.execute("select * from %s where 1=0" % (table_name,))
        c.fetchall()
        return c.description is not None
    except Exception:
        # Any DB error (typically "table doesn't exist") means not present.
        return False
def table_create(table_name, columns=None, types=None, keys=None, keys_types=None):
    """Create `table_name` with the given key and non-key columns.

    `keys`/`keys_types` and `columns`/`types` are parallel name/SQL-type
    lists; key columns come first and become the primary key. Executes on
    the module-level cursor `c` and commits on `conn`.
    """
    col_defs = []
    if keys is not None:
        col_defs.extend('`' + name + '` ' + sql_type
                        for name, sql_type in zip(keys, keys_types))
    if columns is not None:
        col_defs.extend('`' + name + '` ' + sql_type
                        for name, sql_type in zip(columns, types))
    sql = 'create table `%s` (%s' % (table_name, ', '.join(col_defs))
    if keys is not None:
        sql += ', primary key (%s)' % (', '.join(keys),)
    sql += ')'
    c.execute(sql)
    conn.commit()
def index_exists(tname, iname):
    """Return True when index `iname` already exists on table `tname`."""
    f = c.execute("show index from `%s` where Key_name=\"%s\"" % (tname, iname))
    # Fix: compare with != -- `is not 0` tests object identity against an
    # int literal, which is implementation-dependent and a SyntaxWarning on
    # Python >= 3.8.
    return f != 0
def indexes_create(table_name, indexes):
    """Create every secondary index described in `indexes` on `table_name`.

    Each entry is a dict with 'name' and 'cols' (list of column names);
    indexes that already exist are skipped.

    Fix: the success message used `&` instead of `%` for string formatting,
    raising TypeError the first time an index was actually created; also
    `== None` replaced with `is None`.
    """
    if indexes is None:
        print("No indexes to create")
        return
    for idx in indexes:
        name = idx['name']
        if index_exists(table_name, name):
            print("Index %s already exists" % name)
        else:
            cols = ', '.join('`' + col + '`' for col in idx['cols'])
            sql = "CREATE INDEX `%s` ON `%s`(%s)" % (name, table_name, cols)
            c.execute(sql)
            conn.commit()
            print("Created index %s" % name)
def get_connection():
    """Open a MySQL connection using the credentials in the JuliaBox config.

    Reads the 'db' section (user, passwd, unix_socket, db) of
    /jboxengine/conf/jbox.user, which is a Python-literal dict.
    """
    # Copy from /jboxengine/conf/jbox.user
    conf = None
    with open('/jboxengine/conf/jbox.user') as f:
        # NOTE(review): eval() executes arbitrary code from the config file.
        # The file is operator-controlled here, but ast.literal_eval would be
        # safer if the config is a plain literal -- confirm.
        conf = eval(f.read())
    db = conf['db']
    user = db['user']
    passwd = db['passwd']
    unix_socket = db['unix_socket']
    db = db['db']
    return MySQLdb.connect(user=user, passwd=passwd, db=db, unix_socket=unix_socket)
# --- script body: connect and create all tables/indexes -------------------
conn = get_connection()
c = conn.cursor()
# Core JuliaBox tables plus any contributed by DB plugins.
tables = [JBoxUserV2, JBoxDynConfig, JBoxSessionProps, JBoxInstanceProps, JBoxAPISpec, JBoxUserProfile]
for plugin in JBPluginDB.jbox_get_plugins(JBPluginDB.JBP_TABLE_DYNAMODB):
    tables.append(plugin)
for cls in tables:
    print("Creating %s..." % (cls.NAME,))
    if table_exists(cls.NAME):
        print("\texists already!")
    else:
        table_create(cls.NAME, cls.ATTRIBUTES, cls.TYPES, cls.KEYS, cls.KEYS_TYPES)
        print("\tcreated.")
# Indexes are created in a second pass so they also get added to tables
# that already existed before this run.
for cls in tables:
    print("Creating indexes for %s..." % (cls.NAME))
    indexes_create(cls.NAME, cls.SQL_INDEXES)
print('Creating scale_up_time')
if table_exists('scale_up_time'):
    print('\texists already!')
else:
    # Single-column bookkeeping table seeded with the current epoch time.
    table_create('scale_up_time', ['scale_up_time'], ['INT'])
    c.execute('INSERT INTO scale_up_time (scale_up_time) VALUES (%d)' % int(time.time()))
    conn.commit()
    print('\tcreated.')
print('Creating mails')
if table_exists('mails'):
    print('\texists already!')
else:
    table_create('mails', ['rcpt', 'sender', 'timestamp'],
                 ['VARCHAR(200)', 'VARCHAR(200)', 'INT'])
    print('\tcreated.')
| #! /usr/bin/env python
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), "..", "..", "engine", "src"))
import MySQLdb
import time
from juliabox.db import JBoxUserV2, JBoxDynConfig, JBoxSessionProps, JBoxInstanceProps, JBPluginDB, JBoxAPISpec, JBoxUserProfile
from juliabox.jbox_util import JBoxCfg
# import any plugins that contribute tables
import juliabox.plugins.course_homework
import juliabox.plugins.usage_accounting
def table_exists(table_name):
    """Return True when `table_name` exists, probed via a zero-row SELECT
    on the module-level cursor `c`."""
    try:
        c.execute("select * from %s where 1=0" % (table_name,))
        c.fetchall()
        return c.description is not None
    # NOTE(review): bare except also swallows KeyboardInterrupt/SystemExit;
    # `except Exception:` would be safer.
    except:
        return False
def table_create(table_name, columns=None, types=None, keys=None, keys_types=None):
    """Create `table_name` with the given key and non-key columns.

    `keys`/`keys_types` and `columns`/`types` are parallel name/SQL-type
    lists; key columns come first and become the primary key. Executes on
    the module-level cursor `c` and commits on `conn`.
    """
    stmt = []
    if keys is not None:
        for k, t in zip(keys, keys_types):
            stmt.append('`' + k + '` ' + t)
    if columns is not None:
        for col, t in zip(columns, types):
            stmt.append('`' + col + '` ' + t)
    sql = 'create table `%s` (%s' % (table_name, ', '.join(stmt))
    if keys is not None:
        sql += (', primary key (%s)' % (', '.join(keys),))
    sql += ')'
    c.execute(sql)
    conn.commit()
def indexes_create(table_name, indexes):
    """Create every secondary index described in `indexes` on `table_name`.

    Each entry is a dict with 'name' and 'cols' (list of column names).

    Fix: `'`' + idx['cols'] + '`'` concatenated a str with the cols list,
    raising TypeError; quote and join each column name individually.
    Also `== None` replaced with `is None`.
    """
    if indexes is None:
        return
    for idx in indexes:
        name = idx['name']
        cols = ', '.join('`' + col + '`' for col in idx['cols'])
        sql = "CREATE INDEX `%s` ON `%s`(%s)" % (name, table_name, cols)
        c.execute(sql)
        conn.commit()
def get_connection():
    """Open a MySQL connection using the credentials in the JuliaBox config.

    Reads the 'db' section (user, passwd, unix_socket, db) of
    /jboxengine/conf/jbox.user, which is a Python-literal dict.
    """
    # Copy from /jboxengine/conf/jbox.user
    conf = None
    with open('/jboxengine/conf/jbox.user') as f:
        # NOTE(review): eval() executes arbitrary code from the config file.
        # The file is operator-controlled here, but ast.literal_eval would be
        # safer if the config is a plain literal -- confirm.
        conf = eval(f.read())
    db = conf['db']
    user = db['user']
    passwd = db['passwd']
    unix_socket = db['unix_socket']
    db = db['db']
    return MySQLdb.connect(user=user, passwd=passwd, db=db, unix_socket=unix_socket)
# --- script body: connect and create all tables -------------------------
conn = get_connection()
c = conn.cursor()
# Core JuliaBox tables plus any contributed by DB plugins.
tables = [JBoxUserV2, JBoxDynConfig, JBoxSessionProps, JBoxInstanceProps, JBoxAPISpec, JBoxUserProfile]
for plugin in JBPluginDB.jbox_get_plugins(JBPluginDB.JBP_TABLE_DYNAMODB):
    tables.append(plugin)
for cls in tables:
    print("Creating %s..." % (cls.NAME,))
    if table_exists(cls.NAME):
        print("\texists already!")
    else:
        # Indexes are only created together with a fresh table here.
        table_create(cls.NAME, cls.ATTRIBUTES, cls.TYPES, cls.KEYS, cls.KEYS_TYPES)
        indexes_create(cls.NAME, cls.SQL_INDEXES)
        print("\tcreated.")
print('Creating scale_up_time')
if table_exists('scale_up_time'):
    print('\texists already!')
else:
    # Single-column bookkeeping table seeded with the current epoch time.
    table_create('scale_up_time', ['scale_up_time'], ['INT'])
    c.execute('INSERT INTO scale_up_time (scale_up_time) VALUES (%d)' % int(time.time()))
    conn.commit()
    print('\tcreated.')
print('Creating mails')
if table_exists('mails'):
    print('\texists already!')
else:
    table_create('mails', ['rcpt', 'sender', 'timestamp'],
                 ['VARCHAR(200)', 'VARCHAR(200)', 'INT'])
    print('\tcreated.')
| Python | 0.000002 |
0dcad3ac5f3754fedf12c7aca7050ec6ab85840d | fix merge cleaning | transports/__init__.py | transports/__init__.py | from yo_ug_http import YoUgHttpTransport
from cm_nl_yo_ug_http import CmYoTransport
from cm_nl_http import CmTransport
from push_tz_yo_ug_http import PushYoTransport
from push_tz_http import PushTransport
from push_tz_smpp import PushTzSmppTransport
from mobivate_http import MobivateHttpTransport
from movilgate_http import MovilgateHttpTransport
from mobtech_ml_http import MobtechMlHttpTransport
from smsgh_smpp import SmsghSmppTransport
from forward_http import ForwardHttp
from ttc_bf_http import TtcBfHttpTransport
__all__ = ["YoUgHttpTransport",
"CmYoTransport",
"CmTransport",
"PushYoTransport",
"PushTransport",
"PushTzSmppTransport",
"MobivateHttpTransport",
"MovilgateHttpTransport",
"MobtechMlHttpTransport",
"SmsghSmppTransport",
"TtcBfHttpTransport",
"ForwardHttp"]
| from yo_ug_http import YoUgHttpTransport
from cm_nl_yo_ug_http import CmYoTransport
from cm_nl_http import CmTransport
from push_tz_yo_ug_http import PushYoTransport
from push_tz_http import PushTransport
from push_tz_smpp import PushTzSmppTransport
from mobivate_http import MobivateHttpTransport
from movilgate_http import MovilgateHttpTransport
from mobtech_ml_http import MobtechMlHttpTransport
from smsgh_smpp import SmsghSmppTransport
from forward_http import ForwardHttp
from ttc_bf_http import TtcBfHttpTransport
__all__ = ["YoUgHttpTransport",
"CmYoTransport",
"CmTransport",
"PushYoTransport",
"PushTransport",
"PushTzSmppTransport",
"MobivateHttpTransport",
"MovilgateHttpTransport",
<<<<<<< HEAD
"MobtechMlHttpTransport",
"SmsghSmppTransport",
=======
"TtcBfHttpTransport",
>>>>>>> feature/burkina
"ForwardHttp"]
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.