commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
08be74a30805f3afb86cafc815955288ae96f7ea | fix the bug | jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm,jeremykid/FunAlgorithm | python_practice/graph/undirectedGraph.py | python_practice/graph/undirectedGraph.py | import math
class undirectedGraph(object):
def __init__(self, degrees):
self.degrees = degrees
self.adjacent_matrix = []
for i in range(degrees):
self.adjacent_matrix.append([0]*degrees)
def __str__(self):
output = ""
for row in self.adjacent_matrix:
for item in row:
output += "|"+str(item)
output += "|\n"
return output
def addEdge(self, vertex1, vertex2, weight = 1):
self.adjacent_matrix[vertex1][vertex2] += weight
self.adjacent_matrix[vertex2][vertex1] += weight
def isTree(self):
#copy the adjacent matrix
#let used edge = 0
for row in range(self.degrees):
for node in range(self.degrees):
#use depthFirstSeach to check if there are back
break
return True
def getAllEdges(self):
result = []
for node_start in range(self.degrees):
for node_end in range(self.degrees):
if self.adjacent_matrix[node_start][node_end] != 0:
result.append([node_start, node_end])
return result
def depthFirstSearch(self, start_vertex, finded_vertexes):
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes.append(node)
finded_vertexes = self.depthFirstSearch(node, finded_vertexes)
return finded_vertexes
def breathFirstSearch(self, start_vertex, finded_vertexes):
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes.append(node)
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes = self.breathFirstSearch(node, finded_vertexes)
return finded_vertexes
def Dijkstra(self, start):
#with weight
distanceList = [float('inf')]*self.degrees
distanceList[start] = 0
self.DijkstraRecursion(start, distanceList, [])
return distanceList
def DijkstraRecursion(self ,start ,distance_list ,min_distance_list):
if len(min_distance_list) == self.degrees:
return distance_list
else:
min_distance_list.append(start)
for node in range(self.degrees):
if self.adjacent_matrix[start][node] != 0:
new_distance = distance_list[start] + self.adjacent_matrix[start][node]
if new_distance <= distance_list[node]:
distance_list[node] = new_distance
self.DijkstraRecursion(start ,distance_list ,min_distance_list)
| import math
class undirectedGraph(object):
def __init__(self, degrees):
self.degrees = degrees
self.adjacent_matrix = []
for i in range(degrees):
self.adjacent_matrix.append([0]*degrees)
def __str__(self):
output = ""
for row in self.adjacent_matrix:
for item in row:
output += "|"+str(item)
output += "|\n"
return output
def addEdge(self, vertex1, vertex2, weight = 1):
self.adjacent_matrix[vertex1][vertex2] += weight
self.adjacent_matrix[vertex2][vertex1] += weight
def isTree(self):
#copy the adjacent matrix
#let used edge = 0
for row in range(self.degrees):
for node in range(self.degrees):
#use depthFirstSeach to check if there are back
break
return True
def getAllEdges(self):
result = []
for node_start in range(self.degrees):
for node_end in range(self.degrees):
if self.adjacent_matrix[node_start][node_end] != 0:
result.append([node_start, node_end])
return result
def depthFirstSearch(self, start_vertex, finded_vertexes):
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes.append(node)
finded_vertexes = self.depthFirstSearch(node, finded_vertexes)
return finded_vertexes
def breathFirstSearch(self, start_vertex, finded_vertexes):
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes.append(node)
for node in range(self.degrees):
if self.adjacent_matrix[start_vertex][node] != 0 and node not in finded_vertexes:
finded_vertexes = self.breathFirstSearch(node, finded_vertexes)
return finded_vertexes
def Dijkstra(self, start):
#with weight
distanceList = [float('inf')]*self.degrees
distanceList[start] = 0
self.DijkstraRecursion(start, distanceList, [])
return distanceList
def DijkstraRecursion(self ,start ,distance_list ,min_distance_list):
if len(min_distance_list) == self.degrees:
return distance_list
else:
min_distance_list.append(start)
for node in range(self.degrees):
if self.adjacent_matrix[start][node] != 0:
new_distance = distance_list[start] + self.adjacent_matrix[start][node]
if new_distance <= distance_list[node]:
distance_list[node] = new_distance
min_distance_list.append(start)
self.DijkstraRecursion(start ,distance_list ,min_distance_list)
| mit | Python |
38ed21b8a2438f9dedd5f61d74d6117717228510 | Bump DSTK version number | jotterbach/dstk | DSTK/__init__.py | DSTK/__init__.py | __version__ = '0.0.2'
__all__ = ['GAM', 'Distinctor', 'AutoEncoder', 'BoostedFeatureSelectors', 'utils']
| __version__ = '0.0.1'
__all__ = ['GAM', 'Distinctor', 'AutoEncoder', 'BoostedFeatureSelectors', 'utils']
| mit | Python |
94ce77ad973abe63476e014fa61f5699706fb88b | use text file for writing bvecs | StongeEtienne/dipy,mdesco/dipy,jyeatman/dipy,oesteban/dipy,villalonreina/dipy,demianw/dipy,oesteban/dipy,demianw/dipy,maurozucchelli/dipy,matthieudumont/dipy,FrancoisRheaultUS/dipy,beni55/dipy,beni55/dipy,JohnGriffiths/dipy,mdesco/dipy,samuelstjean/dipy,nilgoyyou/dipy,nilgoyyou/dipy,samuelstjean/dipy,FrancoisRheaultUS/dipy,StongeEtienne/dipy,sinkpoint/dipy,matthieudumont/dipy,Messaoud-Boudjada/dipy,samuelstjean/dipy,maurozucchelli/dipy,JohnGriffiths/dipy,rfdougherty/dipy,Messaoud-Boudjada/dipy,sinkpoint/dipy,maurozucchelli/dipy,jyeatman/dipy,villalonreina/dipy,rfdougherty/dipy | dipy/io/tests/test_io_gradients.py | dipy/io/tests/test_io_gradients.py | import os.path as osp
import tempfile
import numpy as np
import numpy.testing as npt
from nose.tools import assert_raises
from dipy.data import get_data
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
def test_read_bvals_bvecs():
fimg, fbvals, fbvecs = get_data('small_101D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bvals, gt.bvals)
npt.assert_array_equal(bvecs, gt.bvecs)
# None should also work as an input:
bvals_none, bvecs_none = read_bvals_bvecs(None, fbvecs)
npt.assert_array_equal(bvecs_none, gt.bvecs)
bvals_none, bvecs_none = read_bvals_bvecs(fbvals, None)
npt.assert_array_equal(bvals_none, gt.bvals)
# Test for error raising with unknown file formats:
nan_fbvecs = osp.splitext(fbvecs)[0] + '.nan' # Nonsense extension
npt.assert_raises(ValueError, read_bvals_bvecs, fbvals, nan_fbvecs)
# Test for error raising with incorrect file-contents:
# These bvecs only have two rows/columns:
new_bvecs1 = bvecs[:,:2]
# Make a temporary file
bv_file1 = tempfile.NamedTemporaryFile(mode='wt')
# And fill it with these 2-columned bvecs:
for x in range(new_bvecs1.shape[0]):
bv_file1.file.write('%s %s\n' %
(new_bvecs1[x][0], new_bvecs1[x][1]))
bv_file1.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file1.name)
# These bvecs are saved as one long array:
new_bvecs2 = np.ravel(bvecs)
bv_file2 = tempfile.NamedTemporaryFile()
np.save(bv_file2, new_bvecs2)
bv_file2.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file2.name)
# Theres less bvecs than bvals:
new_bvecs3 = bvecs[:-1, :]
bv_file3 = tempfile.NamedTemporaryFile()
np.save(bv_file3, new_bvecs3)
bv_file3.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file3.name)
if __name__ == '__main__':
from numpy.testing import run_module_suite
run_module_suite()
| import os.path as osp
import tempfile
import numpy as np
import numpy.testing as npt
from nose.tools import assert_raises
from dipy.data import get_data
from dipy.io.gradients import read_bvals_bvecs
from dipy.core.gradients import gradient_table
def test_read_bvals_bvecs():
fimg, fbvals, fbvecs = get_data('small_101D')
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gt = gradient_table(bvals, bvecs)
npt.assert_array_equal(bvals, gt.bvals)
npt.assert_array_equal(bvecs, gt.bvecs)
# None should also work as an input:
bvals_none, bvecs_none = read_bvals_bvecs(None, fbvecs)
npt.assert_array_equal(bvecs_none, gt.bvecs)
bvals_none, bvecs_none = read_bvals_bvecs(fbvals, None)
npt.assert_array_equal(bvals_none, gt.bvals)
# Test for error raising with unknown file formats:
nan_fbvecs = osp.splitext(fbvecs)[0] + '.nan' # Nonsense extension
npt.assert_raises(ValueError, read_bvals_bvecs, fbvals, nan_fbvecs)
# Test for error raising with incorrect file-contents:
# These bvecs only have two rows/columns:
new_bvecs1 = bvecs[:,:2]
# Make a temporary file
bv_file1 = tempfile.NamedTemporaryFile()
# And fill it with these 2-columned bvecs:
[bv_file1.file.write('%s %s\n'%(new_bvecs1[x][0], new_bvecs1[x][1]))
for x in range(new_bvecs1.shape[0])]
bv_file1.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file1.name)
# These bvecs are saved as one long array:
new_bvecs2 = np.ravel(bvecs)
bv_file2 = tempfile.NamedTemporaryFile()
np.save(bv_file2, new_bvecs2)
bv_file2.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file2.name)
# Theres less bvecs than bvals:
new_bvecs3 = bvecs[:-1, :]
bv_file3 = tempfile.NamedTemporaryFile()
np.save(bv_file3, new_bvecs3)
bv_file3.close()
npt.assert_raises(IOError,read_bvals_bvecs, fbvals, bv_file3.name)
if __name__ == '__main__':
from numpy.testing import run_module_suite
run_module_suite()
| bsd-3-clause | Python |
8762e5e9eb97a9213c423fd7c36d1096614b9837 | Drop dead test | Yubico/yubikey-manager,Yubico/yubikey-manager | test/on_yubikey/test_cli_misc.py | test/on_yubikey/test_cli_misc.py | import unittest
from .util import (DestructiveYubikeyTestCase, is_fips, ykman_cli)
class TestYkmanInfo(DestructiveYubikeyTestCase):
def test_ykman_info(self):
info = ykman_cli('info')
self.assertIn('Device type:', info)
self.assertIn('Serial number:', info)
self.assertIn('Firmware version:', info)
@unittest.skipIf(is_fips(), 'Not applicable to YubiKey FIPS.')
def test_ykman_info_does_not_report_fips_for_non_fips_device(self):
info = ykman_cli('info')
self.assertNotIn('FIPS', info)
@unittest.skipIf(not is_fips(), 'YubiKey FIPS required.')
def test_ykman_info_reports_fips_status(self):
info = ykman_cli('info')
self.assertIn('FIPS Approved Mode:', info)
self.assertIn(' FIDO U2F:', info)
self.assertIn(' OATH:', info)
self.assertIn(' OTP:', info)
| import unittest
from .util import (DestructiveYubikeyTestCase, is_fips, ykman_cli)
class TestYkmanInfo(DestructiveYubikeyTestCase):
def test_ykman_info(self):
info = ykman_cli('info')
self.assertIn('Device type:', info)
self.assertIn('Serial number:', info)
self.assertIn('Firmware version:', info)
@unittest.skipIf(not is_fips(), 'YubiKey FIPS required.')
def test_ykman_info_reports_fips_device(self):
info = ykman_cli('info')
self.assertIn('This YubiKey is capable of FIPS Approved Mode.', info)
@unittest.skipIf(is_fips(), 'Not applicable to YubiKey FIPS.')
def test_ykman_info_does_not_report_fips_for_non_fips_device(self):
info = ykman_cli('info')
self.assertNotIn('FIPS', info)
@unittest.skipIf(not is_fips(), 'YubiKey FIPS required.')
def test_ykman_info_reports_fips_status(self):
info = ykman_cli('info')
self.assertIn('FIPS Approved Mode:', info)
self.assertIn(' FIDO U2F:', info)
self.assertIn(' OATH:', info)
self.assertIn(' OTP:', info)
| bsd-2-clause | Python |
3201c461cc86b2ce79f78e47a19d0489d50a0b08 | Improve setup.py | ashishb/adb-enhanced | adb-enhanced/setup.py | adb-enhanced/setup.py | from setuptools import setup, find_packages
import sys, os
version = '1.1'
setup(name='adb-enhanced',
version=version,
description="An ADB wrapper for Android developers",
long_description="""\
An ADB wrapper for Android developers for testing.
See Readme for more details -
https://github.com/ashishb/adb-enhanced/blob/master/README.md
""",
classifiers=["Intended Audience :: Developers"], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='Android ADB developer',
author='Ashish Bhatia',
author_email='ashishb@ashishb.net',
url='https://github.com/ashishb/adb-enhanced',
license='Apache',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
'docopt',
],
entry_points={
# -*- Entry points: -*-
'console_scripts': [
'adbe=adbe:main',
],
}
)
| from setuptools import setup, find_packages
import sys, os
version = '1.1'
setup(name='adb-enhanced',
version=version,
description="An ADB wrapper for Android developers",
long_description="""\
An ADB wrapper for Android developers for testing.
See Readme for more details -
https://github.com/ashishb/adb-enhanced/blob/master/README.md
""",
classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
keywords='Android ADB developer',
author='Ashish Bhatia',
author_email='ashishbhatia.ab@gmail.com',
url='https://github.com/ashishb/adb-enhanced',
license='Apache',
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
zip_safe=True,
install_requires=[
# -*- Extra requirements: -*-
'docopt',
],
entry_points={
# -*- Entry points: -*-
'console_scripts': [
'adbe=adbe:main',
],
}
)
| apache-2.0 | Python |
c8478e40bdb996d5e0a1f01ae0ae55e6926f318d | Remove short call timeout from func test | openstack/nova,openstack/nova,openstack/nova,mahak/nova,mahak/nova,mahak/nova | nova/tests/functional/regressions/test_bug_1909120.py | nova/tests/functional/regressions/test_bug_1909120.py | # Copyright 2020, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
class TestDetachVolumeWhileComputeDown(integrated_helpers._IntegratedTestBase):
"""Regression test for bug 1909120
This regression test aims to assert the behaviour of the
os-volume_attachments API when removing a volume attachment from an
instance hosted on a down compute.
"""
microversion = 'latest'
def test_volume_detach_while_compute_down(self):
# Launch a test instance
server = self._create_server(networks='none')
# Attach the volume
volume_id = self.cinder.IMAGE_BACKED_VOL
self.api.post_server_volume(
server['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
# Assert that the volume is attached in Nova
attachment = self.api.get_server_volumes(server['id'])[0]
self.assertEqual(volume_id, attachment.get('volumeId'))
# Assert that the volume is attached in the Cinder fixture
self.assertIn(
volume_id, self.cinder.volume_ids_for_instance(server['id']))
# Stop and force down the compute
self.compute.stop()
compute_id = self.admin_api.get_services(
binary='nova-compute')[0]['id']
self.admin_api.put_service_force_down(compute_id, True)
# Assert that the request is rejected by n-api with a 409 response
ex = self.assertRaises(
client.OpenStackApiException,
self.api.delete_server_volume, server['id'], volume_id)
self.assertEqual(409, ex.response.status_code)
| # Copyright 2020, Red Hat, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import integrated_helpers
class TestDetachVolumeWhileComputeDown(integrated_helpers._IntegratedTestBase):
"""Regression test for bug 1909120
This regression test aims to assert the behaviour of the
os-volume_attachments API when removing a volume attachment from an
instance hosted on a down compute.
"""
microversion = 'latest'
def test_volume_detach_while_compute_down(self):
# _IntegratedTestBase uses CastAsCall so set the response timeout to 1
self.flags(rpc_response_timeout=1)
# Launch a test instance
server = self._create_server(networks='none')
# Attach the volume
volume_id = self.cinder.IMAGE_BACKED_VOL
self.api.post_server_volume(
server['id'],
{'volumeAttachment': {'volumeId': volume_id}}
)
# Assert that the volume is attached in Nova
attachment = self.api.get_server_volumes(server['id'])[0]
self.assertEqual(volume_id, attachment.get('volumeId'))
# Assert that the volume is attached in the Cinder fixture
self.assertIn(
volume_id, self.cinder.volume_ids_for_instance(server['id']))
# Stop and force down the compute
self.compute.stop()
compute_id = self.admin_api.get_services(
binary='nova-compute')[0]['id']
self.admin_api.put_service_force_down(compute_id, True)
# Assert that the request is rejected by n-api with a 409 response
ex = self.assertRaises(
client.OpenStackApiException,
self.api.delete_server_volume, server['id'], volume_id)
self.assertEqual(409, ex.response.status_code)
| apache-2.0 | Python |
d09517af26a458af51e23a0f892798eca24db776 | Fix test | zwadar/pyqode.core,pyQode/pyqode.core,pyQode/pyqode.core | test/test_backend/test_server.py | test/test_backend/test_server.py | import os
import sys
from pyqode.core.backend import server
from pyqode.core.frontend.client import JsonTcpClient
from threading import Timer
from subprocess import Popen
process = None
port = None
def test_default_parser():
assert server.default_parser() is not None
def run_client_process():
global process, port
wd = os.path.join(os.getcwd(), 'test', 'test_backend')
script = os.path.join(wd, 'cli.py')
process = Popen([sys.executable, script, port], cwd=wd)
def test_json_server():
global port
class Args:
port = 6789
argv = list(sys.argv)
srv = server.JsonServer(args=Args())
srv.server_close()
port = str(JsonTcpClient.pick_free_port())
sys.argv.clear()
sys.argv.append('server.py')
sys.argv.append(port)
srv = server.JsonServer()
Timer(10, srv.server_close).start()
Timer(1, run_client_process).start()
try:
srv.serve_forever()
except ValueError:
pass # when closed from client we have a ValueError because of a
# bad file descriptior, this is not a bug in pyqode but in
# socketserver
| import os
import sys
from pyqode.core.backend import server
from pyqode.core.frontend.client import JsonTcpClient
from threading import Timer
from subprocess import Popen
process = None
port = None
def test_default_parser():
assert server.default_parser() is not None
def run_client_process():
global process, port
wd = os.path.join(os.getcwd(), 'test', 'test_backend')
script = os.path.join(wd, 'cli.py')
process = Popen([sys.executable, script, port], cwd=wd)
def test_json_server():
global port
class Args:
port = 6789
argv = sys.argv.copy()
srv = server.JsonServer(args=Args())
srv.server_close()
port = str(JsonTcpClient.pick_free_port())
sys.argv.clear()
sys.argv.append('server.py')
sys.argv.append(port)
srv = server.JsonServer()
Timer(10, srv.server_close).start()
Timer(1, run_client_process).start()
try:
srv.serve_forever()
except ValueError:
pass # when closed from client we have a ValueError because of a
# bad file descriptior, this is not a bug in pyqode but in
# socketserver
| mit | Python |
6a7fb1ff05202f60c7036db369926e3056372123 | Simplify test of rsqrt function. | chainer/chainer,niboshi/chainer,hvy/chainer,hvy/chainer,anaruse/chainer,ktnyt/chainer,jnishi/chainer,pfnet/chainer,keisuke-umezawa/chainer,cupy/cupy,ysekky/chainer,kashif/chainer,keisuke-umezawa/chainer,ronekko/chainer,niboshi/chainer,wkentaro/chainer,cupy/cupy,kiyukuta/chainer,okuta/chainer,delta2323/chainer,hvy/chainer,ktnyt/chainer,keisuke-umezawa/chainer,niboshi/chainer,hvy/chainer,ktnyt/chainer,aonotas/chainer,rezoo/chainer,okuta/chainer,jnishi/chainer,cupy/cupy,ktnyt/chainer,keisuke-umezawa/chainer,wkentaro/chainer,okuta/chainer,wkentaro/chainer,niboshi/chainer,tkerola/chainer,chainer/chainer,okuta/chainer,jnishi/chainer,jnishi/chainer,wkentaro/chainer,chainer/chainer,cupy/cupy,chainer/chainer | tests/chainer_tests/functions_tests/math_tests/test_sqrt.py | tests/chainer_tests/functions_tests/math_tests/test_sqrt.py | import unittest
import numpy
import chainer.functions as F
from chainer import testing
#
# sqrt
def make_data(dtype, shape):
x = numpy.random.uniform(0.1, 5, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy
@testing.math_function_test(F.Sqrt(), make_data=make_data)
class TestSqrt(unittest.TestCase):
pass
#
# rsqrt
def rsqrt(x):
return numpy.reciprocal(numpy.sqrt(x))
class TestRsqrt(unittest.TestCase):
def test_rsqrt(self):
x = numpy.random.uniform(0.1, 5, (3, 2)).astype(numpy.float32)
testing.assert_allclose(F.rsqrt(x).data, rsqrt(x))
testing.run_module(__name__, __file__)
| import unittest
import numpy
import chainer.functions as F
from chainer import testing
def make_data(dtype, shape):
x = numpy.random.uniform(0.1, 5, shape).astype(dtype)
gy = numpy.random.uniform(-1, 1, shape).astype(dtype)
return x, gy
#
# sqrt
@testing.math_function_test(F.Sqrt(), make_data=make_data)
class TestSqrt(unittest.TestCase):
pass
#
# rsqrt
def rsqrt(x, dtype=numpy.float32):
return numpy.reciprocal(numpy.sqrt(x, dtype=dtype))
# TODO(takagi) Fix test of rsqrt not to use this decorator.
@testing.math_function_test(F.rsqrt, func_expected=rsqrt, make_data=make_data)
class TestRsqrt(unittest.TestCase):
pass
testing.run_module(__name__, __file__)
| mit | Python |
3c94bd63563d02854e504b2b3569eb770ae4f6f8 | use more friendly output for US sensors | Ecam-Eurobot-2017/main,Ecam-Eurobot-2017/main,Ecam-Eurobot-2017/main | code/raspberrypi/range_sensors.py | code/raspberrypi/range_sensors.py | from i2c import I2C
from enum import IntEnum
class Command(IntEnum):
MeasureOne = 1
MeasureAll = 2
Count = 3
class RangeSensor(I2C):
"""
This class is an abstraction around the I2C communication with
the range-sensor module.
Details of the "protocol" used:
The Raspberry Pi sends a byte to the module containing a command
and eventually a sensor number. Both informations are coded on 4 bits
totalling 8 bits together. The null byte, 0x00, is used to indicate errors.
This means that we have 15 possible commands and 15 possible sensors.
We only use 3 different commands:
1. MeasureOne (get_range): 0001 xxxx
This command requests the last measure of the sensor number xxxx
Sensor indices begin at 1. If the sensor does not exists, the module
will return a null byte. If the sensor does exists, two bytes will be
returned making up the 16 bits value together.
2. MeasureAll (get_ranges): 0010 0000
This command requests the last measures of all the available sensors.
The response to this request is a sequence of 2*n bytes where n is the
number of available sensors.
3. Count (get_number_of_sensors): 0011 0000
This command requests the number of available sensors.
The response is only one byte as there are only 15 possible sensors.
"""
def __init__(self, address):
"""Constructor takes the adress of the I2C module"""
super(RangeSensor, self).__init__(address)
self.n = self.get_number_of_sensors()
def get_range(self, sensor):
"""Requests the last measurement of a specific sensor"""
cmd = I2C.pack8(Command.MeasureOne, sensor)
self.send(cmd)
r = self.receive(2)
return I2C.pack16(r[1], r[0])
def get_ranges(self):
"""Requests the last measurements of all sensors"""
cmd = I2C.pack8(Command.MeasureAll, 0)
self.send(cmd)
data = self.receive(2 * self.n)
ranges = list()
for i in range(self.n):
j = i*2
ranges.append(I2C.pack16(data[(i*2)+1], data[i*2]))
return ranges
def get_number_of_sensors(self):
"""Requests the number of available sensors"""
cmd = I2C.pack8(Command.Count, 0)
self.send(cmd)
return self.receive()
| from i2c import I2C
from enum import IntEnum
class Command(IntEnum):
MeasureOne = 1
MeasureAll = 2
Count = 3
class RangeSensor(I2C):
"""
This class is an abstraction around the I2C communication with
the range-sensor module.
Details of the "protocol" used:
The Raspberry Pi sends a byte to the module containing a command
and eventually a sensor number. Both informations are coded on 4 bits
totalling 8 bits together. The null byte, 0x00, is used to indicate errors.
This means that we have 15 possible commands and 15 possible sensors.
We only use 3 different commands:
1. MeasureOne (get_range): 0001 xxxx
This command requests the last measure of the sensor number xxxx
Sensor indices begin at 1. If the sensor does not exists, the module
will return a null byte. If the sensor does exists, two bytes will be
returned making up the 16 bits value together.
2. MeasureAll (get_ranges): 0010 0000
This command requests the last measures of all the available sensors.
The response to this request is a sequence of 2*n bytes where n is the
number of available sensors.
3. Count (get_number_of_sensors): 0011 0000
This command requests the number of available sensors.
The response is only one byte as there are only 15 possible sensors.
"""
def __init__(self, address):
"""Constructor takes the adress of the I2C module"""
super(RangeSensor, self).__init__(address)
self.n = self.get_number_of_sensors()
def get_range(self, sensor):
"""Requests the last measurement of a specific sensor"""
cmd = I2C.pack8(Command.MeasureOne, sensor)
self.send(cmd)
r = self.receive(2)
return I2C.pack16(r[1], r[0])
def get_ranges(self):
"""Requests the last measurements of all sensors"""
cmd = I2C.pack8(Command.MeasureAll, 0)
self.send(cmd)
return self.receive(2 * self.n)
def get_number_of_sensors(self):
"""Requests the number of available sensors"""
cmd = I2C.pack8(Command.Count, 0)
self.send(cmd)
return self.receive()
| mit | Python |
f7cf66867bff75f5b53b6d0a2919a67e6e22242f | remove test too | commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot,commaai/openpilot | selfdrive/locationd/test/test_calibrationd.py | selfdrive/locationd/test/test_calibrationd.py | #!/usr/bin/env python3
import random
import unittest
import cereal.messaging as messaging
from common.params import Params
from selfdrive.locationd.calibrationd import Calibrator
class TestCalibrationd(unittest.TestCase):
def test_read_saved_params(self):
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = random.randint(1, 10)
msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
Params().put("CalibrationParams", msg.to_bytes())
c = Calibrator(param_put=True)
self.assertEqual(list(msg.liveCalibration.rpyCalib), c.rpy)
self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks)
if __name__ == "__main__":
unittest.main()
| #!/usr/bin/env python3
import json
import random
import unittest
import cereal.messaging as messaging
from common.params import Params
from selfdrive.locationd.calibrationd import Calibrator
class TestCalibrationd(unittest.TestCase):
def test_read_saved_params_json(self):
r = [random.random() for _ in range(3)]
b = random.randint(1, 10)
cal_params = {"calib_radians": r,
"valid_blocks": b}
Params().put("CalibrationParams", json.dumps(cal_params).encode('utf8'))
c = Calibrator(param_put=True)
self.assertEqual(r, c.rpy)
self.assertEqual(b, c.valid_blocks)
def test_read_saved_params(self):
msg = messaging.new_message('liveCalibration')
msg.liveCalibration.validBlocks = random.randint(1, 10)
msg.liveCalibration.rpyCalib = [random.random() for _ in range(3)]
Params().put("CalibrationParams", msg.to_bytes())
c = Calibrator(param_put=True)
self.assertEqual(list(msg.liveCalibration.rpyCalib), c.rpy)
self.assertEqual(msg.liveCalibration.validBlocks, c.valid_blocks)
if __name__ == "__main__":
unittest.main()
| mit | Python |
828f8fbae2f1960bc2a764394db1a0320bf33705 | Add recursive flag | Encrylize/EasyEuler | EasyEuler/cli.py | EasyEuler/cli.py | import sys
import os
import click
from .utils import write_to_file, get_problem, get_problem_id, verify_solution
from .types import ProblemType
commands = click.Group()
@commands.command()
@click.option('--path', '-p', type=click.Path())
@click.option('--overwrite', '-o', is_flag=True)
@click.argument('problem', type=ProblemType())
@click.argument('language', required=False, default='python')
def generate(problem, language, path, overwrite):
try:
path, success = write_to_file(problem, language, path, overwrite)
except (FileNotFoundError, PermissionError) as e:
sys.exit('An exception occurred: %s' % e)
if not success:
sys.exit('%s already exists. Use the --overwrite flag to overwrite it' %
click.format_filename(path))
click.echo('Written to %s' % click.format_filename(path))
@commands.command()
@click.option('--language', '-l')
@click.option('--recursive', '-r', is_flag=True)
@click.argument('path', type=click.Path(exists=True, readable=True), nargs=-1)
def verify(path, language, recursive):
for path_ in path:
if os.path.isdir(path_):
if recursive:
process_dir(path_, language)
else:
click.echo('Skipping %s because it is a directory and ' \
'--recursive was not specified' %
click.format_filename(path_))
continue
validate_file(path_, language)
def process_dir(path, language):
for root, directories, file_names in os.walk(path):
for file_name in file_names:
validate_file(os.path.join(root, file_name), language)
def validate_file(path, language):
if os.path.isdir(path):
click.echo('Skipping %s because it is a directory' %
click.format_filename(path))
return
problem_id = get_problem_id(path)
if problem_id is None or get_problem(problem_id) is None:
click.echo('Skipping %s because it does not contain ' \
'a valid problem ID' % click.format_filename(path))
return
status, output = verify_solution(path, problem_id, language)
click.echo('Checking output of %s: %s' % (click.format_filename(path),
output))
click.echo({'C': 'Correct', 'I': 'Incorrect', 'E': 'Error'}[status])
| import sys
import os
import click
from .utils import write_to_file, get_problem, get_problem_id, verify_solution
from .types import ProblemType
commands = click.Group()
@commands.command()
@click.option('--path', '-p', type=click.Path())
@click.option('--overwrite', '-o', is_flag=True)
@click.argument('problem', type=ProblemType())
@click.argument('language', required=False, default='python')
def generate(problem, language, path, overwrite):
try:
path, success = write_to_file(problem, language, path, overwrite)
except (FileNotFoundError, PermissionError) as e:
sys.exit('An exception occurred: %s' % e)
if not success:
sys.exit('%s already exists. Use the --overwrite flag to overwrite it' %
click.format_filename(path))
click.echo('Written to %s' % click.format_filename(path))
@commands.command()
@click.option('--language', '-l')
@click.argument('path', type=click.Path(exists=True, readable=True), nargs=-1)
def verify(path, language):
for path_ in path:
valid, status, output = process_path(path_, language)
if not valid:
continue
click.echo('Checking output of %s: %s' % (click.format_filename(path_),
output))
click.echo({'C': 'Correct', 'I': 'Incorrect', 'E': 'Error'}[status])
def process_path(path, language):
if os.path.isdir(path):
click.echo('Skipping %s because it is a directory' %
click.format_filename(path))
return False, None, None
problem_id = get_problem_id(path)
if problem_id is None or get_problem(problem_id) is None:
click.echo('Skipping %s because it does not contain ' \
'a valid problem ID' % click.format_filename(path))
return False, None, None
status, output = verify_solution(path, problem_id, language)
return True, status, output
| mit | Python |
0aa97796cf61499ec425e2092ca0086c501938ca | Make the widget warning easier to catch by specifying the module. | ipython/ipython,ipython/ipython | IPython/html/widgets/__init__.py | IPython/html/widgets/__init__.py | from .widget import Widget, DOMWidget, CallbackDispatcher, register
from .widget_bool import Checkbox, ToggleButton
from .widget_button import Button
from .widget_box import Box, Popup, FlexBox, HBox, VBox
from .widget_float import FloatText, BoundedFloatText, FloatSlider, FloatProgress, FloatRangeSlider
from .widget_image import Image
from .widget_int import IntText, BoundedIntText, IntSlider, IntProgress, IntRangeSlider
from .widget_output import Output
from .widget_selection import RadioButtons, ToggleButtons, Dropdown, Select
from .widget_selectioncontainer import Tab, Accordion
from .widget_string import HTML, Latex, Text, Textarea
from .interaction import interact, interactive, fixed, interact_manual
from .widget_link import Link, link, DirectionalLink, dlink
# Deprecated classes
from .widget_bool import CheckboxWidget, ToggleButtonWidget
from .widget_button import ButtonWidget
from .widget_box import ContainerWidget, PopupWidget
from .widget_float import FloatTextWidget, BoundedFloatTextWidget, FloatSliderWidget, FloatProgressWidget
from .widget_image import ImageWidget
from .widget_int import IntTextWidget, BoundedIntTextWidget, IntSliderWidget, IntProgressWidget
from .widget_selection import RadioButtonsWidget, ToggleButtonsWidget, DropdownWidget, SelectWidget
from .widget_selectioncontainer import TabWidget, AccordionWidget
from .widget_string import HTMLWidget, LatexWidget, TextWidget, TextareaWidget
# We use warn_explicit so we have very brief messages without file or line numbers.
# The concern is that file or line numbers will confuse the interactive user.
# To ignore this warning, do:
#
# from warnings import filterwarnings
# filterwarnings('ignore', module='IPython.html.widgets')
from warnings import warn_explicit
__warningregistry__ = {}
warn_explicit("IPython widgets are experimental and may change in the future.",
FutureWarning, '', 0, module = 'IPython.html.widgets',
registry = __warningregistry__, module_globals = globals)
| from .widget import Widget, DOMWidget, CallbackDispatcher, register
from .widget_bool import Checkbox, ToggleButton
from .widget_button import Button
from .widget_box import Box, Popup, FlexBox, HBox, VBox
from .widget_float import FloatText, BoundedFloatText, FloatSlider, FloatProgress, FloatRangeSlider
from .widget_image import Image
from .widget_int import IntText, BoundedIntText, IntSlider, IntProgress, IntRangeSlider
from .widget_output import Output
from .widget_selection import RadioButtons, ToggleButtons, Dropdown, Select
from .widget_selectioncontainer import Tab, Accordion
from .widget_string import HTML, Latex, Text, Textarea
from .interaction import interact, interactive, fixed, interact_manual
from .widget_link import Link, link, DirectionalLink, dlink
# Deprecated classes
from .widget_bool import CheckboxWidget, ToggleButtonWidget
from .widget_button import ButtonWidget
from .widget_box import ContainerWidget, PopupWidget
from .widget_float import FloatTextWidget, BoundedFloatTextWidget, FloatSliderWidget, FloatProgressWidget
from .widget_image import ImageWidget
from .widget_int import IntTextWidget, BoundedIntTextWidget, IntSliderWidget, IntProgressWidget
from .widget_selection import RadioButtonsWidget, ToggleButtonsWidget, DropdownWidget, SelectWidget
from .widget_selectioncontainer import TabWidget, AccordionWidget
from .widget_string import HTMLWidget, LatexWidget, TextWidget, TextareaWidget
# we use warn_explicit so we have very brief messages without file or line numbers
# the concern is that file or line numbers will confuse the interactive user
from warnings import warn_explicit
__warningregistry__ = {}
warn_explicit("IPython widgets are experimental and may change in the future.",
FutureWarning, '', 0, registry=__warningregistry__)
| bsd-3-clause | Python |
75c9f644a72b4c2937409c4b0bbf28a917f4f0dd | Use np.round to get more accurate float images | starcalibre/microscopium,jni/microscopium,Don86/microscopium,Don86/microscopium,microscopium/microscopium,jni/microscopium,microscopium/microscopium | husc/io.py | husc/io.py | import os
import numpy as np
import Image
def imwrite(ar, fn, bitdepth=None):
"""Write a np.ndarray 2D volume to a .png or .tif image
Parameters
----------
ar : numpy ndarray, shape (M, N)
The volume to be written to disk.
fn : string
The file name to which to write the volume.
bitdepth : {8, 16, 32}, optional
The bits per pixel.
Returns
-------
None : None
No value is returned.
Notes
-----
The syntax `imwrite(fn, ar)` is also supported.
"""
if type(fn) == np.ndarray and type(ar) == str:
ar, fn = fn, ar
fn = os.path.expanduser(fn)
ar = np.round(ar)
if 0 <= ar.max() <= 1 and ar.dtype == np.double:
bitdepth = 16 if None else bitdepth
imdtype = np.uint16 if bitdepth == 16 else np.uint8
ar = ((2**bitdepth-1)*ar).astype(imdtype)
if 1 < ar.max() < 256 and bitdepth == None or bitdepth == 8:
mode = 'L'
mode_base = 'L'
ar = ar.astype(np.uint8)
elif 256 <= np.max(ar) < 2**16 and bitdepth == None or \
bitdepth == 16:
mode = 'I;16'
mode_base = 'I'
ar = ar.astype(np.uint16)
else:
mode = 'RGBA'
mode_base = 'RGBA'
ar = ar.astype(np.uint32)
im = Image.new(mode_base, ar.T.shape)
im.fromstring(ar.tostring(), 'raw', mode)
im.save(fn)
imsave = imwrite
| import os
import numpy as np
import Image
def imwrite(ar, fn, bitdepth=None):
"""Write a np.ndarray 2D volume to a .png or .tif image
Parameters
----------
ar : numpy ndarray, shape (M, N)
The volume to be written to disk.
fn : string
The file name to which to write the volume.
bitdepth : {8, 16, 32}, optional
The bits per pixel.
Returns
-------
None : None
No value is returned.
Notes
-----
The syntax `imwrite(fn, ar)` is also supported.
"""
if type(fn) == np.ndarray and type(ar) == str:
ar, fn = fn, ar
fn = os.path.expanduser(fn)
if 0 <= ar.max() <= 1 and ar.dtype == np.double:
bitdepth = 16 if None else bitdepth
imdtype = np.uint16 if bitdepth == 16 else np.uint8
ar = ((2**bitdepth-1)*ar).astype(imdtype)
if 1 < ar.max() < 256 and bitdepth == None or bitdepth == 8:
mode = 'L'
mode_base = 'L'
ar = ar.astype(np.uint8)
elif 256 <= np.max(ar) < 2**16 and bitdepth == None or \
bitdepth == 16:
mode = 'I;16'
mode_base = 'I'
ar = ar.astype(np.uint16)
else:
mode = 'RGBA'
mode_base = 'RGBA'
ar = ar.astype(np.uint32)
im = Image.new(mode_base, ar.T.shape)
im.fromstring(ar.tostring(), 'raw', mode)
im.save(fn)
imsave = imwrite
| bsd-3-clause | Python |
b908f62ecc868618b49433db378e7986bdb3d271 | Remove whitespace https://www.python.org/dev/peps/pep-0008/#whitespace-in-expressions-and-statements | plotly/dash,plotly/dash,plotly/dash,plotly/dash,plotly/dash | packages/dash-core-components/setup.py | packages/dash-core-components/setup.py | from setuptools import setup
exec(open('dash_core_components/version.py').read())
setup(
name='dash_core_components',
version=__version__,
author='Chris Parmer',
author_email='chris@plot.ly',
packages=['dash_core_components'],
include_package_data=True,
license='MIT',
description='Dash UI core component suite',
install_requires=['dash']
)
| from setuptools import setup
exec (open('dash_core_components/version.py').read())
setup(
name='dash_core_components',
version=__version__,
author='Chris Parmer',
author_email='chris@plot.ly',
packages=['dash_core_components'],
include_package_data=True,
license='MIT',
description='Dash UI core component suite',
install_requires=['dash']
)
| mit | Python |
cc50a1ed2c032361166639642e8ab977bd7c8a5e | use zip64 | dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/blobs/zipdb.py | corehq/blobs/zipdb.py | from __future__ import absolute_import
from os.path import commonprefix, join, sep
import zipfile
from corehq.blobs import DEFAULT_BUCKET
from corehq.blobs.exceptions import BadName
from corehq.blobs.interface import AbstractBlobDB, SAFENAME
class ZipBlobDB(AbstractBlobDB):
"""Blobs stored in zip file. Used for exporting a domain's blobs
"""
def __init__(self, slug, domain):
self.zipname = 'export-{domain}-{slug}-blobs.zip'.format(domain=domain, slug=slug)
def put(self, content, basename="", bucket=DEFAULT_BUCKET):
raise NotImplementedError
def get(self, identifier, bucket=DEFAULT_BUCKET):
raise NotImplementedError
def delete(self, *args, **kw):
raise NotImplementedError
def bulk_delete(self, paths):
raise NotImplementedError
def copy_blob(self, content, info, bucket):
path = self.get_path(info.identifier, bucket)
with zipfile.ZipFile(self.zipname, 'a', allowZip64=True) as z:
z.writestr(path, content.read())
def get_path(self, identifier=None, bucket=DEFAULT_BUCKET):
if identifier is None:
return bucket
return safejoin(bucket, identifier)
def exists(self, identifier, bucket=DEFAULT_BUCKET):
path = self.get_path(identifier, bucket)
with zipfile.ZipFile(self.zipname, 'r') as z:
return path in z.namelist()
def safejoin(root, subpath):
if not SAFENAME.match(subpath):
raise BadName(u"unsafe path name: %r" % subpath)
path = join(root, subpath)
if commonprefix([root + sep, path]) != root + sep:
raise BadName(u"invalid relative path: %r" % subpath)
return path
def get_blob_db_exporter(slug, domain):
return _get_zip_db(slug, domain)
def _get_zip_db(slug, domain):
return ZipBlobDB(slug, domain)
| from __future__ import absolute_import
from os.path import commonprefix, join, sep
import zipfile
from corehq.blobs import DEFAULT_BUCKET
from corehq.blobs.exceptions import BadName
from corehq.blobs.interface import AbstractBlobDB, SAFENAME
class ZipBlobDB(AbstractBlobDB):
"""Blobs stored in zip file. Used for exporting a domain's blobs
"""
def __init__(self, slug, domain):
self.zipname = 'export-{domain}-{slug}-blobs.zip'.format(domain=domain, slug=slug)
def put(self, content, basename="", bucket=DEFAULT_BUCKET):
raise NotImplementedError
def get(self, identifier, bucket=DEFAULT_BUCKET):
raise NotImplementedError
def delete(self, *args, **kw):
raise NotImplementedError
def bulk_delete(self, paths):
raise NotImplementedError
def copy_blob(self, content, info, bucket):
path = self.get_path(info.identifier, bucket)
with zipfile.ZipFile(self.zipname, 'a') as z:
z.writestr(path, content.read())
def get_path(self, identifier=None, bucket=DEFAULT_BUCKET):
if identifier is None:
return bucket
return safejoin(bucket, identifier)
def exists(self, identifier, bucket=DEFAULT_BUCKET):
path = self.get_path(identifier, bucket)
with zipfile.ZipFile(self.zipname, 'r') as z:
return path in z.namelist()
def safejoin(root, subpath):
if not SAFENAME.match(subpath):
raise BadName(u"unsafe path name: %r" % subpath)
path = join(root, subpath)
if commonprefix([root + sep, path]) != root + sep:
raise BadName(u"invalid relative path: %r" % subpath)
return path
def get_blob_db_exporter(slug, domain):
return _get_zip_db(slug, domain)
def _get_zip_db(slug, domain):
return ZipBlobDB(slug, domain)
| bsd-3-clause | Python |
c30fff32d5249d603b9d21072e48950538941b5b | remove AuthenticationHelper | hackedd/gw2api | gw2api/v2/account.py | gw2api/v2/account.py | from .endpoint import EndpointBase, Endpoint
class AuthenticatedMixin(object):
token = None
@classmethod
def set_token(cls, token):
cls.token = token
def _get(self, path, **kwargs):
if self.token:
headers = kwargs.setdefault("headers", {})
headers.setdefault("Authorization", "Bearer " + self.token)
return super(AuthenticatedMixin, self)._get(path, **kwargs)
class AccountEndpoint(AuthenticatedMixin, EndpointBase):
def get(self):
return self.get_cached(self.name, None)
class CharacterEndpoint(AuthenticatedMixin, Endpoint):
pass
| import urllib
import warnings
from .endpoint import EndpointBase, Endpoint
import gw2api
class AuthenticationHelper(object):
authorization_url = "https://account.guildwars2.com/oauth2/authorization"
token_url = "https://account.guildwars2.com/oauth2/token"
def __init__(self, client_id, client_secret, redirect_uri):
super(AuthenticationHelper, self).__init__()
warnings.warn("OAuth2 has been deprecated and will be disabled on "
"Thursday, June 4th (http://tinyurl.com/gw2oauth).",
stacklevel=2)
self.client_id = client_id
self.client_secret = client_secret
self.redirect_uri = redirect_uri
def authorize_url(self, scope="account offline", **kwargs):
params = {
"redirect_uri": self.redirect_uri,
"client_id": self.client_id,
"response_type": "code",
"scope": scope,
}
params.update(kwargs)
return self.authorization_url + "?" + urllib.urlencode(params)
def get_token(self, code, **kwargs):
params = {
"redirect_uri": self.redirect_uri,
"client_id": self.client_id,
"client_secret": self.client_secret,
"grant_type": "authorization_code",
"code": code,
}
params.update(kwargs)
r = gw2api.session.post(self.token_url, data=params)
r.raise_for_status()
return r.json()
class AuthenticatedMixin(object):
token = None
@classmethod
def set_token(cls, token):
cls.token = token
def _get(self, path, **kwargs):
if self.token:
headers = kwargs.setdefault("headers", {})
headers.setdefault("Authorization", "Bearer " + self.token)
return super(AuthenticatedMixin, self)._get(path, **kwargs)
class AccountEndpoint(AuthenticatedMixin, EndpointBase):
def get(self):
return self.get_cached(self.name, None)
class CharacterEndpoint(AuthenticatedMixin, Endpoint):
pass
| mit | Python |
c425046d662a8554a00267184216d0f883dbc3cf | Update use_four.py | weepingdog/ArcpyScriptToolkits | Four/use_four.py | Four/use_four.py | import os
import math
import string
dx=-403.275
dy=178.190
rot_s=-10.300182
k=1.00009495477874
def fourptrans(x0in,y0in,dxin,dyin,rotin,kin):
rot_rad=rotin/3600./180.*math.pi
sinr=math.sin(rot_rad)
cosr=math.cos(rot_rad)
x=dxin+kin*x0in*cosr-kin*y0in*sinr
y=dyin+kin*x0in*sinr+kin*y0in*cosr
return(x,y)
f1=open("in.txt","r")
f2=open("out.txt","w")
bz=[]
rdl=f1.readline()
index=1
while(rdl):
dataline=rdl.split("\t")
x0=string.atof(dataline[1])
y0=string.atof(dataline[2])
x,y=fourptrans(x0,y0,dx,dy,rot_s,k)
ptl="{0}\t{1:.4f}\t{2:.4f}\t{3:.4f}\t{4:.4f}".format(dataline[0],x0,y0,x,y)
print ptl
wtl="{0}\t{1:.4f}\t{2:.4f}\t{3:.4f}\t{4:.4f}\n".format(dataline[0],x0,y0,x,y)
f2.write(wtl)
rdl=f1.readline()
index=index+1
f1.close()
f2.close()
| import os
import math
import string
dx=-403.275
dy=178.190
rot_s=-10.300182
k=1.00009495477874
def fourptrans(x0in,y0in,dxin,dyin,rotin,kin):
rot_rad=rotin/3600./180.*math.pi
sinr=math.sin(rot_rad)
cosr=math.cos(rot_rad)
x=dxin+kin*x0in*cosr-kin*y0in*sinr
y=dyin+kin*x0in*sinr+kin*y0in*cosr
return(x,y)
f1=open("bz.txt","r")
f2=open("bz2000out.txt","w")
bz=[]
rdl=f1.readline()
index=1
while(rdl):
dataline=rdl.split("\t")
x0=string.atof(dataline[1])
y0=string.atof(dataline[2])
x,y=fourptrans(x0,y0,dx,dy,rot_s,k)
ptl="{0}\t{1:.4f}\t{2:.4f}\t{3:.4f}\t{4:.4f}".format(dataline[0],x0,y0,x,y)
print ptl
wtl="{0}\t{1:.4f}\t{2:.4f}\t{3:.4f}\t{4:.4f}\n".format(dataline[0],x0,y0,x,y)
f2.write(wtl)
rdl=f1.readline()
index=index+1
f1.close()
f2.close()
| apache-2.0 | Python |
63bf9c267ff891f1a2bd1f472a5d77f8df1e0209 | Split IAM template tests with paramtrize | gogoair/foremast,gogoair/foremast | tests/iam/test_iam_valid_json.py | tests/iam/test_iam_valid_json.py | """Test IAM Policy templates are valid JSON."""
import json
import jinja2
import pytest
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
@pytest.mark.parametrize(argnames='template_name', argvalues=iam_templates())
def test_all_iam_templates(template_name):
"""Verify all IAM templates render as proper JSON."""
*_, service_json = template_name.split('/')
service, *_ = service_json.split('.')
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
try:
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
except json.decoder.JSONDecodeError:
pytest.fail('Bad template: {0}'.format(template_name), pytrace=False)
assert isinstance(rendered, list)
| """Test IAM Policy templates are valid JSON."""
import jinja2
from foremast.iam.construct_policy import render_policy_template
from foremast.utils.templates import LOCAL_TEMPLATES
def iam_templates():
"""Generate list of IAM templates."""
jinjaenv = jinja2.Environment(loader=jinja2.FileSystemLoader([LOCAL_TEMPLATES]))
iam_template_names = jinjaenv.list_templates(filter_func=lambda x: all([
x.startswith('infrastructure/iam/'),
'trust' not in x,
'wrapper' not in x, ]))
for iam_template_name in iam_template_names:
yield iam_template_name
items = ['resource1', 'resource2']
if service == 'rds-db':
items = {
'resource1': 'user1',
'resource2': 'user2',
}
rendered = render_policy_template(
account_number='',
app='coreforrest',
env='dev',
group='forrest',
items=items,
pipeline_settings={
'lambda': {
'vpc_enabled': False,
},
},
region='us-east-1',
service=service)
assert isinstance(rendered, list)
| apache-2.0 | Python |
e4968d033d344016b7b101d04572f01bfe9e0b26 | make flake8 happy | viper-framework/har2tree,viper-framework/har2tree,viper-framework/har2tree | har2tree/__init__.py | har2tree/__init__.py | from .parser import CrawledTree # noqa
from .nodes import HostNode, URLNode, HarTreeNode # noqa
from .har2tree import Har2Tree, HarFile, Har2TreeLogAdapter # noqa
from .helper import Har2TreeError # noqa
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| from .parser import CrawledTree
from .nodes import HostNode, URLNode, HarTreeNode
from .har2tree import Har2Tree, HarFile, Har2TreeLogAdapter
from .helper import Har2TreeError
import logging
logging.getLogger(__name__).addHandler(logging.NullHandler())
| bsd-3-clause | Python |
57774a4f6f9bead4f58b2b82cc6ae0cdd2309ebd | remove invalid options | minamorl/staccato | staccato/twitter.py | staccato/twitter.py | from requests_oauthlib import OAuth1Session
from json import JSONDecoder
from staccato import utils
class Twitter():
API = "https://api.twitter.com/1.1/"
def __init__(self, session=None):
self.session = session
def auth(self, consumer_key, consumer_secret, access_token_key, access_token_secret):
session = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_token_key,
resource_owner_secret=access_token_secret)
self.session=session
def request(self, method='get', endpoint='', **kwargs):
def _parsed(s):
try:
return JSONDecoder().decode(s)
except:
return s
if self.session is None:
raise TwitterAuthException()
_url=Twitter.API + endpoint
parsed=_parsed(self.session.request(method, _url, **kwargs).text)
return parsed
def get_followers(self, screen_name, count=0):
return self.request("get", "followers/ids.json", params={"screen_name": screen_name, "count": count})
def get_followings(self, screen_name, count=0):
return self.request("get", "friends/ids.json", params={"screen_name": screen_name, "count": count})
def lookup(self, user_ids):
result=[]
for ids in utils.chunk(user_ids, 100):
r=self.request("get", "users/lookup.json", params={"user_id": ','.join(ids)})
result.extend(r)
return result
def remove_user(self, user_id):
return self.request("post", "friendships/destroy.json", params={"user_id": user_id})
class TwitterAuthException(Exception):
pass
| from requests_oauthlib import OAuth1Session
from json import JSONDecoder
from staccato import utils
class Twitter():
API = "https://api.twitter.com/1.1/"
def __init__(self, conf, session=None):
self.conf = conf
self.session = session
def auth(self, consumer_key, consumer_secret, access_token_key, access_token_secret):
session = OAuth1Session(
consumer_key,
client_secret=consumer_secret,
resource_owner_key=access_token_key,
resource_owner_secret=access_token_secret)
self.session=session
def request(self, method='get', endpoint='', **kwargs):
def _parsed(s):
try:
return JSONDecoder().decode(s)
except:
return s
if self.session is None:
raise TwitterAuthException()
_url=Twitter.API + endpoint
parsed=_parsed(self.session.request(method, _url, **kwargs).text)
return parsed
def get_followers(self, screen_name, count=0):
return self.request("get", "followers/ids.json", params={"screen_name": screen_name, "count": count})
def get_followings(self, screen_name, count=0):
return self.request("get", "friends/ids.json", params={"screen_name": screen_name, "count": count})
def lookup(self, user_ids):
result=[]
for ids in utils.chunk(user_ids, 100):
r=self.request("get", "users/lookup.json", params={"user_id": ','.join(ids)})
result.extend(r)
return result
def remove_user(self, user_id):
return self.request("post", "friendships/destroy.json", params={"user_id": user_id})
class TwitterAuthException(Exception):
pass
| mit | Python |
affd1a18915a81379133183617e91fe6adb26dfd | Update k-th-symbol-in-grammar.py | tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/k-th-symbol-in-grammar.py | Python/k-th-symbol-in-grammar.py | # Time: O(logn) = O(1) because n is 32-bit integer
# Space: O(1)
# On the first row, we write a 0.
# Now in every subsequent row,
# we look at the previous row and replace each occurrence of 0 with 01,
# and each occurrence of 1 with 10.
#
# Given row N and index K, return the K-th indexed symbol in row N.
# (The values of K are 1-indexed.) (1 indexed).
#
# Examples:
# Input: N = 1, K = 1
# Output: 0
#
# Input: N = 2, K = 1
# Output: 0
#
# Input: N = 2, K = 2
# Output: 1
#
# Input: N = 4, K = 5
# Output: 1
#
# Explanation:
# row 1: 0
# row 2: 01
# row 3: 0110
# row 4: 01101001
#
# Note:
# - N will be an integer in the range [1, 30].
# - K will be an integer in the range [1, 2^(N-1)].
class Solution(object):
def kthGrammar(self, N, K):
"""
:type N: int
:type K: int
:rtype: int
"""
def bitCount(n):
result = 0
while n:
n &= n - 1
result += 1
return result
return bitCount(K-1) % 2
| # Time: O(logn)
# Space: O(1)
# On the first row, we write a 0.
# Now in every subsequent row,
# we look at the previous row and replace each occurrence of 0 with 01,
# and each occurrence of 1 with 10.
#
# Given row N and index K, return the K-th indexed symbol in row N.
# (The values of K are 1-indexed.) (1 indexed).
#
# Examples:
# Input: N = 1, K = 1
# Output: 0
#
# Input: N = 2, K = 1
# Output: 0
#
# Input: N = 2, K = 2
# Output: 1
#
# Input: N = 4, K = 5
# Output: 1
#
# Explanation:
# row 1: 0
# row 2: 01
# row 3: 0110
# row 4: 01101001
#
# Note:
# - N will be an integer in the range [1, 30].
# - K will be an integer in the range [1, 2^(N-1)].
class Solution(object):
def kthGrammar(self, N, K):
"""
:type N: int
:type K: int
:rtype: int
"""
def bitCount(n):
result = 0
while n:
n &= n - 1
result += 1
return result
return bitCount(K-1) % 2
| mit | Python |
77fbb9547905fdecce23b80d1e33d847d3006345 | Make pybb cmake_build check to see if build directory exists | wombatant/nostalgia,wombatant/nostalgia,wombatant/nostalgia | deps/buildcore/scripts/pybb.py | deps/buildcore/scripts/pybb.py | #! /usr/bin/env python3
#
# Copyright 2016 - 2021 gary@drinkingtea.net
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# "Python Busy Box" - adds cross platform equivalents to Unix commands that
# don't translate well to that other operating system
import os
import shutil
import subprocess
import sys
def mkdir(path):
if not os.path.exists(path) and os.path.isdir(path):
os.mkdir(path)
# this exists because Windows is utterly incapable of providing a proper rm -rf
def rm(path):
if (os.path.exists(path) or os.path.islink(path)) and not os.path.isdir(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def cmake_build(base_path, target):
if not os.path.isdir(base_path):
# nothing to build
return 0
for d in os.listdir(base_path):
args = ['cmake', '--build', os.path.join(base_path, d), '--target']
if target is not None:
args.append(target)
err = subprocess.run(args).returncode
if err != 0:
return err
def main():
if sys.argv[1] == 'mkdir':
mkdir(sys.argv[2])
elif sys.argv[1] == 'rm':
for i in range(2, len(sys.argv)):
rm(sys.argv[i])
elif sys.argv[1] == 'cmake-build':
err = cmake_build(sys.argv[2], sys.argv[3] if len(sys.argv) > 3 else None)
sys.exit(err)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
| #! /usr/bin/env python3
#
# Copyright 2016 - 2021 gary@drinkingtea.net
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
#
# "Python Busy Box" - adds cross platform equivalents to Unix commands that
# don't translate well to that other operating system
import os
import shutil
import subprocess
import sys
def mkdir(path):
if not os.path.exists(path) and os.path.isdir(path):
os.mkdir(path)
# this exists because Windows is utterly incapable of providing a proper rm -rf
def rm(path):
if (os.path.exists(path) or os.path.islink(path)) and not os.path.isdir(path):
os.remove(path)
elif os.path.isdir(path):
shutil.rmtree(path)
def cmake_build(base_path, target):
for d in os.listdir(base_path):
args = ['cmake', '--build', os.path.join(base_path, d), '--target']
if target is not None:
args.append(target)
err = subprocess.run(args).returncode
if err != 0:
sys.exit(err)
def main():
if sys.argv[1] == 'mkdir':
mkdir(sys.argv[2])
elif sys.argv[1] == 'rm':
for i in range(2, len(sys.argv)):
rm(sys.argv[i])
elif sys.argv[1] == 'cmake-build':
cmake_build(sys.argv[2], sys.argv[3] if len(sys.argv) > 3 else None)
if __name__ == '__main__':
try:
main()
except KeyboardInterrupt:
sys.exit(1)
| mpl-2.0 | Python |
14fd9e86e868f7c0717723e2c06aa0cac8f613f9 | Fix spelling in a doc string | dragorosson/heat,openstack/heat,dragorosson/heat,jasondunsmore/heat,maestro-hybrid-cloud/heat,rdo-management/heat,jasondunsmore/heat,pshchelo/heat,miguelgrinberg/heat,noironetworks/heat,srznew/heat,miguelgrinberg/heat,takeshineshiro/heat,maestro-hybrid-cloud/heat,openstack/heat,cryptickp/heat,rdo-management/heat,srznew/heat,rh-s/heat,gonzolino/heat,redhat-openstack/heat,cwolferh/heat-scratch,redhat-openstack/heat,pshchelo/heat,rh-s/heat,takeshineshiro/heat,steveb/heat,dims/heat,gonzolino/heat,cryptickp/heat,pratikmallya/heat,dims/heat,cwolferh/heat-scratch,noironetworks/heat,steveb/heat,pratikmallya/heat | heat/common/crypt.py | heat/common/crypt.py | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo.config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
default='notgood but just long enough i think',
help="Encryption key used for authentication info in database.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
| #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo.config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
default='notgood but just long enough i think',
help="Encryption key used for authentication info in database.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it my seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
| apache-2.0 | Python |
b40ca78355da8b555e363febed72d5bacf132d5d | bump taglib-sharp to 2.0.3.7 | lamalex/Banshee,ixfalia/banshee,directhex/banshee-hacks,GNOME/banshee,babycaseny/banshee,Carbenium/banshee,allquixotic/banshee-gst-sharp-work,arfbtwn/banshee,lamalex/Banshee,GNOME/banshee,petejohanson/banshee,arfbtwn/banshee,petejohanson/banshee,GNOME/banshee,petejohanson/banshee,ixfalia/banshee,Carbenium/banshee,arfbtwn/banshee,lamalex/Banshee,directhex/banshee-hacks,lamalex/Banshee,arfbtwn/banshee,directhex/banshee-hacks,Dynalon/banshee-osx,babycaseny/banshee,GNOME/banshee,Dynalon/banshee-osx,GNOME/banshee,Dynalon/banshee-osx,Carbenium/banshee,dufoli/banshee,ixfalia/banshee,dufoli/banshee,babycaseny/banshee,Carbenium/banshee,directhex/banshee-hacks,ixfalia/banshee,allquixotic/banshee-gst-sharp-work,allquixotic/banshee-gst-sharp-work,petejohanson/banshee,babycaseny/banshee,mono-soc-2011/banshee,stsundermann/banshee,babycaseny/banshee,GNOME/banshee,ixfalia/banshee,arfbtwn/banshee,babycaseny/banshee,arfbtwn/banshee,lamalex/Banshee,arfbtwn/banshee,lamalex/Banshee,babycaseny/banshee,GNOME/banshee,allquixotic/banshee-gst-sharp-work,dufoli/banshee,stsundermann/banshee,Dynalon/banshee-osx,stsundermann/banshee,directhex/banshee-hacks,GNOME/banshee,mono-soc-2011/banshee,Dynalon/banshee-osx,dufoli/banshee,ixfalia/banshee,ixfalia/banshee,allquixotic/banshee-gst-sharp-work,allquixotic/banshee-gst-sharp-work,Carbenium/banshee,babycaseny/banshee,dufoli/banshee,Carbenium/banshee,petejohanson/banshee,mono-soc-2011/banshee,Dynalon/banshee-osx,stsundermann/banshee,ixfalia/banshee,dufoli/banshee,dufoli/banshee,mono-soc-2011/banshee,mono-soc-2011/banshee,directhex/banshee-hacks,mono-soc-2011/banshee,petejohanson/banshee,stsundermann/banshee,stsundermann/banshee,stsundermann/banshee,mono-soc-2011/banshee,Dynalon/banshee-osx,stsundermann/banshee,arfbtwn/banshee,dufoli/banshee,Dynalon/banshee-osx | build/bundle/packages/taglib-sharp.py | build/bundle/packages/taglib-sharp.py | Package ('taglib-sharp', '2.0.3.7',
sources = [ 'http://download.banshee-project.org/%{name}/%{version}/%{name}-%{version}.tar.gz' ],
configure_flags = [ '--disable-docs' ]
)
| Package ('taglib-sharp', '2.0.3.6',
sources = [ 'http://download.banshee-project.org/%{name}/%{version}/%{name}-%{version}.tar.gz' ],
configure_flags = [ '--disable-docs' ]
)
| mit | Python |
ce26c08cc0b6d2907baddb694a42ef99fee05f52 | return more than just visible | gleitz/spheremusic,gleitz/spheremusic,gleitz/spheremusic | satellites.py | satellites.py | # Symphony of the Satellites
# https://hackpad.com/Symphony-of-the-Satellites-c0aGX4vfmTN
import math
import requests
import ephem
import datetime
from math import degrees
import json
from calendar import timegm
def chunks(l, n):
""" Yield successive n-sized chunks from l"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def get_satellites(only_visible = True, now=None):
SAT_BRIGHTEST = 'http://celestrak.com/NORAD/elements/visual.txt'
SAT_GEO = 'http://celestrak.com/NORAD/elements/geo.txt'
SAT_DEBRIS = 'http://celestrak.com/NORAD/elements/1999-025.txt'
# Fetch the ~100 brightest satellites
r = requests.get(SAT_DEBRIS)
data = r.text.split('\r\n')
# Split each into TLE
if only_visible:
count = 0
for tle in chunks(data, 3):
if len(tle) != 3:
continue
count += 1
tle_data = get_location(tle, now = now)
if tle_data['visible']:
yield tle_data
print 'analyzed {0} satellites; found {1} visible'.format(count, len(visible))
else:
for tle in chunks(data, 3):
if len(tle) != 3:
continue
count += 1
yield get_location(tle, now = now)
def get_location(tle, now=None, lat=None, lng=None):
"""Compute the current location of the ISS"""
now = now or datetime.datetime.utcnow()
lat = lat or 37.7701
lng = lng or -122.4664
satellite = ephem.readtle(str(tle[0]), str(tle[1]), str(tle[2]))
# Compute for current location
observer = ephem.Observer()
observer.lat = lat
observer.lon = lng
observer.elevation = 0
observer.date = now
satellite.compute(observer)
lon = degrees(satellite.sublong)
lat = degrees(satellite.sublat)
# Return the relevant timestamp and data
data = {"timestamp": timegm(now.timetuple()), "position": {"latitude": lat, "longitude": lon}, "visible": float(repr(satellite.alt)) > 0 and float(repr(satellite.alt)) < math.pi, "range": satellite.range, "velocity": satellite.range_velocity}
return data
| # Symphony of the Satellites
# https://hackpad.com/Symphony-of-the-Satellites-c0aGX4vfmTN
import math
import requests
import ephem
import datetime
from math import degrees
import json
from calendar import timegm
def chunks(l, n):
""" Yield successive n-sized chunks from l"""
for i in xrange(0, len(l), n):
yield l[i:i+n]
def get_satellites():
SAT_BRIGHTEST = 'http://celestrak.com/NORAD/elements/visual.txt'
SAT_GEO = 'http://celestrak.com/NORAD/elements/geo.txt'
SAT_DEBRIS = 'http://celestrak.com/NORAD/elements/1999-025.txt'
# Fetch the ~100 brightest satellites
r = requests.get(SAT_DEBRIS)
data = r.text.split('\r\n')
# Split each into TLE
visible = []
count = 0
for tle in chunks(data, 3):
if len(tle) != 3:
continue
count += 1
tle_data = get_location(tle)
if tle_data['visible']:
visible.append(tle_data)
print 'analyzed {0} satellites; found {1} visible'.format(count, len(visible))
return visible
def get_location(tle, now=None, lat=None, lng=None):
"""Compute the current location of the ISS"""
now = now or datetime.datetime.utcnow()
lat = lat or 37.7701
lng = lng or -122.4664
satellite = ephem.readtle(str(tle[0]), str(tle[1]), str(tle[2]))
# Compute for current location
observer = ephem.Observer()
observer.lat = lat
observer.lon = lng
observer.elevation = 0
observer.date = now
satellite.compute(observer)
lon = degrees(satellite.sublong)
lat = degrees(satellite.sublat)
# Return the relevant timestamp and data
data = {"timestamp": timegm(now.timetuple()), "position": {"latitude": lat, "longitude": lon}, "visible": float(repr(satellite.alt)) > 0 and float(repr(satellite.alt)) < math.pi, "range": satellite.range, "velocity": satellite.range_velocity}
return data
| mit | Python |
f178e385bb2ec305157064c1612ffa9267206b77 | add unit tests of DNS and Traceroute | rpanah/centinel,rpanah/centinel,lianke123321/centinel,rpanah/centinel,iclab/centinel,iclab/centinel,lianke123321/centinel,iclab/centinel,lianke123321/centinel,Ashish1805/centinel,JASONews/centinel | centinel/unit_test/test_traceroute.py | centinel/unit_test/test_traceroute.py | __author__ = 'xinwenwang'
import pytest
from centinel.primitives import traceroute
class TestTraceRoute:
#1. test good case
def test_traceroute_valid_domain(self):
domain = 'www.google.com'
result = traceroute.traceroute(domain)
#.1 test traceroute results is not none
assert result is not None
#* test domain is matched with given domain
assert result['domain'] == domain
#* test method is not none
assert result['method'] is not None
#+ test method is udp if no method given
assert result['method'] == 'udp'
assert 'error' not in result
#* test other field is not None
assert result["domain"] is not None
assert result["method"] is not None
assert result["total_hops"] is not None
assert result["meaningful_hops"] is not None
assert result["hops"] is not None
assert result["unparseable_lines"] is not None
assert result["forcefully_terminated"] is not None
assert result["time_elapsed"] is not None
#+ test method is tcp if given method as tcp
def test_traceroute_tcp_connection(self):
domain = 'www.google.com'
result = traceroute.traceroute(domain, 'tcp')
assert result is not None
#* test 'error' is not in the results
assert 'error' not in result
# 2. test bad case
def test_traceroute_invalid_domain_name(self):
#.1 given a invalid domain name
domain = 'www.sdfadsfdasefwefewfew.fewfwefw.fwefwfsafdas.com'
result = traceroute.traceroute(domain)
#* test 'error' is not none
assert result is not None
assert 'error' in result
#+ test 'error' is ': name or service not known'
assert result['error'] == ': name or service not known'
# .2 given a great number of domain names
@pytest.mark.skipif(True)
def test_traceroute_batch(self):
domains = []
result = traceroute.traceroute_batch(domains)
assert result is not None
#* test 'error' is in results
assert 'error' in result
#+ test 'error' is "Threads took too long to finish."
assert result['error'] == "Threads took too long to finish."
| __author__ = 'xinwenwang'
import pytest
from ..primitives import traceroute
class TestTraceRoute:
#1. test good case
def test_traceroute_valid_domain(self):
domain = 'www.google.com'
result = traceroute.traceroute(domain)
#.1 test traceroute results is not none
assert result is not None
#* test domain is matched with given domain
assert result['domain'] == domain
#* test method is not none
assert result['method'] is not None
#+ test method is udp if no method given
assert result['method'] == 'udp'
assert 'error' not in result
#* test other field is not None
assert result["domain"] is not None
assert result["method"] is not None
assert result["total_hops"] is not None
assert result["meaningful_hops"] is not None
assert result["hops"] is not None
assert result["unparseable_lines"] is not None
assert result["forcefully_terminated"] is not None
assert result["time_elapsed"] is not None
#+ test method is tcp if given method as tcp
def test_traceroute_tcp_connection(self):
domain = 'www.google.com'
result = traceroute.traceroute(domain, 'tcp')
assert result is not None
#* test 'error' is not in the results
assert 'error' not in result
# 2. test bad case
def test_traceroute_invalid_domain_name(self):
#.1 given a invalid domain name
domain = 'www.sdfadsfdasefwefewfew.fewfwefw.fwefwfsafdas.com'
result = traceroute.traceroute(domain)
#* test 'error' is not none
assert result is not None
assert 'error' in result
#+ test 'error' is ': name or service not known'
assert result['error'] == ': name or service not known'
# .2 given a great number of domain names
@pytest.mark.skipif(True)
def test_traceroute_batch(self):
domains = []
result = traceroute.traceroute_batch(domains)
assert result is not None
#* test 'error' is in results
assert 'error' in result
#+ test 'error' is "Threads took too long to finish."
assert result['error'] == "Threads took too long to finish."
| mit | Python |
c6d6bcd85c9ddf152778d92081cc7ad912d3373f | Test arguments | danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox,danielfrg/datasciencebox | datasciencebox/tests/core/test_instance.py | datasciencebox/tests/core/test_instance.py | import pytest
from datasciencebox.core.settings import Settings
from datasciencebox.core.cloud.instance import Instance, BareInstance, AWSInstance, GCPInstance
default_username = 'default_username'
default_keypair = 'default_keypair'
settings = Settings()
settings['USERNAME'] = default_username
settings['KEYPAIR'] = default_keypair
def test_new_bare():
instance = Instance.new(settings=settings)
assert isinstance(instance, BareInstance)
instance = Instance.new(settings=settings, uid='myid')
assert instance.uid == 'myid'
assert instance.port == 22
assert instance.username == default_username
assert instance.keypair == default_keypair
instance = Instance.new(settings=settings, uid='myid', ip='1.1.1.1', port=33, username='me', keypair='mykey')
assert instance.ip == '1.1.1.1'
assert instance.port == 33
assert instance.username == 'me'
assert instance.keypair == 'mykey'
assert instance.to_dict() == {'ip': '1.1.1.1', 'port': 33, 'uid': 'myid'}
def test_new_bare_ip_with_port():
instance = Instance.new(settings=settings, uid='myid', ip='1.1.1.1:2022')
assert isinstance(instance, BareInstance)
assert instance.ip == '1.1.1.1'
assert instance.port == 2022
assert instance.username == default_username
assert instance.keypair == default_keypair
def test_new_clouds():
settings['CLOUD'] = 'aws'
instance = Instance.new(settings=settings)
assert isinstance(instance, AWSInstance)
settings['CLOUD'] = 'gcp'
instance = Instance.new(settings=settings)
assert isinstance(instance, GCPInstance)
| import pytest
from datasciencebox.core.settings import Settings
from datasciencebox.core.cloud.instance import Instance, BareInstance, AWSInstance, GCPInstance
settings = Settings()
def test_new_bare():
instance = Instance.new(settings=settings)
assert isinstance(instance, BareInstance)
instance = Instance.new(settings=settings, uid='myid')
assert instance.uid == 'myid'
instance = Instance.new(settings=settings, uid='myid', ip='1.1.1.1')
assert instance.ip == '1.1.1.1'
assert instance.port == 22
assert instance.to_dict() == {'ip': '1.1.1.1', 'port': 22, 'uid': 'myid'}
def test_new_bare_ssh_port():
instance = Instance.new(settings=settings, uid='myid', ip='1.1.1.1:2022')
assert isinstance(instance, BareInstance)
assert instance.ip == '1.1.1.1'
assert instance.port == 2022
def test_new_clouds():
settings['CLOUD'] = 'aws'
instance = Instance.new(settings=settings)
assert isinstance(instance, AWSInstance)
settings['CLOUD'] = 'gcp'
instance = Instance.new(settings=settings)
assert isinstance(instance, GCPInstance)
| apache-2.0 | Python |
71e65eb2aeda3cd4b27cbb2f945954ada10bb516 | fix variable name of eval_imagenet | pfnet/chainercv,yuyu2172/chainercv,yuyu2172/chainercv,chainer/chainercv,chainer/chainercv | examples/classification/eval_imagenet.py | examples/classification/eval_imagenet.py | import argparse
import sys
import time
import numpy as np
import chainer
import chainer.functions as F
from chainer import iterators
from chainercv.datasets import directory_parsing_label_names
from chainercv.datasets import DirectoryParsingLabelDataset
from chainercv.links import FeaturePredictor
from chainercv.links import VGG16
from chainercv.utils import apply_prediction_to_iterator
class ProgressHook(object):
def __init__(self, n_total):
self.n_total = n_total
self.start = time.time()
self.n_processed = 0
def __call__(self, imgs, pred_values, gt_values):
self.n_processed += len(imgs)
fps = self.n_processed / (time.time() - self.start)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
self.n_processed, self.n_total, fps))
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--model', choices=('vgg16',))
parser.add_argument('--pretrained_model', default='imagenet')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--crop', choices=('center', '10'), default='center')
args = parser.parse_args()
dataset = DirectoryParsingLabelDataset(args.val)
label_names = directory_parsing_label_names(args.val)
iterator = iterators.MultiprocessIterator(
dataset, args.batchsize, repeat=False, shuffle=False,
n_processes=6, shared_mem=300000000)
if args.model == 'vgg16':
extractor = VGG16(len(label_names), args.pretrained_model)
model = FeaturePredictor(
extractor, crop_size=224, scale_size=256, crop=args.crop)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
print('Model has been prepared. Evaluation starts.')
imgs, pred_values, gt_values = apply_prediction_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
del imgs
pred_probs, = pred_values
gt_labels, = gt_values
accuracy = F.accuracy(
np.array(list(pred_probs)), np.array(list(gt_labels))).data
print()
print('Top 1 Error {}'.format(1. - accuracy))
if __name__ == '__main__':
main()
| import argparse
import sys
import time
import numpy as np
import chainer
import chainer.functions as F
from chainer import iterators
from chainercv.datasets import directory_parsing_label_names
from chainercv.datasets import DirectoryParsingLabelDataset
from chainercv.links import FeaturePredictor
from chainercv.links import VGG16
from chainercv.utils import apply_prediction_to_iterator
class ProgressHook(object):
def __init__(self, n_total):
self.n_total = n_total
self.start = time.time()
self.n_processed = 0
def __call__(self, imgs, pred_values, gt_values):
self.n_processed += len(imgs)
fps = self.n_processed / (time.time() - self.start)
sys.stdout.write(
'\r{:d} of {:d} images, {:.2f} FPS'.format(
self.n_processed, self.n_total, fps))
sys.stdout.flush()
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--model', choices=('vgg16',))
parser.add_argument('--pretrained_model', default='imagenet')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
parser.add_argument('--crop', choices=('center', '10'), default='center')
args = parser.parse_args()
dataset = DirectoryParsingLabelDataset(args.val)
label_names = directory_parsing_label_names(args.val)
iterator = iterators.MultiprocessIterator(
dataset, args.batchsize, repeat=False, shuffle=False,
n_processes=6, shared_mem=300000000)
if args.model == 'vgg16':
extractor = VGG16(len(label_names), args.pretrained_model)
model = FeaturePredictor(
extractor, crop_size=224, scale_size=256, crop=args.crop)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
print('Model has been prepared. Evaluation starts.')
imgs, pred_values, gt_values = apply_prediction_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
del imgs
pred_probs, = pred_values
gt_probs, = gt_values
accuracy = F.accuracy(
np.array(list(pred_probs)), np.array(list(gt_probs))).data
print()
print('Top 1 Error {}'.format(1. - accuracy))
if __name__ == '__main__':
main()
| mit | Python |
f570a92057cb5e7254a5b416df23feb968f8af4e | add pretrained_model ooption to eval_imagenet | chainer/chainercv,yuyu2172/chainercv,chainer/chainercv,pfnet/chainercv,yuyu2172/chainercv | examples/classification/eval_imagenet.py | examples/classification/eval_imagenet.py | import argparse
import random
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import iterators
from chainer import training
from chainer.training import extensions
from chainercv.datasets import ImageFolderDataset
from chainercv.links import VGG16Layers
from chainercv.utils import apply_prediction_to_iterator
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--pretrained_model')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
dataset = ImageFolderDataset(args.val)
iterator = iterators.MultiprocessIterator(
dataset, args.batchsize, repeat=False, shuffle=False,
n_processes=4)
model = VGG16Layers()
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
imgs, pred_values, gt_values = apply_prediction_to_iterator(model.predict, iterator)
del imgs
pred_labels, = pred_values
gt_labels, = gt_values
accuracy = F.accuracy(
np.array(list(pred_labels)), np.array(list(gt_labels))).data
print accuracy
if __name__ == '__main__':
main()
| import argparse
import random
import numpy as np
import chainer
import chainer.links as L
import chainer.functions as F
from chainer import iterators
from chainer import training
from chainer.training import extensions
from chainercv.datasets import ImageFolderDataset
from chainercv.links import VGG16Layers
from chainercv.utils import apply_prediction_to_iterator
def main():
parser = argparse.ArgumentParser(
description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int, default=32)
args = parser.parse_args()
dataset = ImageFolderDataset(args.val)
iterator = iterators.MultiprocessIterator(
dataset, args.batchsize, repeat=False, shuffle=False,
n_processes=4)
model = VGG16Layers()
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
imgs, pred_values, gt_values = apply_prediction_to_iterator(model.predict, iterator)
del imgs
pred_labels, = pred_values
gt_labels, = gt_values
accuracy = F.accuracy(
np.array(list(pred_labels)), np.array(list(gt_labels))).data
print accuracy
if __name__ == '__main__':
main()
| mit | Python |
cf0d127d328fbfc379f02ebc2b31606c736a3aa1 | check name change | sq8kfh/kfhlog,sq8kfh/kfhlog | kfhlog/views/check.py | kfhlog/views/check.py | from pyramid.httpexceptions import HTTPNotFound
from pyramid.view import view_config
from ..models import Qso, Band, Mode, Profile, dbtools
def _check(dbsession, call, profile=None):
call = dbtools.formatters.call_formatter(call)
tmp = None
req_type = None
addrowlink = False
name = ''
if profile:
req_type = Mode
name = "%s (%s)" % (dbsession.query(Profile).get(profile).call, dbsession.query(Profile).get(profile).gridsquare)
tmp = dbsession.query(Qso.mode, Qso.band).group_by(Qso.mode, Qso.band).\
filter_by(call=call, profile=profile).all()
else:
req_type = Profile
addrowlink = True
tmp = dbsession.query(Qso.profile, Qso.band).group_by(Qso.profile, Qso.band).filter_by(call=call).all()
if not tmp:
return {'message': 'Call %s is not in the log...' % call}
col = {x[1] for x in tmp}
col = col.union([5, 7, 8, 9, 10, 11, 12, 13]) #always show 80m-10m band
col = sorted(col)
row = {x[0] for x in tmp}
row = sorted(row)
res = {}
for r, c in tmp:
res[(r, c)] = True
if profile:
rowh = {id: dbsession.query(Mode).get(id).name for id in row}
else:
rowh = {id: "%s (%s)" % (dbsession.query(Profile).get(id).call, dbsession.query(Profile).get(id).gridsquare) for id in row}
colh = [dbsession.query(Band).get(id).name for id in col]
return {'call': call, 'name': name, 'res': res, 'col': col, 'row': row,
'rowh': rowh, 'colh': colh, 'addrowlink': addrowlink}
@view_config(route_name='checkwp', renderer='check.jinja2')
def checkwp_view(request):
profile = request.matchdict['profile']
tmp = request.dbsession.query(Profile).get(profile)
if not tmp:
raise HTTPNotFound()
if 'call' in request.params:
call = request.params['call']
return _check(request.dbsession, call, profile)
return {}
@view_config(route_name='check', renderer='check.jinja2')
def check_view(request):
if 'call' in request.params:
call = request.params['call']
if 'profile' in request.params:
profile = request.params['profile']
return _check(request.dbsession, call, profile)
return _check(request.dbsession, call)
return {}
| from pyramid.httpexceptions import HTTPNotFound
from pyramid.view import view_config
from ..models import Qso, Band, Mode, Profile, dbtools
def _check(dbsession, call, profile=None):
call = dbtools.formatters.call_formatter(call)
tmp = None
req_type = None
addrowlink = False
name = ''
if profile:
req_type = Mode
name = "%s (%s)" % (dbsession.query(Profile).get(profile).call, dbsession.query(Profile).get(profile).gridsquare)
tmp = dbsession.query(Qso.mode, Qso.band).group_by(Qso.mode, Qso.band).\
filter_by(call=call, profile=profile).all()
else:
req_type = Profile
addrowlink = True
tmp = dbsession.query(Qso.profile, Qso.band).group_by(Qso.profile, Qso.band).filter_by(call=call).all()
if not tmp:
return {'message': 'Call %s is not in the log...' % call}
col = {x[1] for x in tmp}
col = col.union([5, 7, 8, 9, 10, 11, 12, 13]) #always show 80m-10m band
col = sorted(col)
row = {x[0] for x in tmp}
row = sorted(row)
res = {}
for r, c in tmp:
res[(r, c)] = True
rowh = {id: dbsession.query(req_type).get(id).name for id in row}
colh = [dbsession.query(Band).get(id).name for id in col]
return {'call': call, 'name': name, 'res': res, 'col': col, 'row': row,
'rowh': rowh, 'colh': colh, 'addrowlink': addrowlink}
@view_config(route_name='checkwp', renderer='check.jinja2')
def checkwp_view(request):
profile = request.matchdict['profile']
tmp = request.dbsession.query(Profile).get(profile)
if not tmp:
raise HTTPNotFound()
if 'call' in request.params:
call = request.params['call']
return _check(request.dbsession, call, profile)
return {}
@view_config(route_name='check', renderer='check.jinja2')
def check_view(request):
if 'call' in request.params:
call = request.params['call']
if 'profile' in request.params:
profile = request.params['profile']
return _check(request.dbsession, call, profile)
return _check(request.dbsession, call)
return {}
| agpl-3.0 | Python |
2471ac5cfe898653e5d274d3e02d0006c768b3fe | Fix style | fnielsen/cvrminer,fnielsen/cvrminer,fnielsen/cvrminer | cvrminer/app/views.py | cvrminer/app/views.py | """Views for cvrminer app."""
from flask import render_template
from . import app
@app.route("/")
def index():
"""Return index page of for app."""
return render_template('base.html')
@app.route("/smiley/")
def smiley():
"""Return smiley page of for app."""
table = app.smiley.db.tables.smiley.head(n=10000).to_html()
return render_template('smiley.html', table=table)
| """Views for cvrminer app."""
from flask import render_template
from . import app
@app.route("/")
def index():
"""Return index page of for app."""
return render_template('base.html')
@app.route("/smiley/")
def smiley():
"""Return smiley page of for app."""
table = app.smiley.db.tables.smiley.head(n=10000).to_html()
return render_template('smiley.html', table=table)
| apache-2.0 | Python |
79158c269669fcbe506ae83e803ef58ba1b40913 | Tweak olfactory input stimulus to produce more interesting output. | cerrno/neurokernel | examples/olfaction/data/gen_olf_input.py | examples/olfaction/data/gen_olf_input.py | #!/usr/bin/env python
"""
Generate sample olfactory model stimulus.
"""
import numpy as np
import h5py
osn_num = 1375
dt = 1e-4 # time step
Ot = 2000 # number of data point during reset period
Rt = 1000 # number of data point during odor delivery period
Nt = 4*Ot + 3*Rt # number of data points in time
t = np.arange(0, dt*Nt, dt)
I = 10.0 # amplitude of odorant concentration
u_on = I*np.ones(Ot, dtype=np.float64)
u_off = np.zeros(Ot, dtype=np.float64)
u_reset = np.zeros(Rt, dtype=np.float64)
u = np.concatenate((u_off, u_reset, u_on, u_reset, u_off, u_reset, u_on))
u_all = np.transpose(np.kron(np.ones((osn_num, 1)), u))
with h5py.File('olfactory_input.h5', 'w') as f:
f.create_dataset('real', (Nt, osn_num),
dtype=np.float64,
data=u_all)
| #!/usr/bin/env python
"""
Generate sample olfactory model stimulus.
"""
import numpy as np
import h5py
osn_num = 1375
dt = 1e-4 # time step
Ot = 2000 # number of data point during reset period
Rt = 1000 # number of data point during odor delivery period
Nt = 4*Ot + 3*Rt # number of data points in time
t = np.arange(0, dt*Nt, dt)
I = -1.*0.0195 # amplitude of odorant concentration
u_on = I*np.ones(Ot, dtype=np.float64)
u_off = np.zeros(Ot, dtype=np.float64)
u_reset = np.zeros(Rt, dtype=np.float64)
u = np.concatenate((u_off, u_reset, u_on, u_reset, u_off, u_reset, u_on))
u_all = np.transpose(np.kron(np.ones((osn_num, 1)), u))
with h5py.File('olfactory_input.h5', 'w') as f:
f.create_dataset('real', (Nt, osn_num),
dtype=np.float64,
data=u_all)
| bsd-3-clause | Python |
0ff797d60c2ddc93579e7c486e8ebb77593014d8 | Add another check for a failed googleapiclient upgrade. | googleapis/google-api-python-client,googleapis/google-api-python-client | apiclient/__init__.py | apiclient/__init__.py | """Retain apiclient as an alias for googleapiclient."""
import googleapiclient
try:
import oauth2client
except ImportError:
raise RuntimeError(
'Previous version of google-api-python-client detected; due to a '
'packaging issue, we cannot perform an in-place upgrade. To repair, '
'remove and reinstall this package, along with oauth2client and '
'uritemplate. One can do this with pip via\n'
' pip install -I google-api-python-client'
)
from googleapiclient import channel
from googleapiclient import discovery
from googleapiclient import errors
from googleapiclient import http
from googleapiclient import mimeparse
from googleapiclient import model
from googleapiclient import sample_tools
from googleapiclient import schema
__version__ = googleapiclient.__version__
| """Retain apiclient as an alias for googleapiclient."""
import googleapiclient
from googleapiclient import channel
from googleapiclient import discovery
from googleapiclient import errors
from googleapiclient import http
from googleapiclient import mimeparse
from googleapiclient import model
from googleapiclient import sample_tools
from googleapiclient import schema
__version__ = googleapiclient.__version__
| apache-2.0 | Python |
4a89eebc30eac581c504fd4c250f02dec66df48d | change script path for packaging change | rcbops/opencenter-agent,rcbops/opencenter-agent | roushagent/plugins/output/plugin_chef.py | roushagent/plugins/output/plugin_chef.py | #!/usr/bin/env python
import sys
from bashscriptrunner import BashScriptRunner
name = "chef"
script = BashScriptRunner(script_path=["roushagent/plugins/lib/%s" % name])
def setup(config):
LOG.debug('Doing setup in test.py')
register_action('install_chef', install_chef)
register_action('run_chef', run_chef)
def install_chef(input_data):
payload = input_data['payload']
action = input_data['action']
required = ["CHEF_SERVER", "CHEF_VALIDATOR"]
optional = ["CHEF_RUNLIST", "CHEF_ENVIRONMENT", "CHEF_VALIDATION_NAME"]
env = dict([(k, v) for k, v in payload.iteritems()
if k in required + optional])
for r in required:
if not r in env:
return {'result_code': 22,
'result_str': 'Bad Request (missing %s)' % r,
'result_data': None}
return script.run_env("install-chef.sh", env, "")
def run_chef(input_data):
payload = input_data['payload']
action = input_data['action']
return script.run("run-chef.sh")
| #!/usr/bin/env python
import sys
from bashscriptrunner import BashScriptRunner
name = "chef"
script = BashScriptRunner(script_path=["plugins/lib/%s" % name])
def setup(config):
LOG.debug('Doing setup in test.py')
register_action('install_chef', install_chef)
register_action('run_chef', run_chef)
def install_chef(input_data):
payload = input_data['payload']
action = input_data['action']
required = ["CHEF_SERVER", "CHEF_VALIDATOR"]
optional = ["CHEF_RUNLIST", "CHEF_ENVIRONMENT", "CHEF_VALIDATION_NAME"]
env = dict([(k, v) for k, v in payload.iteritems()
if k in required + optional])
for r in required:
if not r in env:
return {'result_code': 22,
'result_str': 'Bad Request (missing %s)' % r,
'result_data': None}
return script.run_env("install-chef.sh", env, "")
def run_chef(input_data):
payload = input_data['payload']
action = input_data['action']
return script.run("run-chef.sh")
| apache-2.0 | Python |
267a2784b7ff948ad15336077a36262b393c860b | Add __repr__ | thombashi/DataProperty | dataproperty/_align.py | dataproperty/_align.py | # encoding: utf-8
'''
@author: Tsuyoshi Hombashi
'''
class Align:
class __AlignData(object):
@property
def align_code(self):
return self.__align_code
@property
def align_string(self):
return self.__align_string
def __init__(self, code, string):
self.__align_code = code
self.__align_string = string
def __repr__(self):
return self.align_string
AUTO = __AlignData(1 << 0, "auto")
LEFT = __AlignData(1 << 1, "left")
RIGHT = __AlignData(1 << 2, "right")
CENTER = __AlignData(1 << 3, "center")
| # encoding: utf-8
'''
@author: Tsuyoshi Hombashi
'''
class Align:
class __AlignData:
@property
def align_code(self):
return self.__align_code
@property
def align_string(self):
return self.__align_string
def __init__(self, code, string):
self.__align_code = code
self.__align_string = string
AUTO = __AlignData(1 << 0, "auto")
LEFT = __AlignData(1 << 1, "left")
RIGHT = __AlignData(1 << 2, "right")
CENTER = __AlignData(1 << 3, "center")
| mit | Python |
383fe196da23e01339e017a39d375313900468c8 | Add working configurable prefix | havokoc/MyManJeeves | Jeeves/jeeves.py | Jeeves/jeeves.py | import discord
import asyncio
import random
import configparser
import json
def RunBot(config_file):
config = configparser.ConfigParser()
config.read(config_file)
client = discord.Client()
@client.event
async def on_ready():
print('------')
print('Logged in as %s (%s)' % (client.user.name, client.user.id))
print('------')
@client.event
async def on_message(message):
if message.channel.id == "123410749765713920":
if message.content.startswith('%sknugen' % config['Bot']['prefix']):
with open('config/data.json') as data_file:
data = json.loads(data_file.read())
await client.send_message(message.channel, random.choice(data['knugenLinks']))
client.run(config['Bot']['token'])
if __name__ == "__main__":
print("Please use the start.py script in the root directory instead")
| import discord
import asyncio
import random
import configparser
import json
def RunBot(config_file):
config = configparser.ConfigParser()
config.read(config_file)
client = discord.Client()
@client.event
async def on_ready():
print('------')
print('Logged in as %s (%s)' % (client.user.name, client.user.id))
print('------')
@client.event
async def on_message(message):
if message.channel.id == "123410749765713920":
if message.content.startswith('-knugen'):
with open('config/data.json') as data_file:
data = json.loads(data_file.read())
await client.send_message(message.channel, random.choice(data['knugenLinks']))
client.run(config['Bot']['token'])
if __name__ == "__main__":
print("Please use the start.py script in the root directory instead")
| mit | Python |
2995f15c1bcb1bc85d83c7407be199b27882a215 | Update for fixing odd Japanese | mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/SeleniumBase | examples/translations/japanese_test_1.py | examples/translations/japanese_test_1.py | # Japanese Language Test - Python 3 Only!
from seleniumbase.translate.japanese import セレニウムテストケース # noqa
class テストクラス(セレニウムテストケース): # noqa
def test_例1(self):
self.URLを開く("https://ja.wikipedia.org/wiki/")
self.テキストを確認する("ウィキペディア")
self.要素を確認する('[title="メインページに移動する"]')
self.テキストを更新("#searchInput", "アニメ")
self.クリックして("#searchButton")
self.テキストを確認する("アニメ", "#firstHeading")
self.テキストを更新("#searchInput", "寿司")
self.クリックして("#searchButton")
self.テキストを確認する("寿司", "#firstHeading")
self.要素を確認する('img[alt="握り寿司"]')
| # Japanese Language Test - Python 3 Only!
from seleniumbase.translate.japanese import セレンテストケース # noqa
class テストクラス(セレンテストケース): # noqa
def test_例1(self):
self.URLを開く("https://ja.wikipedia.org/wiki/")
self.テキストを確認する("ウィキペディア")
self.要素を確認する('[title="メインページに移動する"]')
self.テキストを更新("#searchInput", "アニメ")
self.クリックして("#searchButton")
self.テキストを確認する("アニメ", "#firstHeading")
self.テキストを更新("#searchInput", "寿司")
self.クリックして("#searchButton")
self.テキストを確認する("寿司", "#firstHeading")
self.要素を確認する('img[alt="握り寿司"]')
| mit | Python |
7f8013e7fb8865f9f3a1e541bb00ce8c54048cb2 | delete some marks | LanceVan/SciCycle | Interpolation.py | Interpolation.py | import numpy as np
class Interpolation:
def __init__(self, x, y):
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
raise TypeError("Type of Parameter should be numpy.ndarray")
sizex = x.shape
sizey = y.shape
if len(sizex) != 1 or len(sizey) != 1:
if not (len(sizex) == 2 and sizex[1] == 1 and len(sizey) == 2 and sizey[1] == 1):
raise ValueError("Size of Parameter should be one dimension")
if sizex != sizey:
raise ValueError("Size of Parameter should be same")
self.size = sizex[0]
self.x = x.reshape(1, self.size)
self.y = y.reshape(1, self.size)
| import numpy as np
class Interpolation:
def __init__(self, x, y):
if not isinstance(x, np.ndarray) or not isinstance(y, np.ndarray):
print(isinstance(x, np.ndarray))
raise TypeError("Type of Parameter should be numpy.ndarray")
sizex = x.shape
sizey = y.shape
if len(sizex) != 1 or len(sizey) != 1:
if not (len(sizex) == 2 and sizex[1] == 1 and len(sizey) == 2 and sizey[1] == 1):
raise ValueError("Size of Parameter should be one dimension")
if sizex != sizey:
raise ValueError("Size of Parameter should be same")
self.size = sizex[0]
self.x = x.reshape(1, self.size)
self.y = y.reshape(1, self.size)
| mit | Python |
12bf3032aeb87bfb3b64607cae1e7f6d29da440c | Update version, file: setup.py | shlomiLan/tvsort_sl | setup.py | setup.py | from setuptools import setup
setup(
name='tvsort_sl',
packages=['tvsort_sl'],
version='1.1.24',
description='Sort movies and TV-shows files',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Shlomi Lanton',
author_email='shlomilanton@gmail.com',
url='https://github.com/shlomiLan/tvsort_sl',
download_url='https://github.com/shlomiLan/tvsort_sl/archive/0.1.zip',
keywords=['sort', 'tv', 'show', 'movie', 'KODI', 'XBM1C'],
classifiers=[],
setup_requires=['wheel']
)
| from setuptools import setup
setup(
name='tvsort_sl',
packages=['tvsort_sl'],
version='1.1.23',
description='Sort movies and TV-shows files',
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
author='Shlomi Lanton',
author_email='shlomilanton@gmail.com',
url='https://github.com/shlomiLan/tvsort_sl',
download_url='https://github.com/shlomiLan/tvsort_sl/archive/0.1.zip',
keywords=['sort', 'tv', 'show', 'movie', 'KODI', 'XBM1C'],
classifiers=[],
setup_requires=['wheel']
)
| mit | Python |
decb9212a5e0adae31d8e7562fa8258c222aae23 | Add a logger for dbmigrator that writes to stdout | karenc/db-migrator | dbmigrator/__init__.py | dbmigrator/__init__.py | # -*- coding: utf-8 -*-
import logging
import sys
logger = logging.getLogger('dbmigrator')
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(name)s (%(filename)s) - %(message)s'))
logger.addHandler(handler)
| agpl-3.0 | Python | |
ab2f330f5e6a5fa58670fde6bed24c4e66d15f3c | add helper methods to fs hauler/receiver needed to handle multiple disks | xemul/p.haul,aburluka/p.haul,aburluka/p.haul,aburluka/p.haul,arthurlockman/p.haul,jne100/p.haul,arthurlockman/p.haul,aburluka/p.haul,jne100/p.haul,aburluka/p.haul,arthurlockman/p.haul,arthurlockman/p.haul,xemul/p.haul,xemul/p.haul,jne100/p.haul,xemul/p.haul,xemul/p.haul,arthurlockman/p.haul,jne100/p.haul,jne100/p.haul | phaul/fs_haul_ploop.py | phaul/fs_haul_ploop.py | #
# ploop disk hauler
#
import os
import logging
import threading
import libploop
DDXML_FILENAME = "DiskDescriptor.xml"
class p_haul_fs:
def __init__(self, ddxml_path, fs_sk):
"""Initialize ploop disk hauler
Initialize ploop disk hauler with specified path to DiskDescriptor.xml
file and socket.
"""
logging.info("Initilized ploop hauler (%s)", ddxml_path)
self.__ploopcopy = libploop.ploopcopy(ddxml_path, fs_sk.fileno())
def set_options(self, opts):
pass
def set_work_dir(self, wdir):
pass
def start_migration(self):
self.__ploopcopy.copy_start()
def next_iteration(self):
self.__ploopcopy.copy_next_iteration()
def stop_migration(self):
self.__ploopcopy.copy_stop()
def persistent_inodes(self):
"""Inode numbers do not change during ploop disk migration"""
return True
def __log_init_hauler(self, deltas):
logging.info("Initialize ploop hauler")
for delta in deltas:
logging.info("\t`- %s", delta[0])
def __get_ddxml_path(self, delta_path):
"""Get path to disk descriptor file by path to disk delta"""
return os.path.join(os.path.dirname(delta_path), DDXML_FILENAME)
def __check_ddxml(self, ddxml_path):
"""Check disk descriptor file exist"""
if not os.path.isfile(ddxml_path):
raise Exception("{0} file missing".format(ddxml_path))
class p_haul_fs_receiver:
def __init__(self, fname_path, fs_sk):
"""Initialize ploop disk receiver
Initialize ploop disk receiver with specified path to root.hds file
and socket.
"""
self.__delta_receiver = delta_receiver(fname_path, fs_sk)
def start_receive(self):
self.__delta_receiver.start()
def stop_receive(self):
self.__delta_receiver.join()
def __log_init_receiver(self, deltas):
logging.info("Initialize ploop receiver")
for delta in deltas:
logging.info("\t`- %s", delta[0])
def __check_delta(self, delta_path):
"""Check delta file don't exist and parent directory exist"""
delta_dir = os.path.dirname(delta_path)
if not os.path.isdir(delta_dir):
raise Exception("{0} directory missing".format(delta_dir))
if os.path.isfile(delta_path):
raise Exception("{0} already exist".format(delta_path))
class delta_receiver(threading.Thread):
def __init__(self, delta_path, delta_fd):
"""Initialize ploop single active delta receiver"""
threading.Thread.__init__(self)
self.__path = delta_path
self.__fd = delta_fd
def run(self):
try:
libploop.ploopcopy_receiver(self.__path, self.__fd)
except:
logging.exception("Exception in %s delta receiver", self.__path)
| #
# ploop disk hauler
#
import logging
import threading
import libploop
class p_haul_fs:
def __init__(self, ddxml_path, fs_sk):
"""Initialize ploop disk hauler
Initialize ploop disk hauler with specified path to DiskDescriptor.xml
file and socket.
"""
logging.info("Initilized ploop hauler (%s)", ddxml_path)
self.__ploopcopy = libploop.ploopcopy(ddxml_path, fs_sk.fileno())
def set_options(self, opts):
pass
def set_work_dir(self, wdir):
pass
def start_migration(self):
self.__ploopcopy.copy_start()
def next_iteration(self):
self.__ploopcopy.copy_next_iteration()
def stop_migration(self):
self.__ploopcopy.copy_stop()
def persistent_inodes(self):
"""Inode numbers do not change during ploop disk migration"""
return True
class p_haul_fs_receiver:
def __init__(self, fname_path, fs_sk):
"""Initialize ploop disk receiver
Initialize ploop disk receiver with specified path to root.hds file
and socket.
"""
self.__delta_receiver = delta_receiver(fname_path, fs_sk)
def start_receive(self):
self.__delta_receiver.start()
def stop_receive(self):
self.__delta_receiver.join()
class delta_receiver(threading.Thread):
def __init__(self, delta_path, delta_fd):
"""Initialize ploop single active delta receiver"""
threading.Thread.__init__(self)
self.__path = delta_path
self.__fd = delta_fd
def run(self):
try:
libploop.ploopcopy_receiver(self.__path, self.__fd)
except:
logging.exception("Exception in %s delta receiver", self.__path)
| lgpl-2.1 | Python |
b7d80c9a1e820f2b3d2df918d7943781ec9b8635 | Fix for python setup. | xintron/pycri-urltitle | setup.py | setup.py | """
pycri-urltitle
-------------
Plugin for resolving <title>-elements from URLs.
"""
from setuptools import setup
setup(
name='pycri-urltitle',
version='0.1.0',
url='https://github.com/xintron/pycri-urltitle',
license='BSD',
author='Marcus Carlsson',
author_email='carlsson.marcus@gmail.com',
description='Title-resolver for URLs',
long_description=__doc__,
packages=['pycri_urltitle'],
provides='pycri_urltitle',
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'pycri',
'lxml'
],
classifiers=[
'Environment :: Console',
'Framework :: pycri',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| """
pycri-urltitle
-------------
Plugin for resolving <title>-elements from URLs.
"""
from setuptools import setup
setup(
name='pycri-urltitle',
version='0.1.0',
url='https://github.com/xintron/pycri-urltitle',
license='BSD',
author='Marcus Carlsson',
author_email='carlsson.marcus@gmail.com',
description='Title-resolver for URLs',
long_description=__doc__,
py_modules=['pycri_urltitle'],
provides='pycri_urltitle',
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'pycri',
'lxml'
],
classifiers=[
'Environment :: Console',
'Framework :: pycri',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Communications :: Chat :: Internet Relay Chat',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| bsd-3-clause | Python |
f53dec873d4d8d01ef8b880ecd8295593a75cd61 | Support enum in python 2 | VirusTotal/vt-graph-api,VirusTotal/vt-graph-api | setup.py | setup.py | """Setup for vt_graph_api module."""
import re
import sys
import setuptools
with open("./vt_graph_api/version.py") as f:
version = (
re.search(r"__version__ = \'([0-9]{1,}.[0-9]{1,}.[0-9]{1,})\'",
f.read()).groups()[0])
# check python version >2.7.x and >=3.2.x
installable = True
if sys.version_info.major == 3:
if sys.version_info.minor < 2:
installable = False
else:
if sys.version_info.minor < 7:
installable = False
if not installable:
sys.exit("Sorry, this python version is not supported")
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = [
"requests",
"six"
]
if(sys.version_info.major == 2):
# Support enums in python 2.
install_requires.append("enum34")
setuptools.setup(
name="vt_graph_api",
version=version,
author="VirusTotal",
author_email="vt_graph_api@virustotal.com",
description="The official Python client library for VirusTotal Graph API",
license="Apache 2",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/virustotal/vt-graph-api",
packages=setuptools.find_packages(),
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| """Setup for vt_graph_api module."""
import re
import sys
import setuptools
with open("./vt_graph_api/version.py") as f:
version = (
re.search(r"__version__ = \'([0-9]{1,}.[0-9]{1,}.[0-9]{1,})\'",
f.read()).groups()[0])
# check python version >2.7.x and >=3.2.x
installable = True
if sys.version_info.major == 3:
if sys.version_info.minor < 2:
installable = False
else:
if sys.version_info.minor < 7:
installable = False
if not installable:
sys.exit("Sorry, this python version is not supported")
with open("README.md", "r") as fh:
long_description = fh.read()
install_requires = [
"requests",
"six"
]
setuptools.setup(
name="vt_graph_api",
version=version,
author="VirusTotal",
author_email="vt_graph_api@virustotal.com",
description="The official Python client library for VirusTotal Graph API",
license="Apache 2",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/virustotal/vt-graph-api",
packages=setuptools.find_packages(),
install_requires=install_requires,
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
)
| apache-2.0 | Python |
c3f064e9665d3011f643ec3a02d305bc046a0834 | Bump version. | mwchase/class-namespaces,mwchase/class-namespaces | setup.py | setup.py | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='class_namespaces',
version='0.3.7',
description='Class Namespaces',
long_description=long_description,
url='https://github.com/mwchase/class-namespaces',
author='Max Woerner Chase',
author_email='max.chase@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='class namespaces',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
extras_require={
'test': ['coverage', 'pytest'],
},
)
| """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='class_namespaces',
version='0.3.6',
description='Class Namespaces',
long_description=long_description,
url='https://github.com/mwchase/class-namespaces',
author='Max Woerner Chase',
author_email='max.chase@gmail.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='class namespaces',
packages=find_packages(exclude=['contrib', 'docs', 'tests']),
extras_require={
'test': ['coverage', 'pytest'],
},
)
| mit | Python |
aa09ad861f104f987928fe9f5107ccfe0e4473c3 | add some extra info to barebones setup.py | ChrisTM/Flask-CacheBust | setup.py | setup.py | from setuptools import setup
setup(
name='Flask-CacheBust',
version='1.0.0',
description='Flask extension that cache-busts static files',
packages=['flask_cache_bust'],
license='MIT',
url='https://github.com/ChrisTM/Flask-CacheBust',
install_requires=[
'Flask',
],
)
| from setuptools import setup
setup(
name='Flask-CacheBust',
version='1.0.0',
packages=['flask_cache_bust'],
)
| mit | Python |
405e7f5a3272030b145497d81739228ec31eafe0 | Fix data install | ProjetPP/PPP-QuestionParsing-Grammatical,ProjetPP/PPP-QuestionParsing-Grammatical | setup.py | setup.py | #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='ppp_questionparsing_grammatical',
version='0.6.1',
description='Natural language processing module for the PPP.',
url='https://github.com/ProjetPP/PPP-QuestionParsing-Grammatical',
author='Projet Pensées Profondes',
author_email='ppp2014@listes.ens-lyon.fr',
license='MIT',
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Development Status :: 1 - Planning',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Libraries',
],
install_requires=[
'python3-memcached',
'ppp_datamodel>=0.6.4,<0.7',
'ppp_libmodule>=0.6,<0.8',
'jsonrpclib-pelix',
'nltk'
],
packages=[
'ppp_questionparsing_grammatical',
'ppp_questionparsing_grammatical.data',
],
package_data={
'ppp_questionparsing_grammatical': ['data/*.pickle'],
},
)
import sys
if 'install' in sys.argv:
import nltk
nltk.download("wordnet")
| #!/usr/bin/env python3
from setuptools import setup, find_packages
setup(
name='ppp_questionparsing_grammatical',
version='0.6',
description='Natural language processing module for the PPP.',
url='https://github.com/ProjetPP/PPP-QuestionParsing-Grammatical',
author='Projet Pensées Profondes',
author_email='ppp2014@listes.ens-lyon.fr',
license='MIT',
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Development Status :: 1 - Planning',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Software Development :: Libraries',
],
install_requires=[
'python3-memcached',
'ppp_datamodel>=0.6.4,<0.7',
'ppp_libmodule>=0.6,<0.8',
'jsonrpclib-pelix',
'nltk'
],
packages=[
'ppp_questionparsing_grammatical',
'ppp_questionparsing_grammatical.data',
],
package_data={
'ppp_questionparsing_grammatical': ['data/*.{pickle,py}'],
},
)
import sys
if 'install' in sys.argv:
import nltk
nltk.download("wordnet")
| agpl-3.0 | Python |
5635a3fdea5b61448c18e9518c226e71c9242cd9 | bump version | cloudnative/cruddy | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
requires = [
'boto3',
'click',
]
setup(
name='cruddy',
version='0.7.0',
description='A CRUD wrapper class for Amazon DynamoDB',
long_description=open('README.md').read(),
author='Mitch Garnaat',
author_email='mitch@cloudnative.io',
url='https://github.com/cloudnative/cruddy',
packages=find_packages(exclude=['tests*']),
entry_points="""
[console_scripts]
cruddy=cruddy.scripts.cli:cli
""",
install_requires=requires,
license="Apache License 2.0",
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
),
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
requires = [
'boto3',
'click',
]
setup(
name='cruddy',
version='0.6.1',
description='A CRUD wrapper class for Amazon DynamoDB',
long_description=open('README.md').read(),
author='Mitch Garnaat',
author_email='mitch@cloudnative.io',
url='https://github.com/cloudnative/cruddy',
packages=find_packages(exclude=['tests*']),
entry_points="""
[console_scripts]
cruddy=cruddy.scripts.cli:cli
""",
install_requires=requires,
license="Apache License 2.0",
classifiers=(
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'
),
)
| apache-2.0 | Python |
27be636c9a11d1bff304e1729a168de8ef3952ce | Bump version | mattions/pyfaidx | setup.py | setup.py | from setuptools import setup
setup(
name='pyfaidx',
provides='pyfaidx',
version='0.1.7',
author='Matthew Shirley',
author_email='mdshw5@gmail.com',
url='http://mattshirley.com',
description='pyfaidx: efficient pythonic random '
'access to fasta subsequences',
license='MIT',
packages=['pyfaidx'],
install_requires=['six'],
entry_points={'console_scripts': ['faidx = pyfaidx.cli:main']},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Scientific/Engineering :: Bio-Informatics"
]
)
| from setuptools import setup
setup(
name='pyfaidx',
provides='pyfaidx',
version='0.1.6',
author='Matthew Shirley',
author_email='mdshw5@gmail.com',
url='http://mattshirley.com',
description='pyfaidx: efficient pythonic random '
'access to fasta subsequences',
license='MIT',
packages=['pyfaidx'],
install_requires=['six'],
entry_points={'console_scripts': ['faidx = pyfaidx.cli:main']},
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: MIT License",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Natural Language :: English",
"Operating System :: Unix",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Scientific/Engineering :: Bio-Informatics"
]
)
| bsd-3-clause | Python |
ffbf1991de4924c0e2af60a4907a935339db59d1 | increment version | learningequality/kolibri-exercise-perseus-plugin,learningequality/kolibri-exercise-perseus-plugin | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
from setuptools import setup
def read_file(fname):
"""
Read file and decode in py2k
"""
if sys.version_info < (3,):
return open(fname).read().decode("utf-8")
return open(fname).read()
dist_name = 'kolibri_exercise_perseus_plugin'
readme = read_file('README.rst')
# Default description of the distributed package
description = (
"""Kolibri plugin for rendering Khan Academy Perseus style exercises"""
)
######################################
# STATIC AND DYNAMIC BUILD SPECIFICS #
######################################
def enable_log_to_stdout(logname):
"""Given a log name, outputs > INFO to stdout."""
log = logging.getLogger(logname)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
setup(
name=dist_name,
version="1.0.3",
description=description,
long_description="{readme}".format(
readme=readme,
),
author='Learning Equality',
author_email='info@learningequality.org',
url='https://github.com/learningequality/kolibri-exercise-perseus-plugin',
packages=[
str('kolibri_exercise_perseus_plugin'), # https://github.com/pypa/setuptools/pull/597
],
package_dir={'kolibri_exercise_perseus_plugin': 'kolibri_exercise_perseus_plugin'},
include_package_data=True,
license='MIT',
zip_safe=False,
keywords='kolibri',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import logging
import sys
from setuptools import setup
def read_file(fname):
"""
Read file and decode in py2k
"""
if sys.version_info < (3,):
return open(fname).read().decode("utf-8")
return open(fname).read()
dist_name = 'kolibri_exercise_perseus_plugin'
readme = read_file('README.rst')
# Default description of the distributed package
description = (
"""Kolibri plugin for rendering Khan Academy Perseus style exercises"""
)
######################################
# STATIC AND DYNAMIC BUILD SPECIFICS #
######################################
def enable_log_to_stdout(logname):
"""Given a log name, outputs > INFO to stdout."""
log = logging.getLogger(logname)
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
log.addHandler(ch)
setup(
name=dist_name,
version="1.0.2",
description=description,
long_description="{readme}".format(
readme=readme,
),
author='Learning Equality',
author_email='info@learningequality.org',
url='https://github.com/learningequality/kolibri-exercise-perseus-plugin',
packages=[
str('kolibri_exercise_perseus_plugin'), # https://github.com/pypa/setuptools/pull/597
],
package_dir={'kolibri_exercise_perseus_plugin': 'kolibri_exercise_perseus_plugin'},
include_package_data=True,
license='MIT',
zip_safe=False,
keywords='kolibri',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: Implementation :: PyPy',
],
)
| mit | Python |
181947ed282c449cda7133cb88d20ad6bdcf3465 | bump version | Mego/Seriously | setup.py | setup.py | #!/usr/bin/env python3
"""Seriously - a Python-based golfing language"""
from setuptools import setup, find_packages
need_stats = False
try:
import statistics
except:
need_stats = True
setup(
name='seriously',
version='2.1.22',
description='A Python-based golfing language',
long_description='Seriously is a Python-based golfing language. See the GitHub page for more details.',
url='https://github.com/Mego/Seriously',
author='Mego',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires = ['stats'] if need_stats else [],
packages = ['seriously', 'seriouslylib'],
keywords='codegolf recreational',
entry_points={
'console_scripts': [
'seriously=seriously:main',
],
},
)
| #!/usr/bin/env python3
"""Seriously - a Python-based golfing language"""
from setuptools import setup, find_packages
need_stats = False
try:
import statistics
except:
need_stats = True
setup(
name='seriously',
version='2.1.21',
description='A Python-based golfing language',
long_description='Seriously is a Python-based golfing language. See the GitHub page for more details.',
url='https://github.com/Mego/Seriously',
author='Mego',
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
install_requires = ['stats'] if need_stats else [],
packages = ['seriously', 'seriouslylib'],
keywords='codegolf recreational',
entry_points={
'console_scripts': [
'seriously=seriously:main',
],
},
)
| mit | Python |
e15fb53c0fd63942cafd3a6f11418447df6b6800 | Add smoketest for convering CoverageDataset to str. | dopplershift/siphon,dopplershift/siphon,Unidata/siphon | siphon/cdmr/tests/test_coveragedataset.py | siphon/cdmr/tests/test_coveragedataset.py | # Copyright (c) 2016 Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
import warnings
from siphon.testing import get_recorder
from siphon.cdmr.coveragedataset import CoverageDataset
recorder = get_recorder(__file__)
# Ignore warnings about CoverageDataset
warnings.simplefilter('ignore')
@recorder.use_cassette('hrrr_cdmremotefeature')
def test_simple_cdmremotefeature():
'Just a smoke test for CDMRemoteFeature'
cd = CoverageDataset('http://localhost:8080/thredds/cdmrfeature/grid/'
'test/HRRR_CONUS_2p5km_20160309_1600.grib2')
assert cd.grids
@recorder.use_cassette('hrrr_cdmremotefeature')
def test_simple_cdmremotefeature_str():
'Just a smoke test for converting CoverageDataset to str'
cd = CoverageDataset('http://localhost:8080/thredds/cdmrfeature/grid/'
'test/HRRR_CONUS_2p5km_20160309_1600.grib2')
assert str(cd)
| # Copyright (c) 2016 Unidata.
# Distributed under the terms of the MIT License.
# SPDX-License-Identifier: MIT
import warnings
from siphon.testing import get_recorder
from siphon.cdmr.coveragedataset import CoverageDataset
recorder = get_recorder(__file__)
# Ignore warnings about CoverageDataset
warnings.simplefilter('ignore')
@recorder.use_cassette('hrrr_cdmremotefeature')
def test_simple_cdmremotefeature():
'Just a smoke test for CDMRemoteFeature'
cd = CoverageDataset('http://localhost:8080/thredds/cdmrfeature/grid/'
'test/HRRR_CONUS_2p5km_20160309_1600.grib2')
assert cd.grids
| bsd-3-clause | Python |
b82c0190d8f60283549ebe7fcad002576c3f3d70 | fix setup.py metadata for 1.12.1 | NahomAgidew/stripe-python,HashNuke/stripe-python,alexmic/stripe-python,stripe/stripe-python,uploadcare/stripe-python,koobs/stripe-python,Khan/stripe-python,woodb/stripe-python,zenmeso/stripe-python | setup.py | setup.py | import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
path, script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(path))
requests = 'requests >= 0.8.8'
if sys.version_info < (2, 6):
requests += ', < 0.10.1'
install_requires = [requests]
# Don't import stripe module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'stripe'))
from version import VERSION
# Get simplejson if we don't already have json
if sys.version_info < (3, 0):
try:
from util import json
except ImportError:
install_requires.append('simplejson')
setup(
name='stripe',
cmdclass={'build_py': build_py},
version=VERSION,
description='Stripe python bindings',
author='Stripe',
author_email='support@stripe.com',
url='https://stripe.com/',
packages=['stripe', 'stripe.test'],
package_data={'stripe': ['data/ca-certificates.crt', '../VERSION']},
install_requires=install_requires,
test_suite='stripe.test.all',
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
path, script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(path))
requests = 'requests >= 0.8.8'
if sys.version_info < (2, 6):
requests += ', < 0.10.1'
install_requires = [requests]
# Don't import stripe module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'stripe'))
from version import VERSION
# Get simplejson if we don't already have json
if sys.version_info < (3, 0):
try:
from util import json
except ImportError:
install_requires.append('simplejson')
setup(
name='stripe',
cmdclass={'build_py': build_py},
version=VERSION,
description='Stripe python bindings',
author='Stripe',
author_email='support@stripe.com',
url='https://stripe.com/',
packages=['stripe', 'stripe.test'],
package_data={'stripe': ['data/ca-certificates.crt', '../VERSION']},
install_requires=install_requires,
test_suite='stripe.test.all',
use_2to3=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.5",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: Implementation :: PyPy"
"Topic :: Software Development :: Libraries :: Python Modules",
])
| mit | Python |
f15026d5040b687914f60eae632848983844308b | Include static assets in the dist | urbanairship/pasttle,thekad/pasttle,urbanairship/pasttle,thekad/pasttle,thekad/pasttle,urbanairship/pasttle | setup.py | setup.py | #!/usr/bin/env python
#
# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:
#
import os
import pasttle
import sys
from setuptools import setup
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
readme = os.path.join(os.path.dirname(sys.argv[0]), 'README.rst')
requirements = os.path.join(os.path.dirname(sys.argv[0]), 'requirements.txt')
setup(
name='pasttle',
packages=[
'pasttle',
],
package_data={
'pasttle': [
'views/*.html',
'views/css/*.css',
'views/images/*',
],
},
version=pasttle.__version__,
url='http://github.com/thekad/pasttle/',
description='Simple pastebin on top of bottle.',
author='Jorge Gallegos',
author_email='kad@blegh.net',
license='MIT',
platforms='any',
zip_safe=False,
entry_points={
'console_scripts': [
'pasttle-server.py=pasttle.server:main'
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=['pastebin', 'web', 'paste', 'bottlepy'],
long_description=open(readme).read(),
install_requires=open(requirements).readlines(),
**extra
)
| #!/usr/bin/env python
#
# -*- mode:python; sh-basic-offset:4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim:set tabstop=4 softtabstop=4 expandtab shiftwidth=4 fileencoding=utf-8:
#
import os
import pasttle
import sys
from setuptools import setup
extra = {}
if sys.version_info >= (3,):
extra['use_2to3'] = True
readme = os.path.join(os.path.dirname(sys.argv[0]), 'README.rst')
requirements = os.path.join(os.path.dirname(sys.argv[0]), 'requirements.txt')
setup(
name='pasttle',
packages=[
'pasttle',
],
package_data={
'pasttle': [
'views/*.html',
],
},
version=pasttle.__version__,
url='http://github.com/thekad/pasttle/',
description='Simple pastebin on top of bottle.',
author='Jorge Gallegos',
author_email='kad@blegh.net',
license='MIT',
platforms='any',
zip_safe=False,
entry_points={
'console_scripts': [
'pasttle-server.py=pasttle.server:main'
],
},
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
keywords=['pastebin', 'web', 'paste', 'bottlepy'],
long_description=open(readme).read(),
install_requires=open(requirements).readlines(),
**extra
)
| mit | Python |
3817d7390fddebd137c99865455f0ae145dbcf63 | fix typo in verify_asymmetric_ec.py (#227) | googleapis/python-kms,googleapis/python-kms | samples/snippets/verify_asymmetric_ec.py | samples/snippets/verify_asymmetric_ec.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# [START kms_verify_asymmetric_signature_ec]
def verify_asymmetric_ec(project_id, location_id, key_ring_id, key_id, version_id, message, signature):
"""
Verify the signature of an message signed with an asymmetric EC key.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key to use (e.g. 'my-key').
version_id (string): ID of the version to use (e.g. '1').
message (string): Original message (e.g. 'my message')
signature (bytes): Signature from a sign request.
Returns:
bool: True if verified, False otherwise
"""
# Import the client library.
from google.cloud import kms
# Import cryptographic helpers from the cryptography package.
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, utils
# Import hashlib.
import hashlib
# Convert the message to bytes.
message_bytes = message.encode('utf-8')
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the key version name.
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, key_id, version_id)
# Get the public key.
public_key = client.get_public_key(request={'name': key_version_name})
# Extract and parse the public key as a PEM-encoded EC key.
pem = public_key.pem.encode('utf-8')
ec_key = serialization.load_pem_public_key(pem, default_backend())
hash_ = hashlib.sha256(message_bytes).digest()
# Attempt to verify.
try:
sha256 = hashes.SHA256()
ec_key.verify(signature, hash_, ec.ECDSA(utils.Prehashed(sha256)))
print('Signature verified')
return True
except InvalidSignature:
print('Signature failed to verify')
return False
# [END kms_verify_asymmetric_signature_ec]
| # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# [START kms_verify_asymmetric_signature_ec]
def verify_asymmetric_ec(project_id, location_id, key_ring_id, key_id, version_id, message, signature):
"""
Verify the signature of an message signed with an asymmetric EC key.
Args:
project_id (string): Google Cloud project ID (e.g. 'my-project').
location_id (string): Cloud KMS location (e.g. 'us-east1').
key_ring_id (string): ID of the Cloud KMS key ring (e.g. 'my-key-ring').
key_id (string): ID of the key to use (e.g. 'my-key').
version_id (string): ID of the version to use (e.g. '1').
message (string): Original message (e.g. 'my message')
signature (bytes): Signature from a sign request.
Returns:
bool: True if verified, False otherwise
"""
# Import the client library.
from google.cloud import kms
# Import cryptographic helpers from the cryptography package.
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import ec, utils
# Import hashlib.
import hashlib
# Convert the message to bytes.
message_bytes = message.encode('utf-8')
# Create the client.
client = kms.KeyManagementServiceClient()
# Build the key version name.
key_version_name = client.crypto_key_version_path(project_id, location_id, key_ring_id, key_id, version_id)
# Get the public key.
public_key = client.get_public_key(request={'name': key_version_name})
# Extract and parse the public key as a PEM-encoded RSA key.
pem = public_key.pem.encode('utf-8')
ec_key = serialization.load_pem_public_key(pem, default_backend())
hash_ = hashlib.sha256(message_bytes).digest()
# Attempt to verify.
try:
sha256 = hashes.SHA256()
ec_key.verify(signature, hash_, ec.ECDSA(utils.Prehashed(sha256)))
print('Signature verified')
return True
except InvalidSignature:
print('Signature failed to verify')
return False
# [END kms_verify_asymmetric_signature_ec]
| apache-2.0 | Python |
75cc8e8bc4be03632e7309407d7b44e5646d4b27 | Fix setup.py link to README.md | ismailof/mopidy-json-client | setup.py | setup.py | from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
setup(
name='Mopidy-JSON-Client',
version=get_version('mopidy_json_client/__init__.py'),
url='https://github.com/ismailof/mopidy-json-client',
license='Apache License, Version 2.0',
author='Ismael Asensio',
author_email='isma.af@gmail.com',
description='Mopidy Client via JSON/RPC Websocket interface',
long_description=open('README.md').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
],
entry_points={
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| from __future__ import unicode_literals
import re
from setuptools import find_packages, setup
def get_version(filename):
with open(filename) as fh:
metadata = dict(re.findall("__([a-z]+)__ = '([^']+)'", fh.read()))
return metadata['version']
setup(
name='Mopidy-JSON-Client',
version=get_version('mopidy_json_client/__init__.py'),
url='https://github.com/ismailof/mopidy-json-client',
license='Apache License, Version 2.0',
author='Ismael Asensio',
author_email='isma.af@gmail.com',
description='Mopidy Client via JSON/RPC Websocket interface',
long_description=open('README.rst').read(),
packages=find_packages(exclude=['tests', 'tests.*']),
zip_safe=False,
include_package_data=True,
install_requires=[
'setuptools',
'Mopidy >= 1.0',
],
entry_points={
},
classifiers=[
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Multimedia :: Sound/Audio :: Players',
],
)
| apache-2.0 | Python |
43223d67412c209074816bcee86ca53482d4be52 | Add classifiers | nblock/feeds,Lukas0907/feeds,Lukas0907/feeds,nblock/feeds | setup.py | setup.py | from setuptools import find_packages, setup
setup(
name="feeds",
version="2017.08.14",
# Author details
author="Florian Preinstorfer, Lukas Anzinger",
author_email="florian@nblock.org, lukas@lukasanzinger.at",
url="https://github.com/nblock/feeds",
packages=find_packages(),
include_package_data=True,
install_requires=[
"Click>=6.6",
"Scrapy>=1.1",
"bleach>=1.4.3",
"dateparser>=0.5.1",
"feedparser",
"lxml>=3.5.0",
"python-dateutil>=2.7.3",
"pyxdg>=0.26",
"readability-lxml>=0.7",
],
extras_require={
"docs": ["doc8", "restructuredtext_lint", "sphinx", "sphinx_rtd_theme"],
"style": ["black", "flake8", "isort"],
},
entry_points="""
[console_scripts]
feeds=feeds.cli:main
""",
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Framework :: Scrapy",
"Intended Audience :: Developers",
"License :: OSI Approved :: GNU Affero General Public License v3",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Internet :: WWW/HTTP",
],
)
| from setuptools import find_packages, setup
setup(
name="feeds",
version="2017.08.14",
# Author details
author="Florian Preinstorfer, Lukas Anzinger",
author_email="florian@nblock.org, lukas@lukasanzinger.at",
url="https://github.com/nblock/feeds",
packages=find_packages(),
include_package_data=True,
install_requires=[
"Click>=6.6",
"Scrapy>=1.1",
"bleach>=1.4.3",
"dateparser>=0.5.1",
"feedparser",
"lxml>=3.5.0",
"python-dateutil>=2.7.3",
"pyxdg>=0.26",
"readability-lxml>=0.7",
],
extras_require={
"docs": ["doc8", "restructuredtext_lint", "sphinx", "sphinx_rtd_theme"],
"style": ["black", "flake8", "isort"],
},
entry_points="""
[console_scripts]
feeds=feeds.cli:main
""",
)
| agpl-3.0 | Python |
a5448576fba761a6eb6aca4edfb738bcdd80b375 | Add classifiers to setup.py. | sumeet/artie | setup.py | setup.py | from setuptools import setup
setup(
name='artie',
version='0.1',
url='http://github.com/sumeet/artie',
author='Sumeet Agarwal',
author_email='sumeet.a@gmail.com',
description='IRC utility robot framework for Python',
packages=['artie',],
scripts=['bin/artie-run.py',],
install_requires=['twisted', 'pyyaml',],
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: No Input/Output (Daemon)',
'Intended Audience :: Developers',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Communications :: Chat :: Internet Relay Chat',
]
)
| from setuptools import setup
setup(
name='artie',
version='0.1',
url='http://github.com/sumeet/artie',
author='Sumeet Agarwal',
author_email='sumeet.a@gmail.com',
description='IRC utility robot framework for Python',
packages=['artie',],
scripts=['bin/artie-run.py',],
install_requires=['twisted', 'pyyaml',]
)
| mit | Python |
8df66e2af85b11d602c8b3d9486d8d5d30039330 | Revise setup.py | CROSoftware/pyramid_object_dispatch | setup.py | setup.py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'web.dispatch.object'
]
install_links = []
setup(name='pyramid_object_dispatch',
version='0.1',
description='Pyramid object based dispatching',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Darrol Jordan Duty',
author_email='djdduty',
url='djdduty.com',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=True,
install_requires=requires,
dependency_links=install_links
)
| import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'web.dispatch.object'
]
install_links = []
setup(name='Pyramid_Object_Dispatch',
version='0.1',
description='Pyramid object based dispatching',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Python",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='Darrol Jordan Duty',
author_email='djdduty',
url='djdduty.com',
keywords='web pyramid pylons',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=requires,
dependency_links=install_links
)
| mit | Python |
9dd654ae33c985fde224a8ddcaa383bcdbd845cc | Add touching __init__.py in the pipelines directory | djf604/chunky-pipes | setup.py | setup.py | import os
from setuptools import setup, find_packages
setup(
name='Chunky',
version='0.1.0',
description='Pipeline design and distribution framework',
author='Dominic Fitzgerald',
author_email='dominicfitzgerald11@gmail.com',
url='https://github.com/djf604/chunky',
packages=find_packages(),
entry_points={
'console_scripts': ['chunky = chunky.util:execute_from_command_line']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Libraries'
]
)
user_home = os.path.expanduser('~')
if not os.path.exists(os.path.join(user_home, '.chunky')):
os.mkdir(os.path.join(user_home, '.chunky'))
if not os.path.exists(os.path.join(user_home, '.chunky', 'pipelines')):
os.mkdir(os.path.join(user_home, '.chunky', 'pipelines'))
os.mknod(os.path.join(user_home, '.chunky', 'pipelines', '__init__.py'), mode=0o644)
if not os.path.exists(os.path.join(user_home, '.chunky', 'configs')):
os.mkdir(os.path.join(user_home, '.chunky', 'configs'))
| import os
from setuptools import setup, find_packages
setup(
name='Chunky',
version='0.1.0',
description='Pipeline design and distribution framework',
author='Dominic Fitzgerald',
author_email='dominicfitzgerald11@gmail.com',
url='https://github.com/djf604/chunky',
packages=find_packages(),
entry_points={
'console_scripts': ['chunky = chunky.util:execute_from_command_line']
},
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'Topic :: Software Development :: Libraries'
]
)
user_home = os.path.expanduser('~')
if not os.path.exists(os.path.join(user_home, '.chunky')):
os.mkdir(os.path.join(user_home, '.chunky'))
if not os.path.exists(os.path.join(user_home, '.chunky', 'pipelines')):
os.mkdir(os.path.join(user_home, '.chunky', 'pipelines'))
if not os.path.exists(os.path.join(user_home, '.chunky', 'configs')):
os.mkdir(os.path.join(user_home, '.chunky', 'configs'))
| mit | Python |
979f37dfa1356650e8d8d4878f33ce78d386081c | upgrade ebullient to 0.1.5 | EnigmaBridge/ebaws.py,EnigmaBridge/ebaws.py | setup.py | setup.py | import sys
from setuptools import setup
from setuptools import find_packages
version = '0.0.2'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'ebclient.py>=0.1.5',
'cmd2',
'pycrypto>=2.6',
'requests',
'setuptools>=1.0',
'sarge>=0.1.4',
'six'
]
# env markers in extras_require cause problems with older pip: #517
# Keep in sync with conditional_requirements.py.
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
dev_extras = [
'nose',
'pep8',
'tox',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
setup(
name='ebaws.py',
version=version,
description='EnigmaBridge Python Utilities for AWS',
url='https://enigmabridge.com',
author="Enigma Bridge",
author_email='info@enigmabridge.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_extras,
'docs': docs_extras,
}
)
| import sys
from setuptools import setup
from setuptools import find_packages
version = '0.0.2'
# Please update tox.ini when modifying dependency version requirements
install_requires = [
'ebclient.py>=0.1.4',
'cmd2',
'pycrypto>=2.6',
'requests',
'setuptools>=1.0',
'sarge>=0.1.4',
'six'
]
# env markers in extras_require cause problems with older pip: #517
# Keep in sync with conditional_requirements.py.
if sys.version_info < (2, 7):
install_requires.extend([
# only some distros recognize stdlib argparse as already satisfying
'argparse',
'mock<1.1.0',
])
else:
install_requires.append('mock')
dev_extras = [
'nose',
'pep8',
'tox',
]
docs_extras = [
'Sphinx>=1.0', # autodoc_member_order = 'bysource', autodoc_default_flags
'sphinx_rtd_theme',
'sphinxcontrib-programoutput',
]
setup(
name='ebaws.py',
version=version,
description='EnigmaBridge Python Utilities for AWS',
url='https://enigmabridge.com',
author="Enigma Bridge",
author_email='info@enigmabridge.com',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
extras_require={
'dev': dev_extras,
'docs': docs_extras,
}
)
| mit | Python |
4773a2ab54810f26ecfd725af40f69547530d782 | Use sql_metadata==1.1.2 | macbre/index-digest,macbre/index-digest | setup.py | setup.py | from setuptools import setup, find_packages
from indexdigest import VERSION
# @see https://github.com/pypa/sampleproject/blob/master/setup.py
setup(
name='indexdigest',
version=VERSION,
author='Maciej Brencz',
author_email='maciej.brencz@gmail.com',
license='MIT',
description='Analyses your database queries and schema and suggests indices and schema improvements',
url='https://github.com/macbre/index-digest',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Database',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(),
install_requires=[
'docopt==0.6.2',
'coverage==4.5.1',
'pylint==1.8.2',
'pytest==3.4.0',
'PyYAML==3.12',
'mysqlclient==1.3.12',
'sql_metadata==1.1.2',
'termcolor==1.1.0',
'yamlordereddictloader==0.4.0'
],
entry_points={
'console_scripts': [
'add_linter=indexdigest.cli.add_linter:main', # creates a new linter from a template
'index_digest=indexdigest.cli.script:main',
],
}
)
| from setuptools import setup, find_packages
from indexdigest import VERSION
# @see https://github.com/pypa/sampleproject/blob/master/setup.py
setup(
name='indexdigest',
version=VERSION,
author='Maciej Brencz',
author_email='maciej.brencz@gmail.com',
license='MIT',
description='Analyses your database queries and schema and suggests indices and schema improvements',
url='https://github.com/macbre/index-digest',
# https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 5 - Production/Stable',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: Database',
# Pick your license as you wish
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
packages=find_packages(),
install_requires=[
'docopt==0.6.2',
'coverage==4.5.1',
'pylint==1.8.2',
'pytest==3.4.0',
'PyYAML==3.12',
'mysqlclient==1.3.12',
'sql_metadata==1.0.2',
'termcolor==1.1.0',
'yamlordereddictloader==0.4.0'
],
entry_points={
'console_scripts': [
'add_linter=indexdigest.cli.add_linter:main', # creates a new linter from a template
'index_digest=indexdigest.cli.script:main',
],
}
)
| mit | Python |
97d71e1e4117168c3ec27f7844fd42b334166ce2 | Remove license classifier | openmicroscopy/weberror,openmicroscopy/weberror | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>,
#
# Version: 1.0
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
VERSION = '0.3.0'
setup(name="omero-weberror",
packages=find_packages(exclude=['ez_setup']),
version=VERSION,
description="A Python plugin for OMERO.web",
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: JavaScript',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: '
'Application Frameworks',
'Topic :: Software Development :: Testing',
'Topic :: Text Processing :: Markup :: HTML'
], # Get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
# 'License :: OSI Approved :: GNU Affero General Public License v3.0',
author='The Open Microscopy Team',
author_email='ome-devel@lists.openmicroscopy.org.uk',
license='AGPL-3.0',
url="https://github.com/openmicroscopy/omero-weberror",
download_url='https://github.com/openmicroscopy/omero-weberror/tarball/%s' % VERSION, # NOQA
keywords=['OMERO.web', 'plugin'],
include_package_data=True,
zip_safe=False,
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2016 University of Dundee.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Aleksandra Tarkowska <A(dot)Tarkowska(at)dundee(dot)ac(dot)uk>,
#
# Version: 1.0
import os
from setuptools import setup, find_packages
# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
VERSION = '0.3.0'
setup(name="omero-weberror",
packages=find_packages(exclude=['ez_setup']),
version=VERSION,
description="A Python plugin for OMERO.web",
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU Affero General Public License v3.0',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: JavaScript',
'Programming Language :: Python :: 2',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Software Development :: Libraries :: '
'Application Frameworks',
'Topic :: Software Development :: Testing',
'Topic :: Text Processing :: Markup :: HTML'
], # Get strings from
# http://pypi.python.org/pypi?%3Aaction=list_classifiers
author='The Open Microscopy Team',
author_email='ome-devel@lists.openmicroscopy.org.uk',
license='AGPL-3.0',
url="https://github.com/openmicroscopy/omero-weberror",
download_url='https://github.com/openmicroscopy/omero-weberror/tarball/%s' % VERSION, # NOQA
keywords=['OMERO.web', 'plugin'],
include_package_data=True,
zip_safe=False,
)
| agpl-3.0 | Python |
faf2db7d13514933d3c7b51ab6ab21c3753eef82 | update dependency | zhuoju36/StructEngPy,zhuoju36/StructEngPy | setup.py | setup.py | from setuptools import setup
from setuptools import find_packages
VERSION = '0.1.9'
setup(
name='StructEngPy',
version=VERSION,
author='Zhuoju Huang',
author_email='zhuoju36@hotmail.com',
license='MIT',
description='package for structural engineering',
packages=find_packages(),
zip_safe=False,
python_requires=">=3.6, <=3.8",
install_requires=[
'numpy>=1.21',
'scipy>=1.7',
'quadpy>=0.16',
'mpmath==1.2.1',
],
) | from setuptools import setup
from setuptools import find_packages
VERSION = '0.1.9'
setup(
name='StructEngPy',
version=VERSION,
author='Zhuoju Huang',
author_email='zhuoju36@hotmail.com',
license='MIT',
description='package for structural engineering',
packages=find_packages(),
zip_safe=False,
python_requires=">=3.6",
install_requires=[
'numpy>=1.21',
'scipy>=1.7',
'quadpy==0.16',
'mpmath==1.2.1',
],
) | mit | Python |
f519815674133345a3c642203fcb73710f6efdc0 | Bump version | simplepush/simplepush-python | setup.py | setup.py | """Simplepush setup script."""
from distutils.core import setup
setup(
name='simplepush',
packages=['simplepush'],
version='1.1.4',
description='Simplepush python library',
author='Timm Schaeuble',
author_email='contact@simplepush.io',
url='https://simplepush.io',
keywords=[
'push', 'notification', 'android', 'logging', 'app', 'simple',
'encrypted'
],
license='MIT',
install_requires=[
'requests',
'cryptography'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
| """Simplepush setup script."""
from distutils.core import setup
setup(
name='simplepush',
packages=['simplepush'],
version='1.1.3',
description='Simplepush python library',
author='Timm Schaeuble',
author_email='contact@simplepush.io',
url='https://simplepush.io',
keywords=[
'push', 'notification', 'android', 'logging', 'app', 'simple',
'encrypted'
],
license='MIT',
install_requires=[
'requests',
'cryptography'
],
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
| mit | Python |
5b592d7562fddac0cf48c71e6607cf17c009e993 | Update new name | sergiocorato/partner-contact | partner_identification/__manifest__.py | partner_identification/__manifest__.py | # -*- coding: utf-8 -*-
#
# © 2004-2010 Tiny SPRL http://tiny.be
# © 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH
# http://www.camptocamp.at
# © 2015 Antiun Ingenieria, SL (Madrid, Spain)
# http://www.antiun.com
# Antonio Espinosa <antonioea@antiun.com>
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Partner Identification Numbers',
'category': 'Customer Relationship Management',
'version': '10.0.1.0.0',
'depends': [
'sales_team',
],
'data': [
'views/res_partner_id_category_view.xml',
'views/res_partner_id_number_view.xml',
'views/res_partner_view.xml',
'security/ir.model.access.csv',
],
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH, '
'Tecnativa,'
'Camptocamp,'
'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': 'https://odoo-community.org/',
'license': 'AGPL-3',
'installable': True,
}
| # -*- coding: utf-8 -*-
#
# © 2004-2010 Tiny SPRL http://tiny.be
# © 2010-2012 ChriCar Beteiligungs- und Beratungs- GmbH
# http://www.camptocamp.at
# © 2015 Antiun Ingenieria, SL (Madrid, Spain)
# http://www.antiun.com
# Antonio Espinosa <antonioea@antiun.com>
# © 2016 ACSONE SA/NV (<http://acsone.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
'name': 'Partner Identification Numbers',
'category': 'Customer Relationship Management',
'version': '10.0.1.0.0',
'depends': [
'sales_team',
],
'data': [
'views/res_partner_id_category_view.xml',
'views/res_partner_id_number_view.xml',
'views/res_partner_view.xml',
'security/ir.model.access.csv',
],
'author': 'ChriCar Beteiligungs- und Beratungs- GmbH, '
'Antiun Ingeniería S.L.,'
'Camptocamp,'
'ACSONE SA/NV,'
'Odoo Community Association (OCA)',
'website': 'https://odoo-community.org/',
'license': 'AGPL-3',
'installable': True,
}
| agpl-3.0 | Python |
95add18b382898eb82c7ff3dd0aa0fd6db0f5cb9 | Add simplejson as requirement for python 2.5 | jarus/flask-mongokit,VishvajitP/flask-mongokit,jarus/flask-mongokit,VishvajitP/flask-mongokit | setup.py | setup.py | """
Flask-MongoKit
--------------
Flask-MongoKit simplifies to use MongoKit, a powerful MongoDB ORM in Flask
applications.
Links
`````
* `documentation <http://packages.python.org/Flask-MongoKit>`_
* `development version <http://github.com/jarus/flask-mongokit/zipball/master#egg=Flask-MongoKit-dev>`_
* `MongoKit <http://namlook.github.com/mongokit/>`_
* `Flask <http://flask.pocoo.org>`_
"""
import sys
from setuptools import setup
install_requires = [
"Flask",
"MongoKit"
]
if sys.version_info < (2, 6):
install_requires.append('simplejson')
setup(
name='Flask-MongoKit',
version='0.6',
url='http://github.com/jarus/flask-mongokit',
license='BSD',
author='Christoph Heer',
author_email='Christoph.Heer@googlemail.com',
description='A Flask extension simplifies to use MongoKit',
long_description=__doc__,
py_modules=['flask_mongokit'],
zip_safe=False,
platforms='any',
install_requires=install_requires,
test_suite='tests.suite',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| """
Flask-MongoKit
--------------
Flask-MongoKit simplifies to use MongoKit, a powerful MongoDB ORM in Flask
applications.
Links
`````
* `documentation <http://packages.python.org/Flask-MongoKit>`_
* `development version <http://github.com/jarus/flask-mongokit/zipball/master#egg=Flask-MongoKit-dev>`_
* `MongoKit <http://namlook.github.com/mongokit/>`_
* `Flask <http://flask.pocoo.org>`_
"""
from setuptools import setup
setup(
name='Flask-MongoKit',
version='0.6',
url='http://github.com/jarus/flask-mongokit',
license='BSD',
author='Christoph Heer',
author_email='Christoph.Heer@googlemail.com',
description='A Flask extension simplifies to use MongoKit',
long_description=__doc__,
py_modules=['flask_mongokit'],
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'MongoKit'
],
test_suite='tests.suite',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| bsd-3-clause | Python |
00b6946d7329d2a6b6d1e0edf7ceb3c508d0d710 | Make stable | ForeverWintr/metafunctions | setup.py | setup.py | '''
MetaFunctions is a function composition and data pipelining library.
For more information, please visit the `project on github <https://github.com/ForeverWintr/metafunctions>`_.
'''
import os
import sys
import contextlib
import pathlib
import shutil
from setuptools import setup, find_packages, Command
import metafunctions
here = os.path.abspath(os.path.dirname(__file__))
class UploadCommand(Command):
"""
Support setup.py upload.
https://github.com/kennethreitz/setup.py/blob/master/setup.py
"""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
shutil.rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
setup(
name=metafunctions.__name__,
version=metafunctions.__version__,
description='Metafunctions is a function composition and data pipelining library',
long_description=__doc__,
url='https://github.com/ForeverWintr/metafunctions',
author='Tom Rutherford',
author_email='foreverwintr@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
keywords='functional-programming function-composition',
packages=find_packages(),
test_suite='metafunctions.tests',
install_requires='ansicolors>=1.1.8',
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| '''
MetaFunctions is a function composition and data pipelining library.
For more information, please visit the `project on github <https://github.com/ForeverWintr/metafunctions>`_.
'''
import os
import sys
import contextlib
import pathlib
import shutil
from setuptools import setup, find_packages, Command
import metafunctions
here = os.path.abspath(os.path.dirname(__file__))
class UploadCommand(Command):
"""
Support setup.py upload.
https://github.com/kennethreitz/setup.py/blob/master/setup.py
"""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
shutil.rmtree(os.path.join(here, 'dist'))
except OSError:
pass
self.status('Building Source and Wheel (universal) distribution…')
os.system('{0} setup.py sdist bdist_wheel --universal'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
sys.exit()
setup(
name=metafunctions.__name__,
version=metafunctions.__version__,
description='Metafunctions is a function composition and data pipelining library',
long_description=__doc__,
url='https://github.com/ForeverWintr/metafunctions',
author='Tom Rutherford',
author_email='foreverwintr@gmail.com',
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Libraries :: Python Modules',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
],
keywords='functional-programming function-composition',
packages=find_packages(),
test_suite='metafunctions.tests',
install_requires='ansicolors>=1.1.8',
# $ setup.py publish support.
cmdclass={
'upload': UploadCommand,
},
)
| mit | Python |
997d2684809d25a7aec4d278c6420cc17f4b7b48 | Bump version | lambdalisue/notify | setup.py | setup.py | # coding=utf-8
import sys
from setuptools import setup, find_packages
NAME = 'notify'
VERSION = '0.1.1'
def read(filename):
import os
BASE_DIR = os.path.dirname(__file__)
filename = os.path.join(BASE_DIR, filename)
with open(filename, 'r') as fi:
return fi.read()
def readlist(filename):
rows = read(filename).split("\n")
rows = [x.strip() for x in rows if x.strip()]
return list(rows)
# if we are running on python 3, enable 2to3 and
# let it use the custom fixers from the custom_fixers
# package.
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
)
setup(
name = NAME,
version = VERSION,
description = 'Notify process termination via email',
long_description = read('README.rst'),
classifiers = (
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
),
keywords = 'mail note notification notify cui command',
author = 'Alisue',
author_email = 'lambdalisue@hashnote.net',
url = 'https://github.com/lambdalisue/%s' % NAME,
download_url = 'https://github.com/lambdalisue/%s/tarball/master' % NAME,
license = 'MIT',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
package_data = {
'': ['LICENSE', 'README.rst',
'requirements.txt',
'requirements-test.txt',
'requirements-docs.txt'],
},
zip_safe=True,
install_requires=readlist('requirements.txt'),
test_suite='nose.collector',
tests_require=readlist('requirements-test.txt'),
entry_points={
'console_scripts': [
'notify = notify.console:main',
],
},
**extra
)
| # coding=utf-8
import sys
from setuptools import setup, find_packages
NAME = 'notify'
VERSION = '0.1.0'
def read(filename):
import os
BASE_DIR = os.path.dirname(__file__)
filename = os.path.join(BASE_DIR, filename)
with open(filename, 'r') as fi:
return fi.read()
def readlist(filename):
rows = read(filename).split("\n")
rows = [x.strip() for x in rows if x.strip()]
return list(rows)
# if we are running on python 3, enable 2to3 and
# let it use the custom fixers from the custom_fixers
# package.
extra = {}
if sys.version_info >= (3, 0):
extra.update(
use_2to3=True,
)
setup(
name = NAME,
version = VERSION,
description = 'Notify process termination via email',
long_description = read('README.rst'),
classifiers = (
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
),
keywords = 'mail note notification notify cui command',
author = 'Alisue',
author_email = 'lambdalisue@hashnote.net',
url = 'https://github.com/lambdalisue/%s' % NAME,
download_url = 'https://github.com/lambdalisue/%s/tarball/master' % NAME,
license = 'MIT',
packages = find_packages('src'),
package_dir = {'': 'src'},
include_package_data = True,
package_data = {
'': ['LICENSE', 'README.rst',
'requirements.txt',
'requirements-test.txt',
'requirements-docs.txt'],
},
zip_safe=True,
install_requires=readlist('requirements.txt'),
test_suite='nose.collector',
tests_require=readlist('requirements-test.txt'),
entry_points={
'console_scripts': [
'notify = notify.console:main',
],
},
**extra
)
| mit | Python |
66c92962d1a921c4d2708ede4ccdfd89b56b27cd | fix setup issue | interrogator/corenlp-xml-lib,relwell/corenlp-xml-lib | setup.py | setup.py | from setuptools import setup
setup(
name="corenlp-xml-lib",
version="0.0.1",
author="Robert Elwell",
author_email="robert.elwell@gmail.com",
description="Library for interacting with the XML output of the Stanford CoreNLP pipeline.",
license="Other",
packages=["corenlp_xml"],
install_requires=["PyYAML>=3.10", "bidict>=0.1.1", "lxml>=3.2.4", "nltk>=2.0.4", "wsgiref>=0.1.2"]
) | from setuptools import setup
from pip.req import parse_requirements
# parse_requirements() returns generator of pip.req.InstallRequirement objects
install_reqs = parse_requirements('requirements.txt')
# reqs is a list of requirement
# e.g. ['django==1.5.1', 'mezzanine==1.4.6']
reqs = [str(ir.req) for ir in install_reqs]
setup(
name="corenlp-xml-lib",
version="0.0.1",
author="Robert Elwell",
author_email="robert.elwell@gmail.com",
description="Library for interacting with the XML output of the Stanford CoreNLP pipeline.",
license="Other",
packages=["corenlp_xml"],
install_requires=reqs
) | apache-2.0 | Python |
c575d69ce253f9eb4d9beb6ffcd3e8a57ed804f0 | Update to version 0.2.0 (according with semantic versioning) | marekjm/diaspy | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='diaspy',
version='0.2.0',
author='Moritz Kiefer',
author_email='moritz.kiefer@gmail.com',
url='https://github.com/Javafant/diaspora-api',
description='A python api to the social network diaspora',
packages=find_packages(),
install_requires=['requests']
)
| from setuptools import setup, find_packages
setup(name='diaspy',
version='0.1.0',
author='Moritz Kiefer',
author_email='moritz.kiefer@gmail.com',
url='https://github.com/Javafant/diaspora-api',
description='A python api to the social network diaspora',
packages=find_packages(),
install_requires=['requests']
)
| mit | Python |
9f510248981b61c9c7e111d45422d8e3ed1367ca | Update setup.py to set long description for PyPI | dib-lab/kevlar,dib-lab/kevlar,dib-lab/kevlar,dib-lab/kevlar | setup.py | setup.py | #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/standage/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
from setuptools import setup, Extension
import glob
import versioneer
ksw2 = Extension(
'kevlar.alignment',
sources=[
'kevlar/alignment.c', 'third-party/ksw2/ksw2_extz.c', 'src/align.c'
],
include_dirs=['inc/', 'third-party/ksw2/'],
language='c',
)
fermilite = Extension(
'kevlar.assembly',
sources=['kevlar/assembly.c'] + glob.glob('third-party/fermi-lite/*.c'),
include_dirs=['third-party/fermi-lite/'],
extra_link_args=['-lz'],
language='c',
)
sequencemod = Extension(
'kevlar.sequence',
sources=['kevlar/sequence.c'],
language='c'
)
dependencies = [
'pysam>=0.14', 'networkx>=2.0', 'pandas>=0.23', 'scipy>=1.1',
'matplotlib>=2.2'
]
desc = 'Reference-free variant discovery scalable to large eukaryotic genomes'
with open('README.md', 'r') as infile:
longdesc = infile.read()
setup(name='biokevlar',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description=desc,
long_description=longdesc,
long_description_content_type='text/markdown',
url='https://github.com/dib-lab/kevlar',
author='Daniel Standage',
author_email='daniel.standage@gmail.com',
license='MIT',
packages=['kevlar', 'kevlar.cli', 'kevlar.tests'],
package_data={
'kevlar': ['kevlar/tests/data/*', 'kevlar/tests/data/*/*']
},
include_package_data=True,
ext_modules=[ksw2, fermilite, sequencemod],
setup_requires=dependencies,
install_requires=dependencies,
entry_points={
'console_scripts': ['kevlar = kevlar.__main__:main']
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
zip_safe=True)
| #!/usr/bin/env python
#
# -----------------------------------------------------------------------------
# Copyright (c) 2016 The Regents of the University of California
#
# This file is part of kevlar (http://github.com/standage/kevlar) and is
# licensed under the MIT license: see LICENSE.
# -----------------------------------------------------------------------------
from setuptools import setup, Extension
import glob
import versioneer
ksw2 = Extension(
'kevlar.alignment',
sources=[
'kevlar/alignment.c', 'third-party/ksw2/ksw2_extz.c', 'src/align.c'
],
include_dirs=['inc/', 'third-party/ksw2/'],
language='c',
)
fermilite = Extension(
'kevlar.assembly',
sources=['kevlar/assembly.c'] + glob.glob('third-party/fermi-lite/*.c'),
include_dirs=['third-party/fermi-lite/'],
extra_link_args=['-lz'],
language='c',
)
sequencemod = Extension(
'kevlar.sequence',
sources=['kevlar/sequence.c'],
language='c'
)
dependencies = [
'pysam>=0.14', 'networkx>=2.0', 'pandas>=0.23', 'scipy>=1.1',
'matplotlib>=2.2'
]
setup(name='biokevlar',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description=('Reference-free variant discovery scalable to large '
'eukaryotic genomes'),
url='https://github.com/dib-lab/kevlar',
author='Daniel Standage',
author_email='daniel.standage@gmail.com',
license='MIT',
packages=['kevlar', 'kevlar.cli', 'kevlar.tests'],
package_data={
'kevlar': ['kevlar/tests/data/*', 'kevlar/tests/data/*/*']
},
include_package_data=True,
ext_modules=[ksw2, fermilite, sequencemod],
setup_requires=dependencies,
install_requires=dependencies,
entry_points={
'console_scripts': ['kevlar = kevlar.__main__:main']
},
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Bio-Informatics'
],
zip_safe=True)
| mit | Python |
c630aff22e6ffa957ba660d574c4ea9ef181524b | Update version number to 1.0. | grybmadsci/openhtf,jettisonjoe/openhtf,grybmadsci/openhtf,google/openhtf,jettisonjoe/openhtf,fahhem/openhtf,google/openhtf,fahhem/openhtf,google/openhtf,grybmadsci/openhtf,ShaperTools/openhtf,google/openhtf,grybmadsci/openhtf,fahhem/openhtf,jettisonjoe/openhtf,ShaperTools/openhtf,ShaperTools/openhtf,amyxchen/openhtf,ShaperTools/openhtf,ShaperTools/openhtf,amyxchen/openhtf,fahhem/openhtf,jettisonjoe/openhtf | setup.py | setup.py | # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for OpenHTF."""
import os
from distutils.command.clean import clean
from setuptools import find_packages
from setuptools import setup
class CleanCommand(clean):
"""Custom logic for the clean command."""
def run(self):
clean.run(self)
targets = [
'./dist',
'./*.egg-info',
'./openhtf/proto/*_pb2.py',
'**/*.pyc',
'**/*.tgz',
]
os.system('rm -vrf %s' % ' '.join(targets))
requires = [ # pylint: disable=invalid-name
'contextlib2==0.4.0',
'enum==0.4.4',
'Flask==0.10.1',
'itsdangerous==0.24',
'Jinja2==2.7.3',
'libusb1==1.3.0',
'M2Crypto==0.22.3',
'MarkupSafe==0.23',
'pyaml==15.3.1',
'python-gflags==2.0',
'PyYAML==3.11',
'Rocket==1.2.4',
'singledispatch==3.4.0.3',
'six==1.9.0',
'Werkzeug==0.10.4',
]
setup(
name='openhtf',
version='1.0',
description='OpenHTF, the open hardware testing framework.',
author='John Hawley',
author_email='madsci@google.com',
maintainer='Joe Ethier',
maintainer_email='jethier@google.com',
packages=find_packages(exclude='example'),
cmdclass={
'clean': CleanCommand,
},
install_requires=requires,
)
| # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script for OpenHTF."""
import os
from distutils.command.clean import clean
from setuptools import find_packages
from setuptools import setup
class CleanCommand(clean):
"""Custom logic for the clean command."""
def run(self):
clean.run(self)
targets = [
'./dist',
'./*.egg-info',
'./openhtf/proto/*_pb2.py',
'**/*.pyc',
'**/*.tgz',
]
os.system('rm -vrf %s' % ' '.join(targets))
requires = [ # pylint: disable=invalid-name
'contextlib2==0.4.0',
'enum==0.4.4',
'Flask==0.10.1',
'itsdangerous==0.24',
'Jinja2==2.7.3',
'libusb1==1.3.0',
'M2Crypto==0.22.3',
'MarkupSafe==0.23',
'pyaml==15.3.1',
'python-gflags==2.0',
'PyYAML==3.11',
'Rocket==1.2.4',
'singledispatch==3.4.0.3',
'six==1.9.0',
'Werkzeug==0.10.4',
]
setup(
name='openhtf',
version='0.9',
description='OpenHTF, the open hardware testing framework.',
author='John Hawley',
author_email='madsci@google.com',
maintainer='Joe Ethier',
maintainer_email='jethier@google.com',
packages=find_packages(exclude='example'),
cmdclass={
'clean': CleanCommand,
},
install_requires=requires,
)
| apache-2.0 | Python |
96701435c52db2bcecde16579174b6bff3a17c0f | Rename for consistency | gabalese/mondo-python | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
VERSION = '0.0.1'
tests_requires = [
'nose>=1.3.4',
'responses>=0.5.1'
]
install_requires = tests_requires + [
'requests>=2.4.3',
]
setup(
name="mondo",
version=VERSION,
description="Mondo Banking API Client",
author=', '.join((
'Tito Miguel Costa',
'Simon Vans-Colina <simon@simon.vc>',
)),
url="https://github.com/simonvc/mondo-python",
packages=["mondo"],
tests_require=tests_requires,
install_requires=install_requires,
license="MIT",
)
| #!/usr/bin/env python
from setuptools import setup
VERSION = '0.0.1'
test_requires = [
'nose>=1.3.4',
'responses>=0.5.1'
]
install_requires = test_requires + [
'requests>=2.4.3',
]
setup(
name="mondo",
version=VERSION,
description="Mondo Banking API Client",
author=', '.join((
'Tito Miguel Costa',
'Simon Vans-Colina <simon@simon.vc>',
)),
url="https://github.com/simonvc/mondo-python",
packages=["mondo"],
tests_require=test_requires,
install_requires=install_requires,
license="MIT",
)
| mit | Python |
7440dba84ea156e4ef01c8e68bda8a9b0490510d | Mark ProjectFuture as endOfLife | webcomics/dosage,webcomics/dosage | dosagelib/plugins/projectfuture.py | dosagelib/plugins/projectfuture.py | # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class ProjectFuture(_ParserScraper):
imageSearch = '//td[@class="tamid"]/img'
prevSearch = '//a[./img[@alt="Previous"]]'
def __init__(self, name, comic, first, last=None):
if name == 'ProjectFuture':
super(ProjectFuture, self).__init__(name)
else:
super(ProjectFuture, self).__init__('ProjectFuture/' + name)
self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
self.stripUrl = self.url + '?strip=%s'
self.firstStripUrl = self.stripUrl % first
if last:
self.url = self.stripUrl
self.endOfLife = True
@classmethod
def getmodules(cls):
return (
cls('AWalkInTheWoods', 'simeon', '1', last='12'),
cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
cls('BookOfTenets', 'tenets', '01', last='45'),
cls('CriticalMass', 'criticalmass', 'cover', last='26'),
cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
cls('Emily', 'emily', '01-00'),
cls('FishingTrip', 'fishing', '01-00'),
cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
cls('NiallsStory', 'niall', '00'),
cls('ProjectFuture', 'strip', '0', last='664'),
cls('RedValentine', 'redvalentine', '1', last='6'),
cls('ShortStories', 'shorts', '01-00'),
cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
cls('TheEpsilonProject', 'epsilon', '00-01'),
cls('TheHarvest', 'harvest', '01-00'),
cls('TheSierraChronicles', 'sierra', '0', last='29'),
cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
cls('TurningANewPage', 'azrael', '1', last='54'),
)
| # SPDX-License-Identifier: MIT
# Copyright (C) 2019-2020 Tobias Gruetzmacher
# Copyright (C) 2019-2020 Daniel Ring
from .common import _ParserScraper
class ProjectFuture(_ParserScraper):
imageSearch = '//td[@class="tamid"]/img'
prevSearch = '//a[./img[@alt="Previous"]]'
def __init__(self, name, comic, first, last=None):
if name == 'ProjectFuture':
super(ProjectFuture, self).__init__(name)
else:
super(ProjectFuture, self).__init__('ProjectFuture/' + name)
self.url = 'http://www.projectfuturecomic.com/' + comic + '.php'
self.stripUrl = self.url + '?strip=%s'
self.firstStripUrl = self.stripUrl % first
if last:
self.url = self.stripUrl
self.endOfLife = True
@classmethod
def getmodules(cls):
return (
cls('AWalkInTheWoods', 'simeon', '1', last='12'),
cls('BenjaminBuranAndTheArkOfUr', 'ben', '00', last='23'),
cls('BookOfTenets', 'tenets', '01', last='45'),
cls('CriticalMass', 'criticalmass', 'cover', last='26'),
cls('DarkLordRising', 'darklord', '01-00', last='10-10'),
cls('Emily', 'emily', '01-00'),
cls('FishingTrip', 'fishing', '01-00'),
cls('HeadsYouLose', 'heads', '00-01', last='07-12'),
cls('NiallsStory', 'niall', '00'),
cls('ProjectFuture', 'strip', '0'),
cls('RedValentine', 'redvalentine', '1', last='6'),
cls('ShortStories', 'shorts', '01-00'),
cls('StrangeBedfellows', 'bedfellows', '1', last='6'),
cls('TheAxemanCometh', 'axeman', '01-01', last='02-18'),
cls('ToCatchADemon', 'daxxon', '01-00', last='03-14'),
cls('TheDarkAngel', 'darkangel', 'cover', last='54'),
cls('TheEpsilonProject', 'epsilon', '00-01'),
cls('TheHarvest', 'harvest', '01-00'),
cls('TheSierraChronicles', 'sierra', '0', last='29'),
cls('TheTuppenyMan', 'tuppenny', '00', last='16'),
cls('TurningANewPage', 'azrael', '1', last='54'),
)
| mit | Python |
535ac4c6eae416461e11f33c1a1ef67e92c73914 | Add a commit failed test | sangoma/safepy2,leonardolang/safepy2 | tests/test_exception_wrapping.py | tests/test_exception_wrapping.py | import safe
class MockResponse(object):
def __init__(self, data):
self.data = data
def json(self):
return self.data
def test_basic_exception():
error_message = 'Example error'
response = MockResponse({
'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': error_message}
})
exception = safe.library.raise_from_json(response)
assert str(exception) == error_message
def test_commit_failed_exception():
error_message = 'Default ipv4 gateway is not on eth0 subnet'
response = MockResponse({
'status': False,
'type': 'configuration',
'method': 'smartapply',
'module': 'nsc',
'error': {
'message': 'Apply configuration failed.',
'reason': [{
'url': '/SAFe/sng_network_config/modify/network',
'obj_type': 'configuration',
'type': 'ERROR',
'description': error_message,
'module': 'network'
}]
}
})
exception = safe.library.raise_from_json(response)
assert isinstance(exception, safe.CommitFailed)
assert str(exception) == 'Apply changes failed: ' + error_message
assert len(exception.reasons) == 1
reason = exception.reasons[0]
assert reason.obj == 'configuration'
assert reason.module == 'network'
assert reason.description == error_message
| import safe
def test_simple_exception():
class MockReponse(object):
def json(self):
return {'status': False,
'method': 'synchronize',
'module': 'cluster',
'error': {'message': 'Example error'}}
exception = safe.library.raise_from_json(MockReponse())
assert str(exception) == 'Example error'
| mpl-2.0 | Python |
f48d037b93c92ac794c145aad3cd9ae6f04780d1 | Convert scores to ints when sorting | james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF,james9909/IntroCTF | scoreboard.py | scoreboard.py | #!/usr/bin/env python
import os
print "Content-Type: text/html\n"
print ""
def gen_scoreboard(team_data):
if len(team_data) == 0:
print "There are no teams!"
else:
print "<br>"
print "<div class='container'>"
print "<table class='responsive-table bordered hoverable centered'>"
print "<thead>"
print "<tr><th>Rank</th><th>Team</th><th>Score</th></tr>"
print "</thead>"
length = len(team_data)
for x in range(length):
if len(team_data) == 0:
return
for key in team_data:
team_data[key] = int(team_data[key])
highest_score = max(team_data.values())
for team in team_data:
if team_data[team] == highest_score:
print "<tr><td>%d</td><td>%s</td><td>%s</td></tr>\n" %(x+1, team, highest_score)
del team_data[team]
break
def main():
fin = open("accounts/scores.txt", "r")
data = fin.readlines()
teams = {}
# Data is stored team,score
for info in data:
info = info.strip().split(",")
if info[0] == "":
continue
teams[info[0]] = info[1]
gen_scoreboard(teams)
if 'HTTP_COOKIE' not in os.environ:
html = open("templates/scoreboard_logged_out.html").read()
else:
html = open("templates/scoreboard_logged_in.html").read()
print html
main()
| #!/usr/bin/env python
import os
print "Content-Type: text/html\n"
print ""
def gen_scoreboard(team_data):
if len(team_data) == 0:
print "There are no teams!"
else:
print "<br>"
print "<div class='container'>"
print "<table class='responsive-table bordered hoverable centered'>"
print "<thead>"
print "<tr><th>Rank</th><th>Team</th><th>Score</th></tr>"
print "</thead>"
length = len(team_data)
for x in range(length):
if len(team_data) == 0:
return
highest_score = max(team_data.values())
for team in team_data:
if team_data[team] == highest_score:
print "<tr><td>%d</td><td>%s</td><td>%s</td></tr>\n" %(x+1, team, highest_score)
del team_data[team]
break
def main():
fin = open("accounts/scores.txt", "r")
data = fin.readlines()
teams = {}
# Data is stored team,score
for info in data:
info = info.strip().split(",")
if info[0] == "":
continue
teams[info[0]] = info[1]
gen_scoreboard(teams)
if 'HTTP_COOKIE' not in os.environ:
html = open("templates/scoreboard_logged_out.html").read()
else:
html = open("templates/scoreboard_logged_in.html").read()
print html
main()
| mit | Python |
bc054f5cc8c375244c8ec4b311d12cf416670871 | Fix partition sizes | keenlabs/capillary,keenlabs/capillary,evertrue/capillary,evertrue/capillary,keenlabs/capillary,evertrue/capillary,evertrue/capillary | stats-to-datadog.py | stats-to-datadog.py | import urllib2
import json
import sys
from statsd import statsd
statsd.connect('localhost', 8125)
host = sys.argv[1]
topology = sys.argv[2]
toporoot = sys.argv[3]
topic = sys.argv[4]
state = urllib2.urlopen(
"http://{}/api/status?toporoot={}&topic={}".format(
host, toporoot, topic
)
).read()
data = json.loads(state)
amount = 0
for looplord in data:
if looplord['amount'] is not None:
statsd.gauge(
'razor.kafkamon.topology.partition',
looplord['amount'],
tags = [
"topic:{}".format(topic),
"topology:{}".format(topology),
"partition:{}".format(looplord['partition'])
]
)
amount += looplord['amount']
print "Got {} for {}".format(amount, topology)
statsd.gauge(
'razor.kafkamon.total_delta',
amount, tags = [
"topic:{}".format(topic),
"topology:{}".format(topology)
]
)
| import urllib2
import json
import sys
from statsd import statsd
statsd.connect('localhost', 8125)
host = sys.argv[1]
topology = sys.argv[2]
toporoot = sys.argv[3]
topic = sys.argv[4]
state = urllib2.urlopen(
"http://{}/api/status?toporoot={}&topic={}".format(
host, toporoot, topic
)
).read()
data = json.loads(state)
amount = 0
for looplord in data:
if looplord['amount'] is not None:
statsd.gauge(
'razor.kafkamon.topology.partition',
amount,
tags = [
"topic:{}".format(topic),
"topology:{}".format(topology),
"partition:{}".format(looplord['partition'])
]
)
amount += looplord['amount']
print "Got {} for {}".format(amount, topology)
statsd.gauge(
'razor.kafkamon.total_delta',
amount, tags = [
"topic:{}".format(topic),
"topology:{}".format(topology)
]
)
| mit | Python |
5334dfd37ef46479cf054143a34b2cbed8e90b14 | Handle exceptions in wsgi. | serathius/elasticsearch-raven,pozytywnie/elasticsearch-raven,socialwifi/elasticsearch-raven | elasticsearch_raven/wsgi.py | elasticsearch_raven/wsgi.py | import queue
from elasticsearch_raven import configuration
from elasticsearch_raven.transport import SentryMessage
from elasticsearch_raven.udp_server import _get_sender
pending_logs = queue.Queue(configuration['queue_maxsize'])
exception_queue = queue.Queue()
sender = _get_sender(pending_logs, exception_queue)
sender.start()
def application(environ, start_response):
try:
exception = exception_queue.get_nowait()
except queue.Empty:
pass
else:
raise exception
length = int(environ.get('CONTENT_LENGTH', '0'))
data = environ['wsgi.input'].read(length)
pending_logs.put(SentryMessage.create_from_http(
environ['HTTP_X_SENTRY_AUTH'], data))
status = '200 OK'
response_headers = [('Content-Type', 'text/plain')]
start_response(status, response_headers)
return [''.encode('utf-8')]
| from queue import Queue
from threading import Thread
from elasticsearch_raven import configuration
from elasticsearch_raven.transport import ElasticsearchTransport
from elasticsearch_raven.transport import SentryMessage
transport = ElasticsearchTransport(configuration['host'],
configuration['use_ssl'])
pending_logs = Queue()
def send():
while True:
message = pending_logs.get()
transport.send(message)
pending_logs.task_done()
sender = Thread(target=send)
sender.start()
def application(environ, start_response):
length = int(environ.get('CONTENT_LENGTH', '0'))
data = environ['wsgi.input'].read(length)
pending_logs.put(SentryMessage.create_from_http(
environ['HTTP_X_SENTRY_AUTH'], data))
status = '200 OK'
response_headers = [('Content-Type', 'text/plain')]
start_response(status, response_headers)
return [''.encode('utf-8')]
| mit | Python |
6238243758202ad12db66fecef27cee65b71d192 | Add PID file support and status querying to new runner | CylonicRaider/Instant,CylonicRaider/Instant,CylonicRaider/Instant,CylonicRaider/Instant,CylonicRaider/Instant | script/run.py | script/run.py | #!/usr/bin/env python3
# -*- coding: ascii -*-
# An init script for running Instant and a number of bots.
import re
PID_LINE_RE = re.compile(r'^[0-9]+\s*$')
class Process:
def __init__(self, name, pidfile):
self.name = name
self.pidfile = pidfile
self._pid = Ellipsis
def _read_pidfile(self):
f = None
try:
f = open(self.pidfile)
data = f.read()
if not PID_LINE_RE.match(data):
raise ValueError('Invalid PID file contents')
ret = int(data)
if ret < 0:
raise ValueError('Invalid PID in PID file')
return ret
except IOError as e:
if e.errno == errno.ENOENT: return None
raise
finally:
if f: f.close()
def _write_pidfile(self, pid):
with open(self.pidfile, 'w') as f:
f.write('%s\n' % pid)
def get_pid(self, force=False):
if self._pid is Ellipsis or force:
self._pid = self._read_pidfile()
return self._pid
def set_pid(self, pid):
self._pid = pid
self._write_pidfile(pid)
def status(self):
pid = self.get_pid()
if pid is None:
return 'DEAD'
try:
os.kill(pid, 0)
return 'RUNNING'
except OSError as e:
if e.errno == errno.ESRCH: return 'STALEFILE'
raise
def main():
pass
if __name__ == '__main__': main()
| #!/usr/bin/env python3
# -*- coding: ascii -*-
# An init script for running Instant and a number of bots.
def main():
pass
if __name__ == '__main__': main()
| mit | Python |
ac99e90365d61b56ba654ad9bcc1baa20ccf65e5 | Install missing swig generated file | usajusaj/sga_utils,usajusaj/sga_utils | setup.py | setup.py | # -*- coding: utf-8 -*-
import numpy
from setuptools import setup, find_packages, Extension
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
lic = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
correlation_module = Extension('sga.toolbox._c_impl',
sources=['sga/toolbox/src/c_impl.i', 'sga/toolbox/src/correlation.c', 'sga/toolbox/src/table_norm.c', 'sga/toolbox/src/safe.c'],
include_dirs = [numpy_include],
swig_opts=['-threads', '-modern', '-outdir', 'sga/toolbox/'],
libraries = ['gsl', 'gslcblas','m'],
extra_compile_args = ["-O3"],
)
console_scripts = [
'sga-similarity=sga.similarity:main',
'sga-safe=sga.safe:main'
]
setup(
name='sga',
version='0.1.0',
description='SGA Utilities',
install_requires=required,
long_description=readme,
author='Matej Usaj',
author_email='usaj.m@utoronto.ca',
url='https://github.com/usajusaj/sga_utils',
license=lic,
ext_modules = [correlation_module],
py_modules=['sga.toolbox.c_impl'],
packages=find_packages(exclude=('tests', 'docs')),
entry_points = {
'console_scripts': console_scripts,
}
)
| # -*- coding: utf-8 -*-
import numpy
from setuptools import setup, find_packages, Extension
with open('README.rst') as f:
readme = f.read()
with open('LICENSE') as f:
lic = f.read()
with open('requirements.txt') as f:
required = f.read().splitlines()
# Obtain the numpy include directory. This logic works across numpy versions.
try:
numpy_include = numpy.get_include()
except AttributeError:
numpy_include = numpy.get_numpy_include()
correlation_module = Extension('sga.toolbox._c_impl',
sources=['sga/toolbox/src/c_impl.i', 'sga/toolbox/src/correlation.c', 'sga/toolbox/src/table_norm.c', 'sga/toolbox/src/safe.c'],
include_dirs = [numpy_include],
swig_opts=['-threads', '-modern', '-outdir', 'sga/toolbox/'],
libraries = ['gsl', 'gslcblas','m'],
extra_compile_args = ["-O3"],
)
console_scripts = [
'sga-similarity=sga.similarity:main',
'sga-safe=sga.safe:main'
]
setup(
name='sga',
version='0.1.0',
description='SGA Utilities',
install_requires=required,
long_description=readme,
author='Matej Usaj',
author_email='usaj.m@utoronto.ca',
url='https://github.com/usajusaj/sga_utils',
license=lic,
ext_modules = [correlation_module],
packages=find_packages(exclude=('tests', 'docs')),
entry_points = {
'console_scripts': console_scripts,
}
)
| mit | Python |
8b8b6c3b9e2fbeff422cda8c5812f32d81b43a1f | Bump version to 4.0.0a23 | platformio/platformio-core,platformio/platformio,platformio/platformio-core | platformio/__init__.py | platformio/__init__.py | # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a23")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| # Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
VERSION = (4, 0, "0a22")
__version__ = ".".join([str(s) for s in VERSION])
__title__ = "platformio"
__description__ = (
"An open source ecosystem for IoT development. "
"Cross-platform IDE and unified debugger. "
"Remote unit testing and firmware updates. "
"Arduino, ARM mbed, Espressif (ESP8266/ESP32), STM32, PIC32, nRF51/nRF52, "
"FPGA, CMSIS, SPL, AVR, Samsung ARTIK, libOpenCM3")
__url__ = "https://platformio.org"
__author__ = "PlatformIO"
__email__ = "contact@platformio.org"
__license__ = "Apache Software License"
__copyright__ = "Copyright 2014-present PlatformIO"
__apiurl__ = "https://api.platformio.org"
| apache-2.0 | Python |
066146dd77b00d1a597704fd18bccf1041564215 | Fix print formatting | davidgasquez/kaggle-airbnb | scripts/gb.py | scripts/gb.py | import sys
import numpy as np
import pandas as pd
import datetime
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from xgboost.sklearn import XGBClassifier
sys.path.append('..')
from utils.data_loading import load_users_data
from utils.preprocessing import one_hot_encoding
print "Loading data...",
train_users, test_users = load_users_data()
users = pd.read_csv('../datasets/processed/users_with_session.csv')
print "\tDONE"
print "Preprocessing...",
# Get train labels and ids
labels = train_users['country_destination'].values
train_users = train_users.drop(['country_destination'], axis=1)
id_test = test_users['id']
piv_train = train_users.shape[0]
drop_list = [
'id',
'country_destination',
'Unnamed: 0',
'date_account_created',
'date_first_active',
'timestamp_first_active'
]
# Drop columns
users = users.drop(drop_list, axis=1)
# Fill NaNs
users = users.fillna(-1)
# Encode categorical features
categorical_features = [
'gender', 'signup_method', 'signup_flow', 'language', 'affiliate_channel',
'affiliate_provider', 'first_affiliate_tracked', 'signup_app',
'first_device_type', 'first_browser', 'most_used_device'
]
users = one_hot_encoding(users, categorical_features)
# Splitting train and test
values = users.values
values = StandardScaler().fit_transform(values)
X = values[:piv_train]
le = LabelEncoder()
y = le.fit_transform(labels)
X_test = values[piv_train:]
print "\tDONE"
print "Fitting...",
# Classifier
xgb = XGBClassifier(
max_depth=6,
learning_rate=0.2,
n_estimators=45,
objective="multi:softprob",
nthread=-1,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=0.6,
colsample_bytree=0.6,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
seed=42
)
xgb.fit(X, y)
print "\tDONE"
print "Predicting...",
y_pred = xgb.predict_proba(X_test)
print "\tDONE"
print "Generating submission...",
# Taking the 5 classes with highest probabilities
ids = []
cts = []
for i in range(len(id_test)):
idx = id_test[i]
ids += [idx] * 5
cts += le.inverse_transform(np.argsort(y_pred[i])[::-1])[:5].tolist()
# Generate Submission
sub = pd.DataFrame(np.column_stack((ids, cts)), columns=['id', 'country'])
date = datetime.datetime.now().strftime("%m-%d_%H:%M")
sub.to_csv('../datasets/submissions/xgboost' + str(date) + '.csv',index=False)
print "\tDONE"
print "END"
| import sys
import numpy as np
import pandas as pd
import datetime
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from xgboost.sklearn import XGBClassifier
sys.path.append('..')
from utils.data_loading import load_users_data
from utils.preprocessing import one_hot_encoding
print "Loading data...",
train_users, test_users = load_users_data()
users = pd.read_csv('../datasets/processed/users_with_session.csv')
print "\tDONE"
print "Preprocessing...",
# Get train labels and ids
labels = train_users['country_destination'].values
train_users = train_users.drop(['country_destination'], axis=1)
id_test = test_users['id']
piv_train = train_users.shape[0]
drop_list = [
'id',
'country_destination',
'Unnamed: 0',
'date_account_created',
'date_first_active',
'timestamp_first_active'
]
# Drop columns
users = users.drop(drop_list, axis=1)
# Fill NaNs
users = users.fillna(-1)
# Encode categorical features
categorical_features = [
'gender', 'signup_method', 'signup_flow', 'language', 'affiliate_channel',
'affiliate_provider', 'first_affiliate_tracked', 'signup_app',
'first_device_type', 'first_browser', 'most_used_device'
]
users = one_hot_encoding(users, categorical_features)
# Splitting train and test
values = users.values
values = StandardScaler().fit_transform(values)
X = values[:piv_train]
le = LabelEncoder()
y = le.fit_transform(labels)
X_test = values[piv_train:]
print "\tDONE"
print "Fitting..."
# Classifier
xgb = XGBClassifier(
max_depth=6,
learning_rate=0.2,
n_estimators=45,
objective="multi:softprob",
nthread=-1,
gamma=0,
min_child_weight=1,
max_delta_step=0,
subsample=0.6,
colsample_bytree=0.6,
colsample_bylevel=1,
reg_alpha=0,
reg_lambda=1,
scale_pos_weight=1,
base_score=0.5,
seed=42
)
xgb.fit(X, y)
print "\tDONE"
print "Predicting..."
y_pred = xgb.predict_proba(X_test)
print "\tDONE"
print "Generating submission..."
# Taking the 5 classes with highest probabilities
ids = []
cts = []
for i in range(len(id_test)):
idx = id_test[i]
ids += [idx] * 5
cts += le.inverse_transform(np.argsort(y_pred[i])[::-1])[:5].tolist()
# Generate Submission
sub = pd.DataFrame(np.column_stack((ids, cts)), columns=['id', 'country'])
date = datetime.datetime.now().strftime("%m-%d_%H:%M")
sub.to_csv('../datasets/submissions/xgboost' + str(date) + '.csv',index=False)
print "\tDONE"
print "END"
| mit | Python |
2f23a758c66d100315dd6de0aed0dd16191e58a4 | update version | drgrib/dotmap | setup.py | setup.py | from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
version = '1.2.36',
name='dotmap',
packages=['dotmap'], # this must be the same as the name above
description='ordered, dynamically-expandable dot-access dictionary',
author='Chris Redford',
author_email='credford@gmail.com',
url='https://github.com/drgrib/dotmap', # use the URL to the github repo
download_url='https://github.com/drgrib/dotmap/tarball/1.0',
keywords=['dict', 'dot', 'map', 'order', 'ordered',
'ordereddict', 'access', 'dynamic'], # arbitrary keywords
classifiers=[],
long_description=long_description,
long_description_content_type="text/markdown",
)
| from setuptools import setup
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
version = '1.2.35',
name='dotmap',
packages=['dotmap'], # this must be the same as the name above
description='ordered, dynamically-expandable dot-access dictionary',
author='Chris Redford',
author_email='credford@gmail.com',
url='https://github.com/drgrib/dotmap', # use the URL to the github repo
download_url='https://github.com/drgrib/dotmap/tarball/1.0',
keywords=['dict', 'dot', 'map', 'order', 'ordered',
'ordereddict', 'access', 'dynamic'], # arbitrary keywords
classifiers=[],
long_description=long_description,
long_description_content_type="text/markdown",
)
| mit | Python |
ef0854c23c5d75c2742da3e8bde9fc258854d7aa | Add urls in setup. | fmenabe/python-clif | setup.py | setup.py | # -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='clif',
version='0.2.0',
author='François Ménabé',
author_email='francois.menabe@gmail.com',
url = 'http://github.com/fmenabe/python-clif',
download_url = 'http://github.com/fmenabe/python-clif',
license='MIT License',
description='Framework for generating command-line',
long_description=open('README.rst').read(),
keywords=['command-line', 'argparse', 'wrapper', 'clg', 'framework'],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
packages=['clif'],
install_requires=['clg', 'pyyaml', 'yamlordereddictloader'])
| # -*- coding: utf-8 -*-
from distutils.core import setup
setup(
name='clif',
version='0.2.0',
author='François Ménabé',
author_email='francois.menabe@gmail.com',
license='MIT License',
description='Framework for generating command-line',
long_description=open('README.rst').read(),
keywords=['command-line', 'argparse', 'wrapper', 'clg', 'framework'],
classifiers=[
'Development Status :: 1 - Planning',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Utilities'
],
packages=['clif'],
install_requires=['clg', 'pyyaml', 'yamlordereddictloader'])
| mit | Python |
25454cb6e911a9644fb3e667275531f1fce563dd | Bump version to 7.2.1 | balloob/pychromecast,balloob/pychromecast | setup.py | setup.py | from setuptools import setup, find_packages
long_description = open("README.rst").read()
setup(
name="PyChromecast",
version="7.2.1",
license="MIT",
url="https://github.com/balloob/pychromecast",
author="Paulus Schoutsen",
author_email="paulus@paulusschoutsen.nl",
description="Python module to talk to Google Chromecast.",
long_description=long_description,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms="any",
install_requires=list(val.strip() for val in open("requirements.txt")),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| from setuptools import setup, find_packages
long_description = open("README.rst").read()
setup(
name="PyChromecast",
version="7.2.0",
license="MIT",
url="https://github.com/balloob/pychromecast",
author="Paulus Schoutsen",
author_email="paulus@paulusschoutsen.nl",
description="Python module to talk to Google Chromecast.",
long_description=long_description,
packages=find_packages(),
zip_safe=False,
include_package_data=True,
platforms="any",
install_requires=list(val.strip() for val in open("requirements.txt")),
classifiers=[
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| mit | Python |
bcf436e1ecfa92b5d1b1aeb42b3ebaafff8c5f32 | Bump version to 2.2.0. | box/rotunicode,box/rotunicode | setup.py | setup.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sys import version_info
from setuptools import setup, find_packages
from os.path import dirname, join
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
]
def main():
base_dir = dirname(__file__)
test_requirements = ['genty>=1.0.0']
test_suite = 'test'
if version_info[0] == 2 and version_info[1] == 6:
test_requirements.append('unittest2')
test_suite = 'unittest2.collector'
setup(
name='rotunicode',
version='2.2.0',
description='Python library for converting between a string of ASCII '
'and non-ASCII chars maintaining readability',
long_description=open(join(base_dir, 'README.rst')).read(),
author='Box',
author_email='oss@box.com',
url='https://github.com/box/rotunicode',
license=open(join(base_dir, 'LICENSE')).read(),
packages=find_packages(exclude=['test']),
install_requires=['six'],
tests_require=test_requirements,
test_suite=test_suite,
zip_safe=False,
entry_points={
'console_scripts': [
'rotunicode = rotunicode.console_scripts:main',
],
},
classifiers=CLASSIFIERS,
)
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from sys import version_info
from setuptools import setup, find_packages
from os.path import dirname, join
CLASSIFIERS = [
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Topic :: Software Development :: Testing',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy',
'Operating System :: OS Independent',
'Operating System :: POSIX',
'Operating System :: Microsoft :: Windows',
'Operating System :: MacOS :: MacOS X',
]
def main():
base_dir = dirname(__file__)
test_requirements = ['genty>=1.0.0']
test_suite = 'test'
if version_info[0] == 2 and version_info[1] == 6:
test_requirements.append('unittest2')
test_suite = 'unittest2.collector'
setup(
name='rotunicode',
version='2.0.0',
description='Python library for converting between a string of ASCII '
'and non-ASCII chars maintaining readability',
long_description=open(join(base_dir, 'README.rst')).read(),
author='Box',
author_email='oss@box.com',
url='https://github.com/box/rotunicode',
license=open(join(base_dir, 'LICENSE')).read(),
packages=find_packages(exclude=['test']),
install_requires=['six'],
tests_require=test_requirements,
test_suite=test_suite,
zip_safe=False,
entry_points={
'console_scripts': [
'rotunicode = rotunicode.console_scripts:main',
],
},
classifiers=CLASSIFIERS,
)
if __name__ == '__main__':
main()
| apache-2.0 | Python |
f715d565ea9274505ae31aeefd145a7dc921dd8a | Move setup of comparison files to separate method | leaffan/pynhldb | tests/test_summary_downloader.py | tests/test_summary_downloader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import itertools
import tempfile
from zipfile import ZipFile
from utils.summary_downloader import SummaryDownloader
def test_download_unzipped():
base_tgt_dir, date, files = set_up_comparison_files()
sdl = SummaryDownloader(base_tgt_dir, date, zip_summaries=False)
sdl.run()
tgt_dir = sdl.get_tgt_dir()
assert sorted(os.listdir(tgt_dir)) == sorted(files)
def test_download_zipped():
tgt_dir, date, files = set_up_comparison_files()
sdl = SummaryDownloader(tgt_dir, date)
sdl.run()
zip_path = sdl.get_zip_path()
zip = ZipFile(zip_path)
assert sorted(zip.namelist()) == sorted(files)
def set_up_comparison_files():
tgt_dir = tempfile.mkdtemp(prefix='sdl_test_')
date = "Oct 24, 2016"
prefixes = ["ES", "FC", "GS", "PL", "RO", "SS", "TH", "TV"]
game_ids = ["020081", "020082"]
# setting up list of all HTML report files that should be downloaded for
# specified date
files = ["".join(c) + ".HTM" for c in list(
itertools.product(prefixes, game_ids))]
# adding JSON game report files
files.extend(["".join((gid, ".json")) for gid in game_ids])
# adding shootout report for one of the games
files.append("SO020082.HTM")
return tgt_dir, date, files
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import itertools
import tempfile
from zipfile import ZipFile
from utils.summary_downloader import SummaryDownloader
def test_download_unzipped():
date = "Oct 24, 2016"
tgt_dir = tempfile.mkdtemp(prefix='sdl_test_')
prefixes = ["ES", "FC", "GS", "PL", "RO", "SS", "TH", "TV"]
game_ids = ["020081", "020082"]
# setting up list of all HTML report files that should be downloaded for
# specified date
files = ["".join(c) + ".HTM" for c in list(
itertools.product(prefixes, game_ids))]
# adding JSON game report files
files.extend(["".join((gid, ".json")) for gid in game_ids])
# adding shootout report for one of the games
files.append("SO020082.HTM")
sdl = SummaryDownloader(tgt_dir, date, zip_summaries=False)
sdl.run()
tgt_dir = sdl.get_tgt_dir()
assert sorted(os.listdir(tgt_dir)) == sorted(files)
def test_download_zipped():
date = "Oct 24, 2016"
tgt_dir = tempfile.mkdtemp(prefix='sdl_test_')
prefixes = ["ES", "FC", "GS", "PL", "RO", "SS", "TH", "TV"]
game_ids = ["020081", "020082"]
# setting up list of all HTML report files that should be downloaded for
# specified date
files = ["".join(c) + ".HTM" for c in list(
itertools.product(prefixes, game_ids))]
# adding JSON game report files
files.extend(["".join((gid, ".json")) for gid in game_ids])
# adding shootout report for one of the games
files.append("SO020082.HTM")
sdl = SummaryDownloader(tgt_dir, date)
sdl.run()
zip_path = sdl.get_zip_path()
zip = ZipFile(zip_path)
assert sorted(zip.namelist()) == sorted(files)
| mit | Python |
4527cd762ce4121c85e2b5904161d3d27637234c | Remove ipdb call :( | mailjet/mailjet-apiv3-python | setup.py | setup.py | #!/usr/bin/env python
# coding=utf-8
import os
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
PACKAGE_NAME = 'mailjet_rest'
# Dynamically calculate the version based on mailjet_rest.VERSION.
version = __import__('mailjet_rest').get_version()
setup(
name=PACKAGE_NAME,
version=version,
author='starenka',
author_email='starenka0@gmail.com',
maintainer='Mailjet',
maintainer_email='api@mailjet.com',
download_url='https://github.com/mailjet/mailjet-apiv3-python/releases/tag/v' + version,
url='https://github.com/mailjet/mailjet-apiv3-python',
description=('Mailjet V3 API wrapper'),
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'],
license='MIT',
keywords='Mailjet API v3 / v3.1 Python Wrapper',
include_package_data=True,
install_requires=['requests>=2.4.3'],
tests_require=['unittest'],
entry_points={},
packages=['mailjet_rest'],
)
| #!/usr/bin/env python
# coding=utf-8
import os
from setuptools import setup
HERE = os.path.abspath(os.path.dirname(__file__))
PACKAGE_NAME = 'mailjet_rest'
import ipdb; ipdb.set_trace()
# Dynamically calculate the version based on mailjet_rest.VERSION.
version = __import__('mailjet_rest').get_version()
setup(
name=PACKAGE_NAME,
version=version,
author='starenka',
author_email='starenka0@gmail.com',
maintainer='Mailjet',
maintainer_email='api@mailjet.com',
download_url='https://github.com/mailjet/mailjet-apiv3-python/releases/tag/v' + version,
url='https://github.com/mailjet/mailjet-apiv3-python',
description=('Mailjet V3 API wrapper'),
classifiers=['Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Utilities'],
license='MIT',
keywords='Mailjet API v3 / v3.1 Python Wrapper',
include_package_data=True,
install_requires=['requests>=2.4.3'],
tests_require=['unittest'],
entry_points={},
packages=['mailjet_rest'],
)
| mit | Python |
49cb87440acb2d29ade7104213a0cfafd8ababcb | Update setup.py | jannon/django-haystackbrowser,vmarkovtsev/django-haystackbrowser,jannon/django-haystackbrowser,vmarkovtsev/django-haystackbrowser,vmarkovtsev/django-haystackbrowser,jannon/django-haystackbrowser | setup.py | setup.py | # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from haystackbrowser import version
SHORT_DESC = (u'A reusable Django application for viewing and debugging '
u'all the data that has been pushed into Haystack')
REQUIREMENTS = [
'Django>=1.2.0',
'django-haystack>=1.2.0',
'django-classy-tags>=0.3.4.1',
]
TROVE_CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Natural Language :: English',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Database :: Front-Ends',
'License :: OSI Approved :: BSD License',
]
PACKAGES = find_packages()
setup(
name='django-haystackbrowser',
version=version,
description=SHORT_DESC,
author='Keryn Knight',
author_email='python-package@kerynknight.com',
license = "BSD License",
keywords = "django",
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/kezabelle/django-haystackbrowser/tree/master',
packages=PACKAGES,
install_requires=REQUIREMENTS,
classifiers=TROVE_CLASSIFIERS,
platforms=['OS Independent'],
package_data={'': [
'templates/admin/haystackbrowser/*.html',
]},
)
| # -*- coding: utf-8 -*-
import os
from setuptools import setup, find_packages
from haystackbrowser import version
SHORT_DESC = (u'A reusable Django application for viewing and debugging '
u'all the data that has been pushed into Haystack')
REQUIREMENTS = [
'Django>=1.2.0',
'django-haystack>=1.2.0',
'django-classy-tags>=0.3.4.1',
]
TROVE_CLASSIFIERS = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Natural Language :: English',
'Topic :: Internet :: WWW/HTTP :: Site Management',
'Topic :: Database :: Front-Ends',
'License :: OSI Approved :: BSD License',
]
PACKAGES = find_packages()
setup(
name='django-haystackbrowser',
version=version,
description=SHORT_DESC,
author='Keryn Knight',
author_email='python-package@kerynknight.com',
license = "BSD License",
keywords = "django",
long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),
url='https://github.com/kezabelle/django-haystackbrowser/tree/master',
packages=PACKAGES,
install_requires=REQUIREMENTS,
classifiers=TROVE_CLASSIFIERS,
platforms=['OS Independent'],
)
| bsd-2-clause | Python |
73d69274a21818830b3a0b87ad574321c958c0f7 | Add Framework::Pytest to list of classifiers | pytest-dev/pytest-cpp,pytest-dev/pytest-cpp,pytest-dev/pytest-cpp | setup.py | setup.py | from setuptools import setup
setup(
name="pytest-cpp",
version='0.4',
packages=['pytest_cpp'],
entry_points={
'pytest11': ['cpp = pytest_cpp.plugin'],
},
install_requires=['pytest', 'colorama'],
# metadata for upload to PyPI
author="Bruno Oliveira",
author_email="nicoddemus@gmail.com",
description="Use pytest's runner to discover and execute C++ tests",
long_description=open('README.rst').read(),
license="MIT",
keywords="pytest test unittest",
url="http://github.com/pytest-dev/pytest-cpp",
classifiers=[
'Development Status :: 4 - Beta',
'Framework :: Pytest',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: C++',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
)
| from setuptools import setup
setup(
name="pytest-cpp",
version='0.4',
packages=['pytest_cpp'],
entry_points={
'pytest11': ['cpp = pytest_cpp.plugin'],
},
install_requires=['pytest', 'colorama'],
# metadata for upload to PyPI
author="Bruno Oliveira",
author_email="nicoddemus@gmail.com",
description="Use pytest's runner to discover and execute C++ tests",
long_description=open('README.rst').read(),
license="MIT",
keywords="pytest test unittest",
url="http://github.com/pytest-dev/pytest-cpp",
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: C++',
'Topic :: Software Development :: Quality Assurance',
'Topic :: Software Development :: Testing',
],
)
| mit | Python |
6e57b110750e3e871156a7716e95ffed3adf2cd1 | Use io.open with encoding='utf-8' and flake8 compliance | morepath/more.chameleon | setup.py | setup.py | import io
from setuptools import setup, find_packages
long_description = '\n'.join((
io.open('README.rst', encoding='utf-8').read(),
io.open('CHANGES.txt', encoding='utf-8').read()
))
setup(name='more.chameleon',
version='0.3.dev0',
description="Chameleon template integration for Morepath",
long_description=long_description,
author="Martijn Faassen",
author_email="faassen@startifact.com",
keywords='morepath chameleon',
license="BSD",
url="http://pypi.python.org/pypi/more.chameleon",
namespace_packages=['more'],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'morepath >= 0.10',
'chameleon >= 2.20'
],
extras_require=dict(
test=['pytest >= 2.6.0',
'pytest-cov',
'WebTest'],
),
)
| import os, io
from setuptools import setup, find_packages
long_description = (
io.open('README.rst', encoding='utf-8').read()
+ '\n' +
io.open('CHANGES.txt', encoding='utf-8').read())
setup(name='more.chameleon',
version='0.3.dev0',
description="Chameleon template integration for Morepath",
long_description=long_description,
author="Martijn Faassen",
author_email="faassen@startifact.com",
keywords='morepath chameleon',
license="BSD",
url="http://pypi.python.org/pypi/more.chameleon",
namespace_packages=['more'],
packages=find_packages(),
include_package_data=True,
zip_safe=False,
install_requires=[
'setuptools',
'morepath >= 0.10',
'chameleon >= 2.20'
],
extras_require = dict(
test=['pytest >= 2.6.0',
'pytest-cov',
'WebTest'],
),
)
| bsd-3-clause | Python |
28c6a0129475265ae571bf4fbf1c3fce2112843a | Fix setup | ericfourrier/auto-clean | setup.py | setup.py | from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(name='autoc',
version="0.1",
description='autoc is a package for data cleaning exploration and modelling in pandas',
long_description=readme(),
author=['Eric Fourrier'],
author_email='ericfourrier0@gmail.com',
license='MIT',
url='https://github.com/ericfourrier/auto-cl',
packages=find_packages(),
test_suite='test',
keywords=['cleaning', 'preprocessing', 'pandas'],
install_requires=[
'numpy>=1.7.0',
'pandas>=0.15.0',
'seaborn>=0.5',
'scipy>=0.14']
)
| from setuptools import setup, find_packages
def readme():
with open('README.md') as f:
return f.read()
setup(name='autoc',
version="0.1",
description='autoc is a package for data cleaning exploration and modelling in pandas',
long_description=readme(),
author=['Eric Fourrier'],
author_email='ericfourrier0@gmail.com',
license='MIT',
url='https://github.com/ericfourrier/auto-cl',
packages=find_packages(),
test_suite='test',
keywords=['cleaning', 'preprocessing', 'pandas'],
install_requires=[
'numpy>=1.7.0',
'pandas>=0.15.0',
'seaborn>=0.5'
'scipy>=0.14']
)
| mit | Python |
4b37bfb1f6b39b088ba19eaea38c9fbde49cb214 | Integrate nose with setuptools. | BlueDragonX/detach | setup.py | setup.py | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
readme = open(os.path.join(here, 'README.md')).read()
except IOError:
readme = ''
setup_requires = [
'nose>=1.3.0',
]
setup(
name='detach',
version='0.1',
description="Fork and detach the current processe.",
long_description=readme,
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
keywords='fork daemon detach',
author='Ryan Bourgeois',
author_email='bluedragonx@gmail.com',
url='https://github.com/bluedragonx/detach',
license='BSD-derived',
py_modules=['detach'],
setup_requires=setup_requires,
test_suite = 'nose.collector',
)
| import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
try:
readme = open(os.path.join(here, 'README.md')).read()
except IOError:
readme = ''
setup(
name='detach',
version='0.1',
description="Fork and detach the current processe.",
long_description=readme,
classifiers=[
"Intended Audience :: Developers",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"License :: OSI Approved :: BSD License",
],
keywords='fork daemon detach',
author='Ryan Bourgeois',
author_email='bluedragonx@gmail.com',
url='https://github.com/bluedragonx/detach',
license='BSD-derived',
py_modules=['detach'],
)
| bsd-3-clause | Python |
3089a44a090e43465d67c4983185aaa469b422a4 | use README.rst as long_description in setup.py | Muges/audiotsm | setup.py | setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
AudioTSM
~~~~~~~~
AudioTSM is a python library for real-time audio time-scale modification
procedures, i.e. algorithms that change the speed of an audio signal without
changing its pitch.
:copyright: (c) 2017 by Muges.
:license: MIT, see LICENSE for more details.
"""
import ast
import re
from setuptools import setup, find_packages
try:
from sphinx.setup_command import BuildDoc
except ImportError:
BuildDoc = None
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('audiotsm/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
with open('README.rst', 'r') as f:
long_description = f.read()
setup(
name="audiotsm",
version=version,
description="A real-time audio time-scale modification library",
long_description=long_description,
license="MIT",
url="https://github.com/Muges/audiotsm",
author="Muges",
author_email="git@muges.fr",
packages=find_packages(),
install_requires=[
"numpy",
],
setup_requires=[
"pytest-runner",
],
tests_require=[
"pytest",
"pytest-pylint",
"pytest-flake8",
"pytest-coverage",
],
extras_require={
"doc": ["sphinx", "sphinx_rtd_theme"],
"StreamWriter": ["sounddevice"]
},
cmdclass={
'doc': BuildDoc
},
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Multimedia :: Sound/Audio"
]
)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=invalid-name
"""
AudioTSM
~~~~~~~~
AudioTSM is a python library for real-time audio time-scale modification
procedures, i.e. algorithms that change the speed of an audio signal without
changing its pitch.
:copyright: (c) 2017 by Muges.
:license: MIT, see LICENSE for more details.
"""
import ast
import re
from setuptools import setup, find_packages
try:
from sphinx.setup_command import BuildDoc
except ImportError:
BuildDoc = None
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('audiotsm/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name="audiotsm",
version=version,
description="A real-time audio time-scale modification library",
long_description=__doc__,
license="MIT",
url="https://github.com/Muges/audiotsm",
author="Muges",
author_email="git@muges.fr",
packages=find_packages(),
install_requires=[
"numpy",
],
setup_requires=[
"pytest-runner",
],
tests_require=[
"pytest",
"pytest-pylint",
"pytest-flake8",
"pytest-coverage",
],
extras_require={
"doc": ["sphinx", "sphinx_rtd_theme"],
"StreamWriter": ["sounddevice"]
},
cmdclass={
'doc': BuildDoc
},
classifiers=[
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python",
"Programming Language :: Python :: 3 :: Only",
"Topic :: Multimedia :: Sound/Audio"
]
)
| mit | Python |
0008a2d4b60ad827209ad40f1059ff02eac130db | Disable caching of CommentOptions temporarily. | MichalMaM/ella,MichalMaM/ella,WhiskeyMedia/ella,petrlosa/ella,petrlosa/ella,ella/ella,whalerock/ella,WhiskeyMedia/ella,whalerock/ella,whalerock/ella | ella/ellacomments/models.py | ella/ellacomments/models.py | from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from ella.core.cache.utils import CachedGenericForeignKey, get_cached_object
class DefaultCommentOptions(object):
blocked = False
premoderated = False
check_profanities = True
class CommentOptionsManager(models.Manager):
def get_for_object(self, object):
ct, id = ContentType.objects.get_for_model(object), object.pk
try:
#return get_cached_object(CommentOptionsObject, target_ct=ct, target_id=id)
return self.get(target_ct=ct, target_id=id)
except CommentOptionsObject.DoesNotExist:
return DefaultCommentOptions()
class CommentOptionsObject(models.Model):
"""contains comment options string for object"""
objects = CommentOptionsManager()
target_ct = models.ForeignKey(ContentType, verbose_name=_('Target content type'))
target_id = models.PositiveIntegerField(_('Target id'))
target = CachedGenericForeignKey(ct_field="target_ct", fk_field="target_id")
blocked = models.BooleanField(_('Disable comments'), default=False)
premoderated = models.BooleanField(_('Show comments only after approval'), default=False)
check_profanities = models.BooleanField(_('Check profanities in comments'), default=False, editable=False)
def __unicode__(self):
return u"%s: %s" % (_("Comment Options"), self.target)
class Meta:
unique_together = (('target_ct', 'target_id',),)
verbose_name = _('Comment Options')
verbose_name_plural = _('Comment Options')
class BannedIP(models.Model):
"""
"""
created = models.DateTimeField(_('Created'), default=datetime.now, editable=False)
ip_address = models.IPAddressField(_('IP Address'), unique=True)
reason = models.CharField(_('Reason'), max_length=255, blank=True, null=True)
def __unicode__(self):
return self.ip_address
class Meta:
verbose_name = _('Banned IP')
verbose_name_plural = _('Banned IPs')
ordering = ('-created',)
| from datetime import datetime
from django.db import models
from django.contrib.contenttypes.models import ContentType
from django.utils.translation import ugettext_lazy as _
from ella.core.cache.utils import CachedGenericForeignKey, get_cached_object
class DefaultCommentOptions(object):
blocked = False
premoderated = False
check_profanities = True
class CommentOptionsManager(models.Manager):
def get_for_object(self, object):
ct, id = ContentType.objects.get_for_model(object), object.pk
try:
return get_cached_object(CommentOptionsObject, target_ct=ct, target_id=id)
except CommentOptionsObject.DoesNotExist:
return DefaultCommentOptions()
class CommentOptionsObject(models.Model):
"""contains comment options string for object"""
objects = CommentOptionsManager()
target_ct = models.ForeignKey(ContentType, verbose_name=_('Target content type'))
target_id = models.PositiveIntegerField(_('Target id'))
target = CachedGenericForeignKey(ct_field="target_ct", fk_field="target_id")
blocked = models.BooleanField(_('Disable comments'), default=False)
premoderated = models.BooleanField(_('Show comments only after approval'), default=False)
check_profanities = models.BooleanField(_('Check profanities in comments'), default=False, editable=False)
def __unicode__(self):
return u"%s: %s" % (_("Comment Options"), self.target)
class Meta:
unique_together = (('target_ct', 'target_id',),)
verbose_name = _('Comment Options')
verbose_name_plural = _('Comment Options')
class BannedIP(models.Model):
"""
"""
created = models.DateTimeField(_('Created'), default=datetime.now, editable=False)
ip_address = models.IPAddressField(_('IP Address'), unique=True)
reason = models.CharField(_('Reason'), max_length=255, blank=True, null=True)
def __unicode__(self):
return self.ip_address
class Meta:
verbose_name = _('Banned IP')
verbose_name_plural = _('Banned IPs')
ordering = ('-created',)
| bsd-3-clause | Python |
5fcabd15218a774018b40dadd23a1a67f790a32d | Fix monkeypatching for new internals. | ionelmc/virtualenv,ionelmc/virtualenv,ionelmc/virtualenv | tests/unit/builders/test_venv.py | tests/unit/builders/test_venv.py | import subprocess
import pretend
import pytest
import virtualenv.builders.venv
from virtualenv.builders.venv import VenvBuilder, _SCRIPT
from virtualenv import _compat
def test_venv_builder_check_available_success(monkeypatch):
check_output = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
virtualenv.builders.venv,
"check_output",
check_output,
)
assert VenvBuilder.check_available("wat")
assert check_output.calls == [
pretend.call(["wat", "-c", "import venv"], stderr=subprocess.STDOUT),
]
def test_venv_builder_check_available_fails(monkeypatch):
@pretend.call_recorder
def check_output(*args, **kwargs):
raise subprocess.CalledProcessError(1, "an error!")
monkeypatch.setattr(
virtualenv.builders.venv,
"check_output",
check_output,
)
assert not VenvBuilder.check_available("wat")
assert check_output.calls == [
pretend.call(["wat", "-c", "import venv"], stderr=subprocess.STDOUT),
]
@pytest.mark.parametrize("system_site_packages", [True, False])
def test_venv_builder_create_venv(tmpdir, monkeypatch, system_site_packages):
check_call = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(subprocess, "check_call", check_call)
monkeypatch.setattr(VenvBuilder, "_get_base_python_bin", lambda self: "real-wat")
builder = VenvBuilder(
"wat",
None,
system_site_packages=system_site_packages,
)
builder.create_virtual_environment(str(tmpdir))
script = _SCRIPT.format(
system_site_packages=system_site_packages,
destination=str(tmpdir),
)
assert check_call.calls == [
pretend.call(["real-wat", "-c", script])
]
| import subprocess
import pretend
import pytest
import virtualenv.builders.venv
from virtualenv.builders.venv import VenvBuilder, _SCRIPT
def test_venv_builder_check_available_success(monkeypatch):
check_output = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(
virtualenv.builders.venv,
"check_output",
check_output,
)
assert VenvBuilder.check_available("wat")
assert check_output.calls == [
pretend.call(["wat", "-c", "import venv"], stderr=subprocess.STDOUT),
]
def test_venv_builder_check_available_fails(monkeypatch):
@pretend.call_recorder
def check_output(*args, **kwargs):
raise subprocess.CalledProcessError(1, "an error!")
monkeypatch.setattr(
virtualenv.builders.venv,
"check_output",
check_output,
)
assert not VenvBuilder.check_available("wat")
assert check_output.calls == [
pretend.call(["wat", "-c", "import venv"], stderr=subprocess.STDOUT),
]
@pytest.mark.parametrize("system_site_packages", [True, False])
def test_venv_builder_create_venv(tmpdir, monkeypatch, system_site_packages):
check_call = pretend.call_recorder(lambda *a, **kw: None)
monkeypatch.setattr(subprocess, "check_call", check_call)
builder = VenvBuilder(
"wat",
None,
system_site_packages=system_site_packages,
)
builder.create_virtual_environment(str(tmpdir))
script = _SCRIPT.format(
system_site_packages=system_site_packages,
destination=str(tmpdir),
)
assert check_call.calls == [
pretend.call(["wat", "-c", script])
]
| mit | Python |
1628ad872ca07e225dc7e923b7824cb2b0835b20 | Make it possible to use detect_assertions.py with arbitrary log files. (Still works as a module for af_timed_run as well.) | nth10sd/funfuzz,nth10sd/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,MozillaSecurity/funfuzz,nth10sd/funfuzz | dom/automation/detect_assertions.py | dom/automation/detect_assertions.py | #!/usr/bin/env python
# Recognizes NS_ASSERTIONs based on condition, text, and filename (ignoring irrelevant parts of the path)
# Recognizes JS_ASSERT based on condition only :(
import os, sys
def fs(currentFile):
global ignoreList
foundSomething = False
# map from (assertion message) to (true, if seen in the current file)
seenInCurrentFile = {}
for line in currentFile:
line = line.strip("\x07").rstrip("\n")
if ((line.startswith("###!!!") or line.startswith("Assertion failure:")) and not (line in seenInCurrentFile)):
seenInCurrentFile[line] = True
if not (ignore(line)):
print line
foundSomething = True
currentFile.close()
return foundSomething
def getIgnores():
global simpleIgnoreList
ignoreFile = open("known_assertions.txt", "r")
for line in ignoreFile:
line = line.strip()
if ((len(line) > 0) and not line.startswith("#")):
mpi = line.find(", file ") # NS_ASSERTION and friends use this format
if (mpi == -1):
mpi = line.find(": file ") # NS_ABORT uses this format
if (mpi == -1):
simpleIgnoreList.append(line)
else:
twoPartIgnoreList.append((line[:mpi+7], line[mpi+7:].replace("/", os.sep)))
def ignore(assertion):
global simpleIgnoreList
for ig in simpleIgnoreList:
if assertion.find(ig) != -1:
return True
for (part1, part2) in twoPartIgnoreList:
if assertion.find(part1) != -1 and assertion.find(part2) != -1:
return True
return False
simpleIgnoreList = []
twoPartIgnoreList = []
getIgnores()
#print "detect_assertions is ready (ignoring %d strings without filenames and %d strings with filenames)" % (len(simpleIgnoreList), len(twoPartIgnoreList))
# For use by af_timed_run
def amiss(logPrefix):
currentFile = file(logPrefix + "-err", "r")
return fs(currentFile)
# For standalone use
if __name__ == "__main__":
currentFile = file(sys.argv[1], "r")
fs(currentFile)
| #!/usr/bin/env python
# Recognizes NS_ASSERTIONs based on condition, text, and filename (ignoring irrelevant parts of the path)
# Recognizes JS_ASSERT based on condition only :(
import os
def amiss(logPrefix):
global ignoreList
foundSomething = False
currentFile = file(logPrefix + "-err", "r")
# map from (assertion message) to (true, if seen in the current file)
seenInCurrentFile = {}
for line in currentFile:
line = line.strip("\x07").rstrip("\n")
if ((line.startswith("###!!!") or line.startswith("Assertion failure:")) and not (line in seenInCurrentFile)):
seenInCurrentFile[line] = True
if not (ignore(line)):
print line
foundSomething = True
currentFile.close()
return foundSomething
def getIgnores():
global simpleIgnoreList
ignoreFile = open("known_assertions.txt", "r")
for line in ignoreFile:
line = line.strip()
if ((len(line) > 0) and not line.startswith("#")):
mpi = line.find(", file ") # NS_ASSERTION and friends use this format
if (mpi == -1):
mpi = line.find(": file ") # NS_ABORT uses this format
if (mpi == -1):
simpleIgnoreList.append(line)
else:
twoPartIgnoreList.append((line[:mpi+7], line[mpi+7:].replace("/", os.sep)))
def ignore(assertion):
global simpleIgnoreList
for ig in simpleIgnoreList:
if assertion.find(ig) != -1:
return True
for (part1, part2) in twoPartIgnoreList:
if assertion.find(part1) != -1 and assertion.find(part2) != -1:
return True
return False
simpleIgnoreList = []
twoPartIgnoreList = []
getIgnores()
#print "detect_assertions is ready (ignoring %d strings without filenames and %d strings with filenames)" % (len(simpleIgnoreList), len(twoPartIgnoreList))
| mpl-2.0 | Python |
d2c9a90c1697fc04a9e4b6b8c7b114a743797620 | Raise an exception if BASE_DOMAIN is not defined | ipsosante/django-subdomains | subdomains/utils.py | subdomains/utils.py | import functools
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse as simple_reverse
def current_site_domain():
domain = getattr(settings, 'BASE_DOMAIN')
prefix = 'www.'
if getattr(settings, 'REMOVE_WWW_FROM_DOMAIN', False) \
and domain.startswith(prefix):
domain = domain.replace(prefix, '', 1)
return domain
get_domain = current_site_domain
def urljoin(domain, path=None, scheme=None):
"""
Joins a domain, path and scheme part together, returning a full URL.
:param domain: the domain, e.g. ``example.com``
:param path: the path part of the URL, e.g. ``/example/``
:param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the
value of ``settings.DEFAULT_URL_SCHEME``
:returns: a full URL
"""
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path or '', None, None, None))
def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None, current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app)
return urljoin(domain, path, scheme=scheme)
#: :func:`reverse` bound to insecure (non-HTTPS) URLs scheme
insecure_reverse = functools.partial(reverse, scheme='http')
#: :func:`reverse` bound to secure (HTTPS) URLs scheme
secure_reverse = functools.partial(reverse, scheme='https')
#: :func:`reverse` bound to be relative to the current scheme
relative_reverse = functools.partial(reverse, scheme='')
| import functools
try:
from urlparse import urlunparse
except ImportError:
from urllib.parse import urlunparse
from django.conf import settings
from django.core.urlresolvers import reverse as simple_reverse
def current_site_domain():
domain = getattr(settings, 'BASE_DOMAIN', False)
prefix = 'www.'
if getattr(settings, 'REMOVE_WWW_FROM_DOMAIN', False) \
and domain.startswith(prefix):
domain = domain.replace(prefix, '', 1)
return domain
get_domain = current_site_domain
def urljoin(domain, path=None, scheme=None):
"""
Joins a domain, path and scheme part together, returning a full URL.
:param domain: the domain, e.g. ``example.com``
:param path: the path part of the URL, e.g. ``/example/``
:param scheme: the scheme part of the URL, e.g. ``http``, defaulting to the
value of ``settings.DEFAULT_URL_SCHEME``
:returns: a full URL
"""
if scheme is None:
scheme = getattr(settings, 'DEFAULT_URL_SCHEME', 'http')
return urlunparse((scheme, domain, path or '', None, None, None))
def reverse(viewname, subdomain=None, scheme=None, args=None, kwargs=None, current_app=None):
"""
Reverses a URL from the given parameters, in a similar fashion to
:meth:`django.core.urlresolvers.reverse`.
:param viewname: the name of URL
:param subdomain: the subdomain to use for URL reversing
:param scheme: the scheme to use when generating the full URL
:param args: positional arguments used for URL reversing
:param kwargs: named arguments used for URL reversing
:param current_app: hint for the currently executing application
"""
urlconf = settings.SUBDOMAIN_URLCONFS.get(subdomain, settings.ROOT_URLCONF)
domain = get_domain()
if subdomain is not None:
domain = '%s.%s' % (subdomain, domain)
path = simple_reverse(viewname, urlconf=urlconf, args=args, kwargs=kwargs, current_app=current_app)
return urljoin(domain, path, scheme=scheme)
#: :func:`reverse` bound to insecure (non-HTTPS) URLs scheme
insecure_reverse = functools.partial(reverse, scheme='http')
#: :func:`reverse` bound to secure (HTTPS) URLs scheme
secure_reverse = functools.partial(reverse, scheme='https')
#: :func:`reverse` bound to be relative to the current scheme
relative_reverse = functools.partial(reverse, scheme='')
| mit | Python |
0e2059c4b8c975b6cfdc2197d2e35808db967980 | Build a hand-crafted response object by calling instead of a simple redirect | antoinecarme/sklearn2sql_heroku,antoinecarme/sklearn2sql_heroku | WS/Sklearn2SQL_Heroku.py | WS/Sklearn2SQL_Heroku.py |
from __future__ import absolute_import
from flask import Flask, redirect, request #import objects from the Flask model
import os, platform
import requests, json
app = Flask(__name__)
sklearn2sql_uri = os.environ.get("SKLEARN2SQL_URI", "http://c:1888")
def get_post_response(request1):
# request1 = request
print(request1.json)
r = requests.post(sklearn2sql_uri + "/model", json=request1.json)
print("JSON", r.json())
response = app.response_class(
response=json.dumps(r.json()),
status=200,
mimetype='application/json'
)
# json.dumps(data)
return response
@app.route('/', methods=['GET'])
def test():
return redirect(sklearn2sql_uri, code=307)
@app.route('/models', methods=['GET'])
def returnAllModels():
return redirect(sklearn2sql_uri, code=307)
@app.route('/model/<string:name>', methods=['GET'])
def returnOneModel(name):
return redirect(sklearn2sql_uri + "/model/" + name, code=307)
# POST requests
@app.route('/model', methods=['POST'])
def addOneModel():
# return redirect(sklearn2sql_uri + "/model", code=307)
return get_post_response(request)
# PUT requests
if __name__ == '__main__':
print(platform.platform())
print(platform.uname())
print(platform.processor())
print(platform.python_implementation(), platform.python_version());
print(os.environ);
port = int(os.environ.get("PORT", 1888))
app.run(host='0.0.0.0', port=port, debug=True)
|
from __future__ import absolute_import
from flask import Flask, redirect #import objects from the Flask model
import os, platform
app = Flask(__name__)
sklearn2sql_uri = os.environ.get("SKLEARN2SQL_URI", "http://c:1888")
@app.route('/', methods=['GET'])
def test():
return redirect(sklearn2sql_uri, code=307)
@app.route('/models', methods=['GET'])
def returnAllModels():
return redirect(sklearn2sql_uri, code=307)
@app.route('/model/<string:name>', methods=['GET'])
def returnOneModel(name):
return redirect(sklearn2sql_uri + "/model/" + name, code=307)
# POST requests
@app.route('/model', methods=['POST'])
def addOneModel():
return redirect(sklearn2sql_uri + "/model", code=307)
# PUT requests
if __name__ == '__main__':
print(platform.platform())
print(platform.uname())
print(platform.processor())
print(platform.python_implementation(), platform.python_version());
print(os.environ);
port = int(os.environ.get("PORT", 1888))
app.run(host='0.0.0.0', port=port, debug=True)
| bsd-3-clause | Python |
cd660350f82ff38b6d566b6f1c8198695e7e63de | Add more initial data | cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network,cshields/satnogs-network | SatNOGS/base/management/commands/initialize.py | SatNOGS/base/management/commands/initialize.py | from orbit import satellite
from django.core.management.base import BaseCommand
from base.tests import ObservationFactory, StationFactory
from base.models import Satellite
class Command(BaseCommand):
help = 'Create initial fixtures'
def handle(self, *args, **options):
ObservationFactory.create_batch(200)
StationFactory.create_batch(200)
satellites = Satellite.objects.all()
for obj in satellites:
try:
sat = satellite(obj.norad_cat_id)
except:
self.stdout.write(('Satellite {} with Identifier {} does '
'not exist [deleted]').format(obj.name, obj.norad_cat_id))
obj.delete()
continue
obj.name = sat.name()
tle = sat.tle()
obj.tle0 = tle[0]
obj.tle1 = tle[1]
obj.tle2 = tle[2]
obj.save()
self.stdout.write(('Satellite {} with Identifier {} '
'found [updated]').format(obj.norad_cat_id, obj.name))
| from orbit import satellite
from django.core.management.base import BaseCommand
from base.tests import ObservationFactory, StationFactory
from base.models import Satellite
class Command(BaseCommand):
help = 'Create initial fixtures'
def handle(self, *args, **options):
ObservationFactory.create_batch(20)
StationFactory.create_batch(20)
satellites = Satellite.objects.all()
for obj in satellites:
try:
sat = satellite(obj.norad_cat_id)
except:
self.stdout.write(('Satellite {} with Identifier {} does '
'not exist [deleted]').format(obj.name, obj.norad_cat_id))
obj.delete()
continue
obj.name = sat.name()
tle = sat.tle()
obj.tle0 = tle[0]
obj.tle1 = tle[1]
obj.tle2 = tle[2]
obj.save()
self.stdout.write(('Satellite {} with Identifier {} '
'found [updated]').format(obj.norad_cat_id, obj.name)) | agpl-3.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.