commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f874c337e0d0bb8cce8cfe6523c0d06c37b93198 | add basic setup.py definition | setup.py | setup.py | from distutils.core import setup
setup(
name='SaasyDjangoRestFramework',
version='0.1dev',
packages=[],
license='Creative Commons Attribution-Noncommercial-Share Alike license',
description="SaaS plugin for the django rest framework",
)
| Python | 0.000001 | |
63143c94cef353d7bae13f7b13650801bb901c94 | Test for explicit start/end args to str methods for unicode. | tests/unicode/unicode_pos.py | tests/unicode/unicode_pos.py | # str methods with explicit start/end pos
print("Привет".startswith("П"))
print("Привет".startswith("р", 1))
print("абвба".find("а", 1))
print("абвба".find("а", 1, -1))
| Python | 0 | |
e0b17a1778fb8946adff14614098ba6d34014746 | add some more testing | test/test_route.py | test/test_route.py | import unittest
import bottle
from tools import api
from bottle import _re_flatten
class TestReFlatten(unittest.TestCase):
def test_re_flatten(self):
self.assertEqual(_re_flatten(r"(?:aaa)(_bbb)"), '(?:aaa)(?:_bbb)')
self.assertEqual(_re_flatten(r"(aaa)(_bbb)"), '(?:aaa)(?:_bbb)')
self.assertEqual(_re_flatten(r"aaa)(_bbb)"), 'aaa)(?:_bbb)')
self.assertEqual(_re_flatten(r"aaa(_bbb)"), 'aaa(?:_bbb)')
self.assertEqual(_re_flatten(r"aaa_bbb"), 'aaa_bbb')
class TestRoute(unittest.TestCase):
@api('0.12')
def test_callback_inspection(self):
def x(a, b): pass
def d(f):
def w():
return f()
return w
route = bottle.Route(None, None, None, d(x))
self.assertEqual(route.get_undecorated_callback(), x)
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
def d2(foo):
def d(f):
def w():
return f()
return w
return d
route = bottle.Route(None, None, None, d2('foo')(x))
self.assertEqual(route.get_undecorated_callback(), x)
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
def test_callback_inspection_multiple_args(self):
# decorator with argument, modifying kwargs
def d2(f="1"):
def d(fn):
def w(*args, **kwargs):
# modification of kwargs WITH the decorator argument
# is necessary requirement for the error
kwargs["a"] = f
return fn(*args, **kwargs)
return w
return d
@d2(f='foo')
def x(a, b):
return
route = bottle.Route(None, None, None, x)
# triggers the "TypeError: 'foo' is not a Python function"
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
if bottle.py3k:
def test_callback_inspection_newsig(self):
env = {}
eval(compile('def foo(a, *, b=5): pass', '<foo>', 'exec'), env, env)
route = bottle.Route(None, None, None, env['foo'])
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
| import unittest
import bottle
from tools import api
class TestRoute(unittest.TestCase):
@api('0.12')
def test_callback_inspection(self):
def x(a, b): pass
def d(f):
def w():
return f()
return w
route = bottle.Route(None, None, None, d(x))
self.assertEqual(route.get_undecorated_callback(), x)
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
def d2(foo):
def d(f):
def w():
return f()
return w
return d
route = bottle.Route(None, None, None, d2('foo')(x))
self.assertEqual(route.get_undecorated_callback(), x)
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
def test_callback_inspection_multiple_args(self):
# decorator with argument, modifying kwargs
def d2(f="1"):
def d(fn):
def w(*args, **kwargs):
# modification of kwargs WITH the decorator argument
# is necessary requirement for the error
kwargs["a"] = f
return fn(*args, **kwargs)
return w
return d
@d2(f='foo')
def x(a, b):
return
route = bottle.Route(None, None, None, x)
# triggers the "TypeError: 'foo' is not a Python function"
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
if bottle.py3k:
def test_callback_inspection_newsig(self):
env = {}
eval(compile('def foo(a, *, b=5): pass', '<foo>', 'exec'), env, env)
route = bottle.Route(None, None, None, env['foo'])
self.assertEqual(set(route.get_callback_args()), set(['a', 'b']))
| Python | 0 |
5a3971a3048adec420796ad5a781f0a84eca7d31 | Remove transifex-client dev dependency | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import re
import sys
from setuptools import setup, find_packages
# requirements
setup_requirements = ['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else []
install_requirements = ['guessit>=2.0.1', 'babelfish>=0.5.2', 'enzyme>=0.4.1', 'beautifulsoup4>=4.2.0',
'requests>=2.0', 'click>=4.0', 'dogpile.cache>=0.5.4', 'stevedore>=1.0.0',
'chardet>=2.3.0', 'pysrt>=1.0.1', 'six>=1.9.0']
test_requirements = ['sympy', 'vcrpy>=1.6.1', 'pytest', 'pytest-pep8', 'pytest-flakes', 'pytest-cov']
if sys.version_info < (3, 3):
test_requirements.append('mock')
dev_requirements = ['tox', 'sphinx', 'wheel']
# package informations
with io.open('subliminal/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]$', f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with io.open('README.rst', 'r', encoding='utf-8') as f:
readme = f.read()
with io.open('HISTORY.rst', 'r', encoding='utf-8') as f:
history = f.read()
setup(name='subliminal',
version=version,
license='MIT',
description='Subtitles, faster than your thoughts',
long_description=readme + '\n\n' + history,
keywords='subtitle subtitles video movie episode tv show',
url='https://github.com/Diaoul/subliminal',
author='Antoine Bertin',
author_email='diaoulael@gmail.com',
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Video'
],
entry_points={
'subliminal.providers': [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
'subscenter = subliminal.providers.subscenter:SubsCenterProvider',
'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
],
'babelfish.language_converters': [
'addic7ed = subliminal.converters.addic7ed:Addic7edConverter',
'thesubdb = subliminal.converters.thesubdb:TheSubDBConverter',
'tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter'
],
'console_scripts': [
'subliminal = subliminal.cli:subliminal'
]
},
setup_requires=setup_requirements,
install_requires=install_requirements,
tests_require=test_requirements,
extras_require={
'test': test_requirements,
'dev': dev_requirements
})
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import io
import re
import sys
from setuptools import setup, find_packages
# requirements
setup_requirements = ['pytest-runner'] if {'pytest', 'test', 'ptr'}.intersection(sys.argv) else []
install_requirements = ['guessit>=2.0.1', 'babelfish>=0.5.2', 'enzyme>=0.4.1', 'beautifulsoup4>=4.2.0',
'requests>=2.0', 'click>=4.0', 'dogpile.cache>=0.5.4', 'stevedore>=1.0.0',
'chardet>=2.3.0', 'pysrt>=1.0.1', 'six>=1.9.0']
test_requirements = ['sympy', 'vcrpy>=1.6.1', 'pytest', 'pytest-pep8', 'pytest-flakes', 'pytest-cov']
if sys.version_info < (3, 3):
test_requirements.append('mock')
dev_requirements = ['tox', 'sphinx', 'transifex-client', 'wheel']
# package informations
with io.open('subliminal/__init__.py', 'r') as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]$', f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError('Cannot find version information')
with io.open('README.rst', 'r', encoding='utf-8') as f:
readme = f.read()
with io.open('HISTORY.rst', 'r', encoding='utf-8') as f:
history = f.read()
setup(name='subliminal',
version=version,
license='MIT',
description='Subtitles, faster than your thoughts',
long_description=readme + '\n\n' + history,
keywords='subtitle subtitles video movie episode tv show',
url='https://github.com/Diaoul/subliminal',
author='Antoine Bertin',
author_email='diaoulael@gmail.com',
packages=find_packages(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Multimedia :: Video'
],
entry_points={
'subliminal.providers': [
'addic7ed = subliminal.providers.addic7ed:Addic7edProvider',
'opensubtitles = subliminal.providers.opensubtitles:OpenSubtitlesProvider',
'podnapisi = subliminal.providers.podnapisi:PodnapisiProvider',
'subscenter = subliminal.providers.subscenter:SubsCenterProvider',
'thesubdb = subliminal.providers.thesubdb:TheSubDBProvider',
'tvsubtitles = subliminal.providers.tvsubtitles:TVsubtitlesProvider'
],
'babelfish.language_converters': [
'addic7ed = subliminal.converters.addic7ed:Addic7edConverter',
'thesubdb = subliminal.converters.thesubdb:TheSubDBConverter',
'tvsubtitles = subliminal.converters.tvsubtitles:TVsubtitlesConverter'
],
'console_scripts': [
'subliminal = subliminal.cli:subliminal'
]
},
setup_requires=setup_requirements,
install_requires=install_requirements,
tests_require=test_requirements,
extras_require={
'test': test_requirements,
'dev': dev_requirements
})
| Python | 0 |
601fd8a7b4fea5db2f23741735e6e7f1332b4417 | Fix issue #949 - Add mock as dependency | setup.py | setup.py | #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"boto3>=1.2.1",
"cookies",
"requests>=2.0",
"xmltodict",
"dicttoxml",
"six",
"werkzeug",
"pyaml",
"pytz",
"python-dateutil",
"mock",
]
extras_require = {
'server': ['flask'],
}
setup(
name='moto',
version='1.0.0',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail.com',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| #!/usr/bin/env python
from __future__ import unicode_literals
from setuptools import setup, find_packages
install_requires = [
"Jinja2>=2.8",
"boto>=2.36.0",
"boto3>=1.2.1",
"cookies",
"requests>=2.0",
"xmltodict",
"dicttoxml",
"six",
"werkzeug",
"pyaml",
"pytz",
"python-dateutil",
]
extras_require = {
'server': ['flask'],
}
setup(
name='moto',
version='1.0.0',
description='A library that allows your python tests to easily'
' mock out the boto library',
author='Steve Pulec',
author_email='spulec@gmail.com',
url='https://github.com/spulec/moto',
entry_points={
'console_scripts': [
'moto_server = moto.server:main',
],
},
packages=find_packages(exclude=("tests", "tests.*")),
install_requires=install_requires,
extras_require=extras_require,
license="Apache",
test_suite="tests",
classifiers=[
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Testing",
],
)
| Python | 0 |
84c1ee14e1717ec63782dd5a159fe5848fce1cc4 | Add Python 3.6 and 3.7 to PyPI page | setup.py | setup.py | #!/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 7):
raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.2+ to use bottle.")
import bottle
setup(name='bottle',
version=bottle.__version__,
description='Fast and simple WSGI-framework for small web-applications.',
long_description=bottle.__doc__,
author=bottle.__author__,
author_email='marc@gsites.de',
url='http://bottlepy.org/',
py_modules=['bottle'],
scripts=['bottle.py'],
license='MIT',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
)
| #!/usr/bin/env python
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.version_info < (2, 7):
raise NotImplementedError("Sorry, you need at least Python 2.7 or Python 3.2+ to use bottle.")
import bottle
setup(name='bottle',
version=bottle.__version__,
description='Fast and simple WSGI-framework for small web-applications.',
long_description=bottle.__doc__,
author=bottle.__author__,
author_email='marc@gsites.de',
url='http://bottlepy.org/',
py_modules=['bottle'],
scripts=['bottle.py'],
license='MIT',
platforms='any',
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries',
'Topic :: Internet :: WWW/HTTP :: HTTP Servers',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Application',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware',
'Topic :: Internet :: WWW/HTTP :: WSGI :: Server',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| Python | 0 |
f539736b563fb6859a6bffb03aed42b57880744f | create module | test_vbn_parser.py | test_vbn_parser.py | import networkx as nx
import matplotlib.pyplot as plt
import vbn_parser as p
# initialize
G = nx.Graph()
link = 'http://vbn.aau.dk/da/organisations/antennas-propagation-and-radio-networking(c2c38bb3-3d28-4b2c-8bc4-949211d2d486)/publications.rss?altordering=publicationOrderByPublicationYearThenCreated&pageSize=500'
# populate the graph
p.parse_vbn(link, G)
# visualize the graph
labels = nx.get_node_attributes(G, 'name')
nx.draw(G, labels=labels)
plt.show()
nx.write_graphml(G,"test1.graphml")
| Python | 0.000001 | |
30fc52d77170844c5b3820d997286df744eb56db | Add setup.py for packaging and PyPI submission. | setup.py | setup.py | from setuptools import setup
name = 'hsalf'
setup(
name=name,
version='0.0.1',
author='Nam T. Nguyen',
author_email='namn@bluemoon.com.vn',
url='https://bitbucket.org/namn/hsalf/overview',
description='Hsalf is a pure Python library to read and write Flash files (SWF).',
long_description='Hsalf is a pure Python library to read and write Flash files (SWF).',
platforms='Any',
package_dir={'':'.'},
packages=['hsalf'],
package_data={'': ['README', 'LICENSE']},
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Multimedia',
'Topic :: Security',
'Topic :: Software Development :: Assemblers',
'Topic :: Software Development :: Disassemblers',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| Python | 0 | |
d433d16a0375669c1664bbe8f20a8db5924fa92e | Add basic benchmark for length | tests/benchmark.py | tests/benchmark.py | from random import choice
from string import ascii_lowercase
import timeit
import grapheme
def random_ascii_string(n):
return "".join(choice(ascii_lowercase) for i in range(n))
long_ascii_string = random_ascii_string(1000)
statements = [
"len(long_ascii_string)",
"grapheme.length(long_ascii_string)",
]
for statement in statements:
n = 100
result = timeit.timeit(statement, setup="from __main__ import long_ascii_string; import grapheme", number=n) / 100
print("{}: {} seconds".format(statement, result))
| Python | 0.000026 | |
dd015a7bf9c69e2f96488c9239be694303b30176 | Create setup.py | setup.py | setup.py | import setuptools
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
name="deepctr",
version="0.1.3",
author="Weichen Shen",
author_email="wcshen1994@163.com",
description="DeepCTR is a Easy-to-use,Modular and Extendible package of deep-learning based CTR models ,including serval DNN-based CTR models and lots of core components layer of the models which can be used to build your own custom model.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/shenweichen/deepctr",
packages=setuptools.find_packages(),
install_requires=[],
extras_require={
"tf": ["tensorflow>=1.4.0,<1.7.0"],
"tf_gpu": ["tensorflow-gpu>=1.4.0,<1.7.0"],
},
entry_points={
},
classifiers=(
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
),
)
| Python | 0.000001 | |
5a2a2aa33a2e206042b3d28a830d00bdae2f5ad8 | Add setup.py for distribution | setup.py | setup.py | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup, find_packages
setup(
name = "rw",
version = "0.0.1",
packages = find_packages(),
scripts = ['scripts/rw'],
install_requires = ['docopt'],
author = "Ben Pringle",
author_email = "ben.pringle@gmail.com",
url = "http://github.com/Pringley/rw",
description = "Generate random words (e.g. for passwords)",
license = "MIT",
)
| Python | 0 | |
91affa8b785e0b5261f69448c1c08de429460bb9 | Add setup.py | setup.py | setup.py | from setuptools import setup
setup(
name='django-yadt',
packages=(
'django_yadt',
'django_yadt.management',
'django_yadt.management.commands',
),
)
| Python | 0.000001 | |
090568a6e31fd8de1975d0e2cecb2fcd559acd3e | Add natsort to setup.py | setup.py | setup.py | #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
__version__ = "0.1.0-dev"
from setuptools import setup
from glob import glob
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: OS Independent
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
long_description = """Qiita is a databasing and UI effort for QIIME"""
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='qiita',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita',
author="Qiita development team",
author_email="mcdonadt@colorado.edu",
url='http://biocore.github.io/qiita',
test_suite='nose.collector',
packages=['qiita_core',
'qiita_db',
'qiita_pet',
'qiita_ware',
],
package_data={'qiita_core': ['support_files/config_test.txt'],
'qiita_db': ['support_files/*sql',
'support_files/test_data/preprocessed_data/*',
'support_files/test_data/processed_data/*',
'support_files/test_data/raw_data/*',
'support_files/test_data/analysis/*',
'support_files/test_data/reference/*',
'support_files/test_data/job/*.txt',
'support_files/test_data/job/2_test_folder/*',
'support_files/work_data/*']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8"],
'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
install_requires=['psycopg2', 'click == 1.0', 'future >= 0.13.0',
'bcrypt', 'pandas', 'numpy >= 1.7', 'tornado==3.1.1',
'tornado_redis', 'redis', 'ipython[all]', 'pyparsing',
'mock', 'h5py', 'biom-format', 'natsort'
'scikit-bio == 0.2.0'],
classifiers=classifiers
)
| #!/usr/bin/env python
# -----------------------------------------------------------------------------
# Copyright (c) 2013, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
__version__ = "0.1.0-dev"
from setuptools import setup
from glob import glob
classes = """
Development Status :: 4 - Beta
License :: OSI Approved :: BSD License
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development :: Libraries :: Application Frameworks
Topic :: Software Development :: Libraries :: Python Modules
Programming Language :: Python
Programming Language :: Python :: 2.7
Programming Language :: Python :: Implementation :: CPython
Operating System :: OS Independent
Operating System :: POSIX :: Linux
Operating System :: MacOS :: MacOS X
"""
long_description = """Qiita is a databasing and UI effort for QIIME"""
classifiers = [s.strip() for s in classes.split('\n') if s]
setup(name='qiita',
version=__version__,
long_description=long_description,
license="BSD",
description='Qiita',
author="Qiita development team",
author_email="mcdonadt@colorado.edu",
url='http://biocore.github.io/qiita',
test_suite='nose.collector',
packages=['qiita_core',
'qiita_db',
'qiita_pet',
'qiita_ware',
],
package_data={'qiita_core': ['support_files/config_test.txt'],
'qiita_db': ['support_files/*sql',
'support_files/test_data/preprocessed_data/*',
'support_files/test_data/processed_data/*',
'support_files/test_data/raw_data/*',
'support_files/test_data/analysis/*',
'support_files/test_data/reference/*',
'support_files/test_data/job/*.txt',
'support_files/test_data/job/2_test_folder/*',
'support_files/work_data/*']},
scripts=glob('scripts/*'),
extras_require={'test': ["nose >= 0.10.1", "pep8"],
'doc': ["Sphinx >= 1.2.2", "sphinx-bootstrap-theme"]},
install_requires=['psycopg2', 'click == 1.0', 'future >= 0.13.0',
'bcrypt', 'pandas', 'numpy >= 1.7', 'tornado==3.1.1',
'tornado_redis', 'redis', 'ipython[all]', 'pyparsing',
'mock', 'h5py', 'biom-format',
'scikit-bio == 0.2.0'],
classifiers=classifiers
)
| Python | 0 |
631afff160077cc629054613d59cb47747f6c20d | Fix setup to exclude tests | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
LONG_DESCRIPTION = open('README.md').read()
REQUIREMENTS = [str(ir.req) for ir in parse_requirements('requirements.txt', session=PipSession())
if not (getattr(ir, 'link', False) or getattr(ir, 'url', False))]
setup(
name='Superdesk-Core',
version='0.0.1-dev',
description='Superdesk Core library',
long_description=LONG_DESCRIPTION,
author='petr jasek',
author_email='petr.jasek@sourcefabric.org',
url='https://github.com/superdesk/superdesk-core',
license='GPLv3',
platforms=['any'],
packages=find_packages(exclude=['tests']),
install_requires=REQUIREMENTS,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
from setuptools import setup, find_packages
from pip.req import parse_requirements
from pip.download import PipSession
LONG_DESCRIPTION = open('README.md').read()
REQUIREMENTS = [str(ir.req) for ir in parse_requirements('requirements.txt', session=PipSession())
if not (getattr(ir, 'link', False) or getattr(ir, 'url', False))]
setup(
name='Superdesk-Core',
version='0.0.1-dev',
description='Superdesk Core library',
long_description=LONG_DESCRIPTION,
author='petr jasek',
author_email='petr.jasek@sourcefabric.org',
url='https://github.com/superdesk/superdesk-core',
license='GPLv3',
platforms=['any'],
packages=find_packages(),
install_requires=REQUIREMENTS,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
]
)
| Python | 0.000001 |
5b251911d112abea610477a3f552a78be6b5b1e1 | add utils module | utils.py | utils.py | from flask import Response, request
def add_basic_auth(blueprint, username, password, realm='RQ Dashboard'):
'''Add HTTP Basic Auth to a blueprint.
Note this is only for casual use!
'''
@blueprint.before_request
def basic_http_auth(*args, **kwargs):
auth = request.authorization
if (auth is None or auth.password != password or auth
.username != username):
return Response(
'Please login',
401,
{'WWW-Authenticate': 'Basic realm="{0}"'.format(realm)})
| Python | 0 | |
70ad81a24e218fd2b5fed03224611eae63e0d58f | add main argument processing file | boxes/argsParse.py | boxes/argsParse.py | import argparse
| Python | 0 | |
a633cc0b4ee376ff02af101154e60b8b33dfda08 | add migration for old logs | scripts/migrate_preprint_logs.py | scripts/migrate_preprint_logs.py | import sys
import logging
from datetime import datetime
from modularodm import Q
from modularodm.exceptions import NoResultsFound
from website.app import init_app
from website.models import NodeLog, PreprintService
logger = logging.getLogger(__name__)
def main(dry):
if dry:
logging.warn('DRY mode running')
now = datetime.utcnow()
initiated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_INITIATED) & Q('date', 'lt', now))
for log in initiated_logs:
try:
preprint = PreprintService.find_one(Q('node', 'eq', log.node))
log.params.update({
'preprint': {
'id': preprint._id
},
'service': {
'name': preprint.provider.name
}
})
logging.info('Updating log {} from node {}, with preprint id: {}'.format(log._id, log.node.title, preprint._id))
if not dry:
log.save()
except NoResultsFound:
pass
updated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_FILE_UPDATED) & Q('date', 'lt', now))
for log in updated_logs:
try:
preprint = PreprintService.find_one(Q('node', 'eq', log.node))
log.params.update({
'preprint': {
'id': preprint._id
}
})
logging.info('Updating log {} from node {}, with preprint id: {}'.format(log._id, log.node.title, preprint._id))
if not dry:
log.save()
except NoResultsFound:
pass
if __name__ == '__main__':
init_app(routes=False) # Sets the storage backends on all models
dry = 'dry' in sys.argv
main(dry)
| Python | 0 | |
caf2d7108d7329da562a012775bac0a87d4c62b6 | Create db_create.py | fade/db_create.py | fade/db_create.py | #!flask/bin/python
"""
See LICENSE.txt file for copyright and license details.
"""
from migrate.versioning import api
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO
from app import db
import os.path
db.create_all()
if not os.path.exists(SQLALCHEMY_MIGRATE_REPO):
api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
else:
api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,
api.version(SQLALCHEMY_MIGRATE_REPO))
| Python | 0.000011 | |
f6f75172b1b8a41fc5ae025416ea665258d4ff4c | Add script for updating favicon from gh avatar | favicon-update.py | favicon-update.py | from PIL import Image
import requests
from io import BytesIO
# This whole script was done using Google and StackOverflow
# How to generate ico files
# https://stackoverflow.com/a/36168447/1697953
# How to get GitHub avatar location from username
# https://stackoverflow.com/a/36380674/1697953
# How to read image data from URL
# https://stackoverflow.com/a/23489503/1697953
# How to follow redirects in requests
# https://stackoverflow.com/a/50606372/1697953
avatarUrl = 'https://github.com/sorashi.png'
if __name__ == "__main__":
r = requests.head(avatarUrl, allow_redirects=True)
print('Avatar located at ' + r.url)
response = requests.get(r.url)
img = Image.open(BytesIO(response.content))
img.save('favicon.ico', sizes=[(16, 16), (32, 32), (48, 48), (64, 64)]) | Python | 0 | |
75031595de8726dcd21535b13385c4e6c89aa190 | Add run meter task | datastore/tasks.py | datastore/tasks.py | from __future__ import absolute_import
from celery import shared_task
from datastore.models import Project
@shared_task
def run_meter(project_pk):
project = Project.objects.get(pk=project_pk):
project.run_meter()
| Python | 0.999872 | |
3ed6a0e337c99d12fb4abd96b2230e13388289e7 | Add tests for Process functionality. | jarn/mkrelease/tests/test_process.py | jarn/mkrelease/tests/test_process.py | import unittest
import os
from jarn.mkrelease.process import Process
from jarn.mkrelease.testing import JailSetup
from jarn.mkrelease.testing import quiet
class PopenTests(unittest.TestCase):
@quiet
def test_simple(self):
process = Process()
rc, lines = process.popen('echo "Hello world"')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_quiet(self):
process = Process(quiet=True)
rc, lines = process.popen('echo "Hello world"')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_env(self):
env = os.environ.copy()
env['HELLO'] = 'Hello world'
process = Process(quiet=True, env=env)
rc, lines = process.popen('echo ${HELLO}')
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_echo(self):
process = Process()
rc, lines = process.popen('echo "Hello world"', echo=False)
self.assertEqual(rc, 0)
self.assertEqual(lines, ['Hello world'])
def test_echo2(self):
process = Process()
rc, lines = process.popen('$ "Hello world"', echo2=False)
self.assertEqual(rc, 127)
self.assertEqual(lines, [])
@quiet
def test_bad_cmd(self):
process = Process()
rc, lines = process.popen('$ "Hello world"')
self.assertEqual(rc, 127)
self.assertEqual(lines, [])
class PipeTests(unittest.TestCase):
    """Tests for Process.pipe(), which runs a shell command and returns its stdout."""
    def test_simple(self):
        # pipe() should capture and return the command's standard output.
        process = Process()
        value = process.pipe('echo "Hello world"')
        self.assertEqual(value, 'Hello world')
    def test_quiet(self):
        # The quiet flag must not change the captured return value.
        process = Process(quiet=True)
        value = process.pipe('echo "Hello world"')
        self.assertEqual(value, 'Hello world')
    def test_env(self):
        # An environment passed to Process should be visible to the child shell.
        env = os.environ.copy()
        env['HELLO'] = 'Hello world'
        process = Process(quiet=True, env=env)
        value = process.pipe('echo ${HELLO}')
        self.assertEqual(value, 'Hello world')
    @quiet
    def test_bad_cmd(self):
        # '$' is not a runnable program; pipe() should yield an empty string.
        process = Process()
        value = process.pipe('$ "Hello world"')
        self.assertEqual(value, '')
class SystemTests(JailSetup):
def test_simple(self):
process = Process()
rc = process.system('echo "Hello world" > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_quiet(self):
process = Process(quiet=True)
rc = process.system('echo "Hello world"')
self.assertEqual(rc, 0)
def test_env(self):
env = os.environ.copy()
env['HELLO'] = 'Hello world'
process = Process(env=env)
rc = process.system('echo ${HELLO} > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_bad_cmd(self):
process = Process()
rc = process.system('$ "Hello world" 2> output')
self.assertEqual(rc, 127)
class OsSystemTests(JailSetup):
def test_simple(self):
process = Process()
rc = process.os_system('echo "Hello world" > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_quiet(self):
process = Process(quiet=True)
rc = process.os_system('echo "Hello world"')
self.assertEqual(rc, 0)
def test_env(self):
env = {'HELLO': 'Hello world'}
process = Process(env=env)
rc = process.os_system('echo ${HELLO} > output')
self.assertEqual(rc, 0)
self.assertEqual(process.pipe('cat output'), 'Hello world')
def test_bad_cmd(self):
process = Process()
rc = process.os_system('$ "Hello world" 2> output')
self.assertNotEqual(rc, 0)
def test_suite():
    """Collect all TestCases in this module (conventional test-runner entry point)."""
    return unittest.defaultTestLoader.loadTestsFromName(__name__)
| Python | 0 | |
27c5a09ddbe2ddf14b2f4c84ebb668adbdfd7070 | ADD example.basicserver for test | example/basicserver.py | example/basicserver.py |
from wood import Wood
w = Wood(__name__,debug=True)
IndexHandler = w.empty(uri='/',name='IndexHandler')
@IndexHandler.get
def index_page(self):
self.write('滑稽,这里什么都没有\n(HuajiEnv)')
if __name__ == '__main__':
w.start(port=6000)
| Python | 0 | |
537bb46c6806ef69ab3022641a76f50f97630e11 | Add first migration: Create the database tables. | boson/db/sqlalchemy/alembic/versions/1f22e3c5ff66_initial_revision.py | boson/db/sqlalchemy/alembic/versions/1f22e3c5ff66_initial_revision.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Initial revision
Revision ID: 1f22e3c5ff66
Revises: None
Create Date: 2012-10-26 17:37:18.592202
"""
# revision identifiers, used by Alembic.
revision = '1f22e3c5ff66'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
"""
Create the tables.
"""
op.create_table(
'services',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('auth_fields', sa.Text),
)
op.create_table(
'categories',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('service_id', sa.String(36), sa.ForeignKey('services.id'),
nullable=False),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('usage_fset', sa.Text),
sa.Column('quota_fsets', sa.Text),
)
op.create_table(
'resources',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('service_id', sa.String(36), sa.ForeignKey('services.id'),
nullable=False),
sa.Column('category_id', sa.String(36), sa.ForeignKey('categories.id'),
nullable=False),
sa.Column('name', sa.String(64), nullable=False),
sa.Column('parameters', sa.Text),
sa.Column('absolute', sa.Boolean, nullable=False),
)
op.create_table(
'usages',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('parameter_data', sa.Text),
sa.Column('auth_data', sa.Text),
sa.Column('used', sa.BigInteger, nullable=False),
sa.Column('reserved', sa.BigInteger, nullable=False),
sa.Column('until_refresh', sa.Integer),
sa.Column('refresh_id', sa.String(36)),
)
op.create_table(
'quotas',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('auth_data', sa.Text),
sa.Column('limit', sa.BigInteger),
)
op.create_table(
'reservations',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('expire', sa.DateTime, nullable=False),
)
op.create_table(
'reserved_items',
sa.Column('created_at', sa.DateTime),
sa.Column('updated_at', sa.DateTime),
sa.Column('id', sa.String(36), primary_key=True),
sa.Column('reservation_id', sa.String(36),
sa.ForeignKey('reservations.id'), nullable=False),
sa.Column('resource_id', sa.String(36), sa.ForeignKey('resources.id'),
nullable=False),
sa.Column('usage_id', sa.String(36), sa.ForeignKey('usages.id'),
nullable=False),
sa.Column('delta', sa.BigInteger, nullable=False),
)
def downgrade():
"""
Drop the tables.
"""
op.drop_table('services')
op.drop_table('categories')
op.drop_table('resources')
op.drop_table('usages')
op.drop_table('quotas')
op.drop_table('reservations')
op.drop_table('reserved_items')
| Python | 0 | |
8049e2f0bb0a12bb301ab4390c3e4da3d90f0369 | Move stagingsettings to new 'cosmos' project tree | cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py | cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py | """
Module testsettings
These settings allow Django unittests to setup a temporary databse and run the
tests of the installed applications.
"""
DEBUG = True
TEMPLATE_DEBUG = DEBUG
from bdp_fe.conf.base_settings import *
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': '/tmp/bdp_fe.db'
}
}
MEDIA_ROOT = ''
MEDIA_URL = ''
STATIC_ROOT = ''
STATIC_URL = '/static/'
ADMIN_MEDIA_PREFIX = '/static/admin/'
ADMINS = (
('admin', 'cosmos-prod@tid.es'),
)
MANAGERS = ADMINS
LANDING_ROOT = '/tmp/landing/'
CLUSTER_CONF = {
'host': 'localhost',
'port': 9888,
'mongobase': 'mongodb://pshdp04',
}
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
},
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'simple'
},
'null': {
'level': 'DEBUG',
'class': 'django.utils.log.NullHandler',
}
},
'formatters': {
'verbose': {
'format': '%(levelname)s %(asctime)s %(module)s %(message)s'
},
'simple': {
'format': '%(levelname)s %(message)s'
},
},
'loggers': {
'django.request': {
'handlers': ['console', 'mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'bdp_fe': {
'handlers': ['console', 'mail_admins'],
'level': 'DEBUG',
'propagate': True,
}
}
}
| Python | 0 | |
1b8edd0d6ce3f66c8bacad8bad62de0d40284799 | Add dodge game example | examples/dodge_game.py | examples/dodge_game.py | """
Dodge game
Get the player back and forth across the screen while dodging the enemy
"""
from microbit import *
import music
class Enemy:
    """
    Enemy which moves vertically down the screen
    """
    def __init__(self):
        # Start in the centre column, one row above the visible grid.
        self.x = 2
        self.y = -1
    def get_positions(self):
        # Two-pixel body: head plus the cell below it, wrapping to row 0
        # when the head sits on the bottom row.
        head = (self.x, self.y)
        tail_row = self.y + 1 if self.y < 4 else 0
        return (head, (self.x, tail_row))
    def move(self):
        # Advance one row, wrapping back round to the top.
        self.y = (self.y + 1) % 5
    def draw(self):
        for col, row in self.get_positions():
            display.set_pixel(col, row, 9)
class Player:
    """
    Left-right moving player which can be controlled with buttons
    """
    # Direction values added to self.x each frame while a crossing is in progress.
    RIGHT = 1
    LEFT = -1
    STOPPED = 0
    # Leftmost / rightmost columns of the display (columns 0..4).
    LEFT_EDGE = 0
    RIGHT_EDGE = 4
    def __init__(self):
        # alive: cleared when the enemy hits us (see die()).
        self.alive = True
        # score: number of completed edge-to-edge crossings.
        self.score = 0
        # just_scored: True only for the frame in which a crossing finished.
        self.just_scored = False
        self.x, self.y = self.LEFT_EDGE, 2
        self.direction = self.STOPPED
    def get_position(self):
        # Current (column, row) of the player pixel.
        return (self.x, self.y)
    def die(self):
        """
        Player dies - show their score and play sad music
        """
        self.alive = False
        display.show(str(self.score))
        music.play(music.WAWAWAWAA)
    def move(self):
        """
        Move the player one step further in their
        current direction
        """
        self.just_scored = False
        self.x += self.direction
        if self.x in (self.LEFT_EDGE, self.RIGHT_EDGE):
            # Player reached the edge - another run survived!
            if self.direction != self.STOPPED:
                self.score += 1
                self.just_scored = True
            self.direction = self.STOPPED
    def draw(self):
        """
        Draw the player
        """
        display.set_pixel(self.x, self.y, 9)
        if self.just_scored:
            # Short beep as immediate scoring feedback.
            music.pitch(400, 40)
    def act_on_input(self):
        """Start a crossing towards the opposite edge when a button is pressed."""
        # If we're standing still, look for a button press.
        if self.direction == self.STOPPED:
            if button_b.was_pressed() and self.x == self.LEFT_EDGE:
                self.direction = self.RIGHT
            elif button_a.was_pressed() and self.x == self.RIGHT_EDGE:
                self.direction = self.LEFT
class Game:
    """Owns the enemy and player and advances the game one frame at a time."""
    def __init__(self):
        self.enemy = Enemy()
        self.player = Player()
        # Frames per second; the main loop paces itself from this value.
        self.frame_rate = 1
    def detect_collisions(self):
        """
        Have the player and the enemy collided?
        """
        return self.player.get_position() in self.enemy.get_positions()
    def do_frame(self):
        """
        Called once per frame to advance the game state
        """
        # Adjust the speed as the player's score gets higher
        # (But don't let it exceed the actual frame rate)
        self.frame_rate = max(1, min(100, self.player.score))
        if self.player.alive:
            display.clear()
            self.enemy.move()
            self.player.act_on_input()
            self.player.move()
            if self.detect_collisions():
                self.player.die()
            else:
                # Only redraw when the player survived this frame.
                self.enemy.draw()
                self.player.draw()
game = Game()
while True:
timestamp = running_time()
game.do_frame()
# Keep the frame rate consistent
new_timestamp = running_time()
time_taken = (new_timestamp - timestamp)
interval = 1000 // game.frame_rate
if time_taken < interval:
sleep(interval - time_taken)
timestamp = new_timestamp
| Python | 0 | |
70d912bfb1ccec03edfe92b9b2c87610346c8f42 | Add blocking migration for new domain db | corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py | corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations
from corehq.doctypemigrations.djangomigrations import assert_initial_complete
from corehq.doctypemigrations.migrator_instances import domains_migration
class Migration(migrations.Migration):
dependencies = [
('doctypemigrations', '0005_auto_20151013_0819'),
]
operations = [
migrations.RunPython(assert_initial_complete(domains_migration))
]
| Python | 0 | |
c76028ee46c3ea5d1021dcb87a78dec98d032367 | Add file level transformation tests | data/file_level_transformation_test.py | data/file_level_transformation_test.py | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from data.file_level_transformation import *
class FileLevelTransformationTest(unittest.TestCase):
def test_get_value_dict(self):
column_names = {'col1': int, 'col2': str, 'col3': eval}
mock_df = pd.DataFrame([["1","a","[3, 4]"], ["5","b","[]"]],
columns=list(column_names.keys()))
first_row = mock_df.iloc[0]
second_row = mock_df.iloc[1]
first_row_result = {}
for column in column_names:
first_row_result[column] = column_names[column](first_row[column])
second_row_result = {}
for column in column_names:
second_row_result[column] = column_names[column](second_row[column])
expected_first_result = {'col1': 1, 'col2': 'a', 'col3': [3,4]}
expected_second_result = {'col1': 5, 'col2': 'b', 'col3': []}
self.assertEqual(first_row_result, expected_first_result)
self.assertEqual(second_row_result, expected_second_result)
def test_transform_file_versions(self):
file_data_dict = defaultdict(FileData)
mock_file_versions = {'file1': 1, 'file2': 2, 'file3': 5}
for file_name, version in mock_file_versions.items():
file_data_dict[file_name].data['file versions'] = version
expected_results = {'file1': {'file versions': 1},
'file2': {'file versions': 2},
'file3': {'file versions': 5}}
self.assertEqual(str(dict(file_data_dict)), str(expected_results))
def test_transform_file_changes(self):
file_data_dict = defaultdict(FileData)
mock_file_related_values = {'files changes': [('file1', 20, 10, 30),
('file2', 50, 100, 150)]}
file_changes = mock_file_related_values['files changes']
for file_change in file_changes:
file_name, addition, deletion, changes = file_change
file_data_dict[file_name].data['files changes'] = \
(addition, deletion, changes)
expected_results = {'file1': {'files changes': (20, 10, 30)},
'file2': {'files changes': (50, 100, 150)}}
self.assertEqual(str(dict(file_data_dict)), str(expected_results))
def test_transform_review_comments(self):
file_data_dict = defaultdict(FileData)
mock_file_related_values = {'review comments msg':
[('file1', 'This file looks good to me'),
('file2', 'I wont approve this change'),
('file2', 'Please change this back')]}
review_comments_msg = mock_file_related_values['review comments msg']
for review_msg in review_comments_msg:
file_name, msg = review_msg
if 'review comments msg' not in file_data_dict[file_name].data or \
not file_data_dict[file_name].data['review comments msg']:
file_data_dict[file_name].data['review comments msg'] = []
file_data_dict[file_name].data['review comments msg'].append(msg)
expected_results = {'file1': {'review comments msg':
['This file looks good to me']},
'file2': {'review comments msg':
['I wont approve this change',
'Please change this back']}}
self.assertEqual(str(dict(file_data_dict)), str(expected_results))
def test_transform_pr_related_signals(self):
file_data_dict = defaultdict(FileData)
mock_pr_related_values = {'num review comments': 10,
'num issue comments': 5,
'approved reviewers': ['kj10bc', '19uvba']}
mock_file_names = ['file1', 'file2']
mock_repo_name = 'google/jax'
for file_name in mock_file_names:
if not file_data_dict[file_name].file_name:
file_data_dict[file_name].file_name = file_name
if not file_data_dict[file_name].repo_name:
file_data_dict[file_name].repo_name = mock_repo_name
for column in mock_pr_related_values:
value = mock_pr_related_values[column]
file_data_dict[file_name].data[column] = value
expected_results = {'file1':
{'num review comments': 10,
'num issue comments': 5,
'approved reviewers': ['kj10bc', '19uvba']},
'file2':
{'num review comments': 10,
'num issue comments': 5,
'approved reviewers': ['kj10bc', '19uvba']}}
self.assertEqual(str(dict(file_data_dict)), str(expected_results))
def test_count_check_run_status(self):
mock_check_run_results = ['passed', 'failed', 'passed', 'passed']
num_passed = 0
num_failed = 0
for check_run_result in mock_check_run_results:
if check_run_result == 'passed':
num_passed += 1
if check_run_result == 'failed':
num_failed += 1
self.assertEqual(num_passed, 3)
self.assertEqual(num_failed, 1)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
0378f8cde69a18d954341b861a724592ef7a5949 | Extend RANSAC example with comparison to BaggingRegressor | examples/linear_model/plot_ransac.py | examples/linear_model/plot_ransac.py | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, ensemble
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSAC(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Robustly fit linear model with bagged linear regressor
model_bagged = ensemble.BaggingRegressor(linear_model.LinearRegression())
model_bagged.fit(X, y)
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
line_y_bagged = model_bagged.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='RANSAC inliers')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='RANSAC outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.plot(line_X, line_y_bagged, '-y', label='Bagging regressor')
plt.legend(loc='lower left')
plt.show()
| """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.RANSAC(linear_model.LinearRegression())
model_robust.fit(X, y)
inlier_mask = model_robust.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
| Python | 0 |
d0e5ea752912b10e473b2a05da9196800eb6ca86 | Add an example for the RedisLock | examples/redis_lock.py | examples/redis_lock.py | import random
from diesel import fork, quickstop, quickstart, sleep
from diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired
"""Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good
example of how to use the RedisLock class"""
key = 'test-lock-key'
incr_key = 'test-incr-key'
counter = 0
"""If sleep_factor > lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop"""
lock_timeout = 3
sleep_factor = 1
def take_lock():
    """Acquire the shared Redis lock, then read-sleep-increment the counter key.

    The sleep between GET and SET deliberately widens the race window: if the
    lock failed to serialize access, concurrent workers would lose updates and
    the final consistency assert in main() would fail.
    """
    global counter
    client = RedisClient('localhost', 6379)
    try:
        with RedisLock(client, key, timeout=lock_timeout) as lock:
            v = client.get(incr_key)
            sleep(random.random() * sleep_factor)
            client.set(incr_key, int(v) + 1)
            # Local tally of successful increments, compared to Redis in main().
            counter += 1
    except LockNotAcquired:
        # Could not get the lock before it timed out; skip this increment.
        pass
def main():
client = RedisClient('localhost', 6379)
client.delete(key)
client.set(incr_key, 0)
for _ in xrange(500):
fork(take_lock)
if random.random() > 0.1:
sleep(random.random() / 10)
sleep(2)
assert counter == int(client.get(incr_key)), 'Incr failed!'
quickstop()
quickstart(main)
| Python | 0.000003 | |
1d77849b048c424ebc042a61c047c2c74e27277f | minus 1 | leetcode_python/zigzag_conversion.py | leetcode_python/zigzag_conversion.py | class Solution:
# @return a string
def convert(self, s, nRows):
if nRows == 1:
return s
result = [[] for i in range(nRows)]
for i, c in enumerate(s):
if (i / (nRows - 1)) % 2 == 0:
result[i % (nRows - 1)].append(c)
else:
result[nRows - 1 - i % (nRows - 1)].append(c)
result = [''.join(row) for row in result]
result = ''.join(result)
return result
| Python | 0.999994 | |
d0b8c68ae3c8acbc3d5dfe13842e3c41a198b978 | Add script to fix all notions | fix_notions_db.py | fix_notions_db.py | from alignements_backend.db import DB
from alignements_backend.notion import Notion
for notion in DB.scan_iter(match='notion:*'):
n = Notion(list(DB.sscan_iter(notion)))
| Python | 0.000001 | |
ad6e0bad22b0c5b0e6f97ceb13694ab804041443 | Add model resources. | tracker/api.py | tracker/api.py | from tastypie.resources import ModelResource
from tracker.models import Task, WorkSession
from django.contrib.auth.models import User
from tastypie import fields
class UserResource(ModelResource):
    """Tastypie API resource for Django auth users, registered as 'user'."""
    class Meta:
        # Expose every user; tastypie derives the serialized fields from the model.
        queryset = User.objects.all()
        resource_name = 'user'
class TaskResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
class Meta:
queryset = Task.objects.all()
resource_name = 'task'
class WorkSessionResource(ModelResource):
user = fields.ForeignKey(UserResource, 'user')
task = fields.ForeignKey(TaskResource, 'task')
class Meta:
queryset = WorkSession.objects.all()
resource_name = 'work_session'
| Python | 0 | |
8f05695a153eef415f752c42c2f737de3f120b55 | Create dgen_red_a.py | bin/dgen_red_a.py | bin/dgen_red_a.py | #!/usr/bin/env python
"""
Created on March 4, 2016
@author: Gus Segura
"""
# imports
import time
import random
import base64
import os
import sys
import math
import json
import redis
from collections import OrderedDict
# redis server connection
# Connection to Database: this is how we work with Redis
database = redis.StrictRedis(host='localhost', port=6379, db=0)
# Output file for testing when not pushing to Kafka, Redis, SparkStreaming.
# pwd = os.path.dirname(__file__)
# print pwd
# outputpath = os.path.normpath(pwd + '/../sample_data/' + sys.argv[1])
# outputpath = os.path.normpath(sys.argv[1])
# outputpath = "file.log"
# print outputpath
# startng values for simulated sensors
start = time.time()
start_value = 0
baseTemp = 32.0
basePresure = 1000
baseLevel = 10
jmsg = {}
# create new redis cache object : TODO - Migrate Object Creation
def Sensmsg(object):
""" Holds the model for Sensor Message
Performs sensor message data storage management using the next data structure:
next-sensor-msg-id: <uid> - holds the next sensor message id to generate
"sensmsg:<uid>": {
timestamp:<timestamp>
timezone:<timezone>
millisec:<millis>
sensname:<sensor_name>
senstype:<sensor_type>
metric:<value>
}
"""
pass
# create dictionary
def create_jmsg(timestamp, timezone, millis, sensor, senstype, metric):
    """Build an ordered sensor-reading message, print it as JSON, and return it.

    Fields equal to the empty string are omitted; insertion order is fixed
    (timestamp, timezone, millis, sensor, senstype, metric).
    """
    fields = (
        ("timestamp", timestamp),
        ("timezone", timezone),
        ("millis", millis),
        ("sensor", sensor),
        ("senstype", senstype),
        ("metric", metric),
    )
    msg = OrderedDict((name, value) for name, value in fields if value != "")
    print(json.dumps(msg))
    return msg
# main infinite loop
while (True):
t = time.strftime('%Y-%m-%dT%H:%M:%S')
timezone = time.strftime('%z')
millis = "%.3d" % (time.time() % 1 * 1000)
sin_value = math.sin(start_value)
start_value += 1
#open file for append
# outputfile = open(outputpath, 'a+')
# sensor name
#create random values - well match sensor id to type for now.
sensor = random.sample(['sen/1', 'sen/2', 'sen/3', 'send/4'], 1)[0]
# metric type
metric = random.sample(['currentTemp', 'currentPresure', 'currentLevel'], 1)[0]
# case -- yuk: python uses if else.
# -------------------------------- #
if metric == 'currentTemp':
baseTemp = baseTemp + sin_value
if baseTemp <= 0:
baseTemp = 32.0 # reset if sin function takes you negative
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/1", metric, baseTemp)
if metric == 'currentPresure':
basePresure = basePresure + sin_value*10
if basePresure <= 0:
basePresure = 1000 # reset if sin function takes you negative
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/2", metric, basePresure)
if metric == 'currentLevel':
baseLevel = baseLevel + sin_value*.10
if baseLevel <= 0:
baseLevel = 10
# create message dictionary
jmsg = create_jmsg(t,timezone,millis, "sen/3", metric, baseLevel)
# TODO: Push to Redis
msg_id = database.incr("next-senmsg-id")
print (msg_id)
# "HMSET" allows to set many keys for hash map
database.hmset("sensmsg:{0}".format(msg_id),
{
"timestamp":jmsg.get("timestamp"),
"timezone":jmsg.get("timezone"),
"millisec":jmsg.get("millis"),
"sensname":jmsg.get("sensor"),
"senstype":jmsg.get("senstype"),
"metric":jmsg.get("metric")
}
)
# sleep to slow down generation
time.sleep( .7750 / 1000.0 )
# reset values for next cycle
jmsg = {}
| Python | 0.000011 | |
c97680113fb25ed43e96c26d02bfd57e15e427b8 | Add missing migrations | nodeconductor/billing/migrations/0004_invoice_usage_pdf.py | nodeconductor/billing/migrations/0004_invoice_usage_pdf.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('billing', '0003_invoice_status'),
]
operations = [
migrations.AddField(
model_name='invoice',
name='usage_pdf',
field=models.FileField(null=True, upload_to=b'invoices_usage', blank=True),
preserve_default=True,
),
]
| Python | 0 | |
e456e4799f5cee13ce1b5f93a9cc91b28059db16 | Remove tests that use real data from the individual inferred test | emission/individual_tests/TestMetricsInferredSections.py | emission/individual_tests/TestMetricsInferredSections.py | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import *
import unittest
import logging
import arrow
import os
import emission.core.get_database as edb
import emission.core.wrapper.localdate as ecwl
import emission.tests.common as etc
import emission.analysis.intake.cleaning.filter_accuracy as eaicf
import emission.storage.timeseries.format_hacks.move_filter_field as estfm
import emission.storage.decorations.local_date_queries as esdldq
from emission.net.api import metrics
class TestMetricsInferredSections(unittest.TestCase):
def setUp(self):
self.seed_mode_path = etc.copy_dummy_seed_for_inference()
etc.setupRealExample(self,
"emission/tests/data/real_examples/shankari_2015-aug-21")
self.testUUID1 = self.testUUID
etc.setupRealExample(self,
"emission/tests/data/real_examples/shankari_2015-aug-27")
etc.runIntakePipeline(self.testUUID1)
etc.runIntakePipeline(self.testUUID)
logging.info(
"After loading, timeseries db size = %s" % edb.get_timeseries_db().count())
self.aug_start_ts = 1438387200
self.aug_end_ts = 1441065600
self.day_start_dt = esdldq.get_local_date(self.aug_start_ts, "America/Los_Angeles")
self.day_end_dt = esdldq.get_local_date(self.aug_end_ts, "America/Los_Angeles")
def tearDown(self):
self.clearRelatedDb()
os.remove(self.seed_mode_path)
def clearRelatedDb(self):
edb.get_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID})
edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID})
edb.get_timeseries_db().delete_many({"user_id": self.testUUID1})
edb.get_analysis_timeseries_db().delete_many({"user_id": self.testUUID1})
edb.get_pipeline_state_db().delete_many({"user_id": self.testUUID1})
def testCountNoEntries(self):
# Ensure that we don't crash if we don't find any entries
# Should return empty array instead
# Unlike in https://amplab.cs.berkeley.edu/jenkins/job/e-mission-server-prb/591/
met_result_ld = metrics.summarize_by_local_date(self.testUUID,
ecwl.LocalDate({'year': 2000}),
ecwl.LocalDate({'year': 2001}),
'MONTHLY', ['count'], True)
self.assertEqual(list(met_result_ld.keys()), ['aggregate_metrics', 'user_metrics'])
self.assertEqual(met_result_ld['aggregate_metrics'][0], [])
self.assertEqual(met_result_ld['user_metrics'][0], [])
met_result_ts = metrics.summarize_by_timestamp(self.testUUID,
arrow.get(2000,1,1).timestamp,
arrow.get(2001,1,1).timestamp,
'm', ['count'], True)
self.assertEqual(list(met_result_ts.keys()), ['aggregate_metrics', 'user_metrics'])
self.assertEqual(met_result_ts['aggregate_metrics'][0], [])
self.assertEqual(met_result_ts['user_metrics'][0], [])
if __name__ == '__main__':
import emission.tests.common as etc
etc.configLogging()
unittest.main()
| Python | 0.000005 | |
3d19606b83f6a4a7906f88b15c6e215620394560 | Implemented the Ford-Fulkerson algorithm | ford_fulkerson.py | ford_fulkerson.py | #!/usr/bin/env python
#coding: UTF-8
#
# Implementation of the Ford-Fulkerson algorithm to solve the maximum flow problem.
#
# Copyright (c) 2013 Samuel Groß
#
from graph import *
from basics import depth_first_search
def solve_max_flow_ff(graph, s, t):
    """
    Solves the maximum flow problem using the Ford-Fulkerson algorithm for the
    given graph and source/target node.

    Repeatedly finds an augmenting path from s to t via depth-first search,
    pushes the path's bottleneck capacity along it, and maintains residual
    (reverse) edges.  When no augmenting path remains, the temporary residual
    edges are converted back into 'load' annotations on the original edges.

    NOTE(review): edges are added to / removed from the graph while iterating
    over path.edges() and graph.edges() -- this relies on those iterators
    tolerating concurrent modification; confirm against the graph module.
    """
    while True:
        path = depth_first_search(graph, s, t)
        if path is None:
            break
        # find maximum capacity on the current path
        min_capacity = None
        for edge in path.edges():
            if min_capacity is None or edge.capacity < min_capacity:
                min_capacity = edge.capacity
        # subtract min_capacity from all edges and add return edge
        for edge in path.edges():
            edge.capacity -= min_capacity
            if not graph.has_reverse_edge(edge):
                # Mark freshly created residual edges so the cleanup pass below can find them.
                graph.add_edge(edge.destination(), edge.source(), {"capacity" : min_capacity, "tmp" : True})
            else:
                graph.get_reverse_edge(edge).capacity += min_capacity
            if edge.capacity == 0:
                # Saturated edges leave the residual graph entirely.
                graph.remove_edge(edge)
    # reverse edges and cleanup
    for edge in graph.edges():
        if hasattr(edge, "tmp"):
            if graph.has_reverse_edge(edge):
                # The residual capacity accumulated here equals the flow pushed
                # over the original (reverse) edge.
                graph.get_reverse_edge(edge).load = edge.capacity
            else:
                graph.add_edge(edge.destination(), edge.source(), {"load" : edge.capacity})
            graph.remove_edge(edge)
| Python | 0.998064 | |
321463a5d7f102431ed286d57d1a8fa8c576cca7 | add plotting fns | terrapin/plot.py | terrapin/plot.py | import matplotlib.pyplot as plt
def flow_grid(dem, angles):
    # Placeholder: presumably intended to plot flow directions over the DEM
    # (module imports matplotlib.pyplot) -- not yet implemented.
    pass
72013d0dba1bd75cab86f00a5a23496f87b66dca | First work on the rhythm version | 0-03_rhythm.py | 0-03_rhythm.py | from music import *
from random import *
from itertools import *
# --- INPUT ---
# Note there's currently very little flexibility in the input file structure
# open the file and read the lines as one chord per line
fname = "Autumn.txt"
with open(fname) as f:
all_chords = f.readlines()
# Remove the 'newline' characters from the elements
all_chords = [x.strip('\n') for x in all_chords]
# set up a phrase object
improv = Phrase()
# --- BREAK INTO FOUR-BAR PHRASES ---
# In this version, we will deal with the improvisation four bars at a time
# The following two lines create a loop over 8 chords at a time
for chord1, chord2, chord3, chord4, chord5, chord6, chord7, chord8 in izip(*[iter(all_chords)]*8):
chords = [chord1, chord2, chord4, chord4, chord5, chord6, chord7, chord8]
# --- ANALYSIS ---
# Set up empty lists that will hold the chord note sequences
roots = []
thirds = []
fifths = []
sevenths = []
ninths = []
preferred_scales = []
# Identify the chord notes of each chord and write them into the lists
for chord in chords:
quality = '' # reset the chord quality to be empty
flat5 = False # reset the flat 5th flag
chars = len(chord) # the chord symbol might be any length from 1 to 5+
root = chord[0] # the first symbol must be the root
if chars == 1:
quality = 'Major' # a single-letter chord is a Major
else: # second digit could be sharp/flat or a modifier
if chord[1] == 'b': # flat
root+= 'F'
elif chord[1] == 's': # sharp
root+= 'S'
elif chord[1] == 'M': # major 7th
quality = 'Major'
elif chord[1] == 'm' or chord[1] == '-': # minor 7th
quality = 'minor'
elif chord[1] == '7': # dominant 7th
quality = 'dominant'
root+= '3' # this is a string version of the root
rt_val = eval(root) # evaluate the numerical value of the root note (python music function)
roots.append(rt_val) # the roots are known
# third digit would either be the main modifier or a subsequent modifier
if chars == 2 and quality == '': # would be a sharp or flat with no further symbols, i.e. Major
quality = 'Major'
if chars > 2 and quality == '':
if chord[2] == 'M':
quality = 'Major'
elif chord[2] == 'm' or chord[2] == '-':
quality = 'minor'
elif chord[2] == '7':
quality = 'dominant'
# check if there's a flat 5th on a minor chord by searching for 'b5'
if quality == 'minor' and chars > 3 and chord[2] == 'b' and chord[3] == '5' \
or quality == 'minor' and chars > 4 and chord[3] == 'b' and chord[4] == '5' \
or quality == 'minor' and chars > 5 and chord[4] == 'b' and chord[5] == '5':
flat5 = True
# Now we know the root and the chord quality. We can build up the chords
if quality == 'Major':
third = rt_val+4
fifth = rt_val+7
seventh = rt_val+11
ninth = rt_val+14
preferred_scale = 'Major'
elif quality == 'minor':
third = rt_val+3
fifth = rt_val+7
seventh = rt_val+10
ninth = rt_val+14
preferred_scale = 'Dorian'
if flat5 == True: # don't forget to flatten the 5th if required
fifth = rt_val+6
ninth = rt_val+13 # flatten the 9th of a minor flat 5
preferred_scale = 'Min-b5'
else: # at this stage, anything unspecified will be treated as a dom 7th
third = rt_val+4
fifth = rt_val+7
seventh = rt_val+10
ninth = rt_val+14
preferred_scale = 'Mixolydian'
# put these chord notes into the chord note lists
thirds.append(third)
fifths.append(fifth)
sevenths.append(seventh)
ninths.append(ninth)
preferred_scales.append(preferred_scale)
# End of loop: now it goes on to the next chord in the chord list
# --- ANALYSIS IS COMPLETE ---
# We now have lists containing the roots, 3rds, 5ths, 7ths and 9ths in sequence
# --- MELODY GENERATION WITH RHYTHM ---
# The steps are as follows:
# 1. pick target chord notes to end each 2-bar section on
# 2. pick phrase shapes for each 2-bar section
# 3. pick a phrase melodic range; this may be determined partly by the end notes
# 4. construct the phrase based on the parameters above, using scale notes
# Pick two target notes
# Randomly choose either the 3rd or 7th in the lower, middle or higher octave
degree1 = choice(['third', 'seventh'])
octave1 = choice([3, 4, 5])
degree2 = choice(['third', 'seventh'])
octave2 = choice([3, 4, 5])
# Translate those selections into actual notes, using the 'third' and 'seventh' lists
if degree1 == 'third':
target_note1 = thirds[2]
else: target_note1 = sevenths[2]
# The lists are in octave 3. Add 12 or 24 for octave 4 or 5
target_note1 = target_note1+((octave1-3)*12)
# Now do the same for the second target note
if degree2 == 'third':
target_note2 = thirds[6]
else: target_note2 = sevenths[6]
target_note2 = target_note2+((octave2-3)*12)
# Pick two phrase shapes. These are simply numbered 0 to 5 so it's an integer selection
shape1 = randrange(6)
shape2 = randrange(6)
# It can be interesting to have the same shape over different chords.
# Toss a coin to see if we'll force shape2 to match shape1
if choice([True, False]):
shape2 = shape1
# Pick a target melodic range in general terms (small, medium, large)
# This will apply to the whole four bars
target_range = choice(['small', 'medium', 'large'])
# OUTPUT
# Play.midi(smallSteps) | Python | 0.997938 | |
ed23fb301503d331af243a37d1b0a934d5d2f21c | add laser plugin object | mythril/laser/ethereum/plugins/plugin.py | mythril/laser/ethereum/plugins/plugin.py | from mythril.laser.ethereum.svm import LaserEVM
class LaserPlugin:
    """Base class for laser plugins.

    Functionality that the symbolic execution process does not need to
    depend on can be packaged as a laser plugin.  When a plugin is loaded
    it receives the laser virtual machine through :func:`initialize`,
    where a plugin will typically register a number of hooks on the VM.

    Plugins can steer execution by raising the signals defined in
    mythril.laser.ethereum.plugins.signals; for example, a pruning plugin
    might raise the PluginSkipWorldState signal.
    """

    def initialize(self, symbolic_vm: LaserEVM):
        """Attach this plugin to the given symbolic virtual machine.

        :param symbolic_vm: symbolic virtual machine to initialize the
            laser plugin on
        """
        raise NotImplementedError
550469032843eb2af3b4a9faaed34d9754f00700 | Add command to test managers emails | geotrek/common/management/commands/test_managers_emails.py | geotrek/common/management/commands/test_managers_emails.py | from django.core.mail import mail_managers
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Verify outgoing email configuration by mailing the site managers."""

    help = "Test if email settings are OK by sending mail to site managers"

    def execute(self, *args, **options):
        # fail_silently=False makes a broken SMTP setup raise loudly,
        # which is the whole point of this diagnostic command.
        mail_managers(
            u'Test email for managers',
            u'If you receive this email, it seems that conf is OK !',
            fail_silently=False,
        )
| Python | 0.000002 | |
edb9500824faffd9f1d0d1b59ca29966e3b18282 | Customize behave formatter to output json | modules/formatter_record.py | modules/formatter_record.py | from behave.formatter.json import PrettyJSONFormatter
from pprint import pprint
class RecordFormatter(PrettyJSONFormatter):
    """behave formatter that augments the pretty-JSON output with the custom
    per-step data (REST calls, dates, SDK details) attached to steps at runtime."""
    name = "super"
    description = "Formatter for adding REST calls to JSON output."
    # NOTE(review): this is a CLASS-level mutable dict, shared by every
    # RecordFormatter instance in the process — confirm that is intended.
    jsteps = {} # Contains an array of features, that contains array of steps in each feature

    # Overriding Background Function. This runs evertime a Background is ran.
    # This step
    def background(self, background):
        # Let the parent run first
        super(RecordFormatter, self).background(background)
        # Check if the current feature has a name - Could be removed
        if (self.isnotBackground()):
            # Remove all the background steps from our jsteps, as they are not required
            # NOTE(review): attribute access `.name` here vs `['name']`
            # everywhere else — one of the two is likely wrong; verify against
            # the behave formatter internals.
            for step in background.steps:
                self.jsteps[self.current_feature_element.name].pop(0)

    # Overriding Step feature. This is called everytime a step is found in feature file. This happens before the feature/scenario are executed.
    def step(self, step):
        # Let the parent run first
        super(RecordFormatter, self).step(step)
        # Check if the current feature has a name - Could be removed
        if (self.isnotBackground()):
            # Append the step into our own collection of jsteps.
            self.jsteps[self.current_feature_element['name']].append(step);

    # Overriding End of Feature. This is ran once the entire feature has completed running
    def eof(self):
        # Iterate through each scenarios
        for scenario in self.current_feature_data['elements']:
            # Check if Scenario valid
            if (scenario['name'] != ''):
                steps = scenario['steps']
                jscenariosteps = self.jsteps[scenario['name']]
                status = "passed" # Add Scenario status
                # Iterate through the jstep, and step results
                for (j, jstep) in enumerate(jscenariosteps):
                    # Check if any of the above status failed, if so, mark the status as failed
                    if ('result' in steps[j]):
                        if steps[j]['result']['status'] == 'failed':
                            status = 'failed'
                    # Add configurations in scenario level. generally used for sdk_language and sdk_version
                    if (hasattr(jstep, "details")):
                        scenario['details'] = jstep.details
                    if (hasattr(jstep, "date")):
                        steps[j]['date'] = jstep.date
                    # Check if jstep has attribute calls, where our custom data is stored - Could be generalized further
                    if (hasattr(jstep, "calls") and 'result' in steps[j]):
                        # add the calls to our step object, that would be later added to json output.
                        steps[j]['result']['calls'] = jstep.calls
                # Add feature name and Status as a part of scenario
                scenario['feature'] = self.current_feature.name
                scenario['status'] = status
        # Let the parent run last here
        super(RecordFormatter, self).eof()

    # Returns True for named (non-background) feature elements, lazily
    # creating their jsteps bucket on first sight.
    def isnotBackground(self):
        if(self.current_feature_element['name'] != ''):
            if(self.current_feature_element['name'] not in self.jsteps):
                self.jsteps[self.current_feature_element['name']] = []
            return True
        return False
| Python | 0.000001 | |
def7e3aeaf3b0cd1a6486c72c68a3baad77ef3e5 | Create leetcode-50.py | python_practice/leetCode/leetcode-50.py | python_practice/leetCode/leetcode-50.py | class Solution:
def myPow(self, x: 'float', n: 'int') -> 'float':
return x**n
def myPow2(self, x: 'float', n: 'int') -> 'float':
if n == 0:
return 1
if n < 0:
n = 0-n
x = 1/x
return x**(n%2)*myPow2(x*x, n//2)
| Python | 0.000004 | |
71e431a5eccc6483847888fb0f8f5f30f182913a | add a script to convert xml documentation into json | doc/xmldoc2json.py | doc/xmldoc2json.py | #!/usr/bin/python
import sys
import xml.etree.ElementTree as ET
import json
def parseClass(data):
    """Convert a <class> XML element into a plain dict of its documentation."""
    cls = dict(data.attrib)
    cls['brief_description'] = data.find("brief_description").text.strip()
    cls['description'] = data.find("description").text.strip()

    # <methods> is always present; the remaining sections are optional.
    cls['methods'] = [parseMethod(m) for m in data.find("methods")]

    signals = data.find("signals")
    cls['signals'] = [parseMethod(s) for s in signals] if signals is not None else []

    constants = data.find("constants")
    cls['constants'] = [parseConstant(c) for c in constants] if constants is not None else []

    members = data.find("members")
    cls['properties'] = [parseProperty(m) for m in members] if members is not None else []

    theme_items = data.find("theme_items")
    cls['theme_properties'] = [parseProperty(t) for t in theme_items] if theme_items is not None else []

    return cls
def parseMethod(data):
    """Convert a <method> (or <signal>) XML element into a plain dict."""
    method = dict(data.attrib)
    method['description'] = data.find("description").text.strip()
    ret = data.find("return")
    method['return_type'] = ret.attrib["type"] if ret is not None else ""
    # signals have no qualifiers attribute; normalise it to ""
    method.setdefault("qualifiers", "")
    method["arguments"] = [parseArgument(arg) for arg in data.iter('argument')]
    return method
def parseArgument(data):
    """Convert an <argument> XML element into a plain dict.

    The positional "index" attribute is dropped (argument order is already
    preserved by the surrounding list) and "default" is renamed to
    "default_value", defaulting to the empty string.
    """
    dictArg = dict(data.attrib)
    # BUG FIX: the original tested `"dictArg" in dictArg` instead of
    # `"index" in dictArg`, so the index attribute was never removed.
    if "index" in dictArg:
        dictArg.pop("index")
    dictArg["default_value"] = dictArg.pop("default", "")
    return dictArg
def parseConstant(data):
    """Convert a <constant> XML element into a plain dict."""
    const = dict(data.attrib)
    const["description"] = data.text.strip()
    return const
def parseProperty(data):
    """Convert a <member>/<theme_item> XML element into a plain dict."""
    prop = dict(data.attrib)
    prop["description"] = data.text.strip()
    return prop
def main():
    """Parse the XML file named on the command line and print it as JSON."""
    if len(sys.argv) >= 2:
        # BUG FIX: the file handle from open() was never closed; a context
        # manager releases it deterministically.
        with open(sys.argv[1], 'r') as f:
            tree = ET.parse(f)
        classes = {}
        for cls in tree.getroot():
            dictCls = parseClass(cls)
            classes[dictCls['name']] = dictCls
        jsonContent = json.dumps({"classes": classes, "version": "2.1.3"}, ensure_ascii=False, indent=2)
        print(jsonContent)

if __name__ == '__main__':
    main()
| Python | 0.000003 | |
4ca8d43d8e6ec243d9812bb313a8e7a21ad781ea | Add DB exercise. | Exercise/DB.py | Exercise/DB.py | import mysql.connector
conn = mysql.connector.connect(user='root', password='blue', database='test')
cursor = conn.cursor()
cursor.execute('create table user (id varchar(20) primary key, name varchar(20))')
cursor.execute('insert into user (id, name) values (%s, %s)', ['1', 'Dai'])
print(cursor.rowcount)
conn.commit()
cursor.close()
cursor = conn.cursor()
cursor.execute('select * from user where id = %s', ('1',))
values = cursor.fetchall()
print(values)
cursor.close()
conn.close()
| Python | 0 | |
c4e1e034a3f0be3590dc78c5683d9deaf44d696f | add example of escape character | scripts/escape/backslash.py | scripts/escape/backslash.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
'''
This tests the acceptance of backslashes
\\f should be okay
\f is not necessarily okay, because json.dumps will not dump this
'''
print json.dumps({
"foogroup": {
"hosts": [
"foobar"
]
},
"_meta": {
"hostvars": {
"foobar": {
"host_specific_var": "ba\frrr",
"from_issue": "H%]~7\f0$ and this... O'Jw\u00188\u0006\b... "
}
}
}
}, indent=4) | Python | 0.000001 | |
c5c0d3f447b1295bdbbda60c38c4123f2e8d871c | add gnucash2ledger converter | gnucash2ledger.py | gnucash2ledger.py | #!/usr/bin/env python
import sys
import codecs
import gnucash
# Wrap stdout so Unicode commodity/account names are emitted as UTF-8.
out = codecs.getwriter('UTF-8')(sys.stdout)
# The gnucash file to convert is the single required command-line argument.
if len(sys.argv) == 1:
    sys.stderr.write("Invocation: %s gnucash_filename\n" % sys.argv[0])
    sys.exit(1)
data = gnucash.read_file(sys.argv[1])
def format_commodity(commodity):
    """Return the commodity mnemonic formatted for a ledger file.

    Purely alphabetic ASCII mnemonics (e.g. "USD") are written bare; anything
    else (unicode symbols, digits, spaces) is double-quoted, with embedded
    double quotes backslash-escaped.
    """
    mnemonic = commodity.mnemonic
    try:
        if mnemonic.encode('ascii').isalpha():
            return mnemonic
    except UnicodeError:
        # non-ASCII mnemonic: fall through to the quoted form
        pass
    # FIX: implements the old TODO — escape embedded '"' characters.
    return "\"%s\"" % mnemonic.replace("\"", "\\\"")
def full_acc_name(acc):
    """Return the account name as a colon-separated path, e.g. "Assets:Cash".

    Recursion stops two ancestor levels below the top, so the top-most
    bookkeeping accounts are not part of the path.
    """
    if acc.parent.parent.parent is None:
        return acc.name
    return full_acc_name(acc.parent) + ":" + acc.name
# --- Commodity declarations -------------------------------------------------
commodities = data.commodities.values()
for commodity in commodities:
    if commodity.mnemonic == "":
        continue
    out.write("commodity %s\n" % format_commodity(commodity))
    if commodity.fullname != "":
        out.write("\tnote %s\n" % commodity.fullname)
    out.write("\n")
# --- Account declarations ---------------------------------------------------
accounts = data.accounts.values()
for acc in accounts:
    # ignore "dummy" accounts
    if acc.type is None or acc.type == "ROOT":
        continue
    if str(acc.commodity) == "template":
        continue
    out.write("account %s\n" % (full_acc_name(acc), ))
    if acc.description != "":
        out.write("\tnote %s\n" % (acc.description,))
    # the commodity name is embedded in a quoted ledger expression, so any
    # double quotes must be escaped a second time here
    formated_commodity = format_commodity(acc.commodity)
    formated_commodity = formated_commodity.replace("\"", "\\\"")
    out.write("\tcheck commodity == \"%s\"\n" % formated_commodity)
    out.write("\n")
# --- Prices (sorted chronologically, as ledger expects) ---------------------
# Prices
prices = data.prices.values()
prices.sort(key = lambda x: x.date)
for price in prices:
    date = price.date.strftime("%Y/%m/%d %H:%M:%S")
    out.write("P %s %s %s %s\n" % (date, format_commodity(price.commodity), price.value, format_commodity(price.currency)))
out.write("\n")
# --- Transactions (sorted by posting date) ----------------------------------
transactions = data.transactions.values()
transactions.sort(key=lambda x: x.post_date)
for trans in transactions:
    date = trans.post_date.strftime("%Y/%m/%d")
    out.write("%s * %s\n" % (date, trans.description))
    for split in trans.splits:
        out.write("\t%-40s " % full_acc_name(split.account))
        # foreign-commodity splits record the total price with the "@@" syntax
        if split.account.commodity != trans.currency:
            out.write("%10.2f %s @@ %.2f %s" % (split.quantity, format_commodity(split.account.commodity), abs(split.value), format_commodity(trans.currency)))
        else:
            out.write("%10.2f %s" % (split.value, format_commodity(trans.currency)))
        out.write("\n")
    out.write("\n")
bfbd2c792aacd307f8d7ed68ea0f2a7db681431d | add functions that generate mask image of the target bin | jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py | jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py | #!/usr/bin/env python
import numpy as np
from matplotlib.path import Path
import jsk_apc2016_common.segmentation_helper as helper
from tf2_geometry_msgs import do_transform_point
def get_mask_img(transform, target_bin, camera_model):
    """
    Return a boolean image mask covering the target bin as seen by the camera.

    :param transform: camera_frame -> bbox_frame transform
    :type transform: TransformStamped
    :param target_bin: bin whose four `corners` (stamped points in the bbox
        frame) delimit the region of interest
    :param camera_model: calibrated camera model used for projection
    :returns: (height, width) bool ndarray, True inside the projected bin
    """
    # check frame_id of a point and transform just in case
    assert camera_model.tf_frame == transform.header.frame_id
    assert target_bin.bbox.header.frame_id == transform.child_frame_id

    # Move the bin corners into the camera frame, then project them to pixels.
    transformed_list = [
        do_transform_point(corner, transform)
        for corner in target_bin.corners]
    projected_points = project_points(transformed_list, camera_model)

    # generate an polygon that covers the region
    path = Path(projected_points)
    # Test every pixel coordinate of the image against that polygon.
    x, y = np.meshgrid(
        np.arange(camera_model.width),
        np.arange(camera_model.height))
    x, y = x.flatten(), y.flatten()
    points = np.vstack((x, y)).T
    mask_img = path.contains_points(
        points).reshape(
            camera_model.height, camera_model.width
        ).astype('bool')
    return mask_img
def project_points(points, camera_model):
    """
    Project the four stamped corner points into pixel coordinates.

    :param points: the four corner points of a bin
    :type points: list of geometry_msgs.msg.PointStamped
    :param camera_model: calibrated camera model (same frame as the points)
    :returns: list of (u, v) pixel coordinates, one per input point
    :raises ValueError: if a point's frame differs from the camera frame, or
        if the number of points is not exactly four
    """
    # FIX: the original raised ValueError('undefined') for both failure
    # modes, which gave no hint about what went wrong.
    for point in points:
        if point.header.frame_id != camera_model.tf_frame:
            raise ValueError(
                'point frame %s does not match camera frame %s'
                % (point.header.frame_id, camera_model.tf_frame))
    if len(points) != 4:
        raise ValueError('expected 4 corner points, got %d' % len(points))
    projected_points = []
    for point in points:
        projected_points.append(
            camera_model.project3dToPixel(
                helper.list_from_point(point.point)
            )
        )
    return projected_points
| Python | 0.000003 | |
852c6639bb0a71b9ef2dd81b2830193d0c9fe23d | Create FractalPoke.py | FractalPoke.py | FractalPoke.py | bl_info = {
"name": "FractalPoke",
"author": "Christopher Kopic",
"version": (1, 0),
"blender": (2, 7, 8),
"location": "",
"description": "Iterative Poking inspired by Simon Holmedal's Always Forever",
"warning": "",
"wiki_url": "",
"tracker_url": "",
"category": "Mesh"}
import bpy
from bpy.types import Operator
from bpy.props import FloatProperty, IntProperty, BoolProperty
# Mesh operator that applies the poke tool repeatedly, shrinking (or
# flipping) the poke offset each pass to produce a fractal-like surface.
# The class docstring below doubles as the operator tooltip in Blender's UI,
# so it is left as-is.
class FractalPoke(bpy.types.Operator):
    """Fractal Poke"""
    bl_idname = "mesh.fractal_poke"
    bl_label = "Fractal Poke"
    bl_options = {'REGISTER', 'UNDO'}

    # Number of poke passes; face count grows exponentially with this.
    iterations = IntProperty(
        name = "Iterations",
        default = 3,
        min = 1,
        description = "Be careful as complexity will increase exponentially"
    )
    # Poke offset used on the first pass.
    start_offset = FloatProperty(
        name = "Start Offset",
        default = 1.0,
        description = "Offset for first poke iteration"
    )
    # The offset is multiplied by this after every pass.
    offset_multiplier = FloatProperty(
        name = "Offset Multiplier",
        default = 0.5,
        description = "Increases or decreases offset for each iteration"
    )
    offset_flip = BoolProperty(
        name = "Flip Offset",
        default = False,
        description = "Flips offsetting inward or outward for each iteration"
    )
    grow_selection = BoolProperty(
        name = "Grow Selection",
        default = False,
        description = "Grows selection for each iteration"
    )
    shrink_selection = BoolProperty(
        name = "Shrink Selection",
        default = False,
        description = "Shrinks selection for each iteration"
    )

    # Run the configured number of poke passes on the current selection.
    def execute(self, context):
        my_offset = self.start_offset
        for i in range(self.iterations):
            bpy.ops.mesh.poke(offset = my_offset)
            my_offset *= self.offset_multiplier
            if self.offset_flip:
                my_offset *= -1
            if self.grow_selection:
                bpy.ops.mesh.select_more()
            if self.shrink_selection:
                bpy.ops.mesh.select_less()
        return {'FINISHED'}

    # Only available while editing a mesh object.
    @classmethod
    def poll(cls, context):
        ob = context.active_object
        return ob is not None and ob.mode == 'EDIT'
def register():
    """Register the operator with Blender (add-on enable hook)."""
    bpy.utils.register_class(FractalPoke)

def unregister():
    """Unregister the operator (add-on disable hook)."""
    bpy.utils.unregister_class(FractalPoke)

if __name__ == "__main__":
    # Allow running the script directly from Blender's text editor.
    register()
| Python | 0.000001 | |
2dff378e7f446e83aa7c105bded3f3330fe9fa20 | Add a script to generate a Javascript file encoding_<enc>.js containing encoding and decoding tables for the specified <enc> encoding. Uses Unicode table at location http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/<enc>.TXT. Related to issue #1541. | scripts/make_encoding_js.py | scripts/make_encoding_js.py | """Create a Javascript script to encode / decode for a specific encoding
described in a file available at
http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/<ENCODING>.TXT
"""
import os
import re
import json
import urllib.request
line_re = re.compile("^(0x[A-Z0-9]+)\s+(0x[A-Z0-9]+)*", re.M)
tmpl = "http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/{}.TXT"
encoding = input("Encoding name: ")
req = urllib.request.urlopen(tmpl.format(encoding.upper()))
data = req.read().decode("ascii")
root_dir = os.path.dirname(os.path.dirname(__file__))
libs_dir = os.path.join(root_dir, "www", "src", "libs")
filename = os.path.join(libs_dir, f"encoding_{encoding.lower()}.js")
with open(filename, "w", encoding="utf-8") as out:
out.write("var _table = [")
for line in data.split("\n"):
mo = line_re.match(line)
if mo:
key, value = mo.groups()
out.write(f"{key}, {value or -1},")
out.write("]\n")
out.write("var decoding_table = [],\n encoding_table = []\n")
out.write("""for(var i = 0, len = _table.length; i < len; i += 2){
var value = _table[i + 1]
if(value !== null){
encoding_table[value] = _table[i]
}
decoding_table[_table[i]] = _table[i + 1]
}
$module = {encoding_table, decoding_table}
""")
| Python | 0 | |
f1c65cf208b4a6275214d82a765ad75c47c75715 | add example of how to use KT without defines | examples/cuda-c++/vector_add_defines.py | examples/cuda-c++/vector_add_defines.py | #!/usr/bin/env python
""" This is the example demonstrates how to use Kernel Tuner
to insert tunable parameters into template arguments
without using any C preprocessor defines
"""
import numpy as np
import kernel_tuner as kt
def tune():
    """ This is the example demonstrates how to use Kernel Tuner
        to insert tunable parameters into template arguments
        without using any C preprocessor defines
    """

    kernel_string = """
    template<typename T, int blockSize>
    __global__ void vector_add(T *c, T *a, T *b, int n) {
        auto i = blockIdx.x * blockSize + threadIdx.x;
        if (i<n) {
            c[i] = a[i] + b[i];
        }
    }
    """

    # Problem size and host-side argument buffers.
    n_elems = 10000000
    a = np.random.randn(n_elems).astype(np.float32)
    b = np.random.randn(n_elems).astype(np.float32)
    c = np.zeros_like(b)
    n = np.int32(n_elems)
    args = [c, a, b, n]

    # block_size_x is substituted directly into the template argument list,
    # so no preprocessor defines are needed (defines={}).
    tune_params = {"block_size_x": [128 + 64 * i for i in range(15)]}

    result, env = kt.tune_kernel("vector_add<float, block_size_x>", kernel_string, n_elems, args, tune_params, defines={})

    return result

if __name__ == "__main__":
    tune()
| Python | 0 | |
00cc1f17796897ca2f4351bbea74ee22aad98f14 | Create quadrants_HH_HL_LH_LL.py | quadrants_HH_HL_LH_LL.py | quadrants_HH_HL_LH_LL.py | # python3 for categorizing data into 4 quadrants from 2 numerical fields
# this case is for vis minoirty + avg income in Toronto census tracts
import csv
import statistics as st
# just the toronto cts
tor_cts = []
with open('ct_tor.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
tor_cts.append(row['ctuid'])
var_1 = [] # avg inc
var_2 = [] # perc vis min
with open('in_inc_vis.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['ctuid'] in tor_cts:
try:
var_1.append(float(row['avg_inc']))
perc_vis = float(row['vis_min_pop']) / float(row['total_pop'])
var_2.append(perc_vis)
except:
print(row['ctuid'])
print(len(var_1))
print(len(var_2))
v1b=v2b=0
print("----------------------------------")
# for var 1
print("median", st.median(var_1))
print("mean", st.mean(var_1))
print("input break value:")
v1b = float(input())
# for var 2
print("----------------------------------")
print("median", st.median(var_2))
print("mean", st.mean(var_2))
print("input break value:")
v2b = float(input())
HHc = 0
HLc = 0
LHc = 0
LLc = 0
# break the data via the set breaks
with open('in_inc_vis.csv', 'r') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
if row['ctuid'] in tor_cts:
try:
perc_vis = float(row['vis_min_pop']) / float(row['total_pop'])
inc = float(row['avg_inc'])
# ye olde if statements
if inc > v1b and perc_vis > v2b:
q = 'HH'
HHc += 1
elif inc > v1b and perc_vis <= v2b:
q = 'HL'
HLc += 1
elif inc <= v1b and perc_vis > v2b:
q = 'LH'
LHc += 1
elif inc <= v1b and perc_vis <= v2b:
q = 'LL'
LLc += 1
orow = [row['ctuid'],inc,perc_vis,q]
#print(orow)
except:
#print(row['ctuid'])
None
print("HH", HHc)
print("LH", LHc)
print("HL", HLc)
print("LL", LLc)
| Python | 0.999018 | |
9dae55d2ef2e786799554ec2121cf9ecfe59eb62 | Rename file | dnsdiff/dnsdiff.py | dnsdiff/dnsdiff.py | '''Module to quickly look up and compare NS records for differences'''
import dns.resolver
import pprint
import sys
pp = pprint.PrettyPrinter(indent=4)
def compare_dns(nameservers, domain):
    '''Compares records between nameservers using dnspython.

    Queries the NS records of `domain` against each server in `nameservers`,
    pretty-prints the per-server answers and exits 0 if all servers agree
    (or only one server was given), 1 otherwise.
    '''
    responses = {}
    resolver = dns.resolver.Resolver(configure=False)
    for ns in nameservers:
        ns_list = []
        # BUG FIX: `nameservers` must be a list of server addresses, and the
        # query has to go through this resolver object; the original called
        # dns.resolver.query(), which used the system resolver and silently
        # ignored `ns` altogether.
        resolver.nameservers = [ns]
        answer = resolver.query(domain, 'NS')
        for record in answer:
            ns_list.append(record.target)
        responses[ns] = sorted(ns_list)
    pp.pprint(responses)

    print("Determining differences")
    set_list = []
    for val in responses.values():
        set_list.append(set(val))
    differences = set.difference(*set_list)

    if len(differences) == 0 or len(nameservers) == 1:
        print("No discrepancies found")
        sys.exit(0)
    else:
        print("Discrepancies found!")
        print(differences)
        sys.exit(1)
| Python | 0.000002 | |
bef5333edf60779f645603b3d4c7611867ad7382 | Day25 and final day! yaaaay | day25/code_generator.py | day25/code_generator.py | row = 2978
column = 3083
x = 1
y = 1
value = 20151125
step = 1
while x <= column or y <= row:
if x == step and y == 1:
step += 1
y = step
x = 1
else:
x += 1
y -= 1
value = (value * 252533) % 33554393
if x == column and y == row:
print 'The code for the machine is:', value | Python | 0.999998 | |
d3b4d53e84bdb1f50b244b282d6cb1b3b0d10ee3 | Add scraper | project/scrape.py | project/scrape.py | import requests
import re
from bs4 import BeautifulSoup
def main():
    """Scrape the department list, then scrape each department's courses."""
    department_list = scrape()
    department_scrape(department_list)
def scrape():
    """Fetch the catalog search page and return the department names.

    Also writes the ASCII-sanitised department list to "list.txt" as a
    side effect.
    """
    # Set up pull requests and soup object
    front_html = requests.get("http://general-catalog.berkeley.edu/catalog/gcc_search_menu")
    soup = BeautifulSoup(front_html.content, from_encoding="utf-8")
    # variable for info
    text = []
    # extract department list and write it in a file
    with open("list.txt", "w") as f:
        #solving the ascii problem
        # NOTE(review): problem_str/safe_str below are dead code — safe_str is
        # immediately recomputed per option inside the loop.
        problem_str = u'This is not all ascii\xf8 man'
        safe_str = problem_str.encode('ascii', 'ignore')
        # every <option> on the search page is one department name
        for sp in soup.find_all('option'):
            text.append(sp.string)
            #print(type(sp.string))
            safe_str = sp.string.encode('ascii','ignore')
            f.write(safe_str +"\n")
    return text
#(TEST) testing Biology deparment
def department_scrape(d_list):
# set up post url
url = "http://general-catalog.berkeley.edu/catalog/gcc_search_sends_request"
# set up post parameter
#iterate all
for department in d_list:
payload = {'p_dept_name': department}
# posting website and constructing soup object
r = requests.post(url, params=payload)
soup = BeautifulSoup(r.content, from_encoding="utf-8")
# variable for scrap object
text = []
# iterate the table row element
for sp in soup.find_all("tr"):
text.append(sp.text.strip())
# formatting text array
format_text = []
class_name = []
i = 0
title_indicator = False
after_format_indicator = False
while i < len(text):
if ("Course Format" in text[i]) and title_indicator == False:
i = i - 1
title_indicator = True
after_format_indicator = False
elif "Course Format" in text[i]:
format_text.append(text[i])
title_indicator = False
after_format_indicator = True
if "Prerequisites" in text[i]:
format_text.append(text[i])
if "Description" in text[i]:
format_text.append(text[i])
if title_indicator == True:
class_name.append(text[i])
format_text.append(text[i])
i = i + 1
#### List of spliter with indicator word
s1 = "Course Format:"
s2 = "Prerequisites:"
s3 = "Credit option"
s4 = "Description:"
save_indicator = False
for element in class_name:
name = element + ".txt"
name = name.replace("/", " ")
safe_name = name.encode('ascii', 'ignore')
for info in format_text:
if element in info:
save_indicator = True
if save_indicator == True:
if s4 in info:
save_indicator = False
with open("data/" + safe_name, "w") as f:
problem_str = u'This is not all ascii\xf8 man'
safe_str = info.encode('ascii', 'ignore')
safe_element = element.encode('ascii', 'ignore')
f.write(safe_element + "\n")
f.write(safe_str + "\n")
if __name__ == "__main__":
main()
| Python | 0.000002 | |
be189d9d01f916af87b45f36ac36f7c5d302dbbf | add an experimental command for setting the login background image | kolibri/content/management/commands/background.py | kolibri/content/management/commands/background.py | from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import logging
import os
import shutil
from django.conf import settings
from django.core.management.base import BaseCommand
logger = logging.getLogger(__name__)
class Command(BaseCommand):
    """Management command to set or reset the login screen background image.

    "set <image>" backs up the current background (first time only, so the
    backup always holds the Kolibri default) and copies the new image over
    it; "reset" restores that backup.
    """

    def add_arguments(self, parser):
        subparsers = parser.add_subparsers(dest='command', help="The following subcommands are available.")
        movedir_subparser = subparsers.add_parser(
            name='set',
            cmd=self,
            help="EXPERIMENTAL: Sets the login screen background image"
        )
        movedir_subparser.add_argument(
            'destination',
            type=str,
            help='Image file'
        )
        subparsers.add_parser(
            name='reset',
            cmd=self,
            help="Set default"
        )

    def handle(self, *args, **options):
        user_static_directory = os.path.join(settings.STATIC_ROOT, 'user_module')
        if not os.path.exists(user_static_directory):
            self.stderr.write(self.style.ERROR('\nStatic directory does not exist.'))
            raise SystemExit(1)

        img_path = os.path.join(user_static_directory, 'background.jpg')
        backup_img_path = os.path.join(user_static_directory, 'background-backup')

        if options['command'] == 'set':
            new_img_path = os.path.abspath(os.path.expanduser(options['destination']))
            if not os.path.exists(new_img_path):
                self.stderr.write(
                    self.style.ERROR('\n{} does not exist.').format(options['destination'])
                )
                raise SystemExit(1)
            # Only save a backup if it didn't exist before.
            # This should only back up the default Kolibri image.
            if not os.path.exists(backup_img_path):
                shutil.copy(img_path, backup_img_path)
            shutil.copy(new_img_path, img_path)

        elif options['command'] == 'reset':
            # Restore the default image, if a backup of it exists.
            if os.path.exists(backup_img_path):
                shutil.copy(backup_img_path, img_path)
| Python | 0 | |
1f48fee7ffcef3eefa6aaedb5ca963c10bb7c58c | Add test case for user creation form | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py | {{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py | from django.test import TestCase
from users.forms import ZionsUserCreationForm
from users.models import User
class {{cookiecutter.project_camel_name}}UserCreationTestCase(TestCase):
    """Tests the user creation form's username uniqueness validation."""

    def setUp(self):
        # Existing user whose username deliberately collides with bad_form.
        self.test_user = User.objects.create(
            username='testuser',
            email='test@test.com',
            password='password'
        )
        # Invalid: reuses the already-taken username 'testuser'.
        self.bad_form = ZionsUserCreationForm({
            'username': 'testuser',
            'password1': 'password',
            'password2': 'password',
        })
        # Valid: unique username with matching passwords.
        self.good_form = ZionsUserCreationForm({
            'username': 'testuser2',
            'password1': 'password',
            'password2': 'password',
        })

    def test_username_good(self):
        self.assertTrue(self.good_form.is_valid())

    def test_clean_username_bad(self):
        self.assertFalse(self.bad_form.is_valid())
| Python | 0.000001 | |
616bb27db3daef8939fe706d1c41cf79f35b40fa | set of default rules in common module | common.py | common.py | #/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2012 Denis Zalevskiy
# Licensed under MIT License
import string
from parser import *
# Basic character-class rules for the combinator toolkit imported from
# parser.py.  Each rule returns (pattern, action): the `ignore` action drops
# the matched text from the parse result, `value` keeps it.
# NOTE(review): `ascii` would shadow the ascii() builtin on Python 3; worth
# renaming if this Python 2 module is ever ported.
def vspace(): return '\n\r', ignore
def hspace(): return ' \t', ignore
def eol(): return choice(eof, vspace), ignore
def space(): return ' \n\r\t', ignore
def spaces(): return r0_inf(space), ignore
def any_char(): return ne(eof), value
def digit_dec() : return '0123456789', value
def digit_hex() : return '0123456789ABCDEFabcdef', value
def ascii(): return sym(lambda s: s in string.ascii_letters), value
| Python | 0 | |
b8c4fdc1ebba18ab832160bece4ce8b391a15b7a | add sampled stochastic games serialization tests | open_spiel/python/tests/sampled_stochastic_games_test.py | open_spiel/python/tests/sampled_stochastic_games_test.py | # Copyright 2019 DeepMind Technologies Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pickle
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
import pyspiel
# All games with kSampledStochastic chance mode.
SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST = [
    g for g in pyspiel.registered_games() if g.default_loadable
    and g.chance_mode == pyspiel.GameType.ChanceMode.SAMPLED_STOCHASTIC
]
# Sanity check: the parameterized suite below is pointless without games.
assert len(SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST) >= 2
class SampledStochasticGamesTest(parameterized.TestCase):
  """Checks that a sampled-stochastic game's internal RNG state survives
  pickling: a deserialized game must replay identically to the original."""
  def random_playout(self, state):
    # Reseed NumPy so the sequence of chosen actions is repeatable; any
    # remaining variation comes from the game's *internal* (seeded) RNG.
    np.random.seed(0)
    while not state.is_terminal():
      state.apply_action(np.random.choice(state.legal_actions()))
    return state
  @parameterized.parameters(*SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST)
  def test_stateful_game_serialization(self, game_info):
    game = pyspiel.load_game(game_info.short_name,
                             {"rng_seed": pyspiel.GameParameter(0)})
    # mutate game's internal RNG state
    state = self.random_playout(game.new_initial_state())
    deserialized_game = pickle.loads(pickle.dumps(game))
    # make sure initial states are the same after game deserialization
    state1 = self.random_playout(game.new_initial_state())
    d_state1 = self.random_playout(deserialized_game.new_initial_state())
    # The RNG advanced, so this playout must differ from the first one...
    self.assertNotEqual(str(state1), str(state))
    # ...but match the deserialized game's playout exactly.
    self.assertEqual(str(state1), str(d_state1))
    # try one more time
    state2 = self.random_playout(game.new_initial_state())
    d_state2 = self.random_playout(deserialized_game.new_initial_state())
    self.assertNotEqual(str(state2), str(state1))
    self.assertEqual(str(state2), str(d_state2))
if __name__ == "__main__":
  absltest.main()
| Python | 0 | |
ed9d640a11c02ca4b42e62d975e4ae9a2bd33093 | add tests for simtk! | openpathsampling/experimental/storage/test_simtk_unit.py | openpathsampling/experimental/storage/test_simtk_unit.py | import pytest
import numpy as np
from ..simstore.custom_json import JSONSerializerDeserializer, DEFAULT_CODECS
from .simtk_unit import *
try:
from simtk import unit
except ImportError:
HAS_SIMTK = False
else:
HAS_SIMTK = True
class TestSimtkUnitCodec(object):
    """Round-trip JSON (de)serialization of simtk unit-bearing quantities."""
    def setup(self):
        # Skip the whole class when OpenMM's simtk.unit is unavailable.
        pytest.importorskip('simtk.unit')
        my_unit = unit.nanometer / unit.picosecond**2
        # One scalar and one array quantity carrying the same unit.
        self.values = {
            'float': 1.0 * my_unit,
            'array': np.array([1.0, 2.0]) * my_unit,
        }
        self.serialization = JSONSerializerDeserializer(
            DEFAULT_CODECS + [simtk_quantity_codec]
        )
    @pytest.mark.parametrize('obj_type', ['float', 'array'])
    def test_serialization_cycle(self, obj_type):
        # serialize -> deserialize -> serialize must reproduce both the
        # original object and the original serialized form.
        obj = self.values[obj_type]
        ser = self.serialization.serializer(obj)
        deser = self.serialization.deserializer(ser)
        reser = self.serialization.serializer(deser)
        if obj_type == 'array':
            np.testing.assert_array_equal(obj, deser)
        else:
            assert obj == deser
        assert ser == reser
class TestSimtkQuantityHandler(object):
    """Tests for SimtkQuantityHandler type strings and value round-trips."""
    def setup(self):
        pytest.importorskip('simtk.unit')
        # Handlers are constructed from a (unit expression, inner type) pair.
        self.handlers = {
            'float': SimtkQuantityHandler(
                ('unit.nanometer/unit.picosecond**2', 'float')
            ),
            'array': SimtkQuantityHandler(
                ('unit.nanometer', 'ndarray.float32(2,3)')
            ),
        }
        self.objects = {
            'float': 1.0 * unit.nanometer / unit.picosecond**2,
            'array': np.array([[1.0, 2.0, 3.0],
                               [4.0, 5.0, 6.0]]) * unit.nanometer,
        }
    @pytest.mark.parametrize('type_str, expected', [
        (
            'simtk(unit.nanometer/unit.picosecond**2)*float',
            ('unit.nanometer/unit.picosecond**2', 'float')
        ), (
            'simtk(unit.nanometer)*ndarray.float32(3,3)',
            ('unit.nanometer', 'ndarray.float32(3,3)')
        ),
    ])
    def test_is_my_type(self, type_str, expected):
        # is_my_type parses a "simtk(<unit>)*<inner>" type string into its
        # (unit expression, inner-type) components.
        assert SimtkQuantityHandler.is_my_type(type_str) == expected
    @pytest.mark.parametrize('obj_type', ['float', 'array'])
    def test_serialization_cycle(self, obj_type):
        handler = self.handlers[obj_type]
        obj = self.objects[obj_type]
        ser = handler.serialize(obj)
        deser = handler.deserialize(ser)
        reser = handler.serialize(deser)
        assert ser == reser
        if obj_type == 'array':
            np.testing.assert_array_equal(obj, deser)
        else:
            assert obj == deser
            # For scalars also verify the unit itself survived the trip.
            assert obj.unit == deser.unit
| Python | 0 | |
4d85702561c000824083544de98693e244c8aab7 | Add test for decoder stack | tests/test_decoding_stack.py | tests/test_decoding_stack.py | #! /usr/bin/env python
from __future__ import division
from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *
import os.path
#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only
class TestDecodingFromStack(unittest.TestCase):
    "Test decoder stack"
    def setUp(self):
        # Decoder parameters left at their defaults.
        self.samplerate, self.channels, self.blocksize = None, None, None
        self.start = 0
        self.duration = None
        # Expected properties of samples/sweep.wav; kept for parity with the
        # other decoder test cases — not all of them are asserted below.
        self.expected_samplerate = 44100
        self.expected_channels = 2
        self.expected_totalframes = 352800
        self.test_exact_duration = True
        self.source_duration = 8
        self.expected_mime_type = 'audio/x-wav'
        self.source = os.path.join(os.path.dirname(__file__),
                                   "samples/sweep.wav")
    def testProcess(self):
        "Test decoder stack: test process"
        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        # Before the first run the decoder is in "record to stack" mode.
        self.assertTrue(decoder.stack)
        self.assertFalse(decoder.from_stack)
        pipe = ProcessPipe(decoder)
        pipe.run()
        # After the run it flips to replaying the stored frames.
        self.assertFalse(decoder.stack)
        self.assertTrue(decoder.from_stack)
        self.assertEqual(len(pipe.frames_stack), 44)
        # The second run must work entirely from the stack.
        pipe.run()
    def testResults(self):
        "Test decoder stack: test frames content"
        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        pitch_on_file = AubioPitch()
        pipe = (decoder | pitch_on_file)
        pipe.run()
        self.assertIsInstance(pipe.frames_stack, list)
        # Copy: the results object may be replaced by the second run.
        pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()
        # If the pipe is used for a second run, the processed frames stored
        # in the stack are passed to the other processors
        # without decoding the audio source again.
        #Let's define a second analyzer equivalent to the previous one:
        pitch_on_stack = AubioPitch()
        pipe |= pitch_on_stack
        pipe.run()
        # to assert that the frames passed to the two analyzers are the same,
        # we check that the results of these analyzers are equivalent:
        pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data
        self.assertTrue(np.array_equal(pitch_results_on_stack,
                                       pitch_results_on_file))
if __name__ == '__main__':
    unittest.main(testRunner=TestRunner())
| Python | 0.000001 | |
15815f0c63b29fabe42e9abe2bfae736012d9b9f | Initialize descentmethods testing | tests/test_descentmethods.py | tests/test_descentmethods.py | import ML.descentmethods as descentmethods
import ML.regression as regression
import data
import numpy as np
import pytest
@pytest.fixture
def gradient():
def grad(X, y, weights):
hypothesis = np.dot(X, weights) - y
gradient = np.dot(np.transpose(X), hypothesis) / np.size(y)
return gradient
return grad
def hessian():
    """Build the least-squares Hessian callback used by the tests.

    NOTE: ``@pytest.fixture`` removed for the same reason as ``gradient``
    above — the tests call ``hessian()`` directly, which modern pytest
    forbids for fixtures.
    """
    def hess(X, weights):
        # The Hessian of the least-squares cost is X^T X; ``weights`` is
        # accepted (and ignored) to match the callback signature the tests
        # pass to descentmethods.newtonsmethod.
        return np.matmul(X.T, X)
    return hess
def test_gradientdescent():
    """Default settings converge to the known least-squares fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(design, y, gradient())
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_gradientdescent_alpha():
    """A smaller learning rate reaches the same weights."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(design, y, gradient(),
                                            alpha=0.001)
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_gradientdescent_lowiterations():
    """Two iterations only get part of the way to the optimum."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(design, y, gradient(),
                                            iterations=2)
    np.testing.assert_array_almost_equal(fitted, [0.1, 0.4], 1)


def test_gradientdescent_initialweights():
    """A non-zero starting point still converges to the same fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(
        design, y, gradient(), initial_weights=np.array([0.2, 0.2]))
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_gradientdescent_stochastic():
    """Stochastic updates converge to the same weights."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(design, y, gradient(),
                                            stochastic=True)
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_gradientdescent_regparam():
    """Mild regularization (reg_param) barely moves the fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.gradientdescent(design, y, gradient(),
                                            reg_param=0.01)
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)
def test_steepestdescent():
    """Steepest descent with defaults converges to the known fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.steepestdescent(design, y, gradient())
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_steepestdescent_alpha():
    """A smaller step size reaches the same weights."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.steepestdescent(design, y, gradient(),
                                            alpha=0.001)
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_steepestdescent_lowiterations():
    """Two iterations only get part of the way to the optimum."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.steepestdescent(design, y, gradient(),
                                            iterations=2)
    np.testing.assert_array_almost_equal(fitted, [0, 0.4], 1)
def test_newtonsmethod():
    """Newton's method with defaults converges to the known fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.newtonsmethod(design, y, gradient(), hessian())
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)


def test_newtonsmethod_alpha():
    """A damped step (small alpha) stops short of full convergence."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.newtonsmethod(design, y, gradient(), hessian(),
                                          alpha=0.001)
    np.testing.assert_array_almost_equal(fitted, [0.3, 0.5], 1)


def test_newtonsmethod_lowiterations():
    """Two iterations leave the weights near the zero start."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.newtonsmethod(design, y, gradient(), hessian(),
                                          iterations=2)
    np.testing.assert_array_almost_equal(fitted, [0, 0], 1)


def test_newtonsmethod_initialweights():
    """A non-zero starting point still converges to the same fit."""
    x, y = data.continuous_data_complicated()
    design = np.column_stack((np.ones(np.shape(x)[0]), x))
    fitted = descentmethods.newtonsmethod(design, y, gradient(), hessian(),
                                          initial_weights=[.2, .2])
    np.testing.assert_array_almost_equal(fitted, [0.47, 0.84], 1)
| Python | 0.000003 | |
40bf8d4773eb659ac2ac22aef50c2f63084924be | add profiler test case | rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py | rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py | #!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import numpy as np
import os
tf.compat.v1.disable_eager_execution()

# Collect both host-side (level 3) and device-side (level 1) traces.
profile_options = tf.profiler.experimental.ProfilerOptions(
    host_tracer_level = 3,
    device_tracer_level = 1)
logpath = os.path.join('data', 'logs', 'profiler_demo')

# Identical seeded inputs for the plugin device and the CPU reference run.
a = tf.random.normal(shape=[1,10, 10, 8], dtype=tf.float32, seed=1)
w = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)
a1 = tf.random.normal(shape=[1, 10, 10, 8], dtype=tf.float32, seed=1)
w1 = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)

with tf.device("/MY_DEVICE:0"):
    # BUG FIX: profile_options was built but never passed to start(), so
    # the tracer levels configured above had no effect.
    tf.profiler.experimental.start(logpath, options=profile_options)
    b = tf.nn.relu(a)
    c = tf.nn.conv2d(b, w, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
    tf.profiler.experimental.stop()

with tf.device("/CPU:0"):
    b1 = tf.nn.relu(a1)
    c1 = tf.nn.conv2d(b1, w1, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')

# allow_soft_placement=False so the op placement on MY_DEVICE is enforced;
# prints True when the plugin device matches the CPU result within 1e-5.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=False, log_device_placement=True))
print(sess.run(tf.reduce_all(tf.less(c - c1, 1e-5))))
| Python | 0.000002 | |
1670438ac9becf93e9ba428065e4b19b219e8ffc | Add WebSockets and SSL supports with Twisted :) | helenae/server.py | helenae/server.py | import sys
from json import dumps, loads
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from db.create_db import Users
from twisted.internet import reactor, ssl
from twisted.python import log
from twisted.web.server import Site
from twisted.web.static import File
from autobahn.twisted.websocket import WebSocketServerFactory, WebSocketServerProtocol, listenWS
# TODO: Add logger (from Twisted, not original library)
# TODO: Create PLUGIN architecture (using twistd)
# TODO: Define PostgreSQL DB structure
# TODO: Authentication under PostgreSQL+SQLAlchemy ORM
# TODO: Errors/Exceptions processing
# SECURITY NOTE(review): database credentials are hard-coded in source;
# move them to configuration/environment before release.
engine = sqlalchemy.create_engine('postgresql://Relrin:05909333@localhost/csan', pool_size=20, max_overflow=0)
class DFSServerProtocol(WebSocketServerProtocol):
    """WebSocket protocol for the distributed file storage server.

    Speaks a small JSON command protocol: unauthenticated clients may only
    issue the AUTH command; everything else requires prior authorization.
    """

    def __init__(self):
        super(DFSServerProtocol, self).__init__()
        # Session factory bound to the module-level engine; each request
        # handler opens its own short-lived session and closes it itself.
        self.Session = sessionmaker(bind=engine)

    # NOTE: the previous __del__ called self.session.close(), but no
    # ``self.session`` attribute was ever created (sessions are local to
    # authorization()), so it raised AttributeError during garbage
    # collection.  It has been removed.

    def authorization(self, data):
        """Check the supplied credentials against the Users table.

        Mutates and returns *data*: on success sets cmd='HELP' and
        auth=True; on failure sets cmd='RAUT' and an 'error' message.

        SECURITY NOTE(review): passwords are compared in plain text; hash
        them (e.g. bcrypt) before this ships.
        """
        session = self.Session()
        result = session.execute(sqlalchemy.select([Users]).where(Users.name == data['user']))
        result = result.fetchone()
        if result is None:
            data['cmd'] = 'RAUT'
            data['error'] = 'User not found'
        else:
            # The WHERE clause already matched on name; this re-check is a
            # defensive guard only.
            if result['name'] == data['user']:
                # correct users info --> real user
                if result['password'] == data['password']:
                    data['cmd'] = 'HELP'
                    data['auth'] = True
                # incorrect password --> fake user
                else:
                    data['cmd'] = 'RAUT'
                    data['error'] = 'Incorrect password. Try again...'
        session.close()
        return data

    def onMessage(self, payload, isBinary):
        """Decode a JSON request, dispatch it, and send the JSON response."""
        json_data = loads(payload)
        # for none-authorized users
        if json_data['auth'] == False:
            # first action with server --> authorization
            if json_data['cmd'] == 'AUTH':
                json_data = self.authorization(json_data)
        # for authorized users
        else:
            # TODO: dispatch authenticated commands (file ops, sync, ...)
            pass
        response = dumps(json_data)
        self.sendMessage(str(response))

    def readFileStructure(self):
        """
        Get all files/folders/etc. structure from DB
        """
        pass

    def getServerParams(self):
        """
        Getting (IP, PORT) of "File Server" to read/write operations
        """
        pass

    def fileSync(self):
        """
        Synchronization files using rsync tool
        """
        pass
if __name__ == '__main__':
    # "python server.py debug" turns on verbose Twisted/Autobahn logging.
    if len(sys.argv) > 1 and sys.argv[1] == 'debug':
        log.startLogging(sys.stdout)
        debug = True
    else:
        debug = False
    # TLS context shared by the WSS endpoint and the HTTPS static site.
    contextFactory = ssl.DefaultOpenSSLContextFactory('keys/server.key', 'keys/server.crt')
    factory = WebSocketServerFactory("wss://localhost:9000", debug = debug, debugCodePaths = debug)
    factory.protocol = DFSServerProtocol
    # allowHixie76 accepts legacy (pre-RFC6455) browser handshakes.
    factory.setProtocolOptions(allowHixie76 = True)
    listenWS(factory, contextFactory)
    # Serve the current directory over HTTPS so clients can fetch the cert.
    webdir = File(".")
    webdir.contentTypes['.crt'] = 'application/x-x509-ca-cert'
    web = Site(webdir)
    reactor.listenSSL(8080, web, contextFactory)
    #reactor.listenTCP(8080, web)
    reactor.run()
| Python | 0 | |
3d0827fa805a08eaaaa07e037f6ce3da6d8e1c4e | add guess module | yoink/guess.py | yoink/guess.py | import numpy as np
from scipy import ndimage
try:
from skimage.feature import corner_harris
from skimage.measure import approximate_polygon
except ImportError:
from yoink.mini_skimage import corner_harris, approximate_polygon
def guess_corners(bw):
    """
    Infer the corners of the plot region in a single-channel image.

    A Sobel filter highlights edges, a watershed segmentation (seeded on
    dark vs. light pixels) separates the plot interior, and a Harris
    corner detector on the hole-filled outline yields corner candidates,
    reduced with polygon approximation.

    Parameters
    ----------
    bw : (m x n) ndarray of ints
        A single color channel.

    Returns
    -------
    corners : ndarray
        Pixel coordinates of the detected plot corners.
    outline : (m x n) ndarray of bool
        True where the pixel belongs to the plot area.
    """
    edge_map = ndimage.sobel(bw)
    # Watershed seeds: 1 marks dark pixels, 2 marks light ones; thresholds
    # 30/150 are heuristics — pixels in between stay unseeded.
    seeds = np.zeros_like(bw)
    seeds[bw < 30] = 1
    seeds[bw > 150] = 2
    segmented = ndimage.watershed_ift(edge_map, np.asarray(seeds, dtype=int))
    outline = ndimage.binary_fill_holes(1 - segmented)
    harris_response = corner_harris(np.asarray(outline, dtype=int))
    corners = approximate_polygon(harris_response, 1)
    return corners, outline
def get_angle(p1, p2):
    """Angle in degrees of the vector from p2 to p1 (row-delta vs column-delta)."""
    d_row = p1[0] - p2[0]
    d_col = p1[1] - p2[1]
    return np.arctan2(d_row, d_col) * 180./np.pi
def get_angle2(corners):
    """Estimate the image tilt by averaging the angles of the four edges."""
    by_row = np.argsort(corners[:, 0])
    top, bot = corners[by_row[:2]], corners[by_row[2:]]
    by_col = np.argsort(corners[:, 1])
    left, right = corners[by_col[:2]], corners[by_col[2:]]
    # The two vertical edges are rotated by 90 degrees so that all four
    # angles are directly comparable before averaging.
    angles = [
        get_angle(top[0, :], top[1, :]),
        get_angle(bot[0, :], bot[1, :]),
        get_angle(left[0, :], left[1, :]) + 90,
        get_angle(right[0, :], right[1, :]) + 90,
    ]
    return sum(angles) / len(angles)
def clear_border(im, outline):
    """Return a copy of *im* with everything outside *outline* set to 255.

    Parameters
    ----------
    im : ndarray
        Image to clean up (not modified in place).
    outline : ndarray of bool
        True for pixels inside the plot area (as returned by guess_corners).
    """
    im_fixed = im.copy()
    # BUG FIX: this used ``-outline``; unary minus on boolean arrays was
    # removed from NumPy — ``~`` is the supported element-wise negation.
    im_fixed[~outline] = 255
    return im_fixed
| Python | 0.000001 | |
4224761522c1e058979f3901f9af1d037398576c | Add cache_key method to be used by Django 1.7 | django_mobile/loader.py | django_mobile/loader.py | import hashlib
from django.template import TemplateDoesNotExist
from django.template.loader import find_template_loader, BaseLoader
from django.template.loader import get_template_from_string
from django.template.loaders.cached import Loader as DjangoCachedLoader
from django_mobile import get_flavour
from django_mobile.conf import settings
from django.utils.encoding import force_bytes
class Loader(BaseLoader):
    """Flavour-aware template loader.

    Prefixes every requested template name with the active flavour (and
    the optional FLAVOURS_TEMPLATE_PREFIX), then delegates to the loaders
    configured in FLAVOURS_TEMPLATE_LOADERS.
    """
    is_usable = True

    def __init__(self, *args, **kwargs):
        loaders = []
        for loader_name in settings.FLAVOURS_TEMPLATE_LOADERS:
            loader = find_template_loader(loader_name)
            if loader is not None:
                loaders.append(loader)
        self.template_source_loaders = tuple(loaders)
        # BUG FIX: this called super(BaseLoader, self).__init__, which
        # skips BaseLoader.__init__ entirely (super() resolves *past* the
        # class named as its first argument).  Name the current class.
        super(Loader, self).__init__(*args, **kwargs)

    def get_template_sources(self, template_name, template_dirs=None):
        """Yield candidate template origins from every wrapped loader."""
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            if hasattr(loader, 'get_template_sources'):
                try:
                    for result in loader.get_template_sources(
                            template_name,
                            template_dirs):
                        yield result
                except UnicodeDecodeError:
                    # The template dir name was a bytestring that wasn't valid UTF-8.
                    raise
                except ValueError:
                    # The joined path was located outside of this particular
                    # template_dir (it might be inside another one, so this isn't
                    # fatal).
                    pass

    def prepare_template_name(self, template_name):
        """Return the flavour-qualified (and optionally prefixed) name."""
        template_name = u'%s/%s' % (get_flavour(), template_name)
        if settings.FLAVOURS_TEMPLATE_PREFIX:
            template_name = settings.FLAVOURS_TEMPLATE_PREFIX + template_name
        return template_name

    def load_template(self, template_name, template_dirs=None):
        """Try each wrapped loader until one yields a template."""
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            try:
                return loader(template_name, template_dirs)
            except TemplateDoesNotExist:
                pass
        raise TemplateDoesNotExist("Tried %s" % template_name)

    def load_template_source(self, template_name, template_dirs=None):
        """Try each wrapped loader's load_template_source, where present."""
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            if hasattr(loader, 'load_template_source'):
                try:
                    return loader.load_template_source(
                        template_name,
                        template_dirs)
                except TemplateDoesNotExist:
                    pass
        raise TemplateDoesNotExist("Tried %s" % template_name)
class CachedLoader(DjangoCachedLoader):
    """Flavour-aware variant of Django's cached template loader."""
    is_usable = True

    def cache_key(self, template_name, template_dirs):
        """Build the cache key (Django 1.7 hook): always flavour-prefixed."""
        if template_dirs:
            # Hash the dirs so distinct search paths get distinct entries.
            key = '-'.join([
                template_name,
                hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()
            ])
        else:
            key = template_name
        return '{0}:{1}'.format(get_flavour(), key)

    def load_template(self, template_name, template_dirs=None):
        # BUG FIX: the key was previously rebuilt inline here and, when
        # template_dirs was given, dropped the flavour prefix — letting one
        # flavour's template be served from another's cache slot.  It also
        # fed a str to sha1, which fails on Python 3.  Reuse cache_key.
        key = self.cache_key(template_name, template_dirs)
        if key not in self.template_cache:
            template, origin = self.find_template(template_name, template_dirs)
            if not hasattr(template, 'render'):
                try:
                    template = get_template_from_string(template, origin, template_name)
                except TemplateDoesNotExist:
                    # If compiling the template we found raises TemplateDoesNotExist,
                    # back off to returning the source and display name for
                    # the template we were asked to load. This allows for
                    # correct identification (later) of the actual template
                    # that does not exist.
                    return template, origin
            self.template_cache[key] = template
        return self.template_cache[key], None
| import hashlib
from django.template import TemplateDoesNotExist
from django.template.loader import find_template_loader, BaseLoader
from django.template.loader import get_template_from_string
from django.template.loaders.cached import Loader as DjangoCachedLoader
from django_mobile import get_flavour
from django_mobile.conf import settings
class Loader(BaseLoader):
    # Flavour-aware template loader: delegates to the loaders configured in
    # FLAVOURS_TEMPLATE_LOADERS after prefixing the template name.
    is_usable = True
    def __init__(self, *args, **kwargs):
        loaders = []
        for loader_name in settings.FLAVOURS_TEMPLATE_LOADERS:
            loader = find_template_loader(loader_name)
            if loader is not None:
                loaders.append(loader)
        self.template_source_loaders = tuple(loaders)
        # NOTE(review): super(BaseLoader, self) skips BaseLoader.__init__
        # (super() resolves past the class named as its first argument);
        # this was presumably meant to be super(Loader, self).
        super(BaseLoader, self).__init__(*args, **kwargs)
    def get_template_sources(self, template_name, template_dirs=None):
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            if hasattr(loader, 'get_template_sources'):
                try:
                    for result in loader.get_template_sources(
                        template_name,
                        template_dirs):
                        yield result
                except UnicodeDecodeError:
                    # The template dir name was a bytestring that wasn't valid UTF-8.
                    raise
                except ValueError:
                    # The joined path was located outside of this particular
                    # template_dir (it might be inside another one, so this isn't
                    # fatal).
                    pass
    def prepare_template_name(self, template_name):
        # Prepend "<flavour>/" (and the optional global prefix) to the name.
        template_name = u'%s/%s' % (get_flavour(), template_name)
        if settings.FLAVOURS_TEMPLATE_PREFIX:
            template_name = settings.FLAVOURS_TEMPLATE_PREFIX + template_name
        return template_name
    def load_template(self, template_name, template_dirs=None):
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            try:
                return loader(template_name, template_dirs)
            except TemplateDoesNotExist:
                pass
        raise TemplateDoesNotExist("Tried %s" % template_name)
    def load_template_source(self, template_name, template_dirs=None):
        template_name = self.prepare_template_name(template_name)
        for loader in self.template_source_loaders:
            if hasattr(loader, 'load_template_source'):
                try:
                    return loader.load_template_source(
                        template_name,
                        template_dirs)
                except TemplateDoesNotExist:
                    pass
        raise TemplateDoesNotExist("Tried %s" % template_name)
class CachedLoader(DjangoCachedLoader):
    is_usable = True
    def load_template(self, template_name, template_dirs=None):
        key = "{0}:{1}".format(get_flavour(), template_name)
        if template_dirs:
            # If template directories were specified, use a hash to differentiate
            # NOTE(review): this overwrites the flavour-qualified key built
            # above, so different flavours can collide in the cache; sha1
            # over a str also breaks on Python 3 (bytes are required).
            key = '-'.join([
            template_name,
            hashlib.sha1('|'.join(template_dirs)).hexdigest()])
        if key not in self.template_cache:
            template, origin = self.find_template(template_name, template_dirs)
            if not hasattr(template, 'render'):
                try:
                    template = get_template_from_string(template, origin, template_name)
                except TemplateDoesNotExist:
                    # If compiling the template we found raises TemplateDoesNotExist,
                    # back off to returning the source and display name for
                    # the template we were asked to load. This allows for
                    # correct identification (later) of the actual template
                    # that does not exist.
                    return template, origin
            self.template_cache[key] = template
        return self.template_cache[key], None
| Python | 0 |
4ef2344b3abf3d8c0542ffd97425557ae092f21d | handle ZeroDivisionError | tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py | tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class MapDefunBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for MapDefunOp."""
  def _run(self, op, name=None, num_iters=3000):
    # Run *op* num_iters times (with warmup) and report throughput.
    wall_time = self.run_op_benchmark(
        op=op,
        iters=num_iters,
        warmup=True
    )
    # Guard against wall_time == 0, which would raise ZeroDivisionError in
    # the examples_per_sec computation; 1e-100 is far below any timer
    # resolution, so reported numbers are unaffected.
    zero_division_delta = 1e-100
    wall_time = wall_time + zero_division_delta
    self.report_benchmark(
        name=name,
        iters=num_iters,
        wall_time=wall_time,
        extras={"examples_per_sec": 1 / float(wall_time)})
  def benchmark_defun_vs_map_fn(self):
    """Benchmarks to compare the performance of MapDefun vs tf.map_fn."""
    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def defun(x):
      return array_ops.identity(x)
    def fn(x):
      return array_ops.identity(x)
    base = math_ops.range(10000)
    # NOTE(review): input_size only affects num_iters and the benchmark
    # name — both ops always map over all 10000 elements of ``base``;
    # confirm whether slicing ``base`` was intended.
    for input_size in [10, 100, 1000, 10000]:
      num_iters = 10000 // input_size
      map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
      map_fn_op = map_fn.map_fn(fn, base)
      self._run(
          op=map_defun_op,
          name="with_defun_size_%d" % input_size,
          num_iters=num_iters
      )
      self._run(
          op=map_fn_op,
          name="without_defun_size_%d" % input_size,
          num_iters=num_iters
      )
if __name__ == "__main__":
  benchmark_base.test.main()
| # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class MapDefunBenchmark(benchmark_base.DatasetBenchmarkBase):
  """Benchmarks for MapDefunOp."""
  def _run(self, op, name=None, num_iters=3000):
    wall_time = self.run_op_benchmark(
        op=op,
        iters=num_iters,
        warmup=True
    )
    # NOTE(review): if wall_time comes back as exactly 0 the
    # examples_per_sec computation below raises ZeroDivisionError.
    self.report_benchmark(
        name=name,
        iters=num_iters,
        wall_time=wall_time,
        extras={"examples_per_sec": float(1 / wall_time)})
  def benchmark_defun_vs_map_fn(self):
    """Benchmarks to compare the performance of MapDefun vs tf.map_fn."""
    @function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
    def defun(x):
      return array_ops.identity(x)
    def fn(x):
      return array_ops.identity(x)
    base = math_ops.range(10000)
    # NOTE(review): input_size only affects num_iters and the benchmark
    # name; both ops always map over all 10000 elements of ``base``.
    for input_size in [10, 100, 1000, 10000]:
      num_iters = 10000 // input_size
      map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
      map_fn_op = map_fn.map_fn(fn, base)
      self._run(
          op=map_defun_op,
          name="with_defun_size_%d" % input_size,
          num_iters=num_iters
      )
      self._run(
          op=map_fn_op,
          name="without_defun_size_%d" % input_size,
          num_iters=num_iters
      )
if __name__ == "__main__":
  benchmark_base.test.main()
| Python | 0.000006 |
ef96c4e1a27289f5cdad5de78ee2a2dfc1b91bd0 | Create network-delay-time.py | Python/network-delay-time.py | Python/network-delay-time.py | # Time: O((|E| + |V|) * log|V|)
# Space: O(|E| + |V|)
# Dijkstra's algorithm
class Solution(object):
    """Dijkstra-based solution for Network Delay Time (LeetCode 743)."""

    def networkDelayTime(self, times, N, K):
        """
        Return the time for a signal from node K to reach all N nodes,
        or -1 if some node is unreachable.

        :type times: List[List[int]]  # directed edges (u, v, w), 1-indexed
        :type N: int
        :type K: int
        :rtype: int
        """
        import heapq

        # Build a 0-indexed adjacency list.
        adj = [[] for _ in range(N)]
        for u, v, w in times:
            adj[u-1].append((v-1, w))
        min_heap = []
        lookup, result = set(), 0
        heapq.heappush(min_heap, (0, K-1))
        while min_heap and len(lookup) != N:
            result, u = heapq.heappop(min_heap)
            # Lazy deletion: a node may be pushed several times; skip
            # entries for nodes whose shortest distance is already final.
            if u in lookup:
                continue
            lookup.add(u)
            for v, w in adj[u]:
                if v in lookup: continue
                heapq.heappush(min_heap, (result+w, v))
        # The last (largest) finalized distance is the answer, if every
        # node was reached.
        return result if len(lookup) == N else -1
| Python | 0.000037 | |
842d7337f236d94d1b7ed70aaa98eff73b4000cd | Create pyside_houdini.py | pyside_houdini.py | pyside_houdini.py | """
This module helps you use PyQt in Houdini's GUI by integrating PyQt's event
loop into Houdini's. Replace calls to QApplication.exec_() in your
code with calls to pyqt_houdini.exec_(app).
"""
from email.mime import image
import hou
from PySide import QtCore
from PySide import QtGui
class IntegratedEventLoop(object):
    """This class behaves like QEventLoop except it allows Qt (PySide) to run
    inside Houdini's event loop on the main thread. You probably just want to
    call exec_() below instead of using this class directly.
    """
    def __init__(self, application, dialogs):
        # We need the application to send posted events. We hold a reference
        # to any dialogs to ensure that they don't get garbage collected
        # (and thus close in the process). The reference count for this object
        # will go to zero when it removes itself from Houdini's event loop.
        self.application = application
        self.dialogs = dialogs
        self.event_loop = QtCore.QEventLoop()
    def exec_(self):
        # Pump Qt events from Houdini's loop instead of blocking in Qt.
        hou.ui.addEventLoopCallback(self.processEvents)
    def processEvents(self):
        # There is no easy way to know when the event loop is done. We can't
        # use QEventLoop.isRunning() because it always returns False since
        # we're not inside QEventLoop.exec_(). We can't rely on a
        # lastWindowClosed signal because the window is usually made invisible
        # instead of closed. Instead, we need to explicitly check if any top
        # level widgets are still visible.
        if not anyQtWindowsAreOpen():
            hou.ui.removeEventLoopCallback(self.processEvents)
        self.event_loop.processEvents()
        self.application.sendPostedEvents(None, 0)
def anyQtWindowsAreOpen():
    # True while at least one top-level Qt widget is still visible.
    return any(w.isVisible() for w in QtGui.QApplication.topLevelWidgets())
def exec_(application, *args):
    """You cannot call QApplication.exec_, or Houdini will freeze while Qt
    waits for and processes events. Instead, call this function to allow
    Houdini's and Qt's event loops to coexist. Pass in any dialogs as
    extra arguments, if you want to ensure that something holds a reference
    to them while the event loop runs.
    This function returns right away.
    """
    IntegratedEventLoop(application, args).exec_()
def execSynchronously(application, *args):
    """This function is like exec_, except it will not return until all Qt
    windows have closed. Houdini will remain responsive while the Qt window
    is open.
    """
    exec_(application, *args)
    hou.ui.waitUntil(lambda: not anyQtWindowsAreOpen())
| Python | 0 | |
90ef0ed82a4d22f277ccc0c3275f0a07189fadc0 | Make title pictures. | title_pics.py | title_pics.py | # -*- coding: utf-8 -*-
#
# title_pics.py
#
# purpose: Create map and time-series for title
# author: Filipe P. A. Fernandes
# e-mail: ocefpaf@gmail
# web: http://ocefpaf.github.io/
# created: 20-Jan-2015
# modified: Tue 20 Jan 2015 11:18:15 AM BRT
#
# obs:
#
import matplotlib
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
matplotlib.style.use('ggplot')
def make_map(projection=ccrs.PlateCarree(),
             extent=[-43.5, -32.5, -24.5, -14.5]):
    """Create a small square cartopy figure/axes pair restricted to `extent`.

    NOTE(review): both defaults are evaluated once at import time, and the
    extent default is a mutable list shared between calls -- confirm that no
    caller mutates it.
    """
    subplot_kw = dict(projection=projection)
    fig, ax = plt.subplots(figsize=(3.25, 3.25), subplot_kw=subplot_kw)
    # assumes extent is (x0, x1, y0, y1) in projection coords -- TODO confirm
    ax.set_extent(extent)
    #gl = ax.gridlines(draw_labels=True)
    #gl.xlabels_top = gl.ylabels_right = False
    #gl.xformatter = LONGITUDE_FORMATTER
    #gl.yformatter = LATITUDE_FORMATTER
    return fig, ax
def fake_tide(t, M2amp, M2phase, S2amp, S2phase, randamp):
    """Generate a minimally realistic-looking fake semidiurnal tide.

    t is time in hours; phases are in radians.  The signal is the sum of an
    M2 (12.42 h) component, an S2 (12.0 h) component and Gaussian noise
    scaled by `randamp`.
    """
    angular_t = 2 * np.pi * t
    m2_component = M2amp * np.sin(angular_t / 12.42 - M2phase)
    s2_component = S2amp * np.sin(angular_t / 12.0 - S2phase)
    noise = randamp * np.random.randn(len(t))
    return m2_component + s2_component + noise
if __name__ == '__main__':
    # Map.
    layer = 'BlueMarble_ShadedRelief_Bathymetry'
    url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'
    fig, ax = make_map()
    ax.add_wmts(url, layer)  # WMTS background imagery from the NASA server above
    ax.axis('off')
    fig.savefig('map.png', format='png', dpi=72, orientation='portrait',
                transparent=True)

    # Time-series.
    t = np.arange(500)
    u = fake_tide(t, 2.2, 0.3, 1, .3, 0.4)
    v = fake_tide(t, 1.1, 0.3 + np.pi / 2, 0.6, 0.3 + np.pi / 2, 0.4)

    fig, ax = plt.subplots(figsize=(3.25, 3.25))
    legendkw = dict(loc='lower right', fancybox=True, fontsize='small')  # NOTE(review): defined but never used
    kw = dict(alpha=0.5, linewidth=2.5)
    ax.plot(t, u, label='U', color='cornflowerblue', **kw)
    ax.plot(t, v, label='V', color='lightsalmon', **kw)
    ax.axis([200, 500, -8, 8])
    # Keep the y tick labels from getting too crowded.
    ax.locator_params(axis='y', nbins=5)
    ax.axis('off')
    fig.savefig('timeSeries.png', format='png', dpi=72, orientation='portrait',
                transparent=True)
| Python | 0.000006 | |
cf95ab6ee1bf53ff1a998824dc3718c1ae19336e | Create train_dots.py | train_dots.py | train_dots.py | #!/bin/python
import pylab as pl
import cPickle
import matplotlib.pyplot as plt
from sklearn import svm, metrics
import numpy as np
import sys
# --- Configuration -----------------------------------------------------------
square = 13  # side length (pixels) of the sliding classification window
imgloc = '../images/v012-penn.10-1hA5D1-cropb.png'
# Class-label mapping used both for training targets and for vote counting.
resd={'dot':0,'noise':1,'vein':2}

currimg=plt.imread(imgloc)

# --- Load the pickled training patches (dots / noise / veins) ----------------
pkl_file=open('dots.pkl', 'r')
dots = cPickle.load(pkl_file)
pkl_file.close()
pkl_file=open('noise.pkl', 'r')
noise = cPickle.load(pkl_file)
pkl_file.close()
pkl_file=open('veins.pkl','r')
veins = cPickle.load(pkl_file)
pkl_file.close()

#dots = zip(dots, [0 for i in range(len(dots))])
#noise = zip(noise, [1 for i in range(len(noise))])
#veins = zip(veins, [2 for i in range(len(veins))])

print np.shape(np.asarray(dots))
print np.shape(np.asarray(noise))
print np.shape(np.asarray(veins))

# Flatten each square patch into one feature row per patch.
dots_data = np.asarray(dots).reshape((len(dots),-1))
noise_data= np.asarray(noise).reshape((len(noise),-1))
veins_data= np.asarray(veins).reshape((len(veins),-1))
data = np.concatenate((np.concatenate((dots_data,noise_data)),veins_data))
print len(data)
# Labels follow the same ordering as the concatenated data rows.
target = [resd['dot'] for i in range(len(dots_data))] + [resd['noise'] for i in range(len(noise_data))] + [resd['vein'] for i in range(len(veins_data))]
print len(target)

# --- Train an SVM on the labelled patches ------------------------------------
classifier = svm.SVC(gamma=0.001)
classifier.fit(data, target)

tmpx, tmpy = len(currimg[0][:]), len(currimg[:][0])
final_image=np.ones((tmpy,tmpx))
blocks=[]
print 'Going through the blocks...'
sys.stdout.flush()
# Slide the square window over the image: one block per interior centre pixel.
for i in [i+square/2 for i in xrange(tmpy-square)]:
    for j in [j+square/2 for j in xrange(tmpx-square)]:
        currblock=currimg[i-square/2:i+square/2+1,j-square/2:j+square/2+1]
        blocks.append(currblock)
blocks=np.asarray(blocks)
print np.shape(blocks)
blocks = np.asarray(blocks).reshape(len(blocks),-1)
print np.shape(blocks)
print 'About to make predictions...'
sys.stdout.flush()
predicted = classifier.predict(blocks)

# Per-pixel vote accumulator: one bin per class label (dot/noise/vein).
voting = np.zeros((tmpy, tmpx, 3))
print 'About to count votes...'
sys.stdout.flush()
for p in xrange(len(predicted)):
    # Recover the window-centre coordinates from the flat prediction index.
    j=p%(tmpx-square)+square/2
    i=(p-j+square/2)/(tmpx-square)+square/2
    #[i,j] are the coordinates of the center of that box
    #since p=(i-s/2)(X-s)+j-s/2
    for y in range(i-square/2,i+square/2):
        for x in range(j-square/2,j+square/2):
            voting[y,x][predicted[p]]+=1

# Pixels whose plurality vote is 'vein' are painted black in the output image.
for i in xrange(tmpy):
    for j in xrange(tmpx):
        if voting[i,j].argmax()==resd['vein']:
            final_image[i,j]=0
plt.imshow(final_image, cmap=plt.cm.gray)
plt.show()

#for i in [i+square/2 for i in xrange(tmpx-square)]:
#    for j in [j+square/2 for j in xrange(tmpy-square)]:
#        for k in range(i-square/2,i+square/2+1):
#            for
| Python | 0 | |
84153b0be78998ab8ec6914df8623c99255457b5 | Improve code for creating temporary locustfiles that can be used in tests | locust/test/mock_locustfile.py | locust/test/mock_locustfile.py | import os
import random
import time
from contextlib import contextmanager
MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""

from locust import HttpLocust, TaskSet, task, between


def index(l):
    l.client.get("/")

def stats(l):
    l.client.get("/stats/requests")

class UserTasks(TaskSet):
    # one can specify tasks like this
    tasks = [index, stats]

class LocustSubclass(HttpLocust):
    host = "http://127.0.0.1:8089"
    wait_time = between(2, 5)
    task_set = UserTasks

class NotLocustSubclass():
    host = "http://localhost:8000"
'''


class MockedLocustfile:
    """Value object describing a generated locustfile on disk."""
    __slots__ = ["filename", "directory", "file_path"]


@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
    """Context manager creating a uniquely named locustfile next to this
    module, yielding a MockedLocustfile describing it.

    The file is removed on exit even if the with-body raises (the original
    implementation leaked the file on exceptions because the cleanup was not
    inside a finally block).
    """
    mocked = MockedLocustfile()
    mocked.directory = os.path.dirname(os.path.abspath(__file__))
    # Timestamp + random suffix makes concurrent/repeated use collision-free.
    mocked.filename = "%s_%s_%i.py" % (
        filename_prefix,
        str(time.time()).replace(".", "_"),
        random.randint(0, 100000),
    )
    mocked.file_path = os.path.join(mocked.directory, mocked.filename)
    with open(mocked.file_path, 'w') as file_handle:
        file_handle.write(content)
    try:
        yield mocked
    finally:
        os.remove(mocked.file_path)
| Python | 0 | |
fea7f350ce711d183fd9011c43ca68fff88400eb | Add cython compile util | utils/cython_compile_libs.py | utils/cython_compile_libs.py | #!/bin/env python
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import sys
import shutil
from pyximport.pyxbuild import pyx_to_dll
WD = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
LIBS = os.path.join(WD, 'libs')
# Adds the libs directory to the path
sys.path.append(LIBS)
import common
def main():
    """Cython-compile every .py file under libs/ (plus the installed pefile
    module, if present) in place, then remove the temporary _pyxbld build
    directories left behind by pyximport."""
    filelist = common.parseFileList([LIBS], recursive=True)
    try:
        # pefile.__file__ may point at the compiled .pyc; dropping the last
        # character yields the .py source path.
        import pefile
        filepath = pefile.__file__[:-1]
        filelist.append(filepath)
    except:
        print('pefile not installed...')
    for filename in filelist:
        if filename.endswith('.py'):
            filename = str(filename)
            try:
                pyx_to_dll(filename, inplace=True)
                print(filename, 'successful!')
            except Exception as e:
                print('ERROR:', filename, 'failed')
                # Best-effort removal of a stale generated .c file
                # ('.py' -> '.pyc' style sibling).
                try:
                    os.remove(filename[:-2] + 'c')
                except:
                    pass
    # Cleanup build dirs
    walk = os.walk(LIBS)
    for path in walk:
        path = path[0]
        if os.path.basename(path) == '_pyxbld' and os.path.isdir(path):
            shutil.rmtree(path)


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
d31f63a914877fe12d66497bdbc7dd6d871672fc | add solution for Best Time to Buy and Sell Stock | src/bestTimeToBuyAndSellStock.py | src/bestTimeToBuyAndSellStock.py | class Solution:
# @param prices, a list of integer
# @return an integer
def maxProfit(self, prices):
n = len(prices)
if n < 2:
return 0
min_price = prices[0]
res = 0
for i in xrange(1, n):
res = max(res, prices[i]-min_price)
min_price = min(min_price, prices[i])
return res
| Python | 0 | |
595c8fad76696240f96e61d9a2299de3d6cda16a | Add utility for walking etree and yielding nodes if options class type match. | skcode/utility/walketree.py | skcode/utility/walketree.py | """
SkCode utility for walking across a document tree.
"""
def walk_tree_for_cls(tree_node, opts_cls):
    """
    Walk the tree and yield any tree node matching the given options class.
    :param tree_node: The current tree node instance.
    :param opts_cls: The options class to search for.
    """
    # Iterative pre-order traversal: visit a node, then its children
    # left-to-right.  Children are pushed reversed because the stack is LIFO,
    # which keeps the yield order identical to the recursive formulation.
    pending = [tree_node]
    while pending:
        node = pending.pop()
        if isinstance(node.opts, opts_cls):
            yield node
        pending.extend(reversed(node.children))
| Python | 0 | |
4e50597100b5e84b1ed3c304a3a7323e7bab7918 | Create removeSequence.py | removeSequence.py | removeSequence.py | #!/usr/bin/python
###############################################################################
#
# removeSequence.py version 1.0
#
# Removes a specified nucleotide sequence from the beginning of a larger sequence
#
# Useful for preparing FASTA files for certain processing pipelines that do not
# allow for distal barcodes or primers
#
# Copyright (C) 2014 Evan Denmark
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################
import argparse
# Command-line interface: the input FASTA path and the adaptor to strip.
parser = argparse.ArgumentParser(description = ' ')
parser.add_argument('fasta',help= 'fasta file with adaptor sequences')
parser.add_argument('adaptor_sequence', help= 'string of nucleotides')
# NOTE(review): parse_args() is invoked twice; harmless, but one Namespace
# could be reused for both attributes.
fasta = parser.parse_args().fasta
adaptor_sequence = parser.parse_args().adaptor_sequence
def remove_fusion_adaptors(fasta, adaptor_sequence):
    """
    Remove a fusion adaptor from the beginning of each sequence in a FASTA
    file, writing the result to 'new_<fasta>.fasta'.

    Header lines (starting with '>') are copied through unchanged.  A
    sequence line beginning with `adaptor_sequence` has that prefix
    stripped; one that does not is written unchanged.  (The original
    implementation silently dropped non-matching sequence lines, which
    desynchronised headers from sequences, and it also left both file
    handles open if an error occurred mid-loop.)
    """
    fasta = str(fasta)
    adaptor_len = len(adaptor_sequence)
    # Context managers guarantee both handles are closed, even on error.
    with open(fasta, 'r') as old_file, \
         open('new_' + fasta + '.fasta', 'w') as new_file:
        for each_line in old_file:
            if each_line.startswith('>'):
                # Name line: keep verbatim (including its newline).
                new_file.write(each_line)
            else:
                # Sequence line: drop the trailing newline and any leading
                # whitespace, then strip the adaptor prefix when present.
                sequence = each_line.rstrip('\n').lstrip()
                if sequence.startswith(adaptor_sequence):
                    sequence = sequence[adaptor_len:]
                new_file.write(sequence + '\n')
remove_fusion_adaptors(fasta, adaptor_sequence)
| Python | 0.000001 | |
b7d15547bd88c6304c5d8ceb1f74481cb4d162e7 | Add parser hacking example | repeat_n_times.py | repeat_n_times.py | # -*- encoding: utf-8 -*-
from jinja2 import Environment
from jinja2.ext import Extension
from jinja2 import nodes
class RepeatNTimesExtension(Extension):
    """Jinja2 extension adding a ``{% repeat N times %}...{% endrepeat %}``
    tag, desugared into an ordinary ``{% for %}`` loop."""
    tags = {"repeat"}

    def parse(self, parser):
        # The first stream token is the 'repeat' name itself; remember its
        # line number so the generated nodes report the right template line.
        lineno = next(parser.stream).lineno
        # Loop variable '_' -- present but unused by the template author.
        index = nodes.Name("_", "store", lineno=lineno)
        how_many_times = parser.parse_expression()
        # Equivalent to iterating over range(how_many_times).
        iterable = nodes.Call(nodes.Name("range", "load"), [how_many_times], [], None, None)
        parser.stream.expect("name:times")
        body = parser.parse_statements(["name:endrepeat"], drop_needle=True)
        return nodes.For(index, iterable, body, [], None, False, lineno=lineno)


if __name__ == "__main__":
    # Demo: the desugaring means loop.first/loop.last still work inside.
    env = Environment()
    env.add_extension(RepeatNTimesExtension)
    template = env.from_string(u"""
{%- repeat 3 times -%}
{% if not loop.first and not loop.last %}, {% endif -%}
{% if loop.last %} и ещё раз {% endif -%}
учиться
{%- endrepeat -%}
""")
    print(template.render())
| Python | 0.000002 | |
e3365aa8d9f5e49d3aff732d169c22a46ef22904 | Create viriback_tracker.py (#452) | plugins/feeds/public/viriback_tracker.py | plugins/feeds/public/viriback_tracker.py | import logging
from dateutil import parser
from datetime import timedelta, datetime
from core import Feed
from core.errors import ObservableValidationError
from core.observables import Url, Ip
class ViriBackTracker(Feed):
    """Daily feed importing malware C2 URLs and IPs from the ViriBack
    tracker CSV dump (rows: family, url, ip, first_seen)."""

    default_values = {
        "frequency": timedelta(hours=24),
        "name": "ViriBackTracker",
        "source": "http://tracker.viriback.com/dump.php",
        "description":
            "Malware C2 Urls and IPs",
    }

    def update(self):
        # Iterate CSV rows fetched from `source` by the Feed base class.
        for line in self.update_csv(delimiter=',', quotechar='"'):
            # Skip empty rows, the header row and comment rows.
            if not line or line[0].startswith(("Family", "#")):
                continue
            family, url, ip, first_seen = line
            first_seen = parser.parse(first_seen)
            # Only import entries newer than the previous run.
            if self.last_run is not None:
                if self.last_run > first_seen:
                    continue
            self.analyze(family, url, ip, first_seen)

    def analyze(self, family, url, ip, first_seen):
        """Create/update Url and Ip observables for one CSV row, tagging
        them with the (lower-cased) malware family, and link them together
        when both are present."""
        url_obs = False
        ip_obs = False
        family = family.lower()
        context = {
            'first_seen': first_seen,
            'source': self.name
        }
        if url:
            try:
                url_obs = Url.get_or_create(value=url)
                url_obs.add_context(context)
                url_obs.add_source(self.name)
                url_obs.tag(["c2", family])
            except ObservableValidationError as e:
                # Malformed URL: log and continue with the IP half.
                logging.error(e)
        if ip:
            try:
                ip_obs = Ip.get_or_create(value=ip)
                ip_obs.add_context(context)
                ip_obs.tag(family.lower())
            except ObservableValidationError as e:
                logging.error(e)
        if url_obs and ip_obs:
            # Link the URL observable to its IP observable.
            url_obs.active_link_to(ip_obs, 'ip', self.name)
| Python | 0 | |
5a5c30e701220cc874d08a442af0e81d2020aacf | bump dev version | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev43"
| __version__ = "1.0b1.dev42"
| Python | 0 |
85336dfed46145c36307f218612db7c4d8dbf637 | bump version | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev18"
| __version__ = "1.0b1.dev17"
| Python | 0 |
c642a32b1aff0c9adc8e62aad8ceb7e0396512ed | bump version | symposion/__init__.py | symposion/__init__.py | __version__ = "1.0b1.dev14"
| __version__ = "1.0b1.dev13"
| Python | 0 |
c36a954dbdfcca6e520dca6b96c1c97f496880ca | Add test for forcefield_labeler | smarty/tests/test_forcefield_labeler.py | smarty/tests/test_forcefield_labeler.py | from functools import partial
import smarty
import openeye
from openeye.oechem import *
import os
from smarty.utils import get_data_filename
import numpy as np
from smarty.forcefield_labeler import *
def test_read_ffxml():
    """Test reading of ffxml files.
    """
    # Constructing the labeler IS the test: it raises if the XML is invalid.
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))


def test_molecule_labeling(verbose = False):
    """Test using ForceField_labeler to provide force terms applied to an oemol."""
    # Build a propane molecule ('CCC') with explicit hydrogens.
    mol = OEMol()
    OEParseSmiles(mol, 'CCC')
    OEAddExplicitHydrogens(mol)
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))
    labels = labeler.labelMolecules( [mol], verbose = verbose)

    # Check that force terms aren't empty
    if not 'HarmonicBondForce' in labels[0].keys():
        raise Exception("No force term assigned for harmonic bonds.")
    if not 'HarmonicAngleForce' in labels[0].keys():
        raise Exception("No force term assigned for harmonic angles.")
    if not 'PeriodicTorsionForce' in labels[0].keys():
        raise Exception("No force term assigned for periodic torsions.")
    if not 'NonbondedForce' in labels[0].keys():
        raise Exception("No nonbonded force term assigned.")
| Python | 0 | |
7d1fde66e0fd6b3b8cc9876e0d3271d6776b347f | convert tiffs to video added | image_to_video.py | image_to_video.py | # -*- coding: utf-8 -*-
"""
Created on Tue May 15 16:11:55 2018
@author: LaVision
"""
#!/usr/local/bin/python3
import cv2
import argparse
import os
# Construct the argument parser and parse the arguments
# Construct the argument parser and parse the arguments.
ap = argparse.ArgumentParser()
# NOTE(review): the default is 'png' but the help text says 'tif' -- one of
# the two is wrong; confirm which extension is intended.
ap.add_argument("-ext", "--extension", required=False, default='png', help="extension name. default is 'tif'.")
ap.add_argument("-o", "--output", required=False, default='output.mp4', help="output video file")
args = vars(ap.parse_args())

# Arguments
dir_path = '.'
ext = args['extension']
output = args['output']

# Collect candidate frames.  NOTE(review): os.listdir order is arbitrary, so
# frames are not guaranteed to be in sequence -- consider sorting.
images = []
for f in os.listdir(dir_path):
    if f.endswith(ext):
        images.append(f)

# Determine the width and height from the first image
image_path = os.path.join(dir_path, images[0])
frame = cv2.imread(image_path)
cv2.imshow('video',frame)
height, width, channels = frame.shape

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # Be sure to use lower case
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))

for image in images:
    image_path = os.path.join(dir_path, image)
    print("processing", image_path)
    frame = cv2.imread(image_path)
    out.write(frame) # Write out frame to video
    cv2.imshow('video',frame)
    if (cv2.waitKey(1) & 0xFF) == ord('q'): # Hit `q` to exit
        break

# Release everything if job is finished
out.release()
cv2.destroyAllWindows()

print("The output video is {}".format(output))
f67514bf9ed193c0a8ac68c2258913bb54df8a88 | Create save_py_source.py | save_py_source.py | save_py_source.py | import datetime, os, zipfile
exts = '.py pyui'.split()
zip_file_name = 'aa_source_code_%Y_%m_%d_%H_%M_%S.zip'
zip_file_name = datetime.datetime.strftime(datetime.datetime.now(), zip_file_name)
def get_filenames(in_dir=None):
def visit(_, dirname, names):
for name in names:
filename = os.path.join(dirname, name)
if os.path.isfile(filename):
filenames.append(filename)
in_dir = in_dir or os.curdir
filenames = []
os.path.walk(in_dir or os.curdir, visit, None)
return filenames
filenames = get_filenames()
if exts:
filenames = [fn for fn in filenames if os.path.splitext(fn)[1] in exts]
file_count = len(filenames)
print('{} files found.'.format(file_count))
if filenames:
with zipfile.ZipFile(zip_file_name, 'w') as zip_file:
for i, filename in enumerate(filenames):
zip_file.write(filename)
if not i % 50:
print('{} of {}: {}'.format(i, file_count, filename))
print('{}\n{} files copied into zip file: "{}".'.format('=' * 13, file_count, zip_file_name))
| Python | 0 | |
2519e7c8289a6045208013b0958fc4c9f49ff39a | lexographic permutations: python | lexographic_permutations/python/lexographic_permutations.py | lexographic_permutations/python/lexographic_permutations.py | import itertools
def permute(input):
if len(input) == 2:
return [input, input[::-1]]
permutations = []
for i in range(0,len(input)):
permutations.append(map(lambda x: input[i]+x, permute(input[:i]+input[i+1:])))
return sum(permutations, [])
def prepend(str1, str2):
return str1+str2
print permute("0123456789")[999999]
| Python | 0.999828 | |
c7c02febb43eb2466484f5c99d6dcc2d60e67e09 | add docker.py | zblogsite/settings/docker.py | zblogsite/settings/docker.py | from .base import *
DEBUG = True
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'zblog', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': '1234',
#'HOST': '127.0.0.1', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'HOST': 'db', # docker mysql.
'PORT': '3306', # Set to empty string for default.
'OPTIONS': {
'init_command': "Set sql_mode='STRICT_TRANS_TABLES'"
}
}
}
| Python | 0.000004 | |
f60f31c73deef7768af5eb45046a8848f2dc40c4 | Create draw_neural_net.py | draw/draw_neural_net.py | draw/draw_neural_net.py | import matplotlib.pyplot as plt
def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
'''
Draw a neural network cartoon using matplotilb.
:usage:
>>> fig = plt.figure(figsize=(12, 12))
>>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])
:parameters:
- ax : matplotlib.axes.AxesSubplot
The axes on which to plot the cartoon (get e.g. by plt.gca())
- left : float
The center of the leftmost node(s) will be placed here
- right : float
The center of the rightmost node(s) will be placed here
- bottom : float
The center of the bottommost node(s) will be placed here
- top : float
The center of the topmost node(s) will be placed here
- layer_sizes : list of int
List of layer sizes, including input and output dimensionality
'''
n_layers = len(layer_sizes)
v_spacing = (top - bottom)/float(max(layer_sizes))
h_spacing = (right - left)/float(len(layer_sizes) - 1)
# Nodes
for n, layer_size in enumerate(layer_sizes):
layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.
for m in xrange(layer_size):
circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/4.,
color='w', ec='k', zorder=4)
ax.add_artist(circle)
# Edges
for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.
layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.
for m in xrange(layer_size_a):
for o in xrange(layer_size_b):
line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],
[layer_top_a - m*v_spacing, layer_top_b - o*v_spacing], c='k')
ax.add_artist(line)
| Python | 0.000023 | |
041b55f3a9ded360146f6e2dda74a6b20b3e6f7e | Add scrape_results | scrape_results.py | scrape_results.py | from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup
driver = webdriver.Firefox()
driver.get("http://www.nitt.edu/prm/ShowResult.htm")
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('TextBox1').value=110113006;}());")
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Button1').click();}());")
sleep(1)
driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').selectedIndex = 1;document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').onchange();}());")
print "I have got the page to a specific student's result!!"
| Python | 0 | |
5745bf81a32915cb85a60093dc2a7123e5814767 | Add personal problem 001 | problems/001_parse_navs_from_markdown.py | problems/001_parse_navs_from_markdown.py | """从 Markdown 文本中解析出目录信息
现在有格式如下的文档:
```markdown
* [目录 1](chapter1/1.md)
* [目录 1.1](chapter1/1-1.md)
* [目录 1.1.1](chapter1/1-1-1.md)
* [目录 2](chapter2/1.md)
```
要求写一个解析器,解析返回所有目录信息,并包含层级关系。返回的示例数据如下。
```python
[
    {
        'name': '目录 1',
        'path': 'chapter1/1.md',
        'children': [
            {
                'name': '目录 1.1',
                'path': 'chapter1/1-1.md',
                'children': [
                    {'name': '目录 1.1.1', 'path': 'chapter1/1-1-1.md', 'children': []}
                ]
            }
        ]},
    {'name': '目录 2', 'path': 'chapter2/1.md', 'children': []}
]
"""
import re
import pprint
class Solution:
    def parse_navs(self, content):
        """Parse markdown bullet links into a nested navigation structure.

        Every entry is ``{'name': ..., 'path': ..., 'children': [...]}``;
        nesting follows the leading-whitespace depth of each bullet line.
        Lines that do not contain a ``[name](path)`` link are ignored.
        """
        sentinel = {'name': 'dummy', 'path': 'dummy', 'children': []}
        # Stack of (node, indent) frames; innermost open level is last.
        frames = [(sentinel, -1)]
        for raw_line in content.splitlines():
            link = re.match(r'.*\[(.+)\]\((.+)\)', raw_line)
            if link is None:
                continue  # not a bullet-link line
            ws = re.match(r'^\s+', raw_line)
            indent = 0 if ws is None else ws.span()[1]
            name, path = link.groups()
            node = {'name': name, 'path': path, 'children': []}
            prev_indent = frames[-1][1]
            if indent == prev_indent:
                # Sibling: replace the previous frame at this depth.
                frames.pop()
            elif indent < prev_indent:
                # Dedent: unwind to (and including) the frame at this depth.
                while frames.pop()[1] != indent:
                    pass
            # Deeper indent keeps the current frame as parent.
            parent = frames[-1][0]
            frames.append((node, indent))
            parent['children'].append(node)
        return sentinel['children']
if __name__ == '__main__':
    # Sample document: 'b' and 'c' are siblings under 'a'; 'd' nests under 'c'.
    summary = """
    * [a](content/preface/preface-chinese.md)
        * [b](content/chapter1/1.1-chinese.md)
        * [c](content/chapter1/1.1-chinese.md)
            * [d](content/chapter2/2.1-chinese.md)
"""
    solutions = [Solution]
    for s in solutions:
        result = s().parse_navs(summary)
        pprint.pprint(result, indent=4)
        # children[1] of 'a' is 'c'; its only child must be 'd'.
        assert result[0]['children'][1]['children'][0]['name'] == 'd'
| Python | 0.999571 | |
dc52b5914c4d0024458eefeb3b3576aa58692345 | Remove print | organizations/decorators.py | organizations/decorators.py | # encoding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from seaserv import get_user_current_org
def org_staff_required(func):
    """
    Decorator for views that checks the user is org staff.

    Non-staff users (or users with no current org) are redirected to the
    'myhome' view instead of reaching the wrapped view.
    """
    def _decorated(request, *args, **kwargs):
        user = request.user.username
        # Optional per-org URL prefix forwarded from the URLconf.
        url_prefix = kwargs.get('url_prefix', '')
        org = get_user_current_org(user, url_prefix)
        # NOTE(review): org.is_staff presumably reflects the requesting
        # user's staff status within this org -- confirm against seaserv.
        if org and org.is_staff:
            return func(request, *args, **kwargs)
        return HttpResponseRedirect(reverse('myhome'))
    return _decorated
| # encoding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from seaserv import get_user_current_org
def org_staff_required(func):
"""
Decorator for views that checks the user is org staff.
"""
def _decorated(request, *args, **kwargs):
user = request.user.username
url_prefix = kwargs.get('url_prefix', '')
org = get_user_current_org(user, url_prefix)
print url_prefix
print org._dict
if org and org.is_staff:
return func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('myhome'))
return _decorated
| Python | 0.000016 |
55bf42057bcd9e14d964b2064f9322c164ba91ff | Test request construction (#91) | test/test_requests.py | test/test_requests.py | import unittest
import requests
import requests_mock
import tableauserverclient as TSC
class RequestTests(unittest.TestCase):
    """Exercise the endpoint's _make_request helper, checking that paging
    options, auth token, content type and raw body are all propagated onto
    the outgoing HTTP request."""

    def setUp(self):
        self.server = TSC.Server('http://test')

        # Fake sign in
        self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
        self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'

        self.baseurl = self.server.workbooks.baseurl

    def test_make_get_request(self):
        with requests_mock.mock() as m:
            m.get(requests_mock.ANY)
            url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
            opts = TSC.RequestOptions(pagesize=13, pagenumber=13)
            resp = self.server.workbooks._make_request(requests.get,
                                                       url,
                                                       content=None,
                                                       request_object=opts,
                                                       auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
                                                       content_type='text/xml')

            # Paging options must be serialized into the query string.
            self.assertEquals(resp.request.query, 'pagenumber=13&pagesize=13')
            self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
            self.assertEquals(resp.request.headers['content-type'], 'text/xml')

    def test_make_post_request(self):
        with requests_mock.mock() as m:
            m.post(requests_mock.ANY)
            url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
            resp = self.server.workbooks._make_request(requests.post,
                                                       url,
                                                       content=b'1337',
                                                       request_object=None,
                                                       auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
                                                       content_type='multipart/mixed')
            self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
            self.assertEquals(resp.request.headers['content-type'], 'multipart/mixed')
            # Raw bytes payloads must be passed through untouched.
            self.assertEquals(resp.request.body, b'1337')
| Python | 0 | |
69c3e33df15dca13cf310062216525dfbe98639e | add spectandus for index analysis | spectandus.py | spectandus.py | #!/usr/bin/env python
# Author: Eben Olson <eben.olson@gmail.com>
# Licensed under the MIT license <http://opensource.org/licenses/MIT>
import logging
from colorlog import ColoredFormatter
import plac
import sys
import json
from fs import zipfs
from collections import defaultdict
formatter = ColoredFormatter(
"%(log_color)s%(levelname)-8s%(reset)s %(white)s%(message)s",
datefmt=None,
reset=True,
log_colors={
'DEBUG': 'cyan',
'INFO': 'green',
'WARNING': 'yellow',
'ERROR': 'red',
'CRITICAL': 'red',
}
)
stream = logging.StreamHandler()
stream.setFormatter(formatter)
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(stream)
def convert_index(reference, outfile):
    """Convert a ZipFS directory index (one file per indexed path, whose
    CONTENT is that path's hash) into a JSON hashmap {hash: [paths...]}
    written to `outfile`."""
    logger.info(u'Converting ZipFS directory index {} to hashmap'.format(reference))
    hashes = defaultdict(list)
    count = 0
    # Open the zip with the filesystem's own name encoding.
    encoding = sys.getfilesystemencoding()
    with zipfs.ZipFS(reference, 'r', encoding=encoding) as hashfs:
        for logfilename in hashfs.walkfiles('/'):
            filehash = hashfs.open(logfilename).read()
            hashes[filehash].append(logfilename)
            count += 1
    logger.info(u'{} files with {} unique hashes found in index'.format(count, len(hashes)))
    logger.info(u'Writing results to {}'.format(outfile))
    json.dump(hashes, open(outfile, 'w'), indent=4)
def list_duplicates(reference, outfile):
    """Write to `outfile` every hash in the `reference` JSON hashmap that
    maps to two or more files (i.e. duplicated content)."""
    logger.info(u'Searching for duplicated files in {}'.format(reference))
    hashes = json.load(open(reference))
    duplicates = dict(
        (filehash, files)
        for filehash, files in hashes.items()
        if len(files) > 1
    )
    logger.info(u'{} hashes with multiple files found out of {} in reference'.format(len(duplicates), len(hashes)))
    logger.info(u'Writing results to {}'.format(outfile))
    json.dump(duplicates, open(outfile, 'w'), indent=4)
def list_unmatched(reference, target, outfile):
    """Write to `outfile` every hash present in the `target` JSON hashmap
    but absent from the `reference` hashmap (i.e. files not backed up)."""
    logger.info(u'Searching for files in {} not in reference {}'.format(target, reference))
    reference = json.load(open(reference))
    target = json.load(open(target))
    logger.info(u'Reference has {} hashes, target has {}'.format(len(reference), len(target)))
    unmatched = dict(
        (filehash, files)
        for filehash, files in target.items()
        if filehash not in reference
    )
    if len(unmatched):
        logger.warn(u'{} hashes were not matched in reference'.format(len(unmatched)))
    else:
        logger.info(u'All hashes in target were found in reference')
    logger.info(u'Writing results to {}'.format(outfile))
    json.dump(unmatched, open(outfile, 'w'), indent=4)
@plac.annotations(
    convert=('List all files in reference (zipfile) as json hashmap', 'flag', 'c'),
    dupcheck=('Show all hashes in reference (json) with multiple files', 'flag', 'm'),
    newcheck=('Show all hashes in target (json) not in reference (json)', 'option', 'n'),
    debug=('Show all log output', 'flag', 'd'),
    reference='Reference index',
)
def main(convert, dupcheck, newcheck, debug, reference, outfile='result.json'):
    """Dispatch to one of the index-analysis commands based on the CLI flags.

    The flags are not mutually exclusive: each requested analysis runs in
    turn, all writing to the same `outfile`.
    """
    if debug:
        logger.setLevel(logging.DEBUG)
    if convert:
        convert_index(reference, outfile)
    if dupcheck:
        list_duplicates(reference, outfile)
    if newcheck:
        # `newcheck` carries the target index path; `reference` is the baseline.
        list_unmatched(reference, newcheck, outfile)


if __name__ == '__main__':
    plac.call(main)
| Python | 0 | |
35f98c14a74e207c616fcb57538bb176842c0d1e | Add procfile and wsgi entrypoint | nhs/wsgi.py | nhs/wsgi.py | """
WSGI config for Nhs Prescriptions project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# Fall back to the project's settings module unless the environment already
# names one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nhs.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| Python | 0 | |
def demo_inner_product():
    """Compute, print and return the inner product of [0..9] with its
    element-wise squares, then print a closed-form verification
    (sum of cubes).

    Ported from Python 2: print statements became function calls and
    `reduce` is imported from functools (it is no longer a builtin).
    """
    from functools import reduce  # local import: Py3 moved reduce out of builtins
    vec0 = range(10)
    vec1 = [i ** 2 for i in range(10)]
    inner = reduce(lambda l, r: l + r,
                   map(lambda ele: ele[0] * ele[1], zip(vec0, vec1)), 0)
    print('inner product:', inner)
    print('verify:', sum([i ** 3 for i in range(10)]))
    return inner


if __name__ == '__main__':
    demo_inner_product()
| Python | 0 | |
1193f4b234ec4c7d3f390dbaa39883b4d4d41802 | Create multi_threaded_head_request.py | multi_threaded_head_request.py | multi_threaded_head_request.py | #!/usr/bin/env python
'''
Author: Christopher Duffy
Date: April 2015
Name: multi_threaded.py
Purpose: To identify live web applications with a list of IP addresses, using concurrent processes
'''
import urllib2, argparse, sys, math, threading, logging, Queue, time
queue = Queue.Queue()
lock = threading.Lock()
class Agent(threading.Thread):
    """Worker thread: pulls hostnames from the shared queue and probes each
    with HTTP and HTTPS HEAD requests, logging which services respond."""

    def __init__(self, queue, logger, verbose):
        threading.Thread.__init__(self)
        self.queue = queue
        self.logger = logger
        self.verbose = verbose

    def run(self):
        while True:
            host = self.queue.get()
            print("[*] Testing %s") % (str(host))
            target = "http://" + host
            target_secure = "https://" + host
            # --- Plain HTTP probe ---
            try:
                request = urllib2.Request(target)
                request.get_method = lambda : 'HEAD'
                response = urllib2.urlopen(request)
            except Exception:
                with lock:
                    self.logger.debug("[-] No web server at %s reported by thread %s" % (str(target), str(threading.current_thread().name)))
                    print("[-] No web server at %s reported by thread %s") % (str(target), str(threading.current_thread().name))
                response = None
            if response != None:
                with lock:
                    self.logger.debug("[+] Response from %s reported by thread %s" % (str(target), str(threading.current_thread().name)))
                    print("[*] Response from insecure service on %s reported by thread %s") % (str(target), str(threading.current_thread().name))
                    self.logger.debug(response.info())
            # --- HTTPS probe ---
            try:
                # BUG FIX: the original called urlopen on `request_secure`
                # before ever constructing it, so the bare except fired and
                # every host was reported as having no secure web server.
                request_secure = urllib2.Request(target_secure)
                request_secure.get_method = lambda : 'HEAD'
                response_secure = urllib2.urlopen(request_secure)
            except Exception:
                with lock:
                    self.logger.debug("[-] No secure web server at %s reported by thread %s" % (str(target_secure), str(threading.current_thread().name)))
                    print("[-] No secure web server at %s reported by thread %s") % (str(target_secure), str(threading.current_thread().name))
                response_secure = None
            if response_secure != None:
                with lock:
                    self.logger.debug("[+] Secure web server at %s reported by thread %s" % (str(target_secure), str(threading.current_thread().name)))
                    print("[*] Response from secure service on %s reported by thread %s") % (str(target_secure), str(threading.current_thread().name))
                    self.logger.debug(response_secure.info())
            # Execution is complete
            self.queue.task_done()
def main():
    """Parse CLI arguments, configure logging, and probe hosts concurrently.

    Reads a newline-delimited host file (-t), spawns -m daemon Agent
    worker threads, feeds every host into the shared work queue, and
    blocks until the queue has been fully processed.
    """
    usage = '''usage: %(prog)s [-t hostfile] [-f logfile.log] [-m 2] -q -v -vv -vvv'''
    parser = argparse.ArgumentParser(usage=usage)
    parser.add_argument("-t", action="store", dest="targets", default=None, help="Filename for hosts to test")
    parser.add_argument("-f", "--filename", type=str, action="store", dest="filename", default="xml_output", help="The filename that will be used to create an XLSX")
    parser.add_argument("-m", "--multi", action="store", dest="threads", default=1, type=int, help="Number of threads, defaults to 1")
    parser.add_argument("-l", "--logfile", action="store", dest="log", default="results.log", type=str, help="The log file to output the results")
    parser.add_argument("-v", action="count", dest="verbose", default=1, help="Verbosity level, defaults to one, this outputs each command and result")
    parser.add_argument("-q", action="store_const", dest="verbose", const=0, help="Sets the results to be quiet")
    parser.add_argument('--version', action='version', version='%(prog)s 0.42b')
    args = parser.parse_args()
    # Argument validation: a targets file is mandatory.
    if len(sys.argv) == 1 or args.targets is None:
        parser.print_help()
        sys.exit(1)
    targets = args.targets  # file listing hosts to test, one per line
    verbose = args.verbose  # verbosity level, handed through to the workers
    threads = args.threads  # number of worker threads to spawn
    # NOTE: args.filename is accepted for CLI compatibility but never used.
    # Normalise the log file name.  BUG FIX: the handler was previously
    # constructed from args.log, silently discarding this normalisation.
    log = args.log
    if ".log" not in log:
        log = log + ".log"
    level = logging.DEBUG
    # Renamed from `format` to avoid shadowing the builtin of the same name.
    log_format = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s")
    logger_obj = logging.getLogger()
    file_handler = logging.FileHandler(log)
    file_handler.setFormatter(log_format)
    logger_obj.addHandler(file_handler)
    logger_obj.setLevel(level)
    # Load the targets and strip the trailing newline from every entry.
    with open(targets) as f:
        targets_list = [line.rstrip() for line in f.readlines()]
    # Spawn daemon workers so the process can exit once the queue drains.
    for _ in range(0, threads):
        worker = Agent(queue, logger_obj, verbose)
        worker.setDaemon(True)
        worker.start()
    # Enqueue all work, then wait for every item to be marked task_done().
    for target in targets_list:
        queue.put(target)
    queue.join()


if __name__ == '__main__':
    main()
| Python | 0 | |
# MongoDB connection parameters (server address, port, database name).
mongorc = dict(
    host='127.0.0.1',  # database server address (local instance)
    port=27017,        # default MongoDB TCP port
    db='demo',         # database name to select
)
| Python | 0.000001 | |
f3325695a78f528af6f3c2adb6024dc71405af8f | Create kaynaksız_sil.py | kaynaksız_sil.py | kaynaksız_sil.py | # -*- coding: utf-8 -*-
# !/usr/bin/python
# Python 2 maintenance bot for Turkish Wikipedia (itervalues(), .next(),
# ur'' literals and print statements below are all Python-2-only).
#
# Purpose: visit every page transcluding {{Kaynaksız}} ("unreferenced"),
# count the bare <ref> opening tags in the page text, and when at least one
# reference exists, strip the {{Kaynaksız}} template from the lead section
# and save the page with a Turkish edit summary.
# NOTE: BeautifulSoup and random are imported but never used below.
from bs4 import BeautifulSoup
import requests
import mavri
import re
import random
# Log in via the local `mavri` helper; `xx` carries the authenticated
# session cookies reused for the edit request at the bottom.
xx= mavri.login('tr.wikipedia','Mavrikant Bot')
wiki='tr.wikipedia'
template='Şablon:Kaynaksız'
# MediaWiki continuation token: '' requests the first batch; the sentinel
# 'DONE' stops the loop once the API no longer returns a continue token.
ticontinue = ''
while ticontinue != 'DONE':
    # Fetch up to 500 pages that transclude the template (prop=transcludedin).
    allpages= requests.get('https://' + wiki + '.org/w/api.php?action=query&utf8&format=json&tiprop=title&titles='+template+'&prop=transcludedin&tilimit=500&ticontinue='+str(ticontinue))
    try:
        ticontinue =allpages.json()['continue']['ticontinue']
    except:
        # A missing 'continue' key means this was the last batch; the bare
        # except is (ab)used here as the pagination terminator.
        ticontinue = 'DONE'
    for page in allpages.json()['query']['pages'].itervalues().next()['transcludedin']:
        title = page['title']
        #print title
        # Count bare <ref> opening tags in the full page wikitext.  The
        # pattern does not match named refs such as <ref name=...>.
        content = mavri.content_of_page(wiki, title)
        kaynak_sayisi= len(re.findall(ur'<\s?ref\s?\>', content))
        print kaynak_sayisi
        if (kaynak_sayisi>0):
            print title
            # The page now has references: remove the {{Kaynaksız}} template
            # from section 0 (the lead) only.
            content = mavri.content_of_section(wiki,title,0,xx)
            content = re.sub(ur'\{\{\s?[Kk]aynaksız[^\}]*\}\}\s?\n?', '', content)
            # Fetch a fresh edit token (legacy action=tokens API) and merge
            # its cookies with the login-session cookies.
            params3 = '?format=json&action=tokens'
            r3 = requests.get('https://' + wiki + '.org/w/api.php' + params3, cookies=xx.cookies)
            edit_token = r3.json()['tokens']['edittoken']
            edit_cookie = xx.cookies.copy()
            edit_cookie.update(r3.cookies)
            # Save the cleaned lead section.  The summary (kept in Turkish)
            # records the removal and the number of references found.
            payload = {'action': 'edit', 'assert': 'user', 'format': 'json', 'utf8': '', 'section': str(0), 'text': content, 'summary': '-Kaynaksız şablonu, '+str(kaynak_sayisi)+' adet kaynak var', 'title': title, 'token': edit_token, 'bot': ''}
            requests.post('https://' + wiki + '.org/w/api.php', data=payload, cookies=edit_cookie)
exit(0)
| Python | 0.000032 | |
118e47c2bc307d8de447e9d37973feca44763ab5 | Create __init__.py | packs/astral/actions/lib/__init__.py | packs/astral/actions/lib/__init__.py | from .BaseAction import BaseAction
| Python | 0.000429 | |
f5c56152771fbafc5ac9161ccd453a240bfca5cc | Add get_history example. | examples/get_history.py | examples/get_history.py | import sys
sys.path.append('../')  # make the zabbix module in the parent dir importable
import zabbix
from datetime import datetime
from datetime import timedelta
from calendar import timegm
# Load configuration by executing config.py into a dict (Python 2 only:
# execfile was removed in Python 3).  Expected keys: server, user, password,
# http_host_id, http_processor_time_id.
config = {}
execfile("config.py", config)
# Create an API instance pointed at the configured Zabbix server.
server = config["server"]
api = zabbix.Api(server)
# Authenticate the session.
username = config["user"]
password = config["password"]
api.login(username, password)
# Identifiers of the monitored host and of its "processor time" item.
http_host_id = config["http_host_id"]
http_processor_time = config["http_processor_time_id"]
# Query window: the last 100 minutes as Unix timestamps.
# NOTE(review): datetime.now() is naive local time and utctimetuple() does
# not convert it, so these values are local wall-clock read as UTC; both
# bounds are then shifted back by a hard-coded 150000 s (~41.7 h) --
# presumably compensating for that skew on the demo server.  Confirm
# before reusing this offset elsewhere.
time_from = timegm((datetime.now() - timedelta(minutes = 100)).utctimetuple()) - 150000
time_till = timegm(datetime.now().utctimetuple()) - 150000
# Fetch and print float-typed history values for the item in that window.
print api.get_history('float', http_host_id, http_processor_time, time_from, time_till)
# Log out of the API session.
api.logout()
| Python | 0 | |
26afdc032087693d274966a803a6bb3c77d17549 | add request example | examples/request/req.py | examples/request/req.py | from app import Application
def dump(request):
    """Render every inspected request attribute as a plain-text Response.

    Useful as a catch-all debug handler: the template covers the method,
    path, headers, parsed query/form data and connection details.
    """
    template = """
Method: {0.method}
Path: {0.path}
Version: {0.version}
Headers: {0.headers}
Match: {0.match_dict}
Body: {0.body}
QS: {0.query_string}
query: {0.query}
mime_type: {0.mime_type}
encoding: {0.encoding}
form: {0.form}
keep_alive: {0.keep_alive}
route: {0.route}
hostname: {0.hostname}
port: {0.port}
remote_addr: {0.remote_addr}
    """
    rendered = template.strip().format(request)
    return request.Response(text=rendered)
if __name__ == '__main__':
    # Demo wiring: mount the same dump handler on a static route and on a
    # parameterised one so parameterised matches can be exercised as well.
    app = Application()
    app.router.add_route('/', dump)
    app.router.add_route('/{a}/{b}', dump)
    app.serve()
| Python | 0 | |
b84a2667b5071ede3eb983364195c3a2d3c97543 | Create MQTTstage.py | MQTTstage.py | MQTTstage.py | #!/usr/bin/python
#Check if the
def CheckDirectories():
| Python | 0.000006 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.