commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
92138e7ab37e6a69eb3808f9888b52b9e38deaa0 | remove duplicate classifier | setup.py | setup.py | from distutils.core import setup
from require import __version__
version_str = ".".join(str(n) for n in __version__)
setup(
name = "django-require",
version = version_str,
license = "BSD",
description = "A Django staticfiles post-processor for optimizing with RequireJS.",
author = "Dave Hall",
author_email = "dave@etianen.com",
url = "https://github.com/etianen/django-require",
packages = [
"require",
"require.management",
"require.management.commands",
"require.templatetags",
],
package_data = {
"require": [
"resources/*.jar",
"resources/*.js",
"resources/tests/*.js",
],
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
],
)
| from distutils.core import setup
from require import __version__
version_str = ".".join(str(n) for n in __version__)
setup(
name = "django-require",
version = version_str,
license = "BSD",
description = "A Django staticfiles post-processor for optimizing with RequireJS.",
author = "Dave Hall",
author_email = "dave@etianen.com",
url = "https://github.com/etianen/django-require",
packages = [
"require",
"require.management",
"require.management.commands",
"require.templatetags",
],
package_data = {
"require": [
"resources/*.jar",
"resources/*.js",
"resources/tests/*.js",
],
},
classifiers = [
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
],
)
| Python | 0.999991 |
ac94d2cf9b4ab775fb7a125a83abc4fa59d56136 | Add setuptools build | setup.py | setup.py | from setuptools import setup, find_packages
import os
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.md')) as f:
long_description = f.read()
setup(
name='pyshadowcopy',
version='0.0.1',
description='Python class to work with Shadow Copy on Windows',
long_description=long_description,
url='https://github.com/sblosser/pyshadowcopy',
author='sblosser',
license='MIT',
keywords=['Windows', 'VSS', 'win32'],
py_modules=['vss'],
install_requires=['pypiwin32'],
)
| Python | 0 | |
edcf0e371ea3430c7d0c515dbf59e39e3522c076 | Add license information to setup.py | setup.py | setup.py | from distutils.core import setup
import loginurl
setup(name='django-loginurl',
version=loginurl.__version__,
description='Allowing an anonymous user to log in by only visiting a URL',
author='Fajran Iman Rusadi',
author_email='fajran@gmail.com',
url='http://github.com/fajran/django-loginurl/',
license='BSD',
download_url='http://github.com/fajran/django-loginurl/tarball/v0.1.2',
packages=['loginurl', 'loginurl.management', 'loginurl.management.commands'],
package_dir={'loginurl': 'loginurl'},
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'],
)
| from distutils.core import setup
import loginurl
setup(name='django-loginurl',
version=loginurl.__version__,
description='Allowing an anonymous user to log in by only visiting a URL',
author='Fajran Iman Rusadi',
author_email='fajran@gmail.com',
url='http://github.com/fajran/django-loginurl/',
download_url='http://github.com/fajran/django-loginurl/tarball/v0.1.2',
packages=['loginurl', 'loginurl.management', 'loginurl.management.commands'],
package_dir={'loginurl': 'loginurl'},
classifiers=['Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Utilities'],
)
| Python | 0 |
ef53ea9d1754ce5056b7b872ad0b7cd99e4af2bc | Add setup file | setup.py | setup.py | # -*- coding: utf-8 -*-
import re
from distutils.core import setup
version = re.search(
'^__version__\s*=\*"(.*)"',
open('bundigo/bundigo.py').read(),
re.M
).group(1)
with open('README.md', 'rb') as f:
long_descr = f.read().decode('utf-8')
setup(
name = 'bundigo',
packages = ['bundigo'],
entry_points = {
'console_scripts': ['bundigo = bundigo.bundigo.main']
},
version = version,
description = "Your one-stop shop for starting a software project",
long_description = long_descr,
license = 'MIT',
author = 'Jared Smith',
author_email = 'jared@jaredsmith.io',
url = 'https://jaredmichaelsmith.com/bundigo',
install_requires=[
],
)
| Python | 0.000001 | |
d9b844db2dc0453c073050c6ce7db18c3d48b57c | add setup.py file | setup.py | setup.py | import setuptools
setuptools.setup(
install_requires=['pyyaml'],
author = 'Caleb Boylan',
name = 'apt-package-mirror',
description = 'Python script for running an apt package mirror',
author_email = 'calebboylan@gmail.com',
url = 'https://github.com/squidboylan/apt-package-mirror',
version = '0.1.1',
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
],
packages=setuptools.find_packages(),
entry_points = {
'console_scripts': ['apt-package-mirror=mirror:main'],
}
)
| Python | 0.000001 | |
ac2f2b72c1f653f15058b300c82060c90adf146b | Update for 1.3.0 release | setup.py | setup.py | # Importing these adds a 'bdist_mpkg' option that allows building binary
# packages on OS X.
try:
import setuptools
import bdist_mpkg
except ImportError:
pass
import os
import numpy.distutils.core as core
# Configure our C modules that are built with f2py.
tridiag = core.Extension(name = 'dadi.tridiag',
sources = ['dadi/tridiag.pyf', 'dadi/tridiag.c'])
int_c = core.Extension(name = 'dadi.integration_c',
sources = ['dadi/integration_c.pyf',
'dadi/integration1D.c',
'dadi/integration2D.c',
'dadi/integration3D.c',
'dadi/integration_shared.c',
'dadi/tridiag.c'])
# If we're building a distribution, try to update svnversion. Note that this
# fails silently.
for arg in os.sys.argv:
if arg.count('sdist') or arg.count('bdist'):
os.system("svn up")
os.system("svn info > dadi/svnversion")
core.setup(name='dadi',
version='1.3.0',
author='Ryan Gutenkunst',
author_email='rng7@cornell.edu',
url='http://dadi.googlecode.com',
ext_modules = [tridiag, int_c],
scripts=['scripts/ms_jsfs.py'],
packages=['dadi'],
package_data = {'dadi':['svnversion'],
'tests':['IM.fs']},
license='BSD'
)
| # Importing these adds a 'bdist_mpkg' option that allows building binary
# packages on OS X.
try:
import setuptools
import bdist_mpkg
except ImportError:
pass
import os
import numpy.distutils.core as core
# Configure our C modules that are built with f2py.
tridiag = core.Extension(name = 'dadi.tridiag',
sources = ['dadi/tridiag.pyf', 'dadi/tridiag.c'])
int_c = core.Extension(name = 'dadi.integration_c',
sources = ['dadi/integration_c.pyf',
'dadi/integration1D.c',
'dadi/integration2D.c',
'dadi/integration3D.c',
'dadi/integration_shared.c',
'dadi/tridiag.c'])
# If we're building a distribution, try to update svnversion. Note that this
# fails silently.
for arg in os.sys.argv:
if arg.count('sdist') or arg.count('bdist'):
os.system("svn up")
os.system("svn info > dadi/svnversion")
core.setup(name='dadi',
version='1.2.3',
author='Ryan Gutenkunst',
author_email='rng7@cornell.edu',
url='http://dadi.googlecode.com',
ext_modules = [tridiag, int_c],
scripts=['scripts/ms_jsfs.py'],
packages=['dadi'],
package_data = {'dadi':['svnversion'],
'tests':['IM.fs']},
license='BSD'
)
| Python | 0 |
916cdddfa1e861b8402bdda935c2a9c46a5b6566 | Bump version to 1.2. | setup.py | setup.py | import glob
import os
import platform
import subprocess
import sys
from setuptools import setup, Command, Extension
from setuptools.command.test import test as TestCommand
def define_extensions(file_ext):
return [Extension("lightfm.lightfm_fast",
['lightfm/lightfm_fast%s' % file_ext],
extra_link_args=["-fopenmp"],
extra_compile_args=['-fopenmp',
'-march=native',
'-ffast-math'])]
def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
if 'darwin' in platform.platform().lower():
gcc_binaries = sorted(glob.glob('/usr/local/bin/gcc-*'))
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.')
class Cythonize(Command):
"""
Compile the extension .pyx files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import Cython
from Cython.Build import cythonize
cythonize(define_extensions('.pyx'))
class Clean(Command):
"""
Clean build files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pth = os.path.dirname(os.path.abspath(__file__))
subprocess.call(['rm', '-rf', os.path.join(pth, 'build')])
subprocess.call(['rm', '-rf', os.path.join(pth, 'lightfm.egg-info')])
subprocess.call(['find', pth, '-name', 'lightfm*.pyc', '-type', 'f', '-delete'])
subprocess.call(['rm', os.path.join(pth, 'lightfm', 'lightfm_fast.so')])
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
set_gcc()
setup(
name='lightfm',
version='1.2',
description='LightFM recommendation model',
url='https://github.com/lyst/lightfm',
download_url='https://github.com/lyst/lightfm/tarball/1.2',
packages=['lightfm'],
install_requires=['numpy'],
tests_require=['pytest', 'requests', 'scikit-learn', 'scipy'],
cmdclass={'test': PyTest, 'cythonize': Cythonize, 'clean': Clean},
author='Lyst Ltd (Maciej Kula)',
author_email='data@ly.st',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
ext_modules=define_extensions('.c')
)
| import glob
import os
import platform
import subprocess
import sys
from setuptools import setup, Command, Extension
from setuptools.command.test import test as TestCommand
def define_extensions(file_ext):
return [Extension("lightfm.lightfm_fast",
['lightfm/lightfm_fast%s' % file_ext],
extra_link_args=["-fopenmp"],
extra_compile_args=['-fopenmp',
'-march=native',
'-ffast-math'])]
def set_gcc():
"""
Try to find and use GCC on OSX for OpenMP support.
"""
if 'darwin' in platform.platform().lower():
gcc_binaries = sorted(glob.glob('/usr/local/bin/gcc-*'))
if gcc_binaries:
_, gcc = os.path.split(gcc_binaries[-1])
os.environ["CC"] = gcc
else:
raise Exception('No GCC available. Install gcc from Homebrew '
'using brew install gcc.')
class Cythonize(Command):
"""
Compile the extension .pyx files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import Cython
from Cython.Build import cythonize
cythonize(define_extensions('.pyx'))
class Clean(Command):
"""
Clean build files.
"""
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
pth = os.path.dirname(os.path.abspath(__file__))
subprocess.call(['rm', '-rf', os.path.join(pth, 'build')])
subprocess.call(['rm', '-rf', os.path.join(pth, 'lightfm.egg-info')])
subprocess.call(['find', pth, '-name', 'lightfm*.pyc', '-type', 'f', '-delete'])
subprocess.call(['rm', os.path.join(pth, 'lightfm', 'lightfm_fast.so')])
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
set_gcc()
setup(
name='lightfm',
version='1.1',
description='LightFM recommendation model',
url='https://github.com/lyst/lightfm',
download_url='https://github.com/lyst/lightfm/tarball/1.1',
packages=['lightfm'],
install_requires=['numpy'],
tests_require=['pytest', 'requests', 'scikit-learn', 'scipy'],
cmdclass={'test': PyTest, 'cythonize': Cythonize, 'clean': Clean},
author='Lyst Ltd (Maciej Kula)',
author_email='data@ly.st',
license='MIT',
classifiers=['Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Topic :: Scientific/Engineering :: Artificial Intelligence'],
ext_modules=define_extensions('.c')
)
| Python | 0 |
379488ee2980e1b33753d098d88fb1139a69deeb | add setup.py | setup.py | setup.py | from setuptools import setup, find_packages
setup(
name="hs-onliner",
version="0.0.1",
author="Firemark & Kytes",
author_email="marpiechula@gmail.com",
description="Site to view who will be in hackerspace every week."
license="MIT",
keywords="example documentation tutorial",
url="https://github.com/firemark/hs-onliner",
packages=find_packages(),
install_requires=(
'Flask==0.10.1'
)
) | Python | 0.000001 | |
b63a6ababb1a66ed3766399328c5b9c4ac0a7ce3 | Bump version | setup.py | setup.py | from setuptools import setup
setup(
name="funsize",
version="0.29",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.16",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
| from setuptools import setup
setup(
name="funsize",
version="0.28",
description="Funsize Scheduler",
author="Mozilla Release Engineering",
packages=["funsize"],
include_package_data=True,
# Not zip safe because we have data files in the package
zip_safe=False,
entry_points={
"console_scripts": [
"funsize-scheduler = funsize.scheduler:main",
],
},
install_requires=[
"amqp",
"anyjson",
"argparse",
"cffi",
# PGPy depends on this _specific_ version of cryptography
"cryptography==0.6",
"enum34",
"kombu",
"PGPy",
"pycparser",
"PyHawk-with-a-single-extra-commit",
"Jinja2",
"PyYAML",
"redo",
# Because taskcluster hard pins this version...
"requests==2.4.3",
"singledispatch",
"six",
"taskcluster>=0.0.16",
"wsgiref",
],
tests_require=[
'hypothesis',
'pytest',
'mock',
],
)
| Python | 0 |
a281bad5905da4710314d657943cc145b7d748d4 | add minimal setup.py | setup.py | setup.py | import setuptools
setuptools.setup(
name='tvb-hpc',
version='0.0',
description='HPC code generation for TVB',
author='TVB-HPC Contributors',
url='https://github.com/the-virtual-brain/tvb-hpc',
packages=setuptools.find_packages(),
)
| Python | 0.000001 | |
b383fadf43d3fb31d1501c780d4436717cc43776 | add setup.py | setup.py | setup.py | import os
from setuptools import setup, find_packages
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
setup(
name='django-payline-dotir',
version='0.1',
author='Mahdi Bornazadeh',
author_email='Bornazadeh@gmail.com',
description='Persian payline.ir payment gateway in django.',
long_description=open("README.md", 'rb').read().decode('utf-8'),
license='BSD License',
url='http://www.bornazadeh.ir/payline',
zip_safe=False,
include_package_data=True,
packages=find_packages(),
install_requires=[
"requests",
],
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Web Environment",
"Framework :: Django",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Software Development :: Libraries :: "
"Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
],
)
| Python | 0.000001 | |
6bc555b93e09ab18a5778487cf3eb47329e83098 | Set version to our own. | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="python-instagram",
version="0.8.0powll1",
description="Instagram API client",
license="MIT",
install_requires=["simplejson","httplib2"],
author="Instagram, Inc",
author_email="apidevelopers@instagram.com",
url="http://github.com/Instagram/python-instagram",
packages = find_packages(),
keywords= "instagram",
zip_safe = True)
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(name="python-instagram",
version="0.8.0",
description="Instagram API client",
license="MIT",
install_requires=["simplejson","httplib2"],
author="Instagram, Inc",
author_email="apidevelopers@instagram.com",
url="http://github.com/Instagram/python-instagram",
packages = find_packages(),
keywords= "instagram",
zip_safe = True)
| Python | 0 |
68fac699c5506f80ab727a4c569d8797294584bd | Bump the version number. | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
setup(
name='hapipy',
version='2.9.1',
description="A python wrapper around HubSpot's APIs",
long_description=open('README.md').read(),
author='HubSpot Dev Team',
author_email='devteam+hapi@hubspot.com',
url='https://github.com/HubSpot/hapipy',
download_url='https://github.com/HubSpot/hapipy/tarball/v2.9.0',
license='LICENSE.txt',
packages=['hapi', 'hapi.mixins'],
install_requires=[
'nose==1.1.2',
'unittest2==0.5.1',
'simplejson>=2.1.2'
],
)
| #!/usr/bin/env python
from setuptools import setup
setup(
name='hapipy',
version='2.9.0',
description="A python wrapper around HubSpot's APIs",
long_description=open('README.md').read(),
author='HubSpot Dev Team',
author_email='devteam+hapi@hubspot.com',
url='https://github.com/HubSpot/hapipy',
download_url='https://github.com/HubSpot/hapipy/tarball/v2.9.0',
license='LICENSE.txt',
packages=['hapi', 'hapi.mixins'],
install_requires=[
'nose==1.1.2',
'unittest2==0.5.1',
'simplejson>=2.1.2'
],
)
| Python | 0.000002 |
c1c49b0e1718331663ee109f3417aff97fd23b70 | Add minimal setup.py for RTD | setup.py | setup.py | # Minimal setup.py to get readthedocs working, not recommended for real use
from distutils.core import setup
setup(name="h11",
version="0.0.0",
packages=["h11"],
)
| Python | 0 | |
f34dd8ab047275b8d29366599621443a8bc468c9 | Add launcher script for nbconvert | databaker/databaker_nbconvert.py | databaker/databaker_nbconvert.py | #!/usr/bin/env python
import os
import subprocess
import sys
def main(argv):
if len(argv) == 0 or len(argv) > 2:
print("Usage: databaker_process.py <notebook_file> <input_file>")
print()
print("<input_file> is optional; it replaces DATABAKER_INPUT_FILE")
print("in the notebook.")
print("The input file should also be in the same directory as the")
print("notebook.")
sys.exit(1)
process_env = os.environ.copy()
if len(argv) == 2:
process_env['DATABAKER_INPUT_FILE'] = argv[1]
# TODO get custom templates working; according to this:
# https://github.com/jupyter/nbconvert/issues/391
# they should work, but I get TemplateNotFound when using absolute path
# for template.
cmd_line = ['jupyter', 'nbconvert', '--to', 'html', '--execute', argv[0]]
print("Running:", ' '.join(cmd_line))
subprocess.call(args=cmd_line, env=process_env)
if __name__ == '__main__':
main(sys.argv[1:])
| Python | 0 | |
a18c6d560a02049bf3dae08bebf6d3598f29c35d | Add pywinauto automated test for top-down view | contrib/automation_tests/orbit_top_down.py | contrib/automation_tests/orbit_top_down.py | """
Copyright (c) 2020 The Orbit Authors. All rights reserved.
Use of this source code is governed by a BSD-style license that can be
found in the LICENSE file.
"""
"""Inspect the top-down view in Orbit using pywinauto.
Before this script is run there needs to be a gamelet reserved and
"hello_ggp_standalone" has to be started.
The script requires absl and pywinauto. Since pywinauto requires the bitness of
the python installation to match the bitness of the program under test it needs
to by run from 64 bit python.
This automation script covers a basic workflow:
- start Orbit
- connect to a gamelet
- select a process
- take a capture
- verify that the top-down view contains at least 3 rows
- verify that the first item is "hello_* (all threads)"
- verify that the second item is "GgpSwapchain [*]"
- verify that the children of the first item are "*clone" and "_start"
"""
import orbit_testing
import logging
import time
from absl import app
import pywinauto
from pywinauto.application import Application
def main(argv):
orbit_testing.WaitForOrbit()
application = Application(backend='uia').connect(title_re='orbitprofiler')
orbit_testing.ConnectToGamelet(application)
orbit_testing.SelectProcess(application, 'hello_')
orbit_testing.FocusOnCaptureWindow(application)
orbit_testing.Capture(application, 5);
main_wnd = application.window(title_re='orbitprofiler', found_index=0)
main_wnd.child_window(title="Top-Down").click_input()
logging.info('Switched to Top-Down tab')
# Now that the "Top-Down" tab is selected,
# main_wnd.TreeView is the QTreeView of the top-down view.
# main_wnd.TreeView.children(control_type='TreeItem') returns
# every cell in the top-down view, in order by row and then column.
# It can take a few seconds.
logging.info('Listing items of the top-down view...')
tree_items = main_wnd.TreeView.children(control_type='TreeItem')
TOP_DOWN_ROW_CELL_COUNT = 6
row_count_before_expansion = len(tree_items) / TOP_DOWN_ROW_CELL_COUNT
if row_count_before_expansion < 3:
raise RuntimeError('Less than 3 rows in the top-down view')
if (not tree_items[0].window_text().startswith('hello_') or
not tree_items[0].window_text().endswith(' (all threads)')):
raise RuntimeError('First item of the top-down view is not "hello_* (all threads)"')
logging.info('Verified that first item is "hello_* (all threads)"')
if (not tree_items[TOP_DOWN_ROW_CELL_COUNT].window_text().startswith('GgpSwapchain [') or
not tree_items[TOP_DOWN_ROW_CELL_COUNT].window_text().endswith(']')):
raise RuntimeError('Second item of the top-down view is not "GgpSwapchain [*]"')
logging.info('Verified that second item is "GgpSwapchain [*]"')
tree_items[0].double_click_input()
logging.info('Expanded the first item')
logging.info('Re-listing items of the top-down view...')
tree_items = main_wnd.TreeView.children(control_type='TreeItem')
row_count_after_expansion = len(tree_items) / TOP_DOWN_ROW_CELL_COUNT
if row_count_after_expansion != row_count_before_expansion + 2:
raise RuntimeError('First item of the top-down view doesn\'t have exactly two children')
if (not tree_items[TOP_DOWN_ROW_CELL_COUNT].window_text().endswith('clone') or
tree_items[2 * TOP_DOWN_ROW_CELL_COUNT].window_text() != '_start'):
raise RuntimeError('Children of the first item of the top-down view '
'are not "*clone" and "_start"')
logging.info('Verified that children of the first item are "*clone" and "_start"')
main_wnd.CloseButton.click_input()
logging.info('Closed Orbit.')
if __name__ == '__main__':
app.run(main)
| Python | 0.999999 | |
afe216da917c171ff857de122be64a9b2a7d3e9c | migrate doaj client test from harvester | doajtest/unit/test_api_client.py | doajtest/unit/test_api_client.py | """
Unit tests for the DOAJ client
"""
from unittest import TestCase
from doajtest.fixtures.journals import JournalFixtureFactory
from portality.api.v1.client import client as doajclient, models
from portality.lib import dataobj
class TestDOAJ(TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_01_journal_issns(self):
source = JournalFixtureFactory.make_journal_source()
j = models.Journal(source)
issns = j.all_issns()
assert "1234-5678" in issns
assert "9876-5432" in issns
assert "4444-4444" in issns
assert "5555-5555" in issns
assert "0101-0101" in issns
assert len(issns) == 5
def test_02_validate_article(self):
invalid = {"bibjson" : {}}
# first check the article validator works
with self.assertRaises(dataobj.DataStructureException):
models.ArticleValidator(invalid)
# then check that the api validation method works
a = models.Article(invalid)
assert not a.is_api_valid()
| Python | 0 | |
fc9dd735c96ae21b4a64286e4c9ebcedc0e1fbca | Add script to subset kerning plist. | subsetKerning.py | subsetKerning.py | import sys
from plistlib import writePlist
from defcon import Font
__doc__ = '''
Subset kerning in UFO given a list of glyphs provided.
Will export new plist files that can be swapped into the UFO.
Usage:
python subsetKerning.py subsetList font.ufo
'''
class SubsetKerning(object):
"""docstring for SubsetKerning"""
def __init__(self, font, subsetFile):
self.font = Font(font)
self.subsetFile = subsetFile
with open(self.subsetFile, 'r') as ssfile:
rawData = ssfile.read()
self.subsetGlyphList = [line.split()[0] for line in rawData.splitlines()]
def subsetGroups(self):
newGroups = {}
for groupName, glyphList in self.font.groups.items():
combinedGlyphs = set(self.subsetGlyphList) & set(glyphList)
newGlyphList = sorted(list(combinedGlyphs))
if len(newGlyphList):
newGroups[groupName] = newGlyphList
return newGroups
def subsetKerning(self):
newGroups = self.subsetGroups()
newKerning = {}
plistStyleKerning = {}
# All allowed items for kerning, which are our subset glyphs,
# plus the groups filtered earlier:
allowedItems = set(newGroups) | set(self.subsetGlyphList)
for [left, right], value in self.font.kerning.items():
if set([left, right]) <= allowedItems:
newKerning[left, right] = value
# Since the kerning paradigm stored in the plist differs from the
# in the kerning object, the data structure needs some modification:
for [left, right], value in newKerning.items():
partnerDict = plistStyleKerning.setdefault(left, {})
partnerDict[right] = value
return plistStyleKerning
def run():
sk = SubsetKerning(sys.argv[-1], sys.argv[-2])
writePlist(sk.subsetGroups(), 'subset_groups.plist')
writePlist(sk.subsetKerning(), 'subset_kerning.plist')
print 'done'
if len(sys.argv) == 3:
run()
else:
print __doc__
| Python | 0 | |
4a7ccea9a70642bb73f33b70147ba78877cae9f6 | Add regular roi from csv file dialog, which can read MNI cordinates to generate sphere or cube roi based on the input radius in x, y ,z orientation. | froi/gui/component/regularroifromcsvfiledialog.py | froi/gui/component/regularroifromcsvfiledialog.py | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from froi.algorithm import imtool
from froi.io.csv import get_cord_from_file
import numpy as np
import os
class RegularROIFromCSVFileDialog(QDialog):
"""A dialog for generate a regular ROI."""
def __init__(self, model, parent=None):
super(RegularROIFromCSVFileDialog, self).__init__(parent)
self._model = model
self._temp_dir = None
self._init_gui()
self._create_actions()
def _init_gui(self):
self.setWindowTitle("Generate Regular ROI based on CSV file")
cordinate_file_label= QLabel('Cordinate File :')
self._cordinate_file_dir = QLineEdit('')
self._cordinate_file_dir.setReadOnly(True)
self._cordinate_file_button = QPushButton('Browse')
out_label = QLabel("Output")
self.out_edit = QLineEdit()
self._create_output()
self.run_button = QPushButton("Run")
self.cancel_button = QPushButton("Cancel")
grid_layout = QGridLayout()
grid_layout.addWidget(cordinate_file_label, 0, 0)
grid_layout.addWidget(self._cordinate_file_dir, 0, 1)
grid_layout.addWidget(self._cordinate_file_button, 0, 2)
grid_layout.addWidget(out_label, 1, 0)
grid_layout.addWidget(self.out_edit, 1, 1, 1, 2)
hbox_layout = QHBoxLayout()
hbox_layout.addWidget(self.run_button)
hbox_layout.addWidget(self.cancel_button)
vbox_layout = QVBoxLayout()
vbox_layout.addLayout(grid_layout)
vbox_layout.addLayout(hbox_layout)
self.setLayout(vbox_layout)
def _create_actions(self):
self.run_button.clicked.connect(self._regular_roi)
self._cordinate_file_button.clicked.connect(self._cordinate_file_browse)
self.cancel_button.clicked.connect(self.done)
def _cordinate_file_browse(self):
cordinate_file_filepath = self._open_file_dialog("Add cordinate txt file.")
if cordinate_file_filepath is not None:
self._temp_dir = os.path.dirname(cordinate_file_filepath)
self._cordinate_file_dir.setText(cordinate_file_filepath)
def _open_file_dialog(self, title):
if self._temp_dir == None:
temp_dir = QDir.currentPath()
else:
temp_dir = self._temp_dir
file_name = QFileDialog.getOpenFileName(self,
title,
temp_dir,
"Cordinate files (*.txt *.csv)")
import sys
file_path = None
if not file_name.isEmpty():
if sys.platform == 'win32':
file_path = unicode(file_name).encode('gb2312')
else:
file_path = str(file_name)
return file_path
def _update_output_name(self):
row = self._model.currentIndex()
vol_name = self._model.data(row, Qt.DisplayRole)
output_name = '_'.join([str(vol_name), str('sphere'), 'ROI'])
self.out_edit.setText(output_name)
def _create_output(self):
self._update_output_name()
def _regular_roi(self):
out = self.out_edit.text()
cord_filepath = str(self._cordinate_file_dir.text())
if not out:
self.out_edit.setFocus()
return
roi_generater = imtool.sphere_roi
header = self._model.data(self._model.currentIndex(), Qt.UserRole + 11)
data = self._model.data(self._model.currentIndex(), Qt.UserRole + 6)
new_data = np.zeros_like(data)
try:
coord_list, radius_list, id_list = get_cord_from_file(header, cord_filepath)
except ValueError, error_info:
QMessageBox.critical(self, 'Please check the cordinate in the file.', str(error_info))
return
for idx in range(len(coord_list)):
new_data = roi_generater(new_data, coord_list[idx][0],
coord_list[idx][1], coord_list[idx][2],
radius_list[idx], id_list[idx])
self._model.addItem(new_data,
None,
out,
self._model._data[0].get_header(),
None, None, 255, 'rainbow')
self.done(0)
| Python | 0 | |
0e6d0def4e00868ed5e788f5319440ab6382f10f | include provision script (#176) | cxs/libcxs/scripts/provision_agent_keys.py | cxs/libcxs/scripts/provision_agent_keys.py | #!/usr/bin/env python3
# Provided by The Python Standard Library
import json
import argparse
import asyncio
import time
import os
import urllib.request
import sys
from ctypes import *
def parse_args(argv=None):
    """Parse command line arguments.

    Parameters
    ----------
    argv : list of str, optional
        Argument list to parse; defaults to sys.argv[1:] (argparse default),
        which keeps the original no-argument call sites working.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("WALLET_NAME")
    parser.add_argument("AGENCY_URL")
    parser.add_argument("WALLET_KEY")
    parser.add_argument("--agent-seed", help="optional seed used to create enterprise->agent DID/VK")
    parser.add_argument("--enterprise-seed", help="optional seed used to create enterprise DID/VK")
    parser.add_argument("--verbose", action="store_true")
    return parser.parse_args(argv)
def get_agency_info(agency_url):
    """Fetch the agency's DID and verification key.

    Performs GET <agency_url>/agency and returns the decoded JSON payload
    (expected to contain 'DID' and 'verKey').  On any failure a JSON error
    report is printed to stdout and the process exits with status 1.

    Note: the bare ``except:`` clauses were narrowed to ``except Exception:``
    so KeyboardInterrupt/SystemExit are no longer swallowed.
    """
    agency_info = {}
    agency_resp = ''
    # Get agency's did and verkey:
    try:
        agency_req = urllib.request.urlopen('{}/agency'.format(agency_url))
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        sys.stderr.write("Failed looking up agency did/verkey: '{}': {}\n".format(exc_type.__name__, exc_value))
        print(json.dumps({
            'provisioned': False,
            'provisioned_status': "Failed: Could not retrieve agency info from: {}/agency: '{}': {}".format(agency_url, exc_type.__name__, exc_value)
        }, indent=2))
        sys.exit(1)
    agency_resp = agency_req.read()
    try:
        agency_info = json.loads(agency_resp.decode())
    except Exception:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        sys.stderr.write("Failed parsing response from agency endpoint: {}/agency: '{}': {}\n".format(agency_url, exc_type.__name__, exc_value))
        sys.stderr.write("RESPONSE: {}".format(agency_resp))
        print(json.dumps({
            'provisioned': False,
            'provisioned_status': "Failed: Could not parse response from agency endpoint from: {}/agency: '{}': {}".format(agency_url, exc_type.__name__, exc_value)
        }, indent=2))
        sys.exit(1)
    return agency_info
def register_agent(args):
    """Provision an agent via libcxs and print the resulting configuration.

    Loads ./libcxs.so with ctypes, builds the provisioning request from the
    parsed CLI args plus the agency's DID/verkey, and prints the JSON config
    returned by cxs_provision_agent() (or an error report on failure).
    """
    cxs = CDLL('./libcxs.so')
    agency_info = get_agency_info(args.AGENCY_URL)
    json_str = json.dumps({'agency_url':args.AGENCY_URL,
                           'agency_did':agency_info['DID'],
                           'agency_verkey':agency_info['verKey'],
                           'wallet_name':args.WALLET_NAME,
                           'wallet_key':args.WALLET_KEY,
                           'agent_seed':args.agent_seed,
                           'enterprise_seed':args.enterprise_seed})
    c_json = c_char_p(json_str.encode('utf-8'))
    rc = cxs.cxs_provision_agent(c_json)
    if rc == 0:
        # A NULL return from the library signals failure.
        sys.stderr.write("could not register agent, see log\n")
        print(json.dumps({
            'provisioned': False,
            'provisioned_status': 'Failed: Could not register agenct, see log\n'
        },indent=2))
    else:
        # NOTE(review): the int return is reinterpreted as a char* pointer;
        # this assumes the pointer fits in c_int -- TODO confirm on 64-bit.
        pointer = c_int(rc)
        string = cast(pointer.value, c_char_p)
        new_config = json.loads(string.value.decode('utf-8'))
        print(json.dumps(new_config, indent=2))
async def main():
    """Entry point: parse args, set libcxs (Rust) log verbosity, provision."""
    args = parse_args()
    if args.verbose:
        os.environ["RUST_LOG"] = "info"
    else:
        os.environ["RUST_LOG"] = "error"
    register_agent(args)
if __name__ == "__main__":
    # Drive the (trivially) async main() to completion, then pause briefly
    # so any pending library/log output can flush before exit.
    loop = asyncio.get_event_loop()
    loop.run_until_complete(main())
    time.sleep(.1)
| Python | 0 | |
35258c9b37997801af05875f04f450050a3e5273 | Create tarea5.py | tareas/tarea5.py | tareas/tarea5.py | #josue de leon
# Shopping list: a tuple holds the fixed menu options, a list holds the items.
# (User-facing strings intentionally remain in Spanish.)
import os
lista = []
opciones = ("1. Añadir producto a la lista.","2. Borrar el ultimo producto de la lista.","3. Mostrar toda la lista.")
control = 1
print("\n\n\tBienvenido a su lista de compras.\n\nRecuerde que su lista esta en blanco, asi que lo primero que debe hacer es\nagregar nuevos elementos.")
while control == 1:
    print("\nSeleccione que desea hacer:\n\n\t"+str(opciones[0])+"\n\t"+str(opciones[1])+"\n\t"+str(opciones[2])+"\n")
    seleccion = int(input("Escoja una opcion: "))
    if seleccion == 1:
        # Option 1: keep reading product names until the user types "fin".
        print('*Ingrese un producto para añadir a su lista.\n*Ingrese "fin" para finalizar su lista.')
        producto_lista = ''
        while producto_lista.lower() != "fin":
            producto_lista = input()
            if producto_lista.lower() != "fin":
                lista.append(producto_lista)
    elif seleccion == 2:
        # Option 2: drop the last item.  Guard against an empty list, which
        # previously raised an IndexError from `del lista[len(lista)-1]`.
        if lista:
            lista.pop()
            print("El ultimo elemento ha sido borrado!")
    elif seleccion == 3:
        # Option 3: print every item, one per line.
        for producto in lista:
            print("- "+producto)
    control = int(input('\n¿Desea continuar con su lista?\n- Presione "1" para CONTINUAR.\n- Si desea SALIR presione "0": '))
    os.system("cls")
| Python | 0.000001 | |
0c4d6491fe89e339e9d9505e6e46e8317e78034a | Add telnet testing script | telnet/telnet.py | telnet/telnet.py | #!/usr/bin/env python3
import pexpect
import os, sys, time

# Target server and the test account credentials (local dev server only).
ip = "127.0.0.1"
port = "10000"
username = "nikitapekin@gmail.com"
password = "12345"

# Remove a stale maildir lock left over from a previous run.
os.remove('../maildir/.lock')

# Drive an interactive telnet session against the local IMAP-like server,
# echoing the conversation to stdout; expect() raises on mismatch/timeout.
child = pexpect.spawn('telnet '+ ip + ' ' + port)
child.expect('.\n')
child.logfile = sys.stdout.buffer
time.sleep(1)
child.sendline('1 login ' + username + ' ' + password)
child.expect('1 OK logged in successfully as nikitapekin@gmail.com')
child.sendline('2 select INBOX')
child.expect('successful')
child.sendline('3 fetch 1:2 (FLAGS BODY[HEADER.FIELDS (DATE FROM)])')
child.expect('unimplemented')
| Python | 0.000001 | |
419d31f817af0436c699c0b565e2d6a058c487db | test tls case | test/test_tls.py | test/test_tls.py | #!/usr/bin/env python
import unittest
import os
import re
import subprocess
from framework import VppTestCase, VppTestRunner
from vpp_ip_route import VppIpTable, VppIpRoute, VppRoutePath
def checkQat():
    """Return True if the Intel QAT device node is present on this host."""
    r = os.path.exists("/dev/qat_dev_processes")
    if not r:
        # QAT hardware is required for these tests; report its absence.
        print("NO QAT! EXIT!")
    return r
def checkOpenSSLVersion():
    """Return True if the OpenSSL under $OPENSSL_ROOT_DIR is major version
    3 or newer; False when the variable is unset or the probe fails."""
    ret = False
    r = "OPENSSL_ROOT_DIR" in os.environ
    if r:
        # Run "<root>/bin/openssl version" and read its stdout.
        ssl = os.environ["OPENSSL_ROOT_DIR"] + "/bin/openssl version"
        p = subprocess.Popen(ssl, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, shell=True)
        p.wait()
        output = p.stdout.read()
        status = p.returncode
        if status:
            print("openssl version error!")
        else:
            # Extract the major version from "OpenSSL X.Y.Z ...".
            # NOTE(review): under Python 3 'output' is bytes while the regex
            # pattern is str -- presumably run under Python 2; verify before
            # porting.
            ssl_ver_src = re.findall(r"(\d+)\.+\d+.+\d+", output)
            ssl_ver = int(ssl_ver_src[0])
            if ssl_ver < 3:
                ret = False
            else:
                ret = True
    else:
        print("NO OPENSSL_ROOT_DIR!")
    return ret
def checkAll():
    """Run both prerequisite probes (QAT device present, OpenSSL >= 3)."""
    # '&' rather than 'and' is deliberate: both probes always execute, so
    # each prints its own diagnostic even when the first one fails.
    result = checkQat() & checkOpenSSLVersion()
    return result
class TestTLS(VppTestCase):
    """ TLS Qat Test Case. """

    @classmethod
    def setUpClass(cls):
        super(TestTLS, cls).setUpClass()
    @classmethod
    def tearDownClass(cls):
        super(TestTLS, cls).tearDownClass()
    def setUp(self):
        """Enable the session layer and create two loopbacks, each in its
        own IPv4 table with its own application namespace."""
        super(TestTLS, self).setUp()
        self.vapi.session_enable_disable(is_enabled=1)
        self.create_loopback_interfaces(2)
        table_id = 0
        for i in self.lo_interfaces:
            i.admin_up()
            # Table 0 always exists; create the additional FIB tables.
            if table_id != 0:
                tbl = VppIpTable(self, table_id)
                tbl.add_vpp_config()
            i.set_table_ip4(table_id)
            i.config_ip4()
            table_id += 1
        # Configure namespaces
        self.vapi.app_namespace_add_del(namespace_id=b"0",
                                        sw_if_index=self.loop0.sw_if_index)
        self.vapi.app_namespace_add_del(namespace_id=b"1",
                                        sw_if_index=self.loop1.sw_if_index)
    def tearDown(self):
        # Undo setUp: deconfigure interfaces and disable the session layer.
        for i in self.lo_interfaces:
            i.unconfig_ip4()
            i.set_table_ip4(0)
            i.admin_down()
        self.vapi.session_enable_disable(is_enabled=0)
        super(TestTLS, self).tearDown()
    @unittest.skipUnless(checkAll(),
                         "QAT or OpenSSL not satisfied,skip.")
    def test_tls_transfer(self):
        """ TLS qat echo client/server transfer """
        # Add inter-table routes
        ip_t01 = VppIpRoute(self, self.loop1.local_ip4, 32,
                            [VppRoutePath("0.0.0.0",
                                          0xffffffff,
                                          nh_table_id=1)])
        ip_t10 = VppIpRoute(self, self.loop0.local_ip4, 32,
                            [VppRoutePath("0.0.0.0",
                                          0xffffffff,
                                          nh_table_id=0)], table_id=1)
        ip_t01.add_vpp_config()
        ip_t10.add_vpp_config()
        # Enable QAT engine and TLS async
        r = self.vapi.tls_openssl_set_engine(
            async_enable=1,
            engine="qat",
            algorithm="RSA,PKEY_CRYPTO",
            ciphers="RSA")
        self.assertIsNotNone(r,
                             'No response msg ')
        # Start builtin server and client
        uri = "tls://" + self.loop0.local_ip4 + "/1234"
        error = self.vapi.cli("test echo server appns 0 fifo-size 4 "
                              "tls-engine 1 uri " +
                              uri)
        if error:
            self.logger.critical(error)
            self.assertNotIn("failed", error)
        error = self.vapi.cli("test echo client mbytes 10 appns 1 "
                              "fifo-size 4 no-output test-bytes "
                              "tls-engine 1 "
                              "syn-timeout 2 uri " + uri)
        if error:
            self.logger.critical(error)
            self.assertNotIn("failed", error)
        # Delete inter-table routes
        ip_t01.remove_vpp_config()
        ip_t10.remove_vpp_config()
ip_t10.remove_vpp_config()
if __name__ == '__main__':
    # Run under VPP's test runner when executed directly.
    unittest.main(testRunner=VppTestRunner)
| Python | 0.000001 | |
faf9397c2cd2c8384bbf8dad9b1122c78380cd2a | Fix and migrate missing contact-details #15 #16 | migrateContacts.py | migrateContacts.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import requests
import re
# PopIt API endpoint and auth header shared by all helpers below.
base_url = "http://api.popit.sinarproject.org"
# NOTE(review): the token file handle is never closed (relies on GC);
# a with-block would be safer.
token = open('token.txt')
headers = {'Authorization': token.read().rstrip()}
# ========= UTILS ========= #
def clean(label):
    """Lower-case *label* and strip punctuation (word chars/spaces kept)."""
    lowered = label.lower()
    return re.sub(r'[^\w\s]', '', lowered)
def getUsernameFromLink(linkURL):
    """Extract the account username from a social-network profile URL
    (everything after the last ".com/"; the URL itself if none)."""
    return linkURL.split(".com/")[-1]
def twitterLinkToContact(linkURL):
    '''
    Posts Twitter link entry as contact entry
    '''
    # Build a PopIt contact record from the profile URL's username.
    return {"type": "twitter",
            "value": getUsernameFromLink(linkURL),
            "label": "Twitter"}
def contactToLink(c):
    '''
    Posts contact entry as link entry
    '''
    # Map each known social network to its URL prefix and link note.
    prefixes = {"facebook": "https://www.facebook.com", "twitter": "https://twitter.com"}
    notes = {"facebook": "Official Facebook account", "twitter": "Official Twitter account"}
    network = c['type']
    return {"url": prefixes[network] + "/" + c['value'], "note": notes[network]}
def delLinksandContacts(personID):
    # Delete every link and contact_detail attached to the given person.
    # SECURITY NOTE(review): this re-defines base_url/headers locally and
    # embeds the API token in source -- a credential leak; it should reuse
    # the module-level token read from token.txt instead.
    base_url = "http://api.popit.sinarproject.org"
    token = "Token 3e6a794d84fc00dc613f40e426cbc4f19b69a68e"
    headers = {'Authorization': token}
    URL = base_url+ "/en/persons/"+ personID
    resp = requests.get(URL).json()
    resp= resp['result']
    if resp['links']:
        for link in resp['links']:
            url = URL +"/links/"+link['id']
            requests.delete(url, headers= headers)
    if resp['contact_details']:
        for con in resp['contact_details']:
            url = URL +"/contact_details/"+con['id']
            r = requests.delete(url, headers= headers)
def fixContactType(c):
    ''' Standardise contact type '''
    # Search the cleaned label and the cleaned declared type for keywords,
    # in priority order (email > cell > fax > voice > address), and rewrite
    # c['type'] in place to the canonical value.  The space separator keeps
    # keywords from matching across the label/type boundary.
    text = clean(c['label']) + " " + clean(c['type'])
    if "email" in text:
        c['type'] = 'email'
    elif "handphone" in text or "mobile" in text:
        c['type'] = 'cell'
    elif "fax" in text:
        c['type'] = 'fax'
    elif "phone" in text:
        c['type'] = 'voice'
    elif "address" in text:
        c['type'] = 'address'
    return c
# ========= MAIN ========= #
def missingContacts_aux(r):
    ''' Migrate missing contacts for a single page '''
    # NOTE(review): the loop bodies below rebind `r` to each POST response,
    # shadowing the page payload parameter -- harmless here because
    # r['result'] is read once up front, but fragile.
    for person in r['result']:
        twitAdded = [] #Keeps track of added accounts to prevent duplication
        fbAdded = []
        if person['contact_details']:
            for contact in person['contact_details']:
                if contact['type'] == "facebook":
                    # Facebook entries become link entries only.
                    link_contact = contactToLink(contact)
                    #Post new link entry for contact
                    postURL = base_url+"/en/persons/"+person['id']+ "/links"
                    r = requests.post(postURL, headers = headers, json = link_contact)
                    fbAdded.append(contact['value'])
                elif contact['type'] == "twitter":
                    # Twitter entries are posted as both a link and a contact.
                    link_contact = contactToLink(contact)
                    #Post new link entry for contact
                    postURL = base_url+"/en/persons/"+person['id']+ "/links"
                    r = requests.post(postURL, headers = headers, json = link_contact)
                    #print(link_contact)
                    #Proceed with posting contact entry
                    url = base_url+"/en/persons/"+person['id']+ "/contact_details"
                    r = requests.post(url, headers = headers, json = contact)
                    twitAdded.append(contact['value'])
                    #print(contact)
                else:
                    # Other contacts: normalise the type, then re-post.
                    contact = fixContactType(contact)
                    url = base_url+"/en/persons/"+person['id']+ "/contact_details"
                    r = requests.post(url, headers = headers, json = contact)
                    #print(contact)
        if person['links']:
            for link in person['links']:
                if "twitter" in clean(link['note']):
                    # Twitter links become contacts, unless already added above.
                    twitContact = twitterLinkToContact(link['url'])
                    if twitContact['value'] in twitAdded:
                        pass
                    else:
                        url = base_url+"/en/persons/"+person['id']+ "/contact_details"
                        r = requests.post(url, headers = headers, json = twitContact)
                        #print(twitContact)
                elif "facebook" in clean(link['note']):
                    # Facebook links are re-posted unless already added above.
                    if getUsernameFromLink(link['url']) in fbAdded:
                        pass
                    else:
                        url = base_url+"/en/persons/"+person['id']+ "/links"
                        r = requests.post(url, headers = headers, json = link)
                        #print(link)
                else:
                    url = base_url+"/en/persons/"+person['id']+ "/links"
                    r = requests.post(url, headers = headers, json = link)
                    #print r.content
def missingContacts():
    ''' Migrate missing contacts for all pages '''
    # Walk the paginated persons API until the server reports no more pages.
    pg = 1
    has_more = True
    while has_more:
        print("Page: " + str(pg) + "\n")
        payload = requests.get('https://sinar-malaysia.popit.mysociety.org/api/v0.1/persons/?page=' + str(pg)).json()
        missingContacts_aux(payload)
        has_more = payload['has_more']
        pg += 1
40832b561437d8a022b2dbe9f19e5fcf622fb6d4 | Add genspec.py, used to convert from the current Python-based spec to the new XML-based | tools/genspec.py | tools/genspec.py | #!/usr/bin/python2.4
import sys
try:
from elementtree.ElementTree import fromstring, tostring, Element, SubElement
except ImportError:
print "You need to install ElementTree (http://effbot.org/zone/element-index.htm)"
sys.exit(1)
import dbus
from xml.dom.minidom import parseString
from telepathy.server import *
copyright = """\
Copyright (C) 2005, 2006 Collabora Limited
Copyright (C) 2005, 2006 Nokia Corporation
Copyright (C) 2006 INdT
"""
license = """\
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""
def strip(element):
    """Recursively trim surrounding whitespace from the text and tail of
    every element in the tree rooted at *element*."""
    for attr in ('text', 'tail'):
        value = getattr(element, attr)
        if value:
            setattr(element, attr, value.strip())
    for child in element:
        strip(child)
# Load the interface-name -> constant-name mapping (file given as argv[2],
# one "name const" pair per line).
nameconsts = {}
f = file(sys.argv[2])
for line in f:
    name, const = line.split()
    nameconsts[name] = const

# For each class listed in argv[1]: instantiate a throwaway D-Bus object,
# introspect it, decorate the XML with tp: docstrings, and write one
# pretty-printed spec file per interface.
classes = file(sys.argv[1])
for line in classes:
    if line[0] == '#':
        continue
    elif line == '\n':
        continue
    line = line.strip()
    print line
    cls = eval(line)
    bases = (cls, dbus.service.Object)
    # classes half-baked to order... :)
    subclass = type(line, bases, {'__init__':lambda self: None,
                                  '__del__':lambda self: None,
                                  '_object_path':'/'+line,
                                  '_name':line})
    instance = subclass()
    xml = instance.Introspect()
    # sort
    root = fromstring(xml)
    for i, e in enumerate(root):
        if e.get('name') == 'org.freedesktop.DBus.Introspectable':
            del root[i]
    # embrace and extend the D-Bus introspection data, because it only supports
    # annotations which are effectively an attribute value, and we want
    # multi-line docstrings
    root.set('xmlns:tp', 'http://telepathy.freedesktop.org/wiki/DbusSpec#extensions-v0')
    for interface in root:
        interface[:] = sorted(interface[:], key=lambda e: e.get('name'))
        for member in interface:
            SubElement(member, 'tp:docstring').text = '\n%s\n' % getattr(cls, member.get('name')).__doc__
        text = cls.__doc__
        interface.set('tp:name-const', nameconsts[interface.get('name')])
        if text is not None:
            SubElement(interface, 'tp:docstring').text = '\n%s\n' % text
        break
    else:
        # ContactList has no methods
        interface = SubElement(root, 'interface', name=cls._dbus_interfaces[0])
        text = cls.__doc__
        if text is not None:
            SubElement(interface, 'tp:docstring').text = '\n%s\n' % text
        interface.set('tp:name-const', nameconsts[cls._dbus_interfaces[0]])
    basename = root[0].get('name')
    elt = Element('tp:license')
    elt.text = license
    root.insert(0, elt)
    elt = Element('tp:copyright')
    elt.text = copyright
    root.insert(0, elt)
    # pretty print
    strip(root)
    xml = tostring(root)
    dom = parseString(xml)
    basename = basename.replace('org.freedesktop.Telepathy.', '')
    basename = basename.replace('.', '-')
    # NOTE(review): this rebinds the builtin `file` -- subsequent loop
    # iterations would break if `file(...)` were called again below.
    file = open(basename + '.xml', 'w')
    s = dom.toprettyxml(' ', '\n')
    file.write(s)
    # keep the string splitting here - it stops vim thinking this file
    # is XML!
    file.write('<!-- v''im:set sw=2 sts=2 et ft=xml: -->\n')
    file.close()
| Python | 0.000001 | |
945fe81c4a0f970e57ff7c5a13d8c3aa03df5fc6 | Add function to save/restore environment between configuration checks. | numscons/checkers/new/common.py | numscons/checkers/new/common.py | from copy import deepcopy
def save_and_set(env, opts, keys=None):
"""Put informations from option configuration into a scons environment, and
returns the savedkeys given as config opts args."""
saved_keys = {}
if keys is None:
keys = opts.keys()
for k in keys:
saved_keys[k] = (env.has_key(k) and deepcopy(env[k])) or []
kw = dict(zip(keys, [opts[k] for k in keys]))
if kw.has_key('LINKFLAGSEND'):
env.AppendUnique(**{'LINKFLAGSEND' : kw['LINKFLAGSEND']})
del kw['LINKFLAGSEND']
env.Prepend(**kw)
return saved_keys
def restore(env, saved):
    """Write the key/value pairs captured by save_and_set back into the
    scons environment via Replace()."""
    kw = {}
    for key in saved.keys():
        kw[key] = saved[key]
    env.Replace(**kw)
| Python | 0 | |
207f9f1ed34066c0ed00842cd6287eb6907078f8 | fix NameError in stub functions returning 'a' programmatically call all stub functions using inspect | 0mq/stub_server.py | 0mq/stub_server.py | import argparse
import inspect
import re
import operator
import time
import sys
#
import jsonrpc2_zeromq
import jsonrpc2_zeromq.common
class RPCTestServer(jsonrpc2_zeromq.RPCServer):
    """Stub JSON-RPC server: every handler returns empty placeholder values
    of the documented shape without doing any real inference work."""
    def handle_initialize_method(self, M_c, M_r, T, i):
        X_L = {}
        X_D = [[]]
        return M_c, M_r, X_L, X_D
    def handle_analyze_method(self, S, T, X_L, X_D, M_C, M_R, kernel_list,
                              n_steps, c, r, max_iterations, max_time):
        X_L_prime = {}
        X_D_prime = [[]]
        return X_L_prime, X_D_prime
    def handle_simple_predictive_sample_method(self, M_c, X_L, X_D, Y, q):
        x = []
        return x
    def handle_simple_predictive_probability_method(self, M_c, X_L, X_D, Y, Q,
                                                    n):
        p = None
        return p
    def handle_impute_method(self, M_c, X_L, X_D, Y, q, n):
        e = []
        return e
    # Bug fix: 'self' was missing, so the bound call shifted every RPC
    # argument by one position.
    def handle_conditional_entropy_method(self, M_c, X_L, X_D, d_given,
                                          d_target, n=None, max_time=None):
        e = None
        return e
    def handle_predictively_related_method(self, M_c, X_L, X_D, d,
                                           n=None, max_time=None):
        m = []
        return m
    def handle_contextual_structural_similarity_method(self, X_D, r, d):
        s = []
        return s
    def handle_structural_similarity_method(self, X_D, r):
        s = []
        return s
    def handle_structural_anomalousness_columns_method(self, X_D):
        a = []
        return a
    def handle_structural_anomalousness_rows_method(self, X_D):
        a = []
        return a
    def handle_predictive_anomalousness_method(self, M_c, X_L, X_D, T, q, n):
        a = []
        return a
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--is_client', action='store_true')
    parser.add_argument('--port', type=int, default=5557)
    parser.add_argument('--lifetime', type=int, default=-1)
    args = parser.parse_args()
    is_client = args.is_client
    port = args.port
    lifetime = args.lifetime
    #
    endpoint = "tcp://127.0.0.1:%s" % port
    #
    if is_client:
        # Client mode: introspect the server class and call every
        # handle_*_method over RPC, passing its own parameter names as
        # the (string) arguments.
        client = jsonrpc2_zeromq.RPCClient(endpoint=endpoint)
        method_re = re.compile('handle_(.*)_method')
        server_method_names = filter(method_re.match, dir(RPCTestServer))
        for server_method_name in server_method_names:
            server_method = RPCTestServer.__dict__[server_method_name]
            arg_str_list = inspect.getargspec(server_method).args[1:]
            arg_str_list_joined = ", ".join(arg_str_list)
            #
            method_name = method_re.match(server_method_name).groups()[0]
            msg = client.__getattr__(method_name)(*arg_str_list)
            print msg, " = client." + method_name + "(" + arg_str_list_joined + ")"
    else:
        # Server mode: run for 'lifetime' seconds, or forever when -1.
        print "starting server"
        server = RPCTestServer(endpoint)
        server.start()
        if lifetime != -1:
            print "killing server in ", lifetime, " seconds"
            time.sleep(lifetime)
            print "killing server"
            server.stop()
            server.join()
            server.close()
            time.sleep(0.1)
            print "server killed"
| import argparse
import inspect
import re
import operator
import time
import sys
#
import jsonrpc2_zeromq
import jsonrpc2_zeromq.common
class RPCTestServer(jsonrpc2_zeromq.RPCServer):
    """Stub JSON-RPC server: every handler returns empty placeholder values
    of the documented shape without doing any real inference work."""
    def handle_initialize_method(self, M_c, M_r, T, i):
        X_L = {}
        X_D = [[]]
        return M_c, M_r, X_L, X_D
    def handle_analyze_method(self, S, T, X_L, X_D, M_C, M_R, kernel_list,
                              n_steps, c, r, max_iterations, max_time):
        X_L_prime = {}
        X_D_prime = [[]]
        return X_L_prime, X_D_prime
    def handle_simple_predictive_sample_method(self, M_c, X_L, X_D, Y, q):
        x = []
        return x
    def handle_simple_predictive_probability_method(self, M_c, X_L, X_D, Y, Q,
                                                    n):
        p = None
        return p
    def handle_impute_method(self, M_c, X_L, X_D, Y, q, n):
        e = []
        return e
    # Bug fix: 'self' was missing, so the bound call shifted every RPC
    # argument by one position.
    def handle_conditional_entropy_method(self, M_c, X_L, X_D, d_given,
                                          d_target, n=None, max_time=None):
        e = None
        return e
    def handle_predictively_related_method(self, M_c, X_L, X_D, d,
                                           n=None, max_time=None):
        m = []
        return m
    def handle_contextual_structural_similarity_method(self, X_D, r, d):
        s = []
        return s
    def handle_structural_similarity_method(self, X_D, r):
        s = []
        return s
    # Bug fix for the three methods below: they assigned 'a' but returned
    # 's', raising NameError (or returning a stale value) when invoked.
    def handle_structural_anomalousness_columns_method(self, X_D):
        a = []
        return a
    def handle_structural_anomalousness_rows_method(self, X_D):
        a = []
        return a
    def handle_predictive_anomalousness_method(self, M_c, X_L, X_D, T, q, n):
        a = []
        return a
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--is_client', action='store_true')
    parser.add_argument('--port', type=int, default=5557)
    parser.add_argument('--lifetime', type=int, default=-1)
    args = parser.parse_args()
    is_client = args.is_client
    port = args.port
    lifetime = args.lifetime
    endpoint = "tcp://127.0.0.1:%s" % port
    if is_client:
        # Client mode: exercise a single RPC method with its parameter
        # names as (string) arguments.
        client = jsonrpc2_zeromq.RPCClient(endpoint=endpoint)
        args = ("M_c", "X_L", "X_D", "Y", "q")
        args_joined = ", ".join(args)
        msg = client.simple_predictive_sample(*args)
        print msg, " = client.simple_predictive_sample(" + args_joined + ")"
        #
        # method_re = re.compile('handle_(.*)_method')
        # server_method_names = filter(method_re.match, dir(RPCTestServer))
        # for server_method_name in server_method_names:
        #     print "server_method_name: ", server_method_name
        #     method_name = method_re.match(server_method_name).groups()[0]
        #     method = RPCTestServer.__dict__[method_name]
        #     arg_str_list = inspect.getargspec(method).args[1:]
        #     arg_str_list_joined = ", ".join(arg_str_list)
        #     print arg_str_list
        #     msg = client.__getattr__(method_name)(*args)
        #     print msg, " = client." + method_name + "(" + arg_str_list_joined + ")"
    else:
        # Server mode: run for 'lifetime' seconds, or forever when -1.
        print "starting server"
        server = RPCTestServer(endpoint)
        server.start()
        if lifetime != -1:
            print "killing server in ", lifetime, " seconds"
            time.sleep(lifetime)
            print "killing server"
            server.stop()
            server.join()
            server.close()
            time.sleep(0.1)
            print "server killed"
| Python | 0 |
bef94fea3318c835c1474ebdfe74f89d8251baf9 | add test_cover.py | pylayers/gis/test/test_cover.py | pylayers/gis/test/test_cover.py | import pylayers.gis.ezone as ez
from pylayers.gis.gisutil import ent,ext2qt
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import os
import smopy
from cartopy import config
import cartopy.crs as ccrs
# Visual smoke test: overlay an OSM tile map and a coverage computation on
# the N48W002 elevation zone around a point near Mont Saint-Michel.
fig = plt.figure(figsize=(12,12))
white = np.zeros((10,10))
ax  = fig.add_subplot(111)
z = ez.Ezone('N48W002')
z.loadh5()
z.rebase()
zoom=11
p = (48.721095,-1.830548)
print "p : ",p
# Tile indices of the point, and the lat/lon bounds of that tile.
xtile,ytile=smopy.deg2num(p[0],p[1],zoom,do_round=True)
print "xtile,ytile : ",xtile,ytile
(lat0,lon0)=smopy.num2deg(xtile,ytile,zoom,do_round=True)
(lat1,lon1)=smopy.num2deg(xtile+1,ytile+1,zoom,do_round=True)
print "lat,lon WN",lat0,lon0
print "lat,lon ES",lat1,lon1
#mp = smopy.Map((lat1,lon0,lat0,lon1),z=zoom)
mp = smopy.Map((48,-2,49,-1),z=zoom)
##f,a = z.show(alpha=0.3)
box_tile = mp.box_tile
print box_tile
# True geographic extent of the fetched tile box (lower-left / upper-right).
L_ll,l_ll=smopy.num2deg(box_tile[0],box_tile[1]+1,zoom)
L_ur,l_ur=smopy.num2deg(box_tile[2]+1,box_tile[3],zoom)
extent_true = np.array((l_ll,l_ur,L_ll,L_ur))
print extent_true
#print extent_true
##print z.extent
f,a = z.show(fig=fig,ax=ax,alpha=0.4)
#f,a=plt.subplots(1,1)
im1 = a.imshow(mp.img,extent=extent_true,alpha=0.6)
im2 = a.imshow(white,extent=(-2.2,-0.9,47.9,49.1),alpha=0)
a.plot(p[1],p[0],'ob')
###mp.box_tile=(0,0,73000,111000)
###mp.h=73000
###mp.w=111000
###mp.box_tile=(0,111000,73000,0)
###mp.xmin = 0
###mp.ymin=0
###ax = mp.show_mpl(figsize=(20,10),alpha=1)
##fig=plt.gcf()
###z.extent_c=(0,1024,0,1280)
###z.extent_c=(506,509,351,355)
###print z.extent_c
# Radio coverage: transmitter/receiver heights 2 m, 10 km maximum range.
a = z.cover(Ht=2,Hr=2,Rmax=10000)
##
| Python | 0.000003 | |
1a29e182a196e3fc4fbe00c0db6e22c2619473f3 | Add iOSExtractor test | strings2pot/extractors/ios_test.py | strings2pot/extractors/ios_test.py | # -*- coding: utf-8 -*-
import os
import unittest
import ios
class iOSExtractorTest(unittest.TestCase):
    """Unit tests for ios.iOSExtractor (.strings -> POT conversion)."""
    def setUp(self):
        # Fixture paths and a deterministic context-id generator.
        self.mock_source_file = 'mock_source_ios.strings'
        self.mock_destination_file = 'mock_destination_ios.pot'
        def mock_context_id_generator(s): return 'MOCK_CONTEXT_ID'
        self.mock_context_id_generator = mock_context_id_generator
        with open(self.mock_source_file, 'a') as source_file:
            source_file.write("""
/* Test string with a placeholder */
"Test string with a \"%@\" here" = "Test string with a \"%@\" here";
""")
    def tearDown(self):
        # Remove the fixture files.  Bug fix: only ignore OS-level errors
        # (e.g. a file that was never created) instead of swallowing every
        # exception with a bare `except Exception, e: pass`.
        try:
            os.unlink(self.mock_source_file)
            os.unlink(self.mock_destination_file)
        except OSError:
            pass
    # test that the iOSExtractor class constructor sets source_file and destination_file attributes
    def test_ctor(self):
        sut = ios.iOSExtractor(
            self.mock_source_file,
            self.mock_destination_file,
            self.mock_context_id_generator
        )
        self.assertEqual(sut.source_file, self.mock_source_file)
        self.assertEqual(sut.destination_file, self.mock_destination_file)
    # test that iOSExtractor parse_string method converts string in POT format
    def test_parse_string(self):
        sut = ios.iOSExtractor('', '', self.mock_context_id_generator)
        single_line_string = "\' \" %@"
        self.assertEqual(
            sut.parse_string(single_line_string),
            '"\' \" %s"'
        )
        multi_line_string = "\' \" \\n %@"
        self.assertEqual(
            sut.parse_string(multi_line_string),
            '''""
"\' \" \\n"
" %s"'''
        )
    # test that iOSExtractor run method converts an input file in POT format
    def test_run(self):
        sut = ios.iOSExtractor(
            self.mock_source_file,
            self.mock_destination_file,
            self.mock_context_id_generator
        )
        sut.run()
        with open(self.mock_destination_file, 'r') as destination_file:
            lines = destination_file.readlines()
            pot_content_as_string = "".join(lines)
            self.assertEqual(
                pot_content_as_string,
                '''
#: mock_source_ios.strings:4
msgctxt "MOCK_CONTEXT_ID"
msgid "Test string with a \"%s\" here"
msgstr ""
'''
            )
if __name__ == '__main__':
    unittest.main()
f1c95af353c741f26a9bd95f8228ef74e90bca75 | Add itest | itests/tests.py | itests/tests.py | from unittest import TestCase
import inferi
class Tests(TestCase):
    def test_variables(self):
        """End-to-end exercise of inferi.Variable: container behaviour,
        descriptive statistics, comparison, arithmetic, and error terms."""
        # Basic variable behaviour
        var = inferi.Variable(4, 8, 15, 16, 23, 42, name="Numbers")
        self.assertEqual(var.values(), (4, 8, 15, 16, 23, 42))
        self.assertEqual(var.name(), "Numbers")
        self.assertEqual(len(var), 6)
        self.assertEqual(var.length(), 6)
        var[4] = 24
        self.assertEqual(var.values(), (4, 8, 15, 16, 24, 42))
        var[4] = 23
        self.assertEqual(var[4], 23)
        var.set(2, 14)
        self.assertEqual(var.values(), (4, 8, 14, 16, 23, 42))
        var.set(2, 15)
        self.assertEqual(var.get(2), 15)
        var.add(108)
        self.assertEqual(var.values(), (4, 8, 15, 16, 23, 42, 108))
        var.remove(108)
        self.assertEqual(var.values(), (4, 8, 15, 16, 23, 42))
        self.assertEqual(var.pop(), 42)
        self.assertEqual(var.values(), (4, 8, 15, 16, 23))
        var.add(42)
        self.assertEqual(var[-1], 42)
        var.name("The Numbers")
        self.assertEqual(var.name(), "The Numbers")
        # Variable metrics
        self.assertEqual(var.min(), 4)
        self.assertEqual(var.max(), 42)
        self.assertEqual(var.sum(), 108)
        self.assertEqual(var.mean(), 18)
        self.assertEqual(var.median(), 15.5)
        self.assertEqual(var.mode(), None)
        var.add(15)
        self.assertEqual(var.mode(), 15)
        var.pop()
        self.assertEqual(var.range(), 38)
        self.assertAlmostEqual(var.variance(), 182, delta=0.005)
        self.assertAlmostEqual(var.variance(population=True), 151.67, delta=0.005)
        self.assertAlmostEqual(var.st_dev(), 13.49, delta=0.005)
        self.assertAlmostEqual(var.st_dev(population=True), 12.32, delta=0.005)
        self.assertAlmostEqual(var.zscore(4.51), -1, delta=0.005)
        # Variable comparison
        var2 = inferi.Variable(34, 21, 56, 43, 78, 79)
        self.assertAlmostEqual(var.covariance_with(var2), 269.2, delta=0.05)
        self.assertAlmostEqual(var.correlation_with(var2), 0.845, delta=0.005)
        # Variable arithmetic
        var3 = inferi.Variable.average(var, var2)
        self.assertEqual(var3.length(), 6)
        self.assertEqual(var3[0], 19)
        self.assertEqual(var3[2], 35.5)
        var3 = var + var2
        self.assertEqual(var3.length(), 6)
        self.assertEqual(var3[0], 38)
        self.assertEqual(var3[2], 71)
        var3 = var2 - var
        self.assertEqual(var3.length(), 6)
        self.assertEqual(var3[0], 30)
        self.assertEqual(var3[2], 41)
        # Variable errors
        var = inferi.Variable(4, 8, 15, name="Numbers", error=[0.8, 0.5, 0.3])
        self.assertEqual(var.values(), (4, 8, 15))
        self.assertEqual(var.error(), (0.8, 0.5, 0.3))
        self.assertEqual(var.get(0), 4)
        self.assertEqual(var.get(0, error=True).error(), 0.8)
        self.assertEqual(var.get(0, error=True).relative_error(), 0.2)
| Python | 0.999957 | |
90642d734fbdcc3a97693106259c35c25f19d38e | Add problem 1 | problem_1.py | problem_1.py | import sys
# Cryptopals Set 1 Challenge 1: convert a hex string (argv[1]) to base64
# using Python 2's str codecs ('hex' / 'base64').
hex_string = sys.argv[1]
print hex_string.decode('hex').encode('base64')
| Python | 0.000022 | |
e19e45f7c6ff68599503c3ee0d6712974a8b4e66 | Document current pycurl exception behavior | tests/error_test.py | tests/error_test.py | #! /usr/bin/env python
# -*- coding: iso-8859-1 -*-
# vi:ts=4:et
import pycurl
import sys
import unittest
class ErrorTest(unittest.TestCase):
    """Documents how pycurl reports errors: pycurl.error for libcurl and
    pycurl-originated failures, standard exceptions otherwise.

    NOTE(review): each test's assertions live inside the except block, so
    a test silently passes if the expected exception is never raised.
    """
    def setUp(self):
        self.curl = pycurl.Curl()
    def tearDown(self):
        self.curl.close()
    # error originating in libcurl
    def test_pycurl_error_libcurl(self):
        try:
            # perform without a url
            self.curl.perform()
        except pycurl.error:
            exc_type, exc = sys.exc_info()[:2]
            assert exc_type == pycurl.error
            # pycurl.error's arguments are libcurl errno and message
            self.assertEqual(2, len(exc.args))
            self.assertEqual(int, type(exc.args[0]))
            self.assertEqual(str, type(exc.args[1]))
            # unpack
            err, msg = exc
            self.assertEqual(pycurl.E_URL_MALFORMAT, err)
            # possibly fragile
            self.assertEqual('No URL set!', msg)
    # pycurl raises standard library exceptions in some cases
    def test_pycurl_error_stdlib(self):
        try:
            # set an option of the wrong type
            self.curl.setopt(pycurl.WRITEFUNCTION, True)
        except TypeError:
            exc_type, exc = sys.exc_info()[:2]
    # error originating in pycurl
    def test_pycurl_error_pycurl(self):
        try:
            # invalid option combination
            self.curl.setopt(pycurl.WRITEFUNCTION, lambda x: x)
            with open(__file__) as f:
                self.curl.setopt(pycurl.WRITEHEADER, f)
        except pycurl.error:
            exc_type, exc = sys.exc_info()[:2]
            assert exc_type == pycurl.error
            # for non-libcurl errors, arguments are just the error string
            self.assertEqual(1, len(exc.args))
            self.assertEqual(str, type(exc.args[0]))
            self.assertEqual('cannot combine WRITEHEADER with WRITEFUNCTION.', exc.args[0])
| Python | 0 | |
b6500cc5ae48212b7cabefc313b417a42273274b | Add test for parsing the man page | tests/test_parse.py | tests/test_parse.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import unittest
import mock
from tldr.parser import parse_page
class TestParse(unittest.TestCase):
    def test_parse_page(self):
        """parse_page renders a tldr man page with the configured ANSI
        colours (get_config is mocked so no real config file is needed)."""
        mock_config = {
            'colors': {
                'command': 'cyan',
                'description': 'blue',
                'usage': 'green'
            },
            'platform': 'linux',
            'repo_directory': '/tmp/tldr'
        }
        with mock.patch('tldr.parser.get_config', return_value=mock_config):
            result = parse_page('/tmp/tldr/pages/sunos/prctl.md')
        # Expected output: blue (34) description, green (32) usage lines,
        # cyan (36) command lines, each reset with \x1b[0m.
        assert ''.join(result) == (
            '\n\x1b[0m\x1b[34m  Get or set the resource controls of '
            'running processes,\n\x1b[0m\x1b[34m  tasks, and projects\n'
            '\x1b[0m\n\x1b[0m\x1b[32m- examine process limits and '
            'permissions\n\x1b[0m\n\x1b[0m\x1b[36m  prctl {{PID}}\n\x1b'
            '[0m\n\x1b[0m\x1b[32m- examine process limits and permissions '
            'in machine parseable format\n\x1b[0m\n\x1b[0m\x1b[36m  prctl '
            '-P {{PID}}\n\x1b[0m\n\x1b[0m\x1b[32m- Get specific limit for '
            'a running process\n\x1b[0m\n\x1b[0m\x1b[36m  prctl -n '
            'process.max-file-descriptor {{PID}}\x1b[0m'
        )
| Python | 0.000001 | |
38dee68b15e2daf3c9d6ece845dc561500545258 | Create test_plots.py | tests/test_plots.py | tests/test_plots.py | from test_model import test_add_stressmodel
from pastas.plots import TrackSolve
# Each test below builds the shared demo model (test_add_stressmodel) and
# exercises one plotting API; success simply means no exception is raised.
def test_plot():
    ml = test_add_stressmodel()
    ml.plot()
def test_decomposition():
    ml = test_add_stressmodel()
    ml.plots.decomposition(min_ylim_diff=0.1)
def test_results():
    ml = test_add_stressmodel()
    ml.plots.results()
def test_block_response():
    ml = test_add_stressmodel()
    ml.plots.block_response()
def test_step_response():
    ml = test_add_stressmodel()
    ml.plots.step_response()
def test_diagnostics():
    ml = test_add_stressmodel()
    ml.plots.diagnostics()
def test_stresses():
    ml = test_add_stressmodel()
    ml.plots.stresses()
def test_contributions_pie():
    ml = test_add_stressmodel()
    ml.plots.contributions_pie()
def test_tracksolve():
    # TrackSolve updates its figure on every solver callback invocation.
    ml = test_add_stressmodel()
    track = TrackSolve(ml)
    track.initialize_figure()
    ml.solve(callback=track.update_figure)
| Python | 0.000104 | |
8a573baabee65bfbd348901e0d1c7828cdadd337 | Add tests for stats.normalize | tests/test_stats.py | tests/test_stats.py | import numpy as np
np.seterr(all='raise')
from stats import normalize
def check_normalization_constants(arr, axis):
sum = np.log(np.sum(arr, axis=axis))
z = normalize(np.log(arr), axis=axis)[0]
zdiff = np.abs(sum - z)
if not (zdiff < 1e-8).all():
print sum
print z
raise AssertionError("wrong normalization constant")
def check_normalization(arr, axis):
sum = np.sum(arr, axis=axis)
norm = np.log(arr / np.expand_dims(sum, axis=axis))
n = normalize(np.log(arr), axis=axis)[1]
ndiff = np.abs(norm - n)
if not(ndiff < 1e-8).all():
print norm
print n
raise AssertionError("wrong normalized values")
def test_normalize_10():
"""Test stats.normalize for a vector"""
for i in xrange(5):
arr = np.random.gamma(2, scale=2, size=10)
yield (check_normalization_constants, arr, 0)
yield (check_normalization, arr, 0)
def test_normalize_5x10x15():
"""Test stats.normalize for a multidimensional array"""
for i in xrange(5):
arr = np.random.gamma(2, scale=2, size=(5, 15, 20))
for axis in xrange(3):
yield (check_normalization_constants, arr, axis)
yield (check_normalization, arr, axis)
def test_normalize_2x100000():
"""Test stats.normalize for a large array"""
for i in xrange(1):
arr = np.random.gamma(2, scale=2, size=(2, 100000))
for axis in xrange(2):
yield (check_normalization_constants, arr, axis)
yield (check_normalization, arr, axis)
| Python | 0.000005 | |
88f6c8c3657cba81c65da34a7161c860c8a23c5f | add RPC test for InvalidateBlock | qa/rpc-tests/invalidateblock.py | qa/rpc-tests/invalidateblock.py | #!/usr/bin/env python2
# Copyright (c) 2014 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test InvalidateBlock code
#
from test_framework import BitcoinTestFramework
from bitcoinrpc.authproxy import AuthServiceProxy, JSONRPCException
from util import *
class InvalidateTest(BitcoinTestFramework):
def setup_chain(self):
print("Initializing test directory "+self.options.tmpdir)
initialize_chain_clean(self.options.tmpdir, 2)
def setup_network(self):
self.nodes = []
self.is_network_split = False
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
def run_test(self):
print "Mine 4 blocks on Node 0"
self.nodes[0].setgenerate(True, 4)
assert(self.nodes[0].getblockcount() == 4)
besthash = self.nodes[0].getbestblockhash()
print "Mine competing 6 blocks on Node 1"
self.nodes[1].setgenerate(True, 6)
assert(self.nodes[1].getblockcount() == 6)
print "Connect nodes to force a reorg"
connect_nodes_bi(self.nodes,0,1)
sync_blocks(self.nodes)
assert(self.nodes[0].getblockcount() == 6)
badhash = self.nodes[1].getblockhash(2)
print "Invalidate block 2 on node 0 and verify we reorg to node 0's original chain"
self.nodes[0].invalidateblock(badhash)
newheight = self.nodes[0].getblockcount()
newhash = self.nodes[0].getbestblockhash()
if (newheight != 4 or newhash != besthash):
raise AssertionError("Wrong tip for node0, hash %s, height %d"%(newhash,newheight))
if __name__ == '__main__':
InvalidateTest().main()
| Python | 0 | |
fadac460052cb1a778bf8398879e1cb616c26228 | Add new migration for Django 1.8 | propaganda/migrations/0002_auto_20150802_1841.py | propaganda/migrations/0002_auto_20150802_1841.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('propaganda', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='subscriber',
name='email',
field=models.EmailField(unique=True, max_length=254, verbose_name='email'),
),
]
| Python | 0 | |
b29417d3b387c8ab62c1e09589c2d93dae905993 | Add skeleton tle.api | tle/api.py | tle/api.py | import json
import logging
import bottle
import functools
from paste import httpserver
from paste.translogger import TransLogger
from collections import OrderedDict
log = logging.getLogger(__name__)
class APILogger(TransLogger):
def write_log(
self,
environ,
method,
req_uri,
start,
status,
bytes_,
):
remote_addr = environ['REMOTE_ADDR']
protocol = environ['SERVER_PROTOCOL']
referer = environ.get('HTTP_REFERER', '-')
user_agent = environ.get('HTTP_USER_AGENT', '-')
msg = ('{remote_addr} {method} {req_uri} {protocol} {status} '
'{bytes_} {referer} {user_agent}'
).format(
remote_addr=remote_addr,
method=method,
req_uri=req_uri,
protocol=protocol,
status=status,
bytes_=bytes_,
referer=referer,
user_agent=user_agent,
)
log.info(msg)
class APIServer(bottle.ServerAdapter):
def run(self, handler):
handler = APILogger(handler)
httpserver.serve(
handler,
host=self.host,
port=str(self.port),
**self.options
)
def set_content(type_, charset='charset=UTF-8'):
bottle.response.content_type = '{type_}; {charset}'.format(
type_=type_,
charset=charset,
)
def json_content(fn):
@functools.wraps(fn)
def wrapper(*args, **kwargs):
set_content('application/json')
return fn(*args, **kwargs)
return wrapper
@bottle.error(404)
@bottle.error(403)
@bottle.error(500)
@json_content
def api_error(error):
status = OrderedDict([
('code', error.status),
('message', error.body)
])
status = OrderedDict([
('status', status),
])
return json.dumps(status)
class EventAPI01(object):
def __init__(self):
pass
def apply(self, callback, context):
"""
Similar to a bottle.JSONPlugin's apply
method. This one also ensures that self
is available to methods with bottle
decorators.
"""
@functools.wraps(callback)
@json_content
def wrapper(*args, **kwargs):
kwargs['self'] = self
return callback(*args, **kwargs)
return wrapper
| Python | 0.000069 | |
b042675463c34340d4d3ae5d6868b243abf9741b | Create Average_sorting.py | Average_sorting.py | Average_sorting.py | # coding: utf-8
import rw
success_list=[] #Meet the requirements of the combined group
max_min=[] #Max ad min volue save var;[function_name : max_min_mark]
def count(x,y):
result=x+y
return result
def count_list(x,y):
total=count(len(x),len(y))
return total
def max_min_mark(var):
for i in var:
length=len(i)
max_min.append(length)
def merger_group(textdir):
textlines = open(textdir,'r').readlines()
b_split=[]
for i in xrange(0,len(textlines)):
if i%2!=0:
if count_list(x, textlines[i])>35:
b_split.append(x)
b_split.append(textlines[i])
else:
success_list.append(x.replace('\n','')+' '+textlines[i])
else:
x=textlines[i]
return b_split
def best_value(b_split):
max_min_mark(b_split)
min_value_location=max_min.index(min(max_min))
while min_value_location:
max_value_location=max_min.index(max(max_min))
if max_min[max_value_location]+max_min[min_value_location]>35:
success_list.append(b_split[max_value_location])
success_list.append(b_split[max_value_location])
max_min[max_value_location]=None
else:
success_list.append(b_split[max_value_location].replace('\n','')+' '+b_split[min_value_location])
max_min[max_value_location]=None
max_min[min_value_location]=None
min_value_location=max_min.index(min(max_min))
def main(textdir):
path=raw_input('save_filename:')
best_value(merger_group(textdir))
rw.handle(success_list,path)
if __name__ == '__main__':
textdir = 'd:/name.txt'
main(textdir)
| Python | 0.000001 | |
2b7d1de9f1db2ca2c0b9cf146063f58eb4a2aad5 | add hwuitest | wa/workloads/hwuitest/__init__.py | wa/workloads/hwuitest/__init__.py | # Copyright 2013-2018 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=E1101,W0201
import os
import re
from collections import defaultdict
from wa import Workload, Parameter, Executable
from wa.utils.exec_control import once
from wa.utils.types import caseless_string
BINARY = "hwuitest"
IGNORED_METRICS = ["Stats since", "Total frames rendered"]
class HWUITest(Workload):
name = 'hwuitest'
description = """
Tests UI rendering latency on Android devices.
The binary for this workload is built as part of AOSP's
frameworks/base/libs/hwui component.
"""
supported_platforms = ['android']
parameters = [
Parameter('test', kind=caseless_string, default="shadowgrid",
allowed_values=["shadowgrid", "rectgrid", "oval"],
description="""
The test to run:
- ``'shadowgrid'``: creates a grid of rounded rects that
cast shadows, high CPU & GPU load
- ``'rectgrid'``: creates a grid of 1x1 rects
- ``'oval'``: draws 1 oval
"""),
Parameter('loops', kind=int, default=3,
description="The number of test iterations."),
Parameter('frames', kind=int, default=150,
description="The number of frames to run the test over."),
]
def __init__(self, target, *args, **kwargs):
super(HWUITest, self).__init__(target, *args, **kwargs)
HWUITest.target_exe = None
@once
def initialize(self, context):
host_exe = context.resolver.get(Executable(self,
self.target.abi,
BINARY))
HWUITest.target_exe = self.target.install(host_exe)
def run(self, context):
self.output = self.target.execute("{} {} {} {}".format(self.target_exe,
self.test.lower(),
self.loops,
self.frames))
def extract_results(self, context):
outfile = os.path.join(context.output_directory, 'hwuitest.output')
with open(outfile, 'w') as wfh:
wfh.write(self.output)
context.add_artifact('hwuitest', outfile, kind='raw')
def update_output(self, context):
normal = re.compile(r'(?P<value>\d*)(?P<unit>\w*)')
with_pct = re.compile(r'(?P<value>\d*) \((?P<percent>.*)%\)')
count = 0
for line in self.output.splitlines():
#Filters out "Success!" and blank lines
try:
metric, value_string = [p.strip() for p in line.split(':', 1)]
except ValueError:
continue
# Filters out unwanted lines
if metric in IGNORED_METRICS:
continue
if metric == "Janky frames":
count += 1
match = with_pct.match(value_string).groupdict()
context.add_metric(metric,
match['value'],
None,
classifiers={"loop": count,
"frames": self.frames})
context.add_metric(metric + "_pct",
match['percent'],
"%",
classifiers={"loop": count,
"frames": self.frames})
else:
match = normal.match(value_string).groupdict()
context.add_metric(metric,
match['value'],
match['unit'],
classifiers={"loop": count,
"frames": self.frames})
@once
def finalize(self, context):
if (self.target_exe):
self.target.uninstall(self.target_exe)
| Python | 0 | |
1136824ab60dbb8774ba5cb8d011e898f9286e06 | Add a missing file | reviewboard/admin/validation.py | reviewboard/admin/validation.py | from django import forms
def validate_bug_tracker(input_url):
"""
Validates that an issue tracker URI string contains one `%s` Python format
specification type (no other types are supported).
"""
try:
# Ignore escaped `%`'s
test_url = input_url.replace('%%', '')
if test_url.find('%s') == -1:
raise TypeError
# Ensure an arbitrary value can be inserted into the URL string
test_url = test_url % 1
except (TypeError, ValueError):
raise forms.ValidationError(["%s has invalid format specification "
"type(s). Use only one '%%s' to mark the "
"location of the bug id. If the URI "
"contains encoded values (e.g. '%%20'), "
"prepend the encoded values with an "
"additional '%%'."
% input_url])
| Python | 0 | |
298d3e352193e574e0c8980e37a50d226552109e | Create conf.py | docs/conf.py | docs/conf.py | extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'repoze.sphinx.autointerface',
'sphinxcontrib.programoutput',
'sphinxcontrib.images',
]
| Python | 0.000001 | |
acdb13c3680b7958f9a1def3e538ef9ebd166922 | add migration for org name + apptext | portal/migrations/versions/9b1bedfa916b_.py | portal/migrations/versions/9b1bedfa916b_.py | from alembic import op
import sqlalchemy as sa
from sqlalchemy.orm import sessionmaker
from portal.models.app_text import AppText
from portal.models.organization import Organization
"""empty message
Revision ID: 9b1bedfa916b
Revises: 441185240f62
Create Date: 2017-10-26 15:24:32.623899
"""
# revision identifiers, used by Alembic.
revision = '9b1bedfa916b'
down_revision = '441185240f62'
Session = sessionmaker()
def update_org_name(old, new):
bind = op.get_bind()
session = Session(bind=bind)
session.execute("UPDATE organizations SET name='{}' "
"WHERE name='{}'".format(new, old))
for at in session.query(AppText).filter(AppText.name.contains(old)):
at.name = at.name.replace(old, new)
session.commit()
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
update_org_name('CRV', 'TrueNTH Global Registry')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
update_org_name('TrueNTH Global Registry', 'CRV')
# ### end Alembic commands ###
| Python | 0.000001 | |
ac45b330072ec7d171602c698711844f29af77e8 | Create bgg_games.py | bgg_games.py | bgg_games.py | import csv
import time
from boardgamegeek import BoardGameGeek
bgg = BoardGameGeek()
# first import the game id list
f = open("bgg_game_ids.txt")
idlist = []
for line in f:
idlist.append(int(line.split()[0]))
f.close()
# data file
datafile = "bgg_games.csv"
# max and min game id
# (if you don't want to scrape the whole dataset in one go)
min_game_id = 1
max_game_id = 100
# header line (variable names)
header = ( 'snapshot_date', \
'id', \
'name', \
'year', \
'artists', \
'categories', \
'designers', \
'expansion', \
'expands', \
'expansions', \
'families', \
'implementations', \
'max_players', \
'mechanics', \
'min_age', \
'min_players', \
'playing_time', \
'publishers', \
'ranks', \
'rating_average', \
'rating_average_weight', \
'rating_bayes_average', \
'rating_median', \
'rating_num_weights', \
'rating_stddev', \
'users_commented', \
'users_owned', \
'users_rated', \
'users_trading', \
'users_wanting', \
'users_wishing')
# comment this part out if data file exists and you are appending
f = open(datafile, 'w', newline='')
csv.writer(f).writerow(header)
f.close()
# begin data collection
f = open(datafile, 'a', newline='')
writer = csv.writer(f)
for id in idlist:
if (id>=min_game_id and id<=max_game_id):
print(id, end="")
print('... ', end="")
try:
g = bgg.game(game_id = id)
line = ( time.strftime("%Y-%m-%d"), \
g.id, \
g.name, \
g.year, \
(' | '.join(g.artists)), \
(' | '.join(g.categories)), \
(' | '.join(g.designers)), \
g.expansion, \
str(g.expands), \
str(g.expansions), \
(' | '.join(g.families)), \
(' | '.join(g.implementations)), \
g.max_players, \
(' | '.join(g.mechanics)), \
g.min_age, \
g.min_players, \
g.playing_time, \
(' | '.join(g.publishers)), \
str(g.ranks), \
g.rating_average, \
g.rating_average_weight, \
g.rating_bayes_average, \
g.rating_median, \
g.rating_num_weights, \
g.rating_stddev, \
g.users_commented, \
g.users_owned, \
g.users_rated, \
g.users_trading, \
g.users_wanting, \
g.users_wishing)
writer.writerow(line)
except:
print('failed... ', end="")
f.close()
| Python | 0.000003 | |
4ce5e57b882ae057fa21d0397925512073447b77 | Add admin interface | chunked_upload/admin.py | chunked_upload/admin.py | from django.contrib import admin
from .models import ChunkedUpload
class ChunkedUploadAdmin(admin.ModelAdmin):
list_display = ('upload_id', 'file', 'filename', 'user', 'offset',
'created_on', 'status', 'completed_on')
admin.site.register(ChunkedUpload, ChunkedUploadAdmin)
| Python | 0.000001 | |
1db74fafd5f281053dc82d2d4ff2d24447db8338 | add initial Nose tests | tests/test_connection.py | tests/test_connection.py | from nose.tools import raises
from unittest.case import SkipTest
from urllib2 import urlopen
import StringIO
import mock
import datetime, md5
import harvestmedia.api.exceptions
import harvestmedia.api.config
import harvestmedia.api.client
api_key = '12345'
webservice_url = 'https://service.harvestmedia.net/HMP-WS.svc'
@mock.patch('harvestmedia.api.client.urlopen')
@raises(harvestmedia.api.exceptions.InvalidAPIResponse)
def test_xml_failure(urlopen_mock):
urlopen_mock.return_value = StringIO.StringIO('<xml><this xml is malformed</xml>')
hmconfig = harvestmedia.api.config.Config()
hmconfig.api_key = api_key
hmconfig.webservice_url = webservice_url
client = harvestmedia.api.client.Client()
@mock.patch('harvestmedia.api.client.urlopen')
def test_get_service_token(UrlOpenMock):
u = UrlOpenMock()
expiry = datetime.datetime.today().isoformat()
test_token = md5.md5(expiry).hexdigest() # generate an md5 from the date for testing
u.read.return_value = '<?xml version="1.0" encoding="utf-8"?><responseservicetoken><token value="%s" expiry="%s"/></responseservicetoken>' % (test_token, expiry)
hmconfig = harvestmedia.api.config.Config()
hmconfig.api_key = api_key
hmconfig.webservice_url = webservice_url
client = harvestmedia.api.client.Client()
assert client.service_token == test_token
assert client.service_token_expires == expiry
| Python | 0.000001 | |
a3deadbc54fad13e4e40da143f25ae4b26cf690b | Add missed travis-ci manage.py. | travis-ci/manage.py | travis-ci/manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "travis-ci.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| Python | 0 | |
58a5257505a4ae9d32cf233d059b4350f9494d86 | Create timer.py | timer.py | timer.py | #
# jasoncg
# 2015-02-23
#
# timer.py
#
# A simple timer supporting the Python "with" statement
#
import time
#
# Use in a "with" statement:
# with timer.Timer():
# perform_expensive_calculation()
#
# May also print the current progress:
# with timer.Timer() as t:
# perform_expensive_calculation_1()
# t.print_progress()
# perform_expensive_calculation_2()
#
class Timer():
def __enter__(self):
self.reset()
return self
def __exit__(self, type, value, traceback):
end = time.time()
print("Took %s seconds\n" %(end-self.start))
def reset(self):
# Reset the start to now
self.start = time.time()
self.elapsed = time.time()
def get_progress(self):
# Get the current time elapsed since start
return time.time() - self.start
def print_progress(self, message=None):
if message is None:
message=""
else:
message=message+" "
print("%s%s seconds\n" %(message, self.get_progress()))
def get_elapsed(self):
# Get the current time elapsed since start
newelapsed = time.time()
e = newelapsed - self.elapsed
self.elapsed = newelapsed
return e
def print_elapsed(self, message=None):
if message is None:
message=""
else:
message=message+" "
print("%s%s seconds\n" %(message, self.get_elapsed()))
| Python | 0.000003 | |
d1e568ab1e238586ed914de35ed44dc2231af3d2 | Create version.py | ngboost/version.py | ngboost/version.py | __version__ = "0.2.0"
| Python | 0.000001 | |
29bdfc794f759a5f8189d4c89dcaa3fa9699bc2b | Add sfp_numinfo | modules/sfp_numinfo.py | modules/sfp_numinfo.py | #-------------------------------------------------------------------------------
# Name: sfp_numinfo
# Purpose: SpiderFoot plug-in to search numinfo.net for a phone number
# and retrieve email address.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-05-28
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import re
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_numinfo(SpiderFootPlugin):
"""numinfo:Footprint,Investigate,Passive:Real World::Lookup phone number information."""
# Default options
opts = {
}
# Option descriptions
optdescs = {
}
results = dict()
errorState = False
def setup(self, sfc, userOpts=dict()):
self.sf = sfc
self.__dataSource__ = 'numinfo'
self.results = dict()
self.errorState = False
for opt in userOpts.keys():
self.opts[opt] = userOpts[opt]
# What events is this module interested in for input
def watchedEvents(self):
return ['PHONE_NUMBER']
# What events this module produces
def producedEvents(self):
return ['RAW_RIR_DATA', 'EMAILADDR']
# Query numinfo for the specified phone number
def query(self, qry):
number = qry.strip('+').strip('(').strip(')').strip('-').strip(' ')
if not number.isdigit():
self.sf.debug('Invalid phone number: ' + number)
return None
res = self.sf.fetchUrl("http://" + number + '.numinfo.net/',
timeout=self.opts['_fetchtimeout'],
useragent=self.opts['_useragent'])
time.sleep(1)
if res['content'] is None:
self.sf.debug('No response from numinfo.net')
return None
if res['code'] != '200':
return None
json_data = re.findall(r'<script type="application/ld\+json">(.+?)</script>',
res['content'], re.MULTILINE | re.DOTALL)
if not json_data:
return None
try:
data = json.loads(json_data[0])
except BaseException as e:
self.sf.debug('Error processing JSON response: ' + str(e))
return None
return data
# Handle events sent to this module
def handleEvent(self, event):
eventName = event.eventType
srcModuleName = event.module
eventData = event.data
if self.errorState:
return None
if eventData in self.results:
return None
self.results[eventData] = True
self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
data = self.query(eventData)
if data is None:
self.sf.debug('No phone information found for ' + eventData)
return None
evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
self.notifyListeners(evt)
if data.get('email'):
email_match = re.findall(r'^mailto:([a-zA-Z\.0-9_\-]+@[a-zA-Z\.0-9\-]+\.[a-zA-Z\.0-9\-]+)$', data.get('email'))
if email_match is not None:
evt = SpiderFootEvent('EMAILADDR', email_match[0], self.__name__, event)
self.notifyListeners(evt)
# End of sfp_numinfo class
| Python | 0.999397 | |
b6a6e6a9bf0254f9c79215c98b392b02db53827b | Add wireless module #305 | cme/modules/wireless.py | cme/modules/wireless.py | class CMEModule:
name = 'wireless'
description = "Get key of all wireless interfaces"
supported_protocols = ['smb']
opsec_safe = True
multiple_hosts = True
def options(self, context, module_options):
'''
'''
def on_admin_login(self, context, connection):
command = 'powershell.exe -c "(netsh wlan show profiles) | Select-String """"\:(.+)$"""" | %{$name=$_.Matches.Groups[1].Value.Trim(); $_} | %{(netsh wlan show profile name="$name" key=clear)}"'
context.log.info('Executing command')
p = connection.execute(command, True)
context.log.success(p)
| Python | 0 | |
74aaf7f459875c4dec9ed1076bf748786db4af0d | Add example for downloading TOP N files from VT matching an Intelligence search | examples/search_and_download_topn_files.py | examples/search_and_download_topn_files.py | #!/usr/bin/python
"""
This example program shows how to download files from VirusTotal matching a
VirusTotal Intelligence search.
NOTE: In order to use this script you will need to have access to
VT Intelligence or to the Premium API. Learn more about these services at:
https://www.virustotal.com/gui/intelligence-overview
https://developers.virustotal.com/v3.0/reference#search
https://www.virustotal.com/learn/
"""
import argparse
import asyncio
import logging
import os
import sys
import time
import vt
DEFAULT_PATH = 'intelligencefiles'
LOGGING_LEVEL = logging.INFO # Modify if you just want to focus on errors
logging.basicConfig(level=LOGGING_LEVEL,
format='%(asctime)s %(levelname)-8s %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
stream=sys.stdout)
class DownloadTopNFilesHandler:
"""Handler for Downloading files from VT."""
def __init__(self, apikey, num_files):
self.apikey = apikey
self.num_files = num_files
self.queue = asyncio.Queue()
async def download_files(self, download_path):
"""Download files in queue to the path referenced by `download_path`.
Args:
download_path: string representing the path where the files will be
stored.
"""
async with vt.Client(self.apikey) as client:
while True:
file_hash = await self.queue.get()
file_path = os.path.join(download_path, file_hash)
with open(file_path, 'wb') as f:
await client.download_file_async(file_hash, f)
self.queue.task_done()
async def queue_file_hashes(self, search):
"""Retrieve files from VT and enqueue them for being downloaded.
Args:
search: VT intelligence search query.
"""
async with vt.Client(self.apikey) as client:
it = client.iterator(
'/intelligence/search',
params={'query': search}, limit=self.num_files)
async for file_obj in it:
await self.queue.put(file_obj.sha256)
@staticmethod
def create_download_folder(path=None):
"""Create the folder where the downloaded files will be put."""
local_path = path or DEFAULT_PATH
folder_name = time.strftime('%Y%m%dT%H%M%S')
folder_path = os.path.join(local_path, folder_name)
if not os.path.exists(local_path):
os.mkdir(local_path)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
return folder_path
async def main():
"""Download the top-n results of a given Intelligence search."""
usage = 'usage: prog [options] <intelligence_query/local_file_with_hashes>'
parser = argparse.ArgumentParser(
usage=usage,
description='Allows you to download the top-n files returned by a given'
'VirusTotal Intelligence search. Example: '
'python %prog type:"peexe" positives:5+ -n 10 --apikey=<your api key>')
parser.add_argument(
'query', type=str, nargs='+',
help='a VirusTotal Intelligence search query.')
parser.add_argument(
'-n', '--numfiles', dest='numfiles', default=100,
help='Number of files to download')
parser.add_argument('--apikey', required=True, help='Your VirusTotal API key')
parser.add_argument(
'-o', '--output-path', required=False,
help='The path where you want to put the files in')
parser.add_argument(
'-w', '--workers', dest='workers', default=4,
help='Concurrent workers for downloading files')
args = parser.parse_args()
if not args.query:
parser.error('No search query provided')
if not args.apikey:
parser.error('No API key provided')
search = ' '.join(args.query)
search = search.strip().strip('\'')
storage_path = args.output_path
numfiles = int(args.numfiles)
workers = int(args.workers)
api_key = args.apikey
loop = asyncio.get_event_loop()
handler = DownloadTopNFilesHandler(api_key, numfiles)
logging.info('Starting VirusTotal Intelligence downloader')
logging.info('* VirusTotal Intelligence search: %s', search)
logging.info('* Number of files to download: %s', numfiles)
files_path = handler.create_download_folder(storage_path)
enqueue_files_task = loop.create_task(handler.queue_file_hashes(search))
download_tasks = []
for i in range(workers):
download_tasks.append(loop.create_task(handler.download_files(files_path)))
await asyncio.gather(enqueue_files_task)
# Wait until all the files have been queued and downloaded, then cancel
# download tasks that are idle
await handler.queue.join()
for w in download_tasks:
w.cancel()
if __name__ == '__main__':
loop = asyncio.get_event_loop()
loop.run_until_complete(main())
loop.close()
| Python | 0 | |
5ad1170c2515fd799acc43e99e35299bbab9cec1 | Add tests for harmonic in 791628c4df60369583474c07d64f1439bd5c19e0 | tests/test_transforms.py | tests/test_transforms.py | """ Test for `yatsm.regression.transforms`
"""
import numpy as np
import patsy
import py.test
from yatsm.regression.transforms import harm
def test_harmonic_transform():
x = np.arange(735688, 735688 + 100, 1)
design = patsy.dmatrix('0 + harm(x, 1)')
truth = np.vstack((np.cos(2 * np.pi / 365.25 * x),
np.sin(2 * np.pi / 365.25 * x))).T
np.testing.assert_equal(np.asarray(design), truth)
| Python | 0.000002 | |
5681684a4df6cd70ba4c2e4c667b81a7e8367e25 | add missing wrapmodule.py | dune/pymor/core/wrapmodule.py | dune/pymor/core/wrapmodule.py | # This file is part of the dune-pymor project:
# https://github.com/pyMor/dune-pymor
# Copyright Holders: Felix Albrecht, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from inspect import isclass
from types import ModuleType
from dune.pymor.core.wrapper import Wrapper
from dune.pymor.la.container import wrap_vector
from dune.pymor.discretizations import wrap_stationary_discretization
from dune.pymor.functionals import wrap_affinely_decomposed_functional, wrap_functional
from dune.pymor.operators import wrap_affinely_decomposed_operator, wrap_operator
def wrap_module(mod):
AffinelyDecomposedFunctionalInterface = mod.Dune.Pymor.AffinelyDecomposedFunctionalInterfaceDynamic
AffinelyDecomposedOperatorInterface = mod.Dune.Pymor.AffinelyDecomposedOperatorInterfaceDynamic
FunctionalInterface = mod.Dune.Pymor.FunctionalInterfaceDynamic
VectorInterface = mod.Dune.Pymor.LA.VectorInterfaceDynamic
OperatorInterface = mod.Dune.Pymor.OperatorInterfaceDynamic
Parameter = mod.Dune.Pymor.Parameter
ParameterFunctional = mod.Dune.Pymor.ParameterFunctional
ParameterType = mod.Dune.Pymor.ParameterType
StationaryDiscretizationInterface = mod.Dune.Pymor.StationaryDiscretizationInterfaceDynamic
wrapped_modules = {}
wrapper = Wrapper(DuneParameterType = ParameterType,
DuneParameter = Parameter)
def create_modules(mod, parent_name=''):
wrapped_mod = ModuleType(mod.__name__.lower())
full_name = '.'.join([parent_name, wrapped_mod.__name__]) if parent_name else wrapped_mod.__name__
wrapped_modules[mod] = {'wrapped': wrapped_mod, 'empty': True, 'full_name': full_name}
for k, v in mod.__dict__.iteritems():
if isinstance(v, ModuleType):
create_modules(v, full_name)
def add_to_module(k, v, mod):
wrapped_mod = wrapped_modules[mod]['wrapped']
full_name = wrapped_modules[mod]['full_name']
try:
v.__module__ = full_name
except AttributeError:
pass
wrapped_mod.__dict__[k] = v
wrapped_modules[mod]['empty'] = False
def add_modules(mod):
wrapped_mod = wrapped_modules[mod]['wrapped']
for k, v in mod.__dict__.iteritems():
if isinstance(v, ModuleType):
wv = add_modules(v)
if not wrapped_modules[v]['empty']:
wrapped_mod.__dict__[k.lower()] = wv
wrapped_modules[mod]['empty'] = False
return wrapped_mod
def wrap_vectors(mod):
for k, v in mod.__dict__.iteritems():
if isinstance(v, ModuleType):
wrap_vectors(v)
elif v == VectorInterface:
continue
elif isclass(v) and issubclass(v, VectorInterface):
wrapped_vector, vector_array = wrap_vector(v)
add_to_module(k, wrapped_vector, mod)
add_to_module(vector_array.__name__, vector_array, mod)
wrapper.add_vector_class(v, wrapped_vector, vector_array)
def wrap_classes(mod):
for k, v in mod.__dict__.iteritems():
if isinstance(v, ModuleType):
wrap_classes(v)
elif hasattr(v, '__name__') and 'Interface' in v.__name__:
continue
elif isclass(v):
if issubclass(v, AffinelyDecomposedFunctionalInterface):
wrapped_class = wrap_affinely_decomposed_functional(v, wrapper)
elif issubclass(v, AffinelyDecomposedOperatorInterface):
wrapped_class = wrap_affinely_decomposed_operator(v, wrapper)
elif issubclass(v, FunctionalInterface):
wrapped_class = wrap_functional(v, wrapper)
elif issubclass(v, OperatorInterface):
wrapped_class = wrap_operator(v, wrapper)
elif issubclass(v, StationaryDiscretizationInterface):
wrapped_class = wrap_stationary_discretization(v, wrapper)
else:
continue
add_to_module(k, wrapped_class, mod)
wrapper.add_class(v, wrapped_class)
create_modules(mod)
wrap_vectors(mod)
wrap_classes(mod)
wrapped_module = add_modules(mod)
return wrapped_module, wrapper
| Python | 0.000001 | |
3c37704b3b819bee5d441c75a6fd59a64279a0e8 | use unicode.strip instead of string.strip of the string module for metadata processors | pelican/readers.py | pelican/readers.py | # -*- coding: utf-8 -*-
try:
from docutils import core
# import the directives to have pygments support
from pelican import rstdirectives
except ImportError:
core = False
try:
from markdown import Markdown
except ImportError:
Markdown = False
import re
from pelican.utils import get_date, open
# Per-field post-processors applied to raw metadata values; fields without an
# entry are stored verbatim.  unicode.strip assumes values are unicode
# (Python 2) -- byte strings would raise TypeError here.
_METADATAS_PROCESSORS = {
    'tags': lambda x: map(unicode.strip, x.split(',')),
    'date': lambda x: get_date(x),
    'status': unicode.strip,
}
class Reader(object):
    """Base class for file readers; the extension map is built from its
    subclasses (see _EXTENSIONS below)."""
    # Subclasses flip this to False when their third-party dependency
    # (docutils, markdown, ...) is missing.
    enabled = True
class RstReader(Reader):
    """Reader for reStructuredText files (requires docutils)."""
    enabled = bool(core)
    extension = "rst"
    # Matches ':name: value' field lines; compiled once instead of on every
    # _parse_metadata call.
    _metadata_re = re.compile('^:([a-z]+): (.*)\s', re.M)
    def _parse_metadata(self, content):
        """Return the dict of metadata parsed from ':name: value' lines."""
        output = {}
        for m in self._metadata_re.finditer(content):
            name, value = m.group(1).lower(), m.group(2)
            # Apply the field-specific post-processor, if any.
            output[name] = _METADATAS_PROCESSORS.get(name, lambda x: x)(value)
        return output
    def read(self, filename):
        """Parse restructured text; return (html_body, metadata_dict)."""
        text = open(filename)
        metadatas = self._parse_metadata(text)
        extra_params = {'input_encoding': 'unicode',
                        'initial_header_level': '2'}
        rendered_content = core.publish_parts(text, writer_name='html',
                settings_overrides=extra_params)
        title = rendered_content.get('title')
        content = rendered_content.get('body')
        # Fall back to the document title rendered by docutils.
        # ('in' replaces the deprecated dict.has_key().)
        if 'title' not in metadatas:
            metadatas['title'] = title
        return content, metadatas
class MarkdownReader(Reader):
    """Reader for Markdown files (requires the markdown package)."""
    enabled = bool(Markdown)
    extension = "md"
    def read(self, filename):
        """Parse content and metadata of markdown files"""
        text = open(filename)
        md = Markdown(extensions = ['meta', 'codehilite'])
        content = md.convert(text)
        metadatas = {}
        # md.Meta maps each header name to a list of lines; only the first
        # line of each value is used here.
        for name, value in md.Meta.items():
            name = name.lower()
            metadatas[name] = _METADATAS_PROCESSORS.get(
                name, lambda x:x
            )(value[0])
        return content, metadatas
class HtmlReader(Reader):
    """Reader for (x)HTML files; metadata comes from '<!--# key : value -->'
    comments embedded in the markup."""
    extension = "html"
    # NOTE(review): '[A-z]' also matches '[', '\\', ']', '^' and backtick --
    # '[A-Za-z]' was probably intended; likewise '\:s?' looks like a typo
    # for ':\s?'.  Left untouched to preserve current matching behaviour.
    _re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')
    def read(self, filename):
        """Parse content and metadata of (x)HTML files"""
        content = open(filename)
        metadatas = {'title':'unnamed'}
        for i in self._re.findall(content):
            # Drop the 5-char '<!--#' prefix before the colon and the
            # trailing '-->' after it.
            key = i.split(':')[0][5:].strip()
            value = i.split(':')[-1][:-3].strip()
            metadatas[key.lower()] = value
        return content, metadatas
# Map file extension -> reader class, built from all Reader subclasses.
_EXTENSIONS = dict((cls.extension, cls) for cls in Reader.__subclasses__())
def read_file(filename, fmt=None):
    """Parse *filename* with the reader registered for *fmt*.

    When *fmt* is not given it is derived from the file extension.
    Raises TypeError for unknown formats and ValueError when the reader's
    third-party dependency is missing.
    """
    if not fmt:
        fmt = filename.split('.')[-1]
    # Membership test directly on the dict instead of building .keys().
    if fmt not in _EXTENSIONS:
        raise TypeError('Pelican does not know how to parse %s' % filename)
    reader = _EXTENSIONS[fmt]()
    if not reader.enabled:
        raise ValueError("Missing dependencies for %s" % fmt)
    return reader.read(filename)
| # -*- coding: utf-8 -*-
try:
from docutils import core
# import the directives to have pygments support
from pelican import rstdirectives
except ImportError:
core = False
try:
from markdown import Markdown
except ImportError:
Markdown = False
import re
import string
from pelican.utils import get_date, open
# Per-field post-processors applied to raw metadata values.
# NOTE(review): the 'string' module's function wrappers are deprecated in
# Python 2 in favour of the str/unicode methods (e.g. unicode.strip), which
# also behave correctly for unicode input.
_METADATAS_PROCESSORS = {
    'tags': lambda x: map(string.strip, x.split(',')),
    'date': lambda x: get_date(x),
    'status': string.strip,
}
class Reader(object):
enabled = True
class RstReader(Reader):
enabled = bool(core)
extension = "rst"
def _parse_metadata(self, content):
"""Return the dict containing metadatas"""
output = {}
for m in re.compile('^:([a-z]+): (.*)\s', re.M).finditer(content):
name, value = m.group(1).lower(), m.group(2)
output[name] = _METADATAS_PROCESSORS.get(
name, lambda x:x
)(value)
return output
def read(self, filename):
"""Parse restructured text"""
text = open(filename)
metadatas = self._parse_metadata(text)
extra_params = {'input_encoding': 'unicode',
'initial_header_level': '2'}
rendered_content = core.publish_parts(text, writer_name='html',
settings_overrides=extra_params)
title = rendered_content.get('title')
content = rendered_content.get('body')
if not metadatas.has_key('title'):
metadatas['title'] = title
return content, metadatas
class MarkdownReader(Reader):
enabled = bool(Markdown)
extension = "md"
def read(self, filename):
"""Parse content and metadata of markdown files"""
text = open(filename)
md = Markdown(extensions = ['meta', 'codehilite'])
content = md.convert(text)
metadatas = {}
for name, value in md.Meta.items():
name = name.lower()
metadatas[name] = _METADATAS_PROCESSORS.get(
name, lambda x:x
)(value[0])
return content, metadatas
class HtmlReader(Reader):
extension = "html"
_re = re.compile('\<\!\-\-\#\s?[A-z0-9_-]*\s?\:s?[A-z0-9\s_-]*\s?\-\-\>')
def read(self, filename):
"""Parse content and metadata of (x)HTML files"""
content = open(filename)
metadatas = {'title':'unnamed'}
for i in self._re.findall(content):
key = i.split(':')[0][5:].strip()
value = i.split(':')[-1][:-3].strip()
metadatas[key.lower()] = value
return content, metadatas
_EXTENSIONS = dict((cls.extension, cls) for cls in Reader.__subclasses__())
def read_file(filename, fmt=None):
"""Return a reader object using the given format."""
if not fmt:
fmt = filename.split('.')[-1]
if fmt not in _EXTENSIONS.keys():
raise TypeError('Pelican does not know how to parse %s' % filename)
reader = _EXTENSIONS[fmt]()
if not reader.enabled:
raise ValueError("Missing dependencies for %s" % fmt)
return reader.read(filename)
| Python | 0.000001 |
ddb9c9b3108ac587e3c29e7a45bacea6afd488cc | add python solution to "project euler - problem 11" | problem11.py | problem11.py | number_string = """08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08
49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00
81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65
52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91
22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80
24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50
32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70
67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21
24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72
21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95
78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92
16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57
86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58
19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40
04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66
88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69
04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36
20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16
20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54
01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"""
# convert the big block number string into a two dimensional array of integers
# This list comprehension parses the rows first and then each column, which means that we will
# end up with matrix[y][x] instead of matrix[x][y] which would have been more intuitive
int_matrix = [[int(number_string) for number_string in row_string.split(" ")] for row_string in number_string.split("\n")]
def get_cell(x, y):
    """Return the grid value at (x, y), or 0 when out of bounds.

    Returning 0 outside the 20x20 grid makes any product that reaches past
    an edge evaluate to zero, so the product helpers below need no bounds
    checks of their own.
    """
    if (0 <= x <= 19 and 0 <= y <= 19):
        # reversed coordinate axis (use y,x instead of x,y) due to parsing
        return int_matrix[y][x]
    else:
        # hack to make sure products involving this cell value will be zero
        # wow this is sooo ugly :-(
        return 0
def check_vertical(x, y):
    """Product of the 4 cells running downwards from (x, y)."""
    return get_cell(x,y) * get_cell(x,y+1) * get_cell(x,y+2) * get_cell(x,y+3)
def check_horizontal(x, y):
    """Product of the 4 cells running right from (x, y)."""
    return get_cell(x,y) * get_cell(x+1,y) * get_cell(x+2,y) * get_cell(x+3,y)
# south west (sw) to north east (ne)
def check_nw_se_diagonal(x, y):
    """Product of the 4 cells on the down-right diagonal from (x, y)."""
    return get_cell(x,y) * get_cell(x+1,y+1) * get_cell(x+2,y+2) * get_cell(x+3,y+3)
# north east (ne) to south west (sw)
def check_ne_sw_diagonal(x, y):
    """Product of the 4 cells on the down-left diagonal from (x, y)."""
    return get_cell(x,y) * get_cell(x-1,y+1) * get_cell(x-2,y+2) * get_cell(x-3,y+3)
def get_highest_cell_product(x, y):
    """Largest 4-cell product that starts at (x, y) in any direction."""
    return max(check_vertical(x, y), check_horizontal(x, y), check_nw_se_diagonal(x, y), check_ne_sw_diagonal(x, y))
# Print the parsed grid as a sanity check (Python 2 print statement).
for y in xrange(0,20):
    for x in xrange(0,20):
        print str(get_cell(x,y)).zfill(2),
    print ""
# Scan every cell as a potential starting point and keep the best product.
greatest_cell_product = 0
for y in xrange(0,20):
    for x in xrange(0,20):
        cell_product = get_highest_cell_product(x, y)
        if (cell_product > greatest_cell_product):
            greatest_cell_product = cell_product
print "greatest_product==", greatest_cell_product
| Python | 0 | |
0d390edeeb8829c0b8afef090f133d0fee8bce4f | Bump PROVISION_VERSION for latest changes. | version.py | version.py | ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '9.1'
| ZULIP_VERSION = "1.6.0+git"
PROVISION_VERSION = '9.0'
| Python | 0 |
59a228312bb3091db8bfb6bf9a75ce4ae47431f4 | Add zero system test to neural net | neuralnets/net_test.py | neuralnets/net_test.py | from net import NeuralNet
import numpy as np
#TODO(Wesley) More tests
class TestNeuralNet(object):
    """Tests for the project-local NeuralNet class."""
    def test_zero_system(self):
        # With all-zero weights every post-input layer should sit at the
        # activation of 0.  NOTE(review): asserting 0.5 assumes a
        # sigmoid-style activation (f(0) == 0.5) -- confirm against
        # NeuralNet's implementation.
        net = NeuralNet(3, 2, 4, 1, seed=0)
        net.weights = [ np.zeros((3,4)),
                        np.zeros((4,4)),
                        np.zeros((4,4)),
                        np.zeros((4,1)) ]
        inpt = np.asarray([1, 1, 1])
        print(net.forward(inpt))
        # Skip layer 0 (the raw input) and check every neuron downstream.
        for layer in net.forward(inpt)[1:]:
            for neuron in layer:
                assert neuron == 0.5
| Python | 0.004377 | |
0a7fb32471fa5ae6e66348527c0ae2f299361211 | Allow to pass extra argument to the Base class initializer | opbeat/handlers/logging.py | opbeat/handlers/logging.py | """
opbeat.handlers.logging
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import sys
import traceback
from opbeat.base import Client
from opbeat.utils import six
from opbeat.utils.encoding import to_string
from opbeat.utils.stacks import iter_stack_frames
class OpbeatHandler(logging.Handler, object):
    """logging.Handler that forwards log records to an Opbeat Client."""
    def __init__(self, *args, **kwargs):
        # Accept either an existing Client (positionally or via 'client='),
        # or the arguments needed to construct a new one.
        client = kwargs.pop('client_cls', Client)
        if len(args) == 1:
            arg = args[0]
            args = args[1:]
            if isinstance(arg, Client):
                self.client = arg
            else:
                raise ValueError(
                    'The first argument to %s must be a Client instance, '
                    'got %r instead.' % (
                        self.__class__.__name__,
                        arg,
                    ))
        elif 'client' in kwargs:
            self.client = kwargs.pop('client')
        else:
            self.client = client(*args, **kwargs)
        # NOTE(review): leftover *args/**kwargs are forwarded to
        # logging.Handler.__init__, which accepts only 'level' -- client
        # construction kwargs reaching this call would raise TypeError.
        super(OpbeatHandler, self).__init__(*args, **kwargs)
    def emit(self, record):
        self.format(record)
        # Avoid typical config issues by overriding loggers behavior
        if record.name.startswith('opbeat.errors'):
            six.print_(to_string(record.message), file=sys.stderr)
            return
        try:
            return self._emit(record)
        except Exception:
            # Never let logging failures propagate into the application;
            # report on stderr and attempt a last-ditch capture.
            six.print_(
                "Top level Opbeat exception caught - "
                "failed creating log record",
                sys.stderr)
            six.print_(to_string(record.msg), sys.stderr)
            six.print_(to_string(traceback.format_exc()), sys.stderr)
            try:
                self.client.capture('Exception')
            except Exception:
                pass
    def _emit(self, record, **kwargs):
        """Translate *record* into a client.capture('Message') call."""
        data = {}
        # Copy dotted attribute names (and 'culprit') straight through.
        for k, v in six.iteritems(record.__dict__):
            if '.' not in k and k not in ('culprit',):
                continue
            data[k] = v
        stack = getattr(record, 'stack', None)
        if stack is True:
            stack = iter_stack_frames()
        if stack:
            # Drop the leading frames that belong to the logging machinery:
            # skip until we leave modules whose name starts with 'logging'.
            frames = []
            started = False
            last_mod = ''
            for item in stack:
                if isinstance(item, (list, tuple)):
                    frame, lineno = item
                else:
                    frame, lineno = item, item.f_lineno
                if not started:
                    f_globals = getattr(frame, 'f_globals', {})
                    module_name = f_globals.get('__name__', '')
                    if last_mod.startswith(
                            'logging') and not module_name.startswith(
                            'logging'):
                        started = True
                    else:
                        last_mod = module_name
                        continue
                frames.append((frame, lineno))
            stack = frames
        extra = getattr(record, 'data', {})
        # Add in all of the data from the record that we aren't already capturing
        for k in record.__dict__.keys():
            if k in (
                    'stack', 'name', 'args', 'msg', 'levelno', 'exc_text',
                    'exc_info', 'data', 'created', 'levelname', 'msecs',
                    'relativeCreated'):
                continue
            if k.startswith('_'):
                continue
            extra[k] = record.__dict__[k]
        date = datetime.datetime.utcfromtimestamp(record.created)
        # If there's no exception being processed,
        # exc_info may be a 3-tuple of None
        # http://docs.python.org/library/sys.html#sys.exc_info
        if record.exc_info and all(record.exc_info):
            handler = self.client.get_handler('opbeat.events.Exception')
            data.update(handler.capture(exc_info=record.exc_info))
            # data['checksum'] = handler.get_hash(data)
        data['level'] = record.levelno
        data['logger'] = record.name
        return self.client.capture('Message',
                                   param_message={'message': record.msg,
                                                  'params': record.args},
                                   stack=stack, data=data, extra=extra,
                                   date=date, **kwargs)
| """
opbeat.handlers.logging
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011-2012 Opbeat
Large portions are
:copyright: (c) 2010 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import datetime
import logging
import sys
import traceback
from opbeat.base import Client
from opbeat.utils import six
from opbeat.utils.encoding import to_string
from opbeat.utils.stacks import iter_stack_frames
class OpbeatHandler(logging.Handler, object):
def __init__(self, *args, **kwargs):
client = kwargs.get('client_cls', Client)
if len(args) == 1:
arg = args[0]
if isinstance(arg, Client):
self.client = arg
else:
raise ValueError(
'The first argument to %s must be a Client instance, '
'got %r instead.' % (
self.__class__.__name__,
arg,
))
elif 'client' in kwargs:
self.client = kwargs['client']
else:
self.client = client(*args, **kwargs)
logging.Handler.__init__(self)
def emit(self, record):
self.format(record)
# Avoid typical config issues by overriding loggers behavior
if record.name.startswith('opbeat.errors'):
six.print_(to_string(record.message), file=sys.stderr)
return
try:
return self._emit(record)
except Exception:
six.print_(
"Top level Opbeat exception caught - "
"failed creating log record",
sys.stderr)
six.print_(to_string(record.msg), sys.stderr)
six.print_(to_string(traceback.format_exc()), sys.stderr)
try:
self.client.capture('Exception')
except Exception:
pass
def _emit(self, record, **kwargs):
data = {}
for k, v in six.iteritems(record.__dict__):
if '.' not in k and k not in ('culprit',):
continue
data[k] = v
stack = getattr(record, 'stack', None)
if stack is True:
stack = iter_stack_frames()
if stack:
frames = []
started = False
last_mod = ''
for item in stack:
if isinstance(item, (list, tuple)):
frame, lineno = item
else:
frame, lineno = item, item.f_lineno
if not started:
f_globals = getattr(frame, 'f_globals', {})
module_name = f_globals.get('__name__', '')
if last_mod.startswith(
'logging') and not module_name.startswith(
'logging'):
started = True
else:
last_mod = module_name
continue
frames.append((frame, lineno))
stack = frames
extra = getattr(record, 'data', {})
# Add in all of the data from the record that we aren't already capturing
for k in record.__dict__.keys():
if k in (
'stack', 'name', 'args', 'msg', 'levelno', 'exc_text',
'exc_info', 'data', 'created', 'levelname', 'msecs',
'relativeCreated'):
continue
if k.startswith('_'):
continue
extra[k] = record.__dict__[k]
date = datetime.datetime.utcfromtimestamp(record.created)
# If there's no exception being processed,
# exc_info may be a 3-tuple of None
# http://docs.python.org/library/sys.html#sys.exc_info
if record.exc_info and all(record.exc_info):
handler = self.client.get_handler('opbeat.events.Exception')
data.update(handler.capture(exc_info=record.exc_info))
# data['checksum'] = handler.get_hash(data)
data['level'] = record.levelno
data['logger'] = record.name
return self.client.capture('Message',
param_message={'message': record.msg,
'params': record.args},
stack=stack, data=data, extra=extra,
date=date, **kwargs)
| Python | 0 |
d97b9f6c508dd24da0f86bc1587ea64708c84a89 | Add parser for the advisory mail recipients. | tools/dist/security/mailinglist.py | tools/dist/security/mailinglist.py | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
Parser for advisory e-mail distribution addresses
"""
from __future__ import absolute_import
import os
import re
class MailingList(object):
    """
    A list of e-mail addresses for security advisory pre-notifications.

    Parses ^/pmc/subversion/security/pre-notifications.txt
    """

    __PRE_NOTIFICATIONS = 'pre-notifications.txt'
    # An address line is six leading whitespace characters, an optional
    # display name, then an <addr> in angle brackets.
    __ADDRESS_LINE = re.compile(r'^\s{6}(?:[^<]+)?<[^<>]+>\s*$')

    def __init__(self, rootdir):
        # Parse eagerly so iteration/len never touch the filesystem again.
        self.__addresses = self.__parse_addresses(rootdir)

    def __iter__(self):
        return iter(self.__addresses)

    def __len__(self):
        return len(self.__addresses)

    def __parse_addresses(self, rootdir):
        """Return the stripped address lines found in the notification file."""
        path = os.path.join(rootdir, self.__PRE_NOTIFICATIONS)
        with open(path, 'rt') as stream:
            return [line.strip() for line in stream
                    if self.__ADDRESS_LINE.match(line)]
| Python | 0.000296 | |
80a7493e56b1ba6b01bf44f6dd9140de916511a7 | add twisted interface to psycopg2 | pyiem/twistedpg.py | pyiem/twistedpg.py | """
module twistedpg.py
Author: Federico Di Gregorio
http://twistedmatrix.com/pipermail/twisted-python/2006-April/012955.html
"""
from psycopg2 import *
from psycopg2 import connect as _2connect
from psycopg2.extensions import connection as _2connection
from psycopg2.extras import RealDictCursor
del connect
def connect(*args, **kwargs):
    """Drop-in psycopg2.connect that forces our dict-cursor connection class."""
    # Override (or set) the factory so every connection is our subclass.
    kwargs['connection_factory'] = connection
    return _2connect(*args, **kwargs)
class connection(_2connection):
    """psycopg2 connection whose cursors return rows as dictionaries."""
    def cursor(self):
        # Always hand out RealDictCursor so rows behave like real dicts.
        return _2connection.cursor(self, cursor_factory=RealDictCursor)
| Python | 0 | |
009182d0c603f9c1f8fa650f6a9771b38a74c6cc | Add a proper validator for disable_builtins | flexget/plugins/plugin_disable_builtins.py | flexget/plugins/plugin_disable_builtins.py | import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
    """Helper function to return an iterator over all builtin plugins."""
    # 'plugins' is the global plugin registry imported from flexget.plugin.
    return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
    """Disables all (or specific) builtin plugins from a feed."""
    def validator(self):
        # Config is either a boolean (disable everything) or a list of
        # builtin plugin names to disable.
        from flexget import validator
        root = validator.factory()
        root.accept('boolean')
        root.accept('list').accept('choice').accept_choices(plugin.name for plugin in all_builtins())
        return root
    def debug(self):
        log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))
    # High priority: flip the 'builtin' flags before other plugins run.
    @priority(255)
    def on_feed_start(self, feed, config):
        self.disabled = []
        if not config:
            return
        for plugin in all_builtins():
            if config is True or plugin.name in config:
                plugin.builtin = False
                self.disabled.append(plugin.name)
        log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))
    # Low priority: restore the registry after everything else finished.
    @priority(-255)
    def on_feed_exit(self, feed, config):
        if not self.disabled:
            return
        for name in self.disabled:
            plugin.plugins[name].builtin = True
        log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
        self.disabled = []
    # An aborted feed must restore the registry exactly like a clean exit.
    on_feed_abort = on_feed_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
| import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin
log = logging.getLogger('builtins')
class PluginDisableBuiltins(object):
"""
Disables all builtin plugins from a feed.
"""
def __init__(self):
self.disabled = []
def validator(self):
from flexget import validator
# TODO: accept only list (of texts) or boolean
return validator.factory('any')
def debug(self):
for name, info in plugin.plugins.iteritems():
if not info.builtin:
continue
log.debug('Builtin plugin: %s' % name)
def on_feed_start(self, feed):
for name, info in plugin.plugins.iteritems():
if info.builtin:
if isinstance(feed.config['disable_builtins'], list):
if info.name in feed.config['disable_builtins']:
info.builtin = False
self.disabled.append(name)
else:
# disabling all builtins
info.builtin = False
self.disabled.append(name)
log.debug('Disabled builtin plugin %s' % ', '.join(self.disabled))
@priority(-255)
def on_feed_exit(self, feed):
names = []
for name in self.disabled:
names.append(name)
plugin.plugins[name].builtin = True
self.disabled = []
log.debug('Enabled builtin plugins %s' % ', '.join(names))
on_feed_abort = on_feed_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins')
| Python | 0.000064 |
98524c4e7c7c4b6e8b51b7fd89501d8ac00e0d8e | generates a hash of the string input | elements/GenerateHashOfString.py | elements/GenerateHashOfString.py | # coding: utf-8
from ElementBase import ElementBase
from ElementParameter import ElementParameter
from ElementValue import ElementValue
import hashlib
class GenerateHashOfString(ElementBase):
    """Pipeline element that hashes its string input with a chosen algorithm."""
    def __init__(self):
        self.status = 'running'
        self.output = None
        self.params = []
        self.type = 'Standard'
        self.setup_params()
    def can_handle_list(self):
        # Single string values only; list inputs are not supported.
        return False
    def setup_params(self):
        # Offer every hash algorithm the local hashlib/OpenSSL build provides.
        algs = []
        for t in hashlib.algorithms_available:
            algs.append(t)
        self.params.append(ElementParameter(name='algorithms',displayName='Hash Algorithm',display=True, type='list',value='md5',allowedValues=algs))
    def get_status(self):
        return self.status
    def get_input_type(self):
        return 'string'
    def get_output(self):
        return self.output
    def get_output_type(self):
        return 'string'
    def get_params(self):
        return self.params
    def set_params(self, params = None):
        self.params = params or []
    def get_description(self):
        return 'Generates a hash of the input value and returns it'
    def get_title(self):
        return 'Generate Hash of string'
    def get_icon(self):
        return 'iob:ios7_cog_outline_32'
    def get_category(self):
        return 'Utility'
    def get_type(self):
        return self.type
    def run(self, input=''):
        """Hash input.value with the selected algorithm; return the hex digest."""
        algo = self.get_param_by_name('algorithms')
        self.status = 'complete'
        # NOTE(review): 'type=self.output' is always None here -- was
        # 'type=self.get_output_type()' intended?  Confirm against ElementValue.
        return ElementValue(type=self.output, value=hashlib.new(algo.value, input.value.encode('utf-8')).hexdigest())
| Python | 0.999975 | |
f47482df83a8ab643a55062b12fce11fbd703886 | add 90. The first 100 problems have been solved! Oh~~~~~~~~Yeah | vol2/90.py | vol2/90.py | from itertools import combinations
def valid(c1, c2):
    """True if every pair in the module-level 'squares' list can be shown
    with one digit on each of the cubes c1 and c2, in either order."""
    return all(x in c1 and y in c2 or x in c2 and y in c1 for x, y in squares)
if __name__ == "__main__":
    # Digit pairs of the squares 01..81, with 9 written as 6 (an upside-down
    # 6 shows 9, so only 6 appears in the pairs and in the face pool below).
    squares = [(0,1), (0,4), (0,6), (1,6), (2,5), (3,6), (4,6), (8,1)]
    # All 6-face selections; 6 appears twice in the pool so a face standing
    # in for 9 can coexist with a real 6.
    cube = list(combinations([0,1,2,3,4,5,6,7,8,6], 6))
    # Count unordered cube pairs (i < j) that can display every square.
    print sum(1 for i, c1 in enumerate(cube)
              for c2 in cube[i+1:] if valid(c1, c2))
| Python | 0.999935 | |
ddbbfbb5db9cf2f4ff4338924a7965c2dffc1e3f | add VWS API client implement file | vuforia.py | vuforia.py | #!/usr/bin/env python
# Copyright 2013
"""Simple VWS client implementation using python.
The client supports add, update, retrieving, delete and listing targets
on a Vuforia Cloud Database.
"""
import hmac
import hashlib
import base64
import json
from time import strftime, gmtime
from httplib import HTTPSConnection
class Vuforia(object):
    """A blocking vuforia vws client.

    This class is used to interact with the VWS API; it implements all the
    operations defined in this module.  Every request is signed with the
    database's server access-key pair using HMAC-SHA1, as required by the
    VWS REST API.
    """
    def __init__(self, access_key, secret_key, host='vws.vuforia.com'):
        self.host = host  # https://vws.vuforia.com.
        # access_key and secret_key are provided when you set up your
        # Cloud Recognition Database; you can retrieve these values from
        # the Target Manager at any time.
        self.access_key = access_key
        self.secret_key = secret_key
    def _gmtnow(self):
        """Return the current time as an HTTP date string (GMT)."""
        return strftime("%a, %d %b %Y %H:%M:%S GMT", gmtime())
    def _build_authorization(self, method, request_path, date,
                             content_type='', content=''):
        """Calculate the Authorization header value for a request.

        signature = base64(hmac-sha1(secret_key, string_to_sign))
        string_to_sign =
            HTTP-Verb + '\n' +
            Content-MD5 + '\n' +   # hexdecimal MD5 of the request body
                                   # ('' bodies hash the empty string)
            Content-Type + '\n' +  # '' for request types without body
            Date + '\n' +          # GMT date, e.g. Sun, 22 Apr 2013 09:24:28 GMT
            Request-Path
        """
        # The md5 hash needs to be transformed to hexdecimal.
        content_md5 = hashlib.md5(content).hexdigest()
        string_to_sign = '\n'.join([method,
                                    content_md5,
                                    content_type,
                                    date,
                                    request_path])
        # Calculate hmac-sha1 hash string using secret_key.
        sign_sha1_hash_digest = hmac.new(self.secret_key,
                                         string_to_sign, hashlib.sha1).digest()
        # The sha1 hash needs to be transformed to base64.
        signature = base64.b64encode(sign_sha1_hash_digest)
        return 'VWS ' + self.access_key + ':' + signature
    def _signed_headers(self, method, request_path, content_type='', content=''):
        """Build the Date/Authorization (and Content-Type) request headers.

        Factored out of the individual operations, which previously all
        duplicated this boilerplate.
        """
        date_value = self._gmtnow()
        authorization = self._build_authorization(method=method,
                                                  request_path=request_path,
                                                  date=date_value,
                                                  content_type=content_type,
                                                  content=content)
        headers = {
            'Date': date_value,
            'Authorization': authorization,
        }
        if content_type:
            headers['Content-Type'] = content_type + '; charset=utf-8'
        return headers
    def _do_request(self, method, request_path, body=None, headers=None):
        """Execute the HTTPS request; return ((status, reason), parsed_json)."""
        conn = HTTPSConnection(host=self.host, port=443)
        try:
            conn.request(method=method, url=request_path,
                         body=body, headers=headers)
            response = conn.getresponse()
            body = response.read()
        finally:
            # Always release the connection, even on request failure.
            conn.close()
        return (response.status, response.reason), json.loads(body)
    def add_target(self, target):
        """Add a new target (HTTPS POST on /targets)."""
        content = json.dumps(target)
        headers = self._signed_headers('POST', '/targets',
                                       'application/json', content)
        return self._do_request(method='POST', request_path='/targets',
                                body=content, headers=headers)
    def update_target(self, target_id, updates):
        """Update an existing target (HTTPS PUT on /targets/{target_id})."""
        request_path = '/targets/' + target_id
        content = json.dumps(updates)
        headers = self._signed_headers('PUT', request_path,
                                       'application/json', content)
        return self._do_request(method='PUT', request_path=request_path,
                                body=content, headers=headers)
    def get_target_by_id(self, target_id):
        """Retrieve one target record (HTTPS GET on /targets/{target_id})."""
        request_path = '/targets/' + target_id
        return self._do_request(method='GET', request_path=request_path,
                                headers=self._signed_headers('GET', request_path))
    def delete_target(self, target_id):
        """Delete a target (HTTPS DELETE on /targets/{target_id})."""
        request_path = '/targets/' + target_id
        return self._do_request(method='DELETE', request_path=request_path,
                                headers=self._signed_headers('DELETE', request_path))
    def list_targets(self):
        """List all target ids in the database (HTTPS GET on /targets)."""
        return self._do_request(method='GET', request_path='/targets',
                                headers=self._signed_headers('GET', '/targets'))
    def get_target_summary(self, target_id):
        """Get one target's summary report (HTTPS GET on /summary/{id})."""
        request_path = '/summary/' + target_id
        return self._do_request(method='GET', request_path=request_path,
                                headers=self._signed_headers('GET', request_path))
    def get_db_summary(self):
        """Get the whole-database summary report (HTTPS GET on /summary)."""
        return self._do_request(method='GET', request_path='/summary',
                                headers=self._signed_headers('GET', '/summary'))
    def close(self):
        # Kept for API symmetry; each request closes its own connection.
        pass
| Python | 0 | |
c6cd7d2a310bc0b107e0d2a481260b2e95bac577 | add prime_factors function to utils | utils.py | utils.py | "Utilities to help solving problems."
def prime_factors(num):
    """Yield the prime factors of *num* in non-decreasing order.

    Trial division up to sqrt(num); whatever remains above 1 afterwards
    is itself prime and is yielded last.
    """
    divisor = 2
    while divisor * divisor <= num:
        if num % divisor == 0:
            num //= divisor
            yield divisor
        else:
            divisor += 1
    if num > 1:
        yield num
| Python | 0.000008 | |
6be93bfbaf254234f008e2c714b0aae10434fe68 | add orm | www/orm.py | www/orm.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
__author__ = 'Jiayi Li'
import asyncio, aiomysql, logging
def log(sql, args=()):
    """Log the SQL statement about to run.

    *args* is accepted for symmetry with the callers but is currently
    unused in the message.
    """
    # Lazy %-style argument lets logging skip the formatting work when
    # the INFO level is disabled.
    logging.info('SQL: %s', sql)
# create a connection pool, stored by global variable '__pool'
async def create_pool(loop, **kw):
    """Create the module-wide aiomysql connection pool.

    Connection parameters are taken from **kw (user/password/db are
    expected; everything else has a default).  The pool is stored in the
    module-global '__pool' used by select() and execute().
    """
    logging.info('create database connection pool...')
    global __pool
    __pool = await aiomysql.create_pool(
        host = kw.get('host', 'localhost'),
        port = kw.get('port', 3306),
        user = kw.get('user'),
        password = kw.get('password'),
        db = kw.get('db'),
        # MySQL charset names contain no hyphen: 'utf-8' is rejected by the
        # driver, so the default must be 'utf8'.
        charset = kw.get('charset', 'utf8'),
        autocommit = kw.get('autocommit', True),
        maxsize = kw.get('maxsize', 10),
        minsize = kw.get('minsize', 1),
        loop = loop
    )
# SELECT
async def select(sql, args, size=None):
    """Run a SELECT and return the rows as a list of dicts.

    '?' placeholders are translated to the '%s' style used by MySQL.
    When *size* is given, at most that many rows are fetched.
    """
    log(sql, args)
    global __pool
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            if size:
                rs = await cur.fetchmany(size)
            else:
                # Fixed: 'yield from' is a SyntaxError inside an async
                # function -- the coroutine must be awaited.
                rs = await cur.fetchall()
            await cur.close()
        logging.info('rows returned: %s' % len(rs))
        return rs
# INSERT, UPDATE and DELETE
async def execute(sql, args, autocommit=True):
    """Run an INSERT/UPDATE/DELETE; return the number of affected rows."""
    log(sql)
    async with __pool.get() as conn:
        if not autocommit:
            # The caller manages the transaction explicitly.
            await conn.begin()
        try:
            async with conn.cursor(aiomysql.DictCursor) as cur:
                # '?' placeholders are translated to MySQL's '%s' style.
                await cur.execute(sql.replace('?', '%s'), args)
                affected = cur.rowcount
            if not autocommit:
                await conn.commit()
        except BaseException as e:
            if not autocommit:
                # Undo the partial transaction before re-raising.
                await conn.rollback()
            raise
        return affected
c596f7cec968ccd450508e17a2534d87feb5615a | add the googlecode upload script | gc_upload.py | gc_upload.py | #!/usr/bin/env python
#
# Copyright 2006 Google Inc. All Rights Reserved.
# Author: danderson@google.com (David Anderson)
#
# Script for uploading files to a Google Code project.
#
# This is intended to be both a useful script for people who want to
# streamline project uploads and a reference implementation for
# uploading files to Google Code projects.
#
# To upload a file to Google Code, you need to provide a path to the
# file on your local machine, a small summary of what the file is, a
# project name, and a valid account that is a member or owner of that
# project. You can optionally provide a list of labels that apply to
# the file. The file will be uploaded under the same name that it has
# in your local filesystem (that is, the "basename" or last path
# component). Run the script with '--help' to get the exact syntax
# and available options.
#
# Note that the upload script requests that you enter your
# googlecode.com password. This is NOT your Gmail account password!
# This is the password you use on googlecode.com for committing to
# Subversion and uploading files. You can find your password by going
# to http://code.google.com/hosting/settings when logged in with your
# Gmail account.
#
# If you are looking at this script as a reference for implementing
# your own Google Code file uploader, then you should take a look at
# the upload() function, which is the meat of the uploader. You
# basically need to build a multipart/form-data POST request with the
# right fields and send it to https://PROJECT.googlecode.com/files .
# Authenticate the request using HTTP Basic authentication, as is
# shown below.
#
# Licensed under the terms of the Apache Software License 2.0:
# http://www.apache.org/licenses/LICENSE-2.0
#
# Questions, comments, feature requests and patches are most welcome.
# Please direct all of these to the Google Code users group:
# http://groups-beta.google.com/group/google-code-hosting
"""Google Code file uploader script.
"""
__author__ = 'danderson@google.com (David Anderson)'
import httplib
import os.path
import optparse
import getpass
import base64
def upload(file, project_name, user_name, password, summary, labels=None):
  """Upload a file to a Google Code project's file server.

  Args:
    file: The local path to the file.
    project_name: The name of your project on Google Code.
    user_name: Your Google account name.
    password: The googlecode.com password for your account.
              Note that this is NOT your global Google Account password!
    summary: A small description for the file.
    labels: an optional list of label strings with which to tag the file.

  Returns: a tuple:
    http_status: 201 if the upload succeeded, something else if an
                 error occured.
    http_reason: The human-readable string associated with http_status
    file_url: If the upload succeeded, the URL of the file on Google
              Code, None otherwise.
  """
  # The upload server wants the bare account name, so strip a full
  # e-mail address down to its local part.  (BUG FIX: the original
  # subscripted the bound method `user_name.index`, raising TypeError.)
  if '@' in user_name:
    user_name = user_name[:user_name.index('@')]
  form_fields = [('summary', summary)]
  if labels is not None:
    form_fields.extend([('label', l.strip()) for l in labels])
  content_type, body = encode_upload_request(form_fields, file)
  upload_host = '%s.googlecode.com' % project_name
  upload_uri = '/files'
  # HTTP Basic auth with the googlecode.com (not Gmail) credentials.
  auth_token = base64.b64encode('%s:%s' % (user_name, password))
  headers = {
    'Authorization': 'Basic %s' % auth_token,
    'User-Agent': 'Googlecode.com uploader v0.9.4',
    'Content-Type': content_type,
    }
  server = httplib.HTTPSConnection(upload_host)
  server.request('POST', upload_uri, body, headers)
  resp = server.getresponse()
  server.close()
  # 201 Created carries the new file's URL in the Location header.
  if resp.status == 201:
    location = resp.getheader('Location', None)
  else:
    location = None
  return resp.status, resp.reason, location
def encode_upload_request(fields, file_path):
  """Encode the given fields and file into a multipart form body.

  fields is a sequence of (name, value) pairs.  file_path names the file
  to upload; it keeps its local basename on the server side.

  Returns: (content_type, body) ready for httplib.HTTP instance
  """
  BOUNDARY = '----------Googlecode_boundary_reindeer_flotilla'
  CRLF = '\r\n'
  parts = []
  # The metadata fields come first ...
  for name, value in fields:
    parts += ['--' + BOUNDARY,
              'Content-Disposition: form-data; name="%s"' % name,
              '',
              value]
  # ... followed by the file payload itself.
  f = open(file_path)
  try:
    contents = f.read()
  finally:
    f.close()
  parts += ['--' + BOUNDARY,
            'Content-Disposition: form-data; name="filename"; filename="%s"'
            % os.path.basename(file_path),
            # The upload server determines the mime-type, no need to set it.
            'Content-Type: application/octet-stream',
            '',
            contents]
  # A trailing '--' boundary terminates the form body.
  parts += ['--' + BOUNDARY + '--', '']
  return 'multipart/form-data; boundary=%s' % BOUNDARY, CRLF.join(parts)
def main():
  """Parse options, prompt for the googlecode.com password, and upload."""
  parser = optparse.OptionParser(usage='googlecode-upload.py -s SUMMARY '
                                 '-p PROJECT -u USERNAME FILE')
  parser.add_option('-s', '--summary', dest='summary',
                    help='Short description of the file')
  parser.add_option('-p', '--project', dest='project',
                    help='Google Code project name')
  parser.add_option('-u', '--user', dest='user',
                    help='Your Google Code username')
  parser.add_option('-l', '--labels', dest='labels',
                    help='An optional list of labels to attach to the file')
  options, args = parser.parse_args()
  # summary, project, user and one positional FILE are all mandatory.
  if not options.summary:
    parser.error('File summary is missing.')
  elif not options.project:
    parser.error('Project name is missing.')
  elif not options.user:
    parser.error('User name is missing.')
  elif len(args) < 1:
    parser.error('File to upload not provided.')
  # The googlecode.com password is distinct from the Gmail password.
  print 'Please enter your googlecode.com password.'
  print 'Note that this is NOT your main Gmail account password!'
  password = getpass.getpass()
  file_path = args[0]
  # Labels arrive as a single comma-separated option value.
  if options.labels:
    labels = options.labels.split(',')
  else:
    labels = None
  status, reason, url = upload(file_path, options.project,
                               options.user, password,
                               options.summary, labels)
  # upload() returns the Location header (the file URL) only on HTTP 201.
  if url:
    print 'The file was uploaded successfully.'
    print 'URL: %s' % url
  else:
    print 'An error occurred. Your file was not uploaded.'
    print 'Google Code upload server said: %s (%s)' % (reason, status)
if __name__ == '__main__':
  main()
| Python | 0.000001 | |
5b05640a60c66d9d12b9794f2ae55785efe1e099 | Define solidfill. | riot/tags/solidfill.py | riot/tags/solidfill.py | # -*- coding: utf-8 -*-
from urwid import SolidFill
def parse_tag_from_node(node):
    """Build an urwid SolidFill widget for *node*.

    NOTE(review): *node* is currently ignored -- every node maps to a
    default SolidFill; confirm whether node attributes should be honored.
    """
    return SolidFill()
| Python | 0.000002 | |
785f2d3a6d10d8d6ba72712eec29c5be5849f671 | Add build_raw_data.py | fluid/PaddleNLP/text_classification/async_executor/data_generator/build_raw_data.py | fluid/PaddleNLP/text_classification/async_executor/data_generator/build_raw_data.py | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Build lego raw data
"""
from __future__ import print_function
import sys
import os
import random
import re
# Which split of the aclImdb corpus to process: "train" or "test".
data_type = sys.argv[1]
if not (data_type == "train" or data_type == "test"):
    print("python %s [test/train]" % sys.argv[0], file=sys.stderr)
    sys.exit(-1)
pos_folder = "aclImdb/" + data_type + "/pos/"
neg_folder = "aclImdb/" + data_type + "/neg/"
# Pair every review file with its sentiment label: "1" positive, "0" negative.
pos_train_list = [(pos_folder + x, "1") for x in os.listdir(pos_folder)]
neg_train_list = [(neg_folder + x, "0") for x in os.listdir(neg_folder)]
all_train_list = pos_train_list + neg_train_list
# Interleave positive and negative examples for downstream consumers.
random.shuffle(all_train_list)
def load_dict(dictfile):
    """
    Load word id dict: map each stripped line of *dictfile* to its
    0-based line number, rendered as a string.
    """
    with open(dictfile) as reader:
        return {line.strip(): str(word_id)
                for word_id, line in enumerate(reader)}
vocab = load_dict("aclImdb/imdb.vocab")
# Out-of-vocabulary id is one past the last word id.
# NOTE(review): unk_id and pattern are computed but never used below;
# presumably kept for a later tokenization step -- confirm before removing.
unk_id = str(len(vocab))
print("vocab size: ", len(vocab), file=sys.stderr)
pattern = re.compile(r'(;|,|\.|\?|!|\s|\(|\))')
# Emit one "<sentence> | <label>" line per review on stdout.
for fitem in all_train_list:
    label = str(fitem[1])
    fname = fitem[0]
    with open(fname) as f:
        # Reviews are single-line; normalize case and strip HTML line breaks.
        sent = f.readline().lower().replace("<br />", " ").strip()
        out_s = "%s | %s" % (sent, label)
        print(out_s, file=sys.stdout)
| Python | 0.000008 | |
f99c8e6e26b85ae7805ff38e4d89978d06e93c97 | Add SQSRequest base class | sqs.py | sqs.py | from tornado.httpclient import AsyncHTTPClient, HTTPRequest, HTTPClient
from tornado.httputil import url_concat
import datetime
import hashlib
import hmac
class SQSRequest(HTTPRequest):
    """SQS AWS Adapter for Tornado HTTP request.

    Currently a plain pass-through to tornado's HTTPRequest; the subclass
    exists so SQS-specific signing/defaults can be layered on later.
    """
    def __init__(self, *args, **kwargs):
        # Delegate all construction to HTTPRequest unchanged.
        super(SQSRequest, self).__init__(*args, **kwargs)
| Python | 0 | |
972f58d3280c95c2823b04a76469c401e7d9fc02 | add border and basic functional | xoi.py | xoi.py | #! /usr/bin/env python
import sys
import curses
from curses import KEY_ENTER
import time
from collections import namedtuple
KEY = "KEY"
K_A = ord("a")
K_D = ord("d")
class Point:
    """A mutable 2D point with public ``x``/``y`` coordinates.

    The previous version wrapped ``_x``/``_y`` in pass-through properties
    that added no validation or computation; plain attributes expose the
    identical read/write interface with far less code.
    """
    def __init__(self, x, y):
        self.x = x
        self.y = y
Event = namedtuple("Event", ["type", "val"])
class Spaceship(object):
    """Player ship: a sprite pinned to the bottom row that steps left or
    right on 'a'/'d' key events and wraps around the playfield edges."""
    def __init__(self, border):
        self._image = "<i>"
        self._dx = 1
        self.border = border
        self._pos = Point(self.border.x // 2, self.border.y - 1)
    def events(self, event):
        """Translate an 'a'/'d' key press into a pending horizontal step."""
        if event.type != KEY:
            return
        if event.val == K_A:
            self._dx = -1
        if event.val == K_D:
            self._dx = 1
    def update(self):
        """Apply the pending step, wrapping horizontally at the borders."""
        rightmost = self.border.x - len(self._image)
        heading_right = self._dx > 0
        heading_left = self._dx < 0
        if heading_right and self._pos.x == rightmost - 1:
            self._pos.x = 0
        elif heading_left and self._pos.x == 1:
            self._pos.x = rightmost
        self._pos.x += self._dx
        # Consume the step so the ship moves once per key press.
        self._dx = 0
    def draw(self, screen):
        """Render the sprite in bold at its current position."""
        screen.addstr(self._pos.y, self._pos.x, self._image, curses.A_BOLD)
class App(object):
    """Owns the curses screen, the game objects and the main loop."""
    def __init__(self):
        curses.initscr()
        # Overall window size (rows, cols) and the playable area inside
        # the one-character border.
        self.border = namedtuple("border", ["y", "x"])(24, 80)
        # BUG FIX: the field namedtuple used '.' instead of ',' between
        # its field names ( ["y". "x"] ), which was a SyntaxError.
        self.field = namedtuple("field", ["y", "x"])(self.border.y - 1, self.border.x - 1)
        self.screen = curses.newwin(self.border.y, self.border.x, 0, 0)
        self.screen.keypad(True)
        self.screen.nodelay(True)  # make getch() non-blocking
        curses.noecho()
        curses.cbreak()
        curses.curs_set(False)
        self.spaceship = Spaceship(self.field)
        self._objects = []
        self._objects.append(self.spaceship)
    def deinit(self):
        """Restore the terminal to its normal state."""
        self.screen.keypad(False)
        curses.nocbreak()
        curses.echo()
        curses.curs_set(True)
        curses.endwin()
    def events(self):
        """Poll the keyboard; Escape quits, everything else is forwarded."""
        c = self.screen.getch()
        if c == 27:  # Escape
            sys.exit(1)
        else:
            for o in self._objects:
                o.events(Event(type="KEY", val=c))
    def update(self):
        for o in self._objects:
            o.update()
    def render(self):
        self.screen.clear()
        self.screen.border(0)
        for o in self._objects:
            o.draw(self.screen)
        self.screen.refresh()
    def loop(self):
        """Run the event/update/render cycle until the user quits."""
        while True:
            self.events()
            self.update()
            self.render()
def main():
    # Construct the curses app and enter its blocking event loop.
    app = App()
    app.loop()
if __name__ == "__main__":
    main()
| Python | 0 | |
9e2669539c5d7662bb6d6a89877b30235eef1bc2 | Write solution to DEC14 XOR question. | xor.py | xor.py | # http://www.codechef.com/DEC14/problems/XORSUB
import operator
def f(p):
    """Return the XOR of all elements of p (0 for an empty list).

    reduce() with an initial value of 0 collapses the original three
    branches: 0 ^ x == x, so the empty and single-element cases fall
    out naturally.
    """
    from functools import reduce  # builtin in Py2, functools in Py2.6+/Py3
    return reduce(operator.xor, p, 0)
def list_powerset(lst):
    """Return every subset of *lst* as a list, in stable generation order."""
    subsets = [[]]
    for element in lst:
        # Each existing subset spawns a copy extended with the new element.
        subsets += [subset + [element] for subset in subsets]
    return subsets
# CodeChef XORSUB driver (Python 2): for each test case read N and K,
# then the array, and print the maximum of (K ^ xor(S)) over all subsets S.
t = int(raw_input())
while t:
    # Only K (the second token) matters; N is implied by the array line.
    k = int(raw_input().split()[1])
    array = map(int, raw_input().split())
    # NOTE(review): 'max' shadows the builtin; brute force is O(2^N).
    max = -1
    for i in list_powerset(array):
        if max < (k ^ f(i)):
            max = k ^ f(i)
    print max
    t -= 1
135cdb7f16372978774acf06d4da556d0a7a7db7 | add solution template | exercises/error-handling/error_handling.py | exercises/error-handling/error_handling.py | def handle_error_by_throwing_exception():
pass
def handle_error_by_returning_none(input_data):
pass
def handle_error_by_returning_tuple(input_data):
pass
def filelike_objects_are_closed_on_exception(filelike_object):
pass
| Python | 0.000001 | |
286e996c8dd7a299a5db148e78bbdaa0e1cb1b5c | Add sample base. | samples/sample.py | samples/sample.py | # -*- coding: utf-8 -*-
"""AirWaveAPIClient sample."""
def main():
"""Sample main."""
if __name__ == "__main__":
main()
| Python | 0 | |
c1fae9e5ace57320b4f4e69efc941c7fe6266381 | add stft graph writer | write_stft_graph.py | write_stft_graph.py | import pdb
import tensorflow as tf
from birdwatcher.generators import compose, stft, amplitude_to_db, read_audio, reshape
AUDIO_SHAPE = (44100*3, 1)
clean_samples = compose(reshape, amplitude_to_db, stft, read_audio)
x = tf.placeholder(tf.float32, shape=AUDIO_SHAPE)
out = clean_samples(x)
sess = tf.Session()
tf.train.write_graph(sess.graph_def, 'models', 'stft.pbtxt')
| Python | 0 | |
aa517e0300e3e5079523d30c0bb7bfe1fe9640a4 | Add script to wait for ELB instances to come into service. | scripts/wait-for-elb-instances-in-service.py | scripts/wait-for-elb-instances-in-service.py | #!/usr/bin/env python
import argparse, re, time
from assume_role_lib import log, sts
from assume_role_lib.util import unwrap
from datetime import datetime, timedelta
from collections import Counter as counter
__version__ = '0.1'
logger = None
def add_arguments(argument_parser):
    """Register this script's own CLI options on *argument_parser*."""
    argument_parser.add_argument(
        '--version', '-V',
        action = 'version',
        version = '%(prog)s {0}'.format(__version__),
        help = 'Show version information and exit.',
    )
    argument_parser.add_argument(
        '--timeout',
        metavar = 'TIME',
        default = '15m',
        help = unwrap("""
            Stack timeout: XXn; for n - d = days, h = hour, m = mins, s =
            seconds.
        """)
    )
    argument_parser.add_argument(
        'elb',
        metavar = 'NAME',
        nargs = '+',
        help = unwrap("""
            The ELB to watch. Multiple are allowed.
        """)
    )
def parse_timeout(timeout):
    """Parse a compact duration such as ``1d2h30m15s`` into a timedelta.

    Units may appear in any order and case; a repeated unit keeps its last
    value, and an empty/unparsable string yields a zero timedelta.
    """
    amounts = {'d': 0, 'h': 0, 'm': 0, 's': 0}
    for number, unit in re.findall(r'([0-9]+)([dhms])', timeout.lower()):
        amounts[unit] = int(number)
    return timedelta(days=amounts['d'], hours=amounts['h'],
                     minutes=amounts['m'], seconds=amounts['s'])
def get_elb_instance_info(elb, elb_client):
    """Return (total instance count, Counter of instance states) for *elb*."""
    response = elb_client.describe_instance_health(LoadBalancerName=elb)
    health = response.get('InstanceStates', [])
    state_counts = counter(entry.get('State') for entry in health)
    return len(health), state_counts
def main():
    """Poll the named ELBs until every registered instance is InService,
    or exit with status 2 once --timeout elapses."""
    global logger
    p = argparse.ArgumentParser(
        description=unwrap("""
            Wait for all instances added to an ELB to come into service. Exits
            non-zero if there are no instances added to an ELB.
        """),
    )
    sts.add_arguments(p)
    log.add_arguments(p)
    add_arguments(p)
    args = p.parse_args()
    logger = log.get_logger(args)
    logger.debug('Args: %r', args)
    session = sts.get_session(args)
    elb_client = session.client('elb')
    elbs = set( args.elb )
    complete_elbs = set()
    remaining_elbs = elbs - complete_elbs
    timeout = datetime.now() + parse_timeout(args.timeout)
    while remaining_elbs:
        for elb in remaining_elbs:
            count, states = get_elb_instance_info(elb, elb_client)
            logger.debug('%s: %r', elb, states)
            # NOTE(review): with zero instances, get() returns None != 0,
            # so an empty ELB never completes and runs into the timeout.
            if states.get('InService') == count:
                complete_elbs.add(elb)
                logger.info(
                    '%s has %d of %d instances in service',
                    elb, states.get('InService', 0), count,
                )
        remaining_elbs = elbs - complete_elbs
        if remaining_elbs:
            # Report every still-pending ELB before giving up.
            if datetime.now() > timeout:
                logger.error(
                    'Timeout waiting for ELBs. %d have not completed.',
                    len(remaining_elbs),
                )
                for elb in remaining_elbs:
                    count, states = get_elb_instance_info(elb, elb_client)
                    logger.error(
                        '%s has only %d of %d instances in service',
                        elb, states.get('InService', 0), count,
                    )
                raise SystemExit(2)
            logger.info('Waiting for %d ELB\'s instances to come into service.', len(remaining_elbs))
            logger.debug('%r', remaining_elbs)
            # Re-poll every 15 seconds.
            time.sleep(15)
if __name__ == '__main__':
    main()
| Python | 0 | |
eff5016653980f24c5c55dfb866dbe108f50dedf | Add the cbtf spack build package. cbtf is the base package for the component based tool framework and is used for building and connecting cbtf components, including distributed components via the MRNet transfer mechanism. | var/spack/packages/cbtf/package.py | var/spack/packages/cbtf/package.py | ################################################################################
# Copyright (c) 2015 Krell Institute. All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59 Temple
# Place, Suite 330, Boston, MA 02111-1307 USA
################################################################################
from spack import *
class Cbtf(Package):
    """CBTF project contains the base code for CBTF that supports creating components,
    component networks and the support to connect these components and component
    networks into sequential and distributed network tools."""
    homepage = "http://sourceforge.net/p/cbtf/wiki/Home"
    url = "http://sourceforge.net/projects/cbtf/files/cbtf-1.5/cbtf-1.5.tar.gz/download"
    version('1.5', '75a97e0161d19b3a12305ed1ffb3d3e2')
    # Mirror access template example
    #url = "file:/opt/spack-mirror-2015-02-27/cbtf/cbtf-1.5.tar.gz"
    #version('1.5', '1ca88a8834759c4c74452cb97fe7b70a')
    # Use when the git repository is available
    #version('1.5', branch='master', git='http://git.code.sf.net/p/cbtf/cbtf')
    depends_on("cmake")
    depends_on("boost@1.41:")
    depends_on("mrnet@4.1.0+krelloptions")
    depends_on("xerces-c@3.1.1:")
    depends_on("libxml2")
    # The CBTF build is not parallel-make safe.
    parallel = False
    def install(self, spec, prefix):
        """Configure with CMake in an out-of-source build dir and install."""
        with working_dir('build', create=True):
            # Boost_NO_SYSTEM_PATHS Set to TRUE to suppress searching
            # in system paths (or other locations outside of BOOST_ROOT
            # or BOOST_INCLUDEDIR). Useful when specifying BOOST_ROOT.
            # Defaults to OFF.
            cmake('..',
                  '--debug-output',
                  '-DBoost_NO_SYSTEM_PATHS=TRUE',
                  '-DXERCESC_DIR=%s' % spec['xerces-c'].prefix,
                  '-DBOOST_ROOT=%s' % spec['boost'].prefix,
                  '-DMRNET_DIR=%s' % spec['mrnet'].prefix,
                  '-DCMAKE_MODULE_PATH=%s' % join_path(prefix.share,'KrellInstitute','cmake'),
                  *std_cmake_args)
            make("clean")
            make()
            make("install")
| Python | 0 | |
fc21bb14600f79a3d9970272fb7edd4eba548262 | Add test for python runner action wrapper process script performance. | st2actions/tests/integration/test_python_action_process_wrapper.py | st2actions/tests/integration/test_python_action_process_wrapper.py | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test case which tests that Python runner action wrapper finishes in <= 200ms. If the process takes
more time to finish, this means it probably directly or in-directly imports some modules which have
side affects and are very slow to import.
Examples of such modules include:
* jsonschema
* pecan
* jinja2
* kombu
* mongoengine
If the tests fail, look at the recent changes and analyze the import graph using the following
command: "profimp "from st2common.runners import python_action_wrapper" --html > report.html"
"""
import os
import unittest2
from st2common.util.shell import run_command
# Maximum limit for the process wrapper script execution time (in seconds)
WRAPPER_PROCESS_RUN_TIME_UPPER_LIMIT = 0.35
ASSERTION_ERROR_MESSAGE = ("""
Python wrapper process script took more than %s seconds to execute (%s). This most likely means
that a direct or in-direct import of a module which takes a long time to load has been added (e.g.
jsonschema, pecan, kombu, etc).
Please review recently changed and added code for potential slow import issues and refactor /
re-organize code if possible.
""".strip())
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
WRAPPER_SCRIPT_PATH = os.path.join(BASE_DIR, '../../st2common/runners/python_action_wrapper.py')
class PythonRunnerActionWrapperProcess(unittest2.TestCase):
    """Guard against slow module imports creeping into the action wrapper."""
    def test_process_wrapper_exits_in_reasonable_timeframe(self):
        # GNU time's "%e" (escaped for %-formatting) prints elapsed wall
        # clock seconds on stderr after the wrapped command finishes.
        _, _, stderr = run_command('/usr/bin/time -f "%%e" python %s' % (WRAPPER_SCRIPT_PATH),
                                   shell=True)
        # The timing figure is the last line of stderr.
        stderr = stderr.strip().split('\n')[-1]
        run_time_seconds = float(stderr)
        assertion_msg = ASSERTION_ERROR_MESSAGE % (WRAPPER_PROCESS_RUN_TIME_UPPER_LIMIT,
                                                   run_time_seconds)
        self.assertTrue(run_time_seconds <= WRAPPER_PROCESS_RUN_TIME_UPPER_LIMIT, assertion_msg)
| Python | 0 | |
c6df42ca99c8f633c2f1efeb9af26ad4b88c4d75 | Create 04.py | 02/hw/04.py | 02/hw/04.py | # Define a procedure, find_last, that takes as input
# two strings, a search string and a target string,
# and returns the last position in the search string
# where the target string appears, or -1 if there
# are no occurences.
#
# Example: find_last('aaaa', 'a') returns 3
# Make sure your procedure has a return statement.
def find_last(search, target):
    """Return the last index of *target* in *search*, or -1 if absent.

    str.rfind implements exactly the hand-rolled loop's contract,
    including the empty-target edge cases (find_last('', '') == 0 and
    find_last('22', '') == 2).
    """
    return search.rfind(target)
#print find_last('aaaa', 'a')
#>>> 3
#print find_last('aaaaa', 'aa')
#>>> 3
#print find_last('aaaa', 'b')
#>>> -1
#print find_last("111111111", "1")
#>>> 8
#print find_last("222222222", "")
#>>> 9
#print find_last("", "3")
#>>> -1
#print find_last("", "")
#>>> 0
| Python | 0 | |
ba0093c8b6801bdbded870ea5cc27eeec05abb58 | create db script | web/create_db.py | web/create_db.py | __author__ = 'David Mitchell'
#This script creates an example/test db.
from app import db
from app import MenuCategory, MenuItem
# Rebuild the schema from scratch -- destroys any existing data.
db.drop_all()
db.create_all()
appetizer_category = MenuCategory(name='Appetizers')
entree_category = MenuCategory(name='Entrees')
desert_category = MenuCategory(name='Deserts')
# Seed one bacon-themed item per category.
bacon_item = MenuItem(name='Bacon', description='Delicious bacon', category=appetizer_category)
baconz_item = MenuItem(name='Baconz', description='Bacon with Bacon on top, fried in a bacon crust', category=entree_category)
baconIceCream_item = MenuItem(name='Bacon Ice Cream', description='Bacon Ice Cream topped with bacon bits', category=desert_category)
db.session.add_all([appetizer_category, entree_category, desert_category, bacon_item, baconz_item, baconIceCream_item])
db.session.commit()
| Python | 0.000001 | |
955a2a7e467cdcf83a19525e421feb9a5eaca7e3 | Add huxley/js.py for javascript | huxley/js.py | huxley/js.py | # Copyright (c) 2013 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or impliedriver.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
JavaScript to be executed in the testing user agent.
"""
getHuxleyEvents = """
(function() {
var events = [];
window.addEventListener(
'click',
function (e) { events.push([Date.now(), 'click', [e.clientX, e.clientY]]); },
true
);
window.addEventListener(
'keyup',
function (e) { events.push([Date.now(), 'keyup', String.fromCharCode(e.keyCode)]); },
true
);
window.addEventListener(
'scroll',
function(e) { events.push([Date.now(), 'scroll', [this.pageXOffset, this.pageYOffset]]); },
true
);
window._getHuxleyEvents = function() { return events };
})();
""" | Python | 0 | |
79710bb5b77b8cfa95d92f7f39ac44fc0c605022 | Create featureCC.py | featureCC.py | featureCC.py | ### *- Program: FeatureCC
### *- Objective: To determine the total number of point, line, and polygon shapefiles
### in a directory
### *- Input: Provided by the user (workspace)
### *- Output: Display total files for point, line, and polygon shapefiles to the user
# - START PROGRAM -
# Import OS module to load appropriate paths depending on which system is being used
import os
# Import ArcPy module to use built-in functions to achieve the program objective
import arcpy
# From ArcPy, import the environment/workspace
from arcpy import env
# Ask the user to input a file path to set as a workspace
env.workspace = raw_input("\nPlease enter your file path: ")
# Assign the workspace to a new variable
filePath = env.workspace
x = 0
while x < 1: # Set up a file validation system
if os.path.exists(filePath): # If file path, exists: continue. Otherwise..
x = 1 # ..go to Line 45
point = arcpy.ListFeatureClasses("*", "Point") # List point feature classes
line = arcpy.ListFeatureClasses("*", "Line") # List line feature classes
poly = arcpy.ListFeatureClasses("*", "Polygon") # List polygon feature classes
pointCount = len(point) # Count the number of point feature classes
lineCount = len(line) # Count the number of line feature classes
polyCount = len(poly) # Count the number of polygon feature classes
print("\nPOINTS:"), pointCount, ("files") # Print total for point feature classes
print("LINES:"), lineCount, ("files") # Print total for line feature classes
print("POLYGONS:"), polyCount, ("files\n") # Print total for polygon feature classes
else:
raw_input("\n!ERROR! - File path does not exist." # If file path does not exist..
"\nPress Enter to continue. ") # ..display an error message..
env.workspace = raw_input("\nPlease enter your file path: ") # ..and ask user to..
filePath = env.workspace # ..enter it again
# Import time module and exit the program in 10 seconds
import time
time.sleep(10)
# - END PROGRAM -
# I'm gonna make him an offer he can't refuse
# - Don Vito Corleone (The Godfather)
| Python | 0 | |
61fa5c26b9b2eff24e88313671c7aa673e24bb0f | Create pythagoras.py | pythagoras.py | pythagoras.py | #!/bin/python
from math import sqrt
# Right-triangle solver (Python 2): prompt for both legs, print hypotenuse.
print "a^2 + b^2 = c^2"
leg1 = raw_input("Leg1 (a): ")
leg2 = raw_input("Leg2 (b): ")
# raw_input returns strings, so convert before squaring.
hypotenuse = sqrt((int(leg1) ** 2) + (int(leg2) ** 2))
print hypotenuse
| Python | 0.000003 | |
c1bed8533d479112df6ae4aea0bb31e4419ae4f8 | change location of jianfan lib in data repo | setup/setupdev.py | setup/setupdev.py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import shutil
"""
$PALI_DIR is the dir of git clone https://github.com/siongui/pali.git
Manual setup (for reference):
1. setup TongWen (deprecated):
```bash
cd $PALI_DIR
mkdir -p common/app/scripts/ext
cd common/app/scripts/ext/
wget http://tongwen.openfoundry.org/src/web/tongwen_core.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_s2t.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_t2s.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_ps2t.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_pt2s.js
```
2. setup jianfan (deprecated):
```bash
wget https://python-jianfan.googlecode.com/files/jianfan-0.0.2.zip
unzip jianfan-0.0.2.zip
mv jianfan-0.0.2/jianfan $PALI_DIR/common/pylib/
rm -rf jianfan-0.0.2
```
3. create symbolic links:
```bash
cd $PALI_DIR/tipitaka
ln -s ../common/ common
cd $PALI_DIR/tipitaka/pylib
ln -s ../../../data/pali/common/translation/ translation
ln -s ../../../data/pali/common/romn/ romn
cd $PALI_DIR/dictionary
ln -s ../common/ common
cd $PALI_DIR/common/pylib
ln -s ../../../data/pylib/jianfan/ jianfan
```
"""
def ln(source, link_name):
    """Create symlink *link_name* -> *source*, replacing an existing link."""
    already_linked = os.path.islink(link_name)
    if already_linked:
        os.unlink(link_name)
    os.symlink(source, link_name)
def setupSymlinks():
    """Create the relative symlinks described in the module docstring.

    NOTE(review): relies on os.chdir, so each ln() target is relative to
    the directory entered on the preceding line.
    """
    # enter tipitaka dir
    os.chdir(os.path.join(os.path.dirname(__file__), '../tipitaka'))
    ln('../common/', 'common')
    os.chdir('pylib')
    ln('../../../data/pali/common/translation/', 'translation')
    ln('../../../data/pali/common/romn/', 'romn')
    # enter dictionary dir
    os.chdir('../../dictionary')
    ln('../common/', 'common')
    # enter common dir
    os.chdir('../common/pylib')
    ln('../../../data/pylib/jianfan/', 'jianfan')
if __name__ == '__main__':
    # Copy the CSCD stylesheet into the tipitaka app, then link everything.
    tipitakaLatnCssPath = os.path.join(os.path.dirname(__file__),
        '../../data/pali/common/romn/cscd/tipitaka-latn.css')
    dstPath = os.path.join(os.path.dirname(__file__),
        '../tipitaka/app/css/tipitaka-latn.css')
    shutil.copyfile(tipitakaLatnCssPath, dstPath)
    setupSymlinks()
| #!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import shutil
"""
$PALI_DIR is the dir of git clone https://github.com/siongui/pali.git
Manual setup (for reference):
1. setup TongWen (deprecated):
```bash
cd $PALI_DIR
mkdir -p common/app/scripts/ext
cd common/app/scripts/ext/
wget http://tongwen.openfoundry.org/src/web/tongwen_core.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_s2t.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_t2s.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_ps2t.js
wget http://tongwen.openfoundry.org/src/web/tongwen_table_pt2s.js
```
2. setup jianfan (deprecated):
```bash
wget https://python-jianfan.googlecode.com/files/jianfan-0.0.2.zip
unzip jianfan-0.0.2.zip
mv jianfan-0.0.2/jianfan $PALI_DIR/common/pylib/
rm -rf jianfan-0.0.2
```
3. create symbolic links:
```bash
cd $PALI_DIR/tipitaka
ln -s ../common/ common
cd $PALI_DIR/tipitaka/pylib
ln -s ../../../data/pali/common/translation/ translation
ln -s ../../../data/pali/common/romn/ romn
cd $PALI_DIR/dictionary
ln -s ../common/ common
cd $PALI_DIR/common/pylib
ln -s ../../../data/pali/common/gae/libs/jianfan/ jianfan
```
"""
def ln(source, link_name):
    """Create symlink *link_name* -> *source*, replacing an existing link."""
    if os.path.islink(link_name):
        os.unlink(link_name)
    os.symlink(source, link_name)
def setupSymlinks():
    """Create the relative symlinks described in the module docstring.

    NOTE(review): relies on os.chdir, so each ln() target is relative to
    the directory entered on the preceding line.
    """
    # enter tipitaka dir
    os.chdir(os.path.join(os.path.dirname(__file__), '../tipitaka'))
    ln('../common/', 'common')
    os.chdir('pylib')
    ln('../../../data/pali/common/translation/', 'translation')
    ln('../../../data/pali/common/romn/', 'romn')
    # enter dictionary dir
    os.chdir('../../dictionary')
    ln('../common/', 'common')
    # enter common dir
    os.chdir('../common/pylib')
    ln('../../../data/pali/common/gae/libs/jianfan/', 'jianfan')
if __name__ == '__main__':
    # Copy the CSCD stylesheet into the tipitaka app, then link everything.
    tipitakaLatnCssPath = os.path.join(os.path.dirname(__file__),
        '../../data/pali/common/romn/cscd/tipitaka-latn.css')
    dstPath = os.path.join(os.path.dirname(__file__),
        '../tipitaka/app/css/tipitaka-latn.css')
    shutil.copyfile(tipitakaLatnCssPath, dstPath)
    setupSymlinks()
| Python | 0 |
7b09ba64c0327ecea04cc95057ffa7d5c8d939c8 | Add test for setopt to demonstrate that edit_config retains non-ASCII characters. | setuptools/tests/test_setopt.py | setuptools/tests/test_setopt.py | # coding: utf-8
from __future__ import unicode_literals
import io
import six
from setuptools.command import setopt
from setuptools.extern.six.moves import configparser
class TestEdit:
    """Tests for setuptools.command.setopt.edit_config."""
    @staticmethod
    def parse_config(filename):
        """Read *filename* as a UTF-8 INI file and return the parser."""
        parser = configparser.ConfigParser()
        with io.open(filename, encoding='utf-8') as reader:
            # Python 3 renamed readfp -> read_file.
            (parser.read_file if six.PY3 else parser.readfp)(reader)
        return parser
    @staticmethod
    def write_text(file, content):
        """Write *content* to *file* as UTF-8 bytes."""
        with io.open(file, 'wb') as strm:
            strm.write(content.encode('utf-8'))
    def test_utf8_encoding_retained(self, tmpdir):
        """
        When editing a file, non-ASCII characters encoded in
        UTF-8 should be retained.
        """
        config = tmpdir.join('setup.cfg')
        self.write_text(config, '[names]\njaraco=йарацо')
        # Adding an unrelated option must not corrupt the UTF-8 value.
        setopt.edit_config(str(config), dict(names=dict(other='yes')))
        parser = self.parse_config(str(config))
        assert parser['names']['jaraco'] == 'йарацо'
        assert parser['names']['other'] == 'yes'
| Python | 0 | |
cbf0d257bcbaeddeb9390047f575038b5d842dc8 | update version | paginator_plus/__init__.py | paginator_plus/__init__.py | # -*- coding: utf-8 -*-
__version__ = '0.0.1'
| Python | 0 | |
348d6bba64423a1ca0f1532f4cf878811b65760f | add deploy.py | deep_speech_2/deploy.py | deep_speech_2/deploy.py | """Deployment for DeepSpeech2 model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import gzip
import distutils.util
import multiprocessing
import paddle.v2 as paddle
from data_utils.data import DataGenerator
from model import deep_speech2
from swig_ctc_beam_search_decoder import *
from swig_scorer import Scorer
from error_rate import wer
import utils
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--num_samples",
default=100,
type=int,
help="Number of samples for inference. (default: %(default)s)")
parser.add_argument(
"--num_conv_layers",
default=2,
type=int,
help="Convolution layer number. (default: %(default)s)")
parser.add_argument(
"--num_rnn_layers",
default=3,
type=int,
help="RNN layer number. (default: %(default)s)")
parser.add_argument(
"--rnn_layer_size",
default=512,
type=int,
help="RNN layer cell number. (default: %(default)s)")
parser.add_argument(
"--use_gpu",
default=True,
type=distutils.util.strtobool,
help="Use gpu or not. (default: %(default)s)")
parser.add_argument(
"--num_threads_data",
default=multiprocessing.cpu_count(),
type=int,
help="Number of cpu threads for preprocessing data. (default: %(default)s)")
parser.add_argument(
"--mean_std_filepath",
default='mean_std.npz',
type=str,
help="Manifest path for normalizer. (default: %(default)s)")
parser.add_argument(
"--decode_manifest_path",
default='datasets/manifest.test',
type=str,
help="Manifest path for decoding. (default: %(default)s)")
parser.add_argument(
"--model_filepath",
default='ds2_new_models_0628/params.pass-51.tar.gz',
type=str,
help="Model filepath. (default: %(default)s)")
parser.add_argument(
"--vocab_filepath",
default='datasets/vocab/eng_vocab.txt',
type=str,
help="Vocabulary filepath. (default: %(default)s)")
parser.add_argument(
"--decode_method",
default='beam_search',
type=str,
help="Method for ctc decoding: best_path or beam_search. (default: %(default)s)"
)
parser.add_argument(
"--beam_size",
default=500,
type=int,
help="Width for beam search decoding. (default: %(default)d)")
parser.add_argument(
"--num_results_per_sample",
default=1,
type=int,
help="Number of output per sample in beam search. (default: %(default)d)")
parser.add_argument(
"--language_model_path",
default="lm/data/en.00.UNKNOWN.klm",
type=str,
help="Path for language model. (default: %(default)s)")
parser.add_argument(
"--alpha",
default=0.26,
type=float,
help="Parameter associated with language model. (default: %(default)f)")
parser.add_argument(
"--beta",
default=0.1,
type=float,
help="Parameter associated with word count. (default: %(default)f)")
parser.add_argument(
    "--cutoff_prob",
    default=0.99,
    type=float,
    # BUG FIX: implicit string concatenation previously produced
    # "pruningin beam search"; the trailing space keeps the fragments
    # readable when joined.
    help="The cutoff probability of pruning "
    "in beam search. (default: %(default)f)")
def infer():
    """Deployment for DeepSpeech2.

    Builds the DS2 inference network from the module-level ``args``,
    loads trained parameters, decodes one batch from the test manifest
    with CTC beam search (plus an external LM scorer) and prints the
    per-utterance and running-average WER.

    NOTE(review): Python 2 only as written (``.next()``, ``xrange``).
    """
    # initialize data generator
    data_generator = DataGenerator(
        vocab_filepath=args.vocab_filepath,
        mean_std_filepath=args.mean_std_filepath,
        augmentation_config='{}',  # no augmentation at inference time
        num_threads=args.num_threads_data)
    # create network config
    # paddle.data_type.dense_array is used for variable batch input.
    # The size 161 * 161 is only an placeholder value and the real shape
    # of input batch data will be induced during training.
    audio_data = paddle.layer.data(
        name="audio_spectrogram", type=paddle.data_type.dense_array(161 * 161))
    text_data = paddle.layer.data(
        name="transcript_text",
        type=paddle.data_type.integer_value_sequence(data_generator.vocab_size))
    output_probs = deep_speech2(
        audio_data=audio_data,
        text_data=text_data,
        dict_size=data_generator.vocab_size,
        num_conv_layers=args.num_conv_layers,
        num_rnn_layers=args.num_rnn_layers,
        rnn_size=args.rnn_layer_size,
        is_inference=True)
    # load parameters
    parameters = paddle.parameters.Parameters.from_tar(
        gzip.open(args.model_filepath))
    # prepare infer data
    # NOTE(review): ``args.num_samples`` is not declared among the argparse
    # options visible in this file section -- confirm it is defined elsewhere.
    batch_reader = data_generator.batch_reader_creator(
        manifest_path=args.decode_manifest_path,
        batch_size=args.num_samples,
        min_batch_size=1,
        sortagrad=False,
        shuffle_method=None)
    infer_data = batch_reader().next()
    # run inference
    infer_results = paddle.infer(
        output_layer=output_probs, parameters=parameters, input=infer_data)
    # infer_results is flat; slice it back into one chunk of ``num_steps``
    # probability rows per utterance in the batch.
    num_steps = len(infer_results) // len(infer_data)
    probs_split = [
        infer_results[i * num_steps:(i + 1) * num_steps]
        for i in xrange(len(infer_data))
    ]
    # target transcriptions, rebuilt from the reference label-id sequences
    target_transcription = [
        ''.join(
            [data_generator.vocab_list[index] for index in infer_data[i][1]])
        for i, probs in enumerate(probs_split)
    ]
    ext_scorer = Scorer(args.alpha, args.beta, args.language_model_path)
    ## decode and print
    wer_sum, wer_counter = 0, 0
    for i, probs in enumerate(probs_split):
        beam_result = ctc_beam_search_decoder(
            probs.tolist(),
            args.beam_size,
            data_generator.vocab_list,
            len(data_generator.vocab_list),
            args.cutoff_prob,
            ext_scorer, )
        print("\nTarget Transcription:\t%s" % target_transcription[i])
        print("Beam %d: %f \t%s" % (0, beam_result[0][0], beam_result[0][1]))
        wer_cur = wer(target_transcription[i], beam_result[0][1])
        wer_sum += wer_cur
        wer_counter += 1
        print("cur wer = %f , average wer = %f" %
              (wer_cur, wer_sum / wer_counter))
def main():
    """Entry point: echo the parsed arguments, init PaddlePaddle, decode."""
    utils.print_arguments(args)
    # trainer_count=1: inference here runs on a single device.
    paddle.init(use_gpu=args.use_gpu, trainer_count=1)
    infer()


if __name__ == '__main__':
    main()
| Python | 0.000001 | |
dc5e87f4a7bb1399951423c3a4236c58ab723665 | change AjaxImageField to behave like standard django FileField | ajaximage/fields.py | ajaximage/fields.py | #-*- coding: utf-8 -*-
from django.core.files.storage import default_storage
from django.db.models.fields.files import FileDescriptor, FieldFile
from django.db.models import Field
from django.conf import settings
from .widgets import AjaxImageEditor
class AjaxImageField(Field):
    """Model field storing the path of an image uploaded via AJAX.

    Persisted as text (the uploaded file's path) and exposed on model
    instances through Django's ``FileDescriptor``/``FieldFile`` machinery,
    so it behaves like a standard ``FileField`` for reads.
    """

    storage = default_storage
    attr_class = FieldFile
    descriptor_class = FileDescriptor

    def __init__(self, *args, **kwargs):
        upload_to = kwargs.pop('upload_to', '')
        max_height = kwargs.pop('max_height', 0)
        max_width = kwargs.pop('max_width', 0)
        crop = kwargs.pop('crop', False)
        crop = 1 if crop is True else 0

        # BUG FIX: the original compared ints with ``is`` (``crop is 1``,
        # ``max_height is 0``), which relies on CPython small-int caching and
        # raises SyntaxWarning on Python >= 3.8; use value equality instead.
        if crop == 1 and (max_height == 0 or max_width == 0):
            raise Exception('Both max_width and max_height are needed if cropping')

        self.widget = AjaxImageEditor(
            upload_to=upload_to,
            max_width=max_width,
            max_height=max_height,
            crop=crop
        )

        super(AjaxImageField, self).__init__(*args, **kwargs)

    def contribute_to_class(self, cls, name, virtual_only=False):
        super(AjaxImageField, self).contribute_to_class(cls, name, virtual_only)
        # Install the file descriptor so attribute access returns a FieldFile.
        setattr(cls, self.name, self.descriptor_class(self))

    def get_prep_value(self, value):
        """Returns field's value prepared for saving into a database."""
        # Need to convert File objects provided via a form to unicode for database insertion
        if value is None:
            return None
        return str(value)

    def get_internal_type(self):
        # Stored as plain text (the file path), not a binary blob.
        return "TextField"

    def formfield(self, **kwargs):
        defaults = {'widget': self.widget}
        defaults.update(kwargs)
        return super(AjaxImageField, self).formfield(**defaults)
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^ajaximage\.fields\.AjaxImageField"]) | #-*- coding: utf-8 -*-
from django.db.models import Field
from django.forms import widgets
from ajaximage.widgets import AjaxImageEditor
from django.conf import settings
class AjaxImageField(Field):
    """Model field storing the path of an image uploaded via AJAX.

    Persisted as text; renders with the AjaxImageEditor widget in forms.
    """

    def __init__(self, *args, **kwargs):
        upload_to = kwargs.pop('upload_to', '')
        max_height = kwargs.pop('max_height', 0)
        max_width = kwargs.pop('max_width', 0)
        crop = kwargs.pop('crop', False)
        crop = 1 if crop is True else 0

        # BUG FIX: the original used identity comparison with int literals
        # (``crop is 1``, ``max_height is 0``), which is implementation-defined
        # and warns on Python >= 3.8; compare by value instead.
        if crop == 1 and (max_height == 0 or max_width == 0):
            raise Exception('Both max_width and max_height are needed if cropping')

        self.widget = AjaxImageEditor(upload_to=upload_to,
                                      max_width=max_width,
                                      max_height=max_height,
                                      crop=crop)

        super(AjaxImageField, self).__init__(*args, **kwargs)

    def get_internal_type(self):
        # Stored as plain text (the file path).
        return "TextField"

    def formfield(self, **kwargs):
        defaults = {'widget': self.widget}
        defaults.update(kwargs)
        return super(AjaxImageField, self).formfield(**defaults)
if 'south' in settings.INSTALLED_APPS:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^ajaximage\.fields\.AjaxImageField"]) | Python | 0 |
a7ab3b5e4d75a4f5a887bafc980d24dce7983b4a | add Alfred.py | Alfred.py | Alfred.py | # Copyright (c) 2013 Christopher Kaster (@Kasoki)
#
# This file is part of alfred.py <https://github.com/Kasoki/alfred.py>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
""" IMPORTANT: Not sure how to use this lib? Check out the "example.py" file :) """
Version="0.1.1"
class Handler:
    """Alfred.Handler, this class is responsible for handling Alfred!

    Collects result items and renders them as the XML feedback format
    consumed by Alfred's script filters.
    """

    def __init__(self, args=None, query="", use_no_query_string=True):
        """Create a new handler

        Keyword arguments:
        args -- This list should be *sys.argv* (default: None, treated as [])
        query -- This string should only be used if args is not set!
        use_no_query_string -- If there is no query, should the handler use
            the sentinel string "EMPTY_QUERY" instead of an empty one?
        """
        # BUG FIX: the default used to be a shared mutable ``[]``; use None.
        if args is None:
            args = []
        # BUG FIX: was ``type(args) != list`` -- isinstance also accepts
        # list subclasses and is the idiomatic check.
        if not isinstance(args, list):
            raise TypeError("Alfred.Handler(args): args is no list!")

        if len(args) > 1:
            # args is sys.argv; the query is the first real argument.
            self.query = args[1]
        elif query != "":
            self.query = query
        elif use_no_query_string:
            self.query = "EMPTY_QUERY"
        else:
            self.query = ""

        self.items = []

    def get_current_directory(self):
        """Return the working directory the workflow was launched from."""
        return os.getcwd()

    def query_is_empty(self):
        """Return True when no real query was supplied."""
        return self.query in ("EMPTY_QUERY", "")

    def add_item(self, item):
        """Adds a new Alfred.Item to this handler

        Keyword arguments:
        item -- The Alfred.Item you want to add ;)
        """
        if not isinstance(item, Item):
            raise TypeError("Alfred.Handler.add_item(item): item is no instance of Alfred.Item")

        self.items.append(item)

    def add_new_item(self, title="", subtitle="", uid=None, arg="", icon=None):
        """Adds a new Item to this handler without using the Alfred.Item class!

        Keyword arguments:
        title -- The title of this item
        subtitle -- The subtitle of this item
        uid -- The uid of this item (default: None)
        arg -- The argument of this item
        icon -- The icon of this item (Default: None)
        """
        self.add_item(Item(title, subtitle, uid, arg, icon))

    def __str__(self):
        return self.to_xml()

    def to_xml(self, max_results=None):
        """Generates a XML string

        Keyword arguments:
        max_results -- How many results should be in this string?
            (Default: None - No limitation)
        """
        xml_string = '<?xml version="1.0" encoding="UTF-8" ?>'
        xml_string += '<items>'

        counter = 0
        for item in self.items:
            xml_string += item.__str__()
            counter += 1
            if max_results is not None and counter >= max_results:
                break

        xml_string += '</items>'
        return xml_string

    def push(self, max_results=None):
        """Push the content to Alfred

        Keyword arguments:
        max_results -- How many results should be in this string?
            (Default: None - No limitation)
        """
        # BUG FIX: was a Python 2 ``print`` statement, a SyntaxError on
        # Python 3; the call form works identically on both versions.
        print(self.to_xml(max_results))
class Item:
    """A single result row in Alfred's XML feedback format."""

    def __init__(self, title="", subtitle="", uid=None, arg="", icon=None):
        """Creates a new Item for Alfred

        Keyword arguments:
        title -- The title of this item
        subtitle -- The subtitle of this item
        uid -- The uid of this item (default: None)
        arg -- The argument of this item
        icon -- The icon of this item (Default: None)
        """
        self.title = title
        self.subtitle = subtitle
        self.uid = uid
        self.arg = arg
        self.icon = icon

    def __str__(self):
        # Child elements first; the icon tag only appears when one was set.
        children = [
            '<title>%s</title>' % self.title,
            '<subtitle>%s</subtitle>' % self.subtitle,
        ]
        if self.icon is not None:
            children.append('<icon>%s</icon>' % self.icon)
        # Wrap the children in the <item> element carrying uid and arg.
        return '<item uid="%s" arg="%s">%s</item>' % (
            self.uid, self.arg, ''.join(children))
| Python | 0.000936 | |
d522281a6a70c00f4ad6c77783a1269dd20289dc | Add Scripts for Regression benchmark (#2516) | .dev_scripts/benchmark_filter.py | .dev_scripts/benchmark_filter.py | import argparse
import mmcv
import os
import os.path as osp
def parse_args():
    """Parse the command-line flags that select which config groups to train."""
    parser = argparse.ArgumentParser(description='Filter configs to train')
    # Each entry is (flag, help text); all four are plain boolean switches.
    switches = (
        ('--basic-arch', 'to train models in basic arch'),
        ('--datasets', 'to train models in dataset'),
        ('--data-pipeline',
         'to train models related to data pipeline, e.g. augmentations'),
        ('--nn-module',
         'to train models related to neural network modules'),
    )
    for flag, help_text in switches:
        parser.add_argument(flag, action='store_true', help=help_text)
    return parser.parse_args()
basic_arch_root = [
'cascade_rcnn', 'double_heads', 'fcos', 'foveabox', 'free_anchor',
'grid_rcnn', 'guided_anchoring', 'htc', 'libra_rcnn', 'atss', 'mask_rcnn',
'ms_rcnn', 'nas_fpn', 'reppoints', 'retinanet', 'ssd', 'gn', 'ghm'
]
datasets_root = ['wider_face', 'pascal_voc', 'cityscapes', 'mask_rcnn']
data_pipeline_root = ['albu_example', 'instaboost']
nn_module_root = [
'carafe', 'dcn', 'empirical_attention', 'gcnet', 'gn+ws', 'hrnet', 'pafpn',
'nas_fpn'
]
benchmark_pool = [
'configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py',
'configs/htc/htc_r50_fpn_1x_coco.py',
'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'configs/carafe/mask_rcnn_r50_fpn_carafe_1x_coco.py',
'configs/grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
'configs/albu_example/mask_rcnn_r50_fpn_albu_1x_coco.py',
'configs/rpn/rpn_r50_fpn_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py',
'configs/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco.py',
'configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py',
'configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py',
'configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py',
'configs/instaboost/mask_rcnn_r50_fpn_instaboost_4x_coco.py',
'configs/retinanet/retinanet_r50_caffe_fpn_1x_coco.py',
'configs/ssd/ssd300_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py',
'configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py', # noqa
'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py',
'configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py',
'configs/free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
'configs/scratch/mask_rcnn_r50_fpn_gn-all_scratch_6x_coco.py',
'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py',
'configs/fp16/retinanet_r50_fpn_fp16_1x_coco.py',
'configs/fp16/mask_rcnn_r50_fpn_fp16_1x_coco.py',
'configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py',
'configs/wider_face/ssd300_wider_face.py',
'configs/gn/mask_rcnn_r50_fpn_gn-all_2x_coco.py',
'configs/fcos/fcos_center_r50_caffe_fpn_gn-head_4x4_1x_coco.py',
'configs/atss/atss_r50_fpn_1x_coco.py',
'configs/hrnet/mask_rcnn_hrnetv2p_w18_1x_coco.py',
'configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
'configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py',
'configs/pascal_voc/ssd300_voc0712.py',
'configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py',
'configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py',
'configs/gn+ws/mask_rcnn_r50_fpn_gn_ws-all_2x_coco.py'
]
def main():
    """Collect the selected benchmark configs and dump them to JSON."""
    args = parse_args()

    # Gather the config sub-directories for every group the user enabled.
    benchmark_type = []
    if args.basic_arch:
        benchmark_type += basic_arch_root
    if args.datasets:
        benchmark_type += datasets_root
    if args.data_pipeline:
        benchmark_type += data_pipeline_root
    if args.nn_module:
        benchmark_type += nn_module_root

    config_dpath = 'configs/'
    benchmark_configs = []
    for cfg_root in benchmark_type:
        # Scan each selected sub-directory and keep only whitelisted configs.
        cfg_dir = osp.join(config_dpath, cfg_root)
        configs = os.scandir(cfg_dir)
        for cfg in configs:
            config_path = osp.join(cfg_dir, cfg.name)
            if config_path in benchmark_pool:
                benchmark_configs.append(config_path)

    print(f'Totally found {len(benchmark_configs)} configs to benchmark')
    config_dicts = dict(models=benchmark_configs)
    mmcv.dump(config_dicts, 'regression_test_configs.json')


if __name__ == '__main__':
    main()
| Python | 0 | |
2a963c4d13035b6f8e301a7f0240b28e0e0764d3 | Create WordLadder_001.py | leetcode/127-Word-Ladder/WordLadder_001.py | leetcode/127-Word-Ladder/WordLadder_001.py | class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
"""
:type beginWord: str
:type endWord: str
:type wordList: Set[str]
:rtype: int
"""
if beginWord == endWord:
return 1
cnt = 1
q = [beginWord]
while q:
nq = []
for word in q:
for i in range(len(beginWord)):
for j in 'abcdefghijklmnopqrstuvwxyz':
if j != word[i]:
nword = word[:i] + j + word[i + 1:]
if nword == endWord:
return cnt + 1
if nword in wordList:
nq.append(nword)
wordList.remove(nword)
cnt += 1
q = nq
return 0
| Python | 0 | |
c657d92f1f8dc3cd4ff9995dc0d2857ce8f6fdd4 | Create CountingBits.py | leetcode/338-Counting-Bits/CountingBits.py | leetcode/338-Counting-Bits/CountingBits.py | class Solution(object):
def countBits(self, num):
"""
:type num: int
:rtype: List[int]
"""
seed = 1
res = [0]
while num > 0:
res += [res[i] + 1 for i in xrange(min(num, seed))]
num -= seed
seed = seed << 1
return res
| Python | 0.000001 | |
b85eb165ca41528d2709ae255e47e722a194aa87 | Add piano-roll/audio file to pretty_midi object example | examples/reverse_pianoroll.py | examples/reverse_pianoroll.py | """
Utility function for converting an audio file
to a pretty_midi.PrettyMIDI object. Note that this method is nowhere close
to the state-of-the-art in automatic music transcription.
This just serves as a fun example for rough
transcription which can be expanded on for anyone motivated.
"""
from __future__ import division
import sys
import argparse
import numpy as np
import pretty_midi
import librosa
def piano_roll_to_pretty_midi(piano_roll, fs=100, program=0):
    '''Convert a Piano Roll array into a PrettyMidi object
     with a single instrument.

    Parameters
    ----------
    piano_roll : np.ndarray, shape=(128,frames), dtype=int
        Piano roll of one instrument
    fs : int
        Sampling frequency of the columns, i.e. each column is spaced apart
        by ``1./fs`` seconds.
    program : int
        The program number of the instrument.

    Returns
    -------
    midi_object : pretty_midi.PrettyMIDI
        A pretty_midi.PrettyMIDI class instance describing
        the piano roll.

    '''
    notes, frames = piano_roll.shape
    pm = pretty_midi.PrettyMIDI()
    instrument = pretty_midi.Instrument(program=program)

    # pad 1 column of zeros so we can acknowledge inital and ending events
    piano_roll = np.pad(piano_roll, [(0, 0), (1, 1)], 'constant')

    # use changes in velocities to find note on / note off events
    velocity_changes = np.nonzero(np.diff(piano_roll).T)

    # keep track on velocities and note on times
    prev_velocities = np.zeros(notes, dtype=int)
    note_on_time = np.zeros(notes)

    for time, note in zip(*velocity_changes):
        # use time + 1 because of padding above
        velocity = piano_roll[note, time + 1]
        time = time / fs
        if velocity > 0:
            # A rising edge: remember the onset time; only the first onset
            # of a sustained note is kept (prev_velocities stays nonzero).
            if prev_velocities[note] == 0:
                note_on_time[note] = time
                prev_velocities[note] = velocity
        else:
            # A falling edge: the note ends here; emit the completed note.
            pm_note = pretty_midi.Note(
                velocity=prev_velocities[note],
                pitch=note,
                start=note_on_time[note],
                end=time)
            instrument.notes.append(pm_note)
            prev_velocities[note] = 0
    pm.instruments.append(instrument)
    return pm
def cqt_to_piano_roll(cqt, min_midi, max_midi, threshold):
    '''Convert a CQT spectrogram into a piano roll representation by
    thresholding scaled magnitudes.

    Parameters
    ----------
    cqt : np.ndarray, shape=(max_midi-min_midi,frames), dtype=complex64
        CQT spectrogram of audio.
    min_midi : int
        Minimum MIDI note to transcribe.
    max_midi : int
        Maximum MIDI note to transcribe.
    threshold : int
        Threshold value to activate note on event, 0-127

    Returns
    -------
    piano_roll : np.ndarray, shape=(128,frames), dtype=int
        Piano roll representation on audio.

    '''
    # Map spectrogram magnitudes onto a 0-127 "velocity" scale.
    magnitudes = np.abs(cqt)
    velocity_bins = np.linspace(magnitudes.min(), magnitudes.max(), 127)
    velocities = np.digitize(magnitudes, velocity_bins)

    # Silence everything quieter than the activation threshold.
    velocities[velocities < threshold] = 0

    # Pad the pitch axis out to the full 128 MIDI note range.
    pitch_padding = [(128 - max_midi, min_midi), (0, 0)]
    return np.pad(velocities, pitch_padding, 'constant')
if __name__ == '__main__':
    # Set up command-line argument parsing
    parser = argparse.ArgumentParser(
        description='Transcribe Audio file to MIDI file',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('input_audio', action='store',
                        help='Path to the input Audio file')
    parser.add_argument('output_midi', action='store',
                        help='Path where the transcribed MIDI will be written')
    parser.add_argument('--program', default=0, type=int, action='store',
                        help='Program of the instrument in the output MIDI')
    parser.add_argument('--min_midi', default=24, type=int, action='store',
                        help='Minimum MIDI note to transcribe')
    parser.add_argument('--max_midi', default=107, type=int, action='store',
                        help='Maximum MIDI note to transcribe')
    parser.add_argument('--threshold', default=64, type=int, action='store',
                        help='Threshold to activate note on event, 0-127')
    parameters = vars(parser.parse_args(sys.argv[1:]))

    y, sr = librosa.load(parameters['input_audio'])
    min_midi, max_midi = parameters['min_midi'], parameters['max_midi']
    # NOTE(review): librosa.cqt's ``fmin`` expects a frequency in Hz, but a
    # raw MIDI note number is passed here -- confirm whether a
    # midi-to-Hz conversion is missing.
    cqt = librosa.cqt(y, sr=sr, fmin=min_midi,
                      n_bins=max_midi - min_midi)
    pr = cqt_to_piano_roll(cqt, min_midi, max_midi, parameters['threshold'])

    # get audio time
    audio_time = len(y) / sr
    # get sampling frequency of cqt spectrogram
    # (columns per second, so MIDI note times line up with the audio)
    fs = pr.shape[1]/audio_time
    pm = piano_roll_to_pretty_midi(pr, fs=fs,
                                   program=parameters['program'])
    pm.write(parameters['output_midi'])
| Python | 0 | |
2c3136d0d68024cd867efac5ac92a84067936c49 | Improve 'workarounds' conf options documentation | nova/conf/workarounds.py | nova/conf/workarounds.py | # Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""The 'workarounds' group is for very specific reasons.
If you're:
- Working around an issue in a system tool (e.g. libvirt or qemu) where the
fix is in flight/discussed in that community.
- The tool can be/is fixed in some distributions and rather than patch the
code those distributions can trivially set a config option to get the
"correct" behavior.
Then this is a good place for your workaround.
.. warning::
Please use with care! Document the BugID that your workaround is paired with.
"""
from oslo_config import cfg
workarounds_group = cfg.OptGroup(
'workarounds',
title='Workaround Options',
help="""
A collection of workarounds used to mitigate bugs or issues found in system
tools (e.g. Libvirt or QEMU) or Nova itself under certain conditions. These
should only be enabled in exceptional circumstances. All options are linked
against bug IDs, where more information on the issue can be found.
""")
disable_rootwrap = cfg.BoolOpt(
'disable_rootwrap',
default=False,
help="""
Use sudo instead of rootwrap.
Allow fallback to sudo for performance reasons.
For more information, refer to the bug report:
https://bugs.launchpad.net/nova/+bug/1415106
Possible values:
* True: Use sudo instead of rootwrap
* False: Use rootwrap as usual
Services which consume this:
* ``nova-compute``
Interdependencies to other options:
Any options that affect 'rootwrap' will be ignored.
""")
disable_libvirt_livesnapshot = cfg.BoolOpt(
'disable_libvirt_livesnapshot',
default=True,
help="""
Disable live snapshots when using the libvirt driver.
When using libvirt 1.2.2 live snapshots fail intermittently under load. This
config option provides a mechanism to disable live snapshot, in favour of cold
snapshot, while this is resolved.
For more information, refer to the bug report:
https://bugs.launchpad.net/nova/+bug/1334398
Possible values:
* True: Live migrate is disabled when using libvirt
* False: Live migrate functions as usual
Services which consume this:
* ``nova-compute``
Interdependencies to other options:
* None
""")
handle_virt_lifecycle_events = cfg.BoolOpt(
'handle_virt_lifecycle_events',
default=True,
help="""
Enable handling of events emitted from compute drivers.
Many compute drivers emit lifecycle events, which are events that occur when,
for example, an instance is starting or stopping. If the instance is going
through task state changes due to an API operation, like resize, the events
are ignored.
This is an advanced feature which allows the hypervisor to signal to the
compute service that an unexpected state change has occurred in an instance
and that the instance can be shutdown automatically. Unfortunately, this can
race in some conditions, for exmaple in reboot operations or when the compute
service or when host is rebooted (planned or due to an outage). If such races
are common, then it is advisable to disable this feature.
Care should be taken when this feature is disabled and
'sync_power_state_interval' is set to a negative value. In this case, any
instances that get out of sync between the hypervisor and the Nova database
will have to be synchronized manually.
For more information, refer to the bug report:
https://bugs.launchpad.net/bugs/1444630
Possible values:
* True: Enable the feature
* False: Disable the feature
Services which consume this:
* ``nova-compute``
Interdependencies to other options:
* If ``sync_power_state_interval`` is negative and this feature is disabled,
then instances that get out of sync between the hypervisor and the Nova
database will have to be synchonized manually.
""")
# Every workaround option, in registration order.
ALL_OPTS = [disable_rootwrap,
            disable_libvirt_livesnapshot,
            handle_virt_lifecycle_events]


def register_opts(conf):
    """Register all workaround options on *conf* under the group object."""
    conf.register_opts(ALL_OPTS, group=workarounds_group)


def list_opts():
    """Return the {group: options} mapping used by oslo.config generators."""
    return {workarounds_group: ALL_OPTS}
| # Copyright 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" The workarounds_opts group is for very specific reasons.
If you're:
- Working around an issue in a system tool (e.g. libvirt or qemu) where the
fix is in flight/discussed in that community.
- The tool can be/is fixed in some distributions and rather than patch the
code those distributions can trivially set a config option to get the
"correct" behavior.
Then this is a good place for your workaround.
.. warning::
Please use with care! Document the BugID that your workaround is paired with.
"""
from oslo_config import cfg
disable_rootwrap = cfg.BoolOpt(
'disable_rootwrap',
default=False,
help='This option allows a fallback to sudo for performance '
'reasons. For example see '
'https://bugs.launchpad.net/nova/+bug/1415106')
disable_libvirt_livesnapshot = cfg.BoolOpt(
'disable_libvirt_livesnapshot',
default=True,
help='When using libvirt 1.2.2 live snapshots fail '
'intermittently under load. This config option provides '
'a mechanism to enable live snapshot while this is '
'resolved. See '
'https://bugs.launchpad.net/nova/+bug/1334398')
handle_virt_lifecycle_events = cfg.BoolOpt(
'handle_virt_lifecycle_events',
default=True,
help="Whether or not to handle events raised from the compute "
"driver's 'emit_event' method. These are lifecycle "
"events raised from compute drivers that implement the "
"method. An example of a lifecycle event is an instance "
"starting or stopping. If the instance is going through "
"task state changes due to an API operation, like "
"resize, the events are ignored. However, this is an "
"advanced feature which allows the hypervisor to signal "
"to the compute service that an unexpected state change "
"has occurred in an instance and the instance can be "
"shutdown automatically - which can inherently race in "
"reboot operations or when the compute service or host "
"is rebooted, either planned or due to an unexpected "
"outage. Care should be taken when using this and "
"sync_power_state_interval is negative since then if any "
"instances are out of sync between the hypervisor and "
"the Nova database they will have to be synchronized "
"manually. See https://bugs.launchpad.net/bugs/1444630")
# Every workaround option, in registration order.
ALL_OPTS = [disable_rootwrap,
            disable_libvirt_livesnapshot,
            handle_virt_lifecycle_events]


def register_opts(conf):
    """Register all workaround options on *conf* in the 'workarounds' group."""
    conf.register_opts(ALL_OPTS, group='workarounds')


def list_opts():
    """Return the {group: options} mapping used by oslo.config generators."""
    return {'workarounds': ALL_OPTS}
| Python | 0.000008 |
0cd5ed79f019db91261c0d858b61796021ec3f80 | Add syntax highlighting tests for PEP 570 | test/highlight/parameters.py | test/highlight/parameters.py | def g(h, i, /, j, *, k=100, **kwarg):
# ^ operator
# ^ operator
pass
| Python | 0 | |
a85f0df776da6c8f39e5d5dbb91370531e4605be | Create GUI-Main.py | GUI-Main.py | GUI-Main.py | #!/usr/bin/env python
import tkinter as tk
import sys, glob, time
from tkinter.filedialog import *
from PIL import Image
_imaging = Image.core
class Application(tk.Frame):
    """Tkinter front end for batch-resizing every image in a folder."""

    def __init__(self, master=None):
        tk.Frame.__init__(self, master)
        self.grid()
        self.input_directory = StringVar()
        self.output_directory = StringVar()
        # BUG FIX: ``self.progress`` was assigned a fresh StringVar twice;
        # one assignment is enough.
        self.progress = StringVar()
        self.width, self.height = StringVar(), StringVar()
        self.createWidgets()
        # Maps "Input"/"Output" to the directory chosen via selectFolder.
        self.variable_dictionary = {}

    def createWidgets(self):
        """Build and grid all widgets."""
        self.input_directory.set("No Image Direcotry Selected")
        self.output_directory.set("No Output Directory Selected")

        self.file_Label = tk.Label(self, textvariable=self.input_directory)
        self.file_Label.grid(row=0, column=0)
        self.file_button = tk.Button(self, text="Open Folder", command = lambda:self.selectFolder("Input"))
        self.file_button.grid(row=0, column=1)

        self.output_label = tk.Label(self, textvariable=self.output_directory)
        self.output_label.grid(row=1, column=0)
        self.output_button = tk.Button(self, text = "Select Folder", command = lambda:self.selectFolder("Output"))
        self.output_button.grid(row=1, column=1)

        self.width_label = tk.Label(self, text="Width (In Pixels): ")
        self.width_label.grid(row=2, column=0)
        self.width_entry = tk.Entry(self, textvariable=self.width)
        self.width_entry.grid(row=2, column=1)

        self.height_label = tk.Label(self, text="Height (In Pixels): ")
        self.height_label.grid(row=3, column=0)
        self.height_entry = tk.Entry(self, textvariable=self.height)
        self.height_entry.grid(row=3, column=1)

        self.resize_button = tk.Button(self, text="Resize Images", command=lambda:self.resizeImages(self.variable_dictionary["Input"], self.variable_dictionary["Output"]))
        self.resize_button.grid(row=4, column=0)

    def resizeImages(self, folder, output):
        """Resize every supported image in *folder* into numbered JPEGs
        under *output*, updating the progress label as it goes."""
        folder = str(folder)
        output = str(output)

        # Collect all supported image types in one pass.
        all_images = []
        for pattern in ("/*.png", "/*.jpg", "/*.gif", "/*.bmp"):
            all_images.extend(glob.glob(folder + pattern))
        print(all_images)
        total_images = len(all_images)

        self.progress.set("0/" + str(total_images) + " Images Resized.")
        self.status_label = tk.Label(self, textvariable = self.progress)
        self.status_label.grid(row=4, column=1)

        try:
            image_count = 0
            size = int(self.width.get()), int(self.height.get())
            for image in all_images:
                image_count += 1
                # FIX: renamed from the ``curent_image`` typo.
                current_image = Image.open(image)
                current_image.thumbnail(size, Image.ANTIALIAS)
                current_image.save(output + "/" + str(image_count) + ".JPEG")
                self.progress.set(str(image_count) + "/" + str(total_images) +
                                  " Images Resized. Resizing: " + image)
        except Exception as err:
            # BUG FIX: was a bare ``except:`` that hid the cause (and
            # swallowed KeyboardInterrupt); report what actually failed.
            print("Failed To Resize Images! %s" % err)

    def selectFolder(self, name):
        """Ask the user for a directory and store it under *name*
        ("Input" or "Output"), mirroring it into the matching label."""
        self.variable_dictionary[name] = askdirectory(title="Choose The Appropriate Folder")
        if name == "Input":
            self.input_directory.set(self.variable_dictionary[name])
        else:
            self.output_directory.set(self.variable_dictionary[name])
if __name__ == "__main__":
app = Application()
app.master.title("Image Resizer")
app.mainloop()
| Python | 0.000001 | |
bf6c8ce59ec841b19dab3a02a9065864035d4d82 | Add a new helper to convert stackalytics default_data.json | bin/helpers/openstack/stackalytics.py | bin/helpers/openstack/stackalytics.py | import sys
import json
import yaml
import datetime
# Read default_data.json from stackalytics/etc/ and convert for
# repoXplorer.
if __name__ == "__main__":
ident = {'identities': {},
'groups': {}}
data = json.loads(file(sys.argv[1]).read())
users = data['users']
groups = data['companies']
i = ident['identities']
g = ident['groups']
gstore = {}
for group in groups:
gstore[group['company_name']] = group['domains']
for user in users:
try:
i[user['launchpad_id']] = {}
iu = i[user['launchpad_id']]
except:
try:
i[user['github_id']] = {}
iu = i[user['github_id']]
except:
continue
sys.stdout.write('.')
iu['name'] = user['user_name']
iu['default-email'] = user['emails'][0]
iu['emails'] = {}
for email in user['emails']:
iu['emails'].setdefault(email, {})
histo = []
for c in user['companies']:
iu['emails'][email].setdefault('groups', {})
iu['emails'][email]['groups'][c['company_name']] = {}
# cd = iu['emails'][email]['groups'][c['company_name']]
g.setdefault(
c['company_name'], {
'description': '',
'emails': {},
'domains': gstore.get(c['company_name'], [])
})
if c['end_date'] is not None:
end_date_raw = datetime.datetime.strptime(
c['end_date'], '%Y-%b-%d')
histo.append([None, end_date_raw, c['company_name']])
else:
histo.append([None, None, c['company_name']])
histo.sort(key=lambda tup: tup[1] or datetime.datetime.today())
for z, h in enumerate(histo):
if z == 0:
pass
h[0] = histo[z-1][1]
cd = iu['emails'][email]['groups'][h[2]]
if h[0]:
cd['begin-date'] = h[0].strftime('%Y-%m-%d')
if h[1]:
cd['end-date'] = h[1].strftime('%Y-%m-%d')
path = 'test.yaml'
with open(path, 'w') as fd:
fd.write(yaml.safe_dump(ident,
default_flow_style=False))
| Python | 0.00006 | |
367a1ff9f0ca3daae3ee804b5484e3863bb72307 | Add initial proposal tests | tests/views/test_proposal.py | tests/views/test_proposal.py | #!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for proposal view.
"""
__authors__ = [
'"Sverre Rabbelier" <sverre@rabbelier.nl>',
]
import httplib
from tests.profile_utils import GSoCProfileHelper
from tests.test_utils import DjangoTestCase
from tests.timeline_utils import TimelineHelper
# TODO: perhaps we should move this out?
from soc.modules.gsoc.models.proposal import GSoCProposal
from soc.modules.seeder.logic.seeder import logic as seeder_logic
class ProposalTest(DjangoTestCase):
  """Tests the GSoC proposal submission page.
  """

  def setUp(self):
    from soc.modules.gsoc.models.program import GSoCProgram
    from soc.modules.gsoc.models.organization import GSoCOrganization
    # Seed a visible program and an active organization for it.
    program_props = {'status': 'visible', 'apps_tasks_limit': 20}
    self.gsoc = seeder_logic.seed(GSoCProgram, properties=program_props)
    org_props = {'scope': self.gsoc, 'status': 'active'}
    self.org = seeder_logic.seed(GSoCOrganization, properties=org_props)
    self.timeline = TimelineHelper(self.gsoc.timeline)
    self.data = GSoCProfileHelper(self.gsoc)

  def assertProposalTemplatesUsed(self, response):
    """Asserts that all the templates from the dashboard were used.
    """
    self.assertGSoCTemplatesUsed(response)
    self.assertTemplateUsed(response, 'v2/modules/gsoc/proposal/base.html')
    self.assertTemplateUsed(response, 'v2/modules/gsoc/_form.html')

  def testSubmitProposal(self):
    self.data.createStudent()
    self.timeline.studentSignup()
    url = '/gsoc/proposal/submit/' + self.org.key().name()

    # GET should render the submission form.
    response = self.client.get(url)
    self.assertProposalTemplatesUsed(response)

    # POST a seeded proposal back to the same URL.
    overrides = {
        'program': self.gsoc, 'score': 0, 'mentor': None,
        'org': self.org, 'status': 'new',
    }
    properties = seeder_logic.seed_properties(GSoCProposal,
                                              properties=overrides)
    post_data = dict(properties)
    post_data['xsrf_token'] = self.getXsrfToken(url)
    response = self.client.post(url, post_data)
    self.assertResponseRedirect(response)

    # TODO(SRabbelier): verify
    proposal = GSoCProposal.all().get()
    self.assertPropertiesEqual(properties, proposal)
| Python | 0 | |
ee4ab0cf3ef08459e1a8ad1cdae370870ba28805 | Create lc1755.py | LeetCode/lc1755.py | LeetCode/lc1755.py | class Solution:
def minAbsDifference(self, nums: List[int], goal: int) -> int:
n = len(nums)
nums.sort(key=lambda x: -abs(x))
neg = [0 for _ in range(n+1)]
pos = [0 for _ in range(n+1)]
for i in range(n-1, -1, -1):
if nums[i] < 0:
neg[i] = neg[i+1] + nums[i]
pos[i] = pos[i+1]
else:
pos[i] = pos[i+1] + nums[i]
neg[i] = neg[i+1]
# print(nums, pos, neg)
ans = abs(goal)
s = set([0])
def check(a, b):
if b < goal - ans or goal + ans < a:
return False
return True
for i in range(n):
s = set([x for x in s if check(x+neg[i], x+pos[i])])
# print(s)
t = set()
for x in s:
y = x + nums[i]
if abs(y - goal) < ans:
ans = abs(y - goal)
t.add(y)
s |= t
return ans
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.