commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
c75749922c8be3b70bacd66a6b25b8e50faf7b76 | Bump flask from 0.10.1 to 1.0 | codehugger/Flask-Starter,codehugger/Flask-Starter | setup.py | setup.py | # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="myapp",
version='0.0.1-dev',
description='My Awesome Application',
author='**INSERT_AUTHOR_NAME**',
author_email='**INSERT_AUTHOR_EMAIL**',
packages=[
'myapp',
'myapp.blueprints'
],
url='https://www.github.com/codehugger/myapp',
include_package_data=True,
zip_safe=False,
install_requires=[
'Flask==1.0',
'Flask-Migrate==1.2.0',
'Flask-SQLAlchemy==1.0',
'Flask-Script==0.6.7',
'Flask-Testing==0.4.1',
'Jinja2==2.7.2',
'Mako==0.9.1',
'MarkupSafe==0.19',
'SQLAlchemy==0.9.4',
'Werkzeug==0.9.4',
'alembic==0.6.4',
'itsdangerous==0.24',
'wsgiref==0.1.2',
]
)
| # -*- coding: utf-8 -*-
from setuptools import setup
setup(
name="myapp",
version='0.0.1-dev',
description='My Awesome Application',
author='**INSERT_AUTHOR_NAME**',
author_email='**INSERT_AUTHOR_EMAIL**',
packages=[
'myapp',
'myapp.blueprints'
],
url='https://www.github.com/codehugger/myapp',
include_package_data=True,
zip_safe=False,
install_requires=[
'Flask==0.10.1',
'Flask-Migrate==1.2.0',
'Flask-SQLAlchemy==1.0',
'Flask-Script==0.6.7',
'Flask-Testing==0.4.1',
'Jinja2==2.7.2',
'Mako==0.9.1',
'MarkupSafe==0.19',
'SQLAlchemy==0.9.4',
'Werkzeug==0.9.4',
'alembic==0.6.4',
'itsdangerous==0.24',
'wsgiref==0.1.2',
]
)
| bsd-3-clause | Python |
787bc6427cc641e3c7a590f1ab57a2b840f471bc | bump to v0.6.0.dev0 (#61) | sinhrks/japandas | setup.py | setup.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
PACKAGE = 'japandas'
README = 'README.rst'
REQUIREMENTS = 'requirements.txt'
VERSION = '0.6.0.dev0'
def read(fname):
# file must be read as utf-8 in py3 to avoid to be bytes
return codecs.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
def write_version_py(filename=None):
cnt = """\
version = '%s'
"""
a = open(filename, 'w')
try:
a.write(cnt % VERSION)
finally:
a.close()
version_file = os.path.join(os.path.dirname(__file__), PACKAGE, 'version.py')
write_version_py(filename=version_file)
setup(name=PACKAGE,
version=VERSION,
description='pandas japanese extension',
long_description=read(README),
author='sinhrks',
author_email='sinhrks@gmail.com',
url='http://japandas.readthedocs.org/en/stable',
license = 'BSD',
packages=find_packages(),
package_data = {'japandas.tseries': ['data/*.pkl']},
install_requires=list(read(REQUIREMENTS).splitlines())
)
| # -*- coding: utf-8 -*-
#!/usr/bin/env python
import codecs
import os
from setuptools import setup, find_packages
PACKAGE = 'japandas'
README = 'README.rst'
REQUIREMENTS = 'requirements.txt'
VERSION = '0.5.1'
def read(fname):
# file must be read as utf-8 in py3 to avoid to be bytes
return codecs.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
def write_version_py(filename=None):
cnt = """\
version = '%s'
"""
a = open(filename, 'w')
try:
a.write(cnt % VERSION)
finally:
a.close()
version_file = os.path.join(os.path.dirname(__file__), PACKAGE, 'version.py')
write_version_py(filename=version_file)
setup(name=PACKAGE,
version=VERSION,
description='pandas japanese extension',
long_description=read(README),
author='sinhrks',
author_email='sinhrks@gmail.com',
url='http://japandas.readthedocs.org/en/stable',
license = 'BSD',
packages=find_packages(),
package_data = {'japandas.tseries': ['data/*.pkl']},
install_requires=list(read(REQUIREMENTS).splitlines())
)
| bsd-3-clause | Python |
d2f7fce3cac7b2d742ab553325b3394092a0c8f8 | Change the name of the package | adolfosilva/libgen.py | setup.py | setup.py | from setuptools import setup
setup(
name='libgen.py',
version='0.1.0',
license='MIT',
author='Adolfo Silva',
author_email='code@adolfosilva.org',
url='https://github.com/adolfosilva/libgen.py',
description='A script to download books from gen.lib.rus.ec',
classifiers=[
'License :: OSI Approved :: MIT License',
],
keywords='libgen',
include_package_data=True, # include files listed in MANIFEST.in
tests_requires=['pytest'],
py_modules=['libgen'],
entry_points={
'console_scripts': ['libgen=libgen:main'],
},
install_requires=['beautifulsoup4', 'tabulate', 'requests']
)
| from setuptools import setup
setup(
name='libgen',
version='0.1',
license='MIT',
author='Adolfo Silva',
author_email='code@adolfosilva.org',
url='https://github.com/adolfosilva/libgen.py',
description='A script to download books from gen.lib.rus.ec',
tests_requires=['pytest'],
py_modules=['libgen'],
entry_points={
'console_scripts': ['libgen=libgen:main'],
},
install_requires=['beautifulsoup4', 'tabulate', 'requests']
)
| mit | Python |
46414834ec14f4c70b8c66770aa13570f09b924a | Upgrade content-io to 1.2.2 | andreif/djedi-cms,5monkeys/djedi-cms,andreif/djedi-cms,andreif/djedi-cms,5monkeys/djedi-cms,5monkeys/djedi-cms | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
from sys import version_info
install_requires = [
'six',
'content-io >= 1.2.2',
'simplejson >= 3.2.0'
]
tests_require = [
'coverage',
'Markdown <= 2.4.1',
'Pillow',
]
if version_info < (3,):
tests_require += ['unittest2']
version = __import__('djedi').__version__
setup(
name='djedi-cms',
version=version,
description='Django content management as it should be',
long_description=(
'.. image:: https://raw.github.com/5monkeys/djedi-cms/master/docs/_static/djedi-portrait.png\n\n'
'- Read the documentation_\n'
'- Browse the source_\n\n'
'.. image:: https://travis-ci.org/5monkeys/djedi-cms.png?branch=master\n'
' :target: https://travis-ci.org/5monkeys/djedi-cms\n'
'.. image:: https://coveralls.io/repos/5monkeys/djedi-cms/badge.png?branch=master\n'
' :target: https://coveralls.io/r/5monkeys/djedi-cms?branch=master\n\n'
'.. _documentation: http://djedi-cms.org/\n'
'.. _source: https://github.com/5monkeys/djedi-cms\n\n'
),
author='Jonas Lundberg',
author_email='jonas@5monkeys.se',
url='https://github.com/5monkeys/djedi-cms',
download_url='https://github.com/5monkeys/djedi-cms/tarball/%s' % version,
keywords=['cms', 'django', 'edit', 'gettext', 'content', 'management', 'template', 'plugins', 'markdown'],
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Environment :: Web Environment',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=install_requires,
extras_require={
'tests': tests_require,
},
tests_require=tests_require,
test_suite='runtests.main'
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from sys import version_info
install_requires = [
'six',
'content-io >= 1.2.1',
'simplejson >= 3.2.0'
]
tests_require = [
'coverage',
'Markdown <= 2.4.1',
'Pillow',
]
if version_info < (3,):
tests_require += ['unittest2']
version = __import__('djedi').__version__
setup(
name='djedi-cms',
version=version,
description='Django content management as it should be',
long_description=(
'.. image:: https://raw.github.com/5monkeys/djedi-cms/master/docs/_static/djedi-portrait.png\n\n'
'- Read the documentation_\n'
'- Browse the source_\n\n'
'.. image:: https://travis-ci.org/5monkeys/djedi-cms.png?branch=master\n'
' :target: https://travis-ci.org/5monkeys/djedi-cms\n'
'.. image:: https://coveralls.io/repos/5monkeys/djedi-cms/badge.png?branch=master\n'
' :target: https://coveralls.io/r/5monkeys/djedi-cms?branch=master\n\n'
'.. _documentation: http://djedi-cms.org/\n'
'.. _source: https://github.com/5monkeys/djedi-cms\n\n'
),
author='Jonas Lundberg',
author_email='jonas@5monkeys.se',
url='https://github.com/5monkeys/djedi-cms',
download_url='https://github.com/5monkeys/djedi-cms/tarball/%s' % version,
keywords=['cms', 'django', 'edit', 'gettext', 'content', 'management', 'template', 'plugins', 'markdown'],
license='BSD',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Environment :: Web Environment',
'Natural Language :: English',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
],
install_requires=install_requires,
extras_require={
'tests': tests_require,
},
tests_require=tests_require,
test_suite='runtests.main'
)
| bsd-3-clause | Python |
204cb3a68562e4ac7367571071faeb1762031220 | Fix silly typo in setup.py | thinkst/opencanary,thinkst/opencanary,thinkst/opencanary,thinkst/opencanary | setup.py | setup.py | from setuptools import setup, find_packages
import os
import opencanary
setup(
name='opencanary',
version=opencanary.__version__,
url='http://www.thinkst.com/',
author='Thinkst Applied Research',
author_email='info@thinkst.com',
description='OpenCanary daemon',
long_description='A low interaction honeypot intended to be run on internal networks.',
install_requires=[
'Jinja2>=2.4',
'Twisted==18.4.0',
'pyasn1==0.4.5',
'pycrypto==2.6.1',
'simplejson==3.16.0',
'wsgiref==0.1.2',
'requests==2.7.0',
'zope.interface==4.4.2',
'PyPDF2==1.26.0',
'fpdf==1.7.2',
'passlib==1.7.1',
'ntlmlib==0.71'
],
setup_requires=[
'setuptools_git'
],
license='BSD',
packages = find_packages(exclude='test'),
scripts=['bin/opencanaryd','bin/opencanary.tac'],
platforms='any',
include_package_data=True
)
| from setuptools import setup, find_packages
import os
import opencanary
setup(
name='opencanary',
version=opencanary.__version__,
url='http://www.thinkst.com/',
author='Thinkst Applied Research',
author_email='info@thinkst.com',
description='OpenCanary daemon',
long_description='A low interaction honeypot intended to be run on internal networks.',
install_requires=[
'Jinja2>=2.4',
'Twisted==18.4.0',
'pyasn1==0.4.5',
'pycrypto==2.6.1',
'simplejson==3.16.0',
'wsgiref==0.1.2',
'requests==2.7.0'
'zope.interface==4.4.2',
'PyPDF2==1.26.0',
'fpdf==1.7.2',
'passlib==1.7.1',
'ntlmlib==0.71'
],
setup_requires=[
'setuptools_git'
],
license='BSD',
packages = find_packages(exclude='test'),
scripts=['bin/opencanaryd','bin/opencanary.tac'],
platforms='any',
include_package_data=True
)
| bsd-3-clause | Python |
d6c998868ac89acbbfc232245676449f126f6f19 | add subscribed field | lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django,lafranceinsoumise/api-django | src/people/admin.py | src/people/admin.py | from django.shortcuts import reverse
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from api.admin import admin_site
from .models import Person, PersonTag
@admin.register(Person, site=admin_site)
class PersonAdmin(admin.ModelAdmin):
list_display = ('email', 'subscribed', 'role_link')
search_fields = ('emails__address', 'first_name', 'last_name',)
fieldsets = (
(None, {
'fields': ('email', 'first_name', 'last_name',)
}),
(_('Dates'), {
'fields': ('created', 'modified')
}),
(_('Paramètres mails'), {
'fields': ('subscribed', 'bounced', 'bounced_date',)
}),
(_('Role correspondant'), {
'fields': ('role_link',)
})
)
readonly_fields = ('created', 'modified', 'role_link')
search_fields = ('emails__address', 'first_name', 'last_name', 'location_zip')
list_filter = ('tags', 'subscribed')
def role_link(self, obj):
return '<a href="%s">%s</a>' % (
reverse('admin:authentication_role_change', args=[obj.role_id]),
_('Voir le rôle')
)
role_link.allow_tags = True
role_link.short_description = _('Lien vers le rôle')
@admin.register(PersonTag, site=admin_site)
class PersonTagAdmin(admin.ModelAdmin):
pass
| from django.shortcuts import reverse
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from api.admin import admin_site
from .models import Person, PersonTag
@admin.register(Person, site=admin_site)
class PersonAdmin(admin.ModelAdmin):
list_display = ('email', 'subscribed', 'role_link')
search_fields = ('emails__address', 'first_name', 'last_name',)
fieldsets = (
(None, {
'fields': ('email', 'first_name', 'last_name',)
}),
(_('Dates'), {
'fields': ('created', 'modified')
}),
(_('Paramètres mails'), {
'fields': ('subscribed', 'bounced', 'bounced_date',)
}),
(_('Role correspondant'), {
'fields': ('role_link',)
})
)
readonly_fields = ('created', 'modified', 'role_link')
search_fields = ('emails__address', 'first_name', 'last_name', 'location_zip')
list_filter = ('tags',)
def role_link(self, obj):
return '<a href="%s">%s</a>' % (
reverse('admin:authentication_role_change', args=[obj.role_id]),
_('Voir le rôle')
)
role_link.allow_tags = True
role_link.short_description = _('Lien vers le rôle')
@admin.register(PersonTag, site=admin_site)
class PersonTagAdmin(admin.ModelAdmin):
pass
| agpl-3.0 | Python |
8e211245b786fab5ff8f686b4220f2b0fed2f10e | Update easyium/__init__.py | KarlGong/easyium-python,KarlGong/easyium | easyium/__init__.py | easyium/__init__.py | try:
import appium
appium_installed = True
except ImportError:
appium_installed = False
from .webdriver import WebDriver, WebDriverType
from .staticelement import StaticElement
from .identifier import Identifier
from .waits.waiter import wait_for
__author__ = 'karl.gong'
| from .webdriver import WebDriver, WebDriverType
from .staticelement import StaticElement
from .identifier import Identifier
from .waits.waiter import wait_for
__author__ = 'karl.gong'
try:
import appium
appium_installed = True
except ImportError:
appium_installed = False
| apache-2.0 | Python |
80be25e8ff106e0a578cc7ddcfd4aa060ce0ab4f | support deep path | if1live/easylinker | easylinker/links.py | easylinker/links.py | #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
class LinkException(Exception):
pass
class Link(object):
def __init__(self, src, dst):
self.src = src
self.dst = dst
def pathname_to_valid_pathname(self, pathname):
return pathname.replace('/', os.path.sep).replace('\\', os.path.sep)
def create(self):
src = self.pathname_to_valid_pathname(self.src)
dst = self.pathname_to_valid_pathname(self.dst)
if os.path.exists(dst):
msg = "Destination({}) is already exist.".format(dst)
raise LinkException(msg)
if not os.path.exists(src):
msg = "Source({}) is not exist.".format(src)
raise LinkException(msg)
if os.path.isdir(src):
self._process_directory(src, dst)
else:
self._process_file(src, dst)
return True
def _process_directory(self, src, dst):
parent_dir = os.path.split(dst)[0]
try:
os.makedirs(parent_dir)
except OSError:
pass
if os.name == 'nt':
self._windows_directory_link(src, dst)
else:
self._unix_link(src, dst)
def _process_file(self, src, dst):
parent_dir = os.path.split(dst)[0]
try:
os.makedirs(parent_dir)
except OSError:
pass
if os.name == 'nt':
self._windows_file_link(src, dst)
else:
self._unix_link(src, dst)
def _unix_link(self, src, dst):
return os.symlink(src, dst)
def _windows_file_link(self, src, dst):
from ntfsutils import hardlink
return hardlink.create(src, dst)
def _windows_directory_link(self, src, dst):
from ntfsutils import junction
return junction.create(src, dst)
| #-*- coding: utf-8 -*-
from __future__ import unicode_literals
from __future__ import print_function
import os
class LinkException(Exception):
pass
class Link(object):
def __init__(self, src, dst):
self.src = src
self.dst = dst
def create(self):
src = self.src
dst = self.dst
if os.path.exists(dst):
msg = "Destination({}) is already exist.".format(dst)
raise LinkException(msg)
if not os.path.exists(src):
msg = "Source({}) is not exist.".format(src)
raise LinkException(msg)
if os.name == 'nt':
if os.path.isdir(src):
self._windows_directory_link(src, dst)
else:
self._windows_file_link(src, dst)
else:
self._unix_link(src, dst)
return True
def _unix_link(self, src, dst):
return os.symlink(src, dst)
def _windows_file_link(self, src, dst):
from ntfsutils import hardlink
return hardlink.create(src, dst)
def _windows_directory_link(self, src, dst):
from ntfsutils import junction
return junction.create(src, dst)
| mit | Python |
61f78a13362fb75796c423f60a62c865e50e212a | Rewrite command-line. (#9) | IzunaDevs/nsfw_dl | nsfw_dl/__main__.py | nsfw_dl/__main__.py | #!/usr/bin/python3.6
"""
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
import argparse
import sys
import os
import nsfw_dl
def main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value
"""
Main entrypoint to nsfw_dl commandline.
"""
image = argparse.ArgumentParser()
image.add_argument('-d', '--download',
help='Download the result to a file.',
default=False)
image.add_argument('-f', '--file',
help="Filename to download to.",
default=lambda x: x.split("/")[-1])
image.add_argument('-s', '--source',
help='Image source to use.',
default='')
image.add_argument('query', help='Tags to use during search.',
default='', nargs="*")
args = image.parse_args(argv)
if (args.source == ''):
print("Usage: " + os.path.basename(sys.argv[0]) + " [-d/--download]"
" [-f/--file ...] [-s/--source ...] [query]")
print("Where first ... is the file name you want, second ... "
"is the source where source can be:")
sources = "\n".join("\n".join(v for v in source) for source in
nsfw_dl.SOURCES.values())
print(sources)
print("And query is what you want to search for.")
else:
download_file = args.download
file = args.file
with nsfw_dl.NSFWDL() as dl:
img = dl.download(args.source, args=" ".join(args.query))
if callable(file):
file = file(img)
if download_file:
with open(file, "wb") as f:
f.write(dl.get(img))
print(file)
else:
print(img)
if __name__ == '__main__':
main()
| #!/usr/bin/python3.6
"""
Read the license at:
https://github.com/IzunaDevs/nsfw_dl/blob/master/LICENSE
"""
import argparse
import sys
import nsfw_dl
def download(downloader, args, file, download_file):
with nsfw_dl.NSFWDL() as dl:
img = dl.download(downloader, args=args)
if callable(file):
file = file(img)
if download_file:
with open(file, "wb") as f:
f.write(dl.get(img))
print(file)
else:
print(img)
def main(argv=sys.argv[1:]): # pylint: disable=dangerous-default-value
"""
Main entrypoint to nsfw_dl commandline.
"""
parser = argparse.ArgumentParser()
image = argparse.ArgumentParser()
parser.add_argument("action", choices=["image", "sources"])
image.add_argument('-d', '--download',
help='Download the result to a file.',
default=False, action="store_true")
image.add_argument('-f', '--file',
help="Filename to download to.",
default=lambda x: x.split("/")[-1])
image.add_argument('source', help="Image source to use.")
image.add_argument('query', help="Tags to use during search.",
default='', nargs="*")
args = parser.parse_args(argv)
if args.action == "sources":
sources = "\n".join("\n".join(v for v in source) for source in
nsfw_dl.SOURCES.values())
print(sources)
else:
args = image.parse_args(argv[1:])
download(args.source, args.query, args.file, args.download)
if __name__ == '__main__':
main()
| mit | Python |
42dba1bb3d86aa7998a55967ef5e7dc0f47c2050 | Implement unfreeze. | jackstanek/s3bot | src/s3bot/freeze.py | src/s3bot/freeze.py | """Wrapper for the freeze.sh bash script."""
import getpass
import os
import subprocess
from s3bot import SHARED_BASE
from s3bot.useremails import UserEmailRecord
FREEZE_SH_SCRIPT_PATH = os.path.join(SHARED_BASE, "freezer", "freeze.sh")
UNFREEZE_SH_SCRIPT_PATH = os.path.join(SHARED_BASE, "freezer", "unfreeze.sh")
def freeze(path):
"""Queue a freeze.sh job for the specified file."""
user = getpass.getuser()
email = UserEmailRecord().get(user)
if not os.access(path, os.R_OK):
raise FileNotFoundError("could not access {}".format(path))
cmd = ["qsub",
"-F", path,
"-M", email,
"-q", "lab-long",
FREEZE_SH_SCRIPT_PATH]
try:
subprocess.run(cmd)
except subprocess.CalledProcessError as cpe:
print("Could not queue job: return code {}".format(cpe.returncode))
def unfreeze(itemname):
user = getpass.getuser()
email = UserEmailRecord().get(user)
cmd = ["qsub",
"-F", user, itemname,
"-M", email,
"-q", "lab-long",
UNFREEZE_SH_SCRIPT_PATH]
try:
subprocess.run(cmd)
except subprocess.CalledProcessError as cpe:
print("Could not queue job: return code {}".format(cpe.returncode))
| """Wrapper for the freeze.sh bash script."""
import getpass
import os
import subprocess
from s3bot import SHARED_BASE
from s3bot.useremails import UserEmailRecord
FREEZE_SH_SCRIPT_PATH = os.path.join(SHARED_BASE, "freezer", "freeze.sh")
def freeze(path):
"""Queue a freeze.sh job for the specified file."""
user = getpass.getuser()
email = UserEmailRecord().get(user)
if not os.access(path, os.R_OK):
raise FileNotFoundError("could not access {}".format(path))
cmd = ["qsub",
"-F", path,
"-M", email,
"-q", "lab-long",
FREEZE_SH_SCRIPT_PATH]
try:
subprocess.run(cmd)
except subprocess.CalledProcessError as cpe:
print("Could not queue job: return code {}".format(cpe.returncode))
| mit | Python |
c62ceaab233b0e2f60d5553e6b6f359cc6e0ae35 | Update string-to-integer-atoi.py | tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015 | Python/string-to-integer-atoi.py | Python/string-to-integer-atoi.py | # Time: O(n)
# Space: O(1)
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below
# and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs).
# You are responsible to gather all the input requirements up front.
#
# spoilers alert... click to show requirements for atoi.
#
# Requirements for atoi:
# The function first discards as many whitespace characters as necessary
# until the first non-whitespace character is found. Then, starting from this character,
# takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
#
# The string can contain additional characters after those that
# form the integral number, which are ignored and have no effect on the behavior of this function.
#
# If the first sequence of non-whitespace characters in str is not a valid integral number,
# or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
#
# If no valid conversion could be performed, a zero value is returned.
# If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.
#
class Solution(object):
def atoi(self, str):
"""
:type str: str
:rtype: int
"""
INT_MAX = 2147483647
INT_MIN = -2147483648
result = 0
if not str:
return result
i = 0
while i < len(str) and str[i].isspace():
i += 1
if len(str) == i:
return result
sign = 1
if str[i] == "+":
i += 1
elif str[i] == "-":
sign = -1
i += 1
while i < len(str) and '0' <= str[i] <= '9':
if result > (INT_MAX - int(str[i])) / 10:
return INT_MAX if sign > 0 else INT_MIN
result = result * 10 + int(str[i])
i += 1
return sign * result
if __name__ == "__main__":
print Solution().atoi("")
print Solution().atoi(" ")
print Solution().atoi("-1")
print Solution().atoi("2147483647")
print Solution().atoi("2147483648")
print Solution().atoi("-2147483648")
print Solution().atoi("-2147483649")
| # Time: O(n)
# Space: O(1)
#
# Implement atoi to convert a string to an integer.
#
# Hint: Carefully consider all possible input cases. If you want a challenge, please do not see below
# and ask yourself what are the possible input cases.
#
# Notes: It is intended for this problem to be specified vaguely (ie, no given input specs).
# You are responsible to gather all the input requirements up front.
#
# spoilers alert... click to show requirements for atoi.
#
# Requirements for atoi:
# The function first discards as many whitespace characters as necessary
# until the first non-whitespace character is found. Then, starting from this character,
# takes an optional initial plus or minus sign followed by as many numerical digits as possible, and interprets them as a numerical value.
#
# The string can contain additional characters after those that
# form the integral number, which are ignored and have no effect on the behavior of this function.
#
# If the first sequence of non-whitespace characters in str is not a valid integral number,
# or if no such sequence exists because either str is empty or it contains only whitespace characters, no conversion is performed.
#
# If no valid conversion could be performed, a zero value is returned.
# If the correct value is out of the range of representable values, INT_MAX (2147483647) or INT_MIN (-2147483648) is returned.
#
class Solution(object):
def atoi(self, str):
"""
:type str: str
:rtype: int
"""
INT_MAX = 2147483647
INT_MIN = -2147483648
result = 0
if not str:
return result
i = 0
while i < len(str) and str[i].isspace():
i += 1
if len(str) == i:
return result
else:
sign = 1
if str[i] == "+":
i += 1
elif str[i] == "-":
sign = -1
i += 1
while i < len(str) and '0' <= str[i] <= '9':
if result > (INT_MAX - int(str[i])) / 10:
return INT_MAX if sign > 0 else INT_MIN
result = result * 10 + int(str[i])
i += 1
return sign * result
if __name__ == "__main__":
print Solution().atoi("")
print Solution().atoi(" ")
print Solution().atoi("-1")
print Solution().atoi("2147483647")
print Solution().atoi("2147483648")
print Solution().atoi("-2147483648")
print Solution().atoi("-2147483649")
| mit | Python |
c5f9a02843fd85e9622597c831f238d3daa86ee4 | Update the build number | vlegoff/cocomud | src/version.py | src/version.py | BUILD = 12
| BUILD = 11
| bsd-3-clause | Python |
7d0b81075b26782155cbcb77576e29b72c2fac86 | Update forms.py | philippeowagner/django-terms,philippeowagner/django-terms | terms/forms.py | terms/forms.py | # coding: utf-8
from __future__ import unicode_literals
from django.conf import settings
from django.db.models import Q
from django.forms import ModelForm, TextInput, ValidationError
from django.utils.translation import ugettext as _
from .models import Term
from .settings import AVAILABLE_WIDGETS, TERMS_DEFINITION_WIDGET as WIDGET
# If WIDGET == 'auto': get the best widget one can import.
# Otherwise: Get the specified widget.
if WIDGET in AVAILABLE_WIDGETS[:2]:
from django.forms import Textarea # 'basic'
if WIDGET == AVAILABLE_WIDGETS[2] or (WIDGET == AVAILABLE_WIDGETS[0]
and 'tinymce' in settings.INSTALLED_APPS):
from tinymce.widgets import TinyMCE as Textarea # 'tinymce'
if WIDGET == AVAILABLE_WIDGETS[3] or (WIDGET == AVAILABLE_WIDGETS[0]
and 'ckeditor' in settings.INSTALLED_APPS):
from ckeditor.widgets import CKEditorWidget as Textarea # 'ckeditor'
class TermForm(ModelForm):
def clean_name(self):
data = self.cleaned_data['name']
data = data.strip(' |')
name = data.split('|')[0]
if Term.objects.exclude(pk=self.instance.pk).filter(
Q(name=name) | Q(name__startswith=name + '|')).exists():
raise ValidationError(
_('A term already exists with this main variant.'))
return data
def clean(self):
data = self.cleaned_data
obj = self.instance
definition = data.get('definition', obj.definition)
url = data.get('url', obj.url)
if not (definition or url):
raise ValidationError(_('Fill either “Definition” or “Link”.'))
return super(TermForm, self).clean()
class Meta(object):
model = Term
widgets = {
'name': TextInput(attrs={'size': 120}),
'definition': Textarea(),
}
fields = ('name', 'case_sensitive', 'definition', 'url', 'teaser_title', 'teaser_img', 'teaser_txt')
| # coding: utf-8
from __future__ import unicode_literals
from django.conf import settings
from django.db.models import Q
from django.forms import ModelForm, TextInput, ValidationError
from django.utils.translation import ugettext as _
from .models import Term
from .settings import AVAILABLE_WIDGETS, TERMS_DEFINITION_WIDGET as WIDGET
# If WIDGET == 'auto': get the best widget one can import.
# Otherwise: Get the specified widget.
if WIDGET in AVAILABLE_WIDGETS[:2]:
from django.forms import Textarea # 'basic'
if WIDGET == AVAILABLE_WIDGETS[2] or (WIDGET == AVAILABLE_WIDGETS[0]
and 'tinymce' in settings.INSTALLED_APPS):
from tinymce.widgets import TinyMCE as Textarea # 'tinymce'
if WIDGET == AVAILABLE_WIDGETS[3] or (WIDGET == AVAILABLE_WIDGETS[0]
and 'ckeditor' in settings.INSTALLED_APPS):
from ckeditor.widgets import CKEditorWidget as Textarea # 'ckeditor'
class TermForm(ModelForm):
def clean_name(self):
data = self.cleaned_data['name']
data = data.strip(' |')
name = data.split('|')[0]
if Term.objects.exclude(pk=self.instance.pk).filter(
Q(name=name) | Q(name__startswith=name + '|')).exists():
raise ValidationError(
_('A term already exists with this main variant.'))
return data
def clean(self):
data = self.cleaned_data
obj = self.instance
definition = data.get('definition', obj.definition)
url = data.get('url', obj.url)
if not (definition or url):
raise ValidationError(_('Fill either “Definition” or “Link”.'))
return super(TermForm, self).clean()
class Meta(object):
model = Term
widgets = {
'name': TextInput(attrs={'size': 120}),
'definition': Textarea(),
}
fields = ('name', 'case_sensitive', 'definition', 'url')
| bsd-3-clause | Python |
7b17da483d7aaef5353d3d19ca5a81089c0ad647 | remove primitive bad pixels correction | zougloub/libseek,kkalya/seek,zougloub/libseek,kkalya/seek | test-viewer.py | test-viewer.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, subprocess
import numpy as np
import cv2
if __name__ == '__main__':
cmd = ['./build/seek-test']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
)
cv2.namedWindow('seek', cv2.WINDOW_NORMAL)
_min = 0x7e00
def minchange(x):
global _min
print("min=%s" % x)
_min = x
cv2.createTrackbar('min', 'seek', _min, 0xffff, minchange)
_max = 0x8200
def maxchange(x):
global _max
print("max=%s" % x)
_max = x
cv2.createTrackbar('max', 'seek', _max, 0xffff, maxchange)
try:
while True:
data = proc.stdout.read(208*156*2)
img = np.fromstring(data, dtype='<H').reshape((156, 208))
img = np.float32(img)
img -= _min
img[img<0] = 0
img /= (_max-_min)
if 1:
img = np.rot90(img, 3)
cv2.imshow('seek', img)
key = cv2.waitKey(1)
if key == 113: # q
break
except:
raise
finally:
proc.kill()
proc.wait()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys, re, subprocess
import numpy as np
import cv2
if __name__ == '__main__':
cmd = ['./build/seek-test']
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
)
grid_fn = '1grid.png'
if os.path.exists(grid_fn):
grid = cv2.imread('1grid.png')/255
grid, a, b = cv2.split(grid)
else:
grid = np.zeros((156, 208))
cv2.namedWindow('seek', cv2.WINDOW_NORMAL)
_min = 0x7e00
def minchange(x):
global _min
print("min=%s" % x)
_min = x
cv2.createTrackbar('min', 'seek', _min, 0xffff, minchange)
_max = 0x8200
def maxchange(x):
global _max
print("max=%s" % x)
_max = x
cv2.createTrackbar('max', 'seek', _max, 0xffff, maxchange)
try:
while True:
data = proc.stdout.read(208*156*2)
img = np.fromstring(data, dtype='<H').reshape((156, 208))
img = np.float32(img)
img -= _min
img[img<0] = 0
img /= (_max-_min)
b = cv2.blur(img, (9,9))
a = img * (1.0 - grid) + b * (grid)
if 1:
a = np.rot90(a, 3)
cv2.imshow('seek', a)
key = cv2.waitKey(1)
if key == 113: # q
break
except:
raise
finally:
proc.kill()
proc.wait()
| mit | Python |
069512c4d5ee2ffe0ead486138576ac2ae04e0cf | Update version to 1.10.5 | wagnerand/amo-validator,diox/amo-validator,wagnerand/amo-validator,mstriemer/amo-validator,diox/amo-validator,diox/amo-validator,wagnerand/amo-validator,mozilla/amo-validator,mozilla/amo-validator,wagnerand/amo-validator,diox/amo-validator,mstriemer/amo-validator,mozilla/amo-validator,mozilla/amo-validator,mstriemer/amo-validator,mstriemer/amo-validator | validator/__init__.py | validator/__init__.py | __version__ = '1.10.5'
class ValidationTimeout(Exception):
"""Validation has timed out.
May be replaced by the exception type raised by an external timeout
handler when run in a server environment."""
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Validation timeout after %d seconds' % self.timeout
| __version__ = '1.10.4'
class ValidationTimeout(Exception):
"""Validation has timed out.
May be replaced by the exception type raised by an external timeout
handler when run in a server environment."""
def __init__(self, timeout):
self.timeout = timeout
def __str__(self):
return 'Validation timeout after %d seconds' % self.timeout
| bsd-3-clause | Python |
bbb3274333b7527604bcd8e87bb0e61929a83f78 | Set program version to 1.2.0 | titusjan/objbrowser,titusjan/objbrowser | objbrowser/version.py | objbrowser/version.py | """ Version info for objbrowser
"""
import os, sys
import objbrowser.qtpy, objbrowser.qtpy._version
DEBUGGING = False
PROGRAM_NAME = 'objbrowser'
PROGRAM_VERSION = '1.2.0'
PROGRAM_URL = 'https://github.com/titusjan/objbrowser'
PROGRAM_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
PYTHON_VERSION = "%d.%d.%d" % (sys.version_info[0:3])
QT_API = objbrowser.qtpy.API
QT_API_NAME = objbrowser.qtpy.API_NAME
QTPY_VERSION = '.'.join(map(str, objbrowser.qtpy._version.version_info))
| """ Version info for objbrowser
"""
import os, sys
import objbrowser.qtpy, objbrowser.qtpy._version
DEBUGGING = False
PROGRAM_NAME = 'objbrowser'
PROGRAM_VERSION = '1.2.0-dev'
PROGRAM_URL = 'https://github.com/titusjan/objbrowser'
PROGRAM_DIRECTORY = os.path.abspath(os.path.dirname(__file__))
PYTHON_VERSION = "%d.%d.%d" % (sys.version_info[0:3])
QT_API = objbrowser.qtpy.API
QT_API_NAME = objbrowser.qtpy.API_NAME
QTPY_VERSION = '.'.join(map(str, objbrowser.qtpy._version.version_info))
| mit | Python |
8362da9dbf7f63c37b70c1793b53d867529ef893 | Remove left-over debug message | alephobjects/ohai-kit,alephobjects/ohai-kit,alephobjects/ohai-kit | ohai_kit/singleton.py | ohai_kit/singleton.py | # Written by Senko Rasic <senko.rasic@goodcode.io>
# Released into Public Domain. Use it as you like.
from django.db import models
class SingletonModel(models.Model):
"""Singleton Django Model
Ensures there's always only one entry in the database, and can fix the
table (by deleting extra entries) even if added via another mechanism.
Also has a static load() method which always returns the object - from
the database if possible, or a new empty (default) instance if the
database is still empty. If your instance has sane defaults (recommended),
you can use it immediately without worrying if it was saved to the
database or not.
Useful for things like system-wide user-editable settings.
"""
class Meta:
abstract = True
def save(self, *args, **kwargs):
"""
Save object to the database. Removes all other entries if there
are any.
"""
self.__class__.objects.exclude(id=self.id).delete()
super(SingletonModel, self).save(*args, **kwargs)
@classmethod
def load(cls):
"""
Load object from the database. Failing that, create a new empty
(default) instance of the object and return it (without saving it
to the database).
"""
try:
return cls.objects.get()
except cls.DoesNotExist:
obj = cls()
obj.save()
return obj
| # Written by Senko Rasic <senko.rasic@goodcode.io>
# Released into Public Domain. Use it as you like.
from django.db import models
class SingletonModel(models.Model):
"""Singleton Django Model
Ensures there's always only one entry in the database, and can fix the
table (by deleting extra entries) even if added via another mechanism.
Also has a static load() method which always returns the object - from
the database if possible, or a new empty (default) instance if the
database is still empty. If your instance has sane defaults (recommended),
you can use it immediately without worrying if it was saved to the
database or not.
Useful for things like system-wide user-editable settings.
"""
class Meta:
abstract = True
def save(self, *args, **kwargs):
"""
Save object to the database. Removes all other entries if there
are any.
"""
self.__class__.objects.exclude(id=self.id).delete()
super(SingletonModel, self).save(*args, **kwargs)
@classmethod
def load(cls):
"""
Load object from the database. Failing that, create a new empty
(default) instance of the object and return it (without saving it
to the database).
"""
try:
print "Trying to load object from DB"
return cls.objects.get()
except cls.DoesNotExist:
print "Not found: Returning new object"
obj = cls()
obj.save()
return obj
| agpl-3.0 | Python |
4c5de3c7a031155488248563ab2d98fd6f40ba15 | improve http server example | jasonlvhit/whoops | examples/http/http_server.py | examples/http/http_server.py | from http.client import parse_headers
from io import BytesIO
from whoops import ioloop, async_server
class HttpServer(async_server.AsyncServer):
def __init__(self, ioloop, address):
super(HttpServer, self).__init__(ioloop, address)
self.host, self.port = address
self.rbufsize = -1
self.wbufsize = 0
self.rfile = None
self.wfile = None
self.connection = None
self.raw_requestline = None
self.headers = None
self._headers_buffer = []
def on_connection(self, conn):
self.connection = conn
self.parse_request()
self.do_response()
def parse_request(self):
data = self.connection.read()
self.rfile = BytesIO(data)
self.raw_requestline = self.rfile.readline(65537)
# print(self.raw_requestline)
self.header = parse_headers(self.rfile)
# print(self.header)
def do_response(self):
body = "<html><body><h2>Hello Whoops</h2></body></html>"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", len(body))
self.end_headers()
self.send_body(body)
def send_response(self, code, message=None):
self._headers_buffer.append(
("%s %d %s\r\n" % ('HTTP/1.1', code, message)).encode('latin-1', 'strict'))
self.send_header('Server', 'whoops/0.1')
def send_header(self, key, value):
self._headers_buffer.append(
("%s: %s\r\n" % (key, value)).encode('latin-1', 'strict'))
def end_headers(self):
self._headers_buffer.append(b"\r\n")
self.flush_headers()
def flush_headers(self):
self.send(b''.join(self._headers_buffer))
self._headers_buffer = []
def send_body(self, body):
self.send(body.encode('latin-1'))
def send(self, msg, body=None):
if body:
msg += body
self.connection.write(msg)
def close(self):
self.connection.close()
if __name__ == "__main__":
HttpServer(ioloop.IOLoop.instance(num_backends=1000),
('127.0.0.1', 8888)).listen()
| from http.client import parse_headers
from io import BytesIO
from whoops import ioloop, async_server
class HttpServer(async_server.AsyncServer):
def __init__(self, ioloop, address):
super(HttpServer, self).__init__(ioloop, address)
self.rbufsize = -1
self.wbufsize = 0
self.rfile = None
self.wfile = None
self.connection = None
self.raw_requestline = None
self.headers = None
self._headers_buffer = []
def on_connection(self, conn):
self.connection = conn
self.parse_request()
self.do_response()
def parse_request(self):
data = self.connection.read()
self.rfile = BytesIO(data)
self.raw_requestline = self.rfile.readline(65537)
self.header = parse_headers(self.rfile)
def do_response(self):
body = "<html><body><h2>Hello Whoops</h2></body></html>"
self.send_response(200)
self.send_header("Content-type", "text/html")
self.send_header("Content-Length", len(body))
self.end_headers()
self.send_body(body)
def send_response(self, code, message=None):
self._headers_buffer.append(
("%s %d %s\r\n" % ('HTTP/1.1', code, message)).encode('latin-1', 'strict'))
self.send_header('Server', 'whoops/0.1')
def send_header(self, key, value):
self._headers_buffer.append(
("%s: %s\r\n" % (key, value)).encode('latin-1', 'strict'))
def end_headers(self):
self._headers_buffer.append(b"\r\n")
self.flush_headers()
def flush_headers(self):
self.send(b''.join(self._headers_buffer))
self._headers_buffer = []
def send_body(self, body):
self.send(body.encode('latin-1'))
def send(self, msg, body=None):
if body:
msg += body
self.connection.write(msg)
if __name__ == "__main__":
HttpServer(ioloop.IOLoop.instance(num_backends=1000),
('127.0.0.1', 8888)).listen()
| mit | Python |
024a957e536681b93d2fd84f6c4616fadc689467 | Fix typo, thanks to Johannes Schumann | tamasgal/km3pipe,tamasgal/km3pipe | examples/nogallery/module_workflow.py | examples/nogallery/module_workflow.py | #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
__author__ = 'tamasgal'
from km3pipe.core import Pipeline, Module, Pump
class DummyPump(Pump):
"""A pump demonstration with a dummy list as data."""
def configure(self):
self.data = [{'nr': 1}, {'nr': 2}]
self.blobs = self.blob_generator()
def process(self, blob):
return next(self.blobs)
def blob_generator(self):
"""Create a blob generator."""
for blob in self.data:
yield blob
class Foo(Module):
"""A dummy module with optional and required parameters"""
def configure(self):
self.foo = self.get('foo') or 'default_foo' # optional
self.bar = self.get('bar') or 23 # optional
self.baz = self.require('baz') # required
self.i = 0
def process(self, blob):
print("This is the current blob: " + str(blob))
self.i += 1
blob['foo_entry'] = self.foo
return blob
def finish(self):
print("My process() method was called {} times.".format(self.i))
def moo(blob):
"""A simple function to attach"""
blob['moo_entry'] = 42
return blob
class PrintBlob(Module):
def process(self, blob):
print(blob)
return blob
pipe = Pipeline()
pipe.attach(DummyPump, 'the_pump')
pipe.attach(Foo, bar='dummybar', baz=69)
pipe.attach(moo)
pipe.attach(PrintBlob)
pipe.drain()
| #!/usr/bin/env python
from __future__ import division, absolute_import, print_function
__author__ = 'tamasgal'
from km3pipe.core import Pipeline, Module, Pump
class DummyPump(Pump):
"""A pump demonstration with a dummy list as data."""
def configure(self):
self.data = [{'nr': 1}, {'nr': 2}]
self.blobs = self.blob_generator()
def process(self, blob):
return next(self.blobs)
def blob_generator(self):
"""Create a blob generator."""
for blob in self.data:
yield blob
class Foo(Module):
"""A dummy module with optional and required parameters"""
def configure(self):
self.foo = self.get('foo') or 'default_foo' # optional
self.bar = self.get('bar') or 23 # optional
self.baz = self.require('baz') # required
self.i = 0
def process(self, blob):
print("This is the current blob: " + str(blob))
self.i += 1
blob['foo_entry'] = self.foo
return blob
def finish(self):
print("My process() method was called {} times.".format(self.i))
def moo(blob):
"""A simple function to attach"""
blob['moo_entry'] = 42
return blob
class PrintBlob(Module):
def process(self, blob):
print(blob)
return blob
pipe = Pipeline()
pipe.attach(Pump, 'the_pump')
pipe.attach(Foo, bar='dummybar', baz=69)
pipe.attach(moo)
pipe.attach(PrintBlob)
pipe.drain()
| mit | Python |
b90b464b3cf8dc2de0bded8b2554fd53fe9ce4d6 | Add test for dict-like | kragniz/json-sempai | test_sempai.py | test_sempai.py | import jsonsempai
import os
import shutil
import sys
import tempfile
TEST_FILE = '''{
"three": 3,
"one": {
"two": {
"three": 3
}
}
}'''
class TestSempai(object):
def setup(self):
self.direc = tempfile.mkdtemp(prefix='jsonsempai')
sys.path.append(self.direc)
with open(os.path.join(self.direc, 'sempai.json'), 'w') as f:
f.write(TEST_FILE)
def teardown(self):
sys.path.remove(self.direc)
shutil.rmtree(self.direc)
def test_import(self):
import sempai
def test_access(self):
import sempai
assert sempai.three == 3
def test_access_nested(self):
import sempai
assert sempai.one.two.three == 3
def test_acts_like_dict(self):
import sempai
assert sempai.one.two == {"three": 3}
def test_location(self):
import sempai
assert sempai.__file__ == os.path.join(self.direc, 'sempai.json')
| import jsonsempai
import os
import shutil
import sys
import tempfile
TEST_FILE = '''{
"three": 3,
"one": {
"two": {
"three": 3
}
}
}'''
class TestSempai(object):
def setup(self):
self.direc = tempfile.mkdtemp(prefix='jsonsempai')
sys.path.append(self.direc)
with open(os.path.join(self.direc, 'sempai.json'), 'w') as f:
f.write(TEST_FILE)
def teardown(self):
sys.path.remove(self.direc)
shutil.rmtree(self.direc)
def test_import(self):
import sempai
def test_access(self):
import sempai
assert sempai.three == 3
def test_access_nested(self):
import sempai
assert sempai.one.two.three == 3
def test_location(self):
import sempai
assert sempai.__file__ == os.path.join(self.direc, 'sempai.json')
| mit | Python |
5b9023fd4e338a065f813977c956ea7ac1bfff91 | Refactor examples/mnist/train_test.py from testing single epoch to single train step. This is to make the tests across examples consistent. | google/flax,google/flax | examples/mnist/train_test.py | examples/mnist/train_test.py | # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for flax.examples.mnist.train."""
from absl.testing import absltest
import train
import jax
from jax import random
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class TrainTest(absltest.TestCase):
def test_single_train_step(self):
train_ds, _ = train.get_datasets()
batch_size = 32
model = train.create_model(random.PRNGKey(0))
optimizer = train.create_optimizer(model, 0.1, 0.9)
_, train_metrics = \
train.train_step(optimizer=optimizer,
batch={k: v[:batch_size] for k, v in train_ds.items()})
self.assertLessEqual(train_metrics['loss'], 2.302)
self.assertGreaterEqual(train_metrics['accuracy'], 0.0625)
if __name__ == '__main__':
absltest.main()
| # Copyright 2020 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Tests for flax.examples.mnist.train."""
from absl.testing import absltest
import train
import jax
from jax import random
import numpy as onp
# Parse absl flags test_srcdir and test_tmpdir.
jax.config.parse_flags_with_absl()
class TrainTest(absltest.TestCase):
def test_train_one_epoch(self):
train_ds, test_ds = train.get_datasets()
input_rng = onp.random.RandomState(0)
model = train.create_model(random.PRNGKey(0))
optimizer = train.create_optimizer(model, 0.1, 0.9)
optimizer, train_metrics = train.train_epoch(optimizer, train_ds, 128, 0,
input_rng)
self.assertLessEqual(train_metrics['loss'], 0.27)
self.assertGreaterEqual(train_metrics['accuracy'], 0.92)
loss, accuracy = train.eval_model(optimizer.target, test_ds)
self.assertLessEqual(loss, 0.06)
self.assertGreaterEqual(accuracy, 0.98)
if __name__ == '__main__':
absltest.main()
| apache-2.0 | Python |
9006898036d9f877e450b3dad605450fb98a1f0d | Fix #265 - Wrong introspection rule for GeoDjango 1.0 | RaD/django-south,RaD/django-south,philipn/django-south,nimnull/django-south,philipn/django-south,nimnull/django-south,RaD/django-south | south/introspection_plugins/geodjango.py | south/introspection_plugins/geodjango.py | """
GeoDjango introspection rules
"""
import django
from django.conf import settings
from south.modelsinspector import add_introspection_rules
has_gis = "django.contrib.gis" in settings.INSTALLED_APPS
if has_gis:
# Alright,import the field
from django.contrib.gis.db.models.fields import GeometryField
# Make some introspection rules
if django.VERSION[0] == 1 and django.VERSION[1] >= 1:
# Django 1.1's gis module renamed these.
rules = [
(
(GeometryField, ),
[],
{
"srid": ["srid", {"default": 4326}],
"spatial_index": ["spatial_index", {"default": True}],
"dim": ["dim", {"default": 2}],
},
),
]
else:
rules = [
(
(GeometryField, ),
[],
{
"srid": ["_srid", {"default": 4326}],
"spatial_index": ["_index", {"default": True}],
"dim": ["_dim", {"default": 2}],
},
),
]
# Install them
add_introspection_rules(rules, ["^django\.contrib\.gis"]) | """
GeoDjango introspection rules
"""
import django
from django.conf import settings
from south.modelsinspector import add_introspection_rules
has_gis = "django.contrib.gis" in settings.INSTALLED_APPS
if has_gis:
# Alright,import the field
from django.contrib.gis.db.models.fields import GeometryField
# Make some introspection rules
if django.VERSION[0] == 1 and django.VERSION[1] >= 1:
# Django 1.1's gis module renamed these.
rules = [
(
(GeometryField, ),
[],
{
"srid": ["srid", {"default": 4326}],
"spatial_index": ["spatial_index", {"default": True}],
"dim": ["dim", {"default": 2}],
},
),
]
else:
rules = [
(
(GeometryField, ),
[],
{
"srid": ["_srid", {"default": 4326}],
"spatial_index": ["_spatial_index", {"default": True}],
"dim": ["_dim", {"default": 2}],
},
),
]
# Install them
add_introspection_rules(rules, ["^django\.contrib\.gis"]) | apache-2.0 | Python |
289f23df515c168ef7d2ae57e6054fefa07def3c | Test beam parsing | honnibal/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,spacy-io/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,aikramer2/spaCy,aikramer2/spaCy,aikramer2/spaCy,recognai/spaCy,recognai/spaCy,recognai/spaCy,spacy-io/spaCy,spacy-io/spaCy,aikramer2/spaCy,recognai/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,aikramer2/spaCy,explosion/spaCy,recognai/spaCy | spacy/tests/parser/test_neural_parser.py | spacy/tests/parser/test_neural_parser.py | # coding: utf8
from __future__ import unicode_literals
from thinc.neural import Model
from mock import Mock
import pytest
import numpy
from ..._ml import chain, Tok2Vec, doc2feats
from ...vocab import Vocab
from ...pipeline import TokenVectorEncoder
from ...syntax.arc_eager import ArcEager
from ...syntax.nn_parser import Parser
from ...tokens.doc import Doc
from ...gold import GoldParse
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def arc_eager(vocab):
actions = ArcEager.get_actions(left_labels=['L'], right_labels=['R'])
return ArcEager(vocab.strings, actions)
@pytest.fixture
def tok2vec():
return Tok2Vec(8, 100, preprocess=doc2feats())
@pytest.fixture
def parser(vocab, arc_eager):
return Parser(vocab, moves=arc_eager, model=None)
@pytest.fixture
def model(arc_eager, tok2vec):
return Parser.Model(arc_eager.n_moves, token_vector_width=tok2vec.nO)[0]
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=['a', 'b', 'c'])
@pytest.fixture
def gold(doc):
return GoldParse(doc, heads=[1, 1, 1], deps=['L', 'ROOT', 'R'])
def test_can_init_nn_parser(parser):
assert parser.model is None
def test_build_model(parser):
parser.model = Parser.Model(parser.moves.n_moves)[0]
assert parser.model is not None
def test_predict_doc(parser, tok2vec, model, doc):
doc.tensor = tok2vec([doc])[0]
parser.model = model
parser(doc)
def test_update_doc(parser, tok2vec, model, doc, gold):
parser.model = model
tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
d_tokvecs = parser.update(([doc], tokvecs), [gold])
assert d_tokvecs[0].shape == tokvecs[0].shape
def optimize(weights, gradient, key=None):
weights -= 0.001 * gradient
bp_tokvecs(d_tokvecs, sgd=optimize)
assert d_tokvecs[0].sum() == 0.
def test_predict_doc_beam(parser, tok2vec, model, doc):
doc.tensor = tok2vec([doc])[0]
parser.model = model
parser(doc, beam_width=32, beam_density=0.001)
for word in doc:
print(word.text, word.head, word.dep_)
| # coding: utf8
from __future__ import unicode_literals
from thinc.neural import Model
from mock import Mock
import pytest
import numpy
from ..._ml import chain, Tok2Vec, doc2feats
from ...vocab import Vocab
from ...pipeline import TokenVectorEncoder
from ...syntax.arc_eager import ArcEager
from ...syntax.nn_parser import Parser
from ...tokens.doc import Doc
from ...gold import GoldParse
@pytest.fixture
def vocab():
return Vocab()
@pytest.fixture
def arc_eager(vocab):
actions = ArcEager.get_actions(left_labels=['L'], right_labels=['R'])
return ArcEager(vocab.strings, actions)
@pytest.fixture
def tok2vec():
return Tok2Vec(8, 100, preprocess=doc2feats())
@pytest.fixture
def parser(vocab, arc_eager):
return Parser(vocab, moves=arc_eager, model=None)
@pytest.fixture
def model(arc_eager, tok2vec):
return Parser.Model(arc_eager.n_moves, token_vector_width=tok2vec.nO)[0]
@pytest.fixture
def doc(vocab):
return Doc(vocab, words=['a', 'b', 'c'])
@pytest.fixture
def gold(doc):
return GoldParse(doc, heads=[1, 1, 1], deps=['L', 'ROOT', 'R'])
def test_can_init_nn_parser(parser):
assert parser.model is None
def test_build_model(parser):
parser.model = Parser.Model(parser.moves.n_moves)[0]
assert parser.model is not None
def test_predict_doc(parser, tok2vec, model, doc):
doc.tensor = tok2vec([doc])[0]
parser.model = model
parser(doc)
def test_update_doc(parser, tok2vec, model, doc, gold):
parser.model = model
tokvecs, bp_tokvecs = tok2vec.begin_update([doc])
d_tokvecs = parser.update(([doc], tokvecs), [gold])
assert d_tokvecs[0].shape == tokvecs[0].shape
def optimize(weights, gradient, key=None):
weights -= 0.001 * gradient
bp_tokvecs(d_tokvecs, sgd=optimize)
assert d_tokvecs[0].sum() == 0.
| mit | Python |
91f260f2c4c260f51e9a2529da6c028b968e5ce7 | Add another test for #1971 | explosion/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,honnibal/spaCy,honnibal/spaCy,spacy-io/spaCy | spacy/tests/regression/test_issue1971.py | spacy/tests/regression/test_issue1971.py | # coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Token, Doc
@pytest.mark.xfail
def test_issue1971(en_vocab):
# Possibly related to #2675 and #2671?
matcher = Matcher(en_vocab)
pattern = [
{"ORTH": "Doe"},
{"ORTH": "!", "OP": "?"},
{"_": {"optional": True}, "OP": "?"},
{"ORTH": "!", "OP": "?"},
]
Token.set_extension("optional", default=False)
matcher.add("TEST", None, pattern)
doc = Doc(en_vocab, words=["Hello", "John", "Doe", "!"])
# We could also assert length 1 here, but this is more conclusive, because
# the real problem here is that it returns a duplicate match for a match_id
# that's not actually in the vocab!
assert all(match_id in en_vocab.strings for match_id, start, end in matcher(doc))
@pytest.mark.xfail
def test_issue_1971_2(en_vocab):
matcher = Matcher(en_vocab)
pattern1 = [{"LOWER": {"IN": ["eur"]}}, {"LIKE_NUM": True}]
pattern2 = list(reversed(pattern1))
doc = Doc(en_vocab, words=["EUR", "10", "is", "10", "EUR"])
matcher.add("TEST", None, pattern1, pattern2)
matches = matcher(doc)
assert len(matches) == 2
@pytest.mark.xfail
def test_issue_1971_3(en_vocab):
"""Test that pattern matches correctly for multiple extension attributes."""
Token.set_extension("a", default=1)
Token.set_extension("b", default=2)
doc = Doc(en_vocab, words=["hello", "world"])
matcher = Matcher(en_vocab)
matcher.add("A", None, [{"_": {"a": 1}}])
matcher.add("B", None, [{"_": {"b": 2}}])
matches = sorted((en_vocab.strings[m_id], s, e) for m_id, s, e in matcher(doc))
assert len(matches) == 4
assert matches == sorted([("A", 0, 1), ("A", 1, 2), ("B", 0, 1), ("B", 1, 2)])
# @pytest.mark.xfail
def test_issue_1971_4(en_vocab):
"""Test that pattern matches correctly with multiple extension attribute
values on a single token.
"""
Token.set_extension("ext_a", default="str_a")
Token.set_extension("ext_b", default="str_b")
matcher = Matcher(en_vocab)
doc = Doc(en_vocab, words=["this", "is", "text"])
pattern = [{"_": {"ext_a": "str_a", "ext_b": "str_b"}}] * 3
matcher.add("TEST", None, pattern)
matches = matcher(doc)
# Interesting: uncommenting this causes a segmentation fault, so there's
# definitely something going on here
# assert len(matches) == 1
| # coding: utf8
from __future__ import unicode_literals
import pytest
from spacy.matcher import Matcher
from spacy.tokens import Token, Doc
@pytest.mark.xfail
def test_issue1971(en_vocab):
# Possibly related to #2675 and #2671?
matcher = Matcher(en_vocab)
pattern = [
{"ORTH": "Doe"},
{"ORTH": "!", "OP": "?"},
{"_": {"optional": True}, "OP": "?"},
{"ORTH": "!", "OP": "?"},
]
Token.set_extension("optional", default=False)
matcher.add("TEST", None, pattern)
doc = Doc(en_vocab, words=["Hello", "John", "Doe", "!"])
# We could also assert length 1 here, but this is more conclusive, because
# the real problem here is that it returns a duplicate match for a match_id
# that's not actually in the vocab!
assert all(match_id in en_vocab.strings for match_id, start, end in matcher(doc))
@pytest.mark.xfail
def test_issue_1971_2(en_vocab):
matcher = Matcher(en_vocab)
pattern1 = [{"LOWER": {"IN": ["eur"]}}, {"LIKE_NUM": True}]
pattern2 = list(reversed(pattern1))
doc = Doc(en_vocab, words=["EUR", "10", "is", "10", "EUR"])
matcher.add("TEST", None, pattern1, pattern2)
matches = matcher(doc)
assert len(matches) == 2
@pytest.mark.xfail
def test_issue_1971_3(en_vocab):
"""Test that pattern matches correctly for multiple extension attributes."""
Token.set_extension("a", default=1)
Token.set_extension("b", default=2)
doc = Doc(en_vocab, words=["hello", "world"])
matcher = Matcher(en_vocab)
matcher.add("A", None, [{"_": {"a": 1}}])
matcher.add("B", None, [{"_": {"b": 2}}])
matches = sorted((en_vocab.strings[m_id], s, e) for m_id, s, e in matcher(doc))
assert len(matches) == 4
assert matches == sorted([("A", 0, 1), ("A", 1, 2), ("B", 0, 1), ("B", 1, 2)])
| mit | Python |
83f6775d0fb427ef9a15cb9dadd3cdf044620623 | Add ondelete. | Scifabric/pybossa,PyBossa/pybossa,PyBossa/pybossa,Scifabric/pybossa | pybossa/model/project_stats.py | pybossa/model/project_stats.py | # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import Integer, Text, Float
from sqlalchemy.schema import Column, ForeignKey
from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.mutable import MutableDict
class ProjectStats(db.Model, DomainObject):
'''A Table with Project Stats for Projects.'''
__tablename__ = 'project_stats'
#: ID
id = Column(Integer, primary_key=True)
#: Project ID
project_id = Column(Integer, ForeignKey('project.id', ondelete='CASCADE'),
nullable=False)
#: Number of tasks
n_tasks = Column(Integer, default=0)
#: Number of task runs
n_task_runs = Column(Integer, default=0)
#: Number of results
n_results = Column(Integer, default=0)
#: Number of volunteers
n_volunteers = Column(Integer, default=0)
#: Number of completed tasks
n_completed_tasks = Column(Integer, default=0)
#: Overall progress
overall_progress = Column(Integer, default=0)
#: Average time to complete a task
average_time = Column(Float, default=0)
#: Number of blog posts
n_blogposts = Column(Integer, default=0)
#: Last Activity
last_activity = Column(Text, default=make_timestamp)
#: Stats payload
info = Column(MutableDict.as_mutable(JSON), default=dict())
| # -*- coding: utf8 -*-
# This file is part of PYBOSSA.
#
# Copyright (C) 2015 Scifabric LTD.
#
# PYBOSSA is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PYBOSSA is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PYBOSSA. If not, see <http://www.gnu.org/licenses/>.
from sqlalchemy import Integer, Text, Float
from sqlalchemy.schema import Column, ForeignKey
from pybossa.core import db
from pybossa.model import DomainObject, make_timestamp
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.ext.mutable import MutableDict
class ProjectStats(db.Model, DomainObject):
'''A Table with Project Stats for Projects.'''
__tablename__ = 'project_stats'
#: Webook ID
id = Column(Integer, primary_key=True)
#: Webhook created (aka triggered)
project_id = Column(Integer, ForeignKey('project.id', ondelete='CASCADE'),
nullable=False)
#: Number of tasks
n_tasks = Column(Integer, default=0)
#: Number of task runs
n_task_runs = Column(Integer, default=0)
#: Number of results
n_results = Column(Integer, default=0)
#: Number of volunteers
n_volunteers = Column(Integer, default=0)
#: Number of completed tasks
n_completed_tasks = Column(Integer, default=0)
#: Overall progress
overall_progress = Column(Integer, default=0)
#: Average time to complete a task
average_time = Column(Float, default=0)
#: Number of blog posts
n_blogposts = Column(Integer, default=0)
#: Last Activity
last_activity = Column(Text, default=make_timestamp)
#: Stats payload
info = Column(MutableDict.as_mutable(JSON), default=dict())
| agpl-3.0 | Python |
797bd9842faa4b1cd29d713aa4721379fd56e2b6 | Add test-credentials command | leosac/leosac,islog/leosac,leosac/leosac,leosac/leosac,islog/leosac,islog/leosac | python/leosacpy/scripts/cli.py | python/leosacpy/scripts/cli.py | import logging
from types import SimpleNamespace
import asyncio
import click
import colorama
from click_repl import register_repl
import leosacpy.cli.dev.dev
from leosacpy.utils import guess_root_dir
# The debug flag is set globally.
# This is because when running "leosaccli --debug shell"
# we want all command running in the REPL to have DEBUG flag
# turned on.
from leosacpy.wsclient import LeosacAPI
sticky_debug_flag = None
@click.group()
@click.option('--debug/--no-debug', help='Debug mode', default=False)
@click.option('--root-dir', '-r', help='Leosac root directory')
@click.pass_context
def cli_entry_point(ctx, debug, root_dir):
colorama.init(autoreset=True)
ctx.obj = SimpleNamespace()
global sticky_debug_flag
if sticky_debug_flag is None:
print('Setting sticky_debug_flag to {}'.format(debug))
sticky_debug_flag = debug
ctx.obj.DEBUG = sticky_debug_flag
logging.info('DEBUG mode: {}'.format(ctx.obj.DEBUG))
ctx.obj.root_dir = root_dir or guess_root_dir()
if not ctx.obj.root_dir:
logging.warning('Running without a Leosac root directory.')
@cli_entry_point.command(name='test-credentials')
@click.argument('username')
@click.argument('password')
@click.pass_context
def test_credential(ctx, username, password):
c = LeosacAPI(target='ws://127.0.0.1:8888')
def r():
c.authenticate(username, password)
v = asyncio.get_event_loop().run_until_complete(c.authenticate(username, password))
asyncio.get_event_loop().run_until_complete(c.close())
print('Auth result: {}'.format(v))
cli_entry_point.add_command(leosacpy.cli.dev.dev.dev_cmd_group)
logging.basicConfig(level=logging.DEBUG)
register_repl(cli_entry_point, name='shell')
| import logging
from types import SimpleNamespace
import click
import colorama
from click_repl import register_repl
import leosacpy.cli.dev.dev
from leosacpy.utils import guess_root_dir
# The debug flag is set globally.
# This is because when running "leosaccli --debug shell"
# we want all command running in the REPL to have DEBUG flag
# turned on.
sticky_debug_flag = None
@click.group()
@click.option('--debug/--no-debug', help='Debug mode', default=False)
@click.option('--root-dir', '-r', help='Leosac root directory')
@click.pass_context
def cli_entry_point(ctx, debug, root_dir):
colorama.init(autoreset=True)
ctx.obj = SimpleNamespace()
global sticky_debug_flag
if sticky_debug_flag is None:
print('Setting sticky_debug_flag to {}'.format(debug))
sticky_debug_flag = debug
ctx.obj.DEBUG = sticky_debug_flag
logging.info('DEBUG mode: {}'.format(ctx.obj.DEBUG))
ctx.obj.root_dir = root_dir or guess_root_dir()
if not ctx.obj.root_dir:
logging.warning('Running without a Leosac root directory.')
cli_entry_point.add_command(leosacpy.cli.dev.dev.dev_cmd_group)
logging.basicConfig(level=logging.DEBUG)
register_repl(cli_entry_point, name='shell')
| agpl-3.0 | Python |
f9d606fd7b5833ed43c5f2ae720f7dcc4764543e | Update serializer.py | AndreiDrang/python-rucaptcha | python_rucaptcha/serializer.py | python_rucaptcha/serializer.py | from uuid import uuid4
from pydantic import Field, BaseModel, validator
class CaptchaOptionsSer(BaseModel):
phrase: bool = False
caseSensitive: bool = False
numeric: int = 0
calc: bool = False
minLen: int = 0
maxLen: int = 0
lang: str = ""
hintText: str = ""
hintImg: str = ""
softId: str = "1899"
@validator("numeric")
def numeric_check(cls, value):
if value not in range(1, 5):
raise ValueError("Invalid `numeric` param value")
return value
@validator("minLen", "maxLen")
def len_check(cls, value):
if value not in range(0, 21):
raise ValueError("Invalid `minLen \ maxLen` param value")
return value
@validator("hintText")
def hint_text_check(cls, value):
if len(value) > 140:
raise ValueError("Invalid `hintText` param value")
return value
@validator("softId")
def soft_id_set(cls, value):
value.update({"softId": "1899"})
return value
class NormalCaptchaSer(BaseModel):
method: str = "normal"
requestId: str = Field(default_factory=uuid4)
body: str = str()
options: "CaptchaOptionsSer" = CaptchaOptionsSer()
class TextCaptchaSer(BaseModel):
method: str = "text"
requestId: str = Field(default_factory=uuid4)
body: str = str()
options: "CaptchaOptionsSer" = CaptchaOptionsSer()
class SocketResponse(BaseModel):
method: str = str()
success: bool = None
code: str = str()
# captcha task ID at RuCaptcha service
captchaId: int = -1
# manually generated requestID
requestId: str = Field(default_factory=uuid4)
error: str = str()
# specific fields for balance request response
balance: float = 0
valute: str = str()
class SockAuthSer(BaseModel):
method: str = "auth"
requestId: str = Field(default_factory=uuid4)
key: str
options: dict
| from datetime import datetime
from typing import List, Optional
from pydantic import BaseModel
from pydantic import BaseModel, ValidationError, validator
from uuid import uuid4
class CaptchaOptionsSer(BaseModel):
phrase: bool = False
caseSensitive: bool = False
numeric: int = 0
calc: bool = False
minLen: int = 0
maxLen: int = 0
lang: str = ""
hintText: str = ""
hintImg: str = ""
softId: str = "1899"
@validator("numeric")
def numeric_check(cls, value):
if value not in range(1, 5):
raise ValueError("Invalid `numeric` param value")
return value
@validator("minLen", "maxLen")
def len_check(cls, value):
if value not in range(0, 21):
raise ValueError("Invalid `minLen \ maxLen` param value")
return value
@validator("hintText")
def hint_text_check(cls, value):
if len(value) > 140:
raise ValueError("Invalid `hintText` param value")
return value
@validator("softId")
def soft_id_set(cls, value):
value.update({"softId": "1899"})
return value
class NormalCaptchaSer(BaseModel):
method: str = "normal"
requestId: str = uuid4()
body: str
options: "CaptchaOptionsSer" = CaptchaOptionsSer()
class TextCaptchaSer(BaseModel):
method: str = "normal"
requestId: str = uuid4()
text: str
options: "CaptchaOptionsSer" = CaptchaOptionsSer()
| mit | Python |
3712295e7f3a1e54e468a2d3a1095ebb4d7154db | implement naive bayes fitting | becxer/pytrain | pytrain/nbayes/basic_nbayes.py | pytrain/nbayes/basic_nbayes.py | from numpy import *
class basic_nbayes:
def __init__(self, mat_data, label_data):
self.word_data = mat_data
self.num_word = 0
self.cate_data = label_data
self.cate_set = {}
self.num_cate = 0
self.cate_word = {}
self.cate_word_sum = {}
def fit(self):
self.num_word = len(self.word_data[0])
for i, cate in enumerate(self.cate_data):
self.cate_word[cate] = self.cate_word.get(cate, \
zeros(self.num_word)) + self.word_data[i]
self.cate_set[cate] = self.cate_set.get(cate, 0) + 1
for cate in self.cate_word:
self.cate_word_sum[cate] = self.cate_word[cate].sum(axis=0)
self.num_cate = len(self.cate_set)
def predict(self, array_input):
pass
|
class basic_nbayes:
def __init__(self, mat_data, label_data):
self.mat_data = mat_data
self.label_data = label_data
pass
def fit(self):
print "fitting----------------------"
num_row = len(self.mat_data)
num_col = len(self.mat_data[0])
pass
def predict(self, array_input):
pass
| mit | Python |
3590b8bfebd14f439270b2fb89e9e6c3c6e5ed6f | fix name of test | eirmag/weboob,nojhan/weboob-devel,Boussadia/weboob,franek/weboob,eirmag/weboob,Boussadia/weboob,Konubinix/weboob,laurent-george/weboob,frankrousseau/weboob,yannrouillard/weboob,willprice/weboob,franek/weboob,sputnick-dev/weboob,frankrousseau/weboob,laurent-george/weboob,Boussadia/weboob,frankrousseau/weboob,RouxRC/weboob,laurent-george/weboob,sputnick-dev/weboob,nojhan/weboob-devel,Boussadia/weboob,RouxRC/weboob,RouxRC/weboob,sputnick-dev/weboob,Konubinix/weboob,Konubinix/weboob,franek/weboob,eirmag/weboob,yannrouillard/weboob,yannrouillard/weboob,willprice/weboob,willprice/weboob,nojhan/weboob-devel | weboob/backends/cragr/test.py | weboob/backends/cragr/test.py | # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.tools.test import BackendTest
class CrAgrTest(BackendTest):
BACKEND = 'cragr'
def test_cragr(self):
l = [a for a in self.backend.iter_accounts()]
if len(l) > 0:
a = l[0]
[o for o in self.backend.iter_history(a)]
| # -*- coding: utf-8 -*-
# Copyright(C) 2010 Romain Bignon
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
from weboob.tools.test import BackendTest
class CrAgrTest(BackendTest):
BACKEND = 'cragr'
def test_bnporc(self):
l = [a for a in self.backend.iter_accounts()]
if len(l) > 0:
a = l[0]
[o for o in self.backend.iter_history(a)]
| agpl-3.0 | Python |
cd1b264ea8ee45ecca403dbcd76ef2a21311e8c6 | add regex routine | OpenTransitTools/utils | ott/utils/re_utils.py | ott/utils/re_utils.py | import re
import logging
log = logging.getLogger(__file__)
def contains(regexp, str):
''' does string have one or more instances of regexp
'''
ret_val = False
try:
a = re.findall(regexp, str.strip())
if a and len(a) > 0:
ret_val = True
except:
pass
return ret_val
| import re
import logging
log = logging.getLogger(__file__)
| mpl-2.0 | Python |
f00c0c7d2d275ae8daf4442ab922022b28f67cd8 | Add marketing dependencies. | Yajo/website,open-synergy/website,gfcapalbo/website,xpansa/website,LasLabs/website,nuobit/website,Antiun/website,Endika/website,acsone/website,nuobit/website,acsone/website,gfcapalbo/website,Antiun/website,Yajo/website,open-synergy/website,nuobit/website,acsone/website,open-synergy/website,xpansa/website,open-synergy/website,Yajo/website,brain-tec/website,kaerdsar/website,Antiun/website,LasLabs/website,gfcapalbo/website,pedrobaeza/website,nuobit/website,LasLabs/website,pedrobaeza/website,Endika/website,brain-tec/website,kaerdsar/website,Endika/website,acsone/website,xpansa/website,brain-tec/website,Endika/website,Yajo/website,xpansa/website,pedrobaeza/website,LasLabs/website,gfcapalbo/website,pedrobaeza/website,Antiun/website,brain-tec/website,kaerdsar/website | website_slides/__openerp__.py | website_slides/__openerp__.py | # -*- coding: utf-8 -*-
# #############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2014-TODAY Odoo SA (<https://www.odoo.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Slides',
'version': '1.0',
'summary': 'Share and Publish Videos, Presentations and Documents',
'category': 'website',
'author': "Odoo SA, "
"Incaser Informatica - Sergio Teruel, "
"Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/website',
'depends': ['website',
'website_mail',
'marketing'],
'data': [
'views/res_config.xml',
'views/website_slides.xml',
'views/website_slides_embed.xml',
'views/website_slides_backend.xml',
'views/website_templates.xml',
'data/website_slides_data.xml',
'security/ir.model.access.csv',
'security/website_slides_security.xml'
],
'demo': [
'data/website_slides_demo.xml'
],
'installable': True,
}
| # -*- coding: utf-8 -*-
# #############################################################################
#
# Odoo, Open Source Management Solution
# Copyright (C) 2014-TODAY Odoo SA (<https://www.odoo.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Slides',
'version': '1.0',
'summary': 'Share and Publish Videos, Presentations and Documents',
'category': 'website',
'author': "Odoo SA, "
"Incaser Informatica - Sergio Teruel, "
"Odoo Community Association (OCA)",
'website': 'https://github.com/OCA/website',
'depends': ['website', 'website_mail'],
'data': [
'views/res_config.xml',
'views/website_slides.xml',
'views/website_slides_embed.xml',
'views/website_slides_backend.xml',
'views/website_templates.xml',
'data/website_slides_data.xml',
'security/ir.model.access.csv',
'security/website_slides_security.xml'
],
'demo': [
'data/website_slides_demo.xml'
],
'installable': True,
}
| agpl-3.0 | Python |
fe0c7836f054011017c7a5018d433bbf6e26b078 | Update agent for environment. | danieloconell/Louis | reinforcement-learning/play.py | reinforcement-learning/play.py | """This is the agent which currently takes the action with highest immediate reward."""
import env
import time
env.make("pygame")
for episode in range(10):
env.reset()
episode_reward = 0
for t in range(100):
episode_reward += env.actual_reward
if env.done:
print(
"Episode %d finished after %d timesteps, with reward %d"
% ((episode + 1), (t + 1), episode_reward))
break
max_action = 0
index = -1
for item in env.actions:
if env.create_reward(item) > max_action:
max_action = env.create_reward(item)
action = [item, index]
else:
index += 1
print(action[0])
episode_reward += env.create_reward(action[0])
env.action(action[0])
env.render()
| """This is the agent which currently takes the action with highest immediate reward."""
import env
env.make("text")
for episode in range(10):
env.reset()
episode_reward = 0
for t in range(100):
episode_reward += env.actual_reward
if env.done:
print(
"Episode %d finished after %d timesteps, with reward %d"
% ((episode + 1), (t + 1), episode_reward))
break
max_action = 0
index = -1
for item in env.actions:
if env.create_reward(item) > max_action:
max_action = env.create_reward(item)
action = [item, index]
else:
index += 1
print(action[0])
episode_reward += env.create_reward(action[0])
env.action(action[0])
env.render()
| mit | Python |
7691287e5f2563db8a0a6dcf2de042306b359d46 | Fix buildbots that are failing due to this test by adding all expected fails that TestMultipleDebuggers.py has. | llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb | packages/Python/lldbsuite/test/api/multiple-targets/TestMultipleTargets.py | packages/Python/lldbsuite/test/api/multiple-targets/TestMultipleTargets.py | """Test the lldb public C++ api when creating multiple targets simultaneously."""
from __future__ import print_function
import os
import re
import subprocess
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestMultipleTargets(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfNoSBHeaders
@skipIfHostIncompatibleWithRemote
@expectedFailureAll(
archs="i[3-6]86",
bugnumber="multi-process-driver.cpp creates an x64 target")
@expectedFailureAll(
oslist=[
"windows",
"linux",
"freebsd"],
bugnumber="llvm.org/pr20282")
def test_multiple_debuggers(self):
env = {self.dylibPath: self.getLLDBLibraryEnvVal()}
self.driver_exe = os.path.join(os.getcwd(), "multi-target")
self.buildDriver('main.cpp', self.driver_exe)
self.addTearDownHook(lambda: os.remove(self.driver_exe))
self.signBinary(self.driver_exe)
# check_call will raise a CalledProcessError if multi-process-driver doesn't return
# exit code 0 to indicate success. We can let this exception go - the test harness
# will recognize it as a test failure.
if self.TraceOn():
print("Running test %s" % self.driver_exe)
check_call([self.driver_exe, self.driver_exe], env=env)
else:
with open(os.devnull, 'w') as fnull:
check_call([self.driver_exe, self.driver_exe],
env=env, stdout=fnull, stderr=fnull)
| """Test the lldb public C++ api when creating multiple targets simultaneously."""
from __future__ import print_function
import os
import re
import subprocess
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class TestMultipleTargets(TestBase):
mydir = TestBase.compute_mydir(__file__)
NO_DEBUG_INFO_TESTCASE = True
@skipIfNoSBHeaders
@skipIfHostIncompatibleWithRemote
def test_multiple_debuggers(self):
env = {self.dylibPath: self.getLLDBLibraryEnvVal()}
self.driver_exe = os.path.join(os.getcwd(), "multi-target")
self.buildDriver('main.cpp', self.driver_exe)
self.addTearDownHook(lambda: os.remove(self.driver_exe))
self.signBinary(self.driver_exe)
# check_call will raise a CalledProcessError if multi-process-driver doesn't return
# exit code 0 to indicate success. We can let this exception go - the test harness
# will recognize it as a test failure.
if self.TraceOn():
print("Running test %s" % self.driver_exe)
check_call([self.driver_exe, self.driver_exe], env=env)
else:
with open(os.devnull, 'w') as fnull:
check_call([self.driver_exe, self.driver_exe],
env=env, stdout=fnull, stderr=fnull)
| apache-2.0 | Python |
5c851ee3d333518829ce26bfc06fd1038e70651c | Add util to temporarily alter log levels | qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,dimagi/commcare-hq,qedsoftware/commcare-hq,qedsoftware/commcare-hq | corehq/util/decorators.py | corehq/util/decorators.py | from functools import wraps
import logging
from corehq.util.global_request import get_request
from dimagi.utils.logging import notify_exception
class ContextDecorator(object):
"""
A base class that enables a context manager to also be used as a decorator.
https://docs.python.org/3/library/contextlib.html#contextlib.ContextDecorator
"""
def __call__(self, fn):
@wraps(fn)
def decorated(*args, **kwds):
with self:
return fn(*args, **kwds)
return decorated
def handle_uncaught_exceptions(mail_admins=True):
"""Decorator to log uncaught exceptions and prevent them from
bubbling up the call chain.
"""
def _outer(fn):
@wraps(fn)
def _handle_exceptions(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
msg = "Uncaught exception from {}.{}".format(fn.__module__, fn.__name__)
if mail_admins:
notify_exception(get_request(), msg)
else:
logging.exception(msg)
return _handle_exceptions
return _outer
class change_log_level(ContextDecorator):
"""
Temporarily change the log level of a specific logger.
Can be used as either a context manager or decorator.
"""
def __init__(self, logger, level):
self.logger = logging.getLogger(logger)
self.new_level = level
self.original_level = self.logger.level
def __enter__(self):
self.logger.setLevel(self.new_level)
def __exit__(self, exc_type, exc_val, exc_tb):
self.logger.setLevel(self.original_level)
| from functools import wraps
import logging
from corehq.util.global_request import get_request
from dimagi.utils.logging import notify_exception
def handle_uncaught_exceptions(mail_admins=True):
"""Decorator to log uncaught exceptions and prevent them from
bubbling up the call chain.
"""
def _outer(fn):
@wraps(fn)
def _handle_exceptions(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
msg = "Uncaught exception from {}.{}".format(fn.__module__, fn.__name__)
if mail_admins:
notify_exception(get_request(), msg)
else:
logging.exception(msg)
return _handle_exceptions
return _outer
| bsd-3-clause | Python |
a2c965ea7e8c30378bb8d8f5c7977c45eb830423 | Comment on AsyncNotifier upstream's availability | onponomarev/ganeti,andir/ganeti,ganeti-github-testing/ganeti-test-1,apyrgio/ganeti,andir/ganeti,ganeti/ganeti,onponomarev/ganeti,mbakke/ganeti,leshchevds/ganeti,dimara/ganeti,apyrgio/snf-ganeti,apyrgio/snf-ganeti,yiannist/ganeti,leshchevds/ganeti,ganeti-github-testing/ganeti-test-1,mbakke/ganeti,yiannist/ganeti,yiannist/ganeti,bitemyapp/ganeti,ganeti/ganeti,apyrgio/ganeti,grnet/snf-ganeti,mbakke/ganeti,bitemyapp/ganeti,andir/ganeti,dimara/ganeti,ganeti/ganeti,grnet/snf-ganeti,leshchevds/ganeti | lib/asyncnotifier.py | lib/asyncnotifier.py | #
#
# Copyright (C) 2009 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Asynchronous pyinotify implementation"""
import asyncore
try:
# pylint: disable-msg=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
# We contributed the AsyncNotifier class back to python-pyinotify, and it's
# part of their codebase since version 0.8.7. This code can be removed once
# we'll be ready to depend on python-pyinotify >= 0.8.7
class AsyncNotifier(asyncore.file_dispatcher):
"""An asyncore dispatcher for inotify events.
"""
# pylint: disable-msg=W0622,W0212
def __init__(self, watch_manager, default_proc_fun=None, map=None):
"""Initializes this class.
This is a a special asyncore file_dispatcher that actually wraps a
pyinotify Notifier, making it asyncronous.
"""
if default_proc_fun is None:
default_proc_fun = pyinotify.ProcessEvent()
self.notifier = pyinotify.Notifier(watch_manager, default_proc_fun)
# here we need to steal the file descriptor from the notifier, so we can
# use it in the global asyncore select, and avoid calling the
# check_events() function of the notifier (which doesn't allow us to select
# together with other file descriptors)
self.fd = self.notifier._fd
asyncore.file_dispatcher.__init__(self, self.fd, map)
def handle_read(self):
self.notifier.read_events()
self.notifier.process_events()
| #
#
# Copyright (C) 2009 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Asynchronous pyinotify implementation"""
import asyncore
try:
# pylint: disable-msg=E0611
from pyinotify import pyinotify
except ImportError:
import pyinotify
class AsyncNotifier(asyncore.file_dispatcher):
"""An asyncore dispatcher for inotify events.
"""
# pylint: disable-msg=W0622,W0212
def __init__(self, watch_manager, default_proc_fun=None, map=None):
"""Initializes this class.
This is a a special asyncore file_dispatcher that actually wraps a
pyinotify Notifier, making it asyncronous.
"""
if default_proc_fun is None:
default_proc_fun = pyinotify.ProcessEvent()
self.notifier = pyinotify.Notifier(watch_manager, default_proc_fun)
# here we need to steal the file descriptor from the notifier, so we can
# use it in the global asyncore select, and avoid calling the
# check_events() function of the notifier (which doesn't allow us to select
# together with other file descriptors)
self.fd = self.notifier._fd
asyncore.file_dispatcher.__init__(self, self.fd, map)
def handle_read(self):
self.notifier.read_events()
self.notifier.process_events()
| bsd-2-clause | Python |
a35d6f59d214741f554dde1363d2eac7addb04cb | Add limitations to package documentation | orome/crypto-enigma-py | crypto_enigma/__init__.py | crypto_enigma/__init__.py | #!/usr/bin/env python
# encoding: utf8
"""An Enigma machine simulator with rich textual display functionality.
Limitations
~~~~~~~~~~~
Note that the correct display of some characters used to represent
components (thin Naval rotors) assumes support for Unicode, while some
aspects of the display of machine state depend on support for combining
Unicode. This is a `known
limitation <https://github.com/orome/crypto-enigma-py/issues/1>`__ that
will be addressed in a future release.
Note also that at the start of any scripts that use this package, you should
.. parsed-literal::
from __future__ import unicode_literals
before any code that uses the API, or confiure IPython (in `ipython_config.py`) with
.. parsed-literal::
c.InteractiveShellApp.exec_lines += ["from __future__ import unicode_literals"]
or explicitly suppply Unicode strings (e.g., as in many of the examples here with :code:`u'TESTING'`).
"""
from ._version import __version__, __author__
#__all__ = ['machine', 'components']
from .components import *
from .machine import *
| #!/usr/bin/env python
# encoding: utf8
"""An Enigma machine simulator with rich textual display functionality."""
from ._version import __version__, __author__
#__all__ = ['machine', 'components']
from .components import *
from .machine import *
| bsd-3-clause | Python |
0fe35173418a52c6ff5ee7d9f61e7b793bd9d2e2 | change pmma parameters for bonding monomer | fishstamp82/moltools,fishstamp82/moltools | src/builder.py | src/builder.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import molecules, os, polymer
def pmma_monomer():
"""Return pmmma monomer building block as defined by SMILES
format obtained in avogadro"""
builddir = 'build'
molfile = 'pmma_monomer.pdb'
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = polymer.Monomer.from_pdb( FILE, in_AA = True, out_AA = True )
m._mono_name = "pmma"
m._r = 1.46
m._angle = 104.5
m._dihedral = 180.0
return m
def sulfuric_acid():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "sulfur_opt.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Molecule.from_xyz( FILE, in_AA = True, out_AA = False )
return m
def paranitro_aniline():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "pna_opt.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Molecule.from_xyz( FILE, in_AA= True, out_AA = False )
return m
def tip3p():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "tip3p.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Water.get_standard( AA = False )
for ind, at in enumerate( m ):
at.order = ind + 1
return m
if __name__ == '__main__':
main()
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import molecules, os, polymer
def pmma_monomer():
"""Return pmmma monomer building block as defined by SMILES
format obtained in avogadro"""
builddir = 'build'
molfile = 'pmma_monomer.pdb'
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = polymer.Monomer.from_pdb( FILE, in_AA = True, out_AA = True )
return m
def sulfuric_acid():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "sulfur_opt.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Molecule.from_xyz( FILE, in_AA = True, out_AA = False )
return m
def paranitro_aniline():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "pna_opt.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Molecule.from_xyz( FILE, in_AA= True, out_AA = False )
return m
def tip3p():
"""Return geo opt.
molecule with sulfur in origo, one oxygen in xz plane"""
builddir = "build"
molfile = "tip3p.xyz"
FILE = os.path.join( os.path.dirname( os.path.realpath( __file__) ) , os.path.join( builddir, molfile ))
m = molecules.Water.get_standard( AA = False )
for ind, at in enumerate( m ):
at.order = ind + 1
return m
if __name__ == '__main__':
main()
| mit | Python |
6882cdbbbb66db8289bfb69b8334e7406099d456 | remove connector and settings | whaleforever/carilogo,whaleforever/carilogo,whaleforever/carilogo | lib/search/search.py | lib/search/search.py | import argparse
import os
import cv2
import cluster
from shutil import copyfile
from fba.preprocess import Preprocess
from fba.searcher import Searcher
def initialize():
#TODO: need to fix database indexing
# create_database()
# index.indexing('images/ori_data', using="db")
index.indexing('images/ori_data', using='file')
# create cluster
# cluster.create_cluster_model()
if __name__ == "__main__":
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--index", required=True,
help="Path to where the computed index will be stored")
ap.add_argument("-q", "--query", required=True,
help="Path to the query image")
ap.add_argument("-r", "--result-path", required=True,
help="Path to the result path")
ap.add_argument("--cluster", action='store_true')
args = vars(ap.parse_args())
# initialize the image descriptor
cd = Preprocess((8, 8, 8))
# load the query image and describe it
query = cv2.imread(args["query"])
features = cd.describe(query)
if args["cluster"]:
searcher = Searcher(args["index"], use_cluster = True)
cluster_group = cluster.query_instance(features)
results = searcher.search(features,cluster_group=cluster_group)
else:
# perform the search
searcher = Searcher(args["index"])
results = searcher.search(features)
# display the query
cv2.namedWindow("Query", cv2.WINDOW_NORMAL)
cv2.imshow("Query", query)
cv2.resizeWindow("Query", 800, 600)
if not results:
print "No result ..."
else :
print "Total Results",len(results)
# loop over the results
for (score, resultID) in results:
# copyfile(args["result_path"] + "/" + resultID, 'result/' + resultID)
# load the result image and display it
result = cv2.imread(args["result_path"] + "/" + resultID)
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.imshow("Result", result)
cv2.resizeWindow("Result", 800, 600)
cv2.waitKey(0)
| import argparse
import os
import cv2
import connector
import settings
import cluster
from shutil import copyfile
from fba.preprocess import Preprocess
from fba.searcher import Searcher
def create_database():
if not os.path.isfile(settings.DATABASE):
connector.initial(table="image", feature="text",
cluster="int", weka_id="int", image_path="text")
def initialize():
#TODO: need to fix database indexing
# create_database()
# index.indexing('images/ori_data', using="db")
index.indexing('images/ori_data', using='file')
# create cluster
# cluster.create_cluster_model()
if __name__ == "__main__":
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--index", required=True,
help="Path to where the computed index will be stored")
ap.add_argument("-q", "--query", required=True,
help="Path to the query image")
ap.add_argument("-r", "--result-path", required=True,
help="Path to the result path")
ap.add_argument("--cluster", action='store_true')
args = vars(ap.parse_args())
# initialize the image descriptor
cd = Preprocess((8, 8, 8))
# load the query image and describe it
query = cv2.imread(args["query"])
features = cd.describe(query)
if args["cluster"]:
searcher = Searcher(args["index"], use_cluster = True)
cluster_group = cluster.query_instance(features)
results = searcher.search(features,cluster_group=cluster_group)
else:
# perform the search
searcher = Searcher(args["index"])
results = searcher.search(features)
# display the query
cv2.namedWindow("Query", cv2.WINDOW_NORMAL)
cv2.imshow("Query", query)
cv2.resizeWindow("Query", 800, 600)
if not results:
print "No result ..."
else :
print "Total Results",len(results)
# loop over the results
for (score, resultID) in results:
copyfile(args["result_path"] + "/" + resultID, 'result/' + resultID)
# load the result image and display it
result = cv2.imread(args["result_path"] + "/" + resultID)
cv2.namedWindow("Result", cv2.WINDOW_NORMAL)
cv2.imshow("Result", result)
cv2.resizeWindow("Result", 800, 600)
cv2.waitKey(0)
| mit | Python |
29dd6496534d32a996891475473e8584a0621871 | Fix import problem | imlonghao/yunbi | yunbi/__init__.py | yunbi/__init__.py | from .yunbi import Yunbi
| from yunbi import yunbi
| mit | Python |
ebb65e72b6df662a85819faa0c31b963ed1a9715 | allow string OBJID | sagasurvey/saga,sagasurvey/saga | SAGA/spectra/extract_exsiting.py | SAGA/spectra/extract_exsiting.py | import numpy as np
from easyquery import Query, QueryMaker
from ..utils import fill_values_by_query
from .common import SPEED_OF_LIGHT, ensure_specs_dtype
from .manual_fixes import fixes_sdss_spec_by_objid
__all__ = ["extract_sdss_spectra", "extract_nsa_spectra"]
def extract_sdss_spectra(sdss):
if sdss is None or not len(sdss):
return
specs = Query("SPEC_Z > -1.0").filter(
sdss["RA", "DEC", "SPEC_Z", "SPEC_Z_ERR", "SPEC_Z_WARN", "OBJID"]
)
if not len(specs):
return
specs["ZQUALITY"] = np.where(specs["SPEC_Z_WARN"] == 0, 4, 1)
del specs["SPEC_Z_WARN"]
for objid, fixes in fixes_sdss_spec_by_objid.items():
fill_values_by_query(specs, QueryMaker.equal("OBJID", objid), fixes)
specs.rename_column("OBJID", "SPECOBJID")
specs["TELNAME"] = "SDSS"
specs["MASKNAME"] = "SDSS"
specs["HELIO_CORR"] = True
return ensure_specs_dtype(specs)
def extract_nsa_spectra(nsa):
if nsa is None:
return
specs = nsa["RA", "DEC", "Z", "ZSRC", "NSAID"]
specs["TELNAME"] = "NSA"
specs["SPEC_Z_ERR"] = 20 / SPEED_OF_LIGHT
specs["ZQUALITY"] = 4
specs["HELIO_CORR"] = True
specs.rename_column("Z", "SPEC_Z")
specs.rename_column("ZSRC", "MASKNAME")
specs.rename_column("NSAID", "SPECOBJID")
return ensure_specs_dtype(specs)
| import numpy as np
from easyquery import Query
from ..utils import fill_values_by_query
from .common import SPEED_OF_LIGHT, ensure_specs_dtype
from .manual_fixes import fixes_sdss_spec_by_objid
__all__ = ["extract_sdss_spectra", "extract_nsa_spectra"]
def extract_sdss_spectra(sdss):
if sdss is None:
return
specs = Query("SPEC_Z > -1.0").filter(
sdss["RA", "DEC", "SPEC_Z", "SPEC_Z_ERR", "SPEC_Z_WARN", "OBJID"]
)
if not len(specs):
return
specs["ZQUALITY"] = np.where(specs["SPEC_Z_WARN"] == 0, 4, 1)
del specs["SPEC_Z_WARN"]
for objid, fixes in fixes_sdss_spec_by_objid.items():
fill_values_by_query(specs, "OBJID == {}".format(objid), fixes)
specs.rename_column("OBJID", "SPECOBJID")
specs["TELNAME"] = "SDSS"
specs["MASKNAME"] = "SDSS"
specs["HELIO_CORR"] = True
return ensure_specs_dtype(specs)
def extract_nsa_spectra(nsa):
if nsa is None:
return
specs = nsa["RA", "DEC", "Z", "ZSRC", "NSAID"]
specs["TELNAME"] = "NSA"
specs["SPEC_Z_ERR"] = 20 / SPEED_OF_LIGHT
specs["ZQUALITY"] = 4
specs["HELIO_CORR"] = True
specs.rename_column("Z", "SPEC_Z")
specs.rename_column("ZSRC", "MASKNAME")
specs.rename_column("NSAID", "SPECOBJID")
return ensure_specs_dtype(specs)
| mit | Python |
686666b224d9c8f104cc49a0ac58e170cf5b9477 | use get_device_from_array | yuyu2172/chainercv,yuyu2172/chainercv,pfnet/chainercv,chainer/chainercv,chainer/chainercv | chainercv/links/model/ssd/gradient_scaling.py | chainercv/links/model/ssd/gradient_scaling.py | from chainer import cuda
class GradientScaling(object):
"""Optimizer/UpdateRule hook function for scaling gradient.
This hook function scales gradient by a constant value.
Args:
rate (float): Coefficient for scaling.
Attributes:
rate (float): Coefficient for scaling.
"""
name = 'GradientScaling'
call_for_each_param = True
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
g = param.grad
with cuda.get_device_from_array(g):
g *= self.rate
| from chainer import cuda
class GradientScaling(object):
"""Optimizer/UpdateRule hook function for scaling gradient.
This hook function scales gradient by a constant value.
Args:
rate (float): Coefficient for scaling.
Attributes:
rate (float): Coefficient for scaling.
"""
name = 'GradientScaling'
call_for_each_param = True
def __init__(self, rate):
self.rate = rate
def __call__(self, rule, param):
g = param.grad
with cuda.get_device(g):
g *= self.rate
| mit | Python |
d44e4bf7f73ecc68ded083008a2e753410edfa0e | implement name_not_equal_shortName | OParl/validator,OParl/validator | oparlvalidator/schema.py | oparlvalidator/schema.py | # -*- encoding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import json
from os.path import dirname, join
with open(join(dirname(__file__), 'schema.json')) as json_file:
OPARL = json.load(json_file)
# Additional validation functions here
def name_not_equal_shortName(data):
"""
Validate that two values are not equal, e. g.
name and nameShort may not be equal (section 5.2.3).
"""
if 'name' not in data or 'nameShort' not in data:
return True
return not data['name'] == data['nameShort']
| # -*- encoding: utf-8 -*-
from __future__ import (unicode_literals, absolute_import,
division, print_function)
import json
from os.path import dirname, join
with open(join(dirname(__file__), 'schema.json')) as json_file:
OPARL = json.load(json_file)
# Additional validation functions here
def name_not_equal_shortName(data):
"""
Validate that two values are not equal, e. g.
name and nameShort may not be equal (section 5.2.3).
"""
pass #TODO: implement
| mit | Python |
53ea29812a55a52c1a1b037340a4c4ddb5567197 | Add main method using ArgumentParser | danmichaelo/wm_metrics,danmichaelo/wm_metrics,Commonists/wm_metrics,danmichaelo/wm_metrics,Commonists/wm_metrics,Commonists/wm_metrics,danmichaelo/wm_metrics,Commonists/wm_metrics | new_editor.py | new_editor.py | #!/usr/bin/python
import json
def new_editors(old_period, new_period):
# new editors list
new_editors = []
# Loading old period json filehandle
data_old = json.load(old_period)["result"]["Individual Results"][0]
# iteration on each user of individual results list
# counts editor without edit on old period and add them to new_editors
for k in data_old.keys():
if(int(data_old[k]["edits"])==0):
new_editors.append(k)
data_new= json.load(new_period)["result"]["Individual Results"][0]
# new editors that survived
count_new = 0
# new editors that survived with more than 1 edit per month
count_one_epm = 0
# new editors that survived with more than 10 edits per month
count_ten_epm = 0
max_edit_new = 0
for k in new_editors:
edits = int(data_new[k]["edits"])
max_edit_new = max(max_edit_new, edits)
if(edits>=1):
count_new = count_new + 1
if(edits>=6):
count_one_epm = count_one_epm + 1
if(edits>=60):
count_ten_epm = count_ten_epm + 1
print """New editors that kept editing after WLM2011(Fr): %s
\t%s with more than one edit per month (6 months period)"
\t%s with more than ten edits per month (6 months period)"
\t%s max edits for a new contributors (6 months period)"""\
% (count_new, count_one_epm, count_ten_epm, max_edit_new)
def main():
"""Main method, entry point of the script."""
from argparse import ArgumentParser
description = "Computes new editor numbers based on WikiMetrics data"
parser = ArgumentParser(description=description)
parser.add_argument("-o", "--old",
type=file,
dest="old_period",
metavar="old_period.json",
required=True,
help="The old period data, as a JSON file")
parser.add_argument("-n", "--new",
type=file,
dest="new_period",
metavar="new_period.json",
required=True,
help="The new period data, as a JSON file")
args = parser.parse_args()
new_editors(args.old_period, args.new_period)
if __name__ == "__main__":
main()
| #!/usr/bin/python
import json
def new_editors(old_period, new_period):
# new editors list
new_editors = []
# Loading old period json filehandle
data_old = json.load(old_period)["result"]["Individual Results"][0]
# iteration on each user of individual results list
# counts editor without edit on old period and add them to new_editors
for k in data_old.keys():
if(int(data_old[k]["edits"])==0):
new_editors.append(k)
data_new= json.load(new_period)["result"]["Individual Results"][0]
# new editors that survived
count_new = 0
# new editors that survived with more than 1 edit per month
count_one_epm = 0
# new editors that survived with more than 10 edits per month
count_ten_epm = 0
max_edit_new = 0
for k in new_editors:
edits = int(data_new[k]["edits"])
max_edit_new = max(max_edit_new, edits)
if(edits>=1):
count_new = count_new + 1
if(edits>=6):
count_one_epm = count_one_epm + 1
if(edits>=60):
count_ten_epm = count_ten_epm + 1
print """New editors that kept editing after WLM2011(Fr): %s
\t%s with more than one edit per month (6 months period)"
\t%s with more than ten edits per month (6 months period)"
\t%s max edits for a new contributors (6 months period)"""\
% (count_new, count_one_epm, count_ten_epm, max_edit_new)
| mit | Python |
652b87210168ea046b76240daee51ba7d12bd27a | Bump version to 0.2.3 | akolar/ogn-lib | ogn_lib/__init__.py | ogn_lib/__init__.py | from ogn_lib.client import OgnClient # noqa: F401
from ogn_lib.parser import Parser # noqa: F401
from ogn_lib.constants import AirplaneType, AddressType, BeaconType # noqa: F401
__title__ = 'ogn-lib'
__description__ = 'Beacon processor for the OGN data stream.'
__version__ = '0.2.3'
__author__ = 'Anze Kolar'
__author_email__ = 'me@akolar.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Anze Kolar'
| from ogn_lib.client import OgnClient # noqa: F401
from ogn_lib.parser import Parser # noqa: F401
from ogn_lib.constants import AirplaneType, AddressType, BeaconType # noqa: F401
__title__ = 'ogn-lib'
__description__ = 'Beacon processor for the OGN data stream.'
__version__ = '0.2.2'
__author__ = 'Anze Kolar'
__author_email__ = 'me@akolar.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2018 Anze Kolar'
| mit | Python |
3fc0f46fa96615c59cce044e3e58e502923edae4 | Use cleaner version from cookiecutter OpenStack template | openstack/oslotest,openstack/oslotest | openstack/common/test.py | openstack/common/test.py | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import os
import fixtures
import testtools
_TRUE_VALUES = ('True', 'true', '1', 'yes')
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self._set_timeout()
self._fake_output()
self.useFixture(fixtures.FakeLogger('openstack.common'))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
def _fake_output(self):
if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES:
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES:
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
| # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common utilities used in testing"""
import os
import fixtures
import testtools
class BaseTestCase(testtools.TestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self._set_timeout()
self._fake_output()
self.useFixture(fixtures.FakeLogger('openstack.common'))
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
def _set_timeout(self):
test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
try:
test_timeout = int(test_timeout)
except ValueError:
# If timeout value is invalid do not set a timeout.
test_timeout = 0
if test_timeout > 0:
self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
def _fake_output(self):
if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
os.environ.get('OS_STDOUT_CAPTURE') == '1'):
stdout = self.useFixture(fixtures.StringStream('stdout')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
os.environ.get('OS_STDERR_CAPTURE') == '1'):
stderr = self.useFixture(fixtures.StringStream('stderr')).stream
self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
| apache-2.0 | Python |
08291f3948108da15b9832c495fade04cf2e22c4 | Add test to check title of index | jake-jake-jake/cocktails,jake-jake-jake/cocktails,jake-jake-jake/cocktails,jake-jake-jake/cocktails | tests/tests.py | tests/tests.py | #!/usr/bin/env python3
from selenium import webdriver
import unittest
class AdminPageTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_visit_admin_page(self):
# Visit admin page
self.browser.get('http://localhost:8000/admin')
# Check page title
self.assertIn('Django site admin', self.browser.title)
class API_fetch_tests(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_fetch_Ingredient_JSON(self):
pass
def test_fetch_Drink_JSON(self):
pass
class ReactAppTests(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_fetch_index(self):
self.browser.get('http://localhost:8000/index')
self.assertIn('Cocktails', self.browser.title)
if __name__ == '__main__':
print('test')
unittest.main()
| #!/usr/bin/env python3
from selenium import webdriver
import unittest
class AdminPageTest(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
self.browser.implicitly_wait(3)
def tearDown(self):
self.browser.quit()
def test_visit_admin_page(self):
# Visit admin page
self.browser.get('http://localhost:8000/admin')
# Check page title
self.assertIn('Django site admin', self.browser.title)
class API_fetch_tests(unittest.TestCase):
def setUp(self):
self.browser = webdriver.Firefox()
def tearDown(self):
self.browser.quit()
def test_fetch_Ingredient_JSON(self):
pass
def test_fetch_Drink_JSON(self):
pass
if __name__ == '__main__':
print('test')
unittest.main()
| mit | Python |
ff1dbbfb83549500a5c0ea1181c8f03fd544a61e | Return True/False from mysql.is_running | efajardo/osg-test,efajardo/osg-test | osgtest/library/mysql.py | osgtest/library/mysql.py | import os
import re
from osgtest.library import core
from osgtest.library import service
def name():
if core.el_release() < 7:
return 'mysql'
else:
return 'mariadb'
def daemon_name():
if core.el_release() < 7:
return 'mysqld'
else:
return 'mariadb'
def pidfile():
return os.path.join('/var/run', daemon_name(), daemon_name() + '.pid')
def server_rpm():
return name() + '-server'
def client_rpm():
return name()
def start():
service.check_start(daemon_name())
def stop():
service.check_stop(daemon_name())
def is_running():
return service.is_running(daemon_name())
def _get_command(user='root', database=None):
command = ['mysql', '-N', '-B', '--user=' + str(user)]
if database:
command.append('--database=' + str(database))
return command
def execute(statements, database=None):
"""Execute MySQL statements
`statements` must be a single string, but may contain multiple statements;
this will be fed to `mysql` as a script. The trailing `;` is necessary
even if executing a single statement. Query output is tab-separated.
If `database` is specified, the given database is used.
Return (exit status, stdout, stderr).
"""
return core.system(_get_command(database=database), stdin=statements)
def check_execute(statements, message, database=None, exit=0):
"""Execute MySQL statements and check the exit code
`statements` must be a single string, but may contain multiple statements;
this will be fed to `mysql` as a script. The trailing `;` is necessary
even if executing a single statement. Query output is tab-separated.
If `database` is specified, the given database is used.
If the return code from the call does not match the expected exit code,
an error is raised, and `message` is printed.
Return (standard output, standard error, and the failure
message generated by core.diagnose()).
"""
return core.check_system(_get_command(database=database), message, stdin=statements, exit=exit)
def dbdump(destfile, database=None):
"""Dump the contents of one or all databases to the given file
`destfile` must be a path the user can write to. If `database` is specified,
only the given database is dumped; otherwise, all databases are dumped.
The output is suitable for feeding back into `mysql` as a script.
"""
command = "mysqldump --skip-comments --skip-extended-insert -u root "
if database:
command += re.escape(database)
else:
command += "--all-databases"
command += ">" + re.escape(destfile)
core.system(command, user=None, stdin=None, log_output=False, shell=True)
| import os
import re
from osgtest.library import core
from osgtest.library import service
def name():
if core.el_release() < 7:
return 'mysql'
else:
return 'mariadb'
def daemon_name():
if core.el_release() < 7:
return 'mysqld'
else:
return 'mariadb'
def pidfile():
return os.path.join('/var/run', daemon_name(), daemon_name() + '.pid')
def server_rpm():
return name() + '-server'
def client_rpm():
return name()
def start():
service.check_start(daemon_name())
def stop():
service.check_stop(daemon_name())
def is_running():
service.is_running(daemon_name())
def _get_command(user='root', database=None):
command = ['mysql', '-N', '-B', '--user=' + str(user)]
if database:
command.append('--database=' + str(database))
return command
def execute(statements, database=None):
"""Execute MySQL statements
`statements` must be a single string, but may contain multiple statements;
this will be fed to `mysql` as a script. The trailing `;` is necessary
even if executing a single statement. Query output is tab-separated.
If `database` is specified, the given database is used.
Return (exit status, stdout, stderr).
"""
return core.system(_get_command(database=database), stdin=statements)
def check_execute(statements, message, database=None, exit=0):
"""Execute MySQL statements and check the exit code
`statements` must be a single string, but may contain multiple statements;
this will be fed to `mysql` as a script. The trailing `;` is necessary
even if executing a single statement. Query output is tab-separated.
If `database` is specified, the given database is used.
If the return code from the call does not match the expected exit code,
an error is raised, and `message` is printed.
Return (standard output, standard error, and the failure
message generated by core.diagnose()).
"""
return core.check_system(_get_command(database=database), message, stdin=statements, exit=exit)
def dbdump(destfile, database=None):
"""Dump the contents of one or all databases to the given file
`destfile` must be a path the user can write to. If `database` is specified,
only the given database is dumped; otherwise, all databases are dumped.
The output is suitable for feeding back into `mysql` as a script.
"""
command = "mysqldump --skip-comments --skip-extended-insert -u root "
if database:
command += re.escape(database)
else:
command += "--all-databases"
command += ">" + re.escape(destfile)
core.system(command, user=None, stdin=None, log_output=False, shell=True)
| apache-2.0 | Python |
878aa7b1f6752d988782ef0a4f9fe05a861e163e | Increment version | explosion/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc | thinc/about.py | thinc/about.py | # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__name__ = 'thinc'
__version__ = '6.10.0'
__summary__ = "Practical Machine Learning for NLP"
__uri__ = 'https://github.com/explosion/thinc'
__author__ = 'Matthew Honnibal'
__email__ = 'matt@explosion.ai'
__license__ = 'MIT'
__title__ = "thinc"
__release__ = True
| # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__name__ = 'thinc'
__version__ = '6.9.1'
__summary__ = "Practical Machine Learning for NLP"
__uri__ = 'https://github.com/explosion/thinc'
__author__ = 'Matthew Honnibal'
__email__ = 'matt@explosion.ai'
__license__ = 'MIT'
__title__ = "thinc"
__release__ = True
| mit | Python |
029d556540b8b03b2b260934ca01cd2811cceb99 | Set version to v7.1.0 | explosion/thinc,spacy-io/thinc,explosion/thinc,spacy-io/thinc,spacy-io/thinc,explosion/thinc,explosion/thinc | thinc/about.py | thinc/about.py | # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__name__ = "thinc"
__version__ = "7.1.0"
__summary__ = "Practical Machine Learning for NLP"
__uri__ = "https://github.com/explosion/thinc"
__author__ = "Matthew Honnibal"
__email__ = "matt@explosion.ai"
__license__ = "MIT"
__title__ = "thinc"
__release__ = True
| # inspired from:
# https://python-packaging-user-guide.readthedocs.org/en/latest/single_source_version/
# https://github.com/pypa/warehouse/blob/master/warehouse/__about__.py
__name__ = "thinc"
__version__ = "7.1.0.dev0"
__summary__ = "Practical Machine Learning for NLP"
__uri__ = "https://github.com/explosion/thinc"
__author__ = "Matthew Honnibal"
__email__ = "matt@explosion.ai"
__license__ = "MIT"
__title__ = "thinc"
__release__ = False
| mit | Python |
9f66ac0a6e5b76b024fe4982fad7d2aec9e7d040 | Write output to a file, not just stdout. | jkingdon/jh2gh | convert.py | convert.py | import sys
import tokenizer
import tree
import copy
import string_stream
class VariableAssigner:
def __init__(self, variables):
self._variables = copy.deepcopy(variables)
def next_name(self, kind):
return self._variables[kind].pop(0)
class Convert:
def __init__(self):
self._variables = {}
def store_variables(self, kind, variables):
if not self._variables.has_key(kind):
self._variables[kind] = []
self._variables[kind] += variables
def convert(self, input):
expressions = tree.parse(input)
for i in xrange(0, len(expressions), 2):
command = expressions[i]
arguments = expressions[i + 1]
if command == "kind" and arguments[0] == "variable":
expressions[i] = '';
expressions[i + 1] = '';
elif command == "var":
if arguments[0] == "variable":
arguments[0] = "object"
else:
expressions[i] = "tvar"
self.store_variables(arguments[0], arguments[1:])
elif command == "term":
assigner = VariableAssigner(self._variables)
return_type = arguments[0]
name_and_arguments = arguments[1]
for i in xrange(1, len(name_and_arguments)):
name_and_arguments[i] = assigner.next_name(name_and_arguments[i])
return expressions.to_string()
class Wiki:
def read(self, input):
result = ''
in_proof = False
while True:
line = input.readline()
if line == '':
break
if line == "</jh>\n":
in_proof = False
elif in_proof:
result += line
if line == "<jh>\n":
in_proof = True
return result
def convert(self, input):
jhilbert = self.read(input)
return Convert().convert(string_stream.StringStream(jhilbert))
if __name__ == '__main__':
if len(sys.argv) != 3:
print >> sys.stderr, 'Usage: JHILBERT-INPUT GHILBERT-OUTPUT'
exit(1)
input = open(sys.argv[1], "r")
output = open(sys.argv[2], "w")
output.write(Wiki().convert(input))
| import sys
import tokenizer
import tree
import copy
import string_stream
class VariableAssigner:
def __init__(self, variables):
self._variables = copy.deepcopy(variables)
def next_name(self, kind):
return self._variables[kind].pop(0)
class Convert:
def __init__(self):
self._variables = {}
def store_variables(self, kind, variables):
if not self._variables.has_key(kind):
self._variables[kind] = []
self._variables[kind] += variables
def convert(self, input):
expressions = tree.parse(input)
for i in xrange(0, len(expressions), 2):
command = expressions[i]
arguments = expressions[i + 1]
if command == "kind" and arguments[0] == "variable":
expressions[i] = '';
expressions[i + 1] = '';
elif command == "var":
if arguments[0] == "variable":
arguments[0] = "object"
else:
expressions[i] = "tvar"
self.store_variables(arguments[0], arguments[1:])
elif command == "term":
assigner = VariableAssigner(self._variables)
return_type = arguments[0]
name_and_arguments = arguments[1]
for i in xrange(1, len(name_and_arguments)):
name_and_arguments[i] = assigner.next_name(name_and_arguments[i])
return expressions.to_string()
class Wiki:
def read(self, input):
result = ''
in_proof = False
while True:
line = input.readline()
if line == '':
break
if line == "</jh>\n":
in_proof = False
elif in_proof:
result += line
if line == "<jh>\n":
in_proof = True
return result
def convert(self, input):
jhilbert = self.read(input)
return Convert().convert(string_stream.StringStream(jhilbert))
if __name__ == '__main__':
if len(sys.argv) != 3:
print >> sys.stderr, 'Usage: JHILBERT-INPUT GHILBERT-OUTPUT'
exit(1)
input = sys.argv[1]
output = sys.argv[2]
result = Wiki().convert(open(input, "r"))
print result
| apache-2.0 | Python |
1bd74f8a597eb2e11280eb008c7da8a8d429f2f0 | Fix response table title | softlayer/softlayer-python,allmightyspiff/softlayer-python | SoftLayer/CLI/autoscale/scale.py | SoftLayer/CLI/autoscale/scale.py | """Scales an Autoscale group"""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.autoscale import AutoScaleManager
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@click.option('--up/--down', 'scale_up', is_flag=True, default=True,
help="'--up' adds guests, '--down' removes guests.")
@click.option('--by/--to', 'scale_by', is_flag=True, required=True,
help="'--by' will add/remove the specified number of guests."
" '--to' will add/remove a number of guests to get the group's guest count to the specified number.")
@click.option('--amount', required=True, type=click.INT, help="Number of guests for the scale action.")
@environment.pass_env
def cli(env, identifier, scale_up, scale_by, amount):
"""Scales an Autoscale group. Bypasses a scale group's cooldown period."""
autoscale = AutoScaleManager(env.client)
# Scale By, and go down, need to use negative amount
if not scale_up and scale_by:
amount = amount * -1
result = []
if scale_by:
click.secho("Scaling group {} by {}".format(identifier, amount), fg='green')
result = autoscale.scale(identifier, amount)
else:
click.secho("Scaling group {} to {}".format(identifier, amount), fg='green')
result = autoscale.scale_to(identifier, amount)
try:
# Check if the first guest has a cancellation date, assume we are removing guests if it is.
status = result[0]['virtualGuest']['status']['keyName'] or False
except (IndexError, KeyError, TypeError):
status = False
if status == 'ACTIVE':
member_table = formatting.Table(['Id', 'Hostname', 'Created'], title="Added Guests")
else:
member_table = formatting.Table(['Id', 'Hostname', 'Created'], title="Cancelled Guests")
for guest in result:
real_guest = guest.get('virtualGuest')
member_table.add_row([
guest.get('id'), real_guest.get('hostname'), utils.clean_time(real_guest.get('createDate'))
])
env.fout(member_table)
| """Scales an Autoscale group"""
# :license: MIT, see LICENSE for more details.
import click
from SoftLayer.CLI import environment
from SoftLayer.CLI import formatting
from SoftLayer.managers.autoscale import AutoScaleManager
from SoftLayer import utils
@click.command()
@click.argument('identifier')
@click.option('--up/--down', 'scale_up', is_flag=True, default=True,
help="'--up' adds guests, '--down' removes guests.")
@click.option('--by/--to', 'scale_by', is_flag=True, required=True,
help="'--by' will add/remove the specified number of guests."
" '--to' will add/remove a number of guests to get the group's guest count to the specified number.")
@click.option('--amount', required=True, type=click.INT, help="Number of guests for the scale action.")
@environment.pass_env
def cli(env, identifier, scale_up, scale_by, amount):
"""Scales an Autoscale group. Bypasses a scale group's cooldown period."""
autoscale = AutoScaleManager(env.client)
# Scale By, and go down, need to use negative amount
if not scale_up and scale_by:
amount = amount * -1
result = []
if scale_by:
click.secho("Scaling group {} by {}".format(identifier, amount), fg='green')
result = autoscale.scale(identifier, amount)
else:
click.secho("Scaling group {} to {}".format(identifier, amount), fg='green')
result = autoscale.scale_to(identifier, amount)
try:
# Check if the first guest has a cancellation date, assume we are removing guests if it is.
cancel_date = result[0]['virtualGuest']['billingItem']['cancellationDate'] or False
except (IndexError, KeyError, TypeError):
cancel_date = False
if cancel_date:
member_table = formatting.Table(['Id', 'Hostname', 'Created'], title="Cancelled Guests")
else:
member_table = formatting.Table(['Id', 'Hostname', 'Created'], title="Added Guests")
for guest in result:
real_guest = guest.get('virtualGuest')
member_table.add_row([
guest.get('id'), real_guest.get('hostname'), utils.clean_time(real_guest.get('createDate'))
])
env.fout(member_table)
| mit | Python |
8c6386547380eb0a58110ae3e7ef1550627d92c8 | Fix #1, non OS dependant speech | SlapBot/stephanie-va | Stephanie/TextManager/speaker.py | Stephanie/TextManager/speaker.py | import os
import eyed3
import time
from pygame import mixer
class Speaker:
def __init__(self):
self.speak_result = ""
self.audio_file = None
def speak_from_os(self, speech_result_filename):
try:
self.speak_result = self.get_abs_filename(speech_result_filename)
if sys.platform == "win32":
os.startfile(self.speak_result)
else:
os.system("xdg-open " + self.speak_result)
except:
print("Default Audio Player for mp3 files is not set up, like vlc or something.")
try:
self.hibernate()
except:
print("Something went wrong with your stupid system, eyed3 named package wasn't installed probably "
"Check back at the support tab in the main website. Don't worry mate, I'll help you. Or if you're "
"trying to close the application abruptly, keep pressing CTRL + C repeatedly.")
@staticmethod
def get_abs_filename(filename):
speak_result = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, filename))
return speak_result
def hibernate(self):
self.audio_file = eyed3.load(self.speak_result)
wait_period = self.audio_file.info.time_secs
time.sleep(wait_period+2)
def say(self, speech):
self.speak_from_os(speech)
def speak_from_pygame(self, speech_result_filename):
self.speak_result = self.get_abs_filename(speech_result_filename)
try:
self.speak_pygame()
except:
print("Man switch back to os option config.ini, this package is really bad"
"trust me, I spent entire day to clear one bug and there's still more."
" Yolo.")
def speak_pygame(self):
mixer.init()
mixer.pause()
mixer.music.load(self.speak_result)
mixer.music.play()
self.hibernate()
mixer.music.stop()
mixer.unpause()
mixer.quit()
os.remove(self.speak_result)
| import os
import eyed3
import time
from pygame import mixer
class Speaker:
def __init__(self):
self.speak_result = ""
self.audio_file = None
def speak_from_os(self, speech_result_filename):
try:
self.speak_result = self.get_abs_filename(speech_result_filename)
os.startfile(self.speak_result)
except:
print("Default Audio Player for mp3 files is not set up, like vlc or something.")
try:
self.hibernate()
except:
print("Something went wrong with your stupid system, eyed3 named package wasn't installed probably "
"Check back at the support tab in the main website. Don't worry mate, I'll help you. Or if you're "
"trying to close the application abruptly, keep pressing CTRL + C repeatedly.")
@staticmethod
def get_abs_filename(filename):
speak_result = os.path.abspath(os.path.join(os.path.dirname(__file__),
os.pardir, filename))
return speak_result
def hibernate(self):
self.audio_file = eyed3.load(self.speak_result)
wait_period = self.audio_file.info.time_secs
time.sleep(wait_period+2)
def say(self, speech):
self.speak_from_os(speech)
def speak_from_pygame(self, speech_result_filename):
self.speak_result = self.get_abs_filename(speech_result_filename)
try:
self.speak_pygame()
except:
print("Man switch back to os option config.ini, this package is really bad"
"trust me, I spent entire day to clear one bug and there's still more."
" Yolo.")
def speak_pygame(self):
mixer.init()
mixer.pause()
mixer.music.load(self.speak_result)
mixer.music.play()
self.hibernate()
mixer.music.stop()
mixer.unpause()
mixer.quit()
os.remove(self.speak_result)
| mit | Python |
de98d66f8716b8c71c110f2f57cd3c140cc6d984 | Set version to v2.2.2 | honnibal/spaCy,spacy-io/spaCy,explosion/spaCy,spacy-io/spaCy,spacy-io/spaCy,honnibal/spaCy,explosion/spaCy,explosion/spaCy,spacy-io/spaCy,honnibal/spaCy,spacy-io/spaCy,spacy-io/spaCy,explosion/spaCy,explosion/spaCy,honnibal/spaCy,explosion/spaCy | spacy/about.py | spacy/about.py | # fmt: off
__title__ = "spacy"
__version__ = "2.2.2"
__release__ = True
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__shortcuts__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json"
| # fmt: off
__title__ = "spacy"
__version__ = "2.2.2.dev5"
__release__ = True
__download_url__ = "https://github.com/explosion/spacy-models/releases/download"
__compatibility__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/compatibility.json"
__shortcuts__ = "https://raw.githubusercontent.com/explosion/spacy-models/master/shortcuts-v2.json"
| mit | Python |
d40049cb81cc83ef69301104065228c62597d666 | Update test.py | ggcoke/stocks | com/ggcoke/stocks/test.py | com/ggcoke/stocks/test.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ggcoke'
import wmcloud_helper
HOST = 'api.wmcloud.com'
PORT = 9763
TOKEN = 'token'
if __name__ == '__main__':
params = {'field': None,
'ticker': '000001',
'secID': None,
'callback': None}
path = wmcloud_helper.WMCloudHelper.create_path('bond', 'getBond.json', **params)
header = wmcloud_helper.WMCloudHelper.create_header()
helper = wmcloud_helper.WMCloudHelper()
result = helper.get(path, header)
print(type(result))
print(result)
print(result['retCode'])
print(result['data'][0]['typeName'])
| #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'ggcoke'
import wmcloud_helper
HOST = 'api.wmcloud.com'
PORT = 9763
TOKEN = 'bc486a2662eb4a2e4d19a0f15abfbddc'
if __name__ == '__main__':
params = {'field': None,
'ticker': '000001',
'secID': None,
'callback': None}
path = wmcloud_helper.WMCloudHelper.create_path('bond', 'getBond.json', **params)
header = wmcloud_helper.WMCloudHelper.create_header()
helper = wmcloud_helper.WMCloudHelper()
result = helper.get(path, header)
print(type(result))
print(result)
print(result['retCode'])
print(result['data'][0]['typeName']) | apache-2.0 | Python |
9f23f6a5d40c7aeec1f4759c0837feb46f90b4ea | 更新 modules Groups 中的 apps.py, 新增函式功能宣告註解 | yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo,yrchen/CommonRepo | commonrepo/groups/apps.py | commonrepo/groups/apps.py | # -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
"""
App configurations for Groups in Common Repository project.
"""
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from actstream import registry
class GroupsAppConfig(AppConfig):
name = 'commonrepo.groups'
def ready(self):
registry.register(self.get_model('Group'))
import commonrepo.groups.signals
| # -*- coding: utf-8 -*-
#
# Copyright 2016 edX PDR Lab, National Central University, Taiwan.
#
# http://edxpdrlab.ncu.cc/
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Created By: yrchen@ATCity.org
# Maintained By: yrchen@ATCity.org
#
from __future__ import absolute_import, unicode_literals
from django.apps import AppConfig
from actstream import registry
class GroupsAppConfig(AppConfig):
name = 'commonrepo.groups'
def ready(self):
registry.register(self.get_model('Group'))
import commonrepo.groups.signals
| apache-2.0 | Python |
cc412d23662b023357b40539edb481832d9e3a2d | change VERSION to 0.2 beta | pglotov/django-rest-framework-gis,illing2005/django-rest-framework-gis,nmandery/django-rest-framework-gis,arjenvrielink/django-rest-framework-gis,nmandery/django-rest-framework-gis,sh4wn/django-rest-framework-gis,barseghyanartur/django-rest-framework-gis,manhg/django-rest-framework-gis,bopo/django-rest-framework-gis,djangonauts/django-rest-framework-gis | rest_framework_gis/__init__.py | rest_framework_gis/__init__.py | VERSION = (0, 2, 0, 'beta')
__version__ = VERSION # alias
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s' % (version, VERSION[3])
return version
| VERSION = (0, 1, 0, 'final')
__version__ = VERSION # alias
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3:] == ('alpha', 0):
version = '%s pre-alpha' % version
else:
if VERSION[3] != 'final':
version = '%s %s' % (version, VERSION[3])
return version
| mit | Python |
2d71fb157bf53dfde30af5f352656b6de68cba13 | Fix for better test. | charanpald/APGL | exp/sandbox/test/RandomisedSVDTest.py | exp/sandbox/test/RandomisedSVDTest.py |
import unittest
import numpy
import scipy.sparse
from exp.sandbox.RandomisedSVD import RandomisedSVD
from apgl.util.Util import Util
import numpy.testing as nptst
class RandomisedSVDTest(unittest.TestCase):
def setUp(self):
numpy.random.rand(21)
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
def testSvd(self):
n = 100
m = 80
A = scipy.sparse.rand(m, n, 0.1)
ks = [10, 20, 30, 40]
q = 2
lastError = numpy.linalg.norm(A.todense())
for k in ks:
U, s, V = RandomisedSVD.svd(A, k, q)
nptst.assert_array_almost_equal(U.T.dot(U), numpy.eye(k))
nptst.assert_array_almost_equal(V.T.dot(V), numpy.eye(k))
A2 = (U*s).dot(V.T)
error = numpy.linalg.norm(A - A2)
self.assertTrue(error <= lastError)
lastError = error
#Compare versus exact svd
U, s, V = numpy.linalg.svd(numpy.array(A.todense()))
inds = numpy.flipud(numpy.argsort(s))[0:k*2]
U, s, V = Util.indSvd(U, s, V, inds)
Ak = (U*s).dot(V.T)
error2 = numpy.linalg.norm(A - Ak)
self.assertTrue(error2 <= error)
if __name__ == '__main__':
unittest.main()
|
import unittest
import numpy
import scipy.sparse
from exp.sandbox.RandomisedSVD import RandomisedSVD
from apgl.util.Util import Util
import numpy.testing as nptst
class RandomisedSVDTest(unittest.TestCase):
def setUp(self):
numpy.random.rand(21)
numpy.set_printoptions(suppress=True, linewidth=200, precision=3)
def testSvd(self):
n = 100
A = scipy.sparse.rand(n, n, 0.1)
ks = [10, 20, 30, 40]
q = 2
lastError = numpy.linalg.norm(A.todense())
for k in ks:
U, s, V = RandomisedSVD.svd(A, k, q)
nptst.assert_array_almost_equal(U.T.dot(U), numpy.eye(k))
nptst.assert_array_almost_equal(V.T.dot(V), numpy.eye(k))
A2 = (U*s).dot(V.T)
error = numpy.linalg.norm(A - A2)
self.assertTrue(error <= lastError)
lastError = error
#Compare versus exact svd
U, s, V = numpy.linalg.svd(numpy.array(A.todense()))
inds = numpy.flipud(numpy.argsort(s))[0:k*2]
U, s, V = Util.indSvd(U, s, V, inds)
Ak = (U*s).dot(V.T)
error2 = numpy.linalg.norm(A - Ak)
self.assertTrue(error2 <= error)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause | Python |
9bfcc3f837df2b9342dfb4d7f3616ca4d9401969 | add unfolding of symlink into a directory of symlinks | arecarn/dploy | dploy/__init__.py | dploy/__init__.py | """
dploy script is an attempt at creating a clone of GNU stow that will work on
Windows as well as *nix
"""
import sys
assert sys.version_info >= (3, 4), 'Requires Python 3.4 or Greater'
import os
import pathlib
from dploy.util import resolve_abs_path
def dploy(source, dest):
"""
main script entry point
"""
source_absolute = resolve_abs_path(source)
dest_absolute = resolve_abs_path(dest)
_dploy_absolute_paths(pathlib.Path(source_absolute),
pathlib.Path(dest_absolute))
def _dploy_absolute_paths(source, dest):
assert source.is_dir()
assert source.is_absolute()
assert dest.is_absolute()
for src_file in source.iterdir():
dploy_path = dest / pathlib.Path(src_file.stem)
src_file_relative = _pathlib_relative_path(src_file,
dploy_path.parent)
try:
dploy_path.symlink_to(src_file_relative)
msg = "Link: {dest} => {source}"
print(msg.format(source=src_file_relative, dest=dploy_path))
except FileExistsError:
if dploy_path.samefile(src_file):
msg = "Link: Already Linked {dest} => {source}"
print(msg.format(source=src_file_relative, dest=dploy_path))
elif dploy_path.is_dir() and src_file.is_dir:
if dploy_path.is_symlink():
unfold(dploy_path)
_dploy_absolute_paths(src_file, dploy_path)
else:
msg = "Abort: {file} Already Exists"
print(msg.format(file=dploy_path))
sys.exit(1)
except FileNotFoundError:
msg = "Abort: {dest} Not Found"
print(msg.format(dest=dest))
sys.exit(1)
def unfold(dest):
"""
we are dploying some more files and we have a conflic
top level dest is a symlink that now needs to be a plain directory
steps:
- record children of the top level dest dir
- unlink top level dir
- create directory in place of the top level dest dir
- individually symlink recorded children
todo:
there is also the case were this will need to be undone
"""
children = []
for child in dest.iterdir(): # TODO re-implement as a list comprehension
child_relative = _pathlib_relative_path(child.resolve(), dest.parent)
children.append(child_relative)
print(children)
dest.unlink()
dest.mkdir()
for child in children:
source = pathlib.Path(child)
dploy_path = dest / source.stem
dploy_path.symlink_to(source)
def _pathlib_relative_path(path, start_at):
return os.path.relpath(path.__str__(), start_at.__str__())
| """
dploy script
"""
import sys
assert sys.version_info >= (3, 4), 'Requires Python 3.4 or Greater'
import os
import pathlib
from dploy.util import resolve_abs_path
def dploy(source, dest):
"""
main script entry point
"""
source_absolute = resolve_abs_path(source)
dest_absolute = resolve_abs_path(dest)
_dploy_absolute_paths(pathlib.Path(source_absolute),
pathlib.Path(dest_absolute))
def _dploy_absolute_paths(source, dest):
assert source.is_dir()
assert source.is_absolute()
assert dest.is_absolute()
for src_file in source.iterdir():
dploy_path = dest / pathlib.Path(src_file.stem)
src_file_relative = os.path.relpath(src_file.__str__(),
dploy_path.parent.__str__())
try:
dploy_path.symlink_to(src_file_relative)
msg = "Link: {dest} => {source}"
print(msg.format(source=src_file_relative, dest=dploy_path))
except FileExistsError:
if dploy_path.samefile(src_file):
msg = "Link: Already Linked {dest} => {source}"
print(msg.format(source=src_file_relative, dest=dploy_path))
elif dploy_path.is_dir() and src_file.is_dir:
_dploy_absolute_paths(src_file, dploy_path)
else:
msg = "Abort: {file} Already Exists"
print(msg.format(file=dploy_path))
sys.exit(1)
except FileNotFoundError:
msg = "Abort: {dest} Not Found"
print(msg.format(dest=dest))
sys.exit(1)
| mit | Python |
67e41dadd80d5ac9dc419d4883906ce505bffa17 | Add interface | pySTEPS/pysteps | nowcasts/interface.py | nowcasts/interface.py |
from . import simple_advection
from . import steps
def get_method(name):
"""Return one callable function to produce deterministic or ensemble
precipitation nowcasts.\n\
Methods for precipitation nowcasting:
+-------------------+-------------------------------------------------------+
| Name | Description |
+===================+=======================================================+
| extrapolation | this method is a simple advection forecast based |
| | on Lagrangian persistence |
+-------------------+-------------------------------------------------------+
| | implementation of the STEPS stochastic nowcasting |
| steps | method as described in Seed (2003), Bowler et al |
| | (2006) and Seed et al (2013) |
+-------------------+-------------------------------------------------------+
"""
if name.lower() == "extrapolation":
return simple_advection.forecast
elif name.lower() == "steps":
return steps.forecast
else:
raise ValueError("unknown nowcasting method %s" % name)
|
def get_method():
""""""
pass
| bsd-3-clause | Python |
52e51af4b9aaa274ef727ce428f129d042dfc595 | Complete delete_min() and its helper functions | bowen0701/algorithms_data_structures | ds_binary_heap.py | ds_binary_heap.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class BinaryHeap(object):
def __init__(self):
# Put single zero as the 1st element, so that
# integer division can be used in later methods.
self.heap_ls = [0]
self.current_size = 0
def _percolate_up(self, i):
while i // 2 > 0:
if self.heap_ls[i] < self.heap_ls[i // 2]:
tmp = self.heap_ls[i // 2]
self.heap_ls[i // 2] = self.heap_ls[i]
self.heap_ls[i] = tmp
i = i // 2
def insert(self, new_node):
self.heap_ls.append(new_node)
self.current_size += 1
self._percolate_up(self.current_size)
def _get_min_child(self, i):
if (i * 2 + 1) > self.current_size:
return i * 2
else:
if self.heap_ls[i * 2] < self.heap_ls[i * 2 + 1]:
return i * 2
else:
return i * 2 + 1
def _percolate_down(self, i):
while (i * 2) <= self.current_size:
min_child = _get_min_child(i)
if self.heap_ls[i] > self.heap_ls[min_child]:
tmp = self.heap_ls[1]
self.heap_ls[1] = self.heap_ls[min_child]
self.heap_ls[min_child] = tmp
else:
pass
i = min_child
def delete_min(self):
val_del = self.heap_ls[1]
self.heap_ls[1] = self.heap_ls[self.current_size]
self.current_size -= 1
self.heap_ls.pop()
self._percolate_down(1)
return val_del
def main():
pass
if __name__ == '__main__':
main()
| from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class BinaryHeap(object):
def __init__(self):
# Put single zero as the 1st element, so that
# integer division can be used in later methods.
self.heap_ls = [0]
self.current_size = 0
def _percolate_up(self, i):
while i // 2 > 0:
if self.heap_ls[i] < self.heap_ls[i // 2]:
tmp = self.heap_ls[i // 2]
self.heap_ls[i // 2] = self.heap_ls[i]
self.heap_ls[i] = tmp
i = i // 2
def insert(self, new_node):
self.heap_ls.append(new_node)
self.current_size += 1
self._percolate_up(self.current_size)
def _percolate_down(self, i):
pass
def _get_min_child(self, i):
pass
def delete_min(self):
pass
def main():
pass
if __name__ == '__main__':
main()
| bsd-2-clause | Python |
411672c069638c9d6dd5b3aab7dfcc23894f7ae1 | Update the pic build flag used for Sun CC | hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR,hlange/LogSoCR | waflib/Tools/suncc.py | waflib/Tools/suncc.py | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
from waflib import Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_scc(conf):
"""
Detect the Sun C compiler
"""
v = conf.env
cc = conf.find_program('cc', var='CC')
try:
conf.cmd_and_log(cc + ['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler' % cc)
v.CC_NAME = 'sun'
conf.get_suncc_version(cc)
@conf
def scc_common_flags(conf):
"""
Flags required for executing the sun C compiler
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Bdynamic'
v['STLIB_MARKER'] = '-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
# shared library
v['CFLAGS_cshlib'] = ['-xcode=pic32', '-DPIC']
v['LINKFLAGS_cshlib'] = ['-G']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2006-2010 (ita)
# Ralf Habacker, 2006 (rh)
from waflib import Utils
from waflib.Tools import ccroot, ar
from waflib.Configure import conf
@conf
def find_scc(conf):
"""
Detect the Sun C compiler
"""
v = conf.env
cc = conf.find_program('cc', var='CC')
try:
conf.cmd_and_log(cc + ['-flags'])
except Exception:
conf.fatal('%r is not a Sun compiler' % cc)
v.CC_NAME = 'sun'
conf.get_suncc_version(cc)
@conf
def scc_common_flags(conf):
"""
Flags required for executing the sun C compiler
"""
v = conf.env
v['CC_SRC_F'] = []
v['CC_TGT_F'] = ['-c', '-o']
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o']
v['CPPPATH_ST'] = '-I%s'
v['DEFINES_ST'] = '-D%s'
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STLIB_ST'] = '-l%s'
v['STLIBPATH_ST'] = '-L%s'
v['SONAME_ST'] = '-Wl,-h,%s'
v['SHLIB_MARKER'] = '-Bdynamic'
v['STLIB_MARKER'] = '-Bstatic'
# program
v['cprogram_PATTERN'] = '%s'
# shared library
v['CFLAGS_cshlib'] = ['-Kpic', '-DPIC']
v['LINKFLAGS_cshlib'] = ['-G']
v['cshlib_PATTERN'] = 'lib%s.so'
# static lib
v['LINKFLAGS_cstlib'] = ['-Bstatic']
v['cstlib_PATTERN'] = 'lib%s.a'
def configure(conf):
conf.find_scc()
conf.find_ar()
conf.scc_common_flags()
conf.cc_load_tools()
conf.cc_add_flags()
conf.link_add_flags()
| agpl-3.0 | Python |
44520918dc0fad40f3afcfc2cdfde6f3208543cd | Use wiringPiSetupGpio, which required root. With wiringPiSetupSys some gpios stayed on low after boot. | ammannbros/garden-lighting,ammannbros/garden-lighting,ammannbros/garden-lighting,ammannbros/garden-lighting | garden_lighting/MCP23017/raspberry.py | garden_lighting/MCP23017/raspberry.py | import time
import wiringpi
from garden_lighting.MCP23017.MCP23017 import MCP23017
class RaspberryMCP23017(MCP23017):
def __init__(self, dev_addr, rst_pin=0xFF, i2cport=1):
super().__init__(dev_addr, rst_pin, i2cport)
def initDevice(self):
'''
Does a reset to put all registers in initial state
'''
# Set pin numbering mode
# wiringPiSetupSys() did not work because pins were low after booting and running the write commands
# This requires root!
wiringpi.wiringPiSetupGpio()
# Define the reset pin as output
wiringpi.pinMode(self.RstPin, wiringpi.GPIO.OUTPUT)
# Create a reset impulse
wiringpi.digitalWrite(self.RstPin, wiringpi.GPIO.LOW)
# wait for 50 ms
time.sleep(.050)
wiringpi.digitalWrite(self.RstPin, wiringpi.GPIO.HIGH)
| import time
import os
import wiringpi
from garden_lighting.MCP23017.MCP23017 import MCP23017
class RaspberryMCP23017(MCP23017):
def __init__(self, dev_addr, rst_pin=0xFF, i2cport=1):
super().__init__(dev_addr, rst_pin, i2cport)
def initDevice(self):
'''
Does a reset to put all registers in initial state
'''
os.system("gpio export " + str(self.RstPin) + " out")
# Set pin numbering mode
# We don't need performance, don't want root and don't want to interfere with
# other wiringpi instances -> sysfspy
wiringpi.wiringPiSetupSys()
# Define the reset pin as output
wiringpi.pinMode(self.RstPin, wiringpi.GPIO.OUTPUT)
# Create a reset impulse
wiringpi.digitalWrite(self.RstPin, wiringpi.GPIO.LOW)
# wait for 50 ms
time.sleep(.050)
wiringpi.digitalWrite(self.RstPin, wiringpi.GPIO.HIGH)
| mit | Python |
8b6aafdac18e4a8b2e929619d8f668edb01ad1aa | fix typo in participant gender enum | NCI-GDC/gdcdatamodel,NCI-GDC/gdcdatamodel | gdcdatamodel/models/nodes/clinical.py | gdcdatamodel/models/nodes/clinical.py | from psqlgraph import Node, pg_property
class Clinical(Node):
__nonnull_properties__ = []
@pg_property(str, enum=['male', 'female'])
def gender(self, value):
self._set_property('gender', value)
@pg_property(str, enum=[
'not reported',
'white',
'american indian or alaska native',
'black or african american',
'asian',
'native hawaiian or other pacific islander',
'other'])
def race(self, value):
self._set_property('race', value)
@pg_property(str, enum=['hispanic or latino', 'not hispanic or latino'])
def ethnicity(self, value):
self._set_property('ethnicity', value)
@pg_property(str, enum=['alive', 'dead', 'lost to follow-up'])
def vital_status(self, value):
self._set_property('vital_status', value)
@pg_property(int)
def year_of_diagnosis(self, value):
self._set_property('year_of_diagnosis', value)
@pg_property(int)
def age_at_diagnosis(self, value):
self._set_property('age_at_diagnosis', value)
@pg_property(int)
def days_to_death(self, value):
self._set_property('days_to_death', value)
@pg_property(str)
def icd_10(self, value):
self._set_property('icd_10', value)
| from psqlgraph import Node, pg_property
class Clinical(Node):
__nonnull_properties__ = []
@pg_property(str, enum=['male', 'femle'])
def gender(self, value):
self._set_property('gender', value)
@pg_property(str, enum=[
'not reported',
'white',
'american indian or alaska native',
'black or african american',
'asian',
'native hawaiian or other pacific islander',
'other'])
def race(self, value):
self._set_property('race', value)
@pg_property(str, enum=['hispanic or latino', 'not hispanic or latino'])
def ethnicity(self, value):
self._set_property('ethnicity', value)
@pg_property(str, enum=['alive', 'dead', 'lost to follow-up'])
def vital_status(self, value):
self._set_property('vital_status', value)
@pg_property(int)
def year_of_diagnosis(self, value):
self._set_property('year_of_diagnosis', value)
@pg_property(int)
def age_at_diagnosis(self, value):
self._set_property('age_at_diagnosis', value)
@pg_property(int)
def days_to_death(self, value):
self._set_property('days_to_death', value)
@pg_property(str)
def icd_10(self, value):
self._set_property('icd_10', value)
| apache-2.0 | Python |
c1d60b3930d9a85c5d41d3bacc81316b2865c3a2 | bump version | ickc/pantable | pantable/version.py | pantable/version.py | __version__ = '0.0.8'
| __version__ = '0.0.7'
| bsd-3-clause | Python |
f201fc9efd56c78d37432402501fed47992553df | Remove unused imports | vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks,vodkina/GlobaLeaks | backend/globaleaks/tests/handlers/test_custodian.py | backend/globaleaks/tests/handlers/test_custodian.py | # -*- coding: utf-8 -*-
import unittest
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.handlers import custodian
class TestIdentityAccessRequestInstance(helpers.TestHandlerWithPopulatedDB):
_handler = custodian.CustodianIdentityAccessRequestInstance
@inlineCallbacks
def setUp(self):
yield helpers.TestHandlerWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@inlineCallbacks
def test_get_new_identityaccessrequest(self):
iars = yield custodian.get_identityaccessrequests_list()
handler = self.request(user_id = self.dummyCustodian['id'], role='custodian')
yield handler.get(iars[0]['id'])
@inlineCallbacks
def test_put_identityaccessrequest_response(self):
iars = yield custodian.get_identityaccessrequests_list()
handler = self.request(user_id = self.dummyCustodian['id'], role='custodian')
yield handler.get(iars[0]['id'])
self.responses[0]['response'] = 'authorized'
self.responses[0]['response_motivation'] = 'oh yeah!'
handler = self.request(self.responses[0], user_id = self.dummyCustodian['id'], role='custodian')
yield handler.put(iars[0]['id'])
yield handler.get(iars[0]['id'])
class TestIdentityAccessRequestsCollection(helpers.TestHandlerWithPopulatedDB):
_handler = custodian.CustodianIdentityAccessRequestsCollection
@inlineCallbacks
def setUp(self):
yield helpers.TestHandlerWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@inlineCallbacks
def test_get(self):
handler = self.request(user_id=self.dummyCustodian['id'], role='custodian')
yield handler.get()
| # -*- coding: utf-8 -*-
import unittest
import random
from twisted.internet.defer import inlineCallbacks
from globaleaks.tests import helpers
from globaleaks.handlers import custodian
from globaleaks.rest import errors
class TestIdentityAccessRequestInstance(helpers.TestHandlerWithPopulatedDB):
_handler = custodian.CustodianIdentityAccessRequestInstance
@inlineCallbacks
def setUp(self):
yield helpers.TestHandlerWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@inlineCallbacks
def test_get_new_identityaccessrequest(self):
iars = yield custodian.get_identityaccessrequests_list()
handler = self.request(user_id = self.dummyCustodian['id'], role='custodian')
yield handler.get(iars[0]['id'])
@inlineCallbacks
def test_put_identityaccessrequest_response(self):
iars = yield custodian.get_identityaccessrequests_list()
handler = self.request(user_id = self.dummyCustodian['id'], role='custodian')
yield handler.get(iars[0]['id'])
self.responses[0]['response'] = 'authorized'
self.responses[0]['response_motivation'] = 'ou iea!'
handler = self.request(self.responses[0], user_id = self.dummyCustodian['id'], role='custodian')
yield handler.put(iars[0]['id'])
yield handler.get(iars[0]['id'])
class TestIdentityAccessRequestsCollection(helpers.TestHandlerWithPopulatedDB):
_handler = custodian.CustodianIdentityAccessRequestsCollection
@inlineCallbacks
def setUp(self):
yield helpers.TestHandlerWithPopulatedDB.setUp(self)
yield self.perform_full_submission_actions()
@inlineCallbacks
def test_get(self):
handler = self.request(user_id=self.dummyCustodian['id'], role='custodian')
yield handler.get()
| agpl-3.0 | Python |
13b613a76e409d352f866dd6de8c199ad8772c2f | Add 5-min timeout | xiao-chen/dist_test,cloudera/dist_test,umbrant/dist_test,xiao-chen/dist_test,cloudera/dist_test,umbrant/dist_test,umbrant/dist_test,umbrant/dist_test,cloudera/dist_test,xiao-chen/dist_test,cloudera/dist_test,xiao-chen/dist_test | parse_for_submit.py | parse_for_submit.py | #!/usr/bin/python
import json
import sys
if len(sys.argv) != 3:
print sys.argv[0], "<json file with hashes> <output json file>"
sys.exit(1)
outmap = {
"tasks": [
{"isolate_hash": "fa0fee63c6d4e540802d22464789c21de12ee8f5",
"description": "andrew test task"}
]
}
tasks = []
inmap = json.load(open(sys.argv[1], "r"))
for k,v in inmap['items'].iteritems():
tasks += [{"isolate_hash" : str(v),
"description" : str(k),
"timeout": 300
}]
outmap = {"tasks": tasks}
json.dump(outmap, open(sys.argv[2], "wt"))
| #!/usr/bin/python
import json
import sys
if len(sys.argv) != 3:
print sys.argv[0], "<json file with hashes> <output json file>"
sys.exit(1)
outmap = {
"tasks": [
{"isolate_hash": "fa0fee63c6d4e540802d22464789c21de12ee8f5",
"description": "andrew test task"}
]
}
tasks = []
inmap = json.load(open(sys.argv[1], "r"))
for k,v in inmap['items'].iteritems():
tasks += [{"isolate_hash" : str(v),
"description" : str(k),
}]
outmap = {"tasks": tasks}
json.dump(outmap, open(sys.argv[2], "wt"))
| apache-2.0 | Python |
643f4d8e178e35337381f5afbb3c926be153743a | remove x-based matplotlib rendering | mahnen/gamma_limits_sensitivity | gamma_limits_sensitivity/tests/test_high_level_api.py | gamma_limits_sensitivity/tests/test_high_level_api.py | '''
The reader of the paper just saw there is a github repo
she installs it with
pip install github.com/mahnen/gamma_limits_sensitivity
then she finds the callable in her path named also gamma_limits_sensitivity,
as stated in the README and wants to try it on an upper limit calculation
she calls it as explained:
gamma_limits_sensitivity ul --N_on=10 --N_off=50 --alpha=0.2 --l_lim=15 --A_eff=<some_path>
and some nice plots return
--------------------------------------------------------------
A sunny day ... her boss is really interessted in knowing what
her newly developed telescope is actually capable of, independend of the source
So she calls:
gamma_limits_sensitivity sens --s_bg=7.1 --alpha=0.2 --t_obs=36000 --A_eff=<some_path>
and gets plots
--------------------------------------------------------------
Another days she likes an undicsovered source very much and
would like to know if her gamma ray telescope can detect this source in a
reasonable amount of time
so she calls:
gamma_limits_sensitivity predict --s_bg=7.1 --alpha=0.2 --f_0=1e-12 --df_0=1e-13
--Gamma=-2.6 --dGamma=0.2 --E_0=1e9 --A_eff=<some_path>
and gets some plots again and the estimated time to detection printed to stdout.
--------------------------------------------------------------
'''
import gamma_limits_sensitivity as gls
import matplotlib
matplotlib.use('Agg') # needed to remove x-based rendering in cont. integration testing
def test_high_level_api_ul():
'''
This test tests the cli upper limit functionality explained in above user
story.
'''
dictionary = gls.upper_limit(
N_on=10,
N_off=50,
alpha=0.2,
l_lim=15,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
def test_high_level_api_sens():
'''
This test tests the cli sens functionality explained in above user story.
'''
dictionary = gls.sensitivity(
s_bg=10,
alpha=0.2,
t_obs=10*3600,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
def test_high_level_api_predict():
'''
This test tests the cli predict functionality explained
in above user story.
'''
dictionary = gls.predict(
s_bg=10,
alpha=0.2,
f_0=1e-12,
df_0=1e-13,
Gamma=-2.6,
dGamma=0.2,
E_0=1e9,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
for time_quantile in dictionary['times']:
assert time_quantile >= 0
| '''
The reader of the paper just saw there is a github repo
she installs it with
pip install github.com/mahnen/gamma_limits_sensitivity
then she finds the callable in her path named also gamma_limits_sensitivity,
as stated in the README and wants to try it on an upper limit calculation
she calls it as explained:
gamma_limits_sensitivity ul --N_on=10 --N_off=50 --alpha=0.2 --l_lim=15 --A_eff=<some_path>
and some nice plots return
--------------------------------------------------------------
A sunny day ... her boss is really interessted in knowing what
her newly developed telescope is actually capable of, independend of the source
So she calls:
gamma_limits_sensitivity sens --s_bg=7.1 --alpha=0.2 --t_obs=36000 --A_eff=<some_path>
and gets plots
--------------------------------------------------------------
Another days she likes an undicsovered source very much and
would like to know if her gamma ray telescope can detect this source in a
reasonable amount of time
so she calls:
gamma_limits_sensitivity predict --s_bg=7.1 --alpha=0.2 --f_0=1e-12 --df_0=1e-13
--Gamma=-2.6 --dGamma=0.2 --E_0=1e9 --A_eff=<some_path>
and gets some plots again and the estimated time to detection printed to stdout.
--------------------------------------------------------------
'''
import gamma_limits_sensitivity as gls
import matplotlib
def test_high_level_api_ul():
'''
This test tests the cli upper limit functionality explained in above user
story.
'''
dictionary = gls.upper_limit(
N_on=10,
N_off=50,
alpha=0.2,
l_lim=15,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
def test_high_level_api_sens():
'''
This test tests the cli sens functionality explained in above user story.
'''
dictionary = gls.sensitivity(
s_bg=10,
alpha=0.2,
t_obs=10*3600,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
def test_high_level_api_predict():
'''
This test tests the cli predict functionality explained
in above user story.
'''
dictionary = gls.predict(
s_bg=10,
alpha=0.2,
f_0=1e-12,
df_0=1e-13,
Gamma=-2.6,
dGamma=0.2,
E_0=1e9,
A_eff='some_path',
)
for plot in dictionary['plots']:
assert isinstance(plot, matplotlib.figure.Figure)
for time_quantile in dictionary['times']:
assert time_quantile >= 0
| mit | Python |
cc271c69d2bbc96298b3e914fee3ff46c08e161b | add logger | avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf,avlach/univbris-ocf | src/python/expedient/common/backends/remoteuser.py | src/python/expedient/common/backends/remoteuser.py | '''
Created on Jun 15, 2010
@author: jnaous
'''
from django.contrib.auth.backends import RemoteUserBackend
import logging
logger = logging.getLogger("backends.remoteuser")
class NoCreateRemoteUserBackend(RemoteUserBackend):
"""
Extends the RemoteUserBackend by simply setting C{create_unknown_user} to
False instead of the default True, so that unknown users are not created.
"""
create_unknown_user = False
| '''
Created on Jun 15, 2010
@author: jnaous
'''
from django.contrib.auth.backends import RemoteUserBackend
class NoCreateRemoteUserBackend(RemoteUserBackend):
"""
Extends the RemoteUserBackend by simply setting C{create_unknown_user} to
False instead of the default True, so that unknown users are not created.
"""
create_unknown_user = False
| bsd-3-clause | Python |
f30a923b881e908fa607e276de1d152d803248f1 | Update main to run a profiler | toofishes/python-pgpdump | pgpdump/__main__.py | pgpdump/__main__.py | import sys
import cProfile
from . import AsciiData, BinaryData
def parsefile(name):
with open(name) as infile:
if name.endswith('.asc'):
data = AsciiData(infile.read())
else:
data = BinaryData(infile.read())
counter = 0
for packet in data.packets():
counter += 1
print counter
def main():
for filename in sys.argv[1:]:
parsefile(filename)
if __name__ == '__main__':
cProfile.run('main()', 'main.profile')
| import sys
from . import BinaryData
for filename in sys.argv[1:]:
with open(filename) as infile:
data = BinaryData(infile.read())
for packet in data.packets():
print hex(packet.key_id), packet.creation_date
| bsd-3-clause | Python |
f4fcf5abee232afb8419ddb43c4662006ad76232 | Remove unused import from brain.py | ratchetrobotics/espresso | espresso/brain.py | espresso/brain.py | from tinydb import TinyDB
class Brain(object):
def __init__(self, db_location):
self.db = TinyDB(db_location)
| from tinydb import TinyDB
from tinydb import where
class Brain(object):
def __init__(self, db_location):
self.db = TinyDB(db_location)
| bsd-3-clause | Python |
31ea71b07f49df05a646684bb540730da89929db | add 04_division.py | jerodg/hackerrank-python | 01_Introduction/04_division.py | 01_Introduction/04_division.py | #!/usr/bin/python3.6
"""Jerod Gawne, 2017-04-19
Division
https://www.hackerrank.com/challenges/python-division/
Editorial
a = int(input())
b = int(input())
print(a // b)
print(a / b)
"""
def main():
"""
Main-Logic
"""
a, b = int(input()), int(input())
print((a // b), (a / b), sep='\n')
if __name__ == '__main__':
try:
main()
except Exception:
import sys
import traceback
print(traceback.print_exception(*sys.exc_info())) | #!/usr/bin/python3.6
"""
Jerod Gawne, 2017-04-19
Division
https://www.hackerrank.com/challenges/python-division/
"""
def main():
"""
Main-Logic
"""
a = int(input())
b = int(input())
if b != 0:
print(a//b)
print(a/b)
if __name__ == '__main__':
try:
main()
except Exception:
import sys
import traceback
print(traceback.print_exception(*sys.exc_info())) | mit | Python |
7cdc7879cc2f9d23f62975d2ede50311db94f501 | read MapPackage | c3nav/c3nav,c3nav/c3nav,c3nav/c3nav,c3nav/c3nav | src/c3nav/mapdata/utils/cache/package.py | src/c3nav/mapdata/utils/cache/package.py | import os
import struct
from collections import namedtuple
from io import BytesIO
from tarfile import TarFile, TarInfo
from c3nav.mapdata.utils.cache import AccessRestrictionAffected, GeometryIndexed, MapHistory
CachePackageLevel = namedtuple('CachePackageLevel', ('history', 'restrictions'))
class CachePackage:
def __init__(self, bounds, levels=None):
self.bounds = bounds
self.levels = {} if levels is None else levels
def add_level(self, level_id: int, history: MapHistory, restrictions: AccessRestrictionAffected):
self.levels[level_id] = CachePackageLevel(history, restrictions)
def save(self, filename=None, compression=None):
if filename is None:
from django.conf import settings
filename = os.path.join(settings.CACHE_ROOT, 'package.tar')
if compression is not None:
filename += '.' + compression
filemode = 'w'
if compression is not None:
filemode += ':' + compression
with TarFile.open(filename, filemode) as f:
self._add_bytesio(f, 'bounds', BytesIO(struct.pack('<IIII', *(int(i*100) for i in self.bounds))))
for level_id, level_data in self.levels.items():
self._add_geometryindexed(f, 'history_%d' % level_id, level_data.history)
self._add_geometryindexed(f, 'restrictions_%d' % level_id, level_data.restrictions)
def _add_bytesio(self, f: TarFile, filename: str, data: BytesIO):
data.seek(0, os.SEEK_END)
tarinfo = TarInfo(name=filename)
tarinfo.size = data.tell()
data.seek(0)
f.addfile(tarinfo, data)
def _add_geometryindexed(self, f: TarFile, filename: str, obj: GeometryIndexed):
data = BytesIO()
obj.write(data)
self._add_bytesio(f, filename, data)
def save_all(self, filename=None):
for compression in (None, 'gz', 'xz'):
self.save(filename, compression)
@classmethod
def read(cls, f):
f = TarFile.open(fileobj=f)
files = {info.name: info for info in f.getmembers()}
bounds = tuple(i/100 for i in struct.unpack('<IIII', f.extractfile(files['bounds']).read()))
levels = {}
for filename in files:
if not filename.startswith('history_'):
continue
level_id = int(filename[8:])
levels[level_id] = CachePackageLevel(
history=MapHistory.read(f.extractfile(files['history_%d' % level_id])),
restrictions=AccessRestrictionAffected.read(f.extractfile(files['restrictions_%d' % level_id]))
)
return cls(bounds, levels)
| import os
import struct
from collections import namedtuple
from io import BytesIO
from tarfile import TarFile, TarInfo
from c3nav.mapdata.utils.cache import AccessRestrictionAffected, GeometryIndexed, MapHistory
CachePackageLevel = namedtuple('CachePackageLevel', ('history', 'restrictions'))
class CachePackage:
def __init__(self, bounds, levels=None):
self.bounds = bounds
self.levels = {} if levels is None else levels
def add_level(self, level_id: int, history: MapHistory, restrictions: AccessRestrictionAffected):
self.levels[level_id] = CachePackageLevel(history, restrictions)
def save(self, filename=None, compression=None):
if filename is None:
from django.conf import settings
filename = os.path.join(settings.CACHE_ROOT, 'package.tar')
if compression is not None:
filename += '.' + compression
filemode = 'w'
if compression is not None:
filemode += ':' + compression
with TarFile.open(filename, filemode) as f:
self._add_bytesio(f, 'bounds', BytesIO(struct.pack('<IIII', *(int(i*100) for i in self.bounds))))
for level_id, level_data in self.levels.items():
self._add_geometryindexed(f, 'history_%d' % level_id, level_data.history)
self._add_geometryindexed(f, 'restrictions_%d' % level_id, level_data.restrictions)
def _add_bytesio(self, f: TarFile, filename: str, data: BytesIO):
data.seek(0, os.SEEK_END)
tarinfo = TarInfo(name=filename)
tarinfo.size = data.tell()
data.seek(0)
f.addfile(tarinfo, data)
def _add_geometryindexed(self, f: TarFile, filename: str, obj: GeometryIndexed):
data = BytesIO()
obj.write(data)
self._add_bytesio(f, filename, data)
def save_all(self, filename=None):
for compression in (None, 'gz', 'xz'):
self.save(filename, compression)
| apache-2.0 | Python |
14d2d5f2f6c97ccf75373702dd42a335aa0a8063 | Allow dots in usernames | pinry/pinry,lapo-luchini/pinry,pinry/pinry,lapo-luchini/pinry,pinry/pinry,lapo-luchini/pinry,lapo-luchini/pinry,pinry/pinry | users/forms.py | users/forms.py | from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username,
email, and password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
}
username = forms.RegexField(
label=_("Username"), max_length=30,
regex=r'^[\w-.]+$'
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ("username", "email")
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
| from django import forms
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import User
class UserCreationForm(forms.ModelForm):
"""
A form that creates a user, with no privileges, from the given username,
email, and password.
"""
error_messages = {
'duplicate_username': _("A user with that username already exists."),
}
username = forms.RegexField(
label=_("Username"), max_length=30,
regex=r'^[\w-]+$'
)
password = forms.CharField(
label=_("Password"),
widget=forms.PasswordInput
)
class Meta:
model = User
fields = ("username", "email")
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
User._default_manager.get(username=username)
except User.DoesNotExist:
return username
raise forms.ValidationError(self.error_messages['duplicate_username'])
def save(self, commit=True):
user = super(UserCreationForm, self).save(commit=False)
user.set_password(self.cleaned_data["password"])
if commit:
user.save()
return user
| bsd-2-clause | Python |
de875418621c1d604507fbd095cf666ee5cac742 | Bump version | matrix-org/synapse,TribeMedia/synapse,matrix-org/synapse,iot-factory/synapse,TribeMedia/synapse,rzr/synapse,howethomas/synapse,iot-factory/synapse,iot-factory/synapse,TribeMedia/synapse,iot-factory/synapse,iot-factory/synapse,matrix-org/synapse,illicitonion/synapse,howethomas/synapse,matrix-org/synapse,illicitonion/synapse,illicitonion/synapse,howethomas/synapse,TribeMedia/synapse,rzr/synapse,rzr/synapse,rzr/synapse,howethomas/synapse,matrix-org/synapse,illicitonion/synapse,matrix-org/synapse,rzr/synapse,howethomas/synapse,illicitonion/synapse,TribeMedia/synapse | synapse/__init__.py | synapse/__init__.py | # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.9.0-r3"
| # -*- coding: utf-8 -*-
# Copyright 2014, 2015 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" This is a reference implementation of a Matrix home server.
"""
__version__ = "0.9.0-r2"
| apache-2.0 | Python |
fd2070a483cbec1c93bf815130216b97b3ca023b | Improve URL validator | jmagnusson/wtforms,cklein/wtforms,Xender/wtforms,Aaron1992/wtforms,skytreader/wtforms,subyraman/wtforms,crast/wtforms,wtforms/wtforms,pawl/wtforms,pawl/wtforms,hsum/wtforms,Aaron1992/wtforms | wtforms/validators.py | wtforms/validators.py | """
wtforms.validators
~~~~~~~~~~~~~~~~~~
TODO
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
import re
class ValidationError(ValueError):
pass
def email(form, field):
if not re.match(r'^.+@[^.].+\.[a-z]{2,4}$', field.data, re.IGNORECASE): # XXX better email regex?
raise ValidationError(u'Invalid email address.')
def length(message=None, min=-1, max=None):
fmt_args = {'min': min, 'max': max}
def _length(form, field):
L = field.data and len(field.data) or 0
if L < min:
raise ValidationError((message or u'Must be at least %(min)i characters.') % fmt_args)
elif max is not None and L > max:
raise ValidationError((message or u'May not be longer than %(max)i characters.') % fmt_args)
return _length
def url(allow_blank=False):
def _url(form, field):
if allow_blank and not field.data:
return
match = re.match(r'[a-z]+://.*\.[a-z]{2,4}(\/.*)?', field.data, re.I)
if not match:
raise ValidationError(u'Is not a valid URL.')
return _url
def not_empty(message=None):
def _not_empty(form, field):
if not field.data or not field.data.strip():
raise ValidationError(message or u'Field must not be empty.')
return _not_empty
def ip_address(form, field):
if not re.match(r'^([0-9]{1,3}\.){3}[0-9]{1,3}$', field.data):
raise ValidationError(u'Invalid ip address.')
__all__ = ('ValidationError', 'email', 'length', 'url', 'not_empty', 'ip_address')
| """
wtforms.validators
~~~~~~~~~~~~~~~~~~
TODO
:copyright: 2007-2008 by James Crasta, Thomas Johansson.
:license: MIT, see LICENSE.txt for details.
"""
import re
class ValidationError(ValueError):
pass
def email(form, field):
if not re.match(r'^.+@[^.].+\.[a-z]{2,4}$', field.data, re.IGNORECASE): # XXX better email regex?
raise ValidationError(u'Invalid email address.')
def length(message=None, min=-1, max=None):
fmt_args = {'min': min, 'max': max}
def _length(form, field):
L = field.data and len(field.data) or 0
if L < min:
raise ValidationError((message or u'Must be at least %(min)i characters.') % fmt_args)
elif max is not None and L > max:
raise ValidationError((message or u'May not be longer than %(max)i characters.') % fmt_args)
return _length
def url(allow_blank=False):
def _url(form, field):
if allow_blank and not field.data:
return
match = re.match(r'[a-z]+://.*', field.data, re.I)
if not match:
raise ValidationError(u'Is not a valid URL.')
return _url
def not_empty(message=None):
def _not_empty(form, field):
if not field.data or not field.data.strip():
raise ValidationError(message or u'Field must not be empty.')
return _not_empty
def ip_address(form, field):
if not re.match(r'^([0-9]{1,3}\.){3}[0-9]{1,3}$', field.data):
raise ValidationError(u'Invalid email address.')
__all__ = ('ValidationError', 'email', 'length', 'url', 'not_empty')
| bsd-3-clause | Python |
bbe4391154f989140cd8b3252c63bc7f1bbd232b | Disable screenshot_sync_tests on Mac. For real, this time. | PeterWangIntel/chromium-crosswalk,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,chuan9/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,M4sse/chromium.src,Just-D/chromium-1,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,PeterWangIntel/chromium-crosswalk,ltilve/chromium,Chilledheart/chromium,markYoungH/chromium.src,ltilve/chromium,jaruba/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,Fireblend/chromium-crosswalk,jaruba/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,markYoungH/chromium.src,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,Chilledheart/chromium,TheTypoMaster/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk,axinging/chromium-crosswalk,M4sse/chromium.src,dushu1203/chromium.src,bright-sparks/chromium-spacewalk,axinging/chromium-crosswalk,Just-D/chromium-1,TheTypoMaster/chromium-crosswalk,jaruba/chromium.src,Jonekee/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,ondra-novak/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,Jonekee/chromium.src,TheTypoMaster/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,M4sse/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,ltilve/chromium,ondra-novak/chromium.src,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,dushu1203/chromium.src,fujunwei/chromium-crosswalk,krieger-od/nwjs_chromium.src,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,dushu1203/chromium.src,markYoungH/chromium.src,Pluto-tv/chromium-crosswalk,Jonekee/chromium.src,dednal/chromium.src,krieger-od/nwjs_chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,hgl888/chromium-crosswalk,ltilve/chromium,dednal/chromium.src,markYoungH/chromium.src,dednal/chromium.src,bright-sparks/chromium-spacewalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,dushu1203/chromium.src,Jonekee/chromium.src,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,Fireblend/chromium-crosswalk,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,markYoungH/chromium.src,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,Chilledheart/chromium,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk,Just-D/chromium-1,fujunwei/chromium-crosswalk,jaruba/chromium.src,M4sse/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,Just-D/chromium-1,M4sse/chromium.src,dushu1203/chromium.src,krieger-od/nwjs_chromium.src,markYoungH/chromium.src,fujunwei/chromium-crosswalk,ltilve/chromium,mohamed--abdel-maksoud/chromium.src,dushu1203/chromium.src,Pluto-tv/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Pluto-tv/chromium-crosswalk,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,littlstar/chromium.src,Just-D/chromium-1,axinging/chromium-crosswalk,fujunwei/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,littlstar/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,fujunwei/chromium-crosswalk,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,dednal/chromium.src,fujunwei/chromium-crosswalk,Chilledheart/chromium,M4sse/chromium.src,markYoungH/chromium.src,bright-sparks/chromium-spacewalk,jaruba/chromium.src,bright-sparks/chromium-spacewalk,Jonekee/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk,markYoungH/chromium.src,krieger-od/nwjs_chromium.src,mohamed--abdel-maksoud/chromium.src,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,axinging/chromium-crosswalk,ltilve/chromium,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,dednal/chromium.src,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,Pluto-tv/chromium-crosswalk,Chilledheart/chromium,Pluto-tv/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,M4sse/chromium.src,Jonekee/chromium.src,M4sse/chromium.src,TheTypoMaster/chromium-crosswalk,ondra-novak/chromium.src,ltilve/chromium,PeterWangIntel/chromium-crosswalk,jaruba/chromium.src,littlstar/chromium.src,crosswalk-project/chromium-crosswalk-efl,crosswalk-project/chromium-crosswalk-efl,Just-D/chromium-1,Fireblend/chromium-crosswalk,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,hgl888/chromium-crosswalk-efl,bright-sparks/chromium-spacewalk,mohamed--abdel-maksoud/chromium.src,fujunwei/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,M4sse/chromium.src,ondra-novak/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,axinging/chromium-crosswalk,hgl888/chromium-crosswalk-efl,littlstar/chromium.src,hgl888/chromium-crosswalk-efl,chuan9/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,hgl888/chromium-crosswalk-efl,Just-D/chromium-1,Fireblend/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,ondra-novak/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,hgl888/chromium-crosswalk-efl,ltilve/chromium,jaruba/chromium.src,jaruba/chromium.src,ondra-novak/chromium.src,dushu1203/chromium.src,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl | content/test/gpu/gpu_tests/screenshot_sync.py | content/test/gpu/gpu_tests/screenshot_sync.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
data_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
@test.Disabled('mac')
class _ScreenshotSyncValidator(page_test.PageTest):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
test_success = tab.EvaluateJavaScript('window.__testSuccess')
if not test_success:
message = tab.EvaluateJavaScript('window.__testMessage')
raise page_test.Failure(message)
@test.Disabled('mac')
class ScreenshotSyncPage(page.Page):
def __init__(self, page_set, base_dir):
super(ScreenshotSyncPage, self).__init__(
url='file://screenshot_sync.html',
page_set=page_set,
base_dir=base_dir,
name='ScreenshotSync')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'window.__testComplete', timeout_in_seconds=120)
@test.Disabled('mac')
class ScreenshotSyncProcess(test.Test):
"""Tests that screenhots are properly synchronized with the frame one which
they were requested"""
test = _ScreenshotSyncValidator
def CreateExpectations(self, page_set):
return expectations.ScreenshotSyncExpectations()
def CreatePageSet(self, options):
ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
return ps
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
data_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
class _ScreenshotSyncValidator(page_test.PageTest):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
test_success = tab.EvaluateJavaScript('window.__testSuccess')
if not test_success:
message = tab.EvaluateJavaScript('window.__testMessage')
raise page_test.Failure(message)
@test.Disabled('mac')
class ScreenshotSyncPage(page.Page):
def __init__(self, page_set, base_dir):
super(ScreenshotSyncPage, self).__init__(
url='file://screenshot_sync.html',
page_set=page_set,
base_dir=base_dir,
name='ScreenshotSync')
self.user_agent_type = 'desktop'
def RunNavigateSteps(self, action_runner):
action_runner.NavigateToPage(self)
action_runner.WaitForJavaScriptCondition(
'window.__testComplete', timeout_in_seconds=120)
class ScreenshotSyncProcess(test.Test):
"""Tests that screenhots are properly synchronized with the frame one which
they were requested"""
test = _ScreenshotSyncValidator
def CreateExpectations(self, page_set):
return expectations.ScreenshotSyncExpectations()
def CreatePageSet(self, options):
ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
return ps
| bsd-3-clause | Python |
400c463097d9454a2d3780f2266186d97b2fa7fc | Create __init__.py | dfm/python-finufft,dfm/python-finufft,dfm/python-finufft | finufft/__init__.py | finufft/__init__.py | # -*- coding: utf-8 -*-
#
# Copyright 2017 Daniel Foreman-Mackey
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
__version__ = "0.0.1.dev0"
try:
__FINUFFT_SETUP__
except NameError:
__FINUFFT_SETUP__ = False
if not __FINUFFT_SETUP__:
__all__ = ["nufft1d1", "nufft1d2", "nufft1d3"]
from .interface import (
nufft1d1, nufft1d2, nufft1d3,
)
| # -*- coding: utf-8 -*-
__version__ = "0.0.1.dev0"
try:
__FINUFFT_SETUP__
except NameError:
__FINUFFT_SETUP__ = False
if not __FINUFFT_SETUP__:
__all__ = ["nufft1d1", "nufft1d2", "nufft1d3"]
from .interface import (
nufft1d1, nufft1d2, nufft1d3,
)
| apache-2.0 | Python |
b7d65281f83642a5b2563f48a1668fe869529c0b | comment out Sefton election id | DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_sefton.py | polling_stations/apps/data_collection/management/commands/import_sefton.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E08000014'
addresses_name = 'Democracy_Club__04May2017 Sefton.tsv'
stations_name = 'Democracy_Club__04May2017 Sefton.tsv'
elections = [
'mayor.liverpool-city-ca.2017-05-04',
#'parl.2017-06-08'
]
csv_delimiter = '\t'
| from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E08000014'
addresses_name = 'Democracy_Club__04May2017 Sefton.tsv'
stations_name = 'Democracy_Club__04May2017 Sefton.tsv'
elections = [
'mayor.liverpool-city-ca.2017-05-04',
'parl.2017-06-08'
]
csv_delimiter = '\t'
| bsd-3-clause | Python |
c702a996436988550f6aaef606be6371db17a136 | split context factory into two classes, a new one when opennsa just performs https requests | NORDUnet/opennsa,jab1982/opennsa,NORDUnet/opennsa,jab1982/opennsa,NORDUnet/opennsa | opennsa/ctxfactory.py | opennsa/ctxfactory.py | """
SSL/TLS context definition.
Most of this code is borrowed from the SGAS 3.X LUTS codebase.
NORDUnet holds the copyright for SGAS 3.X LUTS and OpenNSA.
"""
import os
from OpenSSL import SSL
class RequestContextFactory(object):
"""
Context Factory for issuing requests to SSL/TLS services without having
a client certificate.
"""
def __init__(self, certificate_dir, verify):
self.certificate_dir = certificate_dir
self.verify = verify
self.ctx = None
def getContext(self):
if self.ctx is not None:
return self.ctx
else:
self.ctx = self._createContext()
return self.ctx
def _createContext(self):
def verify_callback(conn, x509, error_number, error_depth, allowed):
# just return what openssl thinks is right
if self.verify:
return allowed # return what openssl thinks is right
else:
return 1 # allow everything which has a cert
ctx = SSL.Context(SSL.TLSv1_METHOD) # only tls v1 (its almost 2012, should be okay
ctx.set_verify(SSL.VERIFY_PEER, verify_callback)
calist = [ ca for ca in os.listdir(self.certificate_dir) if ca.endswith('.0') ]
for ca in calist:
# openssl wants absolute paths
ca = os.path.join(self.certificate_dir, ca)
ctx.load_verify_locations(ca)
return ctx
class ContextFactory(RequestContextFactory):
"""
Full context factory with private key and cert. When running service
over SSL/TLS.
"""
def __init__(self, private_key_path, public_key_path, certificate_dir, verify):
super(RequestContextFactory, self).__init__(certificate_dir, verify)
self.private_key_path = private_key_path
self.public_key_path = public_key_path
def _createContext(self):
ctx = super(RequestContextFactory, self).createContext()
ctx.use_privatekey_file(self.private_key_path)
ctx.use_certificate_file(self.public_key_path)
ctx.check_privatekey() # sanity check
return ctx
| """
SSL/TLS context definition.
Most of this code is borrowed from the SGAS 3.X LUTS codebase.
NORDUnet holds the copyright for SGAS 3.X LUTS and OpenNSA.
"""
import os
from OpenSSL import SSL
class ContextFactory:
def __init__(self, private_key_path, public_key_path, certificate_dir, verify=True):
self.private_key_path = private_key_path
self.public_key_path = public_key_path
self.certificate_dir = certificate_dir
self.verify = verify
self.ctx = None
def getContext(self):
if self.ctx is not None:
return self.ctx
else:
self.ctx = self._createContext()
return self.ctx
def _createContext(self):
ctx = SSL.Context(SSL.TLSv1_METHOD) # only tls v1 (its almost 2012, should be okay
ctx.use_privatekey_file(self.private_key_path)
ctx.use_certificate_file(self.public_key_path)
ctx.check_privatekey() # sanity check
def verify_callback(conn, x509, error_number, error_depth, allowed):
# just return what openssl thinks is right
if self.verify:
return allowed # return what openssl thinks is right
else:
return 1 # allow everything which has a cert
ctx.set_verify(SSL.VERIFY_PEER, verify_callback)
calist = [ ca for ca in os.listdir(self.certificate_dir) if ca.endswith('.0') ]
for ca in calist:
# openssl wants absolute paths
ca = os.path.join(self.certificate_dir, ca)
ctx.load_verify_locations(ca)
return ctx
| bsd-3-clause | Python |
2822df8d958c5497e155843585773d043a7c9b61 | Prepare v3.3.19.dev | Flexget/Flexget,Flexget/Flexget,Flexget/Flexget,Flexget/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.3.19.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '3.3.18'
| mit | Python |
f10c641074674474ea87bd45e70170e8c4f6ae1f | Prepare v2.0.51.dev | poulpito/Flexget,jacobmetrick/Flexget,sean797/Flexget,OmgOhnoes/Flexget,gazpachoking/Flexget,Danfocus/Flexget,crawln45/Flexget,tobinjt/Flexget,malkavi/Flexget,qk4l/Flexget,qk4l/Flexget,LynxyssCZ/Flexget,tobinjt/Flexget,tobinjt/Flexget,dsemi/Flexget,ianstalk/Flexget,malkavi/Flexget,jacobmetrick/Flexget,oxc/Flexget,JorisDeRieck/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,drwyrm/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,jawilson/Flexget,Flexget/Flexget,tarzasai/Flexget,ianstalk/Flexget,crawln45/Flexget,crawln45/Flexget,Flexget/Flexget,jawilson/Flexget,OmgOhnoes/Flexget,poulpito/Flexget,JorisDeRieck/Flexget,gazpachoking/Flexget,oxc/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,malkavi/Flexget,ianstalk/Flexget,oxc/Flexget,poulpito/Flexget,sean797/Flexget,tarzasai/Flexget,Flexget/Flexget,qk4l/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,Flexget/Flexget,tobinjt/Flexget,Danfocus/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,crawln45/Flexget,Danfocus/Flexget,OmgOhnoes/Flexget,drwyrm/Flexget,malkavi/Flexget,sean797/Flexget,jacobmetrick/Flexget,jawilson/Flexget,tarzasai/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.0.51.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '2.0.50'
| mit | Python |
ec1b26a0fa246cc1c89dff3a2cb710c561aac2b6 | Prepare v1.2.476.dev | tobinjt/Flexget,drwyrm/Flexget,jacobmetrick/Flexget,ianstalk/Flexget,tobinjt/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,malkavi/Flexget,poulpito/Flexget,poulpito/Flexget,qvazzler/Flexget,poulpito/Flexget,dsemi/Flexget,LynxyssCZ/Flexget,Flexget/Flexget,sean797/Flexget,malkavi/Flexget,crawln45/Flexget,antivirtel/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,LynxyssCZ/Flexget,oxc/Flexget,jacobmetrick/Flexget,Danfocus/Flexget,tobinjt/Flexget,tarzasai/Flexget,gazpachoking/Flexget,oxc/Flexget,ianstalk/Flexget,JorisDeRieck/Flexget,antivirtel/Flexget,OmgOhnoes/Flexget,Flexget/Flexget,Danfocus/Flexget,jawilson/Flexget,crawln45/Flexget,crawln45/Flexget,Pretagonist/Flexget,antivirtel/Flexget,JorisDeRieck/Flexget,qk4l/Flexget,jawilson/Flexget,tarzasai/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,sean797/Flexget,jacobmetrick/Flexget,qk4l/Flexget,OmgOhnoes/Flexget,tarzasai/Flexget,Pretagonist/Flexget,qvazzler/Flexget,qk4l/Flexget,drwyrm/Flexget,dsemi/Flexget,malkavi/Flexget,LynxyssCZ/Flexget,ianstalk/Flexget,sean797/Flexget,gazpachoking/Flexget,tobinjt/Flexget,Flexget/Flexget,Danfocus/Flexget,jawilson/Flexget,Pretagonist/Flexget,drwyrm/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,qvazzler/Flexget,crawln45/Flexget,malkavi/Flexget,oxc/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.476.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.475'
| mit | Python |
782c3ebbf0c0028bc212678169accce2e61d572d | Prepare v1.2.493.dev | OmgOhnoes/Flexget,JorisDeRieck/Flexget,qvazzler/Flexget,oxc/Flexget,Flexget/Flexget,Flexget/Flexget,qk4l/Flexget,drwyrm/Flexget,tarzasai/Flexget,antivirtel/Flexget,Flexget/Flexget,crawln45/Flexget,tarzasai/Flexget,jawilson/Flexget,tobinjt/Flexget,drwyrm/Flexget,jacobmetrick/Flexget,tarzasai/Flexget,gazpachoking/Flexget,ianstalk/Flexget,jacobmetrick/Flexget,poulpito/Flexget,qk4l/Flexget,jacobmetrick/Flexget,jawilson/Flexget,dsemi/Flexget,malkavi/Flexget,gazpachoking/Flexget,ianstalk/Flexget,OmgOhnoes/Flexget,Danfocus/Flexget,jawilson/Flexget,LynxyssCZ/Flexget,sean797/Flexget,drwyrm/Flexget,qvazzler/Flexget,crawln45/Flexget,malkavi/Flexget,sean797/Flexget,jawilson/Flexget,Pretagonist/Flexget,poulpito/Flexget,oxc/Flexget,LynxyssCZ/Flexget,tobinjt/Flexget,malkavi/Flexget,crawln45/Flexget,LynxyssCZ/Flexget,sean797/Flexget,qk4l/Flexget,ianstalk/Flexget,tobinjt/Flexget,JorisDeRieck/Flexget,dsemi/Flexget,JorisDeRieck/Flexget,Danfocus/Flexget,malkavi/Flexget,antivirtel/Flexget,Flexget/Flexget,crawln45/Flexget,Danfocus/Flexget,JorisDeRieck/Flexget,tobinjt/Flexget,OmgOhnoes/Flexget,Pretagonist/Flexget,qvazzler/Flexget,Danfocus/Flexget,LynxyssCZ/Flexget,oxc/Flexget,dsemi/Flexget,antivirtel/Flexget,Pretagonist/Flexget,poulpito/Flexget | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.493.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.492'
| mit | Python |
e75ea01cdcbc283f26799fd8a9690b8fa28f2fe1 | Fix linter issues | juju/python-libjuju,juju/python-libjuju | juju/charmhub.py | juju/charmhub.py | from .client import client
from .errors import JujuError
class CharmHub:
def __init__(self, model):
self.model = model
async def info(self, name, channel=None):
"""info displays detailed information about a CharmHub charm. The charm
can be specified by the exact name.
Channel is a hint for providing the metadata for a given channel.
Without the channel hint then only the default release will have the
metadata.
"""
if not name:
raise JujuError("name expected")
if channel is None:
channel = ""
facade = self._facade()
return await facade.Info(tag="application-{}".format(name), channel=channel)
async def find(self, query, category=None, channel=None,
charm_type=None, platforms=None, publisher=None,
relation_requires=None, relation_provides=None):
"""find queries the CharmHub store for available charms or bundles.
"""
if charm_type is not None and charm_type not in ["charm", "bundle"]:
raise JujuError("expected either charm or bundle for charm_type")
facade = self._facade()
return await facade.Find(query=query, category=category, channel=channel,
type_=charm_type, platforms=platforms, publisher=publisher,
relation_provides=relation_provides, relation_requires=relation_requires)
def _facade(self):
return client.CharmHubFacade.from_connection(self.model.connection())
| from .client import client
from .errors import JujuError
class CharmHub:
def __init__(self, model):
self.model = model
async def info(self, name, channel=None):
"""info displays detailed information about a CharmHub charm. The charm
can be specified by the exact name.
Channel is a hint for providing the metadata for a given channel.
Without the channel hint then only the default release will have the
metadata.
"""
if not name:
raise JujuError("name expected")
if channel is None:
channel = ""
facade = self._facade()
return await facade.Info(tag="application-{}".format(name), channel=channel)
async def find(self, query, category=None, channel=None,
charm_type=None, platforms=None, publisher=None,
relation_requires=None, relation_provides=None):
"""find queries the CharmHub store for available charms or bundles.
"""
if charm_type is not None and charm_type not in ["charm", "bundle"]:
raise JujuError("expected either charm or bundle for charm_type")
facade = self._facade()
return await facade.Find(query=query, category=category, channel=channel,
type_=charm_type, platforms=platforms, publisher=publisher,
relation_provides=relation_provides, relation_requires=relation_requires)
def _facade(self):
return client.CharmHubFacade.from_connection(self.model.connection())
| apache-2.0 | Python |
fddc7e09bcebf9b4875906ad03e58699237b13be | Enable filtering OpenStack package by tenant. | opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/waldur-mastermind | src/nodeconductor_assembly_waldur/packages/filters.py | src/nodeconductor_assembly_waldur/packages/filters.py | import django_filters
from nodeconductor.core.filters import UUIDFilter
from . import models
class PackageTemplateFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_type='icontains')
settings_uuid = UUIDFilter(name='service_settings__uuid')
class Meta(object):
model = models.PackageTemplate
fields = ('name', 'settings_uuid',)
class OpenStackPackageFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_type='icontains')
customer = UUIDFilter(name='tenant__service_project_link__project__customer__uuid')
project = UUIDFilter(name='tenant__service_project_link__project__uuid')
tenant = UUIDFilter(name='tenant__uuid')
class Meta(object):
model = models.OpenStackPackage
fields = ('name', 'customer', 'project', 'tenant')
| import django_filters
from nodeconductor.core.filters import UUIDFilter
from . import models
class PackageTemplateFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_type='icontains')
settings_uuid = UUIDFilter(name='service_settings__uuid')
class Meta(object):
model = models.PackageTemplate
fields = ('name', 'settings_uuid',)
class OpenStackPackageFilter(django_filters.FilterSet):
name = django_filters.CharFilter(lookup_type='icontains')
customer = UUIDFilter(name='tenant__service_project_link__project__customer')
project = UUIDFilter(name='tenant__service_project_link__project')
class Meta(object):
model = models.OpenStackPackage
fields = ('name', 'customer', 'project')
| mit | Python |
f5d4f543cc7265433bf6040335b2f6d592b52b91 | Add import os in lmod to fix regression | cmd-ntrf/jupyter-lmod,cmd-ntrf/jupyter-lmod,cmd-ntrf/jupyter-lmod | lmod/__init__.py | lmod/__init__.py | import os # require by lmod output evaluated by exec()
from functools import partial
from os import environ
from subprocess import Popen, PIPE
LMOD_SYSTEM_NAME = environ.get('LMOD_SYSTEM_NAME', '')
def module(command, *args):
cmd = (environ['LMOD_CMD'], 'python', '--terse', command)
result = Popen(cmd + args, stdout=PIPE, stderr=PIPE)
if command in ('load', 'unload', 'restore', 'save'):
exec(result.stdout.read())
return result.stderr.read().decode()
def avail():
string = module('avail')
modules = []
for entry in string.split():
if not (entry.startswith('/') or entry.endswith('/')):
modules.append(entry)
return modules
def list():
string = module('list').strip()
if string != "No modules loaded":
return string.split()
return []
def savelist(system=LMOD_SYSTEM_NAME):
names = module('savelist').split()
if system:
suffix = '.{}'.format(system)
n = len(suffix)
names = [name[:-n] for name in names if name.endswith(suffix)]
return names
show = partial(module, 'show')
load = partial(module, 'load')
unload = partial(module, 'unload')
restore = partial(module, 'restore')
save = partial(module, 'save')
| from functools import partial
from os import environ
from subprocess import Popen, PIPE
LMOD_SYSTEM_NAME = environ.get('LMOD_SYSTEM_NAME', '')
def module(command, *args):
cmd = (environ['LMOD_CMD'], 'python', '--terse', command)
result = Popen(cmd + args, stdout=PIPE, stderr=PIPE)
if command in ('load', 'unload', 'restore', 'save'):
exec(result.stdout.read())
return result.stderr.read().decode()
def avail():
string = module('avail')
modules = []
for entry in string.split():
if not (entry.startswith('/') or entry.endswith('/')):
modules.append(entry)
return modules
def list():
string = module('list').strip()
if string != "No modules loaded":
return string.split()
return []
def savelist(system=LMOD_SYSTEM_NAME):
names = module('savelist').split()
if system:
suffix = '.{}'.format(system)
n = len(suffix)
names = [name[:-n] for name in names if name.endswith(suffix)]
return names
show = partial(module, 'show')
load = partial(module, 'load')
unload = partial(module, 'unload')
restore = partial(module, 'restore')
save = partial(module, 'save')
| mit | Python |
6aadb1eae7d0a69556fb84a7404d59edd416d55c | Add logout confirmation message | Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters,Kromey/fbxnano,Kromey/fbxnano,Kromey/akwriters,Kromey/akwriters,Kromey/akwriters | passwordless/views.py | passwordless/views.py | from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
from django.shortcuts import render,redirect
from django.views import View
from django.views.generic.edit import FormView
from . import forms
from . import models
# Create your views here.
class LoginView(FormView):
template_name = 'passwordless/login.html'
form_class = forms.LoginForm
success_url = '/'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
form.send_email()
return super().form_valid(form)
class RegisterView(LoginView):
template_name = 'passwordless/register.html'
form_class = forms.RegistrationForm
def form_valid(self, form):
form.create_user()
return super().form_valid(form)
class AuthnView(View):
def get(self, request, token):
user = authenticate(token=token)
if user is not None:
login(request, user)
return redirect('chat:index')
else:
if request.user.is_authenticated:
messages.info(request, 'You are already authenticated on this site')
return redirect('chat:index')
else:
return render(request, 'passwordless/invalid.html')
class LogoutView(View):
def get(self, request):
logout(request)
messages.success(request, 'You are now logged out of the site')
return redirect('index')
| from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth import authenticate,login,logout
from django.contrib import messages
from django.shortcuts import render,redirect
from django.views import View
from django.views.generic.edit import FormView
from . import forms
from . import models
# Create your views here.
class LoginView(FormView):
template_name = 'passwordless/login.html'
form_class = forms.LoginForm
success_url = '/'
def get_form_kwargs(self):
kwargs = super().get_form_kwargs()
kwargs['request'] = self.request
return kwargs
def form_valid(self, form):
form.send_email()
return super().form_valid(form)
class RegisterView(LoginView):
template_name = 'passwordless/register.html'
form_class = forms.RegistrationForm
def form_valid(self, form):
form.create_user()
return super().form_valid(form)
class AuthnView(View):
def get(self, request, token):
user = authenticate(token=token)
if user is not None:
login(request, user)
return redirect('chat:index')
else:
if request.user.is_authenticated:
messages.info(request, 'You are already authenticated on this site')
return redirect('chat:index')
else:
return render(request, 'passwordless/invalid.html')
class LogoutView(View):
def get(self, request):
logout(request)
return redirect('index')
| mit | Python |
8c5935a369190d5e712f7337c72d898caef4cee2 | Fix value error when unpacking betclic lines that have no odds | dmartin35/pronosfoot,dmartin35/pronosfoot,dmartin35/pronosfoot | external/odds/betclic/api.py | external/odds/betclic/api.py | import bs4
import requests
from collections import namedtuple
from memoize import memoize
import contextlib
from external.odds.betclic import TEAM_MAP
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
BETCLIC_L1_URL = 'https://www.betclic.fr/football-s1/ligue-1-uber-eats-c4'
Odds = namedtuple('Odds', ['win', 'draw','lose'])
def _download_odds():
"""
Download html from
:return:
"""
resp = requests.get(BETCLIC_L1_URL, headers={}, verify=False)
resp.raise_for_status()
return resp.content.decode('utf-8')
def _parse_odds(html):
"""
Parse the odds and returns
:param html: full html content of the downloaded page - to parse
:return: list of (teamA, teamB, odds win, odds draw, odds lose)
"""
soup = bs4.BeautifulSoup(html, 'html.parser')
# odds = soup.select('div.match-entry')
odds = soup.find_all('a', class_='cardEvent')
raw = []
for odd in odds:
with contextlib.suppress(Exception):
teams = odd.select('.scoreboard_wrapper')[0]
teams = teams.select('.scoreboard_contestantLabel')
teams = [t.string.strip() for t in teams]
match_odds = odd.select('.market_odds')[0]
match_odds = match_odds.select('.oddValue')
wdl = [tag.string for tag in match_odds]
raw.append(tuple(teams + wdl))
return raw
def _get_odds_raw():
"""
returns the list of raw odds from FDJ - parions sport page - for Ligue1
fetches LFP html page then parse it to extract as many odds as possible
:return: list of (teamA, teamB, odds win, odds draw, odds lose)
"""
return _parse_odds(_download_odds())
def convert_team_name(name):
"""
convert external team name into internal team name
"""
custom = TEAM_MAP.get(name)
return custom if custom else name.title()
@memoize(timeout=300)
def get_odds():
"""
formats the raw structure into dict with team name matching internal names
:return: dict of (teamA, teamB): Odds(win, draw, lose)
"""
raw = _get_odds_raw()
odds = {}
for line in raw:
try:
team_a, team_b, win, draw, lose = line
team_a = convert_team_name(team_a)
team_b = convert_team_name(team_b)
odds[(team_a, team_b)] = Odds(win, draw, lose)
except ValueError:
pass # ignore in case of unpacking with value error for some lines
return odds
| import bs4
import requests
from collections import namedtuple
from memoize import memoize
import contextlib
from external.odds.betclic import TEAM_MAP
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
BETCLIC_L1_URL = 'https://www.betclic.fr/football-s1/ligue-1-uber-eats-c4'
Odds = namedtuple('Odds', ['win', 'draw','lose'])
def _download_odds():
"""
Download html from
:return:
"""
resp = requests.get(BETCLIC_L1_URL, headers={}, verify=False)
resp.raise_for_status()
return resp.content.decode('utf-8')
def _parse_odds(html):
"""
Parse the odds and returns
:param html: full html content of the downloaded page - to parse
:return: list of (teamA, teamB, odds win, odds draw, odds lose)
"""
soup = bs4.BeautifulSoup(html, 'html.parser')
# odds = soup.select('div.match-entry')
odds = soup.find_all('a', class_='cardEvent')
raw = []
for odd in odds:
with contextlib.suppress(Exception):
teams = odd.select('.scoreboard_wrapper')[0]
teams = teams.select('.scoreboard_contestantLabel')
teams = [t.string.strip() for t in teams]
match_odds = odd.select('.market_odds')[0]
match_odds = match_odds.select('.oddValue')
wdl = [tag.string for tag in match_odds]
raw.append(tuple(teams + wdl))
return raw
def _get_odds_raw():
"""
returns the list of raw odds from FDJ - parions sport page - for Ligue1
fetches LFP html page then parse it to extract as many odds as possible
:return: list of (teamA, teamB, odds win, odds draw, odds lose)
"""
return _parse_odds(_download_odds())
def convert_team_name(name):
"""
convert external team name into internal team name
"""
custom = TEAM_MAP.get(name)
return custom if custom else name.title()
@memoize(timeout=300)
def get_odds():
"""
formats the raw structure into dict with team name matching internal names
:return: dict of (teamA, teamB): Odds(win, draw, lose)
"""
raw = _get_odds_raw()
odds = {}
for line in raw:
team_a, team_b, win, draw, lose = line
team_a = convert_team_name(team_a)
team_b = convert_team_name(team_b)
odds[(team_a, team_b)] = Odds(win, draw, lose)
return odds
| mit | Python |
c3e9be243f1da9c253302ee4eb8549d8d1b00f69 | Update __init__.py | h2non/filetype.py | filetype/__init__.py | filetype/__init__.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .filetype import * # noqa
from .helpers import * # noqa
from .match import * # noqa
# Current package semver version
__version__ = version = '1.2.0'
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from .filetype import * # noqa
from .helpers import * # noqa
from .match import * # noqa
# Current package semver version
__version__ = version = '1.1.0'
| mit | Python |
4794e23607ff0fb18353270e3e5f7090c331a90c | Fix missing import | JustinShenk/party-pi,JustinShenk/party-pi,JustinShenk/party-pi | partypi/utils/tweeter.py | partypi/utils/tweeter.py | import os
import tweepy
def twitter_api():
consumer_key = None
consumer_secret = None
access_token = None
access_token_secret = None
try:
from credentials import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
except:
try:
consumer_key = os.environ.get('TWITTER_KEY')
consumer_secret = os.environ.get('TWITTER_SECRET')
access_token = os.environ.get('TWITTER_TOKEN')
access_token_secret = os.environ.get('TWITTER_TOKEN_SECRET')
except:
print("No twitter auth found")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def tweet_message(message):
api = twitter_api()
try:
api.update_status(status=message)
print("Tweeted: {}".format(message))
except tweepy.TweepError as e:
print(e.reason)
def tweet_image(filename, message):
api = twitter_api()
api.update_with_media(filename, status=message)
print("Tweeted: {}".format(message))
if __name__ == '__main__':
tweet_image('PartyPi.png', 'testing the API!')
| import tweepy
def twitter_api():
consumer_key = None
consumer_secret = None
access_token = None
access_token_secret = None
try:
from credentials import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
except:
try:
consumer_key = os.environ.get('TWITTER_KEY')
consumer_secret = os.environ.get('TWITTER_SECRET')
access_token = os.environ.get('TWITTER_TOKEN')
access_token_secret = os.environ.get('TWITTER_TOKEN_SECRET')
except:
print("No twitter auth found")
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
return api
def tweet_message(message):
api = twitter_api()
try:
api.update_status(status=message)
print("Tweeted: {}".format(message))
except tweepy.TweepError as e:
print(e.reason)
def tweet_image(filename, message):
api = twitter_api()
api.update_with_media(filename, status=message)
print("Tweeted: {}".format(message))
if __name__ == '__main__':
tweet_image('PartyPi.png', 'testing the API!')
| mit | Python |
5d7f2f84600abcede94a0aaee087ef299cf740a6 | Add filter on the town field on the Farmer model | tm-kn/farmers-api | farmers_api/farmers/views.py | farmers_api/farmers/views.py | from rest_framework import viewsets
from .models import Farmer
from .serializers import FarmerSerializer
class FarmerViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Farmer.objects.all()
serializer_class = FarmerSerializer
filter_fields = ('town',)
| from rest_framework import viewsets
from .models import Farmer
from .serializers import FarmerSerializer
class FarmerViewSet(viewsets.ReadOnlyModelViewSet):
queryset = Farmer.objects.all()
serializer_class = FarmerSerializer
| bsd-2-clause | Python |
833ed3352c2e40923c167ddb41edba17db292bb7 | Allow mongo returner to update a password protected mongo database. | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/returners/mongo_return.py | salt/returners/mongo_return.py | '''
Return data to a mongodb server
This is the default interface for returning data for the butter statd subsytem
'''
import logging
import pymongo
log = logging.getLogger(__name__)
__opts__ = {
'mongo.host': 'salt',
'mongo.port': 27017,
'mongo.db': 'salt',
'mongo.user': '',
'mongo.password': '',
}
def returner(ret):
'''
Return data to a mongodb server
'''
conn = pymongo.Connection(
__opts__['mongo.host'],
__opts__['mongo.port'],
)
db = conn[__opts__['mongo.db']]
user = __opts__.get('mongo.user')
password = __opts__.get('mongo.password')
if user and password:
db.authenticate(user, password)
col = db[ret['id']]
back = {}
if type(ret['return']) == type(dict()):
for key in ret['return']:
back[key.replace('.', '-')] = ret['return'][key]
else:
back = ret['return']
log.debug( back )
col.insert({ret['jid']: back})
| '''
Return data to a mongodb server
This is the default interface for returning data for the butter statd subsytem
'''
import logging
import pymongo
log = logging.getLogger(__name__)
__opts__ = {
'mongo.host': 'salt',
'mongo.port': 27017,
'mongo.db': 'salt',
}
def returner(ret):
'''
Return data to a mongodb server
'''
conn = pymongo.Connection(
__opts__['mongo.host'],
__opts__['mongo.port'],
)
db = conn[__opts__['mongo.db']]
col = db[ret['id']]
back = {}
if type(ret['return']) == type(dict()):
for key in ret['return']:
back[key.replace('.', '-')] = ret['return'][key]
else:
back = ret['return']
log.debug( back )
col.insert({ret['jid']: back})
| apache-2.0 | Python |
4fbac792b89102caa5414db18d1ec39d1ef75a98 | Fix One-eyed Cheat | NightKev/fireplace,butozerca/fireplace,Meerkov/fireplace,smallnamespace/fireplace,Ragowit/fireplace,amw2104/fireplace,amw2104/fireplace,liujimj/fireplace,butozerca/fireplace,smallnamespace/fireplace,oftc-ftw/fireplace,liujimj/fireplace,beheh/fireplace,oftc-ftw/fireplace,jleclanche/fireplace,Ragowit/fireplace,Meerkov/fireplace | fireplace/cards/gvg/rogue.py | fireplace/cards/gvg/rogue.py | from ..utils import *
##
# Minions
# Goblin Auto-Barber
class GVG_023:
action = buffWeapon("GVG_023a")
# One-eyed Cheat
class GVG_025:
def OWN_MINION_SUMMON(self, minion):
if minion.race == Race.PIRATE and minion != self:
self.stealth = True
# Iron Sensei
class GVG_027:
def OWN_TURN_END(self):
mechs = self.controller.field.filter(race=Race.MECHANICAL).exclude(self)
if mechs:
self.buff(random.choice(mechs), "GVG_027e")
# Trade Prince Gallywix
class GVG_028:
def CARD_PLAYED(self, player, card):
if player is not self.controller and card.type == CardType.SPELL:
if card.id != "GVG_028t":
player.opponent.give(card.id)
player.give("GVG_028t")
class GVG_028t:
def action(self):
self.controller.tempMana += 1
##
# Spells
# Tinker's Sharpsword Oil
class GVG_022:
action = buffWeapon("GVG_022a")
def action(self):
if self.controller.weapon:
self.buff(self.controller.weapon, "GVG_022a")
if self.controller.field:
self.buff(random.choice(self.controller.field), "GVG_022b")
##
# Weapons
# Cogmaster's Wrench
class GVG_024:
def atk(self, i):
if self.controller.field.filter(race=Race.MECHANICAL):
return i + 2
return i
| from ..utils import *
##
# Minions
# Goblin Auto-Barber
class GVG_023:
action = buffWeapon("GVG_023a")
# One-eyed Cheat
class GVG_025:
def OWN_MINION_SUMMON(self, player, minion):
if minion.race == Race.PIRATE and minion != self:
self.stealth = True
# Iron Sensei
class GVG_027:
def OWN_TURN_END(self):
mechs = self.controller.field.filter(race=Race.MECHANICAL).exclude(self)
if mechs:
self.buff(random.choice(mechs), "GVG_027e")
# Trade Prince Gallywix
class GVG_028:
def CARD_PLAYED(self, player, card):
if player is not self.controller and card.type == CardType.SPELL:
if card.id != "GVG_028t":
player.opponent.give(card.id)
player.give("GVG_028t")
class GVG_028t:
def action(self):
self.controller.tempMana += 1
##
# Spells
# Tinker's Sharpsword Oil
class GVG_022:
action = buffWeapon("GVG_022a")
def action(self):
if self.controller.weapon:
self.buff(self.controller.weapon, "GVG_022a")
if self.controller.field:
self.buff(random.choice(self.controller.field), "GVG_022b")
##
# Weapons
# Cogmaster's Wrench
class GVG_024:
def atk(self, i):
if self.controller.field.filter(race=Race.MECHANICAL):
return i + 2
return i
| agpl-3.0 | Python |
7837ef57c5dcdddc16ad353fa0c992c4403e22dd | change default model for compilation | BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH,BerlinUnited/NaoTH | Utils/py/BallDetection/RegressionNetwork/devil_code_generator1/compile.py | Utils/py/BallDetection/RegressionNetwork/devil_code_generator1/compile.py | #!/usr/bin/env python3
import argparse
import pickle
from pathlib import Path
from tensorflow.keras.models import load_model
from onbcg import keras_compile # can throw linter warnings, but python3 can handle imports like that
DATA_DIR = Path(Path(__file__).parent.parent.absolute() / "data").resolve()
CPP_DIR = Path(Path(__file__).parent.parent.absolute() / "cpp").resolve()
MODEL_DIR = Path(Path(__file__).parent.parent.absolute() / "data/best_models").resolve()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compile keras network to c++')
parser.add_argument('-b', '--database-path', dest='imgdb_path',
help='Path to the image database to use for training. '
'Default is imgdb.pkl in current folder.', default=str(DATA_DIR / 'imgdb.pkl'))
parser.add_argument('-m', '--model-path', dest='model_path',
help='Store the trained model using this path. Default is fy1500_conf.h5.',
default=str(MODEL_DIR / 'fy1500_conf.h5'))
parser.add_argument('-c', '--code-path', dest='code_path',
help='Store the c code in this file. Default is <model_name>.c.')
args = parser.parse_args()
if args.code_path is None:
# load the model to get the name
model = load_model(args.model_path)
print(model.name)
args.code_path = CPP_DIR / (model.name + ".cpp")
images = {}
with open(args.imgdb_path, "rb") as f:
images["mean"] = pickle.load(f)
images["images"] = pickle.load(f)
images["y"] = pickle.load(f)
keras_compile(images, args.model_path, args.code_path, unroll_level=2, arch="sse3")
| #!/usr/bin/env python3
import argparse
import pickle
from pathlib import Path
from tensorflow.keras.models import load_model
from onbcg import keras_compile # can throw linter warnings, but python3 can handle imports like that
DATA_DIR = Path(Path(__file__).parent.parent.absolute() / "data").resolve()
CPP_DIR = Path(Path(__file__).parent.parent.absolute() / "cpp").resolve()
MODEL_DIR = Path(Path(__file__).parent.parent.absolute() / "data/best_models").resolve()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Compile keras network to c++')
parser.add_argument('-b', '--database-path', dest='imgdb_path',
help='Path to the image database to use for training. '
'Default is imgdb.pkl in current folder.', default=str(DATA_DIR / 'imgdb.pkl'))
parser.add_argument('-m', '--model-path', dest='model_path',
help='Store the trained model using this path. Default is fy1500.h5.',
default=str(MODEL_DIR / 'fy1500.h5'))
parser.add_argument('-c', '--code-path', dest='code_path',
help='Store the c code in this file. Default is <model_name>.c.')
args = parser.parse_args()
if args.code_path is None:
# load the model to get the name
model = load_model(args.model_path)
print(model.name)
args.code_path = CPP_DIR / (model.name + ".cpp")
images = {}
with open(args.imgdb_path, "rb") as f:
images["mean"] = pickle.load(f)
images["images"] = pickle.load(f)
images["y"] = pickle.load(f)
keras_compile(images, args.model_path, args.code_path, unroll_level=2, arch="sse3")
| apache-2.0 | Python |
d8046ebe1974778149bb37ca710df63a1116c115 | Bump prerelease (4.0.1-a1) [ci skip] | mar10/pyftpsync | ftpsync/__init__.py | ftpsync/__init__.py | # -*- coding: utf-8 -*-
# Make version accessible as 'ftpsync.__version__'
# from ftpsync._version import __version__
"""
Package version number.
See https://www.python.org/dev/peps/pep-0440
Examples
Pre-releases (alpha, beta, release candidate):
'3.0.0a1', '3.0.0b1', '3.0.0rc1'
Final release:
'3.0.0'
Developmental release (to mark 3.0.0 as 'used'. Don't publish this):
'3.0.0-dev1'
"""
__version__ = "4.0.1-a1"
| # -*- coding: utf-8 -*-
# Make version accessible as 'ftpsync.__version__'
# from ftpsync._version import __version__
"""
Package version number.
See https://www.python.org/dev/peps/pep-0440
Examples
Pre-releases (alpha, beta, release candidate):
'3.0.0a1', '3.0.0b1', '3.0.0rc1'
Final release:
'3.0.0'
Developmental release (to mark 3.0.0 as 'used'. Don't publish this):
'3.0.0-dev1'
"""
__version__ = "4.0.0"
| mit | Python |
d4e03bfcbc6292d3a50237f95c9d67ba5d89a475 | Use usubscribe rather than popping the broadcaster | sahlinet/swampdragon,boris-savic/swampdragon,michael-k/swampdragon,denizs/swampdragon,aexeagmbh/swampdragon,jonashagstedt/swampdragon,jonashagstedt/swampdragon,d9pouces/swampdragon,boris-savic/swampdragon,sahlinet/swampdragon,aexeagmbh/swampdragon,jonashagstedt/swampdragon,bastianh/swampdragon,boris-savic/swampdragon,michael-k/swampdragon,faulkner/swampdragon,bastianh/swampdragon,seclinch/swampdragon,Manuel4131/swampdragon,michael-k/swampdragon,faulkner/swampdragon,seclinch/swampdragon,Manuel4131/swampdragon,Manuel4131/swampdragon,seclinch/swampdragon,sahlinet/swampdragon,denizs/swampdragon,d9pouces/swampdragon,aexeagmbh/swampdragon,denizs/swampdragon,bastianh/swampdragon,faulkner/swampdragon,d9pouces/swampdragon,h-hirokawa/swampdragon,h-hirokawa/swampdragon | swampdragon/pubsub_providers/redis_sub_provider.py | swampdragon/pubsub_providers/redis_sub_provider.py | import json
import tornadoredis.pubsub
import tornadoredis
from .base_provider import BaseProvider
class RedisSubProvider(BaseProvider):
def __init__(self):
self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client())
def close(self, broadcaster):
for channel in self._subscriber.subscribers:
if broadcaster in self._subscriber.subscribers[channel]:
self._subscriber.unsubscribe(channel, broadcaster)
def get_channel(self, base_channel, **channel_filter):
return self._construct_channel(base_channel, **channel_filter)
def subscribe(self, channels, broadcaster):
self._subscriber.subscribe(channels, broadcaster)
def unsubscribe(self, channels, broadcaster):
for channel in channels:
if broadcaster in self._subscriber.subscribers[channel]:
self._subscriber.subscribers[channel].pop(broadcaster)
def publish(self, channel, data):
if isinstance(data, dict):
data = json.dumps(data)
broadcasters = list(self._subscriber.subscribers[channel].keys())
if broadcasters:
for bc in broadcasters:
if not bc.session.is_closed:
bc.broadcast(broadcasters, data)
break
| import json
import tornadoredis.pubsub
import tornadoredis
from .base_provider import BaseProvider
class RedisSubProvider(BaseProvider):
def __init__(self):
self._subscriber = tornadoredis.pubsub.SockJSSubscriber(tornadoredis.Client())
def close(self, broadcaster):
for channel in self._subscriber.subscribers:
if broadcaster in self._subscriber.subscribers[channel]:
self._subscriber.subscribers[channel].pop(broadcaster)
def get_channel(self, base_channel, **channel_filter):
return self._construct_channel(base_channel, **channel_filter)
def subscribe(self, channels, broadcaster):
self._subscriber.subscribe(channels, broadcaster)
def unsubscribe(self, channels, broadcaster):
for channel in channels:
if broadcaster in self._subscriber.subscribers[channel]:
self._subscriber.subscribers[channel].pop(broadcaster)
def publish(self, channel, data):
if isinstance(data, dict):
data = json.dumps(data)
broadcasters = list(self._subscriber.subscribers[channel].keys())
if broadcasters:
for bc in broadcasters:
if not bc.session.is_closed:
bc.broadcast(broadcasters, data)
break
| bsd-3-clause | Python |
003089545b47e3fea262d334af94d9e592ec6852 | update overall styles | praekelt/molo-gem,praekelt/molo-gem,praekelt/molo-gem | gem/settings/dev.py | gem/settings/dev.py | from .base import * # noqa
ALLOWED_HOSTS = [
'localhost',
'.localhost',
'127.0.0.1'
]
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import * # noqa
except ImportError:
pass
try:
from secrets import * # noqa
except ImportError:
pass
LOGGING = {
'version': 1,
'loggers': {
'django': {
'level': 'INFO',
'handlers': ['console'],
},
'django.template': {
'level': 'INFO',
'handlers': ['console'],
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
}
},
}
| from .base import * # noqa
ALLOWED_HOSTS = [
'localhost',
'.localhost',
'127.0.0.1',
'172.30.1.238'
]
DEBUG = True
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
try:
from .local import * # noqa
except ImportError:
pass
try:
from secrets import * # noqa
except ImportError:
pass
LOGGING = {
'version': 1,
'loggers': {
'django': {
'level': 'INFO',
'handlers': ['console'],
},
'django.template': {
'level': 'INFO',
'handlers': ['console'],
},
},
'handlers': {
'console': {
'level': 'INFO',
'class': 'logging.StreamHandler',
}
},
}
| bsd-2-clause | Python |
22761fa04ca2a93a2c11103fb8f8fca3a51c8626 | Call .save() on database model tests to trigger any DB-level constraints | OSSystems/lava-server,Linaro/lava-server,Linaro/lava-server,Linaro/lava-server,OSSystems/lava-server,Linaro/lava-server,OSSystems/lava-server | launch_control/dashboard_app/tests.py | launch_control/dashboard_app/tests.py | """
Unit tests of the Dashboard application
"""
from django.contrib.contenttypes import generic
from django.db import IntegrityError
from django.test import TestCase
from launch_control.utils.call_helper import ObjectFactoryMixIn
from launch_control.dashboard_app.models import (
HardwareDevice,
SoftwarePackage,
)
class SoftwarePackageTestCase(TestCase, ObjectFactoryMixIn):
class Dummy:
class SoftwarePackage:
name = 'libfoo'
version = '1.2.0'
def test_creation_1(self):
dummy, sw_package = self.make_and_get_dummy(SoftwarePackage)
sw_package.save()
self.assertEqual(sw_package.name, dummy.name)
self.assertEqual(sw_package.version, dummy.version)
def test_uniqueness(self):
pkg1 = self.make(SoftwarePackage)
pkg1.save()
pkg2 = self.make(SoftwarePackage)
self.assertRaises(IntegrityError, pkg2.save)
class HardwarePackageTestCase(TestCase, ObjectFactoryMixIn):
class Dummy:
class HardwareDevice:
device_type = 'device.cpu'
description = 'some cpu'
def test_creation_1(self):
dummy, hw_device = self.make_and_get_dummy(HardwareDevice)
hw_device.save()
self.assertEqual(hw_device.device_type, dummy.device_type)
self.assertEqual(hw_device.description, dummy.description)
def test_attributes(self):
hw_device = self.make(HardwareDevice)
hw_device.save()
hw_device.attributes.create(name="connection-bus", value="usb")
self.assertEqual(hw_device.attributes.count(), 1)
attr = hw_device.attributes.get()
self.assertEqual(attr.name, "connection-bus")
self.assertEqual(attr.value, "usb")
| """
Unit tests of the Dashboard application
"""
from django.contrib.contenttypes import generic
from django.db import IntegrityError
from django.test import TestCase
from launch_control.utils.call_helper import ObjectFactoryMixIn
from launch_control.dashboard_app.models import (
HardwareDevice,
SoftwarePackage,
)
class SoftwarePackageTestCase(TestCase, ObjectFactoryMixIn):
class Dummy:
class SoftwarePackage:
name = 'libfoo'
version = '1.2.0'
def test_creation_1(self):
dummy, sw_package = self.make_and_get_dummy(SoftwarePackage)
self.assertEqual(sw_package.name, dummy.name)
self.assertEqual(sw_package.version, dummy.version)
def test_uniqueness(self):
pkg1 = self.make(SoftwarePackage)
pkg1.save()
pkg2 = self.make(SoftwarePackage)
self.assertRaises(IntegrityError, pkg2.save)
class HardwarePackageTestCase(TestCase, ObjectFactoryMixIn):
class Dummy:
class HardwareDevice:
device_type = 'device.cpu'
description = 'some cpu'
def test_creation_1(self):
dummy, hw_device = self.make_and_get_dummy(HardwareDevice)
self.assertEqual(hw_device.device_type, dummy.device_type)
self.assertEqual(hw_device.description, dummy.description)
def test_attributes(self):
hw_device = self.make(HardwareDevice)
hw_device.save()
hw_device.attributes.create(name="connection-bus", value="usb")
self.assertEqual(hw_device.attributes.count(), 1)
attr = hw_device.attributes.get()
self.assertEqual(attr.name, "connection-bus")
self.assertEqual(attr.value, "usb")
| agpl-3.0 | Python |
bea5ea8c4bc2b4342adb199d6b7e9012476030a2 | implement multimethod for backup backend | cmpitg/mongob | src/backend.py | src/backend.py | import os
import gzip
from multipledispatch import dispatch
from bson.json_util import loads as json_loads
from bson.json_util import dumps as json_dumps
from pymongo.mongo_client import MongoClient
from pymongo.database import Database
from pymongo.collection import Collection
from io import TextIOBase
##############################################################################
def get_db(uri, connections):
"""
Retrieves database from connection string. The connection string
follows one of the formats:
'mongodb://[username][[:[password]]@]<host>/<db_name>'
or 'file://<path>'
or simply '<path>'.
"""
if uri.startswith('mongodb://'):
db_name_pos = uri.rfind("/") + 1
client = MongoClient(uri[:db_name_pos])
db = client[uri[db_name_pos:]]
else:
if uri.startswith("file://"):
uri = uri[len("file://"):]
client = uri
db = uri
try:
connections.index(client)
except Exception:
connections.append(client)
return connections, db
# print(get_db("mongodb://localhost/hello_world", []))
# print(get_db("file:///tmp/hello_world.txt", []))
# print(get_db("/tmp/hi_hi.txt", []))
##############################################################################
@dispatch(str)
def close(path):
pass
@dispatch(MongoClient)
def close(client):
"""
Closes connection to MongoDB server.
"""
client.close()
##############################################################################
@dispatch(str)
def dest_name(path):
"""
Retrieves name of destination collection.
"""
return os.path.basename(path)
@dispatch(Collection)
def dest_name(coll):
"""
Retrieves name of destination file.
"""
return coll.name
##############################################################################
@dispatch(str)
def dest_size(path):
"""
Retrieves size of backed up data.
"""
with open(path, 'w') as input:
return json_loads(input.read())
@dispatch(Collection)
def dest_size(coll):
"""
Retrieves size of backed up data.
"""
return coll.count()
| from multipledispatch import dispatch
from pymongo.mongo_client import MongoClient
from io import TextIOBase
@dispatch(TextIOBase)
def close(f):
##############################################################################
def get_db(uri, connections):
"""
Retrieves database from connection string. The connection string
follows one of the formats:
'mongodb://[username][[:[password]]@]<host>/<db_name>'
or 'file://<path>'
or simply '<path>'.
"""
if uri.startswith('mongodb://'):
db_name_pos = uri.rfind("/") + 1
client = MongoClient(uri[:db_name_pos])
db = client[uri[db_name_pos:]]
else:
if uri.startswith("file://"):
uri = uri[len("file://"):]
client = uri
db = uri
try:
connections.index(client)
except Exception:
connections.append(client)
return connections, db
# print(get_db("mongodb://localhost/hello_world", []))
# print(get_db("file:///tmp/hello_world.txt", []))
# print(get_db("/tmp/hi_hi.txt", []))
@dispatch(MongoClient)
def close(f):
"""
Closes connection to MongoDB server.
"""
f.close()
| mit | Python |
71c63587d86c627434f5683ec1496376fa0e3dde | Fix typo | bfelbo/deepmoji,bfelbo/DeepMoji | scripts/analyze_all_results.py | scripts/analyze_all_results.py | from __future__ import print_function
# allow us to import the codebase/keras directory
import sys
import glob
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
DATASETS = ['SE0714', 'Olympic', 'PsychExp', 'SS-Twitter', 'SS-Youtube',
'SCv1', 'SV2-GEN'] # 'SE1604' excluded due to Twitter's ToS
def get_results(dset):
METHOD = 'last'
RESULTS_DIR = 'results/'
RESULT_PATHS = glob.glob('{}/{}_{}_*_results.txt'.format(RESULTS_DIR, dset, METHOD))
assert len(RESULT_PATHS)
scores = []
for path in RESULT_PATHS:
with open(path) as f:
score = f.readline().split(':')[1]
scores.append(float(score))
average = np.mean(scores)
maximum = max(scores)
minimum = min(scores)
std = np.std(scores)
print('Dataset: {}'.format(dset))
print('Method: {}'.format(METHOD))
print('Number of results: {}'.format(len(scores)))
print('--------------------------')
print('Average: {}'.format(average))
print('Maximum: {}'.format(maximum))
print('Minimum: {}'.format(minimum))
print('Standard deviaton: {}'.format(std))
for dset in DATASETS:
get_results(dset)
| from __future__ import print_function
# allow us to import the codebase/keras directory
import sys
import glob
import numpy as np
from os.path import dirname, abspath
sys.path.insert(0, dirname(dirname(abspath(__file__))))
DATASETS = ['SE0714', 'Olympic', 'PsychExp', 'SS-Twitter', 'SS-Youtube',
'SCv1', 'SV2-GEN'] # 'SE1604' excluded due to Twitter's ToS
def get_results(dset):
METHOD = 'full'
RESULTS_DIR = 'results/'
RESULT_PATHS = glob.glob('{}/{}_{}_*_results.txt'.format(RESULTS_DIR, DATASET, METHOD))
assert len(RESULT_PATHS)
scores = []
for path in RESULT_PATHS:
with open(path) as f:
score = f.readline().split(':')[1]
scores.append(float(score))
average = np.mean(scores)
maximum = max(scores)
minimum = min(scores)
std = np.std(scores)
print('Dataset: {}'.format(DATASET))
print('Method: {}'.format(METHOD))
print('Number of results: {}'.format(len(scores)))
print('--------------------------')
print('Average: {}'.format(average))
print('Maximum: {}'.format(maximum))
print('Minimum: {}'.format(minimum))
print('Standard deviaton: {}'.format(std))
for dset in []:
get_results(dset) | mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.