| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
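The `diff` column stores unified-diff hunks with some characters percent-encoded (judging from the rows below: `%0A` for newlines, `%5D` for `]`, `%22` for `"`). A minimal sketch for decoding a cell, assuming plain percent-encoding throughout; the sample hunk is the first row's diff:

```python
from urllib.parse import unquote

# Sample cell from the first row: a unified-diff hunk with percent-encoded
# characters (%5D = ']', %0A = newline).
raw_diff = "@@ -339,10 +339,18 @@\n 'osm\n-dt\n+_diff_tool\n'%5D,%0A"

# unquote() restores the encoded bytes and leaves everything else untouched.
print(unquote(raw_diff))
```

Decoded, that first hunk renames the `osmdt` entry in `install_requires` to `osm_diff_tool`, matching the commit subject "Fix requirement for diff tool".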
1641243682f080257b7f79b35503985d3d72aa44
|
Fix requirement for diff tool
|
setup.py
|
setup.py
|
from setuptools import setup
setup(name='osm_hall_monitor',
version='0.2',
description='Passive changeset monitoring for OpenStreetMap.',
url='http://github.com/ethan-nelson/osm_hall_monitor',
author='Ethan Nelson',
author_email='ethan-nelson@users.noreply.github.com',
install_requires = ['psycopg2','osmdt'],
packages=['osmhm'],
zip_safe=False)
|
Python
| 0
|
@@ -339,10 +339,18 @@
'osm
-dt
+_diff_tool
'%5D,%0A
|
c0155f59f52696e178def437f712f22c610c333a
|
Add classifiers for Python 3.3 and PyPy
|
setup.py
|
setup.py
|
import io
from setuptools import setup, find_packages
long_description = '\n'.join((
io.open('README.rst', encoding='utf-8').read(),
io.open('CHANGES.txt', encoding='utf-8').read()
))
setup(
name='morepath',
version='0.15.dev0',
description="A micro web-framework with superpowers",
long_description=long_description,
author="Morepath developers",
author_email="morepath@googlegroups.com",
url='http://morepath.readthedocs.io',
license="BSD",
packages=find_packages(),
include_package_data=True,
zip_safe=False,
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Environment :: Web Environment',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Internet :: WWW/HTTP :: WSGI',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Development Status :: 5 - Production/Stable'
],
keywords="web wsgi routing morepath",
install_requires=[
'setuptools',
'webob >= 1.3.1',
'reg >= 0.9.2',
'dectate >= 0.10.1',
'importscan',
],
extras_require=dict(
test=[
'pytest >= 2.9.0',
'py >= 1.4.20',
'pytest-cov',
'pytest-remove-stale-bytecode',
'WebTest >= 2.0.14',
'pyyaml',
],
),
)
|
Python
| 0.999386
|
@@ -935,17 +935,134 @@
on :: 3.
-4
+3',%0A 'Programming Language :: Python :: 3.4',%0A 'Programming Language :: Python :: Implementation :: PyPy
',%0A
|
97f03ffd2a309340bf41d381b35703272be600cc
|
Bump version.
|
setup.py
|
setup.py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'name': 'django-email-analytics',
'version': '0.3',
'author': 'Jess Johnson',
'author_email': 'jess@grokcode.com',
'packages': ['emailanalytics'],
'scripts': [],
'url': 'https://github.com/grokcode/django-email-analytics',
'license': 'LICENSE.txt',
'description': 'Adds Google Analytics tracking to emails sent with Django.',
'long_description': open('docs/README.rst').read(),
'install_requires': ['beautifulsoup4'],
}
setup(**config)
|
Python
| 0
|
@@ -160,9 +160,9 @@
'0.
-3
+4
',%0A
|
024878fc913097364123d28a99ab7cb5501b0af5
|
Set permission mask to allow read/exec for all users
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import subprocess
from distutils.core import setup
requirements = [pkg.split('=')[0] for pkg in open('requirements.txt').readlines()]
description = 'Download videos from Udemy for personal offline use'
try:
subprocess.call(["pandoc", "README.md", "-f", "markdown", "-t", "rst", "-o", "README.rst"])
long_description = open("README.rst").read()
except OSError:
print("Pandoc not installed")
long_description = description
classifiers = ['Environment :: Console',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Topic :: Multimedia :: Video',
]
version = open('CHANGES.txt').readlines()[0][1:].strip()
setup(name='udemy-dl',
version=version,
description=description,
author='Gaganpreet Singh Arora',
author_email='gaganpreet.arora@gmail.com',
url='https://github.com/gaganpreet/udemy-dl',
scripts=['src/udemy-dl',],
install_requires=requirements,
long_description=long_description,
packages=['udemy_dl'],
package_dir = {'udemy_dl': 'src/udemy_dl'},
classifiers=classifiers
)
|
Python
| 0
|
@@ -16,16 +16,26 @@
python%0A%0A
+import os%0A
import s
@@ -738,16 +738,156 @@
trip()%0A%0A
+# if installed as root or with sudo, set permission mask to allow read/exec for all users%0Aif os.getuid() == 0:%0A os.umask(int('022', 8))%0A%0A
setup(na
|
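Decoded (reading `%0A` as newlines), the two hunks above add an `import os` and drop this guard in just before the `setup()` call; a standalone sketch of the logic:

```python
import os

# If installed as root or with sudo, set the permission mask so every user
# keeps read/exec on the installed files: umask 022 clears only the write
# bits for group and other.
if os.getuid() == 0:
    os.umask(int('022', 8))
```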
a1a0de9e1f290161d83165f2983335844a5c17f9
|
fix tests_require
|
setup.py
|
setup.py
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
from setuptools import setup, find_packages
#+BEGIN_DELETE
import os.path
# anditional to make a tar.gz file
import tarfile
with tarfile.open("skeleton.tar.gz", "w:gz") as tar:
for name in ["setup.py", "LICENSE", "README.md", "requirements.txt",
"pyskeleton", "tests","setup.cfg","pytest.ini","MANIFEST.in"]:
tar.add(name)
from pyskeleton.compat import replace
replace('skeleton.tar.gz', 'pyskeleton/skeleton.tar.gz')
#+END_DELETE
import pyskeleton
import codecs
def long_description():
with codecs.open('README.md', encoding='utf-8') as f:
return f.read()
REQUIREMENTS = ['pytest-runner']
import sys
if sys.platform == "win32":
REQUIREMENTS.append('pyosreplace')
setup(
name='pyskeleton',
version=pyskeleton.__version__,
description='quickly create a python module, have some other good concern.',
url='https://github.com/a358003542/pyskeleton',
long_description=long_description(),
author='wanze',
author_email='a358003542@gmail.com',
maintainer='wanze',
maintainer_email='a358003542@gmail.com',
license='GPL 2',
platforms='Linux',
keywords=['skeleton', 'python'],
classifiers=['Development Status :: 4 - Beta',
'Environment :: Console',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'License :: OSI Approved :: GNU General Public License v2 (GPLv2)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5'],
packages=find_packages(exclude=['ez_setup', 'examples', 'tests']),
include_package_data=True,
package_data={"pyskeleton": ['skeleton.tar.gz'], },
setup_requires=REQUIREMENTS,
install_requires=REQUIREMENTS,
test_require=['pytest'],
entry_points={
'console_scripts': ['pyskeleton=pyskeleton.__main__:main', ],
}
)
|
Python
| 0.000001
|
@@ -1965,16 +1965,17 @@
test
+s
_require
|
a6d27ef03438133e7dd38f4a26208dc7d3904f6e
|
Fix url in setup.py file.
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
from setuptools import setup, find_packages
try:
from sphinx.setup_command import BuildDoc
cmdclass = {'build_sphinx': BuildDoc}
except ImportError:
print "Sphinx not installed - needed to build documentation"
cmdclass = {}
# Figure out the version; this could be done by importing the
# module, though that requires Django to be already installed,
# which may not be the case when processing a pip requirements
# file, for example.
import re
here = os.path.dirname(os.path.abspath(__file__))
version_re = re.compile(
r'__version__ = (\(.*?\))')
fp = open(os.path.join(here, 'src/webassets', '__init__.py'))
version = None
for line in fp:
match = version_re.search(line)
if match:
version = eval(match.group(1))
break
else:
raise Exception("Cannot find version in __init__.py")
fp.close()
setup(
name = 'webassets',
version = ".".join(map(str, version)),
description = 'Media asset management for Python, with glue code for '+\
'various web frameworks',
long_description = 'Merges, minifies and compresses Javascript and '
'CSS files, supporting a variety of different filters, including '
'YUI, jsmin, jspacker or CSS tidy. Also supports URL rewriting '
'in CSS files.',
author = 'Michael Elsdoerfer',
author_email = 'michael@elsdoerfer.com',
license = 'BSD',
url = 'github.com/miracle2k/webassets/',
classifiers = [
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Software Development :: Libraries',
],
entry_points = """[console_scripts]\nwebassets = webassets.script:run\n""",
packages = find_packages('src'),
package_dir = {'': 'src'},
cmdclass = cmdclass,
)
|
Python
| 0
|
@@ -1409,16 +1409,23 @@
url = '
+http://
github.c
|
949b190f0ebc04798e6c20fca458500a96f78433
|
Add workaround to setup.py for a distutils bug causing attempted hard links in a Vagrant image directory shared with the host to fail
|
setup.py
|
setup.py
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
import sys
from setuptools.command.test import test as TestCommand
here = path.abspath(path.dirname(__file__))
class PyTest(TestCommand):
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
def initialize_options(self):
TestCommand.initialize_options(self)
self.pytest_args = []
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.pytest_args)
sys.exit(errno)
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
with open(path.join(here, "VERSION")) as f:
version = f.read().strip()
setup(
name='roger-mesos-tools',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version=version,
description='A set of tools/scripts to interact with RogerOS via the command line',
long_description=long_description,
url='https://github.com/seomoz/roger-mesos-tools',
author='RogerOS Team',
author_email='rogeros-dev@moz.com',
license='Apache 2.0',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: Apache 2.0 License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
keywords='sample setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=find_packages(),
tests_require=['pytest>=2.5.1', 'mock>=1.0.1', 'mockito>=0.5.2'],
cmdclass={'test': PyTest},
# Including test_suite to executable
test_suite="tests",
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['argparse', 'setuptools', 'requests>=2.11.1', 'pyyaml',
'tabulate>=0.7.5', 'slackclient>=1.0.0', 'Jinja2>=2.8', 'statsd>=3.2.1', 'slackweb>=1.0.5', 'termcolor>=1.1.0'],
# Include the folders listed in the MANIFEST.in file as a part of the
# package
include_package_data=True,
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
# data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'roger=bin.roger:main', 'j2y=bin.j2y:main'
]
},
scripts={ 'bin/docker-forward-ssh-agent', 'cli/roger_build.py', 'cli/roger_deploy.py', 'cli/roger_gitpull.py', 'cli/roger_init.py', 'cli/roger_logs.py', 'cli/roger_ps.py', 'cli/roger_push.py', 'cli/roger_shell.py', 'cli/roger_promote.py' }
)
|
Python
| 0
|
@@ -290,16 +290,334 @@
ommand%0A%0A
+# Work around a bug in distutils where attempted hard links within a directory%0A# shared between the Vagrant machine and the host fails. Deleting the 'os.link'%0A# method causes distutils to copy instead of using hard links.%0A# https://bugs.python.org/issue8876%0Aif os.environ.get('USER','') == 'vagrant':%0A del os.link%0A%0A
here = p
|
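Decoded, the workaround this hunk inserts after the `TestCommand` import is self-contained enough to read on its own; a sketch:

```python
import os

# Work around a distutils bug where attempted hard links inside a directory
# shared between the Vagrant machine and the host fail. Deleting 'os.link'
# makes distutils fall back to copying files instead of hard-linking.
# https://bugs.python.org/issue8876
if os.environ.get('USER', '') == 'vagrant':
    del os.link
```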
c18bbf109a19eb0a5ee65be61030a1c3dd54a6d4
|
Bump version
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup
setup(name='python-bittrex',
version='0.2.1',
url="https://github.com/ericsomdahl/python-bittrex",
packages=['bittrex'],
modules=['bittrex'],
install_requires=['requests'],
description='Python bindings for bittrex API.',
author='Eric Somdahl',
author_email='eric@corsairconsulting.com',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Operating System :: OS Independent',
'License :: OSI Approved :: MIT License',
'Development Status :: 3 - Alpha',
'Topic :: Office/Business :: Financial',
])
|
Python
| 0
|
@@ -99,9 +99,9 @@
0.2.
-1
+2
',%0A
|
182eb14fd444940e598b26b5ce075ec1366ff321
|
add catalog.xml to deployment/packaging (#149)
|
setup.py
|
setup.py
|
# -*- coding: iso-8859-15 -*-
# =================================================================
#
# Authors: Tom Kralidis <tomkralidis@gmail.com>
#
# Copyright (c) 2015 Tom Kralidis
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation
# files (the "Software"), to deal in the Software without
# restriction, including without limitation the rights to use,
# copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following
# conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
#
# =================================================================
import os
from distutils.core import setup
import pycsw
def is_package(path):
"""Decipher whether a filepath is a Python package"""
return (
os.path.isdir(path) and
os.path.isfile(os.path.join(path, '__init__.py'))
)
def find_packages(path, base=''):
"""Find all packages in path"""
packages = {}
for item in os.listdir(path):
directory = os.path.join(path, item)
if is_package(directory):
if base:
module_name = "%(base)s.%(item)s" % vars()
else:
module_name = item
packages[module_name] = directory
packages.update(find_packages(directory, module_name))
return packages
def find_packages_xsd(location='.'):
"""
Figure out which packages need to be specified as package_data
keys (the ones with XML Schema documents
"""
packages = []
for root, dirs, files in os.walk(location):
if 'schemas' in dirs: # include as a package_data key
packages.append(root.replace(os.sep, '.').replace('..', ''))
return packages
def get_package_data(location='.', forced_dir=None):
"""Generate package_data dict"""
package_data = {}
for ploc in location:
# turn package identifier into filepath
filepath = ploc.replace('.', os.sep)
if forced_dir is not None: # force os.walk to traverse subdir
filepath = '%s%sschemas' % (filepath, os.sep)
for root, dirs, files in os.walk(filepath):
if len(files) > 0:
# find all the XML Schema documents
xsds = filter(lambda x: x.find('.xsd') != -1, files)
if len(xsds) > 0:
if ploc not in package_data: # set key
package_data[ploc] = []
for xsd in xsds: # add filename to list
root2 = root.replace(filepath, '').split(os.sep)[1:]
pathstr = '%s%s%s' % (os.sep.join(root2), os.sep, xsd)
if forced_dir is not None:
filename = 'schemas%s%s' % (os.sep, pathstr)
else:
filename = pathstr
package_data[ploc].append(filename)
return package_data
# ensure a fresh MANIFEST file is generated
if (os.path.exists('MANIFEST')):
os.unlink('MANIFEST')
# set setup.packages
PACKAGES = find_packages('.').keys()
# get package_data.keys()
PACKAGE_DATA_XSD = find_packages_xsd('pycsw')
# Because package 'pycsw' contains all other packages,
# process it last, so that it doesn't set it's package_data
# files to one already set in other packages
ROOT_PACKAGE = PACKAGE_DATA_XSD.pop(0)
# set package_data
PACKAGE_DATA = get_package_data(PACKAGE_DATA_XSD)
# update package_data for pycsw package
PACKAGE_DATA.update(get_package_data([ROOT_PACKAGE], 'schemas'))
# set the dependencies
# GeoNode and OpenDataCatalog do not require SQLAlchemy
INSTALL_REQUIRES = [line.strip() for line in open('requirements.txt')]
KEYWORDS = ('pycsw csw catalogue catalog metadata discovery search'
' ogc iso fgdc dif ebrim inspire')
DESCRIPTION = 'pycsw is an OGC CSW server implementation written in Python'
setup(
name='pycsw',
version=pycsw.__version__,
description=DESCRIPTION,
long_description=open('README.txt').read(),
license='MIT',
platforms='all',
keywords=KEYWORDS,
author='Tom Kralidis',
author_email='tomkralidis@gmail.com',
maintainer='Tom Kralidis',
maintainer_email='tomkralidis@gmail.com',
url='http://pycsw.org/',
install_requires=INSTALL_REQUIRES,
packages=PACKAGES,
package_data=PACKAGE_DATA,
scripts=[os.path.join('bin', 'pycsw-admin.py')],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Scientific/Engineering :: GIS',
]
)
|
Python
| 0
|
@@ -2982,16 +2982,177 @@
files)%0A
+ catalog_xml = filter(lambda x: x.find('catalog.xml') != -1,%0A files)%0A xsds.extend(catalog_xml)%0A
|
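Decoded, the hunk extends `get_package_data` so `catalog.xml` files are collected alongside the XML Schema documents. A runnable sketch of just that filtering step, with hypothetical file names (only the filter logic comes from the hunk):

```python
# Hypothetical directory listing for illustration.
files = ['csw.xsd', 'catalog.xml', 'README.txt']

# Existing behaviour: keep the XML Schema documents.
xsds = list(filter(lambda x: x.find('.xsd') != -1, files))

# Added behaviour: also keep catalog.xml so it ships in package_data.
catalog_xml = filter(lambda x: x.find('catalog.xml') != -1, files)
xsds.extend(catalog_xml)

print(xsds)  # ['csw.xsd', 'catalog.xml']
```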
b3f4c5211d33d8242b863f812421b37b22cd91cc
|
update deps, setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
import sys
from subprocess import call
from setuptools import setup, find_packages
from setuptools.command.install import install as _install
version = '0.2.21'
def _post_install(dir):
call([sys.executable, 'setup_post.py'],
cwd=os.path.join(dir, 'metadoc'))
class CustomInstall(_install):
"""Do stuff after setup."""
def run(self):
_install.run(self)
self.execute(_post_install, (self.install_lib,),
msg="Running post install task")
setup(
name='metadoc',
version=version,
description="Post-truth era news article metadata service.",
long_description="",
classifiers=[ # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers
"Programming Language :: Python :: 3.5",
"Topic :: Internet :: WWW/HTTP",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: MIT License",
"Intended Audience :: Developers",
"Operating System :: POSIX :: Linux",
"Environment :: Web Environment",
],
keywords=["scraping", "metadata", "news article"],
author='Paul Solbach',
author_email='p@psolbach.com',
url='https://github.com/psolbach/metadoc',
license='MIT',
cmdclass={'install': CustomInstall},
packages=find_packages(exclude=['tests']),
include_package_data=True,
zip_safe=False,
install_requires=[
'aiohttp==1.1.5',
'asynctest==0.9.0',
'bottle==0.12.10',
'jmespath==0.9.0',
'langdetect==1.0.7',
'libextract==0.0.12',
'newspaper3k==0.1.7',
'nltk==3.2.1',
'pytest==3.0.5',
'pytest-cov==2.4.0',
'numpy==1.11.2',
'tldextract==2.0.2',
'requests==2.12.2',
'whois==0.7'
]
)
|
Python
| 0
|
@@ -207,19 +207,18 @@
on = '0.
-2.2
+3.
1'%0A%0Adef
@@ -1500,34 +1500,8 @@
2',%0A
- 'newspaper3k==0.1.7',%0A
|
8b64aec8d6fe9b27e1b95bc61488eb03216f0077
|
fix scikit-image dependency
|
setup.py
|
setup.py
|
#!/usr/bin/env python
#===============================================================================
# Copyright 2017 Geoscience Australia
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#===============================================================================
from distutils.core import setup
import os
version = '0.0.1'
setup(name='geophys_utils',
version=version,
packages=[
'geophys_utils',
'geophys_utils.test',
'geophys_utils.netcdf_converter',
'geophys_utils.dataset_metadata_cache',
],
package_data={'geophys_utils': ['csw_utils_settings.yml'],
'geophys_utils.netcdf_converter': ['aseg_gdf_settings.yml'],
'geophys_utils.dataset_metadata_cache': ['dataset_metadata_cache_settings.yml',
('data/dataset_metadata_cache.sqlite'
if (os.name == 'posix')
else ('data\\dataset_metadata_cache.sqlite'
if (os.name == 'nt')
else []))
],
},
scripts=(['bin/csw_find',
'bin/rechunk',
'bin/aseg2nc',
'bin/nc2aseg',
]
if (os.name == 'posix')
else (['bin\\csw_find.bat',
'bin\\rechunk.bat',
'bin\\aseg2nc.bat',
'bin\\nc2aseg.bat',
]
if (os.name == 'nt')
else [])),
requires=[
'distutils',
'functools',
'itertools',
'netCDF4',
'numpy',
'osgeo',
'owslib',
# 'scikit-image',TODO add scikit-image - currently removed as dash is casuing errors
'scipy',
'shapely',
'tempfile',
'unittest',
'yaml',
'unidecode',
],
url='https://github.com/geoscienceaustralia/geophys_utils',
author='Alex Ip - Geoscience Australia',
maintainer='Alex Ip - Geoscience Australia',
maintainer_email='alex.ip@ga.gov.au',
description='Geophysics data access utilities',
long_description='Geophysics data access utilities',
license='Apache License Version 2.0'
)
|
Python
| 0.000003
|
@@ -2526,92 +2526,20 @@
-# 'scikit-image',TODO add scikit-image - currently removed as dash is casuing errors
+ 'skimage',
%0A
|
8978c1b49f43465bc3cd51b3ee51350d44ed9ae7
|
Bump tqdm from 4.38.0 to 4.39.0
|
setup.py
|
setup.py
|
#!/usr/bin/env python3
from os import path
from setuptools import setup, find_packages
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='ogn-python',
version='0.5.0',
description='A database backend for the Open Glider Network',
long_description=long_description,
url='https://github.com/glidernet/ogn-python',
author='Konstantin Gründger aka Meisterschueler, Fabian P. Schmidt aka kerel, Dominic Spreitz',
author_email='kerel-fs@gmx.de',
license='AGPLv3',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: GIS',
'License :: OSI Approved :: GNU Affero General Public License v3',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='gliding ogn',
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[
'Flask==1.1.1',
'Flask-SQLAlchemy==2.4.1',
'Flask-Migrate==2.5.2',
'Flask-Bootstrap==3.3.7.1',
'Flask-WTF==0.14.2',
'Flask-Caching==1.8.0',
'geopy==1.20.0',
'celery==4.3.0',
'redis==3.3.11',
'aerofiles==1.0.0',
'geoalchemy2==0.6.3',
'shapely==1.6.4.post2',
'ogn-client==0.9.5',
'psycopg2-binary==2.8.4',
'mgrs==1.3.5',
'xmlunittest==0.5.0',
'tqdm==4.38.0',
'requests==2.22.0',
],
test_require=[
'pytest==5.0.1',
'flake8==1.1.1',
'xmlunittest==0.4.0',
],
zip_safe=False
)
|
Python
| 0.000001
|
@@ -1657,17 +1657,17 @@
qdm==4.3
-8
+9
.0',%0A%09'r
|
05295bfa9edf99dfe66d21025088e00ae6152bfa
|
bump version to 0.3.5
|
setup.py
|
setup.py
|
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from pypandoc import convert
read_md = lambda f: convert(f, 'rst')
except ImportError:
print("warning: pypandoc module not found,"
"could not convert markdown README to RST")
read_md = lambda f: open(f, 'r').read()
config = {
'name': 'colour-valgrind',
'version': '0.3.4',
'description': 'Wraps Valgrind to colour the output.',
'long_description': read_md('README.md'),
'author': 'Matthew Cox',
'url': 'http://github.com/MatthewCox/colour-valgrind',
'author_email': 'matthewcpcox@gmail.com',
'classifiers': [
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Debuggers',
'Topic :: Text Processing :: Filters',
'Topic :: Utilities',
],
'keywords': 'valgrind color colour filter',
'license': 'MIT',
'packages': ['colourvalgrind'],
'install_requires': [
'colorama',
'regex',
'six',
],
'entry_points': {
'console_scripts': ['colour-valgrind=colourvalgrind.command_line:main'],
},
'include_package_data': True,
}
setup(**config)
|
Python
| 0.000001
|
@@ -402,9 +402,9 @@
0.3.
-4
+5
',%0A
|
0cd17d762ef1b7728b3ce3670e08cc30e2eac904
|
remove README
|
setup.py
|
setup.py
|
import os
import sys
from distutils.core import setup, Extension
## Command-line argument parsing
# --with-zlib: use zlib for compressing and decompressing
# --without-zlib: ^ negated
# --with-zlib=<dir>: path to zlib if needed
# --with-libmemcached=<dir>: path to libmemcached package if needed
cmd = None
use_zlib = True
pkgdirs = [] # incdirs and libdirs get these
libs = ["memcached"]
defs = []
incdirs = []
libdirs = []
def append_env(L, e):
v = os.environ.get(e)
if v and os.path.exists(v):
L.append(v)
append_env(pkgdirs, "LIBMEMCACHED")
append_env(pkgdirs, "ZLIB")
# Hack up sys.argv, yay
unprocessed = []
for arg in sys.argv[1:]:
if arg == "--with-zlib":
use_zlib = True
continue
elif arg == "--without-zlib":
use_zlib = False
continue
elif arg == "--with-sasl2":
libs.append("sasl2")
continue
elif arg == "--gen-setup":
cmd = arg[2:]
elif "=" in arg:
if arg.startswith("--with-libmemcached=") or \
arg.startswith("--with-zlib="):
pkgdirs.append(arg.split("=", 1)[1])
continue
unprocessed.append(arg)
sys.argv[1:] = unprocessed
# FreeBSD
if not pkgdirs:
pkgdirs = ['/', '/usr', '/usr/local']
for pkgdir in pkgdirs:
incdirs.append(os.path.join(pkgdir, "include"))
libdirs.append(os.path.join(pkgdir, "lib"))
if use_zlib:
libs.append("z")
defs.append(("USE_ZLIB", None))
## OS X non-PPC workaround
# Apple OS X 10.6 with Xcode 4 have Python compiled with PPC but they removed
# support for compiling with that arch, so we have to override ARCHFLAGS.
if sys.platform == "darwin" and not os.environ.get("ARCHFLAGS"):
compiler_dirn = "/usr/libexec/gcc/darwin"
if os.path.exists(compiler_dirn):
dir_items = os.listdir(compiler_dirn)
if "ppc" not in dir_items:
print >>sys.stderr, "enabling osx-specific ARCHFLAGS/ppc hack"
os.environ["ARCHFLAGS"] = "-arch i386 -arch x86_64"
# There's a bug in <py3 with Py_True/False that will propagate with GCC's
# strict aliasing rules. Let's skip this flag for now.
cflags = ["-fno-strict-aliasing", ]
## Extension definitions
pylibmc_ext = Extension("_pylibmc", ["_pylibmcmodule.c"],
libraries=libs, include_dirs=incdirs,
library_dirs=libdirs, define_macros=defs,
extra_compile_args=cflags)
# Hidden secret: if environment variable GEN_SETUP is set, generate Setup file.
if cmd == "gen-setup":
line = " ".join((
pylibmc_ext.name,
" ".join("-l" + lib for lib in pylibmc_ext.libraries),
" ".join("-I" + incdir for incdir in pylibmc_ext.include_dirs),
" ".join("-L" + libdir for libdir in pylibmc_ext.library_dirs),
" ".join("-D" + name + ("=" + str(value), "")[value is None] for (name, value) in pylibmc_ext.define_macros)))
open("Setup", "w").write(line + "\n")
sys.exit(0)
readme_text = open("README.rst", "U").read()
version = open("pylibmc-version.h", "U").read().strip().split("\"")[1]
setup(name="pylibmc", version=version,
url="http://sendapatch.se/projects/pylibmc/",
author="Ludvig Ericson", author_email="ludvig@lericson.se",
license="3-clause BSD <http://www.opensource.org/licenses/bsd-license.php>",
description="Quick and small memcached client for Python",
long_description=readme_text,
ext_modules=[pylibmc_ext], packages=["pylibmc"])
|
Python
| 0
|
@@ -2957,16 +2957,18 @@
xit(0)%0A%0A
+#
readme_t
@@ -3386,16 +3386,18 @@
%22,%0A
+ #
long_de
|
57b17e6edcbe1e5400e3ede82292c1cd1c38f4e4
|
Bump version
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from pip.req import parse_requirements
def reqs_from_requirements_file():
reqs = parse_requirements('requirements.txt', session='hack')
return [str(r.req) for r in reqs]
setup(
name="demosys-py",
version="0.1.1",
description="Modern OpenGL 4.1+ Prototype Framework inspired by Django",
long_description=open('README.rst').read(),
url="https://github.com/Contraz/demosys-py",
author="Einar Forselv",
author_email="eforselv@gmail.com",
maintainer="Einar Forselv",
maintainer_email="eforselv@gmail.com",
packages=find_packages(),
include_package_data=True,
keywords = ['opengl', 'framework'],
classifiers=[
'Programming Language :: Python',
'Environment :: MacOS X',
'Environment :: X11 Applications',
'Intended Audience :: Developers',
'Topic :: Multimedia :: Graphics',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries :: Application Frameworks',
],
install_requires=reqs_from_requirements_file(),
entry_points={'console_scripts': [
'demosys_test = demosys_test.main:main',
'demosys-admin = demosys.core.management:execute_from_command_line',
]},
)
|
Python
| 0
|
@@ -266,17 +266,17 @@
on=%220.1.
-1
+2
%22,%0A d
|
2cc2bf3665246f1876e9c25911baf6e418a356db
|
Add include_package_data=True to setup
|
setup.py
|
setup.py
|
import os
import sys
from pathlib import Path
from shutil import rmtree
from setuptools import setup, find_packages, Command
from simple_model.__version__ import __version__
here = Path(__file__).absolute().parent
with open(here / 'README.rst') as f:
readme = '\n' + f.read()
class UploadCommand(Command):
"""Support setup.py publish."""
description = 'Build and publish the package.'
user_options = []
@staticmethod
def status(s):
"""Prints things in bold."""
print('\033[1m{0}\033[0m'.format(s))
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
try:
self.status('Removing previous builds…')
rmtree(os.path.join(here, 'dist'))
except FileNotFoundError:
pass
self.status('Building Source distribution…')
os.system('{0} setup.py sdist'.format(sys.executable))
self.status('Uploading the package to PyPi via Twine…')
os.system('twine upload dist/*')
self.status('Pushing git tags…')
os.system('git tag v{0}'.format(__version__))
os.system('git push --tags')
sys.exit()
setup(
name='pysimplemodel',
version=__version__,
description='Data handling made easy',
long_description='\n' + readme,
url='https://github.com/lamenezes/simple-model',
author='Luiz Menezes',
author_email='luiz.menezesf@gmail.com',
packages=find_packages(exclude=['tests']),
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Topic :: Software Development :: Libraries',
],
cmdclass={
'upload': UploadCommand,
},
)
|
Python
| 0.000006
|
@@ -1898,10 +1898,41 @@
%0A %7D,%0A
+ include_package_data=True,%0A
)%0A
|
f2256ffb1786ba88922c437a54f214547448121b
|
version bump
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
from setuptools.command.test import test
class TestRunner(test):
def run(self, *args, **kwargs):
if self.distribution.install_requires:
self.distribution.fetch_build_eggs(self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
from runtests import runtests
runtests()
setup(
name='django-preferences',
version='0.0.4',
description='Django app allowing users to set app specific preferences through the admin interface.',
long_description = open('README.rst', 'r').read() + open('AUTHORS.rst', 'r').read() + open('CHANGELOG.rst', 'r').read(),
author='Praekelt Foundation',
author_email='dev@praekelt.com',
license='BSD',
test_suite = 'preferences.tests',
cmdclass={'test': TestRunner},
url='http://github.com/praekelt/django-preferences',
packages = find_packages(),
tests_require = [
'django',
],
include_package_data=True,
classifiers = [
"Programming Language :: Python",
"License :: OSI Approved :: BSD License",
"Development Status :: 4 - Beta",
"Operating System :: OS Independent",
"Framework :: Django",
"Intended Audience :: Developers",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content",
],
)
|
Python
| 0.000001
|
@@ -506,17 +506,17 @@
on='0.0.
-4
+5
',%0A d
|
e846fff0060a431187e607fa0852b00265aff709
|
fix bug #141
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""QTPyLib: Quantitative Trading Python Library
(https://github.com/ranaroussi/qtpylib)
Simple, event-driven algorithmic trading system written in
Python 3, that supports backtesting and live trading using
Interactive Brokers for market data and order execution.
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='QTPyLib',
version='1.5.83',
description='Quantitative Trading Python Library',
long_description=long_description,
url='https://github.com/ranaroussi/qtpylib',
author='Ran Aroussi',
author_email='ran@aroussi.com',
license='LGPL',
classifiers=[
'License :: OSI Approved :: Apache Software License',
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Developers',
'Topic :: Office/Business :: Financial',
'Topic :: Office/Business :: Financial :: Investment',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
platforms = ['any'],
keywords='qtpylib qtpy algotrading algo trading interactive brokers tws ibgw ibpy ezibpy',
packages=find_packages(exclude=['contrib', 'docs', 'tests', 'demo', 'demos', 'examples']),
install_requires=[
'python-dateutil>=2.5.3','ezibpy>=1.12.66',
'flask>=0.11.1','numpy>=1.11.1','pandas>=0.22.0','pymysql>=0.7.6',
'pytz>=2016.6.1','requests>=2.10.0','pyzmq>=15.2.1',
'nexmo>=1.2.0','twilio>=5.4.0','ibpy2>=0.8.0',
],
entry_points={
'console_scripts': [
'sample=sample:main',
],
},
include_package_data=True,
package_data={
'static': 'qtpylib/_webapp/*',
'db': 'qtpylib/schema.sql*'
},
)
|
Python
| 0
|
@@ -2173,16 +2173,17 @@
tatic':
+%5B
'qtpylib
@@ -2193,16 +2193,17 @@
ebapp/*'
+%5D
,%0A
@@ -2210,16 +2210,17 @@
'db':
+%5B
'qtpylib
@@ -2232,16 +2232,17 @@
ma.sql*'
+%5D
%0A %7D,%0A
|
990d1a364dcfd62e700daba9945c35f96fbdfa5b
|
Order the main extensions list by name.
|
sweettooth/extensions/urls.py
|
sweettooth/extensions/urls.py
|
from django.conf.urls.defaults import patterns, include, url
from django.views.generic.simple import direct_to_template
from django.views.generic.list_detail import object_list
from extensions import views, models
upload_patterns = patterns('',
url(r'^$', views.upload_file, dict(pk=None), name='extensions-upload-file'),
url(r'^new-version/(?P<pk>\d+)/$', views.upload_file, name='extensions-upload-file'),
)
ajax_patterns = patterns('',
url(r'^edit/(?P<pk>\d+)', views.ajax_inline_edit_view, name='extensions-ajax-inline'),
url(r'^submit/(?P<pk>\d+)', views.ajax_submit_and_lock_view, name='extensions-ajax-submit'),
url(r'^upload/screenshot/(?P<pk>\d+)', views.ajax_upload_screenshot_view, name='extensions-ajax-screenshot'),
url(r'^upload/icon/(?P<pk>\d+)', views.ajax_upload_icon_view, name='extensions-ajax-icon'),
url(r'^detail/', views.ajax_details_view, name='extensions-ajax-details'),
)
shell_patterns = patterns('',
url(r'^extension-query/', views.ajax_query_view),
url(r'^extension-info/', views.ajax_details_view),
url(r'^download-extension/(?P<uuid>.+)\.shell-extension\.zip$',
views.download),
)
urlpatterns = patterns('',
url(r'^$', object_list, dict(queryset=models.Extension.objects.visible(),
paginate_by=10,
template_object_name='extension',
template_name='extensions/list.html'),
name='extensions-index'),
# we ignore PK of extension, and get extension from version PK
url(r'^extension/(?P<ext_pk>\d+)/(?P<slug>.+)/version/(?P<pk>\d+)/$',
views.extension_version_view, name='extensions-version-detail'),
url(r'^extension/(?P<pk>\d+)/(?P<slug>.+)/$',
views.extension_latest_version_view, name='extensions-detail'),
url(r'^extension/(?P<pk>\d+)/$',
views.extension_latest_version_view, dict(slug=None), name='extensions-detail'),
url(r'^local/', direct_to_template, dict(template='extensions/local.html'), name='extensions-local'),
url(r'^upload/', include(upload_patterns)),
url(r'^ajax/', include(ajax_patterns)),
url(r'', include(shell_patterns)),
)
|
Python
| 0
|
@@ -1264,16 +1264,33 @@
isible()
+.order_by('name')
,%0A
|
73d89e5a6f23fd3292b4057e56649d6c20fc0483
|
replace couch_db reference
|
corehq/apps/change_feed/pillow.py
|
corehq/apps/change_feed/pillow.py
|
import json
from kafka import KeyedProducer
from kafka.common import KafkaUnavailableError
from casexml.apps.case.models import CommCareCase
from corehq.apps.change_feed import data_sources
from corehq.apps.change_feed.connection import get_kafka_client
from corehq.apps.change_feed.models import ChangeMeta
from corehq.apps.change_feed.topics import get_topic
from couchforms.models import all_known_formlike_doc_types
import logging
from pillowtop.checkpoints.manager import PillowCheckpoint, get_django_checkpoint_store
from pillowtop.couchdb import CachedCouchDB
from pillowtop.listener import PythonPillow
class ChangeFeedPillow(PythonPillow):
def __init__(self, couch_db, kafka, checkpoint):
super(ChangeFeedPillow, self).__init__(couch_db=couch_db, checkpoint=checkpoint)
self._kafka = kafka
self._producer = KeyedProducer(self._kafka)
def get_db_name(self):
return self.couch_db.dbname
def process_change(self, change, is_retry_attempt=False):
document_type = _get_document_type(change.document)
if document_type:
assert change.document is not None
change_meta = ChangeMeta(
document_id=change.id,
data_source_type=data_sources.COUCH,
data_source_name=self.get_db_name(),
document_type=document_type,
document_subtype=_get_document_subtype(change.document),
domain=change.document.get('domain', None),
is_deletion=change.deleted,
)
self._producer.send_messages(
bytes(get_topic(document_type)),
bytes(change_meta.domain),
bytes(json.dumps(change_meta.to_json())),
)
def get_default_couch_db_change_feed_pillow():
default_couch_db = CachedCouchDB(CommCareCase.get_db().uri, readonly=False)
try:
kafka_client = get_kafka_client()
except KafkaUnavailableError:
logging.warning('Ignoring missing kafka client during unit testing')
kafka_client = None
return ChangeFeedPillow(
couch_db=default_couch_db,
kafka=kafka_client,
checkpoint=PillowCheckpoint(get_django_checkpoint_store(), 'default-couch-change-feed')
)
def _get_document_type(document_or_none):
return document_or_none.get('doc_type', None) if document_or_none else None
def _get_document_subtype(document_or_none):
type = _get_document_type(document_or_none)
if type in ('CommCareCase', 'CommCareCase-Deleted'):
return document_or_none.get('type', None)
elif type in all_known_formlike_doc_types():
return document_or_none.get('xmlns', None)
return None
|
Python
| 0.000001
|
@@ -915,24 +915,28 @@
rn self.
+get_
couch_db
.dbname%0A
@@ -927,16 +927,18 @@
couch_db
+()
.dbname%0A
|
a9c2a45786b0bf3ec46adf09943bd2aaa79b9f19
|
Fix issue with spurious XP when opening a window and remove some logging
|
CodeStats.py
|
CodeStats.py
|
import sublime
import sublime_plugin
import os
import shutil
import datetime
import json
import requests
# Pulses will be sent after intervals of this many seconds
PULSE_TIMEOUT = 10
# Default URL for the API
DEFAULT_URL = 'https://codestats.net/api/my/pulses'
def log(*msg):
print('code-stats-sublime: ', *msg)
def show_first_time_setup():
"""
Show first time setup if it hasn't been shown yet.
"""
default_settings_file = os.path.join(sublime.packages_path(), 'CodeStats', 'Settings', 'CodeStats.sublime-settings')
user_settings_file = os.path.join(sublime.packages_path(), 'User', 'CodeStats.sublime-settings')
if not os.path.isfile(user_settings_file) and os.path.isfile(default_settings_file):
shutil.copyfile(default_settings_file, user_settings_file)
sublime.message_dialog('''
Setting up CodeStats:
0. Go to your Machines page on https://codestats.net/my/machines and create a new API key.
1. Insert the API key in the configuration file and save.
2. Start writing code!
''')
window = sublime.active_window()
window.open_file(user_settings_file)
def send_pulses():
# If required settings are not defined, don't act
if not Config.has_required_settings():
return
window = sublime.active_window()
pulses = Pulse.pulses_to_send
if Pulse.current_pulse is not None:
pulses += [str(Pulse.current_pulse)]
failed_pulses = []
window.status_message('C::S submitting…')
for pulse in pulses:
failed = False
r = None
try:
r = requests.post(
Config.api_url,
headers={
'content-type': 'application/json',
'x-api-token': Config.api_key,
},
data=pulse
)
if r.status_code != 201:
failed = True
log('Pulse failed with status', r.status_code, 'and content:', r.text)
window.status_message('C::S submit failed: {} {}'.format(r.status_code, r.text))
except requests.exceptions.RequestException as e:
failed = True
log('Pulse failed with exception', e)
window.status_message('C::S error: ' + str(e))
if failed:
failed_pulses += [pulse]
Pulse.current_pulse = None
Pulse.pulses_to_send = failed_pulses
if len(failed_pulses) == 0:
window.status_message('')
class Config:
"""
Configuration handler. Listens to changes in plugin configuration.
"""
api_key = None
api_url = None
initted = False
@classmethod
def init(cls):
cls.load_settings()
cls.settings.add_on_change('API_URL', cls.url_changed)
cls.settings.add_on_change('API_KEY', cls.key_changed)
cls.initted = True
if not cls.__is_undefined__(cls.api_key):
log('Initialised with key {}.'.format(cls.api_key))
else:
log('Initialised with no key.')
@classmethod
def load_settings(cls):
cls.settings = sublime.load_settings('CodeStats.sublime-settings')
cls.url_changed()
cls.key_changed()
@classmethod
def url_changed(cls):
cls.api_url = cls.settings.get('API_URL', DEFAULT_URL)
log('URL changed to {}.'.format(cls.api_url))
@classmethod
def key_changed(cls):
cls.api_key = cls.settings.get('API_KEY', None)
log('Key changed to {}.'.format(cls.api_key))
@classmethod
def has_required_settings(cls):
return not cls.__is_undefined__(cls.api_url) and not cls.__is_undefined__(cls.api_key)
@classmethod
def has_init(cls):
return cls.initted
@staticmethod
def __is_undefined__(value):
return value is None or value == ''
class Timer:
"""
Timer that runs given function after given time.
"""
def __init__(self, fun):
self.fun = fun
self.set_timeout()
log('Timer started.')
def run(self):
self.fun()
log('Timer removed.')
def set_timeout(self):
sublime.set_timeout_async(lambda: self.run(), PULSE_TIMEOUT * 1000)
class Pulse:
"""
Represents one Pulse to be sent to the API.
"""
# Current active pulse
current_pulse = None
# JSONified pulses waiting for sending because of network problems
pulses_to_send = []
def __init__(self):
self.xps = {}
def add_xp(self, language, amount):
"""
Add XP with the given language and given amount into the pulse.
"""
xp = self.xps.get(language, 0) + amount
self.xps[language] = xp
def __str__(self):
# Convert pulse into JSON string that can be sent to API
ret = {'coded_at': datetime.datetime.now(datetime.timezone.utc).isoformat()}
ret['xps'] = [{'language': l, 'xp': x} for l, x in self.xps.items()]
return json.dumps(ret)
@classmethod
def get_pulse(cls):
"""
Get or create currently active Pulse.
"""
if cls.current_pulse is None:
cls.current_pulse = Pulse()
return cls.current_pulse
class ChangeListener(sublime_plugin.EventListener):
"""
Event listener that listens to changes in any editors and counts them.
Changes seem to be a good approximation of characters typed in Sublime Text.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.timer = None
def timer_run(self):
send_pulses()
self.timer = None
def on_modified_async(self, view):
# If plugin isn't fully loaded yet, don't do anything
if not Config.has_init():
return
# Start timer if not already started
if self.timer is None:
self.timer = Timer(lambda: self.timer_run())
pulse = Pulse.get_pulse()
syntax_file = os.path.basename(view.settings().get('syntax'))
language = os.path.splitext(syntax_file)[0]
pulse.add_xp(language, 1)
log(pulse)
def plugin_loaded():
Config.init()
if not Config.has_required_settings():
show_first_time_setup()
|
Python
| 0
|
@@ -304,17 +304,16 @@
sublime:
-
', *msg)
@@ -313,16 +313,16 @@
, *msg)%0A
+
%0A%0Adef sh
@@ -2196,17 +2196,22 @@
ption',
-e
+str(e)
)%0A
@@ -3983,38 +3983,8 @@
ut()
-%0A log('Timer started.')
%0A%0A
@@ -4022,38 +4022,8 @@
un()
-%0A log('Timer removed.')
%0A%0A
@@ -5685,24 +5685,213 @@
return%0A%0A
+ # Prevent XP from other views than editor view (widgets are builtin stuff%0A # like menus, find dialogs, etc.)%0A if view.settings().get('is_widget'):%0A return%0A%0A
# St
@@ -6169,16 +6169,16 @@
le)%5B0%5D%0A%0A
+
@@ -6207,27 +6207,8 @@
1)%0A
- log(pulse)%0A
%0A%0Ade
|
4cfe702b7c06f431f8b250bbd72e121bfeef5b5e
|
Fix warning message in api AlertsHandler.
|
dashboard/dashboard/api/alerts.py
|
dashboard/dashboard/api/alerts.py
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import datetime
import urllib
from google.appengine.datastore import datastore_query
from google.appengine.ext import ndb
from dashboard import alerts
from dashboard import group_report
from dashboard.api import api_request_handler
from dashboard.common import request_handler
from dashboard.models import anomaly
def ParseISO8601(s):
# ISO8601 specifies many possible formats. The dateutil library is much more
# flexible about parsing all of the possible formats, but it would be annoying
# to third_party it just for this. A few formats should cover enough users.
try:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S.%f')
except ValueError:
return datetime.datetime.strptime(s, '%Y-%m-%dT%H:%M:%S')
class AlertsHandler(api_request_handler.ApiRequestHandler):
"""API handler for various alert requests."""
def AuthorizedPost(self, *args):
"""Returns alert data in response to API requests.
Possible list types:
keys: A comma-separated list of urlsafe Anomaly keys.
bug_id: A bug number on the Chromium issue tracker.
rev: A revision number.
Outputs:
Alerts data; see README.md.
"""
alert_list = None
response = {}
try:
if len(args) == 0:
is_improvement = self.request.get('is_improvement', None)
assert is_improvement in [None, 'true', 'false'], is_improvement
if is_improvement:
is_improvement = is_improvement == 'true'
recovered = self.request.get('recovered', None)
assert recovered in [None, 'true', 'false'], recovered
if recovered:
recovered = recovered == 'true'
start_cursor = self.request.get('cursor', None)
if start_cursor:
start_cursor = datastore_query.Cursor(urlsafe=start_cursor)
min_timestamp = self.request.get('min_timestamp', None)
if min_timestamp:
min_timestamp = ParseISO8601(min_timestamp)
max_timestamp = self.request.get('max_timestamp', None)
if max_timestamp:
max_timestamp = ParseISO8601(max_timestamp)
try:
alert_list, next_cursor, _ = anomaly.Anomaly.QueryAsync(
bot_name=self.request.get('bot', None),
bug_id=self.request.get('bug_id', None),
is_improvement=is_improvement,
key=self.request.get('key', None),
limit=int(self.request.get('limit', 100)),
master_name=self.request.get('master', None),
max_end_revision=self.request.get('max_end_revision', None),
max_start_revision=self.request.get('max_start_revision', None),
max_timestamp=max_timestamp,
min_end_revision=self.request.get('min_end_revision', None),
min_start_revision=self.request.get('min_start_revision', None),
min_timestamp=min_timestamp,
recovered=recovered,
sheriff=self.request.get('sheriff', None),
start_cursor=start_cursor,
test=self.request.get('test', None),
test_suite_name=self.request.get('test_suite', None)).get_result()
except AssertionError:
alert_list, next_cursor = [], None
if next_cursor:
response['next_cursor'] = next_cursor.urlsafe()
else:
list_type = args[0]
if list_type.startswith('bug_id'):
bug_id = list_type.replace('bug_id/', '')
try:
bug_id = int(bug_id)
except ValueError as e:
raise api_request_handler.BadRequestError(
'Invalid bug ID "%s".' % bug_id)
response['DEPRECATION WARNING'] = (
'Please use /api/alerts?bug_id=%s' % bug_id)
alert_list, _, _ = anomaly.Anomaly.QueryAsync(
bug_id=bug_id).get_result()
elif list_type.startswith('keys'):
keys = list_type.replace('keys/', '').split(',')
response['DEPRECATION WARNING'] = (
'Please use /api/alerts?key=%s' % keys[0])
alert_list = group_report.GetAlertsForKeys(keys)
elif list_type.startswith('rev'):
rev = list_type.replace('rev/', '')
response['DEPRECATION WARNING'] = (
'Please use /api/alerts?max_end_revision=%s&min_start_revision=%s'
% (rev, rev))
alert_list = group_report.GetAlertsAroundRevision(rev)
elif list_type.startswith('history'):
try:
days = int(list_type.replace('history/', ''))
except ValueError:
days = 7
cutoff = datetime.datetime.now() - datetime.timedelta(days=days)
sheriff_name = self.request.get('sheriff', 'Chromium Perf Sheriff')
sheriff_key = ndb.Key('Sheriff', sheriff_name)
sheriff = sheriff_key.get()
if not sheriff:
raise api_request_handler.BadRequestError(
'Invalid sheriff %s' % sheriff_name)
response['DEPRECATION WARNING'] = (
'Please use /api/alerts?min_timestamp=%s&sheriff=%s' % (
urllib.quote(cutoff.isoformat()),
urllib.quote(sheriff_name)))
include_improvements = bool(self.request.get('improvements'))
filter_for_benchmark = self.request.get('benchmark')
is_improvement = None
if not include_improvements:
is_improvement = False
response['DEPRECATION WARNING'] += '&is_improvement=false'
if filter_for_benchmark:
response['DEPRECATION WARNING'] += (
'&test_suite_name=' + filter_for_benchmark)
alert_list, _, _ = anomaly.Anomaly.QueryAsync(
sheriff=sheriff_key.id(),
min_timestamp=cutoff,
is_improvement=is_improvement,
test_suite_name=filter_for_benchmark).get_result()
else:
raise api_request_handler.BadRequestError(
'Invalid alert type %s' % list_type)
except request_handler.InvalidInputError as e:
raise api_request_handler.BadRequestError(e.message)
anomaly_dicts = alerts.AnomalyDicts(
[a for a in alert_list if a.key.kind() == 'Anomaly'])
response['anomalies'] = anomaly_dicts
return response
|
Python
| 0.999824
|
@@ -5740,21 +5740,16 @@
st_suite
-_name
=' + fil
|
5130b6c2b89484cfebd4b5416fd3e260ebab8b85
|
Univariate regression: Increase the number of decimals from 0 to 3
|
Orange/widgets/regression/owlinearregression.py
|
Orange/widgets/regression/owlinearregression.py
|
from PyQt4.QtGui import QLayout
from Orange.data import Table
from Orange.regression.linear import (RidgeRegressionLearner, LinearModel,
LinearRegressionLearner)
from Orange.preprocess.preprocess import Preprocess
from Orange.widgets import widget, settings, gui
class OWLinearRegression(widget.OWWidget):
name = "Linear Regression"
description = "A linear regression algorithm with optional L1 and L2 " \
"regularization."
icon = "icons/LinearRegression.svg"
inputs = [("Data", Table, "set_data"),
("Preprocessor", Preprocess, "set_preprocessor")]
outputs = [("Learner", RidgeRegressionLearner),
("Predictor", LinearModel)]
#: Types
OLS, Ridge, Lasso = 0, 1, 2
learner_name = settings.Setting("Linear Regression")
ridge = settings.Setting(False)
reg_type = settings.Setting(OLS)
ridgealpha = settings.Setting(1.0)
lassoalpha = settings.Setting(1.0)
want_main_area = False
def __init__(self, parent=None):
super().__init__(parent)
self.data = None
self.preprocessors = None
box = gui.widgetBox(self.controlArea, "Learner/Predictor Name")
gui.lineEdit(box, self, "learner_name")
box = gui.widgetBox(self.controlArea, "Options")
box = gui.radioButtons(box, self, "reg_type",
callback=self._reg_type_changed)
gui.appendRadioButton(box, "Ordinary linear regression")
gui.appendRadioButton(box, "Ridge regression")
ibox = gui.indentedBox(box)
gui.doubleSpin(ibox, self, "ridgealpha", 0.0, 1000.0, label="alpha:")
self.ridge_box = ibox
gui.appendRadioButton(box, "Lasso regression")
ibox = gui.indentedBox(box)
gui.doubleSpin(ibox, self, "lassoalpha", 0.0, 1000.0, label="alpha")
self.lasso_box = ibox
gui.button(self.controlArea, self, "Apply", callback=self.apply,
default=True)
self.layout().setSizeConstraint(QLayout.SetFixedSize)
self.ridge_box.setEnabled(self.reg_type == self.Ridge)
self.lasso_box.setEnabled(self.reg_type == self.Lasso)
self.apply()
def set_data(self, data):
self.data = data
if data is not None:
self.apply()
def set_preprocessor(self, preproc):
if preproc is None:
self.preprocessors = None
else:
self.preprocessors = (preproc,)
self.apply()
def apply(self):
args = {"preprocessors": self.preprocessors}
if self.reg_type == OWLinearRegression.OLS:
learner = LinearRegressionLearner(**args)
elif self.reg_type == OWLinearRegression.Ridge:
learner = RidgeRegressionLearner(
alpha=self.ridgealpha, **args)
elif self.reg_type == OWLinearRegression.Lasso:
learner = RidgeRegressionLearner(
alpha=self.lassoalpha, **args)
else:
assert False
learner.name = self.learner_name
predictor = None
if self.data is not None:
self.error(0)
if not learner.check_learner_adequacy(self.data.domain):
self.error(0, learner.learner_adequacy_err_msg)
else:
predictor = learner(self.data)
predictor.name = self.learner_name
self.send("Learner", learner)
self.send("Predictor", predictor)
def _reg_type_changed(self):
self.ridge_box.setEnabled(self.reg_type == self.Ridge)
self.lasso_box.setEnabled(self.reg_type == self.Lasso)
if __name__ == "__main__":
import sys
from PyQt4.QtGui import QApplication
a = QApplication(sys.argv)
ow = OWLinearRegression()
d = Table('iris')
ow.set_data(d)
ow.show()
a.exec_()
ow.saveSettings()
|
Python
| 0.998977
|
@@ -1639,36 +1639,82 @@
ridgealpha%22,
- 0.0, 1000.0
+%0A 0.0, 1000.0, step=0.01, decimals=2
, label=%22alp
@@ -1888,16 +1888,39 @@
oalpha%22,
+%0A
0.0, 10
@@ -1924,16 +1924,27 @@
1000.0,
+ step=0.01,
label=%22
@@ -1949,16 +1949,28 @@
=%22alpha%22
+, decimals=2
)%0A
|
9f5c1bdbe31945706725de688e61c0d77a91ae9b
|
fix filtering
|
lg_earth/scripts/add_kml.py
|
lg_earth/scripts/add_kml.py
|
#!/usr/bin/env python
from std_msgs.msg import String, Empty
from lg_common.srv import USCSMessage
from lg_common.msg import StringArray
from interactivespaces_msgs.msg import GenericMessage
import SimpleHTTPServer
import SocketServer
import threading
import tempfile
import rospy
import json
import copy
import os
import re
import binascii
DEFAULT_VIEWPORTS = ['left_three', 'left_two', 'left_one', 'center',
'right_one', 'right_two', 'right_three']
DEFAULT_EARTH_INSTANCE = {
u'activity': u'earth',
u'activity_config': {},
u'assets': [],
u'height': 1920,
u'presentation_viewport': u'CHANGE_ME',
u'slug': -1875729098,
u'width': 1080,
u'x_coord': 0,
u'y_coord': 0
}
kml_id_pattern = re.compile('<kml .* id=\"()\".*>')
def get_kml_id(kml):
"""
if <kml> tag has id attribute returns it value
othervise return unsigned crc32 of kml string
"""
id_match = kml_id_pattern.search(kml, re.IGNORECASE)
if id_match:
return id_match.group(1)
return hex(binascii.crc32(kml) % (1 << 32))
def get_match_any_starts_with(prefixes):
def matcher(test_string):
for prefix in prefixes:
if test_string.startswith(prefix):
return True
return False
return matcher
class KMLAdder():
def __init__(self, uscs_service, director_pub, added_kml_pub, port, hostname='localhost', viewports=None):
self.serve_dir = tempfile.mktemp()
self.uscs_service = uscs_service
self.director_pub = director_pub
self.added_kml_pub = added_kml_pub
self.id_to_file = dict()
self.hostname = hostname
self.viewports = viewports
if self.viewports is None:
self.viewports = DEFAULT_VIEWPORTS
self.port = port
self.server = threading.Thread(target=self._serve)
os.mkdir(self.serve_dir)
self.server.start()
def handle_kml(self, msg):
kml = msg.data
filename = tempfile.mktemp(dir=self.serve_dir)
with open(filename, 'w') as f:
f.write(kml)
kml_id = get_kml_id(kml)
if kml_id not in self.id_to_file:
self.id_to_file[kml_id] = list()
# Keep track of files for easier remove by id
self.id_to_file[kml_id].append(os.path.basename(filename))
current_scene = self.uscs_service.call().message
current_scene = json.loads(current_scene)
self.add_earths(current_scene)
for window in current_scene['windows']:
if window['activity'] != 'earth':
continue
window['assets'].append(self.formatURL(filename))
new_msg = GenericMessage()
new_msg.type = 'json'
new_msg.message = json.dumps(current_scene)
self.director_pub.publish(new_msg)
self.added_kml_pub.publish(StringArray(list(self.id_to_file.keys())))
def formatURL(self, filename):
return 'http://{}:{}/{}'.format(self.hostname, self.port, os.path.basename(filename))
def clear_kmls(self, msg):
current_scene = self.uscs_service.call().message
current_scene = json.loads(current_scene)
ids = msg.strings if msg.strings else None
matcher = None
if ids:
files = [name for name in names for names in self.id_to_file]
for id in ids:
self.id_to_file.pop(id)
urls_to_remove = [self.formatURL(filename) for filename in files]
matcher = get_match_any_starts_with(urls_to_remove)
else:
# Remove all additional kmls
self.id_to_file = dict()
matcher = get_match_any_starts_with([self.formatURL(self.serve_dir)])
for window in current_scene['windows']:
if window['activity'] == 'earth':
window['assets'] = [a for a in window['assets'] if not matcher(a)]
new_msg = GenericMessage()
new_msg.type = 'json'
new_msg.message = json.dumps(current_scene)
self.director_pub.publish(new_msg)
self.added_kml_pub.publish(StringArray(list(self.id_to_file.keys())))
def _serve(self):
os.chdir(self.serve_dir)
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
self.httpd = SocketServer.TCPServer(("", self.port), Handler)
self.httpd.serve_forever()
def add_earths(self, scene):
for viewport in self.viewports:
flag = False
for window in scene['windows']:
if window['activity'] == 'earth' and window['presentation_viewport'] == viewport:
flag = True
# if no instance of earth w/ our current viewport is found
# we add one and give it our viewport
if flag is False:
scene['windows'].append(copy.deepcopy(DEFAULT_EARTH_INSTANCE))
scene['windows'][-1]['presentation_viewport'] = viewport
def shutdown(self):
self.httpd.shutdown()
self.server.join()
def main():
rospy.init_node('add_kml')
director_pub = rospy.Publisher('/director/scene', GenericMessage, queue_size=10)
added_kml_pub = rospy.Publisher('/lg_earth/added_kml', StringArray, latch=True, queue_size=1)
uscs_service = rospy.ServiceProxy('/uscs/message', USCSMessage)
hostname = rospy.get_param('~hostname', 'localhost')
port = rospy.get_param('~port', 18111)
k = KMLAdder(uscs_service, director_pub, added_kml_pub, port, hostname)
rospy.Subscriber('/lg_earth/add_kml', String, k.handle_kml)
rospy.Subscriber('/lg_earth/clear_kml', StringArray, k.clear_kmls)
rospy.on_shutdown(k.shutdown)
rospy.spin()
if __name__ == '__main__':
main()
|
Python
| 0.000002
|
@@ -3255,179 +3255,402 @@
-matcher = None%0A if ids:%0A files = %5Bname for name in names for names in self.id_to_file%5D%0A for id in ids:%0A self.id_to_file.pop(id)
+if ids:%0A files = %5B%5D%0A for id in ids:%0A if id in self.id_to_file:%0A for names in self.id_to_file.pop(id):%0A if type(names) == list:%0A for name in names:%0A files.append(name)%0A else:%0A files.append(names)%0A
%0A
|
b5a4708009e78c5727f2a54c54056df21983e958
|
Fix SlackOAuth
|
slack.py
|
slack.py
|
import os
import sys
import logging
from werkzeug.contrib.fixers import ProxyFix
import flask
from flask import Flask, redirect, url_for
from flask_dance.consumer import OAuth2ConsumerBlueprint
from raven.contrib.flask import Sentry
from requests.auth import AuthBase
log = logging.getLogger(__name__)
log.setLevel(logging.INFO)
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
sentry = Sentry(app)
app.secret_key = os.environ.get("FLASK_SECRET_KEY", "supersekrit")
app.config["SLACK_OAUTH_CLIENT_ID"] = os.environ.get("SLACK_OAUTH_CLIENT_ID")
app.config["SLACK_OAUTH_CLIENT_SECRET"] = os.environ.get("SLACK_OAUTH_CLIENT_SECRET")
class SlackOAuth(AuthBase):
"""
Slack wants the access token to be passed in a `token` GET parameter or POST
parameter, rather than using the `Authorization: Bearer` header. This is
annoying, but we can make it work using this custom Auth object.
"""
def __init__(self, blueprint):
self.blueprint = blueprint
def __call__(self, r):
if self.blueprint.token:
access_token = self.blueprint.token.get("access_token")
else:
access_token = None
if access_token:
r.data.setdefault('token', access_token)
return r
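# Intended effect (illustrative; token value assumed): with this auth object
# attached, a call such as
#   slack.post("chat.postMessage", data={"channel": "#general", "text": "ping"})
# should go out carrying token="xoxp-..." alongside the other POST parameters.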
slack_bp = OAuth2ConsumerBlueprint("slack", __name__,
base_url="https://slack.com/api/",
authorization_url="https://slack.com/oauth/authorize",
token_url="https://slack.com/api/oauth.access",
scope=["identify", "chat:write:bot"],
)
slack_bp.auth = SlackOAuth(slack_bp)
slack_bp.from_config["client_id"] = "SLACK_OAUTH_CLIENT_ID"
slack_bp.from_config["client_secret"] = "SLACK_OAUTH_CLIENT_SECRET"
app.register_blueprint(slack_bp, url_prefix="/login")
@app.route("/")
def index():
slack = slack_bp.session
if not slack.authorized:
return redirect(url_for("slack.login"))
resp = slack.post("chat.postMessage", data={
"channel": "#general",
"text": "ping",
"icon_emoji": ":robot_face:",
})
assert resp.ok, resp.text
return resp.text
if __name__ == "__main__":
app.run()
|
Python
| 0.999411
|
@@ -74,16 +74,65 @@
roxyFix%0A
+from werkzeug.urls import url_encode, url_decode%0A
import f
@@ -309,16 +309,48 @@
AuthBase
+%0Afrom urlobject import URLObject
%0A%0Alog =
@@ -1246,16 +1246,20 @@
if
+not
access_t
@@ -1281,13 +1281,304 @@
r
-.data
+eturn r%0A%0A if r.method == %22GET%22:%0A url = URLObject(r.url)%0A if not %22token%22 in url.query_dict:%0A url = url.add_query_param(%22token%22, access_token)%0A r.url = url%0A elif r.method == %22POST%22:%0A args = url_decode(r.body)%0A args
.set
@@ -1589,15 +1589,15 @@
ult(
-'
+%22
token
-'
+%22
, ac
@@ -1600,32 +1600,70 @@
, access_token)%0A
+ r.body = url_encode(args)%0A
return r
|
6020741950aa1e4f60fbb66946fd962d6b0ccc21
|
fix bug
|
geni/methods/get_credential.py
|
geni/methods/get_credential.py
|
from geni.util.faults import *
from geni.util.excep import *
from geni.util.method import Method
from geni.util.parameter import Parameter, Mixed
from geni.util.auth import Auth
from geni.util.record import GeniRecord
from geni.util.credential import *
from geni.util.rights import *
from geni.util.debug import log
class get_credential(Method):
"""
    Retrieve a credential for an object.
    If cred == None, the behavior reverts to get_self_credential.
@param cred credential object specifying rights of the caller
@param type type of object (user | slice | sa | ma | node)
@param hrn human readable name of object
@return the string representation of a credential object
"""
interfaces = ['registry']
accepts = [
Mixed(Parameter(str, "credential"),
Parameter(None, "No credential")),
Parameter(str, "Human readable name (hrn)")
]
returns = Parameter(str, "String representation of a credential object")
def call(self, cred, type, hrn):
if not cred:
return self.get_self_credential(type, hrn)
self.api.auth.check(cred, 'getcredential')
self.api.auth.verify_object_belongs_to_me(hrn)
auth_hrn = self.api.auth.get_authority(hrn)
if not auth_hrn:
auth_hrn = hrn
auth_info = self.api.auth.get_auth_info(auth_hrn)
table = self.api.auth.get_auth_table(auth_hrn)
records = table.resolve('*', hrn)
if not records:
raise RecordNotFount(hrn)
record = records[0]
# verify_cancreate_credential requires that the member lists
# (researchers, pis, etc) be filled in
self.api.fill_record_info(record)
self.api.auth.verify_cancreate_credential(self.api.auth.client_cred, record)
# TODO: Check permission that self.client_cred can access the object
object_gid = record.get_gid_object()
new_cred = Credential(subject = object_gid.get_subject())
new_cred.set_gid_caller(self.api.auth.client_gid)
new_cred.set_gid_object(object_gid)
new_cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
new_cred.set_pubkey(object_gid.get_pubkey())
rl = determine_rights(type,hrn)
new_cred.set_privileges(rl)
# determine the type of credential that we want to use as a parent for
# this credential.
if (type == "ma") or (type == "node"):
auth_kind = "authority,ma"
else: # user, slice, sa
auth_kind = "authority,sa"
new_cred.set_parent(self.api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
new_cred.encode()
new_cred.sign()
return new_cred.save_to_string(save_parents=True)
def get_self_credential(self, type, hrn):
"""
        get_self_credential is a degenerate version of get_credential used by a
        client to get its initial credential when it doesn't have one. This is
        the same as get_credential(..., cred=None, ...).
The registry ensures that the client is the principal that is named by
(type, name) by comparing the public key in the record's GID to the
private key used to encrypt the client side of the HTTPS connection. Thus
        it is impossible for one principal to retrieve another principal's
credential without having the appropriate private key.
@param type type of object (user | slice | sa | ma | node)
@param hrn human readable name of authority to list
@return string representation of a credential object
"""
self.api.auth.verify_object_belongs_to_me(hrn)
auth_hrn = self.api.auth.get_authority(hrn)
if not auth_hrn:
auth_hrn = hrn
auth_info = self.api.auth.get_auth_info(auth_hrn)
# find a record that matches
record = None
table = self.api.auth.get_auth_table(auth_hrn)
records = table.resolve('*', hrn)
for rec in records:
if type in ['*'] or rec.get_type() in [type]:
record = rec
gid = record.get_gid_object()
peer_cert = self.api.auth.peer_cert
if not peer_cert.is_pubkey(gid.get_pubkey()):
raise ConnectionKeyGIDMismatch(gid.get_subject())
# create the credential
gid = record.get_gid_object()
cred = Credential(subject = gid.get_subject())
cred.set_gid_caller(gid)
cred.set_gid_object(gid)
cred.set_issuer(key=auth_info.get_pkey_object(), subject=auth_hrn)
cred.set_pubkey(gid.get_pubkey())
rl = determine_rights(type, hrn)
cred.set_privileges(rl)
# determine the type of credential that we want to use as a parent for
# this credential.
if (type == "ma") or (type == "node"):
auth_kind = "authority,ma"
else: # user, slice, sa
auth_kind = "authority,sa"
cred.set_parent(self.api.auth.hierarchy.get_auth_cred(auth_hrn, kind=auth_kind))
cred.encode()
cred.sign()
return cred.save_to_string(save_parents=True)
|
Python
| 0.000001
|
@@ -1538,17 +1538,17 @@
dNotFoun
-t
+d
(hrn)%0A
|
db676bcde0b301c1cd1a4346687b3cef02390e8c
|
Mark trustee_domain_admin_password secret
|
magnum/common/keystone.py
|
magnum/common/keystone.py
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from keystoneclient.auth.identity import v3
import keystoneclient.exceptions as kc_exception
from keystoneclient import session
from keystoneclient.v3 import client as kc_v3
from oslo_config import cfg
from oslo_log import log as logging
from magnum.common import exception
from magnum.i18n import _
from magnum.i18n import _LE
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
trust_opts = [
cfg.StrOpt('trustee_domain_id',
help=_('Id of the domain to create trustee for bays')),
cfg.StrOpt('trustee_domain_admin_id',
help=_('Id of the admin with roles sufficient to manage users'
' in the trustee_domain')),
cfg.StrOpt('trustee_domain_admin_password',
help=_('Password of trustee_domain_admin')),
cfg.ListOpt('roles',
default=[],
help=_('The roles which are delegated to the trustee '
'by the trustor'))
]
CONF.register_opts(trust_opts, group='trust')
CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
class KeystoneClientV3(object):
"""Keystone client wrapper so we can encapsulate logic in one place."""
def __init__(self, context):
self.context = context
self._client = None
self._admin_client = None
self._domain_admin_client = None
@property
def auth_url(self):
v3_auth_url = CONF.keystone_authtoken.auth_uri.replace('v2.0', 'v3')
return v3_auth_url
@property
def auth_token(self):
return self.client.auth_token
@property
def session(self):
return self.client.session
@property
def admin_session(self):
return self.admin_client.session
@property
def client(self):
if self.context.is_admin:
return self.admin_client
else:
if not self._client:
self._client = self._get_ks_client()
return self._client
def _get_admin_credentials(self):
credentials = {
'username': CONF.keystone_authtoken.admin_user,
'password': CONF.keystone_authtoken.admin_password,
'project_name': CONF.keystone_authtoken.admin_tenant_name
}
return credentials
@property
def admin_client(self):
if not self._admin_client:
admin_credentials = self._get_admin_credentials()
self._admin_client = kc_v3.Client(auth_url=self.auth_url,
**admin_credentials)
return self._admin_client
@property
def domain_admin_client(self):
if not self._domain_admin_client:
auth = v3.Password(
auth_url=self.auth_url,
user_id=CONF.trust.trustee_domain_admin_id,
domain_id=CONF.trust.trustee_domain_id,
password=CONF.trust.trustee_domain_admin_password)
sess = session.Session(auth=auth)
self._domain_admin_client = kc_v3.Client(session=sess)
return self._domain_admin_client
@staticmethod
def _is_v2_valid(auth_token_info):
return 'access' in auth_token_info
@staticmethod
def _is_v3_valid(auth_token_info):
return 'token' in auth_token_info
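    # Illustrative token payloads (assumed shapes): a Keystone v2 token dict
    # looks like {'access': {...}} while a v3 one looks like {'token': {...}};
    # the two helpers above key off exactly that difference.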
def _get_ks_client(self):
kwargs = {'auth_url': self.auth_url,
'endpoint': self.auth_url}
if self.context.trust_id:
kwargs.update(self._get_admin_credentials())
kwargs['trust_id'] = self.context.trust_id
kwargs.pop('project_name')
elif self.context.auth_token_info:
kwargs['token'] = self.context.auth_token
if self._is_v2_valid(self.context.auth_token_info):
LOG.warning('Keystone v2 is deprecated.')
kwargs['auth_ref'] = self.context.auth_token_info['access']
kwargs['auth_ref']['version'] = 'v2.0'
elif self._is_v3_valid(self.context.auth_token_info):
kwargs['auth_ref'] = self.context.auth_token_info['token']
kwargs['auth_ref']['version'] = 'v3'
else:
LOG.error(_LE('Unknown version in auth_token_info'))
raise exception.AuthorizationFailure()
elif self.context.auth_token:
kwargs['token'] = self.context.auth_token
else:
            LOG.error(_LE('Keystone v3 API connection failed, no password '
'trust or auth_token'))
raise exception.AuthorizationFailure()
return kc_v3.Client(**kwargs)
def create_trust(self, trustee_user):
trustor_user_id = self.client.auth_ref.user_id
trustor_project_id = self.client.auth_ref.project_id
# inherit the role of the trustor, unless set CONF.trust.roles
if CONF.trust.roles:
roles = CONF.trust.roles
else:
roles = self.context.roles
try:
trust = self.client.trusts.create(
trustor_user=trustor_user_id,
project=trustor_project_id,
trustee_user=trustee_user,
impersonation=True,
role_names=roles)
except Exception:
LOG.exception(_LE('Failed to create trust'))
raise exception.TrustCreateFailed(
trustee_user_id=trustee_user)
return trust
def delete_trust(self, trust_id):
if trust_id is None:
return
try:
self.client.trusts.delete(trust_id)
except kc_exception.NotFound:
pass
except Exception:
LOG.exception(_LE('Failed to delete trust'))
raise exception.TrustDeleteFailed(trust_id=trust_id)
def create_trustee(self, username, password, domain_id):
try:
user = self.domain_admin_client.users.create(
name=username,
password=password,
domain=domain_id)
except Exception:
LOG.exception(_LE('Failed to create trustee'))
raise exception.TrusteeCreateFailed(username=username,
domain_id=domain_id)
return user
def delete_trustee(self, trustee_id):
try:
self.domain_admin_client.users.delete(trustee_id)
except kc_exception.NotFound:
pass
except Exception:
LOG.exception(_LE('Failed to delete trustee'))
raise exception.TrusteeDeleteFailed(trustee_id=trustee_id)
def get_validate_region_name(self, region_name):
if region_name is None:
message = _("region_name needs to be configured in magnum.conf")
raise exception.InvalidParameterValue(message)
"""matches the region of a public endpoint for the Keystone
service."""
try:
regions = self.client.regions.list()
except kc_exception.NotFound:
pass
except Exception:
LOG.exception(_LE('Failed to list regions'))
raise exception.RegionsListFailed()
region_list = []
for region in regions:
region_list.append(region.id)
if region_name not in region_list:
raise exception.InvalidParameterValue(_(
'region_name %(region_name)s is invalid, '
'expecting a region_name in %(region_name_list)s.') % {
'region_name': region_name,
'region_name_list': '/'.join(
region_list + ['unspecified'])})
return region_name
|
Python
| 0.999889
|
@@ -1259,16 +1259,29 @@
ssword',
+ secret=True,
%0A
|
c80fd613af104fc05e0413936d9c740b048365d9
|
add pdb shell
|
tensorpack/callbacks/stats.py
|
tensorpack/callbacks/stats.py
|
# -*- coding: utf-8 -*-
# File: stats.py
# Author: Yuxin Wu <ppwwyyxxc@gmail.com>
import os
import numpy as np
from .base import Callback
from ..utils import logger
from ..tfutils import get_op_tensor_name
__all__ = ['SendStat', 'DumpParamAsImage', 'InjectShell']
class SendStat(Callback):
""" An equivalent of :class:`SendMonitorData`, but as a normal callback. """
def __init__(self, command, names):
self.command = command
if not isinstance(names, list):
names = [names]
self.names = names
def _trigger(self):
M = self.trainer.monitors
v = {k: M.get_latest(k) for k in self.names}
cmd = self.command.format(**v)
ret = os.system(cmd)
if ret != 0:
logger.error("Command {} failed with ret={}!".format(cmd, ret))
# TODO other types of shell?
class InjectShell(Callback):
"""
When triggered, opens an IPython shell if a file exists.
    Useful for interactive debugging during training.
Using this callback requires ipython to be installed.
"""
def __init__(self, file='INJECT_SHELL.tmp'):
"""
Args:
file (str): if this file exists, will open a shell.
"""
self._file = file
logger.info("Create a file '{}' to open debug shell.".format(file))
def _trigger(self):
if os.path.isfile(self._file):
logger.info("File {} exists, entering shell.".format(self._file))
trainer = self.trainer # noqa
import IPython as IP; IP.embed() # noqa
def _after_train(self):
if os.path.isfile(self._file):
os.unlink(self._file)
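# Illustrative workflow (assumed usage): register InjectShell() among the
# trainer callbacks, then `touch INJECT_SHELL.tmp` in the working directory to
# drop into IPython at the next trigger; remove the file to stop re-entering.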
class DumpParamAsImage(Callback):
"""
Dump a tensor to image(s) to ``logger.LOG_DIR`` after every epoch.
Note that it requires the tensor is directly evaluable, i.e. either inputs
are not its dependency (e.g. the weights of the model), or the inputs are
feedfree (in which case this callback will take an extra datapoint from the input pipeline).
"""
def __init__(self, tensor_name, prefix=None, map_func=None, scale=255):
"""
Args:
tensor_name (str): the name of the tensor.
prefix (str): the filename prefix for saved images. Defaults to the Op name.
map_func: map the value of the tensor to an image or list of
images of shape [h, w] or [h, w, c]. If None, will use identity.
scale (float): a multiplier on pixel values, applied after map_func.
"""
op_name, self.tensor_name = get_op_tensor_name(tensor_name)
self.func = map_func
if prefix is None:
self.prefix = op_name
else:
self.prefix = prefix
self.log_dir = logger.LOG_DIR
self.scale = scale
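    # Illustrative usage (tensor name and layout assumed):
    #   DumpParamAsImage('conv1/W:0',
    #                    map_func=lambda w: [w[:, :, 0, k] for k in range(w.shape[3])])
    # would dump each first-input-channel filter as its own [h, w] image.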
def _before_train(self):
self._tensor = self.graph.get_tensor_by_name(self.tensor_name)
def _trigger(self):
val = self.trainer.sess.run(self._tensor)
if self.func is not None:
val = self.func(val)
if isinstance(val, list) or val.ndim == 4:
for idx, im in enumerate(val):
self._dump_image(im, idx)
else:
self._dump_image(val)
self.trainer.monitors.put_image(self.prefix, val)
def _dump_image(self, im, idx=None):
assert im.ndim in [2, 3], str(im.ndim)
fname = os.path.join(
self.log_dir,
self.prefix + '-ep{:03d}{}.png'.format(
self.epoch_num, '-' + str(idx) if idx else ''))
res = im * self.scale
res = np.clip(res, 0, 255)
cv2.imwrite(fname, res.astype('uint8'))
try:
import cv2
except ImportError:
from ..utils.develop import create_dummy_class
DumpParamAsImage = create_dummy_class('DumpParamAsImage', 'cv2') # noqa
|
Python
| 0.000001
|
@@ -818,37 +818,8 @@
)%0A%0A%0A
-# TODO other types of shell?%0A
clas
@@ -887,16 +887,20 @@
IPython
+/pdb
shell i
@@ -1080,16 +1080,33 @@
ELL.tmp'
+, shell='ipython'
):%0A
@@ -1201,37 +1201,156 @@
-%22%22%22%0A self._file = file
+ shell (str): one of 'ipython', 'pdb'%0A %22%22%22%0A self._file = file%0A assert shell in %5B'ipython', 'pdb'%5D%0A self._shell = shell
%0A
@@ -1394,21 +1394,18 @@
to open
-debug
+%7B%7D
shell.%22
@@ -1416,16 +1416,23 @@
mat(file
+, shell
))%0A%0A
@@ -1584,94 +1584,284 @@
-trainer = self.trainer # noqa%0A import IPython as IP; IP.embed() # noqa
+self._inject()%0A%0A def _inject(self):%0A trainer = self.trainer # noqa%0A if self._shell == 'ipython':%0A import IPython as IP # noqa%0A IP.embed()%0A elif self._shell == 'pdb':%0A import pdb # noqa%0A pdb.set_trace()
%0A%0A
|
e145d8012efbfb373dfe566845f3957777a3da5a
|
Clean up an unnecessary variable.
|
sqltd.py
|
sqltd.py
|
#!/usr/bin/env python
import sqlite3
import sys
import re
def runPage(db, html):
def parseStrings(s, query=[False]):
output = ''
if s == '<sql>':
query[0] = True
elif s == '</sql>':
query[0] = False
elif query[0]:
result = dbExecute(db, s)
output = '<table>\n%s</table>\n' % (''.join(makeTable(result)))
else:
output = ''.join(s)
return output
split = re.split('(<sql>|</sql>)', html)
output = ''
return ''.join([parseStrings(s) for s in split])
def dbConnect(db_path):
db = sqlite3.connect(db_path)
return db
def dbExecute(db, query):
#May need a way to check that the query is valid
c = db.cursor()
c.execute(query)
return c
def makeTable(rows):
def makeRow(row):
return ''.join(['<tr>\n', ''.join([' <td>%s</td>\n' % str(col) for col in row]), '</tr>\n'])
header = ''.join(['<th>%s</th>\n' % (field[0]) for field in rows.description])
output = ''.join([makeRow(row) for row in rows.fetchall()])
return "%s%s" %(header, output)
if __name__ == "__main__":
if len(sys.argv) >= 2:
DB_PATH = sys.argv[1];
else:
print "No sqlite database specified."
exit(1)
db = dbConnect(DB_PATH)
if(not db):
print "Error opening database"
exit(1);
print runPage(db, ''.join(sys.stdin))
|
Python
| 0.000031
|
@@ -618,29 +618,8 @@
l)%0A%0A
- output = ''%0A%0A
|
9a68c2f1af56169215cf473b1f588df77fb1f9fe
|
Remove wait timeouts
|
MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py
|
MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py
|
import os
import shutil
import tempfile
import time
REGION_PLAY = Region(7,965,334,57)
REGION_MULLIGAN_KEEP = Region(0,13,175,154)
REGION_TEMPORARY_ZONE = Region(1150,195,130,37)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY = Region(921,181,459,166)
REGION_ON_THE_BOTTOM_OF_THE_LIBRARY = Region(1726,664,180,37)
REGION_CONCEDE_MATCH_BUTTON = Region(891,554,133,48)
TEMP_DIR_PREFIX = time.strftime("MTGO-scry-bug_%Y-%m-%d_%H-%M-%S", time.gmtime())
TEMP_PATH = tempfile.mkdtemp(prefix=TEMP_DIR_PREFIX)
print "TEMP_PATH:", TEMP_PATH
OUTPUT_PATH = os.path.join(TEMP_PATH, 'output')
print "OUTPUT_PATH:", OUTPUT_PATH
HITS_DIR = 'hits'
HITS_PATH = os.path.join(OUTPUT_PATH, HITS_DIR)
print "HITS_PATH:", HITS_PATH
MISSES_DIR = 'misses'
MISSES_PATH = os.path.join(OUTPUT_PATH, MISSES_DIR)
print "MISSES_PATH:", MISSES_PATH
os.mkdir(OUTPUT_PATH)
os.mkdir(HITS_PATH)
os.mkdir(MISSES_PATH)
iterations = 0
hits = 0
while True:
REGION_PLAY.wait("play.png", 5)
REGION_PLAY.click(Location(164, 993))
REGION_MULLIGAN_KEEP.wait("mulligan_keep.png", 5)
for i in range(0, 7):
REGION_MULLIGAN_KEEP.wait("mulligan_highlighted_keep.png", 5)
time.sleep(0.5)
REGION_MULLIGAN_KEEP.click(Location(47, 142))
REGION_TEMPORARY_ZONE.wait("temporary_zone.png", 5)
time.sleep(0.1)
card_sent_to_bottom = capture(Region(1209,283,102,63))
click(Location(1242, 379)) # Click on the top card of the library.
time.sleep(0.5)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY.click(Location(1139, 424)) # Click on "Put on the bottom of your library."
REGION_ON_THE_BOTTOM_OF_THE_LIBRARY.wait("card_on_the_bottom_of_the_library.png", 5)
time.sleep(0.5)
card_drawn_region = Region(Region(203,780,155,115))
card_drawn = capture(card_drawn_region)
copy_path = ""
if card_drawn_region.exists(card_sent_to_bottom):
hits += 1
copy_path = HITS_PATH
else:
copy_path = MISSES_PATH
iterations += 1
print hits, "/", iterations
shutil.move(card_sent_to_bottom, os.path.join(copy_path, str(iterations) + "_bottom.png"))
shutil.move(card_drawn, os.path.join(copy_path, str(iterations) + "_drawn.png"))
click(Location(1903, 13)) # Click on the "X" (close) button.
    REGION_CONCEDE_MATCH_BUTTON.wait("concede_match.png", 5)
    time.sleep(0.5)
    REGION_CONCEDE_MATCH_BUTTON.click("concede_match.png")
|
Python
| 0.000023
|
@@ -1026,27 +1026,24 @@
t(%22play.png%22
-, 5
)%0A REGION
@@ -1120,27 +1120,24 @@
an_keep.png%22
-, 5
)%0A for i
@@ -1213,27 +1213,24 @@
ed_keep.png%22
-, 5
)%0A ti
@@ -1345,27 +1345,24 @@
ry_zone.png%22
-, 5
)%0A time.s
@@ -1724,27 +1724,24 @@
library.png%22
-, 5
)%0A time.s
@@ -2377,11 +2377,8 @@
png%22
-, 5
)%0A
|
d1d3a8d5729dda7ca32d140dfcf078e9afbb57a4
|
Move configurable constants back to the top level
|
MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py
|
MTGO-scry-bug-test.sikuli/MTGO-scry-bug-test.py
|
import hashlib
import os
import shutil
import tempfile
import time
def hash_file(file_path):
hasher = hashlib.md5()
with open(file_path, 'rb') as opened_file:
buf = opened_file.read()
hasher.update(buf)
return hasher.hexdigest()
def main():
REGION_PLAY = Region(7,965,334,57)
REGION_MULLIGAN_KEEP = Region(0,13,175,154)
REGION_CARD_SENT_TO_BOTTOM = Region(1209,283,102,63)
REGION_TEMPORARY_ZONE = Region(1017,199,124,29)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY = Region(921,181,459,166)
REGION_CHAT_PUT_A_CARD_ON_THE_BOTTOM_OF_THE_LIBRARY = Region(1501,671,398,51)
REGION_CARD_PREVIEW_CAPTURE = Region(1549,546,52,23)
REGION_CONCEDE_MATCH_BUTTON = Region(891,554,133,48)
LOCATION_PLAY = Location(169, 995)
LOCATION_MULLIGAN = Location(47, 141)
LOCATION_TEMPORARY_ZONE_CARD = Location(1195, 382)
LOCATION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY = Location(1118, 430)
LOCATION_FIRST_CARD_IN_HAND = Location(282, 910)
LOCATION_X_CLOSE = Location(1902, 14)
LOCATION_CONCEDE_MATCH = Location(961, 579)
TEMP_DIR_PREFIX = time.strftime("MTGO-scry-bug_%Y-%m-%d_%H-%M-%S", time.gmtime())
TEMP_PATH = tempfile.mkdtemp(prefix=TEMP_DIR_PREFIX)
print "TEMP_PATH:", TEMP_PATH
OUTPUT_PATH = os.path.join(TEMP_PATH, 'output')
print "OUTPUT_PATH:", OUTPUT_PATH
HITS_DIR = 'hits'
HITS_PATH = os.path.join(OUTPUT_PATH, HITS_DIR)
print "HITS_PATH:", HITS_PATH
MISSES_DIR = 'misses'
MISSES_PATH = os.path.join(OUTPUT_PATH, MISSES_DIR)
print "MISSES_PATH:", MISSES_PATH
os.mkdir(OUTPUT_PATH)
os.mkdir(HITS_PATH)
os.mkdir(MISSES_PATH)
iterations = 0
hits = 0
while True:
REGION_PLAY.wait("play.png")
REGION_PLAY.click(LOCATION_PLAY)
REGION_MULLIGAN_KEEP.wait("mulligan_keep.png")
for i in range(0, 7):
REGION_MULLIGAN_KEEP.wait("mulligan_highlighted_keep.png")
time.sleep(0.5)
REGION_MULLIGAN_KEEP.click(LOCATION_MULLIGAN)
REGION_TEMPORARY_ZONE.wait("temporary_zone.png")
time.sleep(0.1)
click(LOCATION_TEMPORARY_ZONE_CARD)
time.sleep(0.5)
REGION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY.click(LOCATION_PUT_ON_THE_BOTTOM_OF_YOUR_LIBRARY)
REGION_CHAT_PUT_A_CARD_ON_THE_BOTTOM_OF_THE_LIBRARY.wait("chat_put_a_card_on_the_bottom_of_the_library.png")
time.sleep(0.5)
card_sent_to_bottom_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
hover(LOCATION_FIRST_CARD_IN_HAND) # Update the preview with the drawn card.
time.sleep(1.0)
card_drawn_capture = capture(REGION_CARD_PREVIEW_CAPTURE)
copy_path = ""
card_sent_to_bottom_hash = hash_file(card_sent_to_bottom_capture)
card_drawn_hash = hash_file(card_drawn_capture)
print "card_sent_to_bottom_hash", card_sent_to_bottom_hash
print "card_drawn_hash", card_drawn_hash
if card_sent_to_bottom_hash == card_drawn_hash:
hits += 1
copy_path = HITS_PATH
else:
copy_path = MISSES_PATH
iterations += 1
print hits, "/", iterations
shutil.move(card_sent_to_bottom_capture, os.path.join(copy_path, str(iterations) + "_bottom.png"))
shutil.move(card_drawn_capture, os.path.join(copy_path, str(iterations) + "_drawn.png"))
click(LOCATION_X_CLOSE)
        REGION_CONCEDE_MATCH_BUTTON.wait("concede_match.png")
        time.sleep(0.5)
        REGION_CONCEDE_MATCH_BUTTON.click(LOCATION_CONCEDE_MATCH)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -65,216 +65,8 @@
me%0A%0A
-def hash_file(file_path):%0A hasher = hashlib.md5()%0A with open(file_path, 'rb') as opened_file:%0A buf = opened_file.read()%0A hasher.update(buf)%0A%0A return hasher.hexdigest()%0A%0Adef main():%0A
REGI
@@ -136,20 +136,16 @@
334,57)%0A
-
REGION_M
@@ -211,20 +211,16 @@
75,154)%0A
-
REGION_C
@@ -289,20 +289,16 @@
102,63)%0A
-
REGION_T
@@ -367,20 +367,16 @@
124,29)%0A
-
REGION_P
@@ -445,20 +445,16 @@
59,166)%0A
-
REGION_C
@@ -523,20 +523,16 @@
398,51)%0A
-
REGION_C
@@ -600,20 +600,16 @@
,52,23)%0A
-
REGION_C
@@ -678,20 +678,16 @@
33,48)%0A%0A
-
LOCATION
@@ -751,20 +751,16 @@
9, 995)%0A
-
LOCATION
@@ -823,20 +823,16 @@
7, 141)%0A
-
LOCATION
@@ -897,20 +897,16 @@
5, 382)%0A
-
LOCATION
@@ -971,20 +971,16 @@
8, 430)%0A
-
LOCATION
@@ -1044,20 +1044,16 @@
2, 910)%0A
-
LOCATION
@@ -1117,20 +1117,16 @@
02, 14)%0A
-
LOCATION
@@ -1191,16 +1191,220 @@
, 579)%0A%0A
+def hash_file(file_path):%0A hasher = hashlib.md5()%0A with open(file_path, 'rb') as opened_file:%0A buf = opened_file.read()%0A hasher.update(buf)%0A%0A return hasher.hexdigest()%0A%0Adef main():%0A
TEMP
|
49e9fbbd00a7732faa716e5e930ec63dbaa18983
|
fix gumbel unit test
|
test/lib/test_distribution.py
|
test/lib/test_distribution.py
|
from flaky import flaky
from slm_lab.lib import distribution
import pytest
import torch
@pytest.mark.parametrize('pdparam_type', [
'probs', 'logits'
])
def test_argmax(pdparam_type):
pdparam = torch.tensor([1.1, 10.0, 2.1])
# test both probs or logits
pd = distribution.Argmax(**{pdparam_type: pdparam})
for _ in range(10):
assert pd.sample().item() == 1
assert torch.equal(pd.probs, torch.tensor([0., 1., 0.]))
@flaky
@pytest.mark.parametrize('pdparam_type', [
'probs', 'logits'
])
def test_gumbel_categorical(pdparam_type):
pdparam = torch.tensor([1.1, 10.0, 2.1])
pd = distribution.GumbelSoftmax(**{pdparam_type: pdparam})
for _ in range(10):
assert torch.is_tensor(pd.sample())
@pytest.mark.parametrize('pdparam_type', [
'probs', 'logits'
])
def test_multicategorical(pdparam_type):
pdparam0 = torch.tensor([10.0, 0.0, 0.0])
pdparam1 = torch.tensor([0.0, 10.0, 0.0])
pdparam2 = torch.tensor([0.0, 0.0, 10.0])
pdparams = [pdparam0, pdparam1, pdparam2]
# use a probs
pd = distribution.MultiCategorical(**{pdparam_type: pdparams})
assert isinstance(pd.probs, list)
# test probs only since if init from logits, probs will be close but not precise
if pdparam_type == 'probs':
assert torch.equal(pd.probs[0], torch.tensor([1., 0., 0.]))
assert torch.equal(pd.probs[1], torch.tensor([0., 1., 0.]))
assert torch.equal(pd.probs[2], torch.tensor([0., 0., 1.]))
for _ in range(10):
assert torch.equal(pd.sample(), torch.tensor([0, 1, 2]))
|
Python
| 0
|
@@ -656,32 +656,66 @@
am_type: pdparam
+, 'temperature': torch.tensor(1.0)
%7D)%0A for _ in
|
cec8e3078784abf0224b23592d56745a09d7060f
|
add jszip to mechanical rob
|
test/requests/link_checker.py
|
test/requests/link_checker.py
|
import re
import requests
from lxml.html import parse
from requests.exceptions import ConnectionError
DO_FAIL=False # fail on error
def is_root_link(link):
pattern = re.compile("^/$")
return pattern.match(link)
def is_mailto_link(link):
pattern = re.compile("^mailto:.*")
return pattern.match(link)
def is_internal_link(link):
pattern = re.compile("^/.*")
return pattern.match(link)
def is_in_page_link(link):
pattern = re.compile("^#.*")
return pattern.match(link)
def get_links(doc):
return [x for x in [y.get("href") for y in doc.cssselect("a")] if not (
is_root_link(x)
or is_mailto_link(x))]
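# Illustrative filtering (assumed hrefs): given anchors for '/', 'mailto:x@y',
# '/about', and '#top', get_links keeps '/about' and '#top'; root and mailto
# links are dropped (in-page links are filtered later, in check_page).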
def verify_link(link):
if link[0] == "#":
# local link on page
return
print("verifying "+link)
try:
result = requests.get(link, timeout=20, verify=False)
if result.status_code == 200:
print(link+" ==> OK")
elif result.status_code == 307:
print(link+" ==> REDIRECT")
else:
print("ERROR: link `"+link+"` failed with status "
, result.status_code)
if DO_FAIL:
raise Exception("Failed verify")
except ConnectionError as ex:
print("ERROR: ", link, ex)
if DO_FAIL:
raise ex
def verify_static_file(link):
print("verifying "+link)
try:
result = requests.get(link, timeout=20, verify=False)
if (result.status_code == 200 and
result.content.find(bytes("Error: 404 Not Found", "utf-8")) <= 0):
print(link+" ==> OK")
else:
print("ERROR: link {}".format(link))
raise Exception("Failed verify")
except ConnectionError as ex:
print("ERROR: ", link, ex)
def check_page(host, start_url):
print("")
print("Checking links host "+host+" in page `"+start_url+"`")
doc = parse(start_url).getroot()
links = get_links(doc)
in_page_links = list(filter(is_in_page_link, links))
internal_links = list(filter(is_internal_link, links))
external_links = [x for x in links if not (is_internal_link(x) or is_in_page_link(x))]
for link in internal_links:
verify_link(host+link)
for link in external_links:
verify_link(link)
def check_links(args_obj, parser):
print("")
print("Checking links")
host = args_obj.host
# Check the home page
check_page(host, host)
# Check traits page
check_page(
host,
host+"/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P")
def check_packaged_js_files(args_obj, parser):
host = args_obj.host
js_files = [
# Datatables Extensions:
"/css/DataTablesExtensions/buttonsBootstrap/css/buttons.bootstrap.css",
"/js/DataTablesExtensions/buttons/js/dataTables.buttons.min.js",
"/css/DataTablesExtensions/buttonStyles/css/buttons.dataTables.min.css",
"/js/DataTablesExtensions/buttons/js/dataTables.buttons.min.js",
"/js/DataTablesExtensions/colResize/dataTables.colResize.js",
"/js/DataTablesExtensions/colReorder/js/dataTables.colReorder.js",
"/js/DataTablesExtensions/buttons/js/buttons.colVis.min.js",
"/js/DataTables/js/jquery.dataTables.js",
"/css/DataTablesExtensions/scroller/css/scroller.dataTables.min.css",
# Datatables plugins:
"/js/DataTablesExtensions/plugins/sorting/natural.js",
"/js/DataTablesExtensions/plugins/sorting/scientific.js",
# Other js libraries
"/js/chroma/chroma.min.js",
"/js/d3-tip/d3-tip.js",
"/js/d3js/d3.min.js",
"/js/js_alt/underscore.min.js",
"/js/nvd3/nv.d3.min.css",
"/js/qtip2/jquery.qtip.min.js",
"/js/js_alt/md5.min.js",
]
print("Checking links")
for link in js_files:
verify_static_file(host+link)
|
Python
| 0
|
@@ -3763,24 +3763,58 @@
d5.min.js%22,%0A
+ %22/js/jszip/jszip.min.js%22,%0A
%5D%0A%0A p
|
79759d982c7524a117a4d3cb72a28b501ef5b354
|
add jsalt/timeago.min.js to mechanical rob
|
test/requests/link_checker.py
|
test/requests/link_checker.py
|
import re
import requests
from lxml.html import parse
from requests.exceptions import ConnectionError
DO_FAIL=False # fail on error
def is_root_link(link):
pattern = re.compile("^/$")
return pattern.match(link)
def is_mailto_link(link):
pattern = re.compile("^mailto:.*")
return pattern.match(link)
def is_internal_link(link):
pattern = re.compile("^/.*")
return pattern.match(link)
def is_in_page_link(link):
pattern = re.compile("^#.*")
return pattern.match(link)
def get_links(doc):
return [x for x in [y.get("href") for y in doc.cssselect("a")] if not (
is_root_link(x)
or is_mailto_link(x))]
def verify_link(link):
if link[0] == "#":
# local link on page
return
print("verifying "+link)
try:
result = requests.get(link, timeout=20, verify=False)
if result.status_code == 200:
print(link+" ==> OK")
elif result.status_code == 307:
print(link+" ==> REDIRECT")
else:
print("ERROR: link `"+link+"` failed with status "
, result.status_code)
if DO_FAIL:
raise Exception("Failed verify")
except ConnectionError as ex:
print("ERROR: ", link, ex)
if DO_FAIL:
raise ex
def verify_static_file(link):
print("verifying "+link)
try:
result = requests.get(link, timeout=20, verify=False)
if (result.status_code == 200 and
result.content.find(bytes("Error: 404 Not Found", "utf-8")) <= 0):
print(link+" ==> OK")
else:
print("ERROR: link {}".format(link))
raise Exception("Failed verify")
except ConnectionError as ex:
print("ERROR: ", link, ex)
def check_page(host, start_url):
print("")
print("Checking links host "+host+" in page `"+start_url+"`")
doc = parse(start_url).getroot()
links = get_links(doc)
in_page_links = list(filter(is_in_page_link, links))
internal_links = list(filter(is_internal_link, links))
external_links = [x for x in links if not (is_internal_link(x) or is_in_page_link(x))]
for link in internal_links:
verify_link(host+link)
for link in external_links:
verify_link(link)
def check_links(args_obj, parser):
print("")
print("Checking links")
host = args_obj.host
# Check the home page
check_page(host, host)
# Check traits page
check_page(
host,
host+"/show_trait?trait_id=1435395_s_at&dataset=HC_M2_0606_P")
def check_packaged_js_files(args_obj, parser):
host = args_obj.host
js_files = [
# Datatables Extensions:
"/css/DataTablesExtensions/buttonsBootstrap/css/buttons.bootstrap.css",
"/js/DataTablesExtensions/buttons/js/dataTables.buttons.min.js",
"/css/DataTablesExtensions/buttonStyles/css/buttons.dataTables.min.css",
"/js/DataTablesExtensions/buttons/js/dataTables.buttons.min.js",
"/js/DataTablesExtensions/colResize/dataTables.colResize.js",
"/js/DataTablesExtensions/colReorder/js/dataTables.colReorder.js",
"/js/DataTablesExtensions/buttons/js/buttons.colVis.min.js",
"/js/DataTables/js/jquery.dataTables.js",
"/css/DataTablesExtensions/scroller/css/scroller.dataTables.min.css",
# Datatables plugins:
"/js/DataTablesExtensions/plugins/sorting/natural.js",
"/js/DataTablesExtensions/plugins/sorting/scientific.js",
# Other js libraries
"/js/chroma/chroma.min.js",
"/js/d3-tip/d3-tip.js",
"/js/d3js/d3.min.js",
"/js/js_alt/underscore.min.js",
"/js/nvd3/nv.d3.min.css",
"/js/qtip2/jquery.qtip.min.js",
"/js/js_alt/md5.min.js",
"/js/js_alt/jstat.min.js",
"/js/js_alt/parsley.min.js",
]
print("Checking links")
for link in js_files:
verify_static_file(host+link)
|
Python
| 0.000004
|
@@ -3835,24 +3835,61 @@
ey.min.js%22,%0A
+ %22/js/js_alt/timeago.min.js%22,%0A
%5D%0A%0A p
|
f27e08b0dcace5b9f49c5b2a211347a2f50f8254
|
Use tags or direct url
|
stats.py
|
stats.py
|
from bs4 import BeautifulSoup
import requests
def statsRoyale(tag):
link = 'http://statsroyale.com/profile/' + tag
response = (requests.get(link)).text
soup = BeautifulSoup(response, 'html.parser')
stats = {}
content = soup.find_all('div', {'class':'content'})
stats['clan'] = content[0].get_text()
if stats['clan'] == 'No Clan':
stats['clan'] = None
stats['highest_trophies'] = content[1].get_text()
stats['last_known_trophies'] = content[2].get_text()
stats['challenge_cards_won'] = content[3].get_text()
stats['tournament_cards_won'] = content[4].get_text()
stats['total_donations'] = content[5].get_text()
stats['best_session_rank'] = content[6].get_text()
stats['previous_session_rank'] = content[7].get_text()
stats['legendary_trophies'] = content[8].get_text()
stats['wins'] = content[9].get_text()
stats['losses'] = content[10].get_text()
stats['3_crown_wins'] = content[11].get_text()
return stats
stats = statsRoyale(tag='9890JJJV')
print stats
|
Python
| 0
|
@@ -62,16 +62,122 @@
e(tag):%0A
+%09if not tag.find('/') == -1:%0A%09%09tag = tag%5B::-1%5D%0A%09%09pos = tag.find('/')%0A%09%09tag = tag%5B:pos%5D%0A%09%09tag = tag%5B::-1%5D%0A%0A
%09link =
@@ -305,29 +305,21 @@
r')%0A
-%09stats = %7B%7D%0A%0A%09content
+%0A%09description
= s
@@ -352,460 +352,216 @@
s':'
-content'%7D)%0A%09stats%5B'clan'%5D = content%5B0%5D.get_text()%0A%09if stats%5B'clan'%5D == 'No Clan':%0A%09%09stats%5B'clan'%5D = None%0A%09stats%5B'highest_trophies'%5D = content%5B1%5D.get_text()%0A%09stats%5B'last_known_trophies'%5D = content%5B2%5D.get_text()%0A%09stats%5B'challenge_cards_won'%5D = content%5B3%5D.get_text()%0A%09stats%5B'tournament_cards_won'%5D = content%5B4%5D.get_text()%0A%09stats%5B'total_donations'%5D = content%5B5%5D.get_text()%0A%09stats%5B'best_session_rank'%5D = content%5B6%5D.get_text()%0A%09stats%5B'previous_session_rank'%5D
+description'%7D)%0A%09content = soup.find_all('div', %7B'class':'content'%7D)%0A%0A%09stats = %7B%7D%0A%0A%09for i in range(len(description)):%0A%09%09description_text = ((description%5Bi%5D.get_text()).replace(' ', '_')).lower()%0A%09%09content_text
= c
@@ -571,9 +571,9 @@
ent%5B
-7
+i
%5D.ge
@@ -585,189 +585,105 @@
t()%0A
+%09
%09stats%5B
-'legendary_trophies'%5D = content%5B8%5D.get_text()%0A%09stats%5B'wins'%5D = content%5B9%5D.get_text()%0A%09stats%5B'losses'%5D = content%5B10%5D.get_text()%0A%09stats%5B'3_crown_wins
+description_text%5D = content_text%0A%0A%09if stats%5B'clan'%5D == 'No Clan':%0A%09%09stats%5B'clan
'%5D =
-content%5B11%5D.get_text()
+None%0A
%0A%09re
|
c917ef2c5e241e2cd2f6bf77c80ca8be7c7fd3cf
|
Update docs list
|
great_expectations/cli/docs.py
|
great_expectations/cli/docs.py
|
import os
import sys
import click
from great_expectations.cli import toolkit
from great_expectations.cli.cli_logging import logger
from great_expectations.cli.util import cli_message, cli_message_list
from great_expectations.core.usage_statistics.usage_statistics import (
send_usage_message,
)
@click.group()
def docs():
"""Data Docs operations"""
pass
@docs.command(name="build")
@click.option(
"--directory",
"-d",
default=None,
help="The project's great_expectations directory.",
)
@click.option(
"--site-name",
"-s",
help="The site for which to generate documentation. See data_docs section in great_expectations.yml",
)
@click.option(
"--view/--no-view",
help="By default open in browser unless you specify the --no-view flag",
default=True,
)
def docs_build(directory, site_name, view=True):
""" Build Data Docs for a project."""
context = toolkit.load_data_context_with_error_handling(directory)
build_docs(context, site_name=site_name, view=view)
send_usage_message(data_context=context, event="cli.docs.build", success=True)
@docs.command(name="list")
@click.option(
'--directory',
'-d',
default=None,
help="The project's great_expectations directory."
)
def docs_list(directory):
"""List known Data Docs Sites."""
context = toolkit.load_data_context_with_error_handling(directory)
docs_sites_url_dicts = context.get_docs_sites_urls()
docs_sites_strings = [
" - <cyan>{}</cyan>: {}".format(docs_site_dict["site_name"], docs_site_dict["site_url"])\
for docs_site_dict in docs_sites_url_dicts
]
if len(docs_sites_strings) == 0:
cli_message("No Data Docs sites found")
else:
list_intro_string = _build_intro_string(docs_sites_strings)
cli_message_list(docs_sites_strings, list_intro_string)
send_usage_message(data_context=context, event="cli.docs.list", success=True)
@docs.command(name="clean")
@click.option(
'--directory',
'-d',
default=None,
help="Clean data docs"
)
@click.option(
"--site-name",
"-s",
help="The site that you want documentation cleaned for. See data_docs section in great_expectations.yml",
)
@click.option(
"--all",
"-a",
help="With this, all sites will get their data docs cleaned out. See data_docs section in great_expectations.yml",
)
def clean_data_docs(directory, site_name=None, all=None):
"""Delete data docs"""
context = toolkit.load_data_context_with_error_handling(directory)
failed = True
if (site_name is None and all is None):
cli_message("<red>{}</red>".format("Please specify --all y to remove all sites or specify specific site using site_name"))
sys.exit(1)
context.clean_data_docs(site_name=site_name)
failed = False
if failed == False and context is not None:
send_usage_message(
data_context=context,
event="cli.docs.clean",
success=True
)
cli_message("<green>{}</green>".format("Cleaned data docs"))
if failed and context is not None:
send_usage_message(
data_context=context,
event="cli.docs.clean",
success=False
)
def _build_intro_string(docs_sites_strings):
doc_string_count = len(docs_sites_strings)
if doc_string_count == 1:
list_intro_string = "1 Data Docs site found:"
elif doc_string_count > 1:
list_intro_string = f"{doc_string_count} Data Docs sites found:"
return list_intro_string
def build_docs(context, site_name=None, view=True):
"""Build documentation in a context"""
logger.debug("Starting cli.datasource.build_docs")
cli_message("Building Data Docs...")
if site_name is not None:
site_names = [site_name]
else:
site_names = None
index_page_locator_infos = context.build_data_docs(site_names=site_names)
msg = "The following Data Docs sites were built:\n"
for site_name, index_page_locator_info in index_page_locator_infos.items():
if os.path.isfile(index_page_locator_info):
msg += " - <cyan>{}:</cyan> ".format(site_name)
msg += "file://{}\n".format(index_page_locator_info)
else:
msg += " - <cyan>{}:</cyan> ".format(site_name)
msg += "{}\n".format(index_page_locator_info)
msg = msg.rstrip("\n")
cli_message(msg)
if view:
context.open_data_docs(site_name=site_name)
|
Python
| 0
|
@@ -1510,16 +1510,29 @@
.format(
+%0A
docs_sit
@@ -1551,16 +1551,28 @@
_name%22%5D,
+%0A
docs_si
@@ -1574,25 +1574,29 @@
cs_site_dict
-%5B
+.get(
%22site_url%22%5D)
@@ -1597,11 +1597,202 @@
url%22
-%5D)%5C
+) or%0A f'site configured but does not exist. Run the following command to build site: great_expectations '%0A f'docs build --site-name %7Bdocs_site_dict%5B%22site_name%22%5D%7D'%0A )
%0A
@@ -3279,21 +3279,16 @@
docs%22))%0A
-
%0A if
@@ -3623,20 +3623,25 @@
cs site
-foun
+configure
d:%22%0A
@@ -3732,20 +3732,25 @@
s sites
-foun
+configure
d:%22%0A
|
9c014da319023eee205d725b49c7aa486af5e59e
|
remove extraneous comment.
|
greatbigcrane/project/views.py
|
greatbigcrane/project/views.py
|
"""
Copyright 2010 Jason Chu, Dusty Phillips, and Phil Schalm
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os.path
import json
from shutil import copyfile
from django.shortcuts import render_to_response, redirect, get_object_or_404
from django.views.generic.list_detail import object_list
from django.views.generic.list_detail import object_detail
from django.views.generic.create_update import delete_object
from django.core.urlresolvers import reverse
from django.template import RequestContext
from django.template.loader import render_to_string
from django.conf import settings
from django.http import HttpResponse
from django.forms.util import ErrorList
from job_queue.jobs import queue_job
from project.models import Project
from project.forms import ProjectForm
from preferences.models import Preference
from notifications.models import Notification
def index(request):
'''We should move this to a different app. Possibly preferences, it's more generic.'''
projects = Project.objects.filter(favourite=False).order_by('-updated_at')[:5]
favourite_projects = Project.objects.filter(favourite=True).order_by('name')
notifications = Notification.objects.exclude(dismissed=True)[:10]
return render_to_response('index.html', RequestContext(request,
{'project_list': projects, 'favourite_project_list': favourite_projects, 'notifications': notifications}))
def about(request):
'''Also go to another app or flatpage...'''
return render_to_response('about.html', RequestContext(request))
def list_projects(request):
orderby = request.GET.get('orderby', 'name')
projects = Project.objects.all().order_by(orderby)
return object_list(request, projects, template_name="project/project_list.html",
template_object_name="project", extra_context={'orderby': orderby})
def view_project(request, project_id):
return object_detail(request, Project.objects.all(), object_id=project_id,
template_object_name='project', extra_context={
'notifications': Notification.objects.filter(project=project_id,dismissed=False)[:10]})
def add_project(request):
form = ProjectForm(request.POST or None)
if form.is_valid():
try:
base_dir = os.path.expanduser(form.cleaned_data['base_directory'])
base_dir = base_dir.rstrip('/')
if form.cleaned_data['git_repo']:
target_dir = os.path.dirname(base_dir)
else:
target_dir = base_dir
if not os.path.isdir(target_dir):
os.makedirs(target_dir)
if form.cleaned_data['git_repo']:
instance = form.save()
queue_job("GITCLONE", project_id=instance.id)
else:
skeleton = [(os.path.join(settings.PROJECT_HOME, "../bootstrap.py"),
os.path.join(target_dir, "bootstrap.py")),
(os.path.join(settings.PROJECT_HOME, "../base_buildout.cfg"),
os.path.join(target_dir, "buildout.cfg"))]
for source, dest in skeleton:
if not os.path.isfile(dest):
copyfile(source, dest)
instance = form.save()
queue_job("BOOTSTRAP", project_id=instance.id)
return redirect(instance.get_absolute_url())
except IOError as e:
errors = form._errors.setdefault("base_directory", ErrorList())
errors.append(u"An error occurred: " + str(e))
base_url = Preference.objects.get_preference("projects_directory", '')
return render_to_response("project/project_form.html",
RequestContext(request, {'form': form, 'base_url': base_url}))
def edit_project(request, project_id):
project = get_object_or_404(Project, id=project_id)
form = ProjectForm(request.POST or None, instance=project)
if form.is_valid():
instance = form.save()
return redirect(instance.get_absolute_url())
base_url = Preference.objects.get_preference("projects_directory", '')
return render_to_response("project/project_edit_form.html",
RequestContext(request, {'form': form, 'base_url': base_url}))
def delete_project(request, project_id):
return delete_object(request, model=Project, object_id=project_id,
post_delete_redirect=reverse("list_projects"))
def favourite_project(request, project_id):
project = Project.objects.get(pk=project_id)
project.favourite=not project.favourite
project.save()
return handle_ajax(request)
def project_notifications(request, project_id):
project = Project.objects.get(pk=project_id)
notifications = Notification.objects.filter(project=project_id,dismissed=False)[:10]
return render_to_response("notifications/_notification_list.html",
RequestContext(request, {'notifications': notifications, 'project': project}))
def handle_ajax(request):
# return HttpResponse(request.POST['update'])
if 'update' in request.POST:
update = dict()
d = json.loads(request.POST['update'])
for k,v in d.items():
if v == 'projects':
update[k] = Project.objects.all().order_by(request.GET.get('orderby', 'name'))
elif v == 'home-projects':
update[k] = Project.objects.filter(favourite=False).order_by('-updated_at')[:5]
elif v == 'favourite-projects':
update[k] = Project.objects.filter(favourite=True).order_by('name')
update[k] = render_to_string("project/_project_list.html", RequestContext(request,
{'project_list': update[k]}))
return HttpResponse(json.dumps({'update': update}),content_type="application/json")
else:
return HttpResponse('fail')
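# Illustrative payload (assumed keys): POSTing update='{"#projects": "projects"}'
# returns {"update": {"#projects": "<rendered _project_list.html>"}}, letting
# the page refresh individual panels over AJAX.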
|
Python
| 0
|
@@ -5487,58 +5487,8 @@
t):%0A
- # return HttpResponse(request.POST%5B'update'%5D)%0A
|
62420187118d4709bb419065d9d32e8aaddf640b
|
add psutil to synth.py (#55)
|
synth.py
|
synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import re
import textwrap
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
version = "v1"
# ----------------------------------------------------------------------------
# Generate pubsub GAPIC layer
# ----------------------------------------------------------------------------
library = gapic.py_library(
"pubsub",
version,
config_path="/google/pubsub/artman_pubsub.yaml",
include_protos=True,
)
s.move(
library,
excludes=[
"docs/**/*",
"nox.py",
"README.rst",
"setup.py",
"google/cloud/pubsub_v1/__init__.py",
"google/cloud/pubsub_v1/types.py",
],
)
# Adjust tests to import the clients directly.
s.replace(
"tests/unit/gapic/v1/test_publisher_client_v1.py",
"from google.cloud import pubsub_v1",
"from google.cloud.pubsub_v1.gapic import publisher_client",
)
s.replace(
"tests/unit/gapic/v1/test_publisher_client_v1.py", " pubsub_v1", " publisher_client"
)
s.replace(
"tests/unit/gapic/v1/test_subscriber_client_v1.py",
"from google.cloud import pubsub_v1",
"from google.cloud.pubsub_v1.gapic import subscriber_client",
)
s.replace(
"tests/unit/gapic/v1/test_subscriber_client_v1.py",
" pubsub_v1",
" subscriber_client",
)
# DEFAULT SCOPES are being used. so let's force them in.
s.replace(
"google/cloud/pubsub_v1/gapic/*er_client.py",
"# The name of the interface for this client. This is the key used to",
"""# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_DEFAULT_SCOPES = (
'https://www.googleapis.com/auth/cloud-platform',
'https://www.googleapis.com/auth/pubsub', )
\g<0>""",
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"import google.api_core.gapic_v1.method\n",
"\g<0>import google.api_core.path_template\n",
)
# Doc strings are formatted poorly
s.replace(
"google/cloud/pubsub_v1/proto/pubsub_pb2.py",
'DESCRIPTOR = _MESSAGESTORAGEPOLICY,\n\s+__module__.*\n\s+,\n\s+__doc__ = """',
"\g<0>A message storage policy.\n\n\n ",
)
s.replace(
"google/cloud/pubsub_v1/gapic/subscriber_client.py",
"subscription \(str\): The subscription whose backlog .*\n(.*\n)+?"
"\s+Format is .*",
"""subscription (str): The subscription whose backlog the snapshot retains.
Specifically, the created snapshot is guaranteed to retain: \\
(a) The existing backlog on the subscription. More precisely, this is \\
defined as the messages in the subscription's backlog that are \\
unacknowledged upon the successful completion of the \\
`CreateSnapshot` request; as well as: \\
(b) Any messages published to the subscription's topic following the \\
successful completion of the CreateSnapshot request. \\
Format is ``projects/{project}/subscriptions/{sub}``.""",
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"import functools\n",
"import collections\n"
"from copy import deepcopy\n\g<0>"
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"import pkg_resources\n",
"\g<0>import six\n"
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"class PublisherClient",
"""# TODO: remove conditional import after Python 2 support is dropped
if six.PY2:
from collections import Mapping
else:
from collections.abc import Mapping
def _merge_dict(d1, d2):
# Modifies d1 in-place to take values from d2
# if the nested keys from d2 are present in d1.
# https://stackoverflow.com/a/10704003/4488789
for k, v2 in d2.items():
v1 = d1.get(k) # returns None if v1 has no such key
if v1 is None:
raise Exception("{} is not recognized by client_config".format(k))
if isinstance(v1, Mapping) and isinstance(v2, Mapping):
_merge_dict(v1, v2)
else:
d1[k] = v2
return d1
\n\n\g<0>"""
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"client_config \(dict\): DEPRECATED.",
"client_config (dict):"
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"# Raise deprecation warnings .*\n.*\n.*\n.*\n.*\n.*\n",
"""default_client_config = deepcopy(publisher_client_config.config)
if client_config is None:
client_config = default_client_config
else:
client_config = _merge_dict(default_client_config, client_config)
"""
)
s.replace(
"google/cloud/pubsub_v1/gapic/publisher_client.py",
"~google.api_core.page_iterator.PageIterator",
"~google.api_core.page_iterator.GRPCIterator"
)
s.replace(
"google/cloud/pubsub_v1/gapic/subscriber_client.py",
"~google.api_core.page_iterator.PageIterator",
"~google.api_core.page_iterator.GRPCIterator"
)
# Temporary fixup for 'grpc-google-iam-vi 0.12.4' (before generation).
s.replace(
"google/cloud/pubsub_v1/gapic/transports/*_grpc_transport.py",
"from google.iam.v1 import iam_policy_pb2",
"from google.iam.v1 import iam_policy_pb2_grpc as iam_policy_pb2",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = gcp.CommonTemplates().py_library(
unit_cov_level=97, cov_level=99, system_test_dependencies=["test_utils"]
)
s.move(templated_files)
# Temporary fix for the generated synth file (the test_utils path)
s.replace(
"noxfile.py",
r'session\.install\("-e", "\.\./test_utils/"\)',
'# \g<0>',
)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
Python
| 0.000005
|
@@ -6221,16 +6221,26 @@
t_utils%22
+, %22psutil%22
%5D%0A)%0As.mo
|
7fae0ec95a5d5cacc36ecd650cb886d2d83ad1ea
|
Replace Unwrapped with Value on proto method names (#2283)
|
synth.py
|
synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
for version in ['V1', 'V1beta2']:
    lower_version = version.lower()
    library = gapic.php_library(
        service='videointelligence',
        version=lower_version,
        artman_output_name=f'google-cloud-video-intelligence-{lower_version}')
    # copy all src including partial veneer classes
    s.move(library / 'src')
    # copy proto files to src also
    s.move(library / 'proto/src/Google/Cloud/VideoIntelligence', 'src/')
    s.move(library / 'tests/')
    # copy GPBMetadata file to metadata
    s.move(library / 'proto/src/GPBMetadata/Google/Cloud/Videointelligence', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# prevent proto messages from being marked final
s.replace(
"src/V*/**/*.php",
r"final class",
r"class")
# fix year
s.replace(
'**/Gapic/*GapicClient.php',
r'Copyright \d{4}',
'Copyright 2017')
s.replace(
'**/V1*/VideoIntelligenceServiceClient.php',
r'Copyright \d{4}',
'Copyright 2017')
s.replace(
'tests/**/V1*/*Test.php',
r'Copyright \d{4}',
'Copyright 2018')
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/VideoIntelligenceServiceClient.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
s.replace(
'src/V1/Gapic/*GapicClient.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
|
Python
| 0
|
@@ -2212,16 +2212,179 @@
lass%22)%0A%0A
+# Replace %22Unwrapped%22 with %22Value%22 for method names.%0As.replace(%0A %22src/V*/**/*.php%22,%0A r%22public function (%5Cw%7B0,%7D)Unwrapped%22,%0A r%22public function %5C1Value%22%0A)%0A%0A
# fix ye
|
3610c9d782ab9bbacb2b990a5712a44f3b93d374
|
update synth to fix relative cloud.google.com links (#2745)
|
synth.py
|
synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
library = gapic.php_library(
service='dlp',
version='v2',
config_path='/google/privacy/dlp/artman_dlp_v2.yaml',
artman_output_name='google-cloud-dlp-v2')
# copy all src including partial veneer classes
s.move(library / 'src')
# copy proto files to src also
s.move(library / 'proto/src/Google/Cloud/Dlp', 'src/')
s.move(library / 'tests/')
# copy GPBMetadata file to metadata
s.move(library / 'proto/src/GPBMetadata/Google/Privacy/Dlp', 'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# fix year
s.replace(
'**/Gapic/*GapicClient.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'**/V2/DlpServiceClient.php',
r'Copyright \d{4}',
'Copyright 2018')
s.replace(
'tests/**/V2/*Test.php',
r'Copyright \d{4}',
'Copyright 2018')
# Fix missing documentation. See https://github.com/googleapis/gapic-generator/issues/1915
s.replace(
'src/V2/Gapic/DlpServiceGapicClient.php',
r'@type InspectJobConfig \$inspectJob\n',
'@type InspectJobConfig $inspectJob The configuration details for an inspect\n'
' * job. Only one of $inspectJob and $riskJob may be provided.\n')
s.replace(
'src/V2/Gapic/DlpServiceGapicClient.php',
r'@type RiskAnalysisJobConfig \$riskJob\n',
'@type RiskAnalysisJobConfig $riskJob The configuration details for a risk\n'
' * analysis job. Only one of $inspectJob and $riskJob may be provided.\n')
### [START] protoc backwards compatibility fixes
# roll back to private properties.
s.replace(
"src/V*/**/*.php",
r"Generated from protobuf field ([^\n]{0,})\n\s{5}\*/\n\s{4}protected \$",
r"""Generated from protobuf field \1
*/
private $""")
# prevent proto messages from being marked final
s.replace(
"src/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
### [END] protoc backwards compatibility fixes
|
Python
| 0
|
@@ -3460,28 +3460,175 @@
ckwards compatibility fixes%0A
+%0A# fix relative cloud.google.com links%0As.replace(%0A %22src/**/V*/**/*.php%22,%0A r%22(.%7B0,%7D)%5C%5D%5C((/.%7B0,%7D)%5C)%22,%0A r%22%5C1%5D(https://cloud.google.com%5C2)%22%0A)%0A
|
f8ae46f22a9b5b1fc8215ac26aed6dfddf25c224
|
set AUTOSYNTH_MULTIPLE_COMMITS=true for context-aware commits (#320)
|
synth.py
|
synth.py
|
import synthtool as s
import synthtool.gcp as gcp
import logging
import subprocess
logging.basicConfig(level=logging.DEBUG)
# Run the gapic generator
gapic = gcp.GAPICMicrogenerator()
version = 'v1'
library = gapic.typescript_library(
'container',
generator_args={
"grpc-service-config": f"google/container/{version}/container_grpc_service_config.json",
"package-name": f"@google-cloud/container"
},
proto_path=f'/google/container/{version}',
version=version)
s.copy(
library,
excludes=['package.json', 'README.md', 'src/index.ts'],
)
# Copy templated files
common_templates = gcp.CommonTemplates()
templates = common_templates.node_library(source_location='build/src')
s.copy(templates)
# fix broken doc links
s.replace("src/v1/doc/google/container/v1/doc_cluster_service.js",
"https:\/\/cloud\.google\.com\/kubernetes-engine\/docs\/reference\/rest\/v1\/projects\.zones\.clusters\.nodePool",
"https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters.nodePools#resource-nodepool")
s.replace('src/v1/*.ts',
'/compute/docs/zones',
'https://cloud.google.com/compute/docs/regions-zones/')
s.replace('src/v1/*.ts',
'/compute/docs/networks-and-firewalls',
'https://cloud.google.com/vpc/docs/firewalls')
s.replace('src/v1/*.ts',
"/container-engine/reference/rest/v1/projects.zones.clusters",
"https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1/projects.zones.clusters")
# Node.js specific cleanup
subprocess.run(['npm', 'install'])
subprocess.run(['npm', 'run', 'compile-protos'])
subprocess.run(['npm', 'run', 'fix'])
|
Python
| 0
|
@@ -119,16 +119,52 @@
DEBUG)%0A%0A
+AUTOSYNTH_MULTIPLE_COMMITS = True%0A%0A%0A
# Run th
|
d54d202970610a59cb7fd60e51483c6e0db93d60
|
update synth scripts to document/utilize apiEndpoint instead of serviceAddress option (#2165)
|
synth.py
|
synth.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
# https://github.com/googleapis/artman/pull/655#issuecomment-507784277
os.environ["SYNTHTOOL_ARTMAN_VERSION"] = "0.29.1"
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
v1_library = gapic._generate_code(
'texttospeech', 'v1', 'php',
config_path='artman_texttospeech_v1.yaml',
artman_output_name='google-cloud-texttospeech-v1')
s.copy(v1_library / f'src/')
s.copy(v1_library / f'proto/src/GPBMetadata/Google/Cloud/Texttospeech', f'metadata')
s.copy(v1_library / f'proto/src/Google/Cloud/TextToSpeech', f'src')
s.copy(v1_library / f'tests')
# fix copyright year
s.replace(
'src/V1/**/*Client.php',
r'Copyright \d{4}',
r'Copyright 2018')
s.replace(
'tests/**/V1/*Test.php',
r'Copyright \d{4}',
r'Copyright 2018')
|
Python
| 0
|
@@ -1326,16 +1326,635 @@
ests')%0A%0A
+# document and utilize apiEndpoint instead of serviceAddress%0As.replace(%0A %22**/Gapic/*GapicClient.php%22,%0A r%22'serviceAddress' =%3E%22,%0A r%22'apiEndpoint' =%3E%22)%0As.replace(%0A %22**/Gapic/*GapicClient.php%22,%0A r%22@type string %5C$serviceAddress%22,%0A r%22%22%22@type string $serviceAddress%0A * **Deprecated**. This option will be removed in a future major release. Please%0A * utilize the %60$apiEndpoint%60 option instead.%0A * @type string $apiEndpoint%22%22%22)%0As.replace(%0A %22**/Gapic/*GapicClient.php%22,%0A r%22%5C$transportConfig, and any %5C$serviceAddress%22,%0A r%22$transportConfig, and any %60$apiEndpoint%60%22)%0A%0A
# fix co
|
30d108b3a206d938ef67c112bc6c953a12c606af
|
Allow specifying custom host and port when starting app
|
tasks.py
|
tasks.py
|
"""Task functions for use with Invoke."""
from invoke import task
@task
def clean(context):
cmd = '$(npm bin)/gulp clean'
context.run(cmd)
@task
def requirements(context):
steps = [
'pip install -r requirements.txt',
'npm install',
'$(npm bin)/bower install',
]
cmd = ' && '.join(steps)
context.run(cmd)
@task
def run(context):
steps = [
'open http://127.0.0.1:5000/',
'FLASK_APP=typesetter/typesetter.py FLASK_DEBUG=1 flask run',
]
cmd = ' && '.join(steps)
context.run(cmd)
@task
def static(context):
cmd = '$(npm bin)/gulp'
context.run(cmd)
|
Python
| 0
|
@@ -352,16 +352,17 @@
n(cmd)%0A%0A
+%0A
@task%0Ade
@@ -366,32 +366,63 @@
%0Adef run(context
+, host='127.0.0.1', port='5000'
):%0A steps = %5B
@@ -447,22 +447,21 @@
p://
-127.0.0.1:5000
+%7Bhost%7D:%7Bport%7D
/',%0A
@@ -527,23 +527,117 @@
lask run
-',%0A
+ --host=%7Bhost%7D --port=%7Bport%7D',%0A %5D%0A steps = %5Bstep.format(host=host, port=port) for step in steps
%5D%0A cm
|
c05b06577785bdf34f1fcd051ecf6d4398d2f77e
|
Add new release task w/ API doc prebuilding
|
tasks.py
|
tasks.py
|
from os.path import join
from invoke import Collection, ctask as task
from invocations import docs as _docs
d = 'sites'
# Usage doc/API site (published as docs.paramiko.org)
path = join(d, 'docs')
docs = Collection.from_module(_docs, name='docs', config={
'sphinx.source': path,
'sphinx.target': join(path, '_build'),
})
# Main/about/changelog site ((www.)?paramiko.org)
path = join(d, 'www')
www = Collection.from_module(_docs, name='www', config={
'sphinx.source': path,
'sphinx.target': join(path, '_build'),
})
# Until we move to spec-based testing
@task
def test(ctx):
ctx.run("python test.py --verbose")
@task
def coverage(ctx):
ctx.run("coverage run --source=paramiko test.py --verbose")
ns = Collection(test, coverage, docs=docs, www=www)
|
Python
| 0.000002
|
@@ -17,16 +17,48 @@
ort join
+%0Afrom shutil import rmtree, move
%0A%0Afrom i
@@ -134,16 +134,58 @@
s _docs%0A
+from invocations.packaging import publish%0A
%0A%0Ad = 's
@@ -237,32 +237,37 @@
s.paramiko.org)%0A
+docs_
path = join(d, '
@@ -273,16 +273,55 @@
'docs')%0A
+docs_build = join(docs_path, '_build')%0A
docs = C
@@ -384,32 +384,37 @@
sphinx.source':
+docs_
path,%0A 'sphin
@@ -424,36 +424,26 @@
arget':
-join(path, '
+docs
_build
-')
,%0A%7D)%0A%0A#
@@ -490,16 +490,20 @@
ko.org)%0A
+www_
path = j
@@ -594,16 +594,20 @@
ource':
+www_
path,%0A
@@ -630,16 +630,20 @@
': join(
+www_
path, '_
@@ -847,16 +847,301 @@
ose%22)%0A%0A%0A
+# Until we stop bundling docs w/ releases. Need to discover use cases first.%0A@task('docs') # Will invoke the API doc site build%0Adef release(ctx):%0A # Move the built docs into where Epydocs used to live%0A rmtree('docs')%0A move(docs_build, 'docs')%0A # Publish%0A publish(ctx)%0A%0A%0A
ns = Col
@@ -1163,16 +1163,25 @@
overage,
+ release,
docs=do
|
b7e42d4a231cc1c34e193e2bd719c134f7f29b0a
|
Use a minimum of 1% completeness to not ship empty translations.
|
tasks.py
|
tasks.py
|
import os
import os.path
import sys
from invoke import run, task
@task
def clean():
run('git clean -Xfd')
@task
def test(country='all'):
print('Python version: ' + sys.version)
test_cmd = 'coverage run `which django-admin.py` test --settings=tests.settings'
flake_cmd = 'flake8 --ignore=W801,E128,E501,W402'
country = os.environ.get('COUNTRY', country)
# Fix issue #49
cwp = os.path.dirname(os.path.abspath(__name__))
pythonpath = os.environ.get('PYTHONPATH', '').split(os.pathsep)
pythonpath.append(os.path.join(cwp, 'tests'))
os.environ['PYTHONPATH'] = os.pathsep.join(pythonpath)
if country == 'all':
run('{0} localflavor'.format(flake_cmd))
run('{0} tests'.format(test_cmd))
run('coverage report')
elif country not in os.listdir('localflavor'):
print('The country {0!r} is not supported yet.'.format(country))
else:
run('{0} localflavor/{1}'.format(flake_cmd, country))
run('{0} tests.test_{1}'.format(test_cmd, country))
run('coverage report -m --include=localflavor/{0}/*'.format(country))
@task
def translations(pull=False, locale=None):
if pull:
if locale:
run('tx pull -l {0}'.format(locale))
else:
run('tx pull -a')
if locale:
run('cd localflavor; django-admin.py makemessages -l {0}; '
'django-admin.py compilemessages -l {0}; cd ..'.format(locale))
else:
run('cd localflavor; django-admin.py makemessages -a; '
'django-admin.py compilemessages; cd ..')
@task
def docs():
run('cd docs; make html; cd ..')
|
Python
| 0
|
@@ -1115,16 +1115,24 @@
ask%0Adef
+compile_
translat
@@ -1140,51 +1140,150 @@
ons(
-pull=False, locale=None):%0A if pull:%0A
+):%0A run('cd localflavor; django-admin.py compilemessages; cd ..')%0A%0A%0A@task(post=%5Bcompile_translations%5D)%0Adef pull_translations(locale=None):%0A
@@ -1293,36 +1293,32 @@
locale:%0A
-
-
run('tx pull -l
@@ -1314,16 +1314,19 @@
tx pull
+-f
-l %7B0%7D'.
@@ -1341,20 +1341,16 @@
ocale))%0A
-
else
@@ -1355,28 +1355,24 @@
se:%0A
-
-
run('tx pull
@@ -1377,11 +1377,104 @@
ll -
-a')
+-minimum-perc=1 -f -a')%0A%0A%0A@task(post=%5Bcompile_translations%5D)%0Adef make_translations(locale=None):
%0A
@@ -1517,45 +1517,8 @@
vor;
- django-admin.py makemessages -l %7B0%7D;
'%0A
@@ -1537,38 +1537,35 @@
django-admin.py
-compil
+mak
emessages -l %7B0%7D
@@ -1566,21 +1566,16 @@
-l %7B0%7D;
-cd ..
'.format
@@ -1658,63 +1658,8 @@
s -a
-; '%0A 'django-admin.py compilemessages; cd ..
')%0A%0A
|
f62626799eddfea04ffad5005de09305a18f287d
|
Add linux dependencies task.
|
tasks.py
|
tasks.py
|
from invoke import Collection, task, run
from okcupyd import tasks
ns = Collection()
ns.add_collection(tasks, name='okcupyd')
@ns.add_task
@task(default=True)
def install():
run("python setup.py install")
@ns.add_task
@task
def pypi():
run("python setup.py sdist upload -r pypi")
@ns.add_task
@task(aliases='r')
def rerecord(rest):
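    # Record fresh HTTP fixtures against live credentials, then re-run with
    # --resave --scrub (presumably stripping sensitive data from the saved
    # fixtures).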
run('tox -e py27 -- --record --credentials test_credentials {0} -s'
.format(rest), pty=True)
run('tox -e py27 -- --resave --scrub --credentials test_credentials {0} -s'
.format(rest), pty=True)
@ns.add_task
@task(aliases='r1')
def rerecord_one(rest):
run('tox -e py27 -- --record --credentials test_credentials -k {0} -s'
.format(rest), pty=True)
run('tox -e py27 -- --resave --scrub --credentials test_credentials -k {0} -s'
.format(rest), pty=True)
@ns.add_task
@task
def rerecord_failing():
result = run("tox -e py27 | grep test_ | grep \u2015 | sed 's:\\\u2015::g'",
hide='out')
for test_name in result.stdout.split('\n'):
rerecord_one(rest=test_name.strip())
|
Python
| 0.000002
|
@@ -1091,8 +1091,308 @@
trip())%0A
+%0A%0Alinux_dependencies = ('zlib1g-dev', 'libxml2-dev', 'libxslt1-dev', 'python-dev',%0A 'libncurses5-dev')%0A@ns.add_task%0A@task(aliases='linux_dep')%0Adef install_linux_dependencies():%0A for package in linux_pacakges:%0A run('%7B0%7D %7B1%7D'.format(install_command, package), pty=False)%0A
|
61deb461f2a36413cbb6108e7e0e86fc81f44891
|
Update to work with invoke >= 0.13
|
tasks.py
|
tasks.py
|
import sys
from invoke import run, task
@task
def docs(watch=False, warn=False):
if watch:
return watcher(docs)
run('make -C docs/ html', warn=warn)
@task
def test(path=None, coverage=False, watch=False, warn=False):
if watch:
return watcher(test, path=path, coverage=coverage)
path = path or 'tests/'
cmd = 'pytest'
if coverage:
cmd += ' --cov=mopidy --cov-report=term-missing'
cmd += ' %s' % path
run(cmd, pty=True, warn=warn)
@task
def lint(watch=False, warn=False):
if watch:
return watcher(lint)
run('flake8', warn=warn)
@task
def update_authors():
# Keep authors in the order of appearance and use awk to filter out dupes
run("git log --format='- %aN <%aE>' --reverse | awk '!x[$0]++' > AUTHORS")
def watcher(task, *args, **kwargs):
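    # Re-run the wrapped task in a loop, blocking on inotifywait until a
    # watched source file changes; Ctrl-C exits cleanly.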
while True:
run('clear')
kwargs['warn'] = True
task(*args, **kwargs)
try:
run(
'inotifywait -q -e create -e modify -e delete '
'--exclude ".*\.(pyc|sw.)" -r docs/ mopidy/ tests/')
except KeyboardInterrupt:
sys.exit()
|
Python
| 0
|
@@ -51,16 +51,21 @@
ef docs(
+ctx,
watch=Fa
@@ -183,16 +183,21 @@
ef test(
+ctx,
path=Non
@@ -510,16 +510,21 @@
ef lint(
+ctx,
watch=Fa
@@ -640,16 +640,19 @@
authors(
+ctx
):%0A #
|
45f098b3664a11ef51cd66a11773bab923b02c91
|
Make all exceptions inherit from ValueError
|
stdnum/exceptions.py
|
stdnum/exceptions.py
|
# exceptions.py - collection of stdnum exceptions
# coding: utf-8
#
# Copyright (C) 2013 Arthur de Jong
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301 USA
"""Collection of exceptions.
The validation functions of stdnum should raise one of the below exceptions
when validation of the number fails.
"""
__all__ = ['ValidationError', 'InvalidFormat', 'InvalidChecksum',
'InvalidLength', 'InvalidComponent']
class ValidationError(Exception):
"""Top-level error for validating numbers.
This exception should normally not be raised, only subclasses of this
exception."""
def __str__(self):
"""Return the exception message."""
return ''.join(self.args[:1]) or getattr(self, 'message', '')
class InvalidFormat(ValidationError): # noqa N818
"""Something is wrong with the format of the number.
This generally means characters or delimiters that are not allowed are
part of the number or required parts are missing."""
message = 'The number has an invalid format.'
class InvalidChecksum(ValidationError): # noqa N818
"""The number's internal checksum or check digit does not match."""
message = "The number's checksum or check digit is invalid."
class InvalidLength(InvalidFormat): # noqa N818
"""The length of the number is wrong."""
message = 'The number has an invalid length.'
class InvalidComponent(ValidationError): # noqa N818
"""One of the parts of the number has an invalid reference.
Some part of the number refers to some external entity like a country
code, a date or a predefined collection of values. The number contains
some invalid reference."""
message = 'One of the parts of the number are invalid or unknown.'
|
Python
| 0
|
@@ -81,16 +81,21 @@
(C) 2013
+-2022
Arthur
@@ -1124,17 +1124,18 @@
ror(
-Exception
+ValueError
):%0A
|
ad9f0c488bf761fe83714377fce06ed93d2ec5f3
|
Update navigation controller
|
controller/navigation.py
|
controller/navigation.py
|
import asyncio
class ControlProtocol:
def connection_made(transport):
pass
def connection_lost(exc):
pass
class DriveMotorControl:
def __init__(self, left=0, right=1):
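        # Each motor is driven by writing a signed speed value, one per line,
        # to its /var/run/motorN control file (assumed here to be a FIFO or
        # device node) through an asyncio write pipe.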
self.loop = asyncio.get_event_loop()
left = open("/var/run/motor{}".format(left),"w")
right = open("/var/run/motor{}".format(right),"w")
self.__left = 0
self.__right = 0
self._left, lpr = self.loop.connect_write_pipe(asyncio.Protocol, left)
self._right, rpr = self.loop.connect_write_pipe(asyncio.Protocol, right)
@property
def left(self):
return self.__left
@left.setter
def setleft(self, val):
self.__left = val
self._left.write("{}\n".format(val))
@property
def right(self):
return self.__right
@right.setter
def setright(self, val):
self.__right = val
self._right.write("{}\n".format(val))
def stop(self):
self.left = 0
self.right = 0
def forward(self):
self.left = 1
self.right = -1
def turnright(self):
self.right = 1
self.left = 1
self.call_later(1, self.stop)
def turnleft(self):
self.right = -1
self.left = -1
self.call_later(1, self.stop)
|
Python
| 0.000001
|
@@ -13,125 +13,8 @@
io%0A%0A
-class ControlProtocol:%0A def connection_made(transport):%0A pass%0A%0A def connection_lost(exc):%0A pass%0A%0A
clas
@@ -128,16 +128,22 @@
+self._
left = o
@@ -191,16 +191,22 @@
+self._
right =
@@ -289,32 +289,132 @@
elf.__right = 0%0A
+ self.loop.run_until_complete(self.connect())%0A%0A @asyncio.coroutine%0A def connect(self):%0A
self._le
@@ -418,24 +418,35 @@
_left, lpr =
+ yield from
self.loop.c
@@ -481,16 +481,22 @@
otocol,
+self._
left)%0A
@@ -519,16 +519,27 @@
t, rpr =
+ yield from
self.lo
@@ -578,16 +578,22 @@
otocol,
+self._
right)%0A%0A
@@ -679,19 +679,16 @@
def
-set
left(sel
@@ -761,24 +761,33 @@
.format(val)
+.encode()
)%0A %0A @
@@ -871,19 +871,16 @@
def
-set
right(se
@@ -960,16 +960,25 @@
mat(val)
+.encode()
)%0A
@@ -1194,36 +1194,48 @@
eft = 1%0A
-self
+return self.loop
.call_later(1, s
@@ -1324,20 +1324,32 @@
-self
+return self.loop
.call_la
|
14b607e89a465693e88da4f936fc477a718d9a3e
|
Fix typo
|
metrology/stats/sample.py
|
metrology/stats/sample.py
|
import math
import random
import sys
import heapq
from time import time
from atomic import Atomic
from threading import RLock
from metrology.stats.snapshot import Snapshot
class UniformSample(object):
def __init__(self, reservoir_size):
self.counter = Atomic(0)
self.values = [0] * reservoir_size
def clear(self):
self.values = [0] * len(self.values)
self.counter.value = 0
def size(self):
count = self.counter.value
if count > len(self.values):
return len(self.values)
return count
def __len__(self):
return self.size
def snapshot(self):
return Snapshot(self.values[0:self.size()])
def update(self, value):
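        # Classic reservoir sampling: fill the reservoir first, then replace
        # a uniformly random slot, so every value seen so far has an equal
        # chance of being retained.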
new_count = self.counter.update(lambda v: v + 1)
if new_count <= len(self.values):
self.values[new_count - 1] = value
else:
index = random.uniform(0, new_count)
if index < len(self.values):
self.values[int(index)] = value
class ExponentiallyDecayingSample(object):
def __init__(self, reservoir_size, alpha):
self.values = []
self.next_scale_time = Atomic(0)
self.alpha = alpha
self.reservoir_size = reservoir_size
self.lock = RLock()
self.rescale_threshold = ExponentiallyDecayingSample.calculate_rescale_threshold(alpha)
self.clear()
@staticmethod
def calculate_rescale_threshold(alpha):
# determine rescale-threshold such that we will not overflow exp() in
# weight function, and subsequently not overflow into inf on dividing
# by random.random()
min_rand = 1.0 / (2**32) # minimum non-zero value from random()
safety = 2.0 # safety pad for numerical inaccuracy
max_value = sys.float_info.max * min_rand / safety
return math.log(max_value) / alpha
def clear(self):
with self.lock:
self.values = []
self.start_time = time()
self.next_scale_time.value = self.start_time + self.rescale_threshold
def size(self):
with self.lock:
return len(self.values)
def __len__(self):
return self.size()
def snapshot(self):
with self.lock:
return Snapshot(val for _, val in self.values)
def weight(self, timestamp):
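        # Newer samples receive exponentially larger weights relative to
        # start_time, biasing the reservoir toward recent measurements
        # (forward-decaying priority sampling).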
return math.exp(self.alpha * (timestamp - self.start_time))
def rescale(self, now, next_time):
if self.next_scale_time.compare_and_swap(next_time, now + self.rescale_threshold):
with self.lock:
rescaleFactor = math.exp(-self.alpha * (now - self.start_time))
self.values = [(k * rescaleFactor, v) for k, v in self.values]
self.start_time = now
def rescale_if_necessary(self):
now = time()
next_time = self.next_scale_time.get_value()
if now > next_time:
self._rescale(now, next_time)
def update(self, value, timestamp=None):
if timestamp is None:
timestamp = time()
self.rescale_if_necessary()
with self.lock:
try:
priority = self.weight(timestamp) / random.random()
except (OverflowError, ZeroDivisionError):
priority = sys.float_info.max
if len(self.values) < self.reservoir_size:
heapq.heappush(self.values, (priority, value))
else:
heapq.heappushpop(self.values, (priority, value))
|
Python
| 0.999999
|
@@ -2940,17 +2940,16 @@
self.
-_
rescale(
|
055573c622d03d02077f49ee3146db300b92813c
|
Add guard for empty selection when changing images
|
microscopium/bokeh_app.py
|
microscopium/bokeh_app.py
|
import os
import numpy as np
import click
from bokeh.server.server import Server
from bokeh.application import Application
from bokeh.application.handlers.function import FunctionHandler
from bokeh.plotting import figure, ColumnDataSource
from bokeh.layouts import row
from bokeh.models.tools import TapTool, PanTool
from skimage import io
import pandas as pd
def imread(path):
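    # Bokeh's image_rgba glyph expects RGBA data, so pad RGB images with a
    # fully opaque alpha channel.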
image0 = io.imread(path)
if image0.shape[2] == 3: # RGB image
shape = image0.shape[:2]
im1 = np.concatenate((image0,
np.full((shape + (1,)), 255, dtype='uint8')),
axis=2)
else: # already RGBA
im1 = image0
return im1
def make_document(filename):
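    # Resolve each image URL in the CSV to an absolute path relative to the
    # CSV's own directory, so the selection callback can read images from disk.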
dataframe = pd.read_csv(filename, index_col=0).set_index('index')
directory = os.path.dirname(filename)
dataframe['path'] = dataframe['url'].apply(lambda x:
os.path.join(directory, x))
def makedoc(doc):
source = ColumnDataSource(dataframe)
image_holder = ColumnDataSource({'image': []})
pca = figure(title='PCA', x_range=[-0.6, 2.7], y_range=[-1.3, 1.8],
sizing_mode='scale_both', tools=[TapTool(), PanTool()])
glyphs = pca.circle(source=source, x='x', y='y')
sel = figure(title='Selected', x_range=[0, 1], y_range=[0, 1],
sizing_mode='scale_both')
image_canvas = sel.image_rgba('image', 0, 0, 1, 1, source=image_holder)
def load_image(attr, old, new):
print('new index: ', new.indices)
index, filename = dataframe[['info', 'path']].iloc[new.indices[0]]
image = imread(filename)
print('image size: ', image.shape)
image_holder.data = {'image': [image]}
glyphs.data_source.on_change('selected', load_image)
fig = row([pca, sel], sizing_mode='stretch_both')
doc.title = 'Bokeh microscopium app'
doc.add_root(fig)
print('ready!')
return makedoc
@click.command()
@click.argument('filename')
@click.option('-p', '--path', default='/')
@click.option('-P', '--port', type=int, default=5000)
def run_server(filename, path='/', port=5000):
apps = {path: Application(FunctionHandler(make_document(filename)))}
server = Server(apps, port=port)
server.run_until_shutdown()
if __name__ == '__main__':
run_server()
|
Python
| 0
|
@@ -1584,16 +1584,85 @@
ndices)%0A
+ if len(new.indices %3E 0): # could be empty selection%0A
@@ -1683,16 +1683,17 @@
ename =
+(
datafram
@@ -1711,16 +1711,52 @@
'path'%5D%5D
+%0A
.iloc%5Bne
@@ -1768,17 +1768,22 @@
ices%5B0%5D%5D
-%0A
+)%0A
@@ -1819,51 +1819,8 @@
- print('image size: ', image.shape)%0A
|
63a2deeb5602eb9834232a592bac16501bb8c8de
|
Fix program name when using __main__
|
cookiecutter/__main__.py
|
cookiecutter/__main__.py
|
"""Allow cookiecutter to be executable through `python -m cookiecutter`."""
from __future__ import absolute_import
from .cli import main
if __name__ == "__main__":
main()
|
Python
| 0.000026
|
@@ -163,16 +163,60 @@
__%22:
-%0A main(
+ # pragma: no cover%0A main(prog_name=%22cookiecutter%22
)%0A
|
f4ba2cba93222b4dd494caf487cdd6be4309e41a
|
Update labels for application form
|
studygroups/forms.py
|
studygroups/forms.py
|
from django import forms
from studygroups.models import StudyGroupSignup, Application
from localflavor.us.forms import USPhoneNumberField
class ApplicationForm(forms.ModelForm):
mobile = USPhoneNumberField(required=False)
class Meta:
model = Application
labels = {
'name': 'Please tell us what to call you',
'mobile': 'What is your mobile number?',
'contact_method': 'Please tell us how would you perfer us to contact us',
'computer_access': 'Do you have normal everyday access to the computer?',
'goals': 'Please tell what your learning goals are',
'support': '',
}
widgets = {
'study_groups': forms.CheckboxSelectMultiple,
}
fields = '__all__'
class SignupForm(forms.ModelForm):
mobile = USPhoneNumberField(required=False)
class Meta:
model = StudyGroupSignup
exclude = []
widgets = {
'study_group': forms.HiddenInput
}
class EmailForm(forms.Form):
study_group_id = forms.IntegerField(widget=forms.HiddenInput)
subject = forms.CharField()
body = forms.CharField(widget=forms.Textarea)
sms_body = forms.CharField(max_length=160, widget=forms.Textarea)
|
Python
| 0
|
@@ -428,59 +428,35 @@
: 'P
-lease tell us how would you perfer us to c
+referred Method of C
ontact
- us
+.
',%0A
@@ -502,24 +502,8 @@
ave
-normal everyday
acce
@@ -512,11 +512,9 @@
to
-the
+a
com
@@ -518,16 +518,39 @@
computer
+ outside of the library
?',%0A
@@ -571,75 +571,387 @@
': '
-Please tell what your learning goals are',%0A 'support': '
+In one sentence, please explain your goals for taking this course.',%0A 'support': 'A successful study group requires the support of all of its members. How will you help your peers achieve their goals?',%0A 'study_groups': 'Which course are you applying for? (by applying for a specific course, you agree to attend sessions at the specified time and location).
',%0A
|
c1b637b2c3a097b4b7421a4192de8bf2326d1613
|
Fix test_cant_auto_apply_freetext
|
suggestions/tests.py
|
suggestions/tests.py
|
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from mks.models import Member, Party, GENDER_CHOICES
from committees.models import Committee
from .models import Suggestion, SuggestedAction
class SuggestionsTests(TestCase):
MK_SITE = 'http://mk1.example.com'
def setUp(self):
self.member1 = Member.objects.create(name='mk_1')
self.member2 = Member.objects.create(name='mk_2')
self.regular_user = User.objects.create_user('reg_user')
self.editor = User.objects.create_superuser(
'admin', 'admin@example.com', 'passwd')
self.party = Party.objects.create(name='party')
self.committee = Committee.objects.create(name='comm')
def test_set_suggestion(self):
actions = [
{
'action': SuggestedAction.SET,
'subject': self.member1,
'fields': {
'website': self.MK_SITE,
'gender': GENDER_CHOICES[0][0],
'current_party': self.party,
}
}
]
suggestion = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
actions=actions,
)
self.assertIsNone(self.member1.website)
self.assertIsNone(self.member1.gender)
self.assertIsNone(self.member1.current_party)
suggestion.auto_apply(self.editor)
mk = Member.objects.get(pk=self.member1.pk)
self.assertEqual(mk.website, self.MK_SITE)
self.assertEqual(mk.gender, GENDER_CHOICES[0][0])
self.assertEqual(mk.current_party, self.party)
suggestion = Suggestion.objects.get(pk=suggestion.pk)
self.assertEqual(suggestion.resolved_status, Suggestion.FIXED)
self.assertEqual(suggestion.resolved_by, self.editor)
self.assertIsNotNone(suggestion.resolved_at)
# cleanup
mk.website = None
mk.gender = None
mk.current_party = None
mk.save()
self.member1 = mk
Suggestion.objects.all().delete()
def test_m2m_set_suggestion(self):
suggestion = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.committee,
action=SuggestedAction.SET,
fields={'members': self.member1},
)
suggestion.auto_apply(self.editor)
self.assertEqual(list(self.committee.members.all()), [self.member1])
# cleanup
self.committee.members.clear()
Suggestion.objects.all().delete()
def test_m2m_add_remove_suggestion(self):
# make sure we're starting clean
self.assertEqual(self.committee.members.count(), 0)
suggestion1 = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.committee,
action=Suggestion.ADD,
fields={'members': self.member1}
)
suggestion2 = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.committee,
action=Suggestion.ADD,
fields={'members': self.member2}
)
suggestion3 = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.committee,
action=Suggestion.REMOVE,
fields={'members': self.member1}
)
suggestion1.auto_apply(self.editor)
self.assertItemsEqual(self.committee.members.all(), [self.member1])
suggestion2.auto_apply(self.editor)
self.assertItemsEqual(
self.committee.members.all(), [self.member1, self.member2])
suggestion3.auto_apply(self.editor)
self.assertItemsEqual(self.committee.members.all(), [self.member2])
# cleanup
self.committee.members.clear()
Suggestion.objects.all().delete()
def test_get_pending_suggestions(self):
total = Suggestion.objects.get_pending_suggestions().count()
self.assertEqual(total, 0)
total_mk1 = Suggestion.objects.get_pending_suggestions_for(
self.member1).count()
self.assertEqual(total_mk1, 0)
total_mk2 = Suggestion.objects.get_pending_suggestions_for(
self.member2).count()
self.assertEqual(total_mk2, 0)
suggestion1 = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
actions=[
{
'action': SuggestedAction.SET,
'fields': {'website': self.MK_SITE},
'subject':self.member1,
},
]
)
suggestion2 = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
actions=[
{
'action': SuggestedAction.SET,
'fields': {'website': self.MK_SITE},
'subject':self.member2,
},
]
)
total = Suggestion.objects.get_pending_suggestions().count()
self.assertEqual(total, 2)
total_mk1 = Suggestion.objects.get_pending_suggestions_for(
self.member1).count()
self.assertEqual(total_mk1, 1)
total_mk2 = Suggestion.objects.get_pending_suggestions_for(
self.member2).count()
self.assertEqual(total_mk2, 1)
suggestion1.auto_apply(self.editor)
total = Suggestion.objects.get_pending_suggestions().count()
self.assertEqual(total, 1)
total_mk1 = Suggestion.objects.get_pending_suggestions_for(
self.member1).count()
self.assertEqual(total_mk1, 0)
suggestions_mks2 = Suggestion.objects.get_pending_suggestions_for(
self.member2)
total_mk2 = suggestions_mks2.count()
self.assertEqual(total_mk2, 1)
self.assertEqual(list(suggestions_mks2), [suggestion2])
# cleanup
Suggestion.objects.all().delete()
def test_cant_auto_apply_freetext(self):
suggestion = Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.member1,
action=Suggestion.FREE_TEXT,
content={'text': "A free text comment"}
)
with self.assertRaises(ValueError):
suggestion.auto_apply(self.editor)
# cleanup
Suggestion.objects.all().delete()
def test_invalid_add_without_field(self):
with self.assertRaises(ValidationError):
Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.committee,
action=Suggestion.ADD,
suggested_object=self.member1
)
def test_free_text_without_content(self):
with self.assertRaises(ValidationError):
Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
action=Suggestion.FREE_TEXT,
)
def test_invalid_set_without_suggested_object(self):
with self.assertRaises(ValidationError):
Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
subject=self.member1,
action=SuggestedAction.SET,
field='current_party',
)
def test_invalid_action_withot_subject(self):
with self.assertRaises(ValidationError):
Suggestion.objects.create_suggestion(
suggested_by=self.regular_user,
action=SuggestedAction.SET,
)
|
Python
| 0.000531
|
@@ -6232,100 +6232,16 @@
-subject=self.member1,%0A action=Suggestion.FREE_TEXT,%0A content=%7B'text':
+comment=
%22A f
@@ -6257,17 +6257,16 @@
comment%22
-%7D
%0A
|
95686be0b45e350791c85c757acd450623b14d60
|
test OK
|
tests.py
|
tests.py
|
import pdb
import logging
import unittest
from yahoo_oauth import OAuth2, OAuth1
from fantasy_sport import FantasySport
from fantasy_sport.utils import pretty_json, pretty_xml
logging.getLogger('yahoo_oauth').setLevel(logging.WARNING)
class TestFantasySport(unittest.TestCase):
def setUp(self,):
oauth = OAuth1(None, None, from_file='oauth.json',base_url='http://fantasysports.yahooapis.com/fantasy/v2/')
self.yfs = FantasySport(oauth)
def test_get_games_info(self,):
response = self.yfs.get_games_info(['nfl'])
self.assertEqual(response.status_code, 200)
logging.debug(pretty_json(response.content))
def test_get_games_info_with_login(self,):
response = self.yfs.get_games_info(['mlb'], use_login=True)
self.assertEqual(response.status_code, 200)
logging.debug(pretty_json(response.content))
def test_get_leagues(self):
response = self.yfs.get_leagues(['238.l.627060'])
self.assertEqual(response.status_code, 200)
logging.debug(pretty_json(response.content))
def test_get_leagues_with_multiple_keys(self,):
self.yfs.fmt = 'xml'
response = self.yfs.get_leagues(('238.l.627060','238.l.627062'))
self.yfs.fmt = 'json'
self.assertEqual(response.status_code, 200)
logging.debug(pretty_xml(response.content))
def test_get_leagues_scoreboard(self):
response = self.yfs.get_leagues_scoreboard(['238.l.627060'])
self.assertEqual(response.status_code, 200)
logging.debug(pretty_json(response.content))
|
Python
| 0.999767
|
@@ -1581,14 +1581,243 @@
tent))%0A%0A
+ def test_get_leagues_settings(self):%0A response = self.yfs.get_leagues_settings(%5B'238.l.627060','238.l.627062'%5D)%0A self.assertEqual(response.status_code, 200)%0A logging.debug(pretty_json(response.content))%0A%0A
%0A %0A
|
7d0cf5548fc8efcce8743ab6308425e5f97e8945
|
Add support for rollback() operation
|
sunburnt/sunburnt.py
|
sunburnt/sunburnt.py
|
from __future__ import absolute_import
import cgi
from itertools import islice
import urllib
import httplib2
from .schema import SolrSchema, SolrError
from .search import SolrSearch
h = httplib2.Http(".cache")
class SolrConnection(object):
def __init__(self, url, h=h):
self.url = url.rstrip("/") + "/"
self.update_url = self.url + "update/"
self.select_url = self.url + "select/"
self.request = h.request
def commit(self, wait_flush=True, wait_searcher=True):
wait_flush = "true" if wait_flush else "false"
wait_searcher = "true" if wait_searcher else "false"
response = self.update('<commit waitFlush="%s" waitSearcher="%s"/>' %
(wait_flush, wait_searcher))
    def optimize(self, wait_flush=True, wait_searcher=True):
wait_flush = "true" if wait_flush else "false"
wait_searcher = "true" if wait_searcher else "false"
response = self.update('<optimize waitFlush="%s" waitSearcher="%s"/>' %
(wait_flush, wait_searcher))
def update(self, update_doc):
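        # POST the XML update message to Solr's update handler; any non-200
        # response is raised as a SolrError.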
body = update_doc
headers = {"Content-Type":"text/xml; charset=utf-8"}
r, c = self.request(self.update_url, method="POST", body=body,
headers=headers)
if r.status != 200:
raise SolrError(r, c)
def select(self, params):
qs = utf8_urlencode(params)
url = "%s?%s" % (self.select_url, qs)
r, c = self.request(url)
if r.status != 200:
raise SolrError(r, c)
return c
class SolrInterface(object):
def __init__(self, url, schemadoc):
self.conn = SolrConnection(url)
self.schema = SolrSchema(schemadoc)
def add(self, docs, chunk=100):
# to avoid making messages too large, we break the message every
# chunk docs.
for doc_chunk in grouper(docs, chunk):
update_message = self.schema.make_update(doc_chunk)
self.conn.update(str(update_message))
def delete(self, docs=None, queries=None):
if not docs and not queries:
raise SolrError("No docs or query specified for deletion")
delete_message = self.schema.make_delete(docs, queries)
self.conn.update(str(delete_message))
def commit(self, *args, **kwargs):
self.conn.commit(*args, **kwargs)
def optimize(self, *args, **kwargs):
self.conn.optimize(*args, **kwargs)
def search(self, **kwargs):
params = kwargs.copy()
for k, v in kwargs.items():
if hasattr(v, "items"):
del params[k]
params.update(v)
print self.conn.select(params)
return self.schema.parse_results(self.conn.select(params))
def query(self, *args, **kwargs):
q = SolrSearch(self)
if len(args) + len(kwargs) > 0:
return q.query(*args, **kwargs)
else:
return q
def utf8_urlencode(params):
utf8_params = {}
for k, v in params.items():
if isinstance(k, unicode):
k = k.encode('utf-8')
if isinstance(v, unicode):
v = v.encode('utf-8')
utf8_params[k] = v
return urllib.urlencode(utf8_params)
def grouper(iterable, n):
"grouper('ABCDEFG', 3) --> [['ABC'], ['DEF'], ['G']]"
i = iter(iterable)
g = list(islice(i, 0, n))
while g:
yield g
g = list(islice(i, 0, n))
|
Python
| 0.000001
|
@@ -1029,32 +1029,103 @@
ait_searcher))%0A%0A
+ def rollback(self):%0A response = self.update(%22%3Crollback/%3E%22)%0A%0A
def update(s
@@ -2507,32 +2507,86 @@
rgs, **kwargs)%0A%0A
+ def rollback(self):%0A self.conn.rollback()%0A%0A
def search(s
|
89128af9fad82166c716ef7a73965d23afa23095
|
Fix a misdefined test. All extant tests now pass.
|
tests.py
|
tests.py
|
from unittest import TestCase, main
from preconditions import PreconditionError, preconditions
class PreconditionTestBase (TestCase):
def check_prec_fail(self, target, *args, **kw):
self.assertRaises(PreconditionError, target, *args, **kw)
class InvalidPreconditionTests (PreconditionTestBase):
def test_varargs(self):
self.check_prec_fail(preconditions, lambda *a: True)
def test_kwargs(self):
self.check_prec_fail(preconditions, lambda **kw: True)
def test_unknown_nondefault_param(self):
# The preconditions refer to "x" but are applied to "a, b", so
# "x" is unknown:
p = preconditions(lambda x: True)
self.check_prec_fail(p, lambda a, b: a+b)
def test_default_masks_param(self):
# Preconditions may have defaults as a hack to bind local
# variables (such as when declared syntactically inside loops),
# but this "closure hack" must not mask application function
# parameter names:
p = preconditions(lambda a, b='a stored value': True)
self.check_prec_fail(p, lambda a, b: a+b)
class BasicPreconditionTests (PreconditionTestBase):
def test_basic_precondition(self):
@preconditions(lambda i: isinstance(i, int) and i > 0)
def uint_pred(i):
return i-1
# Not greater than 0:
self.check_prec_fail(uint_pred, 0)
# Not an int:
self.check_prec_fail(uint_pred, 1.0)
# Test a successful call:
self.assertEqual(0, uint_pred(1))
def test_relational_precondition(self):
@preconditions(lambda a, b: a < b)
def inc_range(a, b):
return range(a, b)
self.check_prec_fail(inc_range, 3, 3)
self.check_prec_fail(inc_range, 5, 3)
self.assertEqual([3, 4], inc_range(3, 5))
def test_multiple_preconditions(self):
@preconditions(
lambda a: isinstance(a, float),
lambda b: isinstance(b, int),
lambda b: b > 0,
lambda a, b: a < b,
)
def f(a, b):
return a ** b
self.check_prec_fail(f, 3, 5)
self.check_prec_fail(f, 3.0, 5.0)
self.check_prec_fail(f, 3.0, -2)
self.check_prec_fail(f, 3.0, 2)
self.assertEqual(0.25, f(0.5, 2))
def test_zero_preconditions(self):
p = preconditions()
def f():
return None
g = p(f)
self.assertIs(None, f())
self.assertIs(None, g())
self.assertIs(f, g)
def test_precondition_with_default(self):
@preconditions(lambda a, _s=[2, 3, 5]: a in _s)
def f(a):
return a
self.check_prec_fail(f, 4)
self.assertEqual(3, f(3))
class MethodPreconditionTests (PreconditionTestBase):
def test_invariant_precondition(self):
class C (object):
@preconditions(lambda self: self.key in self.items)
def get(self):
return self.items[self.key]
i = C()
self.check_prec_fail(i.get)
i.items = {'a': 'apple', 'b': 'banana'}
i.key = 'b'
self.assertEqual('banana', i.get())
def test__init__(self):
class C (object):
@preconditions(lambda name: isinstance(name, unicode))
def __init__(self, name):
self.name = name
self.check_prec_fail(C, b'Not unicode!')
self.assertEqual(u'Alice', C(u'Alice').name)
def test_old_school__init__(self):
class C:
@preconditions(lambda name: isinstance(name, unicode))
def __init__(self, name):
self.name = name
self.check_prec_fail(C, b'Not unicode!')
self.assertEqual(u'Alice', C(u'Alice').name)
def test__new__(self):
class C (tuple):
@preconditions(lambda a, b: a < b)
def __new__(self, a, b):
return tuple.__new__(self, (a, b))
self.check_prec_fail(C, 5, 3)
self.assertEqual((3, 5), C(3, 5))
def test_old_school_method(self):
class OldSchool:
def __init__(self, x):
self.x = x
@preconditions(lambda self, x: self.x < x)
def increase_to(self, x):
self.x = x
obj = OldSchool(5)
self.check_prec_fail(obj.increase_to, 3)
obj.increase_to(7)
self.check_prec_fail(obj.increase_to, 6)
class PreconditionInterfaceTests (PreconditionTestBase):
def test__name__(self):
@preconditions(lambda x: True)
def f(x):
return x
self.assertEqual('f', f.__name__)
def test_zero_preconditions__name__(self):
@preconditions()
def f(x):
return x
self.assertEqual('f', f.__name__)
def test_nopre(self):
def assert_false():
assert False
p = preconditions(lambda x: assert_false())
def f(x):
return 2*x
g = p(f)
self.assertIs(f, g.nopre)
self.assertEqual(6, g.nopre(3))
def test_zero_preconditions_nopre(self):
p = preconditions()
def f(x):
return 2*x
g = p(f)
self.assertIs(f, g.nopre)
self.assertEqual(6, g.nopre(3))
if __name__ == '__main__':
main()
|
Python
| 0.000033
|
@@ -3028,46 +3028,8 @@
C()%0A
-%0A self.check_prec_fail(i.get)%0A%0A
@@ -3072,16 +3072,74 @@
anana'%7D%0A
+ i.key = 'X'%0A%0A self.check_prec_fail(i.get)%0A%0A
|
90bbb6604fdbb16c5a9d4390a429f2ce1c31035c
|
Add more tests, pend all dysfunctional tests.
|
tests/acceptance/test_rate.py
|
tests/acceptance/test_rate.py
|
from pytest import fixture
from pytest import mark
from adhocracy_core.testing import annotator_login
from .shared import wait
from .shared import get_column_listing
from .shared import get_list_element
from .shared import get_listing_create_form
from .shared import login_god
from .test_comment import create_comment
class TestRate:
def test_create(self, browser):
login_god(browser)
comment = create_comment(browser, 'comment1')
assert comment is not None
def test_upvote(self, browser):
rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
pro_button = rateable.find_by_css('.rate-pro')
pro_button.click()
def check_result():
total = rateable.find_by_css('.rate-difference')
return total[0].text == '+1'
assert wait(check_result)
def test_downvote(self, browser):
rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
pro_button = rateable.find_by_css('.rate-contra')
pro_button.click()
def check_result():
total = rateable.find_by_css('.rate-difference')
return total[0].text == '-1'
assert wait(check_result)
def test_neutralvote(self, browser):
rateable = get_column_listing(browser, 'content2').find_by_css('.comment')
pro_button = rateable.find_by_css('.rate-neutral')
pro_button.click()
def check_result():
total = rateable.find_by_css('.rate-difference')
return total[0].text == '0'
assert wait(check_result)
# FIXME: test detail list.
# FIXME: test replacing god user with one that is allowed to rate, but not much more.
# FIXME: test manu users and more interesting totals.
|
Python
| 0
|
@@ -1,24 +1,35 @@
+import re%0A%0A
from pytest import fixtu
@@ -494,16 +494,80 @@
t None%0A%0A
+ @mark.skipif(True, reason=%22pending weil schlechtes wetter%22)%0A
def
@@ -677,36 +677,32 @@
mment')%0A
-pro_
button = rateabl
@@ -728,36 +728,32 @@
e-pro')%0A
-pro_
button.click()%0A
@@ -1037,36 +1037,32 @@
mment')%0A
-pro_
button = rateabl
@@ -1091,36 +1091,32 @@
ontra')%0A
-pro_
button.click()%0A
@@ -1407,28 +1407,24 @@
t')%0A
-pro_
button = rat
@@ -1470,12 +1470,8 @@
-pro_
butt
@@ -1653,127 +1653,1232 @@
-# FIXME: test detail list.%0A%0A # FIXME: test replacing god user
+@mark.skipif(True, reason=%22pending weil schlechtes wetter%22)%0A def test_detaillist(self, browser):%0A%0A # FIXME: the button appears to be surprisingly click%0A # resistant. since we don't have any clues as to why, we%0A # postponed the investigations.%0A%0A rateable = get_column_listing(browser, 'content2').find_by_css('.comment').first%0A button = rateable.find_by_css('.rate-difference').first%0A button.click()%0A%0A def check_result():%0A try:%0A auditTrail = rateable.find_by_css('.rate-details').first%0A print(auditTrail)%0A return %22god%22 in auditTrail.text and %220%22 in auditTrail.text%0A except Exception as e:%0A print(e)%0A return False%0A assert
w
+a
it
-h one that is allowed to rate, but not much more.
+(check_result)%0A%0A @mark.skipif(True, reason=%22pending weil schlechtes wetter%22)%0A def test_multi_rateable(self, browser):%0A%0A # FIXME: all rate widgets are totalled over all others. there is%0A # something wrong with the filter for the rating target (object).%0A # write a test for that, then fix it!%0A%0A pass%0A%0A @mark.skipif(True, reason=%22pending weil schlechtes wetter%22)%0A def test_multi_user(self, browser):
%0A%0A
+
@@ -2894,17 +2894,17 @@
test man
-u
+y
users a
@@ -2929,10 +2929,279 @@
g totals
-.
+ and audit%0A # trails.%0A%0A pass%0A%0A @mark.skipif(True, reason=%22pending weil schlechtes wetter%22)%0A def test_authorisations(self, browser):%0A%0A # FIXME: test replacing god user with one that is allowed to%0A # rate, but not much more.%0A%0A pass
%0A
|
dc57d4b95e39f756858dc1d73c8f221f0bb1956c
|
add stubs
|
tests/commands/test__vi_cc.py
|
tests/commands/test__vi_cc.py
|
import unittest
from Vintageous.vi.constants import _MODE_INTERNAL_NORMAL
from Vintageous.vi.constants import MODE_NORMAL
from Vintageous.vi.constants import MODE_VISUAL
from Vintageous.vi.constants import MODE_VISUAL_LINE
from Vintageous.tests.commands import set_text
from Vintageous.tests.commands import add_selection
from Vintageous.tests.commands import get_sel
from Vintageous.tests.commands import first_sel
from Vintageous.tests.commands import BufferTest
class Test_vi_cc_InModeInternalNormal(BufferTest):
def testSelectsWholeLine(self):
set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
add_selection(self.view, self.R((1, 2), (1, 2)))
self.view.run_command('_vi_cc_motion', {'mode': _MODE_INTERNAL_NORMAL, 'count': 1})
self.assertEqual(self.R((1, 0), (1, 7)), first_sel(self.view))
def testDeletesWholeLine(self):
set_text(self.view, ''.join(('foo bar\nfoo bar\nfoo bar\n',)))
add_selection(self.view, self.R((1, 0), (1, 7)))
self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\nfoo bar\n')
def testKeepsLeadingWhitespace(self):
set_text(self.view, ''.join(('foo bar\n\t foo bar\nfoo bar\n',)))
add_selection(self.view, self.R((1, 0), (1, 10)))
self.view.run_command('_vi_cc_action', {'mode': _MODE_INTERNAL_NORMAL})
self.assertEqual(self.view.substr(self.R(0, self.view.size())), 'foo bar\n\t \nfoo bar\n')
@unittest.skip("Implement this")
def testCanDeleteWithCount(self):
self.assertTrue(False)
|
Python
| 0.000001
|
@@ -1610,13 +1610,8 @@
ment
- this
%22)%0D%0A
@@ -1679,12 +1679,121 @@
e(False)%0D%0A%0D%0A
+ @unittest.skip(%22Implement%22)%0D%0A def testDeletedLinesAreYanked(self):%0D%0A self.assertTrue(False)%0D%0A%0D%0A
|
13f6394b42026b50cf2eedc17834a9de355344f1
|
split tests
|
tests/entity_linking_tests.py
|
tests/entity_linking_tests.py
|
import unittest
from dataset.dbpedia import NgramEntityResolver
from dataset.msnbc import DataSet
from entity_linking.babelfy import _extract_candidates, link, CandidateEntity, SemanticGraph
from entity_linking.evaluation import Metrics
from kilogram import NgramService
import kilogram
NgramService.configure(hbase_host=('diufpc304', '9090'))
kilogram.NER_HOSTNAME = 'diufpc54.unifr.ch'
ner = NgramEntityResolver("/Users/dragoon/Downloads/dbpedia/dbpedia_data.txt",
"/Users/dragoon/Downloads/dbpedia/dbpedia_uri_excludes.txt",
"/Users/dragoon/Downloads/dbpedia/dbpedia_lower_includes.txt",
"/Users/dragoon/Downloads/dbpedia/dbpedia_2015-04.owl")
msnbc_data = DataSet('../extra/data/msnbc/texts/',
'../extra/data/msnbc/msnbc_truth.txt', ner)
class TestEntityLinking(unittest.TestCase):
def test_extract_candidates(self):
self.assertIsNotNone(_extract_candidates([("Obama", "NNP")]))
self.assertEquals(len(_extract_candidates([('Obama', 'NNP'), ('went', 'VBD'), ('with', 'IN'), ('me', 'PRP'), ('for', 'IN'), ('a', 'DT'), ('walk', 'NN'), ('.', '.')])), 2)
def test_twitter(self):
for line in open('fixtures/sample.txt'):
text = line.strip()
print text, [x for x in link(text) if x.true_entity]
def test_entity_linking(self):
print link("After his departure from Buffalo, Saban returned to coach college football teams including Miami, Army and UCF.")
print link("Barack and Michelle visited us today.")
print link("GitHub experienced a massive DDoS attack yesterday evening.")
print link("Saban, previously a head coach of NFL's Miami, is now coaching Crimson Tide. "
"His achievements include leading LSU to the BCS National Championship once and Alabama three times.")
def test_prior_prob_a2kb(self):
print 'Prior prob, A2KB'
metric = Metrics()
for filename, values in msnbc_data.data.iteritems():
candidates = []
for i, line_dict in enumerate(values):
text = line_dict['text']
# i ensures different nouns
cand = CandidateEntity(0, 0, i, text)
if cand.uri_counts:
line_dict['cand'] = cand
candidates.append(cand)
# resolve
graph = SemanticGraph(candidates)
graph.do_iterative_removal()
graph.do_linking()
for line_dict in values:
true_uri = line_dict['true_uri']
uri = None
if 'cand' in line_dict:
uri = line_dict['cand'].true_entity
metric.evaluate(true_uri, uri)
metric.print_metrics()
def test_prior_prob_a2kb_typed(self):
print 'Prior prob, A2KB + Types'
metric = Metrics()
for filename, values in msnbc_data.data.iteritems():
candidates = []
for i, line_dict in enumerate(values):
text = line_dict['text']
# i ensures different nouns
cand = CandidateEntity(0, 0, i, text)
cand.prune_types(line_dict['type'], ner)
if cand.uri_counts:
line_dict['cand'] = cand
candidates.append(cand)
# resolve
graph = SemanticGraph(candidates)
graph.do_iterative_removal()
graph.do_linking()
for line_dict in values:
true_uri = line_dict['true_uri']
uri = None
if 'cand' in line_dict:
uri = line_dict['cand'].true_entity
metric.evaluate(true_uri, uri)
metric.print_metrics()
def test_prior_prob_d2kb(self):
print 'Prior prob, D2KB'
metric = Metrics()
for filename, values in msnbc_data.data.iteritems():
candidates = []
for i, line_dict in enumerate(values):
if line_dict['true_uri']['uri'] is None:
continue
text = line_dict['text']
cand = CandidateEntity(0, 0, i, text)
if cand.uri_counts:
line_dict['cand'] = cand
candidates.append(cand)
# resolve
graph = SemanticGraph(candidates)
graph.do_iterative_removal()
graph.do_linking()
for line_dict in values:
if line_dict['true_uri']['uri'] is None:
continue
true_uri = line_dict['true_uri']
uri = None
if 'cand' in line_dict:
uri = line_dict['cand'].true_entity
metric.evaluate(true_uri, uri)
metric.print_metrics()
if __name__ == '__main__':
print('Test Entity Linkings')
unittest.main()
|
Python
| 0.000003
|
@@ -1179,183 +1179,8 @@
2)%0A%0A
- def test_twitter(self):%0A for line in open('fixtures/sample.txt'):%0A text = line.strip()%0A print text, %5Bx for x in link(text) if x.true_entity%5D%0A%0A
@@ -1708,16 +1708,64 @@
mes.%22)%0A%0A
+%0Aclass TestEntityLinkingKB(unittest.TestCase):%0A%0A
def
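Decoded, the two hunks above drop test_twitter (and its fixtures/sample.txt dependency) and insert a second test class just ahead of the prior-probability tests, so those already-indented methods become members of the new class. A sketch of the reconstructed seam, indentation assumed unchanged:

class TestEntityLinkingKB(unittest.TestCase):

    def test_prior_prob_a2kb(self):
        print 'Prior prob, A2KB'
        ...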
|
6755255332039ab3c0ea60346f61420b52e2f474
|
Fix intermittent failure in l10n language selector test
|
tests/functional/test_l10n.py
|
tests/functional/test_l10n.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import random
import pytest
from ..pages.home import HomePage
@pytest.mark.nondestructive
def test_change_language(base_url, selenium):
page = HomePage(base_url, selenium).open()
initial = page.footer.language
# avoid selecting the same language or locales that have homepage redirects
excluded = [initial, 'ja', 'ja-JP-mac', 'zh-TW', 'zh-CN']
available = [l for l in page.footer.languages if l not in excluded]
new = random.choice(available)
page.footer.select_language(new)
assert new in selenium.current_url, 'Language is not in URL'
assert new == page.footer.language, 'Language has not been selected'
|
Python
| 0.000003
|
@@ -712,19 +712,35 @@
assert
+'/%7B0%7D/'.format(
new
+)
in sele
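Decoded, the hunk wraps the locale in slashes so a short code such as 'de' can only match a real path segment of the URL; the reconstructed assertion:

    assert '/{0}/'.format(new) in selenium.current_url, 'Language is not in URL'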
|
7ac7ef85f10c4f29a858693944c489060b6f8498
|
Connection string redundant
|
tests/functional_test_base.py
|
tests/functional_test_base.py
|
import pyproctor
from moto import mock_s3
from pyshelf.app import app
import pyshelf.configure as configure
import boto
from boto.s3.key import Key
import yaml
import tests.metadata_utils as meta_utils
import tests.permission_utils as utils
from tests.route_tester.tester import Tester
from tests.search.test_wrapper import TestWrapper as SearchTestWrapper
from pyshelf.search.container import Container as SearchContainer
from tests.metadata.comparator import Comparator as MetadataComparator
from pyshelf.resource_identity import ResourceIdentity
class FunctionalTestBase(pyproctor.TestBase):
RESPONSE_404 = {
"message": "Resource not found",
"code": "resource_not_found"
}
RESPONSE_403 = {
"code": "forbidden",
"message": "Forbidden"
}
RESPONSE_INVALID_NAME = {
"message": "The artifact name provided is not allowable. Please remove leading underscores.",
"code": "invalid_artifact_name"
}
RESPONSE_DUPLICATE = {
"code": "duplicate_artifact",
"message": "Artifact by name test already exists in current directory"
}
ELASTICSEARCH_CONNECTION_STRING = "http://localhost:9200/metadata"
def setUp(self):
self.app = app
self.setup_elastic()
self.setup_moto()
self.setup_metadata()
self.test_client = app.test_client()
self._route_tester = None
self._metadata_comparator = None
@property
def metadata_comparator(self):
if not self._metadata_comparator:
self._metadata_comparator = MetadataComparator(
self,
FunctionalTestBase.ELASTICSEARCH_CONNECTION_STRING,
app.logger)
return self._metadata_comparator
def assert_metadata_matches(self, resource_url):
"""
Makes the assumption that mock_s3 has been
enabled (done in configure_moto).
Makes sure that the metadata for a particular
artifact is the same in the search layer and
the cloud layer.
Args:
resource_url(basestring): The full path to the resource from the APIs
perspective
Raises:
AssertionError
"""
self.metadata_comparator.compare(resource_url)
@classmethod
def setUpClass(cls):
config = {
"buckets": {
"test": {
"accessKey": "test",
"secretKey": "test"
},
"bucket2": {
"accessKey": "test",
"secretKey": "test"
}
},
"elasticSearchConnectionString": cls.ELASTICSEARCH_CONNECTION_STRING,
}
configure.logger(app.logger, "DEBUG")
app.config.update(config)
def setup_elastic(self):
con_str = "http://localhost:9200/metadata"
search_container = SearchContainer(self.app.logger, con_str)
self.search_wrapper = SearchTestWrapper(search_container)
def setup_moto(self):
self.moto_s3 = mock_s3()
self.moto_s3.start()
import httpretty
# EXTREMELY IMPORTANT! If the port is not
# appended httpretty does not identify it as http
# but httplib does so the file pointer that
# is supposed to be filled up by httpetty.fakesocket.socket
# is not.
httpretty.core.POTENTIAL_HTTP_PORTS.add(9200)
self.boto_connection = boto.connect_s3()
self.boto_connection.create_bucket("test")
self.boto_connection.create_bucket("bucket2")
self.test_bucket = self.boto_connection.get_bucket("test")
self.setup_artifacts()
self.create_auth_key()
def setup_artifacts(self):
key = Key(self.test_bucket, "test")
key.set_contents_from_string("hello world")
nested_key = Key(self.test_bucket, "/dir/dir2/dir3/nest-test")
nested_key.set_contents_from_string("hello world")
artifact_list = Key(self.test_bucket, "/dir/dir2/dir3/dir4/test5")
artifact_list.set_contents_from_string("")
thing_key = Key(self.test_bucket, "empty")
thing_key.set_contents_from_string("hello world")
empty_meta = Key(self.test_bucket, "/_metadata_empty.yaml")
empty_meta.set_contents_from_string("")
def setup_metadata(self):
self.add_metadata("/test/artifact/test")
self.add_metadata("/test/artifact/dir/dir2/dir3/nest-test")
self.add_metadata("/test/artifact/this/that/other", "1.2")
self.add_metadata("/test/artifact/thing", "1.2"),
self.add_metadata("/test/artifact/blah", "1.19"),
self.add_metadata("/test/artifact/a", "1.19"),
self.add_metadata("/test/artifact/zzzz", "1.19"),
self.add_metadata("/test/artifact/dir/dir2/Test", "2")
self.search_wrapper.refresh_index()
def add_metadata(self, resource_path, version="1", metadata=None):
"""
Adds metadata to moto and elastic.
"""
resource_id = ResourceIdentity(resource_path)
data = meta_utils.get_meta(resource_id.artifact_name, resource_id.resource_path, version)
if metadata:
data.update(metadata)
key = Key(self.boto_connection.get_bucket(resource_id.bucket_name), resource_id.cloud_metadata)
key.set_contents_from_string(yaml.dump(data))
self.search_wrapper.add_metadata(resource_id.search, data)
def create_auth_key(self):
self.auth = utils.auth_header()
key_name = "_keys/{}".format(self.auth["Authorization"])
auth_key = Key(self.test_bucket, key_name)
auth_key.set_contents_from_string(utils.get_permissions_func_test())
auth_bucket2 = Key(self.boto_connection.get_bucket("bucket2"), key_name)
auth_bucket2.set_contents_from_string(utils.get_permissions_func_test())
@property
def route_tester(self):
if not self._route_tester:
self._route_tester = Tester(self, self.test_client)
return self._route_tester
def tearDown(self):
self.moto_s3.stop()
self.search_wrapper.teardown_metadata()
def response_500(self, message=None):
if not message:
message = "Internal server error"
return {
"message": message,
"code": "internal_server_error"
}
|
Python
| 0.999752
|
@@ -2878,59 +2878,8 @@
f):%0A
- con_str = %22http://localhost:9200/metadata%22%0A
@@ -2938,15 +2938,58 @@
er,
-con_str
+FunctionalTestBase.ELASTICSEARCH_CONNECTION_STRING
)%0A
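Applied together, the hunks delete the duplicated local con_str and point setup_elastic at the existing class constant; the reconstructed method (line wrapping is a guess):

    def setup_elastic(self):
        search_container = SearchContainer(
            self.app.logger, FunctionalTestBase.ELASTICSEARCH_CONNECTION_STRING)
        self.search_wrapper = SearchTestWrapper(search_container)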
|
da6c8f6daee4baa3798ab2c4b49fbc780e46ee3a
|
Rename test case for ObjectLoader to match
|
tests.py
|
tests.py
|
#!/usr/bin/env python
import sys
import os
import unittest
from straight.plugin import loaders
class ModuleLoaderTestCase(unittest.TestCase):
def setUp(self):
self.loader = loaders.ModuleLoader()
sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))
def tearDown(self):
del sys.path[-1]
del sys.path[-1]
def test_load(self):
modules = list(self.loader.load('testplugin'))
assert len(modules) == 2, modules
def test_plugin(self):
assert self.loader.load('testplugin')[0].do(1) == 2
class SelectiveLoaderTestCase(unittest.TestCase):
def setUp(self):
self.loader = loaders.ObjectLoader()
sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'more-test-plugins'))
sys.path.append(os.path.join(os.path.dirname(__file__), 'test-packages', 'some-test-plugins'))
def tearDown(self):
del sys.path[-1]
del sys.path[-1]
def test_load_all(self):
objects = list(self.loader.load('testplugin'))
self.assertEqual(len(objects), 2, str(objects)[:100] + ' ...')
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -710,17 +710,14 @@
ass
-Selective
+Object
Load
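The hunk only renames the class so it matches the ObjectLoader it exercises:

    class ObjectLoaderTestCase(unittest.TestCase):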
|
d8737e4b2a0b41b139edbed6535e834a9aa17699
|
Remove ShCommandContext
|
modules/command/cmd_sh.py
|
modules/command/cmd_sh.py
|
# -*- coding: utf-8 -*-
from models import CommandInfo
from cmd import Command, CommandContext, validator, cmd_indicator
class ShCommand(Command):
"""
NAME: sh - execute shell command within application container on remote machine
SYNOPSIS: sh [--env|-e <ENV>] <marathin_app_id> <raw bash command>
DESC: execute shell within the specified application container and get the result, the target container will be found by marathon_app_id.\n
You can also optionally specify target `ENV` by --env or -e
E.G. sh dev-some-service ls /opt/logs
sh dev-some-service tail -n 200 /opt/logs/out.log
sh --env dev some-service tail -n 200 /opt/logs/out.log
sh -e prod some-service tail -n 200 /opt/logs/out.log
"""
name = 'sh'
alias = ['shell', 'bash']
def __init__(self, env, marathon_app_id, sh):
self.env = env
self.marathon_app_id = marathon_app_id
self.sh = sh
def execute(self):
pass
class ShCommandParser(object):
support = cmd_indicator(ShCommand)
def has_env_option(self, txt):
cmd_segs = txt.split()
return '-e' == cmd_segs[1].lower() or '--env' == cmd_segs[1].lower()
def parse_env(self, txt):
cmd_segs = txt.split()
def parse_by_arg():
if self.has_env_option(txt):
return cmd_segs[2]
def parse_by_app_id():
app_id_index = 1 + (2 if self.has_env_option(txt) else 0)
app_id = cmd_segs[app_id_index]
return app_id.split('-')[0]
return parse_by_arg() or parse_by_app_id()
def parse_marathon_app_id(self, txt):
cmd_segs = txt.split()
return cmd_segs[3] if self.has_env_option(txt) else cmd_segs[1]
def parse_remote_sh(self, txt):
cmd_segs = txt.split()
return cmd_segs[4:] if self.has_env_option(txt) else cmd_segs[2:]
@validator
def is_valid(self, txt):
cmd_segs = txt.split()
return self.support(txt) and (len(cmd_segs) >= 5 if self.has_env_option(txt) else len(cmd_segs) >= 3)
def parse(self, txt):
"""
sh command should be the format like:
- sh dev-some-service ls /opt/logs
- sh dev-some-service tail -f /opt/logs/out.log
- sh -e dev some-service tail -f /opt/logs/out.log
"""
env = self.parse_env(txt)
marathon_app_id = self.parse_marathon_app_id(txt)
sh = self.parse_remote_sh(txt)
return ShCommand(env, marathon_app_id, sh)
class ShCommandContext(CommandContext):
def __init__(self, env, marathon_app_id):
self.env = env
self.marathon_app_id = marathon_app_id
def enter(self):
# TODO: Return a open connection to remote machine
return self
def exit(self, type, value, traceback):
# TODO: Close connection
pass
|
Python
| 0.000007
|
@@ -2516,353 +2516,4 @@
sh)%0A
-%0Aclass ShCommandContext(CommandContext):%0A def __init__(self, env, marathon_app_id):%0A self.env = env%0A self.marathon_app_id = marathon_app_id%0A%0A def enter(self):%0A # TODO: Return a open connection to remote machine%0A return self%0A%0A def exit(self, type, value, traceback):%0A # TODO: Close connection%0A pass%0A
|
f924770c9e48e4bcb94f724b7e0c9911a87a7415
|
Version 2.4.1
|
createsend/createsend.py
|
createsend/createsend.py
|
import urllib
import urllib2
import httplib
import base64
import gzip
from StringIO import StringIO
from urlparse import urlparse
from utils import json_to_py, get_faker
__version_info__ = ('2', '4', '0')
__version__ = '.'.join(__version_info__)
class CreateSendError(Exception):
"""Represents a CreateSend API error and contains specific data about the error."""
def __init__(self, data):
self.data = data
def __str__(self):
# self.data should contain Code, Message and optionally ResultData
extra = ("\nExtra result data: %s" % self.data.ResultData) if hasattr(self.data, 'ResultData') else ""
return "The CreateSend API responded with the following error - %s: %s%s" % (self.data.Code, self.data.Message, extra)
class ClientError(Exception): pass
class ServerError(Exception): pass
class BadRequest(CreateSendError): pass
class Unauthorized(CreateSendError): pass
class NotFound(ClientError): pass
class Unavailable(Exception): pass
class CreateSendBase(object):
def __init__(self):
self.fake_web = False
def stub_request(self, expected_url, filename, status=None, body=None):
self.fake_web = True
self.faker = get_faker(expected_url, filename, status, body)
def make_request(self, method, path, params={}, body="", username=None, password=None):
headers = {
'User-Agent': 'createsend-python-%s' % __version__,
'Content-Type': 'application/json; charset=utf-8',
'Accept-Encoding' : 'gzip, deflate' }
parsed_base_uri = urlparse(CreateSend.base_uri)
"""username and password should only be set when it is intended that
the default basic authentication mechanism using the API key be
overridden (e.g. when using the apikey route with username and password)."""
if username and password:
headers['Authorization'] = "Basic %s" % base64.b64encode("%s:%s" % (username, password))
else:
# Allow api_key to be set for a CreateSend instance.
headers['Authorization'] = "Basic %s" % base64.b64encode("%s:x" % (CreateSend.api_key or self.api_key))
self.headers = headers
"""If in fake web mode (i.e. self.stub_request has been called),
self.faker should be set, and this request should be treated as a fake."""
if self.fake_web:
# Check that the actual url which would be requested matches self.faker.url.
actual_url = "http://%s%s" % (parsed_base_uri.netloc, self.build_url(parsed_base_uri, path, params))
self.faker.actual_url = actual_url
if self.faker.url != actual_url:
raise Exception("Faker's expected URL (%s) doesn't match actual URL (%s)" % (self.faker.url, actual_url))
self.faker.actual_body = body
if self.faker.body is not None:
if self.faker.body != body:
raise Exception("Faker's expected body (%s) doesn't match actual body (%s)" % (self.faker.body, body))
data = self.faker.open() if self.faker else ''
status = self.faker.status if (self.faker and self.faker.status) else 200
return self.handle_response(status, data)
c = httplib.HTTPConnection(parsed_base_uri.netloc)
c.request(method, self.build_url(parsed_base_uri, path, params), body, headers)
response = c.getresponse()
if response.getheader('content-encoding', '') == 'gzip':
data = gzip.GzipFile(fileobj=StringIO(response.read())).read()
else:
data = response.read()
c.close()
return self.handle_response(response.status, data)
def build_url(self, parsed_base_uri, path, params):
url = parsed_base_uri.path + path
if params and len(params) > 0:
url = (url + "?%s" % urllib.urlencode(params))
return url
def handle_response(self, status, data):
if status == 400:
raise BadRequest(json_to_py(data))
elif status == 401:
raise Unauthorized(json_to_py(data))
elif status == 404:
raise NotFound()
elif status in range(400, 500):
raise ClientError()
elif status in range(500, 600):
raise ServerError()
return data
def _get(self, path, params={}, username=None, password=None):
return self.make_request(path=path, method="GET", params=params, username=username, password=password)
def _post(self, path, body=""):
return self.make_request(path=path, method="POST", body=body)
def _put(self, path, body="", params={}):
return self.make_request(path=path, method="PUT", params=params, body=body)
def _delete(self, path, params={}):
return self.make_request(path=path, method="DELETE", params=params)
class CreateSend(CreateSendBase):
"""Provides high level CreateSend functionality/data you'll probably need."""
base_uri = "http://api.createsend.com/api/v3"
api_key = ""
def apikey(self, site_url, username, password):
"""Gets your CreateSend API key, given your site url, username and password."""
# The only case in which username and password are passed to self.get
params = { "SiteUrl": site_url }
response = self._get("/apikey.json", params, username, password)
return json_to_py(response).ApiKey
def clients(self):
"""Gets your clients."""
response = self._get('/clients.json')
return json_to_py(response)
def countries(self):
"""Gets valid countries."""
response = self._get('/countries.json')
return json_to_py(response)
def systemdate(self):
"""Gets the current date in your account's timezone."""
response = self._get('/systemdate.json')
return json_to_py(response).SystemDate
def timezones(self):
"""Gets valid timezones."""
response = self._get('/timezones.json')
return json_to_py(response)
def administrators(self):
"""gets administrators associated with the account"""
response = self._get('/admins.json')
return json_to_py(response)
def get_primary_contact(self):
"""retrieves the primary contact for this account"""
response = self._get('/primarycontact.json')
return json_to_py(response)
def set_primary_contact(self, email):
"""assigns the primary contact for this account"""
params = { "email": email }
response = self._put('/primarycontact.json', params = params)
return json_to_py(response)
|
Python
| 0
|
@@ -195,17 +195,17 @@
, '4', '
-0
+1
')%0A__ver
|
a18e195734983849a90786a4631987466952a232
|
Set version to 0.4.2 in __init__.py
|
lib/recordclass/__init__.py
|
lib/recordclass/__init__.py
|
# The MIT License (MIT)
#
# Copyright (c) <2011-2014> <Shibzukhov Zaur, szport at gmail dot com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from .memoryslots import memoryslots, itemgetset
from .record import recordclass
__version__ = '0.4'
|
Python
| 0.000106
|
@@ -1277,11 +1277,13 @@
_ = '0.4
+.2
'%0D%0A
|
ba1fa9d47ae725774807a0aa97cdde476e572266
|
improve readability and remove unused function.
|
timer.py
|
timer.py
|
from time import strftime
import time
import os
import datetime
import sys
def to_min(sec):
return int(sec/60)
def to_hour(sec):
return int(sec/60/60)
class Timer(object):
def __init__(self):
self._key = self._initial()
self._target_working_sec = 9 * 60 * 60 # 9 hour
def _initial(self):
key = "%s.txt" % strftime("%Y-%m-%d")
if not os.path.exists(key):
self._touch(key)
return key
def _touch(self, fname, times=None):
with open(fname, 'a'):
os.utime(fname, times)
def timeit(self, desc):
log = self._gen_log(desc)
self._save(self._key, log)
self._console(log)
def _gen_log(self, desc):
return "%s, %s" % (strftime("%Y%m%d %H:%M:%S"), desc)
def _save(self, fname, desc):
with open(fname, 'a') as f:
f.write("%s\n" % desc)
def _console(self, msg):
print(msg)
def report(self):
raw = self._load(self._key)
if len(raw) % 2 != 0:
raw.append(self._gen_log("NOW"))
working_sec = self._calc(raw)
self._console("")
self._console("========== Summary ===========")
count_down_min = to_min(self._target_working_sec - working_sec)
count_down_hour = to_hour(self._target_working_sec - working_sec)
self._console("Countdown minutes: %s hr (%s min)" % (count_down_hour, count_down_min))
self._console("")
self._console("Target working minutes: %s hr (%s min)" % (to_hour(self._target_working_sec), to_min(self._target_working_sec)))
self._console("Current working minutes: %s hr (%s min)" % (to_hour(working_sec), to_min(working_sec)))
def _load(self, fname):
lines = []
with open(fname) as f:
lines = f.read().splitlines()
return lines
def _calc(self, raw):
composite_list = [raw[x:x+2] for x in range(0, len(raw),2)]
working_sec = 0
self._print_raw(raw)
for come_in, go_out in composite_list:
sec = self._timestamp(go_out) - self._timestamp(come_in)
working_sec += sec
return working_sec
def _print_raw(self, raw):
self._console("========== RAW data ===========")
for idx, item in enumerate(raw, start=0):
self._console("[%s] %s.) %s" % ("*" if idx % 2 == 0 else " ", idx, item))
def _timestamp(self, line):
time_str = line.split(',')[0]
return time.mktime(datetime.datetime.strptime(time_str, "%Y%m%d %H:%M:%S").timetuple())
if __name__ == '__main__':
t = Timer()
argv = sys.argv
if len(argv) == 1:
t.report()
if len(argv) >= 2:
t.timeit(" ".join(argv[1:]))
|
Python
| 0
|
@@ -53,24 +53,55 @@
rt datetime%0A
+from datetime import timedelta%0A
import sys%0A%0A
@@ -107,19 +107,20 @@
%0Adef to_
-min
+hour
(sec):%0A
@@ -133,65 +133,161 @@
urn
-int
+get_time
(sec
-/60
)%0A%0Adef
-to_hour
+get_time
(sec
+ond
):%0A
-return int(sec/60/60)%0A
+sec = timedelta(seconds=second)%0A d = datetime.datetime(1,1,1) + sec%0A return %22%25dh %25dm%22 %25 (d.hour, d.minute)
%0A%0Acl
@@ -801,20 +801,15 @@
elf.
-_console(log
+report(
)%0A%0A
@@ -1320,31 +1320,53 @@
-c
+self._console(%22C
ount
-_
down
-_min = to_min
+: %25s (%25s)%22 %25 (to_hour
(sel
@@ -1405,34 +1405,9 @@
sec)
-%0A count_down_hour =
+,
to_
@@ -1435,30 +1435,18 @@
king_sec
- - working_sec
+))
)%0A
@@ -1467,179 +1467,32 @@
e(%22C
-ountdown minutes: %25s hr (%25s min)%22 %25 (count_down_hour, count_down_min))%0A self._console(%22%22)%0A self._console(%22Target working minutes: %25s hr (%25s min)
+urrent : %25s
%22 %25
-(
to_hour(
self
@@ -1491,179 +1491,75 @@
our(
-self._target_working_sec), to_min(self._target_working_sec)))%0A self._console(%22Current working minutes: %25s hr (%25s min)%22 %25 (to_hour(working_sec), to_min(working_sec))
+working_sec))%0A self._console(%22%22)%0A self._print_raw(raw
)%0A%0A%0A
@@ -1822,37 +1822,8 @@
= 0%0A
- self._print_raw(raw)%0A
@@ -2491,20 +2491,16 @@
report()
-
%0A if
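The hunks rewrite both helpers and trim report(); the new helper pair, decoded from the first hunks (a sketch, whitespace assumed):

    from datetime import timedelta

    def to_hour(sec):
        return get_time(sec)

    def get_time(second):
        sec = timedelta(seconds=second)
        d = datetime.datetime(1,1,1) + sec
        return "%dh %dm" % (d.hour, d.minute)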
|
d537ea32462c7ef46634d1527702c4c4a6d37e1e
|
Fix UDF test, take two
|
tests/query_test/test_udfs.py
|
tests/query_test/test_udfs.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
from tests.common.test_vector import *
from tests.common.impala_test_suite import *
class TestUdfs(ImpalaTestSuite):
@classmethod
def get_workload(cls):
return 'functional-query'
@classmethod
def add_test_dimensions(cls):
super(TestUdfs, cls).add_test_dimensions()
# UDFs require codegen
cls.TestMatrix.add_constraint(
lambda v: v.get_value('exec_option')['disable_codegen'] == False)
# There is no reason to run these tests using all dimensions.
cls.TestMatrix.add_constraint(lambda v:\
v.get_value('table_format').file_format == 'text' and\
v.get_value('table_format').compression_codec == 'none')
def test_udfs(self, vector):
self.run_test_case('QueryTest/udf', vector)
|
Python
| 0.999732
|
@@ -730,16 +730,241 @@
none')%0A%0A
+ # This must run serially because other tests executing 'invalidate metadata' will nuke%0A # all loaded functions.%0A # TODO: This can be run in parallel once functions are persisted correctly.%0A @pytest.mark.execute_serially%0A
def te
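Decoded, the hunk prepends a rationale comment and a serial-execution marker to the test method; the reconstructed tail of the class (two-space method indent per the file's style):

  # This must run serially because other tests executing 'invalidate metadata' will nuke
  # all loaded functions.
  # TODO: This can be run in parallel once functions are persisted correctly.
  @pytest.mark.execute_serially
  def test_udfs(self, vector):
    self.run_test_case('QueryTest/udf', vector)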
|
9c0a83da524831cf557e24ad0a61c160c856dec9
|
move definitions to the bottom again
|
tools.py
|
tools.py
|
# coding: utf-8
from pyquery import PyQuery as q
import json
from collections import OrderedDict
this = None
BASE = 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/'
def load(filename='resource.json'):
schema = json.load(open(filename), object_pairs_hook=OrderedDict)
return schema
def get_pq(uri=BASE + 'aws-template-resource-type-ref.html'):
h = q(uri, headers={
'user-agent':
'https://github.com/fungusakafungus/cloudformation-jsonschema'
})
h.make_links_absolute()
return h
def all_resource_properties_hrefs():
h = get_pq(BASE + 'aws-product-property-reference.html')
res = OrderedDict(
(a1.attr('href'), a1.text())
for a1 in [q(a)
for a
in h('#main-col-body li a')
]
)
return res
def all_resource_hrefs():
h = get_pq(BASE + 'aws-template-resource-type-ref.html')
all_resource_hrefs = OrderedDict(
(a1.text().strip(), a1.attr('href'))
for a1 in [q(a) for a in h('#main-col-body li a')])
return all_resource_hrefs
def write(schema, filename='resource.json'):
with open(filename, 'w') as f:
f.write(json.dumps(schema, indent=4, separators=(',', ': ')))
def print_(schema):
return json.dumps(schema, indent=4)
def all_resource_patterns_by_name():
h = get_pq(BASE + 'aws-template-resource-type-ref.html')
all_resource_patterns_by_name = OrderedDict(
(
a.strip(),
{'properties': {'Type': {'enum': [a.strip()]}}}
)
for a in h('#main-col-body li a').map(lambda x: this.text)
)
return all_resource_patterns_by_name
def resources_dict(schema):
if 'definitions' not in schema:
schema['definitions'] = OrderedDict(
{'resource_types': OrderedDict()}
)
return schema['definitions']['resource_types']
def get_oneOf():
res_names = all_resource_patterns_by_name().keys()
return [{"$ref": "#/definitions/resource_types/" + i} for i in res_names]
def update_all_resource_patterns_by_name(schema):
o = resources_dict(schema)
new = all_resource_patterns_by_name()
new.update(o)
schema['oneOf'] = get_oneOf()
schema['definitions']['resource_types'] = new
|
Python
| 0
|
@@ -2269,8 +2269,89 @@
%5D = new%0A
+ # put definitions last%0A schema%5B'definitions'%5D = schema.pop('definitions')%0A
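The hunk appends two lines to update_all_resource_patterns_by_name, popping and re-inserting 'definitions' so the OrderedDict-backed schema serializes it last:

    schema['definitions']['resource_types'] = new
    # put definitions last
    schema['definitions'] = schema.pop('definitions')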
|
085755df4c542af7cbbf83af9b6069d7d9c74989
|
Implement a test for the default user config file
|
tests/test_get_user_config.py
|
tests/test_get_user_config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_get_user_config
--------------------
Tests formerly known from a unittest residing in test_config.py named
TestGetUserConfig.test_get_user_config_valid
TestGetUserConfig.test_get_user_config_invalid
TestGetUserConfig.test_get_user_config_nonexistent
"""
import os
import shutil
import pytest
from cookiecutter import config
from cookiecutter.exceptions import InvalidConfiguration
@pytest.fixture(scope='module')
def user_config_path():
return os.path.expanduser('~/.cookiecutterrc')
@pytest.fixture(scope='function')
def back_up_rc(request, user_config_path):
"""
Back up an existing cookiecutter rc and restore it after the test.
If ~/.cookiecutterrc is pre-existing, move it to a temp location
"""
user_config_path_backup = os.path.expanduser('~/.cookiecutterrc.backup')
if os.path.exists(user_config_path):
shutil.copy(user_config_path, user_config_path_backup)
os.remove(user_config_path)
def remove_test_rc():
"""
Remove the ~/.cookiecutterrc that has been created in the test.
"""
if os.path.exists(user_config_path):
os.remove(user_config_path)
def restore_original_rc():
"""
If it existed, restore the original ~/.cookiecutterrc
"""
if os.path.exists(user_config_path_backup):
shutil.copy(user_config_path_backup, user_config_path)
os.remove(user_config_path_backup)
# According to the py.test source code finalizers are popped from an
# internal list that we populated via 'addfinalizer'. As a result the
# last-added finalizer function is executed first.
request.addfinalizer(restore_original_rc)
request.addfinalizer(remove_test_rc)
@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_valid(user_config_path):
"""
Get config from a valid ~/.cookiecutterrc file
"""
shutil.copy('tests/test-config/valid-config.yaml', user_config_path)
conf = config.get_user_config()
expected_conf = {
'cookiecutters_dir': '/home/example/some-path-to-templates',
'replay_dir': '/home/example/some-path-to-replay-files',
'default_context': {
'full_name': 'Firstname Lastname',
'email': 'firstname.lastname@gmail.com',
'github_username': 'example'
}
}
assert conf == expected_conf
@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_invalid(user_config_path):
"""
Get config from an invalid ~/.cookiecutterrc file
"""
shutil.copy('tests/test-config/invalid-config.yaml', user_config_path)
with pytest.raises(InvalidConfiguration):
config.get_user_config()
@pytest.mark.usefixtures('back_up_rc')
def test_get_user_config_nonexistent():
"""
Get config from a nonexistent ~/.cookiecutterrc file
"""
assert config.get_user_config() == config.DEFAULT_CONFIG
@pytest.fixture
def custom_user_config():
return {
'cookiecutters_dir': '/foo/bar/some-path-to-templates',
'replay_dir': '/foo/bar/some-path-to-replay-files',
'default_context': {
'full_name': 'Cookiemonster',
'github_username': 'hackebrot'
},
'abbreviations': {
'cookiedozer': 'https://github.com/hackebrot/cookiedozer.git',
}
}
@pytest.fixture
def custom_user_config_path(tmpdir, custom_user_config):
user_config_file = tmpdir.join('user_config')
user_config_file.write(config.yaml.dump(custom_user_config))
return str(user_config_file)
def test_specify_config_path(custom_user_config_path, custom_user_config):
user_config = config.get_user_config(custom_user_config_path)
assert user_config == custom_user_config
|
Python
| 0.000006
|
@@ -3775,20 +3775,128 @@
custom_user_config%0A
+%0A%0Adef test_default_config_path(user_config_path):%0A assert config.DEFAULT_CONFIG_FILE == user_config_path%0A
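Decoded, the hunk appends exactly the test named in the subject line:

def test_default_config_path(user_config_path):
    assert config.DEFAULT_CONFIG_FILE == user_config_path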
|
586cd6c864fdbdb3ac20aa49bdc6c550fa93aa2f
|
fix a testdir straggler
|
tests/test_latex_formatter.py
|
tests/test_latex_formatter.py
|
# -*- coding: utf-8 -*-
"""
Pygments LaTeX formatter tests
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2006-2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import os
import unittest
import tempfile
from pygments.formatters import LatexFormatter
from pygments.lexers import PythonLexer
class LatexFormatterTest(unittest.TestCase):
def test_valid_output(self):
tokensource = list(PythonLexer().get_tokens(file(
os.path.join(testdir, testfile)).read()))
fmt = LatexFormatter(full=True)
handle, pathname = tempfile.mkstemp('.tex')
# place all output files in /tmp too
old_wd = os.getcwd()
os.chdir(os.path.dirname(pathname))
tfile = os.fdopen(handle, 'w+b')
fmt.format(tokensource, tfile)
tfile.close()
try:
try:
import subprocess
ret = subprocess.Popen(['latex', '-interaction=nonstopmode', pathname],
stdout=subprocess.PIPE).wait()
except ImportError:
# Python 2.3 - no subprocess module
ret = os.popen('latex -interaction=nonstopmode "%s"' % pathname).close()
if ret == 32512: raise OSError # not found
except OSError:
# latex not available
pass
else:
self.failIf(ret, 'latex run reported errors')
os.unlink(pathname)
os.chdir(old_wd)
|
Python
| 0.99996
|
@@ -320,16 +320,47 @@
nLexer%0A%0A
+from support import test_file%0A%0A
%0Aclass L
@@ -492,51 +492,18 @@
ile(
-%0A os.path.join(testdir,
test
+_
file
+(
)).r
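Across both hunks the module gains a helper import and stops referencing the undefined testdir/testfile names; the reconstructed pieces:

    from support import test_file

    tokensource = list(PythonLexer().get_tokens(file(test_file()).read()))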
|
592b8a1a97a3c6d4c17eaeb6e748134501240894
|
Increase the default number of training epochs
|
train.py
|
train.py
|
#!/usr/bin/env python
__author__ = 'Tony Beltramelli www.tonybeltramelli.com - 19/08/2016'
import os
import argparse
from modules.Model import *
from modules.Batch import *
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--training_file', type=str, required=True)
parser.add_argument('--vocabulary_file', type=str, required=True)
parser.add_argument('--model_name', type=str, required=True)
parser.add_argument('--epoch', type=int, default=50)
parser.add_argument('--batch_size', type=int, default=50)
parser.add_argument('--sequence_length', type=int, default=50)
parser.add_argument('--log_frequency', type=int, default=100)
parser.add_argument('--learning_rate', type=int, default=0.002)
parser.add_argument('--units_number', type=int, default=128)
parser.add_argument('--layers_number', type=int, default=2)
args = parser.parse_args()
training_file = args.training_file
vocabulary_file = args.vocabulary_file
model_name = args.model_name
epoch = args.epoch
batch_size = args.batch_size
sequence_length = args.sequence_length
log_frequency = args.log_frequency
learning_rate = args.learning_rate
batch = Batch(training_file, vocabulary_file, batch_size, sequence_length)
input_number = batch.vocabulary.size
classes_number = batch.vocabulary.size
units_number = args.units_number
layers_number = args.layers_number
print "Start training with epoch: {}, batch_size: {}, log_frequency: {}," \
"learning_rate: {}".format(epoch, batch_size, log_frequency, learning_rate)
if not os.path.exists(model_name):
os.makedirs(model_name)
model = Model(model_name)
model.build(input_number, sequence_length, layers_number, units_number, classes_number)
classifier = model.get_classifier()
cost = tf.reduce_mean(tf.square(classifier - model.y))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
expected_prediction = tf.equal(tf.argmax(classifier, 1), tf.argmax(model.y, 1))
accuracy = tf.reduce_mean(tf.cast(expected_prediction, tf.float32))
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
iteration = 0
while batch.dataset_full_passes < epoch:
iteration += 1
batch_x, batch_y = batch.get_next_batch()
batch_x = batch_x.reshape((batch_size, sequence_length, input_number))
sess.run(optimizer, feed_dict={model.x: batch_x, model.y: batch_y})
if iteration % log_frequency == 0:
acc = sess.run(accuracy, feed_dict={model.x: batch_x, model.y: batch_y})
loss = sess.run(cost, feed_dict={model.x: batch_x, model.y: batch_y})
print("Iteration {}, batch loss: {:.6f}, training accuracy: {:.5f}".format(iteration * batch_size,
loss, acc))
batch.clean()
print("Optimization done")
saver = tf.train.Saver(tf.all_variables())
checkpoint_path = "{}/{}.ckpt".format(model_name, model_name)
saver.save(sess, checkpoint_path, global_step=iteration * batch_size)
print("Model saved in {}".format(model_name))
if __name__ == "__main__":
main()
|
Python
| 0.000005
|
@@ -469,33 +469,34 @@
pe=int, default=
-5
+20
0)%0A parser.ad
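The one-character hunk bumps the --epoch default from 50 to 200:

    parser.add_argument('--epoch', type=int, default=200)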
|
07cfc39e50251384ddb647ccc7f73c98ed8cf7b9
|
Save model with an interval of 1000 steps
|
train.py
|
train.py
|
import tensorflow as tf
from model import CycleGAN
from reader import Reader
from datetime import datetime
import os
X_TRAIN_FILE = 'data/tfrecords/apple.tfrecords'
Y_TRAIN_FILE = 'data/tfrecords/orange.tfrecords'
BATCH_SIZE = 1
def train():
current_time = datetime.now().strftime("%Y%m%d-%H%M")
checkpoints_dir = "checkpoints/{}".format(current_time)
os.makedirs(checkpoints_dir, exist_ok=True)
graph = tf.Graph()
cycle_gan = CycleGAN()
with graph.as_default():
X_reader = Reader(X_TRAIN_FILE, batch_size=BATCH_SIZE, name='X')
Y_reader = Reader(Y_TRAIN_FILE, batch_size=BATCH_SIZE, name='Y')
x = X_reader.feed()
y = Y_reader.feed()
G_loss, D_Y_loss, F_loss, D_X_loss, summary_op = cycle_gan.model(x, y)
optimizer = cycle_gan.optimize(G_loss, D_Y_loss, F_loss, D_X_loss)
saver = tf.train.Saver()
train_writer = tf.summary.FileWriter(checkpoints_dir, graph)
with tf.Session(graph=graph) as sess:
sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
step = 0
while not coord.should_stop():
_, G_loss_val, D_Y_loss_val, F_loss_val, D_X_loss_val, summary = \
sess.run([optimizer, G_loss, D_Y_loss, F_loss, D_X_loss, summary_op])
train_writer.add_summary(summary, step)
train_writer.flush()
print('-----------Step %d:-------------' % step)
print(' G_loss : {}'.format(G_loss_val))
print(' D_Y_loss : {}'.format(D_Y_loss_val))
print(' F_loss : {}'.format(F_loss_val))
print(' D_X_loss : {}'.format(D_X_loss_val))
if step % 10 == 0:
save_path = saver.save(sess, checkpoints_dir + "/model.ckpt")
print("Model saved in file: %s" % save_path)
step += 1
except KeyboardInterrupt:
print('Interrupted')
coord.request_stop()
except Exception as e:
coord.request_stop(e)
finally:
save_path = saver.save(sess, checkpoints_dir + "/model.ckpt")
print("Model saved in file: %s" % save_path)
# When done, ask the threads to stop.
coord.request_stop()
coord.join(threads)
if __name__ == '__main__':
train()
|
Python
| 0
|
@@ -1387,24 +1387,54 @@
er.flush()%0A%0A
+ if step %25 100 == 0:%0A
prin
@@ -1478,32 +1478,34 @@
%25 step)%0A
+
print(' G_loss
@@ -1532,32 +1532,34 @@
s_val))%0A
+
+
print(' D_Y_los
@@ -1590,32 +1590,34 @@
s_val))%0A
+
print(' F_loss
@@ -1644,32 +1644,34 @@
s_val))%0A
+
+
print(' D_X_los
@@ -1723,16 +1723,18 @@
tep %25 10
+00
== 0:%0A
@@ -1794,32 +1794,50 @@
+ %22/model.ckpt%22
+, global_step=step
)%0A prin
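Decoded, the hunks gate the per-step logging behind step % 100, checkpoint every 1000 steps instead of every 10, and stamp the checkpoint file with the step; a sketch of the reconstructed loop body:

                if step % 100 == 0:
                    print('-----------Step %d:-------------' % step)
                    ...
                if step % 1000 == 0:
                    save_path = saver.save(sess, checkpoints_dir + "/model.ckpt", global_step=step)
                    print("Model saved in file: %s" % save_path)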
|
5e4152b6d48740268a4937535815c561144fdbd5
|
Fix failing unit test
|
tests/unit/test_connection.py
|
tests/unit/test_connection.py
|
try:
import unittest2 as unittest
except ImportError:
import unittest # noqa
from StringIO import StringIO
from mock import Mock, ANY
from cassandra.connection import (Connection, PROTOCOL_VERSION,
HEADER_DIRECTION_TO_CLIENT,
HEADER_DIRECTION_FROM_CLIENT, ProtocolError)
from cassandra.decoder import (write_stringmultimap, write_int, write_string,
SupportedMessage)
from cassandra.marshal import uint8_pack, uint32_pack
class ConnectionTest(unittest.TestCase):
def make_connection(self):
c = Connection('1.2.3.4')
c._socket = Mock()
c._socket.send.side_effect = lambda x: len(x)
return c
def make_header_prefix(self, message_class, version=PROTOCOL_VERSION, stream_id=0):
return ''.join(map(uint8_pack, [
0xff & (HEADER_DIRECTION_TO_CLIENT | version),
0, # flags (compression)
stream_id,
message_class.opcode # opcode
]))
def make_options_body(self):
options_buf = StringIO()
write_stringmultimap(options_buf, {
'CQL_VERSION': ['3.0.1'],
'COMPRESSION': []
})
return options_buf.getvalue()
def make_error_body(self, code, msg):
buf = StringIO()
write_int(buf, code)
write_string(buf, msg)
return buf.getvalue()
def make_msg(self, header, body=""):
return header + uint32_pack(len(body)) + body
def test_bad_protocol_version(self, *args):
c = self.make_connection()
c._id_queue.get_nowait()
c._callbacks = Mock()
c.defunct = Mock()
# read in a SupportedMessage response
header = self.make_header_prefix(SupportedMessage, version=0x04)
options = self.make_options_body()
message = self.make_msg(header, options)
c.process_msg(message, len(message) - 8)
# make sure it errored correctly
c.defunct.assert_called_once_with(ANY)
args, kwargs = c.defunct.call_args
self.assertIsInstance(args[0], ProtocolError)
def test_bad_header_direction(self, *args):
c = self.make_connection()
c._id_queue.get_nowait()
c._callbacks = Mock()
c.defunct = Mock()
# read in a SupportedMessage response
header = ''.join(map(uint8_pack, [
0xff & (HEADER_DIRECTION_FROM_CLIENT | PROTOCOL_VERSION),
0, # flags (compression)
0,
SupportedMessage.opcode # opcode
]))
options = self.make_options_body()
message = self.make_msg(header, options)
c.process_msg(message, len(message) - 8)
# make sure it errored correctly
c.defunct.assert_called_once_with(ANY)
args, kwargs = c.defunct.call_args
self.assertIsInstance(args[0], ProtocolError)
def test_negative_body_length(self, *args):
c = self.make_connection()
c._id_queue.get_nowait()
c._callbacks = Mock()
c.defunct = Mock()
# read in a SupportedMessage response
header = self.make_header_prefix(SupportedMessage)
options = self.make_options_body()
message = self.make_msg(header, options)
c.process_msg(message, -13)
# make sure it errored correctly
c.defunct.assert_called_once_with(ANY)
args, kwargs = c.defunct.call_args
self.assertIsInstance(args[0], ProtocolError)
def test_unsupported_cql_version(self, *args):
c = self.make_connection()
c._id_queue.get_nowait()
c._callbacks = {0: c._handle_options_response}
c.defunct = Mock()
c.cql_version = "3.0.3"
# read in a SupportedMessage response
header = self.make_header_prefix(SupportedMessage)
options_buf = StringIO()
write_stringmultimap(options_buf, {
'CQL_VERSION': ['7.8.9'],
'COMPRESSION': []
})
options = options_buf.getvalue()
message = self.make_msg(header, options)
c.process_msg(message, len(message) - 8)
# make sure it errored correctly
c.defunct.assert_called_once_with(ANY)
args, kwargs = c.defunct.call_args
self.assertIsInstance(args[0], ProtocolError)
def test_not_implemented(self):
"""
Ensure the following methods throw NIE's. If not, come back and test them.
"""
c = self.make_connection()
self.assertRaises(NotImplementedError, c.close)
self.assertRaises(NotImplementedError, c.defunct, None)
self.assertRaises(NotImplementedError, c.send_msg, None, None)
self.assertRaises(NotImplementedError, c.wait_for_response, None)
self.assertRaises(NotImplementedError, c.wait_for_responses)
self.assertRaises(NotImplementedError, c.register_watcher, None, None)
self.assertRaises(NotImplementedError, c.register_watchers, None)
|
Python
| 0.000015
|
@@ -525,16 +525,17 @@
2_pack%0A%0A
+%0A
class Co
@@ -4472,17 +4472,16 @@
%22%22%22%0A
-%0A
@@ -4482,17 +4482,16 @@
c
-
= self.m
@@ -4568,286 +4568,8 @@
se)%0A
- self.assertRaises(NotImplementedError, c.defunct, None)%0A self.assertRaises(NotImplementedError, c.send_msg, None, None)%0A self.assertRaises(NotImplementedError, c.wait_for_response, None)%0A self.assertRaises(NotImplementedError, c.wait_for_responses)%0A
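Decoded, the hunks drop the four assertions covering defunct, send_msg, wait_for_response and wait_for_responses, plus some stray whitespace; the reconstructed test keeps close and the two register_watcher checks:

    def test_not_implemented(self):
        """
        Ensure the following methods throw NIE's. If not, come back and test them.
        """
        c = self.make_connection()
        self.assertRaises(NotImplementedError, c.close)
        self.assertRaises(NotImplementedError, c.register_watcher, None, None)
        self.assertRaises(NotImplementedError, c.register_watchers, None)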
|
1970bad9d9933432154de2042c4ed74a8696b7f0
|
fix timeout when no options are specified
|
teuthology/task/thrashosds.py
|
teuthology/task/thrashosds.py
|
import contextlib
import logging
import ceph_manager
from teuthology import misc as teuthology
log = logging.getLogger(__name__)
@contextlib.contextmanager
def task(ctx, config):
"""
"Thrash" the OSDs by randomly marking them out/down (and then back
in) until the task is ended. This loops, and every op_delay
seconds it randomly chooses to add or remove an OSD (even odds)
unless there are fewer than min_out OSDs out of the cluster, or
more than min_in OSDs in the cluster.
All commands are run on mon0 and it stops when __exit__ is called.
The config is optional, and is a dict containing some or all of:
min_in: (default 2) the minimum number of OSDs to keep in the
cluster
min_out: (default 0) the minimum number of OSDs to keep out of the
cluster
op_delay: (5) the length of time to sleep between changing an
OSD's status
clean_interval: (60) the approximate length of time to loop before
waiting until the cluster goes clean. (In reality this is used
to probabilistically choose when to wait, and the method used
makes it closer to -- but not identical to -- the half-life.)
chance_down: (0) the probability that the thrasher will mark an
OSD down rather than marking it out. (The thrasher will not
consider that OSD out of the cluster, since presently an OSD
wrongly marked down will mark itself back up again.) This value
can be either an integer (eg, 75) or a float probability (eg
0.75).
timeout: (360) the number of seconds to wait for the cluster
to become clean before the task exits. If this doesn't happen,
an exception will be raised.
example:
tasks:
- ceph:
- thrashosds:
chance_down: 10
op_delay: 3
min_in: 1
timeout: 600
- interactive:
"""
log.info('Beginning thrashosds...')
first_mon = teuthology.get_first_mon(ctx, config)
(mon,) = ctx.cluster.only(first_mon).remotes.iterkeys()
manager = ceph_manager.CephManager(
mon,
logger=log.getChild('ceph_manager'),
)
thrash_proc = ceph_manager.Thrasher(
manager,
config,
logger=log.getChild('thrasher')
)
try:
yield
finally:
log.info('joining thrashosds')
thrash_proc.do_join()
manager.wait_till_clean(config.get('timeout', 360))
|
Python
| 0.000003
|
@@ -1865,24 +1865,170 @@
ve:%0A %22%22%22%0A
+ if config is None:%0A config = %7B%7D%0A assert isinstance(config, dict), %5C%0A 'thrashosds task only accepts a dict for configuration'%0A
log.info
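Decoded, the hunk guards the top of task() so a bare '- thrashosds:' stanza (config arrives as None) no longer crashes at the final config.get('timeout', 360) call:

    if config is None:
        config = {}
    assert isinstance(config, dict), \
        'thrashosds task only accepts a dict for configuration'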
|
e7b7709784e105114d490eaab655a16e9842a1ed
|
optimize post processor shouldn't run 'call' with shell and pipe.
|
thumbnails/post_processors.py
|
thumbnails/post_processors.py
|
import imghdr
import os
from subprocess import call, PIPE
import tempfile
import uuid
from django.core.files import File
def get_or_create_temp_dir():
temp_dir = os.path.join(tempfile.gettempdir(), 'thumbnails')
if not os.path.exists(temp_dir):
os.mkdir(temp_dir)
return temp_dir
def process(thumbnail_file, **kwargs):
"""
Post processors are functions that receive file objects,
performs necessary operations and return the results as file objects.
"""
from . import conf
for processor in conf.POST_PROCESSORS:
processor['processor'](thumbnail_file, **processor['kwargs'])
return thumbnail_file
def optimize(thumbnail_file, jpg_command=None, png_command=None, gif_command=None):
"""
A post processing function to optimize file size. Accepts commands
to optimize JPG, PNG and GIF images as arguments. Example:
THUMBNAILS = {
# Other options...
'POST_PROCESSORS': [
{
'processor': 'thumbnails.post_processors.optimize',
'png_command': 'optipng -force -o7 "%(filename)s"',
'jpg_command': 'jpegoptim -f --strip-all "%(filename)s"',
},
],
}
"""
temp_dir = get_or_create_temp_dir()
thumbnail_filename = os.path.join(temp_dir, "%s" % uuid.uuid4().hex)
f = open(thumbnail_filename, 'wb')
f.write(thumbnail_file.read())
f.close()
# Detect filetype
filetype = imghdr.what(thumbnail_filename)
# Construct command to optimize image based on filetype
command = None
if filetype == "jpg" or filetype == "jpeg":
command = jpg_command
elif filetype == "png":
command = png_command
elif filetype == "gif":
command = gif_command
# Run Command
if command:
command = command % {'filename': thumbnail_filename}
call(command, shell=True, stdout=PIPE)
optimized_file = File(open(thumbnail_filename, 'rb'))
# _get_size() is needed to prevent Django < 1.5 from throwing an AttributeError.
# This is fixed in https://github.com/django/django/commit/5c954136eaef3d98d532368deec4c19cf892f664
# and can be removed when we stop supporting Django 1.4
optimized_file._get_size()
os.remove(thumbnail_filename)
return optimized_file
|
Python
| 0
|
@@ -48,14 +48,8 @@
call
-, PIPE
%0Aimp
@@ -712,16 +712,29 @@
nd=None,
+%0A
gif_com
@@ -1881,45 +1881,133 @@
-call(command, shell=True, stdout=PIPE
+try:%0A call(command)%0A except OSError:%0A raise OSError('Error while optimizing %25s image' %25 filetype
)%0A%0A
@@ -2068,16 +2068,21 @@
))%0A #
+ Call
_get_si
@@ -2089,18 +2089,8 @@
ze()
- is needed
to
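After the hunks, PIPE is no longer imported and the optimizer invocation reports a missing binary cleanly; the reconstructed block (note the diff drops shell=True, so command is now passed as a plain string, exactly as encoded):

        try:
            call(command)
        except OSError:
            raise OSError('Error while optimizing %s image' % filetype)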
|
58b5cd41be50ee72a8cae46504273e0760a5446b
|
Corrected docker_host docker driver doc
|
molecule/driver/docker.py
|
molecule/driver/docker.py
|
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import absolute_import
import os
from molecule import logger
from molecule.driver import base
LOG = logger.get_logger(__name__)
class Docker(base.Base):
"""
The class responsible for managing `Docker`_ containers. `Docker`_ is
the default driver used in Molecule.
Molecule leverages Ansible's `docker_container`_ module, by mapping
variables from `molecule.yml` into `create.yml` and `destroy.yml`.
.. _`docker_container`: http://docs.ansible.com/ansible/latest/docker_container_module.html
.. code-block:: yaml
driver:
name: docker
platforms:
- name: instance
hostname: instance
image: image_name:tag
registry:
url: registry.example.com
credentials:
username: $USERNAME
password: $PASSWORD
email: user@example.com
command: sleep infinity
privileged: True|False
volumes:
- /sys/fs/cgroup:/sys/fs/cgroup:ro
capabilities:
- SYS_ADMIN
exposed_ports:
- 53/udp
- 53/tcp
published_ports:
- 0.0.0.0:8053:53/udp
- 0.0.0.0:8053:53/tcp
ulimits:
- nofile:262144:262144
dns_servers:
- 8.8.8.8
networks:
- name: foo
- name: bar
.. code-block:: bash
$ sudo pip install docker-py
Provide the files Molecule will preserve upon each subcommand execution.
.. code-block:: yaml
driver:
name: docker
safe_files:
- foo
To use a URL to connect to the Docker API instead of the default Unix socket
path `unix://var/run/docker.sock`.
.. code-block:: yaml
driver:
name: docker
platforms:
- name: instance
image: centos:7
docker_host: 'tcp://localhost:12376'
provisioner:
name: ansible
env:
DOCKER_HOST: 'tcp://localhost:12376'
.. _`Docker`: https://www.docker.com
""" # noqa
def __init__(self, config):
super(Docker, self).__init__(config)
self._name = 'docker'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
return ('docker exec '
'-e COLUMNS={columns} '
'-e LINES={lines} '
'-e TERM=bash '
'-ti {instance} bash')
@property
def default_safe_files(self):
return [
os.path.join(self._config.scenario.ephemeral_directory,
'Dockerfile')
]
@property
def default_ssh_connection_options(self):
return []
def login_options(self, instance_name):
return {'instance': instance_name}
def ansible_connection_options(self, instance_name):
return {'ansible_connection': 'docker'}
|
Python
| 0.999971
|
@@ -2577,16 +2577,63 @@
ame: bar
+%0A docker_host: tcp://localhost:12376
%0A%0A ..
@@ -2851,24 +2851,24 @@
safe_files:%0A
+
@@ -2878,427 +2878,8 @@
oo%0A%0A
- To use a URL to connect to the Docker API instead of the default Unix socket%0A path %60unix://var/run/docker.sock%60.%0A%0A .. code-block:: yaml%0A%0A driver:%0A name: docker%0A platforms:%0A - name: instance%0A image: centos:7%0A docker_host: 'tcp://localhost:12376'%0A provisioner:%0A name: ansible%0A env:%0A DOCKER_HOST: 'tcp://localhost:12376'%0A%0A
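Decoded, the hunks fold docker_host into the main platforms example and delete the now-redundant 'To use a URL...' section; the example's tail becomes (a sketch of the reconstructed yaml):

            networks:
              - name: foo
              - name: bar
            docker_host: tcp://localhost:12376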
|
c761c55ebc5d7f89116cbfe81031953394657b85
|
patch utils.which
|
daemon/tests/conftest.py
|
daemon/tests/conftest.py
|
"""
Unit test fixture module.
"""
import threading
import time
import mock
import pytest
from mock.mock import MagicMock
from core.api.grpc.client import InterfaceHelper
from core.api.grpc.server import CoreGrpcServer
from core.api.tlv.corehandlers import CoreHandler
from core.emulator.coreemu import CoreEmu
from core.emulator.data import IpPrefixes
from core.emulator.distributed import DistributedServer
from core.emulator.enumerations import EventTypes
from core.emulator.session import Session
from core.nodes.base import CoreNode
from core.nodes.netclient import LinuxNetClient
EMANE_SERVICES = "zebra|OSPFv3MDR|IPForward"
class PatchManager:
def __init__(self):
self.patches = []
def patch_obj(self, _cls, attribute, return_value=None):
p = mock.patch.object(_cls, attribute, return_value=return_value)
p.start()
self.patches.append(p)
def patch(self, func):
p = mock.patch(func)
p.start()
self.patches.append(p)
def shutdown(self):
for p in self.patches:
p.stop()
class MockServer:
def __init__(self, coreemu):
self.config = {}
self.coreemu = coreemu
@pytest.fixture(scope="session")
def patcher(request):
patch_manager = PatchManager()
patch_manager.patch_obj(DistributedServer, "remote_cmd", return_value="1")
if request.config.getoption("mock"):
patch_manager.patch("os.mkdir")
patch_manager.patch("core.utils.cmd")
patch_manager.patch("core.nodes.netclient.get_net_client")
patch_manager.patch_obj(
LinuxNetClient, "get_mac", return_value="00:00:00:00:00:00"
)
patch_manager.patch_obj(CoreNode, "nodefile")
patch_manager.patch_obj(Session, "write_state")
patch_manager.patch_obj(Session, "write_nodes")
yield patch_manager
patch_manager.shutdown()
@pytest.fixture(scope="session")
def global_coreemu(patcher):
coreemu = CoreEmu(config={"emane_prefix": "/usr"})
yield coreemu
coreemu.shutdown()
@pytest.fixture(scope="session")
def global_session(request, patcher, global_coreemu):
mkdir = not request.config.getoption("mock")
session = Session(1000, {"emane_prefix": "/usr"}, mkdir)
yield session
session.shutdown()
@pytest.fixture(scope="session")
def ip_prefixes():
return IpPrefixes(ip4_prefix="10.83.0.0/16")
@pytest.fixture(scope="session")
def iface_helper():
return InterfaceHelper(ip4_prefix="10.83.0.0/16")
@pytest.fixture(scope="module")
def module_grpc(global_coreemu):
grpc_server = CoreGrpcServer(global_coreemu)
thread = threading.Thread(target=grpc_server.listen, args=("localhost:50051",))
thread.daemon = True
thread.start()
time.sleep(0.1)
yield grpc_server
grpc_server.server.stop(None)
@pytest.fixture(scope="module")
def module_coretlv(patcher, global_coreemu, global_session):
request_mock = MagicMock()
request_mock.fileno = MagicMock(return_value=1)
server = MockServer(global_coreemu)
request_handler = CoreHandler(request_mock, "", server)
request_handler.session = global_session
request_handler.add_session_handlers()
yield request_handler
@pytest.fixture
def grpc_server(module_grpc):
yield module_grpc
module_grpc.coreemu.shutdown()
@pytest.fixture
def session(global_session):
global_session.set_state(EventTypes.CONFIGURATION_STATE)
yield global_session
global_session.clear()
@pytest.fixture
def coretlv(module_coretlv):
session = module_coretlv.session
coreemu = module_coretlv.coreemu
coreemu.sessions[session.id] = session
yield module_coretlv
coreemu.shutdown()
def pytest_addoption(parser):
parser.addoption("--distributed", help="distributed server address")
parser.addoption("--mock", action="store_true", help="run without mocking")
def pytest_generate_tests(metafunc):
distributed_param = "distributed_address"
if distributed_param in metafunc.fixturenames:
distributed_address = metafunc.config.getoption("distributed")
metafunc.parametrize(distributed_param, [distributed_address])
|
Python
| 0
|
@@ -1470,24 +1470,72 @@
utils.cmd%22)%0A
+ patch_manager.patch(%22core.utils.which%22)%0A
patc
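The hunk adds one more patch next to the existing core.utils.cmd mock:

        patch_manager.patch("core.utils.cmd")
        patch_manager.patch("core.utils.which")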
|
bdda5ca28721dfeca8cfe42afb81fee09b78e83a
|
fix tap handler
|
daiquiri/tap/handlers.py
|
daiquiri/tap/handlers.py
|
from django.conf import settings
from django.dispatch import receiver
from django.db.models.signals import post_save, post_delete
from daiquiri.metadata.models import Database, Table, Column
from daiquiri.metadata.settings import ACCESS_LEVEL_PUBLIC, ACCESS_LEVEL_INTERNAL
from .models import (
Schema as TapSchema,
Table as TapTable,
Column as TapColumn,
)
@receiver(post_save, sender=Database)
def database_updated_handler(sender, **kwargs):
instance = kwargs['instance']
# check if the metadata_access_level is public or it matches the TAP_ACCESS_LEVEL
tap = (instance.metadata_access_level == ACCESS_LEVEL_PUBLIC) or \
(instance.metadata_access_level == settings.TAP_ACCESS_LEVEL)
if tap:
try:
schema = TapSchema.objects.using('tap').get(pk=instance.id)
except TapSchema.DoesNotExist:
schema = TapSchema.objects.using('tap').create(pk=instance.id)
schema.schema_name = instance.name
schema.utype = None
if instance.description:
schema.description = instance.description[:255]
schema.save()
else:
# remove the database from the TAP_SCHEMA (if it exists)
try:
TapSchema.objects.using('tap').get(pk=instance.id).delete()
except TapSchema.DoesNotExist:
pass
# call the handler for each table of the database
for table in instance.tables.all():
table_updated_handler(Table, instance=table)
@receiver(post_delete, sender=Database)
def database_deleted_handler(sender, **kwargs):
instance = kwargs['instance']
# remove the database from the TAP_SCHEMA (if it exists)
try:
TapSchema.objects.using('tap').get(pk=instance.id).delete()
except TapSchema.DoesNotExist:
pass
@receiver(post_save, sender=Table)
def table_updated_handler(sender, **kwargs):
instance = kwargs['instance']
# check if the metadata_access_level is public or it matches the TAP_ACCESS_LEVEL
tap = (instance.metadata_access_level == ACCESS_LEVEL_PUBLIC) or \
(instance.metadata_access_level == settings.TAP_ACCESS_LEVEL)
# get the schema from the TAP_SCHEMA
try:
schema = TapSchema.objects.using('tap').get(pk=instance.database.id)
except TapSchema.DoesNotExist:
schema = None
if tap and schema:
try:
table = TapTable.objects.using('tap').get(pk=instance.id)
table.schema = schema
except TapTable.DoesNotExist:
table = TapTable.objects.using('tap').create(pk=instance.id, schema=schema)
table.schema_name = str(instance.database)
table.table_name = instance.name
table.table_type = instance.type
table.utype = instance.utype
if instance.description:
table.description = instance.description[:255]
table.table_index = instance.order
table.save()
else:
# remove the table from the TAP_SCHEMA (if it exists)
try:
TapTable.objects.using('tap').get(pk=instance.id).delete()
except TapTable.DoesNotExist:
pass
# call the handler for each column of the table
for columns in instance.columns.all():
column_updated_handler(Column, instance=columns)
@receiver(post_delete, sender=Table)
def table_deleted_handler(sender, **kwargs):
instance = kwargs['instance']
# remove the table from the TAP_SCHEMA (if it exists)
try:
TapTable.objects.using('tap').get(pk=instance.id).delete()
except TapTable.DoesNotExist:
pass
@receiver(post_save, sender=Column)
def column_updated_handler(sender, **kwargs):
instance = kwargs['instance']
# check if the metadata_access_level is public or it matches the TAP_ACCESS_LEVEL
tap = (instance.metadata_access_level == ACCESS_LEVEL_PUBLIC) or \
(instance.metadata_access_level == settings.TAP_ACCESS_LEVEL)
# get the table from the TAP_SCHEMA
try:
table = TapTable.objects.using('tap').get(pk=instance.table.id)
except TapTable.DoesNotExist:
table = None
if tap and table:
try:
column = TapColumn.objects.using('tap').get(pk=instance.id)
column.table = table
except TapColumn.DoesNotExist:
column = TapColumn.objects.using('tap').create(pk=instance.id, table=table)
column.table_name = str(instance.table)
column.column_name = instance.name
column.datatype = instance.datatype
column.arraysize = instance.size
column.size = instance.size
if instance.description:
schema.description = instance.description[:255]
column.utype = instance.utype
column.unit = instance.unit
column.ucd = instance.ucd
column.indexed = instance.indexed
column.principal = instance.principal
column.std = instance.std
column.column_index = instance.order
column.save()
else:
# remove the column from the TAP_SCHEMA (if it exists)
try:
TapColumn.objects.using('tap').get(pk=instance.id).delete()
except TapColumn.DoesNotExist:
pass
@receiver(post_delete, sender=Column)
def column_deleted_handler(sender, **kwargs):
instance = kwargs['instance']
# remove the column from the TAP_SCHEMA (if it exists)
try:
TapColumn.objects.using('tap').get(pk=instance.id).delete()
except TapColumn.DoesNotExist:
pass
|
Python
| 0.000002
|
@@ -4625,38 +4625,38 @@
on:%0A
-schema
+column
.description = i
|
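The handlers in this record repeat a try/except-DoesNotExist dance per model; a hedged sketch of the same sync step using Django's get_or_create (SourceModel and MirrorModel are hypothetical stand-ins, not daiquiri models):

from django.db.models.signals import post_save
from django.dispatch import receiver

@receiver(post_save, sender=SourceModel)  # SourceModel/MirrorModel are illustrative
def mirror_updated_handler(sender, **kwargs):
    instance = kwargs['instance']
    mirror, _ = MirrorModel.objects.using('tap').get_or_create(pk=instance.pk)
    mirror.name = instance.name
    if instance.description:
        mirror.description = instance.description[:255]  # truncate to column size
    mirror.save()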
27e30c4172f2da79168640799188f0394b88c9ec
|
Fix circular import between querysets.workflow and models.domain
|
swf/models/domain.py
|
swf/models/domain.py
|
# -*- coding: utf-8 -*-
from boto.swf.exceptions import SWFResponseError, SWFDomainAlreadyExistsError
from swf.constants import REGISTERED
from swf.core import ConnectedSWFObject
from swf.querysets.workflow import WorkflowTypeQuerySet
from swf.exceptions import AlreadyExistsError, DoesNotExistError
class Domain(ConnectedSWFObject):
"""Simple Workflow Domain wrapper
Params
------
* name:
* type: String
* value: Name of the domain to register (unique)
* retention_period
* type: Integer
* value: Domain's workflow executions records retention in days
* status
* type: swf.core.ConnectedSWFObject.{REGISTERED, DEPRECATED}
* value: the domain status
* description
* type: String
* value: Textual description of the domain
"""
def __init__(self, name,
status=REGISTERED,
description=None,
retention_period=30, *args, **kwargs):
super(Domain, self).__init__(*args, **kwargs)
self.name = name
self.status = status
self.retention_period = retention_period
self.description = description
def save(self):
"""Creates the domain amazon side"""
try:
self.connection.register_domain(self.name,
str(self.retention_period),
self.description)
except SWFDomainAlreadyExistsError:
raise AlreadyExistsError("Domain %s already exists amazon-side" % self.name)
def delete(self):
"""Deprecates the domain amazon side"""
try:
self.connection.deprecate_domain(self.name)
except SWFResponseError as e:
if e.error_code == 'UnknownResourceFault':
raise DoesNotExistError("Domain %s does not exist amazon-side" % self.name)
def workflows(self, status=REGISTERED):
"""Lists the current domain's workflows"""
qs = WorkflowTypeQuerySet(self.name)
return qs.all(registration_status=status)
@property
def executions(self):
pass
def __repr__(self):
return '<{} name={} status={}>'.format(
self.__class__.__name__, self.name, self.status)
|
Python
| 0.000025
|
@@ -178,64 +178,8 @@
ect%0A
-from swf.querysets.workflow import WorkflowTypeQuerySet%0A
from
@@ -1938,24 +1938,88 @@
orkflows%22%22%22%0A
+ from swf.querysets.workflow import WorkflowTypeQuerySet%0A
qs =
|
6e013f4c5f9f71e3b4386c3b401449922ffdfad8
|
fix colorization
|
utils.py
|
utils.py
|
import torch
import numpy as np
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.initialized = False
self.val = None
self.avg = None
self.sum = None
self.count = None
def initialize(self, val, weight):
self.val = val
self.avg = val
self.sum = val * weight
self.count = weight
self.initialized = True
def update(self, val, weight=1):
if not self.initialized:
self.initialize(val, weight)
else:
self.add(val, weight)
def add(self, val, weight):
self.val = val
self.sum += val * weight
self.count += weight
self.avg = self.sum / self.count
def value(self):
return self.val
def average(self):
return self.avg
def unique(ar, return_index=False, return_inverse=False, return_counts=False):
ar = np.asanyarray(ar).flatten()
optional_indices = return_index or return_inverse
optional_returns = optional_indices or return_counts
if ar.size == 0:
if not optional_returns:
ret = ar
else:
ret = (ar,)
if return_index:
ret += (np.empty(0, np.bool),)
if return_inverse:
ret += (np.empty(0, np.bool),)
if return_counts:
ret += (np.empty(0, np.intp),)
return ret
if optional_indices:
perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
aux = ar[perm]
else:
ar.sort()
aux = ar
flag = np.concatenate(([True], aux[1:] != aux[:-1]))
if not optional_returns:
ret = aux[flag]
else:
ret = (aux[flag],)
if return_index:
ret += (perm[flag],)
if return_inverse:
iflag = np.cumsum(flag) - 1
inv_idx = np.empty(ar.shape, dtype=np.intp)
inv_idx[perm] = iflag
ret += (inv_idx,)
if return_counts:
idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
ret += (np.diff(idx),)
return ret
def colorEncode(labelmap, colors):
labelmap = labelmap.astype('int')
labelmap_rgb = np.zeros((labelmap.shape[0], labelmap.shape[1], 3),
dtype=np.uint8)
for label in unique(labelmap):
labelmap_rgb += (labelmap == label)[:, :, np.newaxis] * \
np.tile(colors[label],
(labelmap.shape[0], labelmap.shape[1], 1))
return labelmap_rgb
def accuracy(batch_data, pred):
(imgs, segs, infos) = batch_data
_, preds = torch.max(pred.data.cpu(), dim=1)
valid = (segs >= 0)
acc = 1.0 * torch.sum(valid * (preds == segs)) / (torch.sum(valid) + 1e-10)
return acc, torch.sum(valid)
def intersectionAndUnion(batch_data, pred, numClass):
(imgs, segs, infos) = batch_data
_, preds = torch.max(pred.data.cpu(), dim=1)
# compute area intersection
intersect = preds.clone()
intersect[torch.ne(preds, segs)] = -1
area_intersect = torch.histc(intersect.float(),
bins=numClass,
min=0,
max=numClass-1)
# compute area union:
preds[torch.lt(segs, 0)] = -1
area_pred = torch.histc(preds.float(),
bins=numClass,
min=0,
max=numClass-1)
area_lab = torch.histc(segs.float(),
bins=numClass,
min=0,
max=numClass-1)
area_union = area_pred + area_lab - area_intersect
return area_intersect, area_union
|
Python
| 0.000004
|
@@ -2385,24 +2385,63 @@
elmap):%0A
+ if label %3C 0:%0A continue%0A
labe
@@ -2506,20 +2506,16 @@
-
np.tile(
@@ -2529,20 +2529,16 @@
label%5D,%0A
-
|
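Why the added `if label < 0: continue` matters: NumPy treats a negative index as wrap-around, so an ignore label of -1 would silently pull the last palette entry. A self-contained check with a toy palette and label map (not the project's data):

import numpy as np

colors = np.array([[255, 0, 0], [0, 255, 0]], dtype=np.uint8)  # toy palette
labelmap = np.array([[0, 1], [-1, 0]])                         # -1 marks ignore pixels
rgb = np.zeros(labelmap.shape + (3,), dtype=np.uint8)
for label in np.unique(labelmap):
    if label < 0:
        continue                    # skip instead of indexing colors[-1]
    rgb[labelmap == label] = colors[label]
print(rgb[1, 0])                    # [0 0 0] -- the ignore pixel stays black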
b07d1b73c3b3bdee8af53b544009b0de8ef436ac
|
use isinstance and types.*
|
utils.py
|
utils.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
System-level utilities and helper functions.
"""
import datetime
import logging
import os
import random
import shlex
import sys
import types
from eventlet import greenthread
from eventlet.green import subprocess
from openstack.common import exception
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
def int_from_bool_as_string(subject):
"""
Interpret a string as a boolean and return either 1 or 0.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
return bool_from_string(subject) and 1 or 0
def bool_from_string(subject):
"""
Interpret a string as a boolean.
Any string value in:
('True', 'true', 'On', 'on', '1')
is interpreted as a boolean True.
Useful for JSON-decoded stuff and config file parsing
"""
if type(subject) == type(bool):
return subject
if hasattr(subject, 'startswith'): # str or unicode...
if subject.strip().lower() in ('true', 'on', '1'):
return True
return False
def execute(*cmd, **kwargs):
"""
Helper method to execute command with optional retry.
:cmd Passed to subprocess.Popen.
:process_input Send to opened process.
:check_exit_code Defaults to 0. Raise exception.ProcessExecutionError
unless program exits with this code.
:delay_on_retry True | False. Defaults to True. If set to True, wait a
short amount of time before retrying.
:attempts How many times to retry cmd.
:run_as_root True | False. Defaults to False. If set to True,
the command is prefixed by the command specified
in the root_helper kwarg.
:root_helper command to prefix all cmd's with
:raises exception.Error on receiving unknown arguments
:raises exception.ProcessExecutionError
"""
process_input = kwargs.pop('process_input', None)
check_exit_code = kwargs.pop('check_exit_code', 0)
delay_on_retry = kwargs.pop('delay_on_retry', True)
attempts = kwargs.pop('attempts', 1)
run_as_root = kwargs.pop('run_as_root', False)
root_helper = kwargs.pop('root_helper', '')
if len(kwargs):
raise exception.Error(_('Got unknown keyword args '
'to utils.execute: %r') % kwargs)
if run_as_root:
cmd = shlex.split(root_helper) + list(cmd)
cmd = map(str, cmd)
while attempts > 0:
attempts -= 1
try:
LOG.debug(_('Running cmd (subprocess): %s'), ' '.join(cmd))
_PIPE = subprocess.PIPE # pylint: disable=E1101
obj = subprocess.Popen(cmd,
stdin=_PIPE,
stdout=_PIPE,
stderr=_PIPE,
close_fds=True)
result = None
if process_input is not None:
result = obj.communicate(process_input)
else:
result = obj.communicate()
obj.stdin.close() # pylint: disable=E1101
_returncode = obj.returncode # pylint: disable=E1101
if _returncode:
LOG.debug(_('Result was %s') % _returncode)
if type(check_exit_code) == types.IntType \
and _returncode != check_exit_code:
(stdout, stderr) = result
raise exception.ProcessExecutionError(
exit_code=_returncode,
stdout=stdout,
stderr=stderr,
cmd=' '.join(cmd))
return result
except exception.ProcessExecutionError:
if not attempts:
raise
else:
LOG.debug(_('%r failed. Retrying.'), cmd)
if delay_on_retry:
greenthread.sleep(random.randint(20, 200) / 100.0)
finally:
# NOTE(termie): this appears to be necessary to let the subprocess
# call clean something up in between calls, without
# it two execute calls in a row hangs the second one
greenthread.sleep(0)
def import_class(import_str):
"""Returns a class from a string including module and class"""
mod_str, _sep, class_str = import_str.rpartition('.')
try:
__import__(mod_str)
return getattr(sys.modules[mod_str], class_str)
except (ImportError, ValueError, AttributeError):
raise exception.NotFound('Class %s cannot be found' % class_str)
def import_object(import_str):
"""Returns an object including a module or module and class"""
try:
__import__(import_str)
return sys.modules[import_str]
except ImportError:
cls = import_class(import_str)
return cls()
def isotime(at=None):
if not at:
at = datetime.datetime.utcnow()
return at.strftime(TIME_FORMAT)
def parse_isotime(timestr):
return datetime.datetime.strptime(timestr, TIME_FORMAT)
def parse_mailmap(mailmap='.mailmap'):
mapping = {}
if os.path.exists(mailmap):
fp = open(mailmap, 'r')
for l in fp:
l = l.strip()
if not l.startswith('#') and ' ' in l:
canonical_email, alias = l.split(' ')
mapping[alias] = canonical_email
return mapping
def str_dict_replace(s, mapping):
for s1, s2 in mapping.iteritems():
s = s.replace(s1, s2)
return s
def utcnow():
"""Overridable version of utils.utcnow."""
if utcnow.override_time:
return utcnow.override_time
return datetime.datetime.utcnow()
utcnow.override_time = None
def set_time_override(override_time=datetime.datetime.utcnow()):
"""Override utils.utcnow to return a constant time."""
utcnow.override_time = override_time
def clear_time_override():
"""Remove the overridden time."""
utcnow.override_time = None
|
Python
| 0
|
@@ -1586,19 +1586,25 @@
%0A if
-typ
+isinstanc
e(subjec
@@ -1608,22 +1608,27 @@
ject
-) ==
+,
type
-(bool
+s.BooleanType
):%0A
@@ -1660,60 +1660,47 @@
if
-hasattr(subject, 'startswith'): # str or unicode...
+isinstance(subject, types.StringTypes):
%0A
|
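A quick illustration of the bug this diff removes: `type(subject) == type(bool)` compares against `type` itself, because `bool` is a class, so the old guard never matched an actual boolean. In modern Python 3 spelling:

assert type(bool) is type          # bool is a class, so the old test could never pass
assert isinstance(True, bool)      # what the fixed check actually tests
assert isinstance("on", str)       # Py3 analogue of types.StringTypes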
c04ac9ef5f112674df0ae43cc1f0be98050cdac5
|
Check for NoneType. (#253)
|
utils.py
|
utils.py
|
#!/usr/bin/env python3
import hashlib
import struct
import logging
import socket
from lrucache import *
logger = logging.getLogger('utils')
# The maximum number of clashes to allow when assigning a port.
MAX_CLASHES = 50
class ServicePortAssigner(object):
"""
Helper class to assign service ports.
Ordinarily Marathon should assign the service ports, but Marathon issue
https://github.com/mesosphere/marathon/issues/3636 means that service
ports are not returned for applications using IP-per-task. We work around
that here by assigning deterministic ports from a configurable range when
required.
Note that auto-assigning ports is only useful when using vhost: the ports
that we assign here are not exposed to the client.
The LB command line options --min-serv-port-ip-per-task and
--max-serv-port-ip-per-task specify the allowed range of ports to
auto-assign from. The range of ports used for auto-assignment should be
selected to ensure no clashes with the exposed LB ports and the
Marathon-assigned services ports.
The service port assigner provides a mechanism to auto assign service ports
using the application name to generate the service port (while preventing
clashes when the port is already claimed by another app). The assigner
provides a deterministic set of ports for a given ordered set of port
requests.
"""
def __init__(self):
self.min_port = None
self.max_port = None
self.max_ports = None
self.can_assign = False
self.next_port = None
self.ports_by_app = {}
def _assign_new_service_port(self, app, task_port):
assert self.can_assign
if self.max_ports <= len(self.ports_by_app):
logger.warning("Service ports are exhausted")
return None
# We don't want to be searching forever, so limit the number of times
# we clash to the number of remaining ports.
ports = self.ports_by_app.values()
port = None
for i in range(MAX_CLASHES):
hash_str = "%s-%s-%s" % (app['id'], task_port, i)
hash_val = hashlib.sha1(hash_str.encode("utf-8")).hexdigest()
hash_int = int(hash_val[:8], 16)
trial_port = self.min_port + (hash_int % self.max_ports)
if trial_port not in ports:
port = trial_port
break
if port is None:
for port in range(self.min_port, self.max_port + 1):
if port not in ports:
break
# We must have assigned a unique port by now since we know there were
# some available.
assert port and port not in ports, port
logger.debug("Assigned new port: %d", port)
return port
def _get_service_port(self, app, task_port):
key = (app['id'], task_port)
port = (self.ports_by_app.get(key) or
self._assign_new_service_port(app, task_port))
self.ports_by_app[key] = port
return port
def set_ports(self, min_port, max_port):
"""
Set the range of ports that we can use for auto-assignment of
service ports - just for IP-per-task apps.
:param min_port: The minimum port value
:param max_port: The maximum port value
"""
assert not self.ports_by_app
assert max_port >= min_port
self.min_port = min_port
self.max_port = max_port
self.max_ports = max_port - min_port + 1
self.can_assign = self.min_port and self.max_port
def reset(self):
"""
Reset the assigner so that ports are newly assigned.
"""
self.ports_by_app = {}
def get_service_ports(self, app):
"""
Return a list of service ports for this app.
:param app: The application.
:return: The list of ports. Note that if auto-assigning and ports
become exhausted, a port may be returned as None.
"""
ports = filter(lambda p: p is not None,
map(lambda p: p.get('port', None),
app.get('portDefinitions', []))
)
ports = list(ports) # wtf python?
if not ports and is_ip_per_task(app) and self.can_assign \
and len(app['tasks']) > 0:
logger.warning("Auto assigning service port for "
"IP-per-container task")
task = app['tasks'][0]
_, task_ports = get_task_ip_and_ports(app, task)
ports = [self._get_service_port(app, task_port)
for task_port in task_ports]
logger.debug("Service ports: %r", ports)
return ports
def resolve_ip(host):
cached_ip = ip_cache.get(host, None)
if cached_ip:
return cached_ip
else:
try:
logger.debug("trying to resolve ip address for host %s", host)
ip = socket.gethostbyname(host)
ip_cache.set(host, ip)
return ip
except socket.gaierror:
return None
ip_cache = LRUCache()
def set_ip_cache(new_ip_cache):
ip_cache = new_ip_cache
def is_ip_per_task(app):
"""
Return whether the application is using IP-per-task.
:param app: The application to check.
:return: True if using IP per task, False otherwise.
"""
return app.get('ipAddress') is not None
def get_task_ip_and_ports(app, task):
"""
Return the IP address and list of ports used to access a task. For a
task using IP-per-task, this is the IP address of the task, and the ports
exposed by the task services. Otherwise, this is the IP address of the
host and the ports exposed by the host.
:param app: The application owning the task.
:param task: The task.
:return: Tuple of (ip address, [ports]). Returns (None, None) if no IP
address could be resolved or found for the task.
"""
# If the app ipAddress field is present and not None then this app is using
# IP per task. The ipAddress may be an empty dictionary though, in which
# case there are no discovery ports. At the moment, Mesos only supports a
# single IP address, so just take the first IP in the list.
if is_ip_per_task(app):
logger.debug("Using IP per container")
task_ip_addresses = task.get('ipAddresses')
if not task_ip_addresses:
logger.warning("Task %s does not yet have an ip address allocated",
task['id'])
return None, None
task_ip = task_ip_addresses[0]['ipAddress']
discovery = app['ipAddress'].get('discovery', {})
task_ports = [int(port['number'])
for port in discovery.get('ports', [])]
else:
logger.debug("Using host port mapping")
task_ports = task.get('ports', [])
task_ip = resolve_ip(task['host'])
if not task_ip:
logger.warning("Could not resolve ip for host %s, ignoring",
task['host'])
return None, None
logger.debug("Returning: %r, %r", task_ip, task_ports)
return task_ip, task_ports
|
Python
| 0
|
@@ -4559,16 +4559,59 @@
, task)%0A
+ if task_ports is not None:%0A
@@ -4654,32 +4654,36 @@
app, task_port)%0A
+
|
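A standalone sketch of the deterministic derivation `_assign_new_service_port` performs: hash the (app id, task port, attempt) triple and fold the digest into the configured range. The function name and port range here are illustrative:

import hashlib

def derive_port(app_id, task_port, attempt, min_port=10000, max_port=10100):
    span = max_port - min_port + 1
    hash_str = "%s-%s-%s" % (app_id, task_port, attempt)
    digest = hashlib.sha1(hash_str.encode("utf-8")).hexdigest()
    return min_port + int(digest[:8], 16) % span

print(derive_port("/my-app", 8080, 0))   # same port on every run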
b26d057bc2cb16e98e3af95f6f2a2e5f4bff07cf
|
Add final command to run results
|
utils.py
|
utils.py
|
# Copyright (c) 2010, Silas Sewell
# All rights reserved.
#
# This file is subject to the New BSD License (see the LICENSE file).
import copy
import inspect
import os
import pipes
import string
import subprocess
import sys
DIRECTORY_STACK_NAME = '__utils_directory_stack'
class Objectify(dict):
def __getattr__(self, name):
try:
return self[name]
except KeyError:
raise AttributeError(name)
def __nonzero__(self):
try:
return self['_bool']
except KeyError:
return len(self) > 0
def __repr__(self):
return repr(self.dict())
def __str__(self):
return str(self.dict())
def __unicode__(self):
return unicode(self.dict())
def dict(self):
values = {}
for name, value in self.items():
if not name.startswith('_'):
values[name] = value
return values
def exit(code=0, text=''):
"""Exit and print text (if defined) to stderr if code > 0 or stdout
otherwise.
exit(code=1, text='Invalid directory path')
"""
if not isinstance(text, basestring):
text = unicode(text)
if code > 0:
if text:
print >> sys.stderr, text
sys.exit(code)
else:
if text:
print text
sys.exit(0)
def mkdir(path, recursive=True):
"""Create a directory at the specified path. By default this function
recursively creates the structure.
mkdir('/tmp/build')
"""
if os.path.exists(path):
return True
try:
if recursive:
os.makedirs(path)
else:
os.mkdir(path)
except OSError:
return False
return True
def popd(no_class=False):
"""Remove last path from the stack and make it the current working
directory. By default popd will look for the stack variable in self if
in a method and the local scope if in a function. The class behaviour can
be disabled by passing no_class=True.
popd()
"""
# Get locals from caller
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
locals = calframe[1][0].f_locals
# Use self if caller is a method and no_class is false
if not no_class and 'self' in locals:
locals = locals['self'].__dict__
# Get or create directory stack variable
path = ''
success = False
if DIRECTORY_STACK_NAME in locals:
stack = locals[DIRECTORY_STACK_NAME]
# Do popd
if len(stack) > 0:
path = stack.pop()
try:
os.chdir(path)
success = True
except OSError:
pass
# Return results with path
return Objectify({
'_bool': success,
'path': path,
})
def pushd(path, no_class=False):
"""Add the current working directory to the stack and switch to the path
specified. By default pushd will attach the stack variable to self if
in a method and the local scope if in a function. The class behaviour can
be disabled by passing no_class=True.
pushd('/tmp')
"""
# Get locals from caller
curframe = inspect.currentframe()
calframe = inspect.getouterframes(curframe, 2)
locals = calframe[1][0].f_locals
# Use self if caller is a method and no_class is false
if not no_class and 'self' in locals:
locals = locals['self'].__dict__
# Get or create directory stack variable
if DIRECTORY_STACK_NAME not in locals:
stack = locals[DIRECTORY_STACK_NAME] = []
else:
stack = locals[DIRECTORY_STACK_NAME]
# Do pushd
success = False
try:
stack.append(os.getcwd())
os.chdir(path)
success = True
except OSError:
stack.pop()
# Delete variable if empty
if not locals[DIRECTORY_STACK_NAME]:
del locals[DIRECTORY_STACK_NAME]
# Return results with path
return Objectify({
'_bool': success,
'path': path,
})
def rm(path, recursive=False):
"""Delete a specified file or directory. This function does not recursively
delete by default.
rm('/tmp/build', recursive=True)
"""
try:
if recursive:
if os.path.isfile(path):
os.remove(path)
else:
shutil.rmtree(path)
else:
if os.path.isfile(path):
os.remove(path)
else:
os.rmdir(path)
except OSError:
return False
return True
def run(command, **kwargs):
"""Run a shell command and wait for the response. The result object will
resolve to True if result.code == 0 and output/error results can be
retrieved from result.stdout and result.stderr variables.
run('ls ${path}', path='/tmp')
"""
if ('env' in kwargs and not kwargs.get('env_empty') and
isinstance(kwargs['env'], dict)):
env = copy.deepcopy(os.environ)
env.update(kwargs['env'])
elif kwargs.get('env_empty'):
env = {}
else:
env = None
if kwargs:
args = {}
for name, value in kwargs.items():
args[name] = pipes.quote(value)
command = string.Template(command).safe_substitute(args)
ref = subprocess.Popen(
command,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
shell=True,
close_fds=True,
env=env,
)
data = ref.communicate()
return Objectify({
'_bool': ref.returncode == 0,
'code': ref.returncode,
'stdout': data[0],
'stderr': data[1],
})
|
Python
| 0.000004
|
@@ -5532,16 +5532,44 @@
e == 0,%0A
+ 'command': command,%0A
|
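Hedged usage of the `run()` helper above, assuming the module has been imported; with the added 'command' field, the fully substituted command line travels with the result for logging:

result = run('ls ${path}', path='/tmp')
if result:                                # Objectify is truthy when code == 0
    print(result.command, '->', len(result.stdout), 'bytes of output')
else:
    print('failed (%d): %s' % (result.code, result.stderr))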
157a09187bccfbfae9b4698159f3a889cb619dd6
|
Call resp.json()
|
utils.py
|
utils.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2015–2020 Molly White
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from datetime import datetime
import os
import requests
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
headers = {'User-Agent': 'Boston Snowbot (https://github.com/molly/boston-snowbot)'}
def log(message):
"""Write message to a logfile."""
with open(os.path.join(__location__, "snowbot.log"), 'a') as f:
f.write("\n" + datetime.today().strftime("%H:%M %Y-%m-%d") + " " + message)
def fetch(url, is_json = False):
"""Make a request to a URL, and handle errors as needed."""
try:
resp = requests.get(url, headers=headers, timeout=5)
except requests.exceptions.Timeout:
log("Request timed out when trying to hit {}".format(url))
except requests.exceptions.ConnectionError:
log("Connection error when trying to hit {}".format(url))
except requests.exceptions.HTTPError:
log("HTTP error when trying to hit {}".format(url))
else:
if is_json:
return resp.json
return resp.text
|
Python
| 0.000001
|
@@ -2120,16 +2120,18 @@
esp.json
+()
%0A
|
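The one-character diff above fixes a classic slip: `resp.json` is a bound method, so the caller received a callable instead of parsed data. A dependency-free illustration with a stand-in response object:

class FakeResponse:                      # stand-in, not a real requests object
    def json(self):
        return {"ok": True}

resp = FakeResponse()
assert callable(resp.json)               # what the unfixed code returned
assert resp.json() == {"ok": True}       # what the fix returns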
e2dd97f16f4f8223c25dbaf661863b3e7323a302
|
Add more make errors. I now need to add context lines.
|
mozharness/base/errors.py
|
mozharness/base/errors.py
|
#!/usr/bin/env python
"""Generic error regexes.
We could also create classes that generate these, but with the appropriate
level (please don't die on any errors; please die on any warning; etc.)
"""
# ErrorLists {{{1
""" TODO: more of these.
We could have a generic shell command error list (e.g. File not found,
permission denied) that others could be based on.
"""
# For ssh, scp, rsync over ssh
SSHErrorList=[
{'substr': 'Name or service not known', 'level': 'error'},
{'substr': 'Could not resolve hostname', 'level': 'error'},
{'substr': 'POSSIBLE BREAK-IN ATTEMPT', 'level': 'warning'},
{'substr': 'Network error:', 'level': 'error'},
{'substr': 'Access denied', 'level': 'error'},
{'substr': 'Authentication refused', 'level': 'error'},
{'substr': 'Out of memory', 'level': 'error'},
{'substr': 'Connection reset by peer', 'level': 'warning'},
{'substr': 'Host key verification failed', 'level': 'error'},
{'substr': 'command not found', 'level': 'error'},
{'substr': 'WARNING:', 'level': 'warning'},
{'substr': 'rsync error:', 'level': 'error'},
{'substr': 'Broken pipe:', 'level': 'error'},
{'substr': 'connection unexpectedly closed:', 'level': 'error'},
]
HgErrorList=[
{'regex': '^abort:', 'level': 'error'},
{'substr': 'command not found', 'level': 'error'},
{'substr': 'unknown exception encountered', 'level': 'error'},
]
PythonErrorList=[
{'substr': 'Traceback (most recent call last)', 'level': 'error'},
{'substr': 'SyntaxError: ', 'level': 'error'},
{'substr': 'TypeError: ', 'level': 'error'},
{'substr': 'NameError: ', 'level': 'error'},
{'substr': 'ZeroDivisionError: ', 'level': 'error'},
{'substr': 'command not found', 'level': 'error'},
]
# TODO determine if I've got enough from
# http://www.gnu.org/software/automake/manual/make/Error-Messages.html
MakefileErrorList = [
{'substr': 'No rule to make target ', 'level': 'error'},
{'regex': 'akefile.*was not found\.', 'level': 'error'},
{'regex': 'Stop\.$', 'level': 'error'},
]
# __main__ {{{1
if __name__ == '__main__':
"""TODO: unit tests.
"""
pass
|
Python
| 0.000017
|
@@ -1981,16 +1981,178 @@
rror'%7D,%0A
+ %7B'regex': ':%5Cd+: error:', 'level': 'error'%7D,%0A %7B'regex': 'make%5C%5B%5Cd+%5C%5D: %5C*%5C*%5C* %5C%5B.*%5C%5D Error %5Cd+', 'level': 'error'%7D,%0A %7B'substr': 'Warning: ', 'level': 'warning'%7D,%0A
%5D%0A%0A%0A%0A# _
|
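A sketch of how such an error list is typically applied line by line; the `classify` helper is illustrative, and the real mozharness matcher also tracks levels and (per the subject) context lines:

import re

error_list = [
    {'substr': 'No rule to make target ', 'level': 'error'},
    {'regex': r'make\[\d+\]: \*\*\* \[.*\] Error \d+', 'level': 'error'},
    {'substr': 'Warning: ', 'level': 'warning'},
]

def classify(line):
    for item in error_list:
        if 'substr' in item and item['substr'] in line:
            return item['level']
        if 'regex' in item and re.search(item['regex'], line):
            return item['level']
    return None

print(classify("make[1]: *** [all] Error 2"))   # -> 'error'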
9391d9091ffee2e01e1fb6654ffbb032dfc9b278
|
fix utils.py
|
utils.py
|
utils.py
|
from mxnet import gluon
from mxnet import autograd
from mxnet import nd
from mxnet import image
from mxnet.gluon import nn
import mxnet as mx
import numpy as np
class DataLoader(object):
"""similiar to gluon.data.DataLoader, but faster"""
def __init__(self, X, y, batch_size, shuffle):
self.batch_size = batch_size
self.shuffle = shuffle
self.X = X
self.y = y
def __iter__(self):
n = self.X.shape[0]
if self.shuffle:
idx = np.arange(n)
np.random.shuffle(idx)
self.X = nd.array(self.X.asnumpy()[idx])
self.y = nd.array(self.y.asnumpy()[idx])
for i in range(n//self.batch_size):
yield (self.X[i*self.batch_size:(i+1)*self.batch_size],
self.y[i*self.batch_size:(i+1)*self.batch_size])
def __len__(self):
return self.X.shape[0]//self.batch_size
def load_data_fashion_mnist(batch_size, resize=None):
"""download the fashion mnist dataest and then load into memory"""
def transform_mnist(data, label):
if resize:
# resize to resize x resize
n = data.shape[0]
new_data = nd.zeros((n, resize, resize, data.shape[3]))
for i in range(n):
new_data[i] = image.imresize(data[i], resize, resize)
data = new_data
# change data from batch x height x width x channel to batch x channel x height x width
return nd.transpose(data.astype('float32'), (0,3,1,2))/255, label.astype('float32')
mnist_train = gluon.data.vision.FashionMNIST(
train=True, transform=transform_mnist)[:]
mnist_test = gluon.data.vision.FashionMNIST(
train=False, transform=transform_mnist)[:]
train_data = DataLoader(mnist_train[0], nd.array(mnist_train[1]), batch_size, shuffle=True)
test_data = DataLoader(mnist_test[0], nd.array(mnist_test[1]), batch_size, shuffle=False)
return (train_data, test_data)
def try_gpu():
"""If GPU is available, return mx.gpu(0); else return mx.cpu()"""
try:
ctx = mx.gpu()
_ = nd.array([0], ctx=ctx)
except:
ctx = mx.cpu()
return ctx
def try_all_gpus():
"""Return all available GPUs, or [mx.gpu()] if there is no GPU"""
ctx_list = []
try:
for i in range(16):
ctx = mx.gpu(i)
_ = nd.array([0], ctx=ctx)
ctx_list.append(ctx)
except:
pass
if not ctx_list:
ctx_list = [mx.cpu()]
return ctx_list
def SGD(params, lr):
for param in params:
param[:] = param - lr * param.grad
def accuracy(output, label):
return nd.mean(output.argmax(axis=1)==label).asscalar()
def _get_batch(batch, ctx):
"""return data and label on ctx"""
if isinstance(batch, mx.io.DataBatch):
data = batch.data[0]
label = batch.label[0]
else:
data, label = batch
return (gluon.utils.split_and_load(data, ctx),
gluon.utils.split_and_load(label, ctx),
data.shape[0])
def evaluate_accuracy(data_iterator, net, ctx=[mx.cpu()]):
acc = nd.array([0])
n = 0.
if isinstance(data_iterator, mx.io.MXDataIter):
data_iterator.reset()
for batch in data_iterator:
data, label, batch_size = _get_batch(batch, ctx)
for X, y in zip(data, label):
acc += nd.sum(net(X).argmax(axis=1)==y).copyto(mx.cpu())
acc.wait_to_read() # don't push too many operators into backend
n += batch_size
return acc.asscalar() / n
def train(train_data, test_data, net, loss, trainer, ctx, num_epochs, print_batches=None):
"""Train a network"""
if isinstance(ctx, mx.Context):
ctx = [ctx]
for epoch in range(num_epochs):
train_loss, train_acc, n = 0.0, 0.0, 0.0
if isinstance(train_data, mx.io.MXDataIter):
train_data.reset()
for i, batch in enumerate(train_data):
data, label, batch_size = _get_batch(batch, ctx)
losses = []
with autograd.record():
outputs = [net(X) for X in data]
losses = [loss(yhat, y) for yhat, y in zip(outputs, label)]
for l in losses:
l.backward()
train_acc += sum([(yhat.argmax(axis=1)==y).sum().asscalar()
for yhat, y in zip(outputs, label)])
train_loss += sum([l.sum().asscalar() for l in losses])
trainer.step(batch_size)
n += batch_size
if print_batches and (i+1) % print_batches == 0:
print("Batch %d. Loss: %f, Train acc %f" % (
n, train_loss/n, train_acc/n
))
test_acc = evaluate_accuracy(test_data, net, ctx)
print("Epoch %d. Loss: %f, Train acc %f, Test acc %f" % (
epoch, train_loss/n, train_acc/n, test_acc
))
class Residual(nn.HybridBlock):
def __init__(self, channels, same_shape=True, **kwargs):
super(Residual, self).__init__(**kwargs)
self.same_shape = same_shape
with self.name_scope():
strides = 1 if same_shape else 2
self.conv1 = nn.Conv2D(channels, kernel_size=3, padding=1,
strides=strides)
self.bn1 = nn.BatchNorm()
self.conv2 = nn.Conv2D(channels, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm()
if not same_shape:
self.conv3 = nn.Conv2D(channels, kernel_size=1,
strides=strides)
def hybrid_forward(self, F, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
if not self.same_shape:
x = self.conv3(x)
return F.relu(out + x)
def resnet18_28(num_classes):
net = nn.HybridSequential()
with net.name_scope():
net.add(
nn.BatchNorm(),
nn.Conv2D(64, kernel_size=3, strides=1),
nn.MaxPool2D(pool_size=3, strides=2),
Residual(64),
Residual(64),
Residual(128, same_shape=False),
Residual(128),
Residual(256, same_shape=False),
Residual(256),
nn.AvgPool2D(pool_size=3),
nn.Dense(num_classes)
)
return net
|
Python
| 0.000006
|
@@ -3086,16 +3086,72 @@
pu()%5D):%0A
+ if isinstance(ctx, mx.Context):%0A ctx = %5Bctx%5D%0A
acc
|
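The fix in isolation: `evaluate_accuracy` now normalizes a bare `mx.Context` into a list, mirroring what `train` already did. The shape of that guard as a standalone helper (the name is illustrative):

import mxnet as mx

def as_ctx_list(ctx):
    # accept either a single Context or a list of them
    return [ctx] if isinstance(ctx, mx.Context) else ctx

print(as_ctx_list(mx.cpu()))        # [cpu(0)]
print(as_ctx_list([mx.cpu()]))      # passed through unchanged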
9a986e0c1b0cebc494a709d9c12eca7f69b9fd41
|
Fix divide by zero issue when timestamp start and end are the same
|
libmproxy/console/common.py
|
libmproxy/console/common.py
|
import urwid
import urwid.util
from .. import utils, flow
VIEW_LIST = 0
VIEW_FLOW = 1
VIEW_FLOW_REQUEST = 0
VIEW_FLOW_RESPONSE = 1
def highlight_key(s, k):
l = []
parts = s.split(k, 1)
if parts[0]:
l.append(("text", parts[0]))
l.append(("key", k))
if parts[1]:
l.append(("text", parts[1]))
return l
KEY_MAX = 30
def format_keyvals(lst, key="key", val="text", indent=0):
"""
Format a list of (key, value) tuples.
If key is None, it's treated specially:
- We assume a sub-value, and add an extra indent.
- The value is treated as a pre-formatted list of directives.
"""
ret = []
if lst:
maxk = min(max(len(i[0]) for i in lst if i and i[0]), KEY_MAX)
for i, kv in enumerate(lst):
if kv is None:
ret.append(urwid.Text(""))
else:
cols = []
# This cumbersome construction process is here for a reason:
# Urwid < 1.0 barfs if given a fixed size column of size zero.
if indent:
cols.append(("fixed", indent, urwid.Text("")))
cols.extend([
(
"fixed",
maxk,
urwid.Text([(key, kv[0] or "")])
),
kv[1] if isinstance(kv[1], urwid.Widget) else urwid.Text([(val, kv[1])])
])
ret.append(urwid.Columns(cols, dividechars = 2))
return ret
def shortcuts(k):
if k == " ":
k = "page down"
elif k == "j":
k = "down"
elif k == "k":
k = "up"
return k
def fcol(s, attr):
s = unicode(s)
return (
"fixed",
len(s),
urwid.Text(
[
(attr, s)
]
)
)
if urwid.util.detected_encoding:
SYMBOL_REPLAY = u"\u21ba"
SYMBOL_RETURN = u"\u2190"
else:
SYMBOL_REPLAY = u"[r]"
SYMBOL_RETURN = u"<-"
def raw_format_flow(f, focus, extended, padding):
f = dict(f)
pile = []
req = []
if extended:
req.append(
fcol(
utils.format_timestamp(f["req_timestamp"]),
"highlight"
)
)
else:
req.append(fcol(">>" if focus else " ", "focus"))
if f["req_is_replay"]:
req.append(fcol(SYMBOL_REPLAY, "replay"))
req.append(fcol(f["req_method"], "method"))
preamble = sum(i[1] for i in req) + len(req) -1
if f["intercepting"] and not f["req_acked"]:
uc = "intercept"
elif f["resp_code"] or f["err_msg"]:
uc = "text"
else:
uc = "title"
req.append(
urwid.Text([(uc, f["req_url"])])
)
pile.append(urwid.Columns(req, dividechars=1))
resp = []
resp.append(
("fixed", preamble, urwid.Text(""))
)
if f["resp_code"]:
codes = {
2: "code_200",
3: "code_300",
4: "code_400",
5: "code_500",
}
ccol = codes.get(f["resp_code"]/100, "code_other")
resp.append(fcol(SYMBOL_RETURN, ccol))
if f["resp_is_replay"]:
resp.append(fcol(SYMBOL_REPLAY, "replay"))
resp.append(fcol(f["resp_code"], ccol))
if f["intercepting"] and f["resp_code"] and not f["resp_acked"]:
rc = "intercept"
else:
rc = "text"
if f["resp_ctype"]:
resp.append(fcol(f["resp_ctype"], rc))
resp.append(fcol(f["resp_clen"], rc))
resp.append(fcol(f["resp_rate"], rc))
elif f["err_msg"]:
resp.append(fcol(SYMBOL_RETURN, "error"))
resp.append(
urwid.Text([
(
"error",
f["err_msg"]
)
])
)
pile.append(urwid.Columns(resp, dividechars=1))
return urwid.Pile(pile)
class FlowCache:
@utils.LRUCache(200)
def format_flow(self, *args):
return raw_format_flow(*args)
flowcache = FlowCache()
def format_flow(f, focus, extended=False, hostheader=False, padding=2):
d = dict(
intercepting = f.intercepting,
req_timestamp = f.request.timestamp_start,
req_is_replay = f.request.is_replay(),
req_method = f.request.method,
req_acked = f.request.reply.acked,
req_url = f.request.get_url(hostheader=hostheader),
err_msg = f.error.msg if f.error else None,
resp_code = f.response.code if f.response else None,
)
if f.response:
if f.response.content:
contentdesc = utils.pretty_size(len(f.response.content))
elif f.response.content == flow.CONTENT_MISSING:
contentdesc = "[content missing]"
else:
contentdesc = "[no content]"
delta = f.response.timestamp_end - f.response.timestamp_start
size = len(f.response.content) + f.response.get_header_size()
rate = utils.pretty_size(size / delta)
d.update(dict(
resp_code = f.response.code,
resp_is_replay = f.response.is_replay(),
resp_acked = f.response.reply.acked,
resp_clen = contentdesc,
resp_rate = "{0}/s".format(rate),
))
t = f.response.headers["content-type"]
if t:
d["resp_ctype"] = t[0].split(";")[0]
else:
d["resp_ctype"] = ""
return flowcache.format_flow(tuple(sorted(d.items())), focus, extended, padding)
def int_version(v):
SIG = 3
v = urwid.__version__.split("-")[0].split(".")
x = 0
for i in range(min(SIG, len(v))):
x += int(v[i]) * 10**(SIG-i)
return x
# We have to do this to be portable over 0.9.8 and 0.9.9 If compatibility
# becomes a pain to maintain, we'll just mandate 0.9.9 or newer.
class WWrap(urwid.WidgetWrap):
if int_version(urwid.__version__) >= 990:
def set_w(self, x):
self._w = x
def get_w(self):
return self._w
w = property(get_w, set_w)
|
Python
| 0.000018
|
@@ -5015,22 +5015,47 @@
e(size /
+ (
delta
+ if delta %3E 0 else 1 )
)%0A%0A
|
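The guard from this diff, isolated: when request and response timestamps coincide, fall back to a 1-second window so the rate computation never divides by zero. The helper name is illustrative:

def transfer_rate(size, delta):
    return size / (delta if delta > 0 else 1)

print(transfer_rate(1024, 0.0))     # 1024.0 instead of ZeroDivisionError
print(transfer_rate(1024, 0.5))     # 2048.0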
e6a0e0c6a73c96b55784b3b567a1564127a86db1
|
added rand gauss util
|
utils.py
|
utils.py
|
#!/usr/bin/python
import os, json
from parameters import Conf, Knows, Settings, Controller, isKnows, isConf, isSettings, isButton
from smbool import SMBool
class ParamsLoader(object):
@staticmethod
def factory(params):
# can be a json, a python file or a dict with the parameters
if type(params) is str:
ext = os.path.splitext(params)
if ext[1].lower() == '.json':
return ParamsLoaderJson(params)
else:
print("wrong parameters file type: {}".format(ext[1]))
sys.exit(-1)
elif type(params) is dict:
return ParamsLoaderDict(params)
else:
print("wrong parameters input, is neither a string nor a json file name: {}".format(params))
sys.exit(-1)
def __init__(self):
if 'Knows' not in self.params:
self.params['Knows'] = {}
if 'Conf' not in self.params:
self.params['Conf'] = {}
if 'Settings' not in self.params:
self.params['Settings'] = {}
if 'Controller' not in self.params:
self.params['Controller'] = {}
def load(self):
# update the parameters in the parameters classes: Conf, Knows, Settings
# Conf
for param in self.params['Conf']:
if isConf(param):
setattr(Conf, param, self.params['Conf'][param])
# Knows
for param in self.params['Knows']:
if isKnows(param) and hasattr(Knows, param):
setattr(Knows, param, SMBool(self.params['Knows'][param][0],
self.params['Knows'][param][1],
['{}'.format(param)]))
# Settings
## hard rooms
for hardRoom in ['X-Ray', 'Gauntlet']:
if hardRoom in self.params['Settings']:
Settings.hardRooms[hardRoom] = Settings.hardRoomsPresets[hardRoom][self.params['Settings'][hardRoom]]
## bosses
for boss in ['Kraid', 'Phantoon', 'Draygon', 'Ridley', 'MotherBrain']:
if boss in self.params['Settings']:
Settings.bossesDifficulty[boss] = Settings.bossesDifficultyPresets[boss][self.params['Settings'][boss]]
## hellruns
for hellRun in ['Ice', 'MainUpperNorfair', 'LowerNorfair']:
if hellRun in self.params['Settings']:
Settings.hellRuns[hellRun] = Settings.hellRunPresets[hellRun][self.params['Settings'][hellRun]]
# Controller
for button in self.params['Controller']:
if isButton(button):
setattr(Controller, button, self.params['Controller'][button])
def dump(self, fileName):
with open(fileName, 'w') as jsonFile:
json.dump(self.params, jsonFile)
def printToScreen(self):
print("self.params: {}".format(self.params))
print("loaded knows: ")
for knows in Knows.__dict__:
if isKnows(knows):
print("{}: {}".format(knows, Knows.__dict__[knows]))
print("loaded settings:")
for setting in Settings.__dict__:
if isSettings(setting):
print("{}: {}".format(setting, Settings.__dict__[setting]))
print("loaded conf:")
for conf in Conf.__dict__:
if isConf(conf):
print("{}: {}".format(conf, Conf.__dict__[conf]))
print("loaded controller:")
for button in Controller.__dict__:
if isButton(button):
print("{}: {}".format(button, Controller.__dict__[button]))
class ParamsLoaderJson(ParamsLoader):
# when called from the test suite
def __init__(self, jsonFileName):
with open(jsonFileName) as jsonFile:
self.params = json.load(jsonFile)
super(ParamsLoaderJson, self).__init__()
class ParamsLoaderDict(ParamsLoader):
# when called from the website
def __init__(self, params):
self.params = params
super(ParamsLoaderDict, self).__init__()
|
Python
| 0.999711
|
@@ -27,16 +27,24 @@
os, json
+, random
%0Afrom pa
@@ -159,16 +159,283 @@
SMBool%0A%0A
+# gauss random in %5B0, r%5D range%0A# the higher the slope, the less probable extreme values are%0Adef randGaussBounds(r, slope=8):%0A r = float(r)%0A n = int(round(random.gauss(r/2, r/slope), 0))%0A if n %3C 0:%0A n = 0%0A if n %3E r:%0A n = int(r)%0A return n%0A%0A
class Pa
|
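A quick sanity check of the `randGaussBounds` helper the diff adds: every draw stays inside [0, r], with the mass concentrated around r/2 (the clamping below is equivalent to the diff's two if-statements):

import random

def rand_gauss_bounds(r, slope=8):
    r = float(r)
    n = int(round(random.gauss(r / 2, r / slope), 0))
    return max(0, min(n, int(r)))

samples = [rand_gauss_bounds(10) for _ in range(1000)]
assert all(0 <= s <= 10 for s in samples)
print(sum(samples) / len(samples))          # close to 5.0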
c967f59da33dec46ccbe73d7e7878e01715da236
|
Add docstrings and comments to video module
|
video.py
|
video.py
|
import graphics
class VideoController():
def power_on(self):
print("VideoController.power_on()")
self._create_terminal_window()
def _create_terminal_window(self):
win = graphics.GraphWin("RichEmu86", 890, 408)
win.setBackground("black")
s = "RichEmu86 " * 8
i = 0
x = 446
y = 12
height = 16
fontSize = 14
for i in range(0, 25):
t = graphics.Text(graphics.Point(x, y), s)
t.setSize(fontSize)
t.setFace("courier")
t.setTextColor("white")
t.draw(win)
y = y + height
win.getMouse()
win.close()
|
Python
| 0
|
@@ -40,32 +40,142 @@
():%0A
-%0A def power_on(self):
+ %22%22%22Represents a computer system's video controller.%22%22%22%0A %0A def power_on(self):%0A %22%22%22Powers on this video controller.%22%22%22
%0A
@@ -286,32 +286,106 @@
l_window(self):%0A
+ # Creates the terminal window using John Zelle's graphics module.%0A
win = gr
|
c705ef83607e09b2ed6e2b8d14aa6a6a7f9f57ea
|
Update __init__.py
|
newspaperdemo/__init__.py
|
newspaperdemo/__init__.py
|
from flask import Flask, request, render_template, redirect, url_for, json
from newspaper import Article
from xml.etree import ElementTree
app = Flask(__name__)
# Debug logging
import logging
import sys
# Defaults to stdout
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
try:
log.info('Logging to console')
except:
_, ex, _ = sys.exc_info()
log.error(ex.message)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/scrape')
def show_article():
url_to_clean = request.args.get('url_to_clean')
if not url_to_clean:
a= {
'authors': '',
'title': '',
'text': '',
'keywords': '',
'summary': ''
}
response = app.response_class(
response=json.dumps(a),
status=200,
mimetype='application/json'
)
return response
article = Article(url_to_clean)
article.download()
article.parse()
try:
html_string = ElementTree.tostring(article.clean_top_node)
except:
html_string = "Error converting html to string."
try:
article.nlp()
except:
log.error("Couldn't process with NLP")
a = {
'authors': str(', '.join(article.authors)),
'title': article.title,
'text': article.text,
'keywords': str(', '.join(article.keywords)),
'summary': article.summary
}
response = app.response_class(
response=json.dumps(a),
status=200,
mimetype='application/json'
)
return response
|
Python
| 0.000072
|
@@ -65,15 +65,9 @@
_for
-, json
%0A
+
from
@@ -482,14 +482,21 @@
e('/
-scrape
+articles/show
')%0Ad
@@ -603,281 +603,41 @@
-a= %7B%0A%09 'authors': '', %0A%09 'title': '',%0A%09 'text': '',%0A%09 'keywords': '',%0A%09 'summary': ''%0A%09 %7D%0A%09response = app.response_class(%0A response=json.dumps(a),%0A status=200,%0A mimetype='application/json'%0A )%0A return response %0A
+return redirect(url_for('index'))
%0A%0A
@@ -953,17 +953,56 @@
a = %7B%0A
-%09
+ 'html': html_string, %0A
'author
@@ -1039,17 +1039,24 @@
ors)), %0A
-%09
+
'title'
@@ -1072,17 +1072,24 @@
.title,%0A
-%09
+
'text':
@@ -1103,17 +1103,116 @@
e.text,%0A
-%09
+ 'top_image': article.top_image,%0A 'videos': str(', '.join(article.movies)),%0A
'keywor
@@ -1250,17 +1250,24 @@
ords)),%0A
-%09
+
'summar
@@ -1290,158 +1290,93 @@
ary%0A
-%09 %7D%0A response = app.response_class(%0A
+
+%7D%0A
re
-sponse=json.dumps(a),%0A status=200,%0A mimetype='application/json'%0A )%0A return response
+turn render_template('article/index.html', article=a, url=url_to_clean)
%0A
|
90c4340dcc578b8ea3532b46058772c4ddda56c0
|
add missing import
|
views.py
|
views.py
|
# -*- encoding: utf-8 ---------------------------------------------------------
from operator import attrgetter
from serve import *
from models import *
@app.route('/')
@app.route('/index.html')
def index():
datasets = Dataset.query.all()
return render_template('index.html', datasets=datasets)
@app.route('/dataset/<int:dataset_id>/topic_model/<int:topic_model_id>')
def topic_model(dataset_id, topic_model_id):
dataset = Dataset.query.get(dataset_id)
topic_model = TopicModel.query.get((dataset_id, topic_model_id))
top_ten_topics = sorted(topic_model.topics,
key=attrgetter('probability'), reverse=True)[:10]
return render_template('topic_model.html',
dataset=dataset,
topic_model=topic_model,
top_ten_topics=top_ten_topics)
@app.route('/dataset/<int:dataset_id>/topic_model/<int:topic_model_id>'
'/browse_topics.html')
def browse_topics(dataset_id, topic_model_id):
dataset = Dataset.query.get(dataset_id)
topic_model = TopicModel.query.get((dataset_id, topic_model_id))
topics = topic_model.topics
return render_template('browse_topics.html',
dataset=dataset,
topic_model=topic_model,
topics=topics)
@app.route('/dataset/<int:dataset_id>/topic_model/<int:topic_model_id>'
'/topic/<int:topic_id>')
def topic(dataset_id, topic_model_id, topic_id):
return render_template('topic.html',
dataset=Dataset.query.get(dataset_id),
topic_model=TopicModel.query.get((dataset_id,
topic_model_id)),
topic=Topic.query.get((dataset_id,
topic_model_id, topic_id)))
@app.route('/dataset/<int:dataset_id>/topic_model/<int:topic_model_id>/'
'term/<int:term_id>')
def term(dataset_id, topic_model_id, term_id):
term = Term.query.filter_by(dataset_id=dataset_id, id=term_id).all()[0]
topic_terms = [topic_term for topic_term in term.topic_terms
if topic_term.topic_model_id == topic_model_id]
similar_terms_l = [similar_term for similar_term in term.similar_terms_l
if similar_term.topic_model_id == topic_model_id]
return render_template('term.html',
dataset=Dataset.query.get(dataset_id),
topic_model=TopicModel.query.get((dataset_id,
topic_model_id)),
term=term,
topic_terms=topic_terms,
similar_terms_l=similar_terms_l)
@app.route('/dataset/<int:dataset_id>/topic_model/<int:topic_model_id>/'
'document/<int:document_id>')
def document(dataset_id, topic_model_id, document_id):
document = Document.query.get((document_id, dataset_id))
document_topics = [document_topic for document_topic
in document.document_topics if
document_topic.topic_model_id == topic_model_id]
document_similarities = [similarity for similarity in
document.similar_documents_l if
similarity.topic_model_id == topic_model_id]
return render_template('document.html',
dataset=Dataset.query.get(dataset_id),
topic_model=TopicModel.query.get((dataset_id,
topic_model_id)),
document=document,
document_topics=document_topics,
document_similarities=document_similarities)
|
Python
| 0.000042
|
@@ -75,16 +75,50 @@
-----%0A%0A%0A
+from flask import render_template%0A
from ope
|
0523b4f72344999c675f63b07d83bd509adcef2a
|
Check return code
|
t/py/script_tests.py
|
t/py/script_tests.py
|
'''
Created on Jan 30, 2015
@author: gaprice@lbl.gov
'''
from __future__ import print_function
import os
import inspect
from biokbase.Transform import script_utils
from biokbase.Transform.handler_utils import PlugIns
from biokbase.AbstractHandle.Client import AbstractHandle
from bzrlib.config import ConfigObj
import json
from biokbase.workspace.client import Workspace
import random
import sys
import subprocess
from deep_eq import deep_eq
KB_TOKEN = 'KB_AUTH_TOKEN'
TEST_CFG_FILE = 'test.cfg'
FILE_LOC = os.path.split(__file__)[0]
sys.path.append(os.path.join(FILE_LOC, '../')) # to import demo/setup
# this import is both resolved and used
from demo.setup import TransformVirtualEnv # @UnresolvedImport @UnusedImport
TRANSFORM_LOC = os.path.join(FILE_LOC, '../../')
# maybe this should be configurable...?
PLUGIN_CFG_LOC = os.path.join(TRANSFORM_LOC, 'plugins/configs')
class Test_Scripts(object):
@classmethod
def setup_class(cls):
cls.token = os.environ.get(KB_TOKEN)
if not cls.token:
raise ValueError('No token found in environment variable ' +
KB_TOKEN)
cls.plugins_cfg = PlugIns(PLUGIN_CFG_LOC)
cfg = ConfigObj(TEST_CFG_FILE)
for url in ['ws_url', 'shock_url', 'handle_url', 'ujs_url']:
setattr(cls, url, cfg.get(url))
tve = TransformVirtualEnv(FILE_LOC, 'venv', TRANSFORM_LOC,
keep_current_venv=False)
tve.activate_for_current_py_process()
def upload_file_to_shock_and_get_handle(self, test_file):
node_id = script_utils.upload_file_to_shock(
shock_service_url=self.shock_url,
filePath=test_file,
ssl_verify=False,
token=self.token)['id']
handle = AbstractHandle(self.handle_url, token=self.token)
handle_id = handle.persist_handle({'id': node_id,
'type': 'shock',
'url': self.shock_url
})
return node_id, handle_id
def create_random_workspace(self, prefix):
ws = Workspace(self.ws_url, token=self.token)
ws_name = prefix + '_' + str(random.random())[2:]
wsinfo = ws.create_workspace({'workspace': ws_name})
return wsinfo[1]
def run_convert_taskrunner(self, args):
input_args = self.plugins_cfg.get_handler_args("convert", args)
command_list = ['trns_convert_taskrunner.py']
for k in input_args:
command_list.append("--{0}".format(k))
command_list.append("{0}".format(input_args[k]))
task = subprocess.Popen(command_list, stderr=subprocess.PIPE)
return task.communicate()
def test_assyfile_to_cs_basic_ops(self):
this_function_name = sys._getframe().f_code.co_name
src_obj_name = 'foo'
dest_obj_name = 'foo2'
src_type = 'KBaseFile.AssemblyFile'
dest_type = 'KBaseGenomes.ContigSet'
test_file = os.path.join(FILE_LOC, 'test_files/sample.fa')
node_id, handle = self.upload_file_to_shock_and_get_handle(test_file)
test_json = os.path.join(FILE_LOC, 'test_files/AssemblyFile.json')
with open(test_json) as assyjsonfile:
assyjson = json.loads(assyjsonfile.read())
assyjson['assembly_file']['file']['url'] = self.shock_url
assyjson['assembly_file']['file']['id'] = node_id
assyjson['assembly_file']['file']['hid'] = handle
src_ws = self.create_random_workspace(this_function_name)
dest_ws = self.create_random_workspace(this_function_name)
ws = Workspace(self.ws_url, token=self.token)
objdata = ws.save_objects(
{'workspace': src_ws,
'objects': [{'name': src_obj_name,
'type': src_type,
'data': assyjson}]
})[0]
ref = str(objdata[6]) + '/' + str(objdata[0]) + '/' + str(objdata[4])
args = {'source_kbase_type': src_type,
'destination_kbase_type': dest_type,
'source_workspace_name': src_ws,
'destination_workspace_name': dest_ws,
'source_object_name': src_obj_name,
'destination_object_name': dest_obj_name,
'workspace_service_url': self.ws_url,
'ujs_service_url': self.ujs_url,
'working_directory': src_ws}
stdo, stde = self.run_convert_taskrunner(args)
if stdo:
raise TestException('Got unexpected data in standard out:\n' +
stdo)
if 'ERROR' in stde:
raise TestException('Error reported in stderr:\n' + stde)
if 'INFO - Conversion completed.' not in stde:
raise TestException('Script did not report as completed:\n' + stde)
newobj = ws.get_objects([{'workspace': dest_ws,
'name': dest_obj_name}])[0]
prov = newobj['provenance'][0]
assert prov['input_ws_objects'] == [ref]
assert prov['resolved_ws_objects'] == [ref]
assert prov['script'] ==\
'trns_transform_KBaseFile_AssemblyFile_to_KBaseGenomes_ContigSet'
assert prov['script_ver'] == '0.0.1'
with open(os.path.join(FILE_LOC, 'test_files/ContigSetOut.json')) as f:
expected = json.loads(f.read())
expected['fasta_ref'] = node_id
deep_eq(expected, newobj['data'], _assert=True)
class TestException(Exception):
pass
def main():
# use nosetests to run these tests; this is a hack to get them to run
# while testing the tests
Test_Scripts.setup_class()
ts = Test_Scripts()
methods = inspect.getmembers(ts, predicate=inspect.ismethod)
for meth in methods:
if meth[0].startswith('test_'):
print("\nRunning " + meth[0])
meth[1]()
if __name__ == '__main__':
main()
|
Python
| 0.00001
|
@@ -2735,22 +2735,24 @@
-return
+so, se =
task.co
@@ -2762,16 +2762,55 @@
nicate()
+%0A return so, se, task.returncode
%0A%0A de
@@ -4535,16 +4535,22 @@
do, stde
+, code
= self.
@@ -4940,16 +4940,110 @@
+ stde)
+%0A if code != 0:%0A raise TestException('Got non zero return code from script')
%0A%0A
|
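The essence of this fix as a self-contained check: capture the child's return code alongside its streams, so a nonzero exit can no longer pass silently:

import subprocess
import sys

task = subprocess.Popen([sys.executable, "-c", "import sys; sys.exit(3)"],
                        stderr=subprocess.PIPE)
_, stderr = task.communicate()
assert task.returncode == 3        # previously this value was discarded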
844270b6eee2eabfaa1b43c73ed8ffcab833586f
|
Bump to version 0.19.4
|
tabutils/__init__.py
|
tabutils/__init__.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
tabutils
~~~~~~~~
Provides methods for reading and processing data from tabular formatted files
Attributes:
CURRENCIES [tuple(unicode)]: Currency symbols to remove from decimal
strings.
ENCODING (str): Default file encoding.
DEFAULT_DATETIME (obj): Default datetime object
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from datetime import datetime as dt
__title__ = 'tabutils'
__package_name__ = 'tabutils'
__author__ = 'Reuben Cummings'
__description__ = 'tabular data utility methods'
__email__ = 'reubano@gmail.com'
__version__ = '0.19.3'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
CURRENCIES = ('$', '£', '€')
ENCODING = 'utf-8'
DEFAULT_DATETIME = dt(9999, 12, 31, 0, 0, 0)
|
Python
| 0
|
@@ -704,17 +704,17 @@
= '0.19.
-3
+4
'%0A__lice
|
c7665ba1988215fd27f0eb7f547a34104d8b921f
|
add MA
|
nsnqtlib/tkpi/momentum.py
|
nsnqtlib/tkpi/momentum.py
|
import numpy as np
#MACD related indicators
#Moving average: there will be unstable period in the beginning
#input: list of close price
def EMA(close=[], timeperiod=10):
ema = []
current = close[0]
for i in close:
current = (current*(timeperiod-1)+ 2*i)/(timeperiod+1)
ema.append(current)
return ema
def DIF(close=[], fastperiod=12, slowperiod=26):
dif = []
s_ema = EMA(close, slowperiod)
f_ema = EMA(close, fastperiod)
for i in range(len(close)):
dif.append(f_ema[i]-s_ema[i])
return dif
def DEA(close=[], fastperiod=12, slowperiod=26, signalperiod=9):
dif = DIF(close,fastperiod,slowperiod)
return EMA(dif, signalperiod)
def MACD(close=[], fastperiod=12, slowperiod=26, signalperiod=9):
macd = []
dif = DIF(close,fastperiod,slowperiod)
dea = EMA(dif, signalperiod)
for i in range(len(close)):
macd.append(2*(dif[i]-dea[i]))
return macd
# Sharpe ratio: mean return rate / standard deviation of return rates
#Sharpe Ratio: Sharpe ratio = Excess return / Standard deviation
#input:
# erp: Portfolio expected return rate
# within fixed timeperiod (e.g.yearly/monthly)
# rf: risk-free/expect rate of interest
def sharpe(erp=[], rf=0):
a = np.array(erp)
return (np.mean(a)-rf)/np.std(a,ddof=1)
# Maximum drawdown ratio
#Max draw down
#input:
# worth: net worth ratio history
# period: To be added....
# >0 means short-term MADD within input period -> worth list
def MDD(worth=[],period=0):
current_mdd = mdd = 0
for i in range(len(worth)):
if period>0 and i>period:
j = i-period
else:
j = 0
if i > 0:
current_mdd = max(worth[int(j):int(i)])-worth[i]
if mdd < current_mdd:
mdd = current_mdd
return mdd
#To be added:
#DMI related indicators
#KDJ
#RSI
#BIAS
|
Python
| 0.999026
|
@@ -14,16 +14,241 @@
y as np%0A
+import pandas as pd%0A%0A%0A#Moving average%0Adef MA(data=%5B%5D, timeperiod=10):%0A ma = %5B%5D%0A ma_a = pd.DataFrame(data,columns=%5B'MA'%5D).rolling(window=timeperiod).mean()%0A for i in ma_a%5B'MA'%5D:%0A ma.append(i)%0A return ma%0A
%0A#MACD r
@@ -2110,14 +2110,126 @@
%0A#BIAS%0A%0A
+if __name__ == '__main__':%0A test = %5B11.9,10.8,20.0,9.1,7.9,4.1,31.2,16,29.9,15.1,11,12%5D%0A print(MA(test,3))
%0A %0A
|
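The MA function added by the diff is a simple moving average built on pandas' rolling mean; the first timeperiod-1 entries are NaN while the window fills. A compact equivalent, assuming pandas is installed:

import pandas as pd

def MA(data=[], timeperiod=10):
    # Rolling mean of the close prices; the first timeperiod-1 values are NaN
    # while the window is still filling.
    return pd.DataFrame(data, columns=['MA']).rolling(window=timeperiod).mean()['MA'].tolist()

if __name__ == '__main__':
    # Same input as the test added in the diff.
    test = [11.9, 10.8, 20.0, 9.1, 7.9, 4.1, 31.2, 16, 29.9, 15.1, 11, 12]
    print(MA(test, 3))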
1045f8a2cedf86a401a2868f4092f5d416e8f3e9
|
Bump to 0.26
|
octave_kernel/__init__.py
|
octave_kernel/__init__.py
|
"""An Octave kernel for Jupyter"""
__version__ = '0.25.1'
|
Python
| 0.000005
|
@@ -51,9 +51,9 @@
'0.2
-5.1
+6.0
'%0A
|
389f892fcd3903b936226a2e464d26a0df359e7b
|
Determine what wheels should be built, don't attempt to build them if they've been built, don't continue if they fail to build.
|
wheel.py
|
wheel.py
|
#!/usr/bin/env python
import os
import sys
import urllib2
import argparse
import subprocess
from os.path import abspath, dirname, join, basename
import yaml
WHEELS_DIST_DIR = abspath(join(dirname(__file__), 'wheels', 'dist'))
WHEELS_BUILD_DIR = abspath(join(dirname(__file__), 'wheels', 'build'))
WHEELS_YML = join(WHEELS_BUILD_DIR, 'wheels.yml')
def main():
parser = argparse.ArgumentParser(description='Build wheels in Docker')
parser.add_argument('--image', '-i', help='Build only on this image')
parser.add_argument('package', help='Package name (in wheels.yml)')
args = parser.parse_args()
with open(WHEELS_YML, 'r') as handle:
wheels = yaml.load(handle)
assert args.package in wheels['packages'], 'Not in %s: %s' % (WHEELS_YML, args.package)
if args.image is not None:
images = [args.image]
else:
try:
imageset = wheels['packages'][args.package]['imageset']
images = wheels['imagesets'][imageset]
except:
images = wheels['imagesets']['default']
src_cache = join(WHEELS_BUILD_DIR, 'cache')
if not os.path.exists(src_cache):
os.makedirs(src_cache)
src_url = wheels['packages'][args.package]['src']
tgz = join(src_cache, basename(src_url))
if not os.path.exists(tgz):
with open(tgz, 'w') as handle:
r = urllib2.urlopen(src_url, None, 15)
handle.write(r.read())
for image in images:
try:
buildpy = wheels['images'][image]['buildpy']
except:
buildpy = 'python'
cmd = [ 'docker', 'run',
'--volume=%s/:/host/dist/' % WHEELS_DIST_DIR,
'--volume=%s/:/host/build/:ro' % WHEELS_BUILD_DIR,
image, buildpy, '-u', '/host/build/build.py', args.package, image ]
print 'Running docker:', ' '.join(cmd)
subprocess.check_call(cmd)
if __name__ == '__main__':
main()
|
Python
| 0.001012
|
@@ -138,16 +138,24 @@
basename
+, exists
%0A%0Aimport
@@ -787,24 +787,83 @@
s.package)%0A%0A
+ version = wheels%5B'packages'%5D%5Bargs.package%5D%5B'version'%5D%0A%0A
if args.
@@ -1177,32 +1177,24 @@
%0A if not
-os.path.
exists(src_c
@@ -1347,16 +1347,8 @@
not
-os.path.
exis
@@ -1490,28 +1490,1491 @@
-for image in images:
+plat_cache = join(src_cache, '__platform_cache.json')%0A if not exists(plat_cache):%0A open(plat_cache, 'w').write(yaml.dump(%7B%7D))%0A platforms = yaml.safe_load(open(plat_cache).read())%0A%0A expected = %7B%7D%0A%0A for image in images:%0A plat_name = wheels%5B'images'%5D.get(image, %7B%7D).get('plat_name', None)%0A if plat_name is None:%0A if image not in platforms:%0A print 'Caching platform tag for image: %25s' %25 image%0A cmd = %5B 'docker', 'run', image, '/python/2.6-ucs2/bin/python',%0A '-c', 'import wheel.pep425tags; print '%0A 'wheel.pep425tags.get_platforms(major_only=True)%5B0%5D' %5D%0A platforms%5Bimage%5D = subprocess.check_output(cmd).strip()%0A print 'Platform tag for %25s is: %25s' %25 (image, platforms%5Bimage%5D)%0A open(plat_cache, 'w').write(yaml.dump(platforms))%0A plat_name = platforms%5Bimage%5D%0A expected%5Bimage%5D = %5B%5D%0A for py in ('26', '27'):%0A for abi_flags in ('m', 'mu'):%0A whl = '%25s-%25s-cp%25s-cp%25s%25s-%25s.whl' %25 (args.package, version, py, py, abi_flags, plat_name)%0A expected%5Bimage%5D.append(join(WHEELS_DIST_DIR, args.package, whl))%0A%0A for image in images:%0A for f in expected%5Bimage%5D:%0A if not exists(f):%0A break%0A print '%25s exists...' %25 f%0A else:%0A print 'Skipping build on %25s because all expected wheels exist' %25 image%0A continue
%0A
@@ -3415,16 +3415,321 @@
ll(cmd)%0A
+ missing = %5B%5D%0A for f in expected%5Bimage%5D:%0A if not exists(f):%0A missing.append(f)%0A if missing:%0A print 'The following expected wheels were not found after the attempted build on %25s:' %25 image%0A print '%5Cn'.join(missing)%0A sys.exit(1)
%0A%0Aif __n
|
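The heart of the diff above is predicting the PEP 425-style wheel filenames each image should produce, so a build can be skipped when every expected wheel already exists. A sketch of that naming logic (the package, version, and platform tag below are hypothetical example values):

import os.path

def expected_wheels(package, version, plat_name, dist_dir,
                    pythons=('26', '27'), abi_flags=('m', 'mu')):
    # Build the cp{py}-cp{py}{abi}-{platform} wheel names the script looks for.
    return [os.path.join(dist_dir, package,
                         '%s-%s-cp%s-cp%s%s-%s.whl'
                         % (package, version, py, py, abi, plat_name))
            for py in pythons for abi in abi_flags]

if __name__ == '__main__':
    for whl in expected_wheels('psycopg2', '2.6.1', 'linux_x86_64', '/wheels/dist'):
        print('%s exists: %s' % (whl, os.path.exists(whl)))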
bcb3ecebfec6c38a57e82b2c149d045546fbcc72
|
fix broken test for py26
|
locust/test/test_runners.py
|
locust/test/test_runners.py
|
import unittest
import gevent
from gevent.queue import Queue
from gevent import sleep
from locust.runners import LocalLocustRunner, MasterLocustRunner, SlaveLocustRunner
from locust.core import Locust, task, TaskSet
from locust.rpc import Message
from locust.stats import RequestStats, global_stats
from locust.main import parse_options
from locust.test.testcases import LocustTestCase
from locust import events
def mocked_rpc_server():
class MockedRpcServer(object):
queue = Queue()
outbox = []
def __init__(self, host, port):
pass
@classmethod
def mocked_send(cls, message):
cls.queue.put(message.serialize())
def recv(self):
results = self.queue.get()
return Message.unserialize(results)
def send(self, message):
self.outbox.append(message.serialize())
return MockedRpcServer
class TestMasterRunner(LocustTestCase):
def setUp(self):
global_stats.reset_all()
self._slave_report_event_handlers = [h for h in events.slave_report._handlers]
parser, _, _ = parse_options()
args = [
"--clients", "10",
"--hatch-rate", "10"
]
opts, _ = parser.parse_args(args)
self.options = opts
def tearDown(self):
events.slave_report._handlers = self._slave_report_event_handlers
def test_slave_connect(self):
import mock
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "zeh_fake_client1"))
sleep(0)
self.assertEqual(1, len(master.clients))
self.assertTrue("zeh_fake_client1" in master.clients, "Could not find fake client in master instance's clients dict")
server.mocked_send(Message("client_ready", None, "zeh_fake_client2"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client3"))
server.mocked_send(Message("client_ready", None, "zeh_fake_client4"))
sleep(0)
self.assertEqual(4, len(master.clients))
server.mocked_send(Message("quit", None, "zeh_fake_client3"))
sleep(0)
self.assertEqual(3, len(master.clients))
def test_slave_stats_report_median(self):
import mock
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
server.mocked_send(Message("client_ready", None, "fake_client"))
sleep(0)
master.stats.get("/", "GET").log(100, 23455)
master.stats.get("/", "GET").log(800, 23455)
master.stats.get("/", "GET").log(700, 23455)
data = {"user_count":1}
events.report_to_master.fire(client_id="fake_client", data=data)
master.stats.clear_all()
server.mocked_send(Message("stats", data, "fake_client"))
sleep(0)
s = master.stats.get("/", "GET")
self.assertEqual(700, s.median_response_time)
def test_spawn_zero_locusts(self):
class MyTaskSet(TaskSet):
@task
def my_task(self):
pass
class MyTestLocust(Locust):
task_set = MyTaskSet
min_wait = 100
max_wait = 100
runner = LocalLocustRunner([MyTestLocust], self.options)
timeout = gevent.Timeout(2.0)
timeout.start()
try:
runner.start_hatching(0, 1, wait=True)
runner.greenlet.join()
except gevent.Timeout:
self.fail("Got Timeout exception. A locust seems to have been spawned, even though 0 was specified.")
finally:
timeout.cancel()
def test_spawn_uneven_locusts(self):
"""
Tests that we can accurately spawn a certain number of locusts, even if it's not an
even number of the connected slaves
"""
import mock
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
sleep(0)
master.start_hatching(7, 7)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(7, num_clients, "Total number of locusts that would have been spawned is not 7")
def test_spawn_fewer_locusts_than_slaves(self):
import mock
class MyTestLocust(Locust):
pass
with mock.patch("locust.rpc.rpc.Server", mocked_rpc_server()) as server:
master = MasterLocustRunner(MyTestLocust, self.options)
for i in range(5):
server.mocked_send(Message("client_ready", None, "fake_client%i" % i))
sleep(0)
master.start_hatching(2, 2)
self.assertEqual(5, len(server.outbox))
num_clients = 0
for msg in server.outbox:
num_clients += Message.unserialize(msg).data["num_clients"]
self.assertEqual(2, num_clients, "Total number of locusts that would have been spawned is not 2")
def test_exception_in_task(self):
class HeyAnException(Exception):
pass
class MyLocust(Locust):
class task_set(TaskSet):
@task
def will_error(self):
raise HeyAnException(":(")
runner = LocalLocustRunner([MyLocust], self.options)
l = MyLocust()
l._catch_exceptions = False
self.assertRaises(HeyAnException, l.run)
self.assertRaises(HeyAnException, l.run)
self.assertEqual(1, len(runner.exceptions))
hash_key, exception = runner.exceptions.popitem()
self.assertIn("traceback", exception)
self.assertIn("HeyAnException" , exception["traceback"])
self.assertEqual(2, exception["count"])
class TestMessageSerializing(unittest.TestCase):
def test_message_serialize(self):
msg = Message("client_ready", None, "my_id")
rebuilt = Message.unserialize(msg.serialize())
self.assertEqual(msg.type, rebuilt.type)
self.assertEqual(msg.data, rebuilt.data)
self.assertEqual(msg.node_id, rebuilt.node_id)
|
Python
| 0.000001
|
@@ -6606,18 +6606,20 @@
f.assert
-In
+True
(%22traceb
@@ -6622,17 +6622,19 @@
aceback%22
-,
+ in
excepti
@@ -6660,10 +6660,12 @@
sert
-In
+True
(%22He
@@ -6678,17 +6678,18 @@
eption%22
-,
+in
excepti
|
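The fix swaps assertIn for assertTrue because unittest.TestCase only gained assertIn in Python 2.7. An alternative to rewriting each assertion is a small compatibility shim; this sketch illustrates that approach and is not taken from the locust codebase:

import unittest

class CompatTestCase(unittest.TestCase):
    # Python 2.6's TestCase lacks assertIn (added in 2.7), so define a fallback.
    if not hasattr(unittest.TestCase, 'assertIn'):
        def assertIn(self, member, container, msg=None):
            self.assertTrue(member in container, msg)

class ExceptionDictTest(CompatTestCase):
    def test_traceback_key(self):
        exception = {'traceback': 'HeyAnException: :(', 'count': 2}
        self.assertIn('traceback', exception)

if __name__ == '__main__':
    unittest.main()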
7567f63b5bf967a8dc2b370c0deecef41ded3dcd
|
add LoadBalancer API
|
Client.py
|
Client.py
|
#!/usr/bin/env python
# -*- coding:utf-8 -*-
#
#
# Copyright 2012 Netsco Inc.
# Copyright 2012 Minsu Kang
from urllib import quote_plus as quote
from urllib2 import urlopen, HTTPError
from base64 import b64encode
import hmac
import hashlib
import json
import re
UCLOUD_API_KEY = ''
UCLOUD_SECRET = ''
UCLOUD_API_URL = 'https://api.ucloudbiz.olleh.com/server/v1/client/api'
class Client(object):
def __init__(self, api_key=UCLOUD_API_KEY, secret=UCLOUD_SECRET):
self.api_url = UCLOUD_API_URL
self.api_key = api_key
self.secret = secret
def request(self, command, args={}):
if not command:
raise RuntimeError('Command Missing !!')
args['command'] = command
args['response'] = 'json'
args['apiKey'] = self.api_key
query = '&'.join(
'='.join([k, quote(args[k])]) for k in sorted(args.keys()))
signature = b64encode(hmac.new(
self.secret,
msg=query.lower(),
digestmod=hashlib.sha1
).digest())
#-------------------------------------------------------
# reconstruct : command + params + api_key + signature
#-------------------------------------------------------
query = '='.join(["command", quote(args["command"])]) + '&'
args.pop("command")
api_key = '='.join(["apiKey", quote(args["apiKey"])])
args.pop("apiKey")
query += '&'.join(
'='.join([k, quote(args[k])]) for k in sorted(args.keys()))
query += '&' + api_key
query += '&signature=' + quote(signature)
#-------------------------------------------------------
try:
response = urlopen(self.api_url + '?' + query)
except HTTPError as e:
raise RuntimeError("%s" % e)
decoded = json.loads(response.read())
# response top node check
response_header = command.lower() + 'response'
if not response_header in decoded:
if 'errorresponse' in decoded:
raise RuntimeError(
"ERROR: " + decoded['errorresponse']['errortext'])
# try one more thing
response_header = command + 'response'
if not response_header in decoded:
raise RuntimeError("ERROR: Unable to parse the response")
# return the response content
return decoded.get(response_header, "")
""" command line : python Client.py command """
if __name__=="__main__":
import sys
if len(sys.argv) != 2:
print "usage: python Client.py command"
exit(-1)
command = sys.argv[1]
client = Client()
result = client.request(command)
print json.dumps(result, sort_keys=True, indent=4)
|
Python
| 0
|
@@ -312,18 +312,36 @@
_API_URL
+S
=
+ %7B%0A 'server' :
'https:
@@ -387,16 +387,96 @@
ent/api'
+,%0A 'lb' : 'https://api.ucloudbiz.olleh.com/loadbalancer/v1/client/api',%0A%7D
%0A%0Aclass
@@ -513,16 +513,37 @@
__(self,
+ api_type = 'server',
api_key
@@ -619,16 +619,27 @@
_API_URL
+S%5Bapi_type%5D
%0A
|
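The diff replaces the single endpoint constant with a dict keyed by api_type, so one client class can target either the server or load-balancer API while reusing the same CloudStack-style HMAC-SHA1 signing. A self-contained sketch of the endpoint table and signing step (the command name, key, and secret are hypothetical):

import base64
import hashlib
import hmac
try:
    from urllib import quote_plus as quote          # Python 2
except ImportError:
    from urllib.parse import quote_plus as quote    # Python 3

UCLOUD_API_URLS = {
    'server': 'https://api.ucloudbiz.olleh.com/server/v1/client/api',
    'lb': 'https://api.ucloudbiz.olleh.com/loadbalancer/v1/client/api',
}

def sign(args, secret):
    # Sort the parameters, lower-case the query, then HMAC-SHA1 and base64 it.
    query = '&'.join('='.join([k, quote(args[k])]) for k in sorted(args))
    digest = hmac.new(secret.encode('utf-8'), query.lower().encode('utf-8'),
                      hashlib.sha1).digest()
    return base64.b64encode(digest)

if __name__ == '__main__':
    args = {'command': 'listLoadBalancers', 'response': 'json', 'apiKey': 'MY_KEY'}
    print(UCLOUD_API_URLS['lb'] + '?signature=' + quote(sign(args, 'MY_SECRET')))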
dcc89b0d4757a4d2e0a541172ce3ded1f7e92014
|
Create CDAP's HDFS directory
|
package/scripts/master.py
|
package/scripts/master.py
|
import sys
import ambari_helpers as helpers
from resource_management import *
class Master(Script):
def install(self, env):
print 'Install the CDAP Master';
import params
self.configure(env)
# Add repository file
helpers.add_repo(params.files_dir + params.repo_file, params.os_repo_dir)
# Install any global packages
self.install_packages(env)
# Install package
helpers.package('cdap-master')
def start(self, env):
print 'Start the CDAP Master';
import params
self.configure(env)
Execute('service cdap-master start')
def stop(self, env):
print 'Stop the CDAP Master';
import params
self.configure(env)
Execute('service cdap-master stop')
def status(self, env):
print 'Status of the CDAP Master';
import params
self.configure(env)
Execute('service cdap-master status')
def configure(self, env):
print 'Configure the CDAP Master';
if __name__ == "__main__":
Master().execute()
|
Python
| 0
|
@@ -519,32 +519,98 @@
.configure(env)%0A
+ create_hdfs_dir(params.hdfs_namespace, params.hdfs_user, 755)%0A
Execute('ser
|
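The diff calls a create_hdfs_dir helper that is not shown in this record. A plausible implementation, assuming it shells out to the hdfs CLI through Ambari's Execute resource:

from resource_management import Execute

def create_hdfs_dir(path, user, mode):
    # Run as the HDFS superuser: create the path, then hand it to the
    # service user with the requested permissions.
    Execute('hdfs dfs -mkdir -p %s' % path, user='hdfs')
    Execute('hdfs dfs -chown %s %s' % (user, path), user='hdfs')
    Execute('hdfs dfs -chmod %s %s' % (mode, path), user='hdfs')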