commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f06d8ccbd066c432a91ffc127d347277253f95c3 | Add pynbs module | pynbs.py | pynbs.py |
from struct import Struct
from collections import namedtuple
__all__ = ['read', 'File', 'Header', 'Note', 'Layer', 'Instrument']
BYTE = Struct('<b')
SHORT = Struct('<h')
INT = Struct('<i')
Note = namedtuple('Note', ['tick', 'layer', 'instrument', 'key'])
Layer = namedtuple('Layer', ['id', 'name', 'volume'])
Instrument = namedtuple('Instrument', ['id', 'name', 'file', 'pitch', 'key'])
def read(filename):
return File(open(filename, 'rb'))
class Header(object):
def __init__(self, headers):
for key, value in headers.items():
setattr(self, key, value)
class File(object):
def __init__(self, buff):
self.filename = buff.name
self._buffer = buff
self.header = Header(self.parse_header())
self.notes = list(self.parse_notes())
self.layers = list(self.parse_layers())
self.instruments = list(self.parse_instruments())
self._buffer.close()
def read_numeric(self, fmt):
return fmt.unpack(self._buffer.read(fmt.size))[0]
def read_string(self):
length = self.read_numeric(INT)
return self._buffer.read(length).decode()
def _jump(self):
value = -1
while True:
jump = self.read_numeric(SHORT)
if not jump:
break
value += jump
yield value
def parse_header(self):
return {
'song_length': self.read_numeric(SHORT),
'song_layers': self.read_numeric(SHORT),
'song_name': self.read_string(),
'song_author': self.read_string(),
'original_author': self.read_string(),
'description': self.read_string(),
'tempo': self.read_numeric(SHORT) / 100.0,
'auto_save': self.read_numeric(BYTE) == 1,
'auto_save_duration': self.read_numeric(BYTE),
'time_signature': '{}/4'.format(self.read_numeric(BYTE)),
'minutes_spent': self.read_numeric(INT),
'left_clicks': self.read_numeric(INT),
'right_clicks': self.read_numeric(INT),
'blocks_added': self.read_numeric(INT),
'blocks_removed': self.read_numeric(INT),
'song_origin': self.read_string(),
}
def parse_notes(self):
for current_tick in self._jump():
for current_layer in self._jump():
yield Note(current_tick, current_layer,
self.read_numeric(BYTE), self.read_numeric(BYTE))
def parse_layers(self):
return (Layer(i, self.read_string(), self.read_numeric(BYTE))
for i in range(self.header.song_layers))
def parse_instruments(self):
for i in range(self.read_numeric(BYTE)):
yield Instrument(i, self.read_string(), self.read_string(),
self.read_numeric(BYTE), self.read_numeric(BYTE))
| Python | 0 | |
6050610a5cf34bc55a05fa3a8d8a38f6e8e743af | Add test_ko.py for "ko" locale (#9) | tests/localization_tests/test_ko.py | tests/localization_tests/test_ko.py | # -*- coding: utf-8 -*-
from pendulum import Pendulum
from .. import AbstractTestCase
from . import AbstractLocalizationTestCase
class KoTest(AbstractLocalizationTestCase, AbstractTestCase):
locale = 'ko'
def diff_for_humans(self):
with self.wrap_with_test_now():
d = Pendulum.now().sub_second()
self.assertEqual('1 ์ด ์ ', d.diff_for_humans())
d = Pendulum.now().sub_seconds(2)
self.assertEqual('2 ์ด ์ ', d.diff_for_humans())
d = Pendulum.now().sub_minute()
self.assertEqual('1 ๋ถ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_minutes(2)
self.assertEqual('2 ๋ถ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_hour()
self.assertEqual('1 ์๊ฐ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_hours(2)
self.assertEqual('2 ์๊ฐ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_day()
self.assertEqual('1 ์ผ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_days(2)
self.assertEqual('2 ์ผ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_week()
self.assertEqual('1 ์ฃผ์ผ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_weeks(2)
self.assertEqual('2 ์ฃผ์ผ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_month()
self.assertEqual('1 ๊ฐ์ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_months(2)
self.assertEqual('2 ๊ฐ์ ์ ', d.diff_for_humans())
d = Pendulum.now().sub_year()
self.assertEqual('1 ๋
์ ', d.diff_for_humans())
d = Pendulum.now().sub_years(2)
self.assertEqual('2 ๋
์ ', d.diff_for_humans())
d = Pendulum.now().add_second()
self.assertEqual('1 ์ด ํ', d.diff_for_humans())
d = Pendulum.now().add_second()
d2 = Pendulum.now()
self.assertEqual('1 ์ด ๋ค', d.diff_for_humans(d2))
self.assertEqual('1 ์ด ์', d2.diff_for_humans(d))
self.assertEqual('1 ์ด', d.diff_for_humans(d2, True))
self.assertEqual('2 ์ด', d2.diff_for_humans(d.add_second(), True))
| Python | 0 | |
013c6c57959fd8317ba8b27a2a467a37f0a1d8be | Create __init__.py | stock/__init__.py | stock/__init__.py | Python | 0.000429 | ||
1eb1851e4dec9c6425c3cf127e6c4ec5b0d3c987 | Add LineNumberTable tests | tests/test_line_number_attribute.py | tests/test_line_number_attribute.py | # -*- coding: utf-8 -*-
import os.path
import pytest
from jawa import ClassFile
@pytest.fixture
def cf():
sample_path = os.path.join(
os.path.dirname(__file__),
'data',
'HelloWorldDebug.class'
)
with open(sample_path, 'rb') as fin:
cf = ClassFile(fin)
yield cf
def test_exceptions_read(cf):
m = cf.methods.find_one(name='main')
a = m.code.attributes.find_one(name='LineNumberTable')
assert len(a.line_no) == 2
assert a.line_no[0] == (0, 3)
assert a.line_no[1] == (8, 4)
def test_exceptions_write(cf):
m = cf.methods.find_one(name='main')
a = m.code.attributes.find_one(name='LineNumberTable')
assert a.info == b'\x00\x02\x00\x00\x00\x03\x00\x08\x00\x04'
| Python | 0 | |
beb98425423e0278d9d4d5e39e6b5196146425a0 | add manual tests | manual_tests.py | manual_tests.py | import os
import sys
import copy
from estnin import estnin
from estnin import _estnin
from datetime import date
from timeit import default_timer as timer
def target(count):
# return [p for p in estnin.create(estnin.FEMALE, date(2199, 12, 1), 0)]
for _ in range(count):
#estnin(89912319991, set_checksum=False)
estnin(estnin.MIN, set_checksum=False)
return count
def print_person(person):
print('='*30)
print('to str: %s' % person)
print('is male: %s' % person.is_male)
print('is female: %s' % person.is_female)
print('date: %s' % person.date)
print('year: %s' % person.year)
print('month: %s' % person.month)
print('day: %s' % person.day)
print('sequence: %s' % person.sequence)
print('checksum: %s' % person.checksum)
def performance():
"""
[*] creating list of 91999 elements took: 3.30743s, 27815.870 elems/s
baseline
[*] creating list of 91999 elements took: 3.01910s, 30472.310 elems/s
__int__ optimization
[*] creating list of 91999 elements took: 2.83526s, 32448.128 elems/s
__str__ optimization
[*] creating list of 91999 elements took: 2.77732s, 33125.086 elems/s
create does not cast to str
"""
times = []
rounds = 20
for c in range(rounds):
print("\r[*] round: {}/{}".format(c+1, rounds), end='')
start = timer()
persons = target(10000)
end = timer()
times.append(end - start)
print()
total = sum(times)/len(times)
print("[*] times (ms):", ' '.join(map(lambda time: '{:.2f}'.format(time*100), times)))
print("[*] creating list of {} elements took: average {:.3f}ms, {:.3f} elems/s ".format(persons, total*100, persons/total))
def test():
e = estnin(estnin.MIN)
print_person(e)
o = copy.copy(e)
o.month += 1
print_person(o)
print((-e))
print_person(e)
if __name__ == '__main__':
try:
person = estnin.create(estnin.MALE, date(1800, 1, 1), 0)
print_person(person)
performance()
test()
person = estnin.create(estnin.MALE, date(1800, 1, 1), 0)
print(_estnin(3, date(1989, 8 ,28), 27, 1))
except KeyboardInterrupt:
sys.exit()
| Python | 0 | |
cf0021c664612082c669fc562d98759fcd7a4915 | Add setup.py | setup.py | setup.py | # Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='slackelot',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version='0.0.1',
description='A simple wrapper around the Slack web api to post messages',
long_description=long_description,
# The project's main homepage.
url='https://github.com/Chris-Graffagnino/slackelot',
# Author details
author='Chris Graffagnino',
author_email='graffwebdev@gmail.com',
# Choose your license
license='MIT',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Indicate who your project is intended for
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: MIT License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
# What does your project relate to?
keywords='slack',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
# packages=find_packages(exclude=['contrib', 'docs', 'tests']),
# Alternatively, if you want to distribute just a my_module.py, uncomment
# this:
py_modules=["slackelot"],
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['requests'],
# List additional groups of dependencies here (e.g. development
# dependencies). You can install these using the following syntax,
# for example:
# $ pip install -e .[dev,test]
# extras_require={
# 'dev': ['check-manifest'],
# 'test': ['coverage'],
# },
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
# package_data={
# 'sample': ['package_data.dat'],
# },
# Although 'package_data' is the preferred approach, in some case you may
# need to place data files outside of your packages. See:
# http://docs.python.org/3.4/distutils/setupscript.html#installing-additional-files # noqa
# In this case, 'data_file' will be installed into '<sys.prefix>/my_data'
data_files=[('my_data', ['data/data_file'])],
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
# entry_points={
# 'console_scripts': [
# 'sample=sample:main',
# ],
# },
)
| Python | 0.000001 | |
e785008aa948e929f7e3ecab3445c1347cb128f3 | Add setup.py file | setup.py | setup.py | #!/usr/bin/env python
from setuptools import find_packages
from setuptools import setup
setup(
name='django-signage',
version='0.0.1',
description='A lightweight web-based digital signage application',
license='BSD',
author='Jason Bittel',
author_email='jason.bittel@gmail.com',
url='https://github.com/jbittel/django-signage',
download_url='https://github.com/jbittel/django-signage',
packages=find_packages(),
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Programming Language :: Python',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python | 0.000001 | |
7a21009efda275372be7b801e07635bd2a9e47af | add setup.py | setup.py | setup.py | import os
from setuptools import setup, find_packages
import coupons
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='django-coupons',
version=coupons.__version__,
description='A reuseable Django application for coupon gereration and handling.',
long_description=read('README.md'),
license=read('LICENSE'),
author='byteweaver',
author_email='contact@byteweaver.net',
url='https://github.com/byteweaver/django-coupons',
packages=find_packages(),
install_requires=[
'django',
],
tests_require=[
'django-nose',
'coverage',
'django-coverage',
],
test_suite='coupons.tests',
)
| Python | 0.000001 | |
d0430066830350b3ef1621bb7c9d7ae7ae7045f4 | Add setup.py. | setup.py | setup.py | """
* Copyright (c) 2016. Mingyu Gao
* All rights reserved.
*
"""
import os
import re
# To use a consistent encoding
from codecs import open
# Always prefer setuptools over distutils
import setuptools
here = os.path.abspath(os.path.dirname(__file__))
package = 'easypyplot'
version = '0.0.0'
desc = 'Python matplotlib utilities and wrappers'
# Get version number
with open(os.path.join(here, package, '__init__.py'), encoding='utf-8') as fh:
matches = re.findall(r'^\s*__version__\s*=\s*[\'"]([^\'"]+)[\'"]',
fh.read(), re.M)
if matches:
version = matches[-1]
setuptools.setup(
name=package,
version=version,
description=desc,
author='Mingyu Gao',
author_email='mgao12@stanford.edu',
#long_description='',
#url='',
#license='',
packages=[package],
#install_requires=[],
)
| Python | 0 | |
3f1b78f5156a6ee18020340290dde24d02d01105 | Add basic setup.py | setup.py | setup.py | """
Flask-AtlassianConnect
-------------
This is the description for that library
"""
from setuptools import setup
setup(
name='AC-Flask-HipChat',
version='0.1-dev',
url='https://bitbucket.org/mrdon/ac-flask-hipchat',
license='APLv2',
author='Don Brown',
author_email='mrdon@twdata.org',
description='Atlassian Connect library based on Flask for HipChat',
long_description=__doc__,
packages=['ac_flask', 'ac_flask.hipchat'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'pymongo',
'redis',
'requests',
'PyJWT'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
) | Python | 0.000002 | |
ec25f1901d60814a62790cae2becfb6cac0f5e3e | add argparse dep | setup.py | setup.py | #!/usr/bin/python
from setuptools import setup, find_packages
setup(
name='configdb',
version='0.1',
description='database framework for configuration info',
author='ale',
author_email='ale@incal.net',
url='http://git.autistici.org/p/configdb',
install_requires=['argparse', 'Flask', 'formencode', 'inflect',
'SQLAlchemy>0.7'],
setup_requires=[],
zip_safe=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'configdb-api-server = configdb.server.wsgiapp:main',
'configdb-client = configdb.client.cli:main',
],
},
)
| #!/usr/bin/python
from setuptools import setup, find_packages
setup(
name='configdb',
version='0.1',
description='database framework for configuration info',
author='ale',
author_email='ale@incal.net',
url='http://git.autistici.org/p/configdb',
install_requires=['Flask', 'formencode', 'inflect', 'SQLAlchemy>0.7'],
setup_requires=[],
zip_safe=True,
packages=find_packages(),
entry_points={
'console_scripts': [
'configdb-api-server = configdb.server.wsgiapp:main',
'configdb-client = configdb.client.cli:main',
],
},
)
| Python | 0.000019 |
089b020b07fda88ba4679d161badb4423a75444e | add Python setup script | setup.py | setup.py | # Based on PyPA sample project's setup script.
"""Pymultihash installation script."""
import os.path
from setuptools import setup
# Load readme file into long description.
thisdir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(thisdir, 'README.rst')) as readme:
long_description = readme.read()
setup(
name='pymultihash',
version='0.5.0a1',
description="Python implementation of the multihash specification",
long_description=long_description,
url='https://github.com/ivilata/pymultihash',
author="Ivan Vilata-i-Balaguer",
author_email='ivan@selidor.net',
license='MIT',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Security :: Cryptography',
],
keywords="multihash hash digest format ASCII encoding",
packages=['multihash'],
install_requires=[],
extras_require={
'sha3': ['sha3'],
'blake2': ['pyblake2'],
},
)
| Python | 0 | |
67b5eb144dbe14c134657ccc807343f361c5e249 | add setup.py | setup.py | setup.py | from distutils.core import setup
version = '0.1.0.dev0'
setup(name='sworkflow',
version=version,
description='Simple Workflow',
url='https://github.com/mydeco-dev-team/sworkflow',
packages=['sworkflow'],
) | Python | 0.000001 | |
4161de9755b531825e83f684c964441bff9ffa7d | bump version to 1.0.0 | setup.py | setup.py | """
setup.py
"""
from setuptools import setup
version = "1.0.0"
setup(
name="nco",
version=version,
author="Joe Hamman",
author_email="jhamman@ucar.edu",
license="MIT",
description="""python bindings to NCO""",
packages=["nco"],
py_modules=["nco.nco", "nco.custom"],
url="https://github.com/nco/pynco",
download_url="https://raw2.github.com/nco/pynco/tarball/{0}".format(version),
keywords=["netcdf", "climate"],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Operating System :: POSIX",
"Programming Language :: Python",
],
python_requires='>=3.6',
tests_require=["dateutil", "h5py", "netcdf4", "numpy", "pytest", "scipy"],
)
| """
setup.py
"""
from setuptools import setup
version = "0.0.4"
setup(
name="nco",
version=version,
author="Joe Hamman",
author_email="jhamman@ucar.edu",
license="MIT",
description="""python bindings to NCO""",
packages=["nco"],
py_modules=["nco.nco", "nco.custom"],
url="https://github.com/nco/pynco",
download_url="https://raw2.github.com/nco/pynco/tarball/{0}".format(version),
keywords=["netcdf", "climate"],
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Utilities",
"Operating System :: POSIX",
"Programming Language :: Python",
],
python_requires='>=3.6',
tests_require=["dateutil", "h5py", "netcdf4", "numpy", "pytest", "scipy"],
)
| Python | 0 |
d0b1762a098e78ee9d012628ad96d6a18e8d2565 | Create setup.py | setup.py | setup.py | from distutils.core import setup
import py2exe
setup(console=["./server.py"], data_files=[('.', ['./config.ini'])])
| Python | 0.000001 | |
7b6610e03d4485575b18881c375f83e999d20459 | Add setup.py #1 | setup.py | setup.py | from setuptools import setup
import io
import os
here = os.path.abspath(os.path.dirname(__file__))
def read(*filenames, **kwargs):
encoding = kwargs.get('encoding', 'utf-8')
sep = kwargs.get('sep', '\n')
buf = []
for filename in filenames:
with io.open(filename, encoding=encoding) as f:
buf.append(f.read())
return sep.join(buf)
long_description = read('README.md')
setup(
name='mongopool',
version='0.1',
url='http://github.com/ubervu/mongopool/',
description='Tool that manages your mongo clients to different clusters and maps databases to clients',
long_description=long_description,
license='Apache Software License',
author='UberVU',
install_requires=['pymongo>=2.4'],
# author_email='jeff@jeffknupp.com',
packages=['mongopool'],
include_package_data=True,
platforms='any',
test_suite='nose.collector',
tests_require=['nose', 'mock'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Database',
'Topic :: Software Development :: Libraries :: Python Modules',
],
extras_require={
'testing': ['nose'],
}
)
| Python | 0 | |
c43802f4cc071c6baf31f8d1461ce8c96e38fa9e | Bump greenlet==0.4.0 to support ARM architectures. | setup.py | setup.py | #!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import os
import sys
# Add /usr/local/include to the path for macs, fixes easy_install for several packages (like gevent and pyyaml)
if sys.platform == 'darwin':
os.environ['C_INCLUDE_PATH'] = '/usr/local/include'
version = '0.1.0'
setup( name = 'pyon',
version = version,
description = 'OOI ION Python Capability Container and Core Modules',
url = 'https://github.com/ooici/pyon',
download_url = 'http://ooici.net/releases',
license = 'Apache 2.0',
author = 'Adam R. Smith',
author_email = 'a2smith@ucsd.edu',
keywords = ['ooici','ioncore', 'pyon'],
packages = find_packages(),
entry_points = {
'nose.plugins.0.10': [
'pycc_plugin=pyon.util.pycc_plugin:PYCC',
'timer_plugin=pyon.util.timer_plugin:TestTimer',
'insulate=pyon.util.insulate:Insulate',
'insulateslave=pyon.util.insulate:InsulateSlave',
'gevent_profiler=pyon.util.nose_gevent_profiler:TestGeventProfiler'
],
'console_scripts' : [
'pycc=scripts.pycc:entry',
'control_cc=scripts.control_cc:main',
'generate_interfaces=scripts.generate_interfaces:main',
'store_interfaces=scripts.store_interfaces:main',
'json_report=scripts.json_report:main',
'clear_couch=pyon.datastore.clear_couch_util:main',
]
},
dependency_links = [
'http://ooici.net/releases',
'https://github.com/ooici/gevent-profiler/tarball/master#egg=python-gevent-profiler'
],
test_suite = 'pyon',
package_data = {'': ['*.xml']},
install_requires = [
'greenlet==0.4.0',
'gevent==0.13.6',
'simplejson==2.1.6',
'msgpack-python==0.1.13',
'setproctitle==1.1.2',
'pyyaml==3.10',
'pika==0.9.5',
'httplib2>=0.7.2',
'pyzmq==2.1.7',
'gevent_zeromq==0.2.0',
'zope.interface',
'couchdb==0.8',
# 'lockfile==0.9.1',
'python-daemon==1.6',
'M2Crypto==0.21.1-pl1',
'coverage==3.5',
'nose==1.1.2',
'ipython==0.11',
'readline==6.2.1',
'mock==0.8',
'ndg-xacml==0.5.1',
'h5py==2.0.1', # see: http://www.hdfgroup.org/HDF5/release/obtain5.html
'python-gevent-profiler',
#'lxml==2.3.4', # Fails to compile on Linux ??!??
# DM related dependencies for 'tables'
# 'numpy==1.6.1',
# 'numexpr==1.4.2',
# 'cython==0.14.1',
# 'tables==2.3',
],
)
| #!/usr/bin/env python
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
import os
import sys
# Add /usr/local/include to the path for macs, fixes easy_install for several packages (like gevent and pyyaml)
if sys.platform == 'darwin':
os.environ['C_INCLUDE_PATH'] = '/usr/local/include'
version = '0.1.0'
setup( name = 'pyon',
version = version,
description = 'OOI ION Python Capability Container and Core Modules',
url = 'https://github.com/ooici/pyon',
download_url = 'http://ooici.net/releases',
license = 'Apache 2.0',
author = 'Adam R. Smith',
author_email = 'a2smith@ucsd.edu',
keywords = ['ooici','ioncore', 'pyon'],
packages = find_packages(),
entry_points = {
'nose.plugins.0.10': [
'pycc_plugin=pyon.util.pycc_plugin:PYCC',
'timer_plugin=pyon.util.timer_plugin:TestTimer',
'insulate=pyon.util.insulate:Insulate',
'insulateslave=pyon.util.insulate:InsulateSlave',
'gevent_profiler=pyon.util.nose_gevent_profiler:TestGeventProfiler'
],
'console_scripts' : [
'pycc=scripts.pycc:entry',
'control_cc=scripts.control_cc:main',
'generate_interfaces=scripts.generate_interfaces:main',
'store_interfaces=scripts.store_interfaces:main',
'json_report=scripts.json_report:main',
'clear_couch=pyon.datastore.clear_couch_util:main',
]
},
dependency_links = [
'http://ooici.net/releases',
'https://github.com/ooici/gevent-profiler/tarball/master#egg=python-gevent-profiler'
],
test_suite = 'pyon',
package_data = {'': ['*.xml']},
install_requires = [
# Patched greenlet to work on ARMS
'greenlet==0.3.1-p1',
'gevent==0.13.6',
'simplejson==2.1.6',
'msgpack-python==0.1.13',
'setproctitle==1.1.2',
'pyyaml==3.10',
'pika==0.9.5',
'httplib2>=0.7.2',
'pyzmq==2.1.7',
'gevent_zeromq==0.2.0',
'zope.interface',
'couchdb==0.8',
# 'lockfile==0.9.1',
'python-daemon==1.6',
'M2Crypto==0.21.1-pl1',
'coverage==3.5',
'nose==1.1.2',
'ipython==0.11',
'readline==6.2.1',
'mock==0.8',
'ndg-xacml==0.5.1',
'h5py==2.0.1', # see: http://www.hdfgroup.org/HDF5/release/obtain5.html
'python-gevent-profiler',
#'lxml==2.3.4', # Fails to compile on Linux ??!??
# DM related dependencies for 'tables'
# 'numpy==1.6.1',
# 'numexpr==1.4.2',
# 'cython==0.14.1',
# 'tables==2.3',
],
)
| Python | 0 |
3a6dd52e3cdfc5eca51d6dac4eb0701a1a04d550 | make version 0.3.5 | setup.py | setup.py | from setuptools import setup
setup(name='DukeDSClient',
version='0.3.5',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
author='John Bradley',
license='MIT',
packages=['ddsc','ddsc.core'],
install_requires=[
'requests',
'PyYAML',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
entry_points={
'console_scripts': [
'ddsclient = ddsc.__main__:main'
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| from setuptools import setup
setup(name='DukeDSClient',
version='0.3.4',
description='Command line tool(ddsclient) to upload/manage projects on the duke-data-service.',
url='https://github.com/Duke-GCB/DukeDSClient',
keywords='duke dds dukedataservice',
author='John Bradley',
license='MIT',
packages=['ddsc','ddsc.core'],
install_requires=[
'requests',
'PyYAML',
],
test_suite='nose.collector',
tests_require=['nose', 'mock'],
entry_points={
'console_scripts': [
'ddsclient = ddsc.__main__:main'
]
},
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: Utilities',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
)
| Python | 0.000016 |
995c75162a00b89fe23f43ec12f5e9495deb7799 | add optionnal dependancies | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set expandtab tabstop=4 shiftwidth=4:
import os
import re
import sys
from distutils.core import setup
#some install path variables
sysconfdir = os.getenv("SYSCONFDIR", "/etc")
datarootdir = os.getenv("DATAROOTDIR", sys.prefix)
data_dir = os.path.join(sys.prefix, 'share' ,'dnscherry')
small_description = 'A simple web application to manage DNS zones'
# change requirements according to python version
if sys.version_info[0] == 2:
install_requires = [
'CherryPy >= 3.0.0',
'dnspython',
'Mako'
],
elif sys.version_info[0] == 3:
install_requires = [
'CherryPy >= 3.0.0',
'dnspython3',
'Mako'
],
else:
print('unsupported version')
exit(1)
try:
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
description = f.read()
f.close()
except IOError:
description = small_description
try:
license = open('LICENSE').read()
except IOError:
license = 'MIT'
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
except ImportError:
from distutils.core import setup
PyTest = lambda x: x
# just a small function to easily install a complete directory
def get_list_files(basedir, targetdir):
return_list = []
for root, dirs, files in os.walk(basedir):
subpath = re.sub(r'' + basedir + '[\/]*', '', root)
files_list = []
for f in files:
files_list.append(os.path.join(root, f))
return_list.append((os.path.join(targetdir, subpath), files_list))
return return_list
resources_files = get_list_files('resources',
os.path.join(datarootdir, 'share', 'dnscherry'))
resources_files.append((
os.path.join(sysconfdir, 'dnscherry'),
[ 'conf/dnscherry.ini']
))
setup(
name = 'dnscherry',
version = '0.0.0',
zip_safe=False,
author = 'Pierre-Francois Carpentier',
author_email = 'carpentier.pf@gmail.com',
packages = ['dnscherry', 'dnscherry.auth'],
data_files = resources_files,
scripts = ['scripts/dnscherryd'],
url = 'https://github.com/kakwa/dnscherry',
license = license,
description = small_description,
long_description = description,
install_requires = install_requires,
tests_require = ['pytest'],
extras_require = {
'auth_htpasswd' : ['passlib'],
'auth_ldap' : ['python-ldap'],
'fastcgi' : ['flup']
},
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3']
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# vim:set expandtab tabstop=4 shiftwidth=4:
import os
import re
import sys
from distutils.core import setup
#some install path variables
sysconfdir = os.getenv("SYSCONFDIR", "/etc")
datarootdir = os.getenv("DATAROOTDIR", sys.prefix)
data_dir = os.path.join(sys.prefix, 'share' ,'dnscherry')
small_description = 'A simple web application to manage DNS zones'
# change requirements according to python version
if sys.version_info[0] == 2:
install_requires = [
'CherryPy >= 3.0.0',
'dnspython',
'Mako'
],
elif sys.version_info[0] == 3:
install_requires = [
'CherryPy >= 3.0.0',
'dnspython3',
'Mako'
],
else:
print('unsupported version')
exit(1)
try:
f = open(os.path.join(os.path.dirname(__file__), 'README.rst'))
description = f.read()
f.close()
except IOError:
description = small_description
try:
license = open('LICENSE').read()
except IOError:
license = 'MIT'
try:
from setuptools import setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
#import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
except ImportError:
from distutils.core import setup
PyTest = lambda x: x
# just a small function to easily install a complete directory
def get_list_files(basedir, targetdir):
return_list = []
for root, dirs, files in os.walk(basedir):
subpath = re.sub(r'' + basedir + '[\/]*', '', root)
files_list = []
for f in files:
files_list.append(os.path.join(root, f))
return_list.append((os.path.join(targetdir, subpath), files_list))
return return_list
resources_files = get_list_files('resources',
os.path.join(datarootdir, 'share', 'dnscherry'))
resources_files.append((
os.path.join(sysconfdir, 'dnscherry'),
[ 'conf/dnscherry.ini']
))
setup(
name = 'dnscherry',
version = '0.0.0',
zip_safe=False,
author = 'Pierre-Francois Carpentier',
author_email = 'carpentier.pf@gmail.com',
packages = ['dnscherry'],
data_files = resources_files,
scripts = ['scripts/dnscherryd'],
url = 'https://github.com/kakwa/dnscherry',
license = license,
description = small_description,
long_description = description,
install_requires = install_requires,
tests_require=['pytest', 'mechanize'],
cmdclass={'test': PyTest},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: System Administrators',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3']
)
| Python | 0 |
b25e21745ecdc5c03b3229ba77ee51b5fdd1561d | Move scapy to scapyproto to avoid breaking import of scapy from inside of protocols | ooni/protocols/scapyproto.py | ooni/protocols/scapyproto.py | import random
from zope.interface import implements
from twisted.python import usage
from twisted.plugin import IPlugin
from twisted.internet import protocol, defer
from ooni.plugoo.tests import ITest, OONITest
from ooni.plugoo.assets import Asset
from ooni.utils import log
from ooni.lib.txscapy import txsr, txsend
class ScapyTest(OONITest):
    """
    A utility class for writing scapy driven OONI tests.

    Subclasses override build_packets() (and usually load_assets()); this
    base class sends the packets and, when `receive` is set, captures the
    replies to `pcapfile`.
    """
    # knobs a subclass may override
    receive = True               # sniff for answers (txsr) vs fire-and-forget (txsend)
    timeout = None               # sniff timeout forwarded to txsr
    pcapfile = 'scapytest.pcap'  # capture file for received traffic
    def initialize(self, reactor=None):
        # assumes OONITest set self.reactor beforehand -- TODO confirm;
        # fall back to the global twisted reactor when unset
        if not self.reactor:
            from twisted.internet import reactor
            self.reactor = reactor
        self.request = {}
        self.response = {}
    def experiment(self, args):
        # returns a Deferred that fires with the send/receive result
        log.msg("Running experiment")
        if self.receive:
            log.msg("Sending and receiving packets.")
            d = txsr(self.build_packets(), pcapfile=self.pcapfile,
                     timeout=self.timeout)
        else:
            log.msg("Sending packets.")
            d = txsend(self.build_packets())
        def finished(data):
            # pass-through callback, used only for logging completion
            log.msg("Finished sending")
            return data
        d.addCallback(finished)
        return d
    def build_packets(self):
        """
        Override this method to build scapy packets.
        """
        from scapy.all import IP, TCP
        return IP()/TCP()
    def load_assets(self):
        # no assets by default; subclasses may return {name: Asset}
        return {}
| Python | 0 | |
3cab374806f9fd8a5fd90265025a4b021e0056f6 | add preliminary mergetools tests | test_mergetools.py | test_mergetools.py | """
Tests for the mergetools.py script.
"""
import tempfile
import random
from contextlib import contextmanager
import string
import datetime
import csv
import pandas as pd
import mergetools
from scraper import CSV_HEADERS
def random_str(length, chars=string.ascii_lowercase):
    """Return a random string of *length* characters drawn from *chars*."""
    picked = [random.choice(chars) for _ in range(length)]
    return ''.join(picked)
def random_fb_dataframe(size=100, until=None):
    """Returns a random dataframe that looks a bit like the data we tend to
    get from facebook (and has the same columns) but with utterly random
    content. May not quite make sense as regards comments etc. Will have to
    ensure that it does before using for testing context.

    :param size: number of rows (one per day).
    :param until: datetime of the last row; defaults to "now". The original
        used ``until=datetime.datetime.now()`` as the default, which is
        evaluated once at import time -- the None-sentinel fixes that while
        staying call-compatible.
    """
    if until is None:
        until = datetime.datetime.now()
    # choose a random page name
    pagename = random_str(10)
    data = {
        "page":[pagename for _ in range(size)],
        "posted by":[random_str(10) for _ in range(size)],
        "message":[random_str(100, chars=string.ascii_lowercase + ' ')
                   for _ in range(size)],
        "link":[random_str(25) for _ in range(size)],
        "shares":[random.randint(0,15) for _ in range(size)],
        "likes":[random.randint(0,1000) for _ in range(size)],
        "number of comments":[random.randint(0,50) for _ in range(size)],
        "pic":['' for _ in range(size)],
        "url":[random_str(50) for _ in range(size)],
        "type":[random.choice(['post','comment','comment'])
                for _ in range(size)]
    }
    start_time = until - (datetime.timedelta(1) * size)
    frame = pd.DataFrame(
        data=data,
        # indices form a daily date range; pd.DatetimeIndex(start=...) was
        # removed from pandas, date_range is the supported constructor
        index=pd.date_range(start=start_time, periods=size, freq='D'),
        #columns=CSV_HEADERS,
    )
    return frame
def setup_disjoint():
    """Build two fake pandas DataFrames that share columns but cover
    non-overlapping (disjoint) date ranges -- one ending now, the other
    ending fifty days earlier."""
    start_first = datetime.datetime.now()
    start_second = datetime.datetime.now() - datetime.timedelta(50)
    frame_a = random_fb_dataframe(size=30, until=start_first)
    frame_b = random_fb_dataframe(size=30, until=start_second)
    return (frame_a, frame_b)
@contextmanager
def write_dataframes(frames, encoding='utf-16'):
    """Write each dataframe in *frames* to its own temporary CSV file and
    yield the list of file names; the files are removed on exit.

    Fixes two defects in the original implementation:
    * cleanup called the non-existent ``NamedTemporaryFile.delete()`` method
      (``delete`` is a constructor flag, not a method), so exiting the
      context raised TypeError and leaked the temp files;
    * the handles were opened in text mode, in which case pandas does not
      honour the ``encoding`` argument -- binary mode lets utf-16 work.
    """
    import os
    paths = []
    try:
        for frame in frames:
            with tempfile.NamedTemporaryFile(mode='wb', delete=False) as handle:
                frame.to_csv(handle,
                             encoding=encoding,
                             index_label='dates',
                             quoting=csv.QUOTE_ALL,
                             sep='\t')
                paths.append(handle.name)
        # hand the file names to the `with` body
        yield paths
    finally:
        # always remove the temp files, even if the body raised
        for path in paths:
            try:
                os.unlink(path)
            except OSError:
                pass
# no doubt someday it will make sense to have this very cleverly organised
# but right now there is only one functionality to test
class Symdiff_Test(object):
    """Tests for the symmetric difference with context op"""
    def disjoint_test(self):
        """Tests that the symmetric difference of two disjoint frames is
        just their union."""
        print('symdiff - testing disjoint')
        a,b = setup_disjoint()
        op = mergetools.SymmetricDifference(a,b,
                                            write_out=False,
                                            do_context=False)
        # with disjoint inputs nothing cancels, so the result must equal
        # the plain concatenation of both frames
        result_1 = pd.concat([a,b])
        result_2 = op()
        assert result_1.equals(result_2)
    def loadfile_test(self):
        """Make sure it can load data from file and perform an op without errors
        """
        print('symdiff - testing files')
        # round-trip both frames through CSV files on disk, then build the
        # op from the file paths instead of in-memory frames
        with write_dataframes(setup_disjoint()) as data:
            op = mergetools.SymmetricDifference.from_args(data)
            op()
| Python | 0 | |
992191d290df8d7764a272c3b45e2f7b937456ec | add fib | misc/py3/fib.py | misc/py3/fib.py | #!/usr/bin/env python
# Python 3: Fibonacci series up to n
def fib(n):
    """Print the Fibonacci numbers smaller than *n* on a single line."""
    terms = []
    a, b = 0, 1
    while a < n:
        terms.append(str(a))
        a, b = b, a + b
    # one trailing space after every term, then a newline -- identical to
    # the original print(x, end=' ') loop followed by print()
    print(''.join(term + ' ' for term in terms))
fib(1000)
| Python | 0.999999 | |
abf7b0ffd86656f8311da7bfde65663d35ffd543 | fix for using stencilview | kivy/uix/scrollview.py | kivy/uix/scrollview.py | '''
Scroll View
===========
A ScrollView provides a scrollable/pannable viewport which is clipped to the
ScrollView's bounding box.
'''
__all__ = ('ScrollView', )
from kivy.uix.stencilview import StencilView
from kivy.uix.scatter import ScatterPlane
class ScrollView(StencilView):
    '''ScrollView class. See module documentation for more informations.

    Clips (stencils) its children to the bounding box and delegates all
    child management and panning to an internal ScatterPlane viewport.
    '''
    def __init__(self, **kwargs):
        self.viewport = ScatterPlane()
        super(ScrollView, self).__init__(**kwargs)
        # the viewport is our only direct child; user widgets go inside it
        super(ScrollView, self).add_widget(self.viewport)
        # NOTE(review): bind() normally expects a callback; passing the
        # current size value here looks wrong -- confirm against kivy docs
        self.viewport.bind(size=self.size)
    def add_widget(self, widget):
        # forward widget management to the pannable viewport
        self.viewport.add_widget(widget)
    def remove_widget(self, widget):
        self.viewport.remove_widget(widget)
    def clear_widgets(self):
        self.viewport.clear()
    def on_touch_down(self, touch):
        # only handle touches that fall inside our clipped bounding box
        if self.collide_point(*touch.pos):
            return super(ScrollView, self).on_touch_down(touch)
    def on_touch_move(self, touch):
        if self.collide_point(*touch.pos):
            return super(ScrollView, self).on_touch_move(touch)
    def on_touch_up(self, touch):
        if self.collide_point(*touch.pos):
            return super(ScrollView, self).on_touch_up(touch)
| '''
ScrollView widget
'''
__all__ = ('ScrollView', )
from kivy.uix.stencil import StencilView
from kivy.uix.scatter import ScatterPlane
class ScrollView(StencilView):
'''
ScrollView:
A ScrollView provides a scrollable/pannable viewport
which is clipped to the ScrollView's bounding box.
'''
def __init__(self, **kwargs):
self.viewport = ScatterPlane()
super(ScrollView, self).__init__(**kwargs)
super(ScrollView, self).add_widget(self.viewport)
self.viewport.bind(size=self.size)
def add_widget(self, widget):
self.viewport.add_widget(widget)
def remove_widget(self, widget):
self.viewport.remove_widget(widget)
def clear_widgets(self):
self.viewport.clear()
def on_touch_down(self, touch):
if self.collide_point(*touch.pos):
return super(ScrollView, self).on_touch_down(touch)
def on_touch_move(self, touch):
if self.collide_point(*touch.pos):
return super(ScrollView, self).on_touch_move(touch)
def on_touch_up(self, touch):
if self.collide_point(*touch.pos):
return super(ScrollView, self).on_touch_up(touch)
| Python | 0.000001 |
def solution(s):
    """Return the lexicographically-smallest rearrangement of *s* that is
    strictly greater than *s*, or None when *s* is already the largest
    permutation of its characters.

    This is the classic next-lexicographical-permutation algorithm; see
    https://www.nayuki.io/page/next-lexicographical-permutation-algorithm
    """
    letters = list(s)
    # 1. scan right-to-left for the first position whose suffix stops being
    #    non-increasing; that suffix is already its own maximum arrangement
    pivot = len(letters) - 1
    while pivot > 0 and letters[pivot - 1] >= letters[pivot]:
        pivot -= 1
    if pivot == 0:
        # the whole string is non-increasing: no greater permutation exists
        return None
    # 2. find the rightmost character larger than the pivot's predecessor
    swap = len(letters) - 1
    while letters[swap] <= letters[pivot - 1]:
        swap -= 1
    # 3. swap them, then reverse the suffix so it becomes minimal
    letters[pivot - 1], letters[swap] = letters[swap], letters[pivot - 1]
    return ''.join(letters[:pivot] + letters[pivot:][::-1])
testCount = int(input())
for testId in range(testCount):
word = input().strip()
greater = solution(word)
if greater:
print(greater)
else:
print('no answer')
| Python | 0.000018 | |
8738148bc5c1c650a4196b73f9f6d311dddeafea | Add validator for the DEP-11 file-format | contrib/dep11/dep11-validate.py | contrib/dep11/dep11-validate.py | #!/usr/bin/python3
#
# Copyright (C) 2014 Matthias Klumpp <mak@debian.org>
#
# Licensed under the GNU General Public License Version 3
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import sys
import yaml
from optparse import OptionParser
from voluptuous import Schema, Required, All, Any, Length, Range, Match, Url
schema_header = Schema({
Required('File'): All(str, 'DEP-11', msg="Must be \"DEP-11\""),
Required('Origin'): All(str, Length(min=1)),
Required('Version'): All(str, Match(r'(\d+\.?)+$'), msg="Must be a valid version number"),
})
schema_provides_dbus = Schema({
Required('type'): All(str, Length(min=1)),
Required('service'): All(str, Length(min=1)),
})
schema_provides = Schema({
Any('mimetypes',
'binaries',
'libraries',
'python3',
'python2',
'firmware'): All(list, [str], Length(min=1)),
'dbus': All(list, Length(min=1), [schema_provides_dbus]),
})
schema_keywords = Schema({
Required('C'): All(list, [str], Length(min=1), msg="Must have an unlocalized 'C' key"),
dict: All(list, [str], Length(min=1)),
}, extra = True)
schema_translated = Schema({
Required('C'): All(str, Length(min=1), msg="Must have an unlocalized 'C' key"),
dict: All(str, Length(min=1)),
}, extra = True)
schema_image = Schema({
Required('width'): All(int, Range(min=10)),
Required('height'): All(int, Range(min=10)),
Required('url'): All(str, Url()),
})
schema_screenshots = Schema({
Required('default', default=False): All(bool),
Required('image-source'): All(dict, Length(min=1), schema_image),
'image-thumbnail': All(dict, Length(min=1), schema_image),
'caption': All(dict, Length(min=1), schema_translated),
})
schema_icon = Schema({
Required(Any('stock',
'cached'), msg="A 'stock' or 'cached' icon must at least be provided."): All(str, Length(min=1)),
'cached': All(str, Match(r'.*[.].*$'), msg='Icon entry is missing filename or extension'),
'remote': All(str, Url()),
})
schema_url = Schema({
Any('homepage',
'bugtracker',
'faq',
'help',
'donation'): All(str, Url()),
})
schema_component = Schema({
Required('Type'): All(str, Any('generic', 'desktop-app', 'web-app', 'addon', 'codec', 'inputmethod')),
Required('ID'): All(str, Length(min=1)),
Required('Name'): All(dict, Length(min=1), schema_translated),
Required('Packages'): All(list, Length(min=1)),
'Summary': All(dict, Length(min=1), schema_translated),
'Description': All(dict, Length(min=1), schema_translated),
'Categories': All(list, [str], Length(min=1)),
'Url': All(dict, Length(min=1), schema_url),
'Icon': All(dict, Length(min=1), schema_icon),
'Keywords': All(dict, Length(min=1), schema_keywords),
'Provides': All(dict, Length(min=1), schema_provides),
'ProjectGroup': All(str, Length(min=1)),
'DeveloperName': All(dict, Length(min=1), schema_translated),
'Screenshots': All(list, Length(min=1), [schema_screenshots]),
'Extends': All(str, Length(min=1)),
})
class DEP11Validator:
    """Validates a DEP-11 YAML metadata file against the schemas defined
    in this module, accumulating human-readable problems in ``issue_list``.
    """

    def __init__(self):
        # per-instance list: the original used a mutable *class* attribute,
        # so every validator instance shared (and kept growing) one list
        self.issue_list = list()

    def add_issue(self, msg):
        """Record a single validation problem."""
        self.issue_list.append(msg)

    def _test_locale_cruft(self, doc, key):
        """Flag bogus locale names ('x-test', '*.UTF-8') in *doc*'s *key* dict."""
        ldict = doc.get(key, None)
        if not ldict:
            return
        for lang in ldict.keys():
            if lang == 'x-test':
                self.add_issue("[%s][%s]: %s" % (doc['ID'], key, "Found cruft locale: x-test"))
            if lang.endswith('.UTF-8'):
                self.add_issue("[%s][%s]: %s" % (doc['ID'], key, "AppStream locale names should not specify encoding (ends with .UTF-8)"))

    def validate(self, fname):
        """Validate the DEP-11 file *fname*; return True when it is valid.

        Problems are appended to ``issue_list`` rather than raised.
        """
        ret = True
        try:
            docs = yaml.load_all(open(fname, 'r'))
            header = next(docs)
        except Exception as e:
            self.add_issue("Could not parse file: %s" % (str(e)))
            return False
        try:
            schema_header(header)
        except Exception as e:
            self.add_issue("Invalid DEP-11 header: %s" % (str(e)))
            ret = False
        for doc in docs:
            if not doc:
                self.add_issue("FATAL: Empty document found.")
                ret = False
                continue
            if not doc.get('ID', None):
                self.add_issue("FATAL: Component without ID found.")
                ret = False
                continue
            try:
                schema_component(doc)
            except Exception as e:
                self.add_issue("[%s]: %s" % (doc['ID'], str(e)))
                ret = False
            self._test_locale_cruft(doc, 'Name')
            self._test_locale_cruft(doc, 'Summary')
            self._test_locale_cruft(doc, 'Description')
            self._test_locale_cruft(doc, 'DeveloperName')
            # TODO: test screenshot caption
        return ret

    def print_issues(self):
        """Print every recorded issue, one per line."""
        for issue in self.issue_list:
            print(issue)

    def clear_issues(self):
        # original was `def clear_issues():` -- missing `self`, so every
        # call raised TypeError
        self.issue_list = list()
def main():
    """Command-line entry point: validate the file named on the command
    line; exits 4 when no file is given and 1 when validation fails."""
    parser = OptionParser()
    parser.add_option("--no-color",
                      action="store_true", dest="no_color", default=False,
                      help="don't print colored output")
    (options, args) = parser.parse_args()
    if len(args) < 1:
        print("You need to specify a file to validate!")
        sys.exit(4)
    fname = args[0]
    validator = DEP11Validator()
    ret = validator.validate(fname)
    validator.print_issues()
    if ret:
        msg = "Validation successful."
    else:
        msg = "Validation failed!"
    # green/red ANSI colouring unless --no-color was requested
    if options.no_color:
        print(msg)
    elif ret:
        print('\033[92m' + msg + '\033[0m')
    else:
        print('\033[91m' + msg + '\033[0m')
    if not ret:
        sys.exit(1)
| Python | 0.000001 | |
df8ddd56ad51f0a644696cb0ff12c2e7a17c5913 | Create lonely-pixel-i.py | Python/lonely-pixel-i.py | Python/lonely-pixel-i.py | # Time: O(m * n)
# Space: O(m + n)
class Solution(object):
    # Time:  O(m * n)
    # Space: O(m + n)
    def findLonelyPixel(self, picture):
        """Count 'lonely' black pixels: a 'B' that is the only 'B' in both
        its row and its column.

        :type picture: List[List[str]]
        :rtype: int
        """
        # count black pixels per row and per column in one pass
        # (xrange replaced with range for Python 3 compatibility)
        rows, cols = [0] * len(picture), [0] * len(picture[0])
        for i in range(len(picture)):
            for j in range(len(picture[0])):
                if picture[i][j] == 'B':
                    rows[i] += 1
                    cols[j] += 1

        result = 0
        for i in range(len(picture)):
            if rows[i] == 1:
                for j in range(len(picture[0])):
                    # bool adds as 0/1: lonely iff alone in row and column
                    result += picture[i][j] == 'B' and cols[j] == 1
        return result
class Solution2(object):
    def findLonelyPixel(self, picture):
        """Count black pixels that are alone in both their row and column.

        :type picture: List[List[str]]
        :rtype: int
        """
        lonely = 0
        for column in zip(*picture):
            if column.count('B') != 1:
                continue
            # the column's single black pixel; lonely only when its row
            # also contains exactly one 'B'
            if picture[column.index('B')].count('B') == 1:
                lonely += 1
        return lonely
| Python | 0.00022 | |
c1dcb46e95d5b96ecf45db2e1f466b6f99330e1c | Add VimwikiTask cache-ing | taskwiki/cache.py | taskwiki/cache.py | import copy
import vim
class TaskCache(object):
    """
    A cache that holds all the tasks in the given file and prevents
    multiple redundant taskwarrior calls.

    Two lookup tables are kept:
    * ``task_cache``:       uuid (str) -> TaskWarrior task
    * ``vimwikitask_cache``: buffer line number (int) -> VimwikiTask
    """
    # accept unicode keys on Python 2 while staying importable on
    # Python 3, where `unicode` no longer exists
    try:
        _STRING_TYPES = (str, unicode)
    except NameError:
        _STRING_TYPES = (str,)

    def __init__(self, tw):
        self.task_cache = dict()
        self.vimwikitask_cache = dict()
        self.tw = tw

    def __getitem__(self, key):
        """Look a Task up by uuid (str) or a VimwikiTask up by line (int)."""
        # String keys refer to the Task objects
        if type(key) in self._STRING_TYPES:
            task = self.task_cache.get(key)
            if task is None:
                task = self.tw.tasks.get(uuid=key)
                self.task_cache[key] = task
            return task
        # Integer keys (line numbers) refer to the VimwikiTask objects
        elif type(key) is int:
            vimwikitask = self.vimwikitask_cache.get(key)
            if vimwikitask is None:
                # NOTE(review): VimwikiTask is not imported in this module;
                # presumably it is resolved elsewhere -- confirm
                vimwikitask = VimwikiTask.from_line(self, key)
            return vimwikitask  # May return None if the line has no task
        # Anything else is wrong
        else:
            raise ValueError("Wrong key type: %s (%s)" % (key, type(key)))

    def iterate_vimwiki_tasks(self):
        """Yield cached tasks once none of their dependencies remain."""
        iterated_cache = copy.copy(self.task_cache)
        while iterated_cache.keys():
            for key in list(iterated_cache.keys()):
                task = iterated_cache[key]
                # NOTE(review): cache keys are uuids but this tests
                # t['line_number'] against them -- looks suspicious, confirm
                if all([t['line_number'] not in iterated_cache.keys()
                        for t in task.add_dependencies]):
                    del iterated_cache[key]
                    yield task

    def reset(self):
        """Drop all cached entries."""
        self.task_cache = dict()
        self.vimwikitask_cache = dict()

    def update_tasks(self):
        """Refresh every saved cached task from TaskWarrior in one call."""
        # Select all tasks in the files that have UUIDs
        uuids = [t['uuid'] for t in self.task_cache.values() if t.saved]

        # Get them out of TaskWarrior at once; the original joined the
        # not-yet-assigned `tasks` name here, which raised NameError
        tasks = self.tw.filter(uuid=','.join(uuids))

        # Update each task in the cache
        for task in tasks:
            self.task_cache[task['uuid']] = task
| import copy
import vim
class TaskCache(object):
"""
A cache that holds all the tasks in the given file and prevents
multiple redundant taskwarrior calls.
"""
def __init__(self, tw):
self.cache = dict()
self.tw = tw
def __getitem__(self, key):
task = self.cache.get(key)
if task is None:
task = self.tw.tasks.get(uuid=key)
self.cache[key] = task
return task
def __iter__(self):
iterated_cache = copy.copy(self.cache)
while iterated_cache.keys():
for key in list(iterated_cache.keys()):
task = iterated_cache[key]
if all([t.line_number not in iterated_cache.keys()
for t in task.add_dependencies]):
del iterated_cache[key]
yield task
def reset(self):
self.cache = dict()
def update_tasks(self):
# Select all tasks in the files that have UUIDs
uuids = [t['uuid'] for t in self.cache.values() if t.saved]
# Get them out of TaskWarrior at once
tasks = self.tw.filter(uuid=','.join(tasks))
# Update each task in the cache
for task in tasks:
self.cache[task['uuid']] = task
| Python | 0 |
67c3c0e3c165dc73f548cff57d6cb390614d5aad | Bring back old watcher module | virtool/watcher.py | virtool/watcher.py | import os
import time
import logging
from virtool.utils import file_stats
from setproctitle import setproctitle
from multiprocessing import Process
from inotify.adapters import Inotify
logger = logging.getLogger(__name__)
TYPE_NAME_DICT = {
"IN_CREATE": "create",
"IN_MODIFY": "modify",
"IN_DELETE": "delete",
"IN_MOVED_FROM": "delete",
"IN_CLOSE_WRITE": "close"
}
projector = [
"_id",
"_version",
"name",
"size_end",
"size_now",
"timestamp",
"file_type",
"created",
"reserved",
"ready"
]
class Watcher(Process):
    """Separate process that inotify-watches *path* and forwards file
    events (create/modify/close/delete) onto *queue* as dicts."""
    def __init__(self, path, queue, interval=0.300):
        # interval: minimum seconds between forwarded "modify" events
        super().__init__()
        self.path = path
        self.queue = queue
        self.interval = interval
        self.notifier = Inotify()
    def run(self):
        # child-process entry point
        setproctitle("virtool-inotify")
        self.notifier.add_watch(bytes(self.path, encoding="utf-8"))
        last_modification = time.time()
        try:
            for event in self.notifier.event_gen():
                if event is not None:
                    _, type_names, _, filename = event
                    if filename and type_names[0] in TYPE_NAME_DICT:
                        assert len(type_names) == 1
                        # map inotify type name to our action vocabulary
                        action = TYPE_NAME_DICT[type_names[0]]
                        filename = filename.decode()
                        now = time.time()
                        if action in ["create", "modify", "close"]:
                            file_entry = file_stats(os.path.join(self.path, filename))
                            file_entry["filename"] = filename
                            # debounce modify events to at most one per interval
                            if action == "modify" and (now - last_modification) > self.interval:
                                self.queue.put({
                                    "action": action,
                                    "file": file_entry
                                })
                                last_modification = now
                            if action in ["create", "close"]:
                                self.queue.put({
                                    "action": action,
                                    "file": file_entry
                                })
                        # deletes carry only the filename (no stats possible)
                        if action == "delete":
                            self.queue.put({
                                "action": "delete",
                                "file": filename
                            })
        except KeyboardInterrupt:
            logging.debug("Stopped file watcher")
4d08ff430eba96ebef3f0824fe83f5bc2a236675 | add share_mem | multiprocessing/share_mem.py | multiprocessing/share_mem.py | #!/usr/bin/env python
from multiprocessing import Process, Value, Array
def f(n, a):
    """Overwrite the shared double *n* with pi and negate every element of
    the shared array *a* in place (intended to run in a child process)."""
    n.value = 3.1415927
    index = 0
    total = len(a)
    while index < total:
        a[index] = -a[index]
        index += 1
if __name__ == '__main__':
num = Value('d', 0.0)
arr = Array('i', range(10))
p = Process(target=f, args=(num, arr))
p.start()
p.join()
print(num.value)
print(arr[:])
| Python | 0 | |
119aabe89912c324d1588601c9cbc4b4a48e16ae | Add restarting_flup.py | restarting_flup.py | restarting_flup.py | #!/usr/bin/env python
"""
This is the same as the usual .fcgi file[1] for using FastCGI with flup,
except that this one terminates itself when the .fcgi file's modification
date changes. Assuming you have something[2] that restarts FastCGI processes
as needed (which you should anyway), this effectively allows you to reload
the application by just `touch`ing one file.
[1] http://flask.pocoo.org/docs/deploying/fastcgi/
[2] Something like Circus, Supervisord, or Lighttpd with `bin-path` configured.
"""
from os.path import getmtime
from flup.server.fcgi import WSGIServer
START_TIME = getmtime(__file__)
class RestartingServer(WSGIServer):
    """flup WSGIServer that stops its main loop when this file's mtime
    changes, so the FastCGI supervisor starts a fresh process."""
    def _mainloopPeriodic(self):
        WSGIServer._mainloopPeriodic(self)
        # START_TIME holds the mtime captured at import; any difference
        # means the .fcgi file was touched and we should exit
        if getmtime(__file__) != START_TIME:
            self._keepGoing = False
from YOUR_APPLICATION import app
RestartingServer(app).run()
| Python | 0.000007 | |
7496159322a173bb6265aed2dac4e50ad64de858 | Add base fullfill service | service.py | service.py | from flask import Flask
from flask import jsonify
from flask import request
app = Flask(__name__)
@app.route("/chainBot", methods=['POST'])
def chainBot():
print(request.data)
return jsonify({
"speech": "My Test Speech",
"displayText": "My Test Text",
"data": {},
"contextOut": [],
"source": ""
}), 200, {'Content-Type': 'text/css; charset=utf-8'}
if __name__ == "__main__":
app.run("0.0.0.0", 80)
| Python | 0 | |
5b01f26d92a32964bcc97cbf9429177bce7c89be | add tests for progress indicator | tests/test_util.py | tests/test_util.py | # -*- coding: utf-8 -*-
from StringIO import StringIO
from biseqt.util import ProgressIndicator
def test_progress_indicator():
    """Exercise both counting and percentage modes of ProgressIndicator."""
    logs = StringIO()
    # NOTE(review): this rebinds ProgressIndicator.write on the *class*,
    # so the patch leaks into any test that runs afterwards -- confirm
    ProgressIndicator.write = lambda self, message: logs.write(message)
    indic = ProgressIndicator(num_total=1)
    indic.start()
    indic.progress()
    assert logs.getvalue().strip() == '0/1 \r1/1', \
        'Counting progress indicator works'
    # rebinding `logs` works because the lambda resolves the name at call
    # time, so subsequent writes land in the fresh buffer
    logs = StringIO()
    indic = ProgressIndicator(num_total=1, percentage=True)
    indic.start()
    indic.progress()
    assert logs.getvalue().strip() == '0% \r100%', \
        'Percentage progress indicator works'
| Python | 0 | |
de7aee058348c00d2cdf244df102010b422e941b | Add a place holder for the PSNR metric | toolbox/metrics.py | toolbox/metrics.py | def psnr(y_true, y_pred):
raise NotImplementedError
| Python | 0.000009 | |
0da51215709f338e77acfa6e7933595d0c1df95d | Create SIP OPTIONS sender/receiver. | networks/sip.py | networks/sip.py | # -*- coding: utf-8 -*-
import argparse
import socket
CRLF = '\r\n'
def send_sip_options(server_host, server_port, client_host, client_port, verbose=True):
    """Sends SIP OPTIONS over UDP and returns the server's reply.

    :param str server_host: SIP server host (IP address).
    :param int server_port: SIP server port.
    :param str client_host: Local client host (IP address).
    :param int client_port: Local client port.
    :param bool verbose: If True prints out the request payload.
    :return: SIP server response.
    :rtype: str
    """
    with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as sock:
        sock.connect((server_host, server_port))
        # minimal OPTIONS request; BRANCH/TAG are static placeholders
        # rather than the unique values RFC 3261 asks for
        payload_fields = (
            'OPTIONS sip:127.0.0.1:5060 SIP/2.0',
            f'Via: SIP/2.0/UDP {client_host}:{client_port};rport;branch=BRANCH',
            'Max-Forwards: 70',
            f'From: <sip:{client_host}>;tag=TAG',
            'To: <sip:127.0.0.1>',
            'Call-ID: 1',
            'CSeq: 1 OPTIONS',
            'Content-Length: 0',
        )
        # NOTE(review): a SIP request should terminate with an empty line
        # (double CRLF); this payload stops after the last header -- confirm
        payload = CRLF.join(payload_fields).encode('utf-8')
        if verbose:
            print('===================')
            print('SIP server request:')
            print('===================')
            print(payload.decode().strip())
            print('--------------------')
            print()
        sock.send(payload)
        # single-datagram answer; blocks until the server replies
        return sock.recv(4096).decode('utf-8')
def main():
    """Parse CLI args, pick a local UDP port, and print the SIP server's
    reply to an OPTIONS request."""
    # prepare argument parser
    parser = argparse.ArgumentParser()
    parser.add_argument('server_host', help='SIP server hostname or IP address')
    parser.add_argument('server_port', nargs='?', default=5060, help='SIP server port (default=5060)')
    args = parser.parse_args()
    hostname = socket.gethostname()
    local_ip = socket.gethostbyname(hostname)
    client = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    client.bind((local_ip, 0)) # get random port
    # NOTE(review): this socket stays bound while send_sip_options opens a
    # *different* socket yet advertises this host:port in the Via header,
    # so replies routed to that port are never read -- confirm intended;
    # the socket is also never closed
    client_host, client_port = client.getsockname()
    response = send_sip_options(args.server_host, int(args.server_port), client_host, client_port)
    print('====================')
    print('SIP server response:')
    print('====================')
    print(response.strip())
    print('--------------------')
main()
| Python | 0 | |
3b9a0c0b83dda484586ea9c19091b7da1cae55d1 | prepare a test file for python | test_riak3k.py | test_riak3k.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import nose
from nose.tools import *
import riak3k
| Python | 0.000002 | |
8f41417f27e3712dc796bdcb4d0105a12cb3f62c | Add test for mark_list | test/test_mark_list.py | test/test_mark_list.py | from support import lib,ffi
from qcgc_test import QCGCTest
class MarkListTestCase(QCGCTest):
def test_create_destroy(self):
"""Lifetime management"""
for i in range(100):
l = lib.qcgc_mark_list_create(i)
self.assertNotEqual(l, ffi.NULL)
self.assertEqual(l.head, l.tail)
self.assertEqual(l.insert_index, 0)
self.assertGreater(l.length, 0)
self.assertNotEqual(l.segments, ffi.NULL)
self.assertNotEqual(l.segemnts[l.head], ffi.NULL)
lib.qcgc_mark_list_destroy(l)
def test_push_pop(self):
"""Single push and pop"""
l = lib.qcgc_mark_list_create(1000)
for i in range(1000):
lib.qcgc_mark_list_push(l, ffi.cast("object_t *", i))
i = 0
while i < 1000:
segment = lib.qcgc_mark_list_get_head_segment(l)
self.assertNotEqual(segment, ffi.NULL)
for j in range(lib.QCGC_MARK_LIST_SEGMENT_SIZE):
if i < 1000:
self.assertEqual(ffi.cast("object_t *", i), segment[j])
i += 1
lib.qcgc_mark_list_drop_head_segment(l);
def test_grow_push(self):
"""Growing on space exhaustion when using single push"""
l = lib.qcgc_mark_list_create(200)
for i in range(1000):
lib.qcgc_mark_list_push(l, ffi.cast("object_t *", i))
i = 0
while i < 1000:
segment = lib.qcgc_mark_list_get_head_segment(l)
self.assertNotEqual(segment, ffi.NULL)
for j in range(lib.QCGC_MARK_LIST_SEGMENT_SIZE):
if i < 1000:
self.assertEqual(ffi.cast("object_t *", i), segment[j])
i += 1
lib.qcgc_mark_list_drop_head_segment(l);
def test_push_all(self):
"""Push array"""
arr_size = 2 * lib.QCGC_MARK_LIST_SEGMENT_SIZE
list_size = arr_size + lib.QCGC_MARK_LIST_SEGMENT_SIZE
pre_fill = lib.QCGC_MARK_LIST_SEGMENT_SIZE // 2
arr = ffi.new('object_t *[]', arr_size)
l = lib.qcgc_mark_list_create(list_size)
for i in range(arr_size):
arr[i] = ffi.cast("object_t *", i)
for i in range(pre_fill):
lib.qcgc_mark_list_push(l,ffi.NULL)
lib.qcgc_mark_list_push_all(l, arr, arr_size)
for i in range(list_size - arr_size - pre_fill):
lib.qcgc_mark_list_push(l,ffi.NULL)
i = 0
while i < list_size:
segment = lib.qcgc_mark_list_get_head_segment(l)
self.assertNotEqual(segment, ffi.NULL)
for j in range(lib.QCGC_MARK_LIST_SEMGENT_SIZE):
if i >= pre_fill and i < pre_fill + arr_size:
self.assertEqual(segment[i], ffi.cast("object_t *", i - pre_fill))
else:
self.assertEqual(segment[i], ffi.NULL)
i += 1
segment = lib.qcgc_mark_list_get_head_segment(l)
def test_grow_push_all(self):
"""Grow on push array"""
arr_size = 4 * lib.QCGC_MARK_LIST_SEGMENT_SIZE
pre_fill = lib.QCGC_MARK_LIST_SEGMENT_SIZE // 2
list_size = pre_fill + arr_size
init_size = lib.QCGC_MARK_LIST_SEGMENT_SIZE
arr = ffi.new('object_t *[]', arr_size)
l = lib.qcgc_mark_list_create(init_size)
for i in range(arr_size):
arr[i] = ffi.cast("object_t *", i)
for i in range(pre_fill):
lib.qcgc_mark_list_push(l,ffi.NULL)
lib.qcgc_mark_list_push_all(l, arr, arr_size)
i = 0
while i < list_size:
segment = lib.qcgc_mark_list_get_head_segment(l)
self.assertNotEqual(segment, ffi.NULL)
for j in range(lib.QCGC_MARK_LIST_SEMGENT_SIZE):
if i >= pre_fill and i < pre_fill + arr_size:
self.assertEqual(segment[i], ffi.cast("object_t *", i - pre_fill))
else:
self.assertEqual(segment[i], ffi.NULL)
i += 1
segment = lib.qcgc_mark_list_get_head_segment(l)
| Python | 0.000001 | |
17c9e1a16c5c16c1b49836cc376ddd6408b73de0 | make the dispatcher a global variable and the deprecation warning more developer friendly | coherence/extern/louie.py | coherence/extern/louie.py | """
Wrapper module for the louie implementation
"""
import warnings
from coherence.dispatcher import Dispatcher
# sentinel types mirroring louie's Any/All/Anonymous markers
class Any(object): pass
class All(object): pass
class Anonymous(object): pass
# fake the louie module API: expose `signal` and `sender` namespaces
class Dummy(object): pass
signal = Dummy()
sender = Dummy()
# senders
sender.Anonymous = Anonymous
sender.Any = Any
# signals
signal.All = All
# a slightly less raise-y-ish implementation as louie was not so picky, too
class GlobalDispatcher(Dispatcher):
    """Dispatcher variant that tolerates unknown signals, matching louie's
    more forgiving behaviour (louie was not so picky)."""
    def connect(self, signal, callback, *args, **kw):
        if not signal in self.receivers:
            # ugly hack: auto-register unknown signals instead of raising
            self.receivers[signal] = []
        return Dispatcher.connect(self, signal, callback, *args, **kw)
    def _get_receivers(self, signal):
        # unknown signals simply have no receivers
        try:
            return self.receivers[signal]
        except KeyError:
            return []
global _global_dispatcher
_global_dispatcher = GlobalDispatcher()
_global_receivers_pool = {}
def _display_deprecation_warning():
warnings.warn("extern.louie will soon be deprecated in favor of coherence.dispatcher.")
def connect(receiver, signal=All, sender=Any, weak=True):
    """louie-compatible connect(): register *receiver* for *signal*.

    *sender* and *weak* are accepted for API compatibility only.
    """
    callback = receiver
    if signal in (Any, All):
        # the original raised the NotImplemented *singleton*, which is not
        # callable (TypeError); NotImplementedError is the actual exception
        raise NotImplementedError("This is not allowed. Signal HAS to be something")
    if sender not in (Any, All):
        _display_deprecation_warning()
    receiver = _global_dispatcher.connect(signal, callback)
    _global_receivers_pool[(callback, signal)] = receiver
    return receiver
def disconnect(receiver, signal=All, sender=Any, weak=True):
    """louie-compatible disconnect(): unregister *receiver* from *signal*."""
    callback = receiver
    if signal in (Any, All):
        # NotImplementedError instead of calling the NotImplemented singleton
        raise NotImplementedError("This is not allowed. Signal HAS to be something")
    if sender not in (Any, All):
        _display_deprecation_warning()
    # raises KeyError when the (callback, signal) pair was never connected
    receiver = _global_receivers_pool.pop((callback, signal))
    return _global_dispatcher.disconnect(receiver)
def send(signal=All, sender=Anonymous, *arguments, **named):
    """louie-compatible send(): emit *signal* with the given arguments."""
    if signal in (Any, All):
        # NotImplementedError instead of calling the NotImplemented singleton
        raise NotImplementedError("This is not allowed. Signal HAS to be something")
    if sender not in (Anonymous, None):
        _display_deprecation_warning()
    # the first value of the callback shall always be the signal:
    # NOTE(review): `save_emit` reads like a typo of `safe_emit` -- confirm
    # against coherence.dispatcher.Dispatcher's API
    return _global_dispatcher.save_emit(signal, *arguments, **named)
# the louie send variants all behave identically in this shim
def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
    return send(signal, sender, *arguments, **named)
def send_exact(signal=All, sender=Anonymous, *arguments, **named):
    return send(signal, sender, *arguments, **named)
def send_robust(signal=All, sender=Anonymous, *arguments, **named):
    return send(signal, sender, *arguments, **named)
| """
Wrapper module for the louie implementation
"""
import warnings
from coherence.dispatcher import Dispatcher
class Any(object): pass
class All(object): pass
class Anonymous(object): pass
# fake the API
class Dummy(object): pass
signal = Dummy()
sender = Dummy()
#senders
sender.Anonymous = Anonymous
sender.Any = Any
#signals
signal.All = All
# a slightly less raise-y-ish implementation as louie was not so picky, too
class GlobalDispatcher(Dispatcher):
def connect(self, signal, callback, *args, **kw):
if not signal in self.receivers:
# ugly hack
self.receivers[signal] = []
return Dispatcher.connect(self, signal, callback, *args, **kw)
def _get_receivers(self, signal):
try:
return self.receivers[signal]
except KeyError:
return []
_global_dispatcher = GlobalDispatcher()
_global_receivers_pool = {}
def connect(receiver, signal=All, sender=Any, weak=True):
callback = receiver
if signal in (Any, All):
raise NotImplemented("This is not allowed. Signal HAS to be something")
if sender not in (Any, All):
warnings.warn("Seriously! Use the coherence.dispatcher. It IS object based")
receiver = _global_dispatcher.connect(signal, callback)
_global_receivers_pool[(callback, signal)] = receiver
return receiver
def disconnect(receiver, signal=All, sender=Any, weak=True):
callback = receiver
if signal in (Any, All):
raise NotImplemented("This is not allowed. Signal HAS to be something")
if sender not in (Any, All):
warnings.warn("Seriously! Use the coherence.dispatcher. It IS object based")
receiver = _global_receivers_pool.pop((callback, signal))
return _global_dispatcher.disconnect(receiver)
def send(signal=All, sender=Anonymous, *arguments, **named):
if signal in (Any, All):
raise NotImplemented("This is not allowed. Signal HAS to be something")
if sender not in (Anonymous, None):
warnings.warn("Seriously! Use the coherence.dispatcher. It IS object based")
# the first value of the callback shall always be the signal:
return _global_dispatcher.save_emit(signal, *arguments, **named)
def send_minimal(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
def send_exact(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
def send_robust(signal=All, sender=Anonymous, *arguments, **named):
return send(signal, sender, *arguments, **named)
| Python | 0 |
0475e35bb6e0bab1d61c038ddd902e32478211d7 | Create whois.py | whois.py | whois.py | # เธญเนเธฒเธเธเธเธเธงเธฒเธกเนเธเนเธเธตเน https://python3.wannaphong.com/2016/12/เธเธถเธเธเนเธญเธกเธนเธฅ-whois-เนเธเนเธกเธเธเนเธงเธข-python.html
# Written by Wannaphong Phatthiyaphaibun (original comments were
# mojibake-encoded Thai; translated to English)
import whois
w = whois.whois('abc.xyz') # look up WHOIS data for the domain of interest
print(w.expiration_date) # domain expiry date
print(w.text) # full raw WHOIS record
| Python | 0.000003 | |
47f0edcbe4dd4902e679d4f1e384be1795c3d465 | Add str() calls around messages in tty | lib/spack/spack/tty.py | lib/spack/spack/tty.py | ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
import spack
from spack.color import *
indent = " "
def msg(message, *args):
    """Print a highlighted (bold blue arrow) message; extra args are printed
    indented on their own lines.  (Python 2 print statements.)"""
    cprint("@*b{==>} %s" % cescape(message))
    for arg in args:
        print indent + str(arg)
def info(message, *args, **kwargs):
    """Print a colored arrow message; ``format`` kwarg selects the color code
    (default bold blue).  Extra args are printed indented below."""
    # NOTE: `format` shadows the builtin, kept for interface compatibility.
    format = kwargs.get('format', '*b')
    cprint("@%s{==>} %s" % (format, cescape(str(message))))
    for arg in args:
        print indent + str(arg)
def verbose(message, *args):
    """Print an info message (cyan) only when spack.verbose is enabled."""
    if spack.verbose:
        info(str(message), *args, format='c')
def debug(message, *args):
    """Print a debug message (bold green) when spack.debug is enabled.

    Bug fix: the original signature was ``debug(*args)`` but the body
    referenced an undefined name ``message``, raising NameError on every
    call.  Taking ``message`` as the first positional argument matches the
    sibling ``error``/``warn`` helpers and is backward compatible for any
    caller passing at least one argument.
    """
    if spack.debug:
        info("Debug: " + str(message), *args, format='*g')
def error(message, *args):
    """Print an error message (bold red)."""
    info("Error: " + str(message), *args, format='*r')
def warn(message, *args):
    """Print a warning message (bold yellow)."""
    info("Warning: " + str(message), *args, format='*Y')
def die(message, *args):
    """Print an error message and exit the process with status 1."""
    error(message, *args)
    sys.exit(1)
def pkg(message):
    """Outputs a message with a package icon."""
    import platform
    from version import Version
    # The package emoji renders on OS X >= 10.7; fall back to a colored
    # "[+]" marker elsewhere.
    mac_ver = platform.mac_ver()[0]
    if mac_ver and Version(mac_ver) >= Version('10.7'):
        print u"\U0001F4E6" + indent,
    else:
        cwrite('@*g{[+]} ')
    print message
def get_number(prompt, **kwargs):
    """Prompt the user until a positive integer is entered.

    Keyword args:
        default: value returned when the user just presses Enter.
        abort:   sentinel input that makes the function return None.
    """
    default = kwargs.get('default', None)
    abort = kwargs.get('abort', None)
    # Extend the prompt to advertise the default/abort options.
    if default is not None and abort is not None:
        prompt += ' (default is %s, %s to abort) ' % (default, abort)
    elif default is not None:
        prompt += ' (default is %s) ' % default
    elif abort is not None:
        prompt += ' (%s to abort) ' % abort
    number = None
    while number is None:
        ans = raw_input(prompt)
        if ans == str(abort):
            return None
        if ans:
            try:
                number = int(ans)
                if number < 1:
                    # Only positive numbers are accepted; re-prompt.
                    msg("Please enter a valid number.")
                    number = None
            except ValueError:
                msg("Please enter a valid number.")
        elif default is not None:
            # Empty input falls back to the default, if any.
            number = default
    return number
| ##############################################################################
# Copyright (c) 2013, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Written by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://scalability-llnl.github.io/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License (as published by
# the Free Software Foundation) version 2.1 dated February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import sys
import spack
from spack.color import *
indent = " "
def msg(message, *args):
cprint("@*b{==>} %s" % cescape(message))
for arg in args:
print indent + str(arg)
def info(message, *args, **kwargs):
format = kwargs.get('format', '*b')
cprint("@%s{==>} %s" % (format, cescape(message)))
for arg in args:
print indent + str(arg)
def verbose(message, *args):
if spack.verbose:
info(message, *args, format='c')
def debug(*args):
if spack.debug:
info("Debug: " + message, *args, format='*g')
def error(message, *args):
info("Error: " + message, *args, format='*r')
def warn(message, *args):
info("Warning: " + message, *args, format='*Y')
def die(message, *args):
error(message, *args)
sys.exit(1)
def pkg(message):
"""Outputs a message with a package icon."""
import platform
from version import Version
mac_ver = platform.mac_ver()[0]
if mac_ver and Version(mac_ver) >= Version('10.7'):
print u"\U0001F4E6" + indent,
else:
cwrite('@*g{[+]} ')
print message
def get_number(prompt, **kwargs):
default = kwargs.get('default', None)
abort = kwargs.get('abort', None)
if default is not None and abort is not None:
prompt += ' (default is %s, %s to abort) ' % (default, abort)
elif default is not None:
prompt += ' (default is %s) ' % default
elif abort is not None:
prompt += ' (%s to abort) ' % abort
number = None
while number is None:
ans = raw_input(prompt)
if ans == str(abort):
return None
if ans:
try:
number = int(ans)
if number < 1:
msg("Please enter a valid number.")
number = None
except ValueError:
msg("Please enter a valid number.")
elif default is not None:
number = default
return number
| Python | 0.000001 |
416872a1e7191f62dd2353f3e221a9e9c93c161f | Add tests for utils. | test_utils.py | test_utils.py | """
Tests for the utils.py file
"""
import axelrod as axl
import unittest
import utils
import tempfile
import csv
class TestUtils(unittest.TestCase):
    """
    Simple tests for the utils
    """
    # Shared fixture: a small seeded demo tournament, played once at class
    # creation time so every test sees the same results.
    axl.seed(0)
    players = [s() for s in axl.demo_strategies]
    tournament = axl.Tournament(players)
    results = tournament.play()

    def test_label(self):
        label = utils.label("Test", self.results)
        expected_label = "{} - turns: {}, repetitions: {}, strategies: {}. ".format(
            "Test", self.tournament.turns, self.tournament.repetitions,
            len(self.tournament.players))
        # Bug fix: the original computed expected_label but never asserted
        # anything, so this test could never fail.
        self.assertEqual(label, expected_label)

    def test_summary_data(self):
        tmpfile = tempfile.NamedTemporaryFile()
        sd = utils.summary_data(self.results, tmpfile.name)
        self.assertEqual(len(sd), len(self.tournament.players))
        self.assertEqual([player.Name for player in sd],
                         self.results.ranked_names)
        # The csv written by summary_data should list players in rank order
        # (column 1), skipping the header row.
        with open(tmpfile.name, "r") as csvfile:
            csvreader = csv.reader(csvfile)
            ranked_names = [row[1] for row in csvreader][1:]
        self.assertEqual(ranked_names, self.results.ranked_names)
| Python | 0 | |
b28ace414c7087936ec14665026b78413b1f3791 | Create __init__.py | neutron_dynamic_routing/neutron/cmd/eventlet/agents/__init__.py | neutron_dynamic_routing/neutron/cmd/eventlet/agents/__init__.py | Python | 0.000429 | ||
8c367b23d32e3cefbb6eebd0bb8b53f12d47b3ec | Add a simple cycle finder. | runtests/cycles.py | runtests/cycles.py | """
Finding reference cycles of objects.
The analysis is more or less static and oriented for
single thread applications. Use objgraph for some of
the lower level operations.
main entry point is:
.. code::
assert_no_cycles(*objs)
"""
import gc
def assert_no_cycles(*objs):
    """ Assert no objects on the list induces any cycles
        in the back reference list.

        e.g.

        .. code::

            a = 3O
            assert_no_cycles(a)

            a = []
            b = [a]
            a[0] = b
            assert_no_cycles(a)
    """
    # Collect first so already-dead reference cycles don't show up in the
    # back-reference graph.
    gc.collect()
    sccs = tarjan(objs)
    if len(sccs) > 0:
        # Visualize the offending components before failing.
        show_cycles(sccs)
    assert len(sccs) == 0
def show_cycles(sccs, joined=False):
    """Visualize strongly connected components with objgraph.

    sccs is a list of components, each a list of object ids.  With
    joined=True all components are merged and drawn as one graph.
    """
    import objgraph
    a = sccs
    if joined:
        # Flatten all components into a single combined component.
        a = []
        for scc in sccs:
            a.extend(scc)
        a = [a]
    for scc in a:
        # Resolve ids back to live objects and draw their back references,
        # restricted to objects inside this component.
        objs = objgraph.at_addrs(scc)
        print(objgraph.typestats(objs))
        objgraph.show_backrefs(objs, max_depth=len(scc) + 5,
                               filter=lambda x: id(x) in scc)
def isin(obj, l):
    """True when *obj* is identical (``is``) to some element of *l*.

    The ``in`` operator is deliberately avoided: it tests equality,
    not identity.
    """
    return any(element is obj for element in l)
def ignore_frames(x):
    """Return a list of objects related to *x* that should be excluded from
    the cycle analysis (bookkeeping structures, not real references)."""
    import inspect
    import types
    l = []
    if inspect.isclass(x):
        # must be a class object
        l.extend([x.__mro__, x.__dict__])
        if hasattr(x, '__weakref__'):
            l.extend([x.__weakref__])
        for member in x.__dict__.values():
            # ignore attributes.
            if inspect.isgetsetdescriptor(member):
                l.append(member)
    # ignore the module and module dict
    if inspect.ismodule(x):
        l.extend([x, x.__dict__])
    # ignore a frame; this will not work with multi-threaded applications
    # use refcycle in that case for live applications
    if inspect.isframe(x):
        # this can't detect multi-threaded.
        l.extend([x])
    return l
def tarjan(objs, get_referrers=gc.get_referrers,
           ignore=ignore_frames,
           getid=id,
           squeeze=True):
    """ Identifying strongly connected components from a directional graph.

        Algorithm is from
        https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm

        Parameters
        ----------
        objs : list
            a list of objects to start the algorithm. The input graph consists of
            all objects connected to these objects by the get_referrers function.
        get_referrers: func(*objs)
            returns the neighbour of objects. This function represents the egdes and
            serves as the discovery function for vertices.
        squeeze : bool
            True, remove single item components except self-loops.
        getid : func(x)
            generates a unique id for the given object. ids are used to track objects
    """
    # NOTE(review): `squeeze` and `edges` are currently unused; single-item
    # components are always dropped unless they are self-loops.
    gindex = [0]
    index = {}
    lowlink = {}
    onStack = {}
    S = []
    id_to_obj = {}
    edges = {}
    # first traverse to obtain the full object list V,
    # and the id to object mapping,
    def bfs_action(x):
        id_to_obj[getid(x)] = x
    V = _bfs(objs,
             get_referrers,
             ignore=lambda x: ignore(x) + [id_to_obj],
             action=lambda x: id_to_obj.update({getid(x) : x}),
             getid=getid)
    # shrink id_to_obj to the same size as V, removing undesired objects
    id_to_obj = {k: id_to_obj[k] for k in V }
    #print('V', V)
    # initially, nothing is on the stack
    for v in V: onStack[v] = False
    def strongly_connect(v):
        # Recursive core of Tarjan's algorithm; returns the list of
        # components rooted at v.
        sccs = []
        index[v] = gindex[0]
        lowlink[v] = gindex[0]
        gindex[0] = gindex[0] + 1
        S.append(v)
        onStack[v] = True
        isloop = False
        W = []
        # Neighbours of v, restricted to vertices that are part of V.
        for w in _ignore_filter(get_referrers(id_to_obj[v]),
                                ignore=lambda x: [x] if getid(x) not in V else [],
                                extraids=set()
                                ):
            W.append(getid(w))
        for w in W:
            if w not in index:
                # w not yet visited: recurse.
                sccs.extend(strongly_connect(w))
                lowlink[v] = min(lowlink[v], lowlink[w])
            elif onStack[w]:
                # w is on the stack, hence in the current component.
                lowlink[v] = min(lowlink[v], index[w])
                if v == w:
                    isloop = True
        if lowlink[v] == index[v]:
            # start a new strongly connected component
            scc = []
            while True:
                w = S.pop()
                onStack[w] = False
                # add w to the current strongly connected component
                scc.append(w)
                if w == v:
                    break
            # if the scc is singular and not
            # forming a loop, skip it.
            if len(scc) > 1 or isloop:
                # output
                sccs.append(scc)
        return sccs
    sccs = []
    for v in V:
        if v not in index:
            sccs.extend(strongly_connect(v))
    # Largest components first.
    return sorted(sccs, key=lambda x:-len(x))
def _ignore_filter(referrers, ignore, extraids, getid=id):
""" Ignore objects on the referrers list if ignore(x) is true or if x is in extra """
r = []
for ref in referrers:
if ignore is not None:
extraids.update(set([getid(o) for o in ignore(ref)]))
if getid(ref) in extraids: continue
r.append(ref)
return r
def _bfs(objs, get_referrers, ignore=None, action=None, getid=id):
    """ A breadth first search traverse of the graph.
    """
    # Returns the set of visited object ids, excluding the bookkeeping
    # containers created by the traversal itself (tracked in extraids).
    import types
    visited = set()
    referrers = list(objs)
    extraids = set()
    extraids.add(getid(objs))
    while True:
        front = []
        for ref in referrers:
            refid = getid(ref)
            if refid in visited:
                # already visited
                pass
            else:
                if action: action(ref)
                visited.add(refid)
                front.append(ref)
        if len(front) == 0:
            # No new vertices discovered: traversal complete.
            break
        # Our own temporary lists must not count as graph vertices.
        extraids.add(getid(referrers))
        extraids.add(getid(front))
        newreferrers = get_referrers(*front)
        extraids.add(getid(newreferrers))
        referrers = _ignore_filter(newreferrers,
                                   ignore=ignore,
                                   extraids=extraids)
        #print(extraids)
        #print('referrers', [type(o) for o in referrers])
        #pprint(referrers)
        #input()
    return visited - extraids
def f():
    # Module-level fixture used by main()'s cycle demo.
    pass
class d:
    # Fixture class for main()'s cycle demo.
    def __init__(self):
        pass
    def method(self):
        pass
# Build a small module-level reference cycle for the demo: f.e -> e, and
# e is an instance of d, so d/f/e reference each other through attributes.
m2 = f
e = d()
f.e = e
def main():
    """Demo driver: build known cycles and run the detectors on them."""
    # Three dicts forming a 3-cycle.
    a1 = dict()
    a2 = dict()
    a3 = dict()
    a1['a2'] = a2
    a2['a3'] = a3
    a3['a1'] = a1
    # A self-referencing dict.
    b = dict()
    b['b'] = b
    # A dict with no cycle at all.
    c = dict()
    c['c'] = 'c'
    import gc
    import types
    print(len(
        _bfs([b],
             gc.get_referrers
             )
        ))
    sccs = tarjan([a1, b, c], gc.get_referrers)
    show_cycles(sccs, joined=True)
    print(sccs)
    del sccs
    gc.collect()
    sccs = tarjan([d, e, f])
    show_cycles(sccs, joined=True)
    return
    # NOTE(review): everything below is unreachable (after the return);
    # kept from the original as a whole-heap analysis sketch.
    sccs = tarjan(gc.get_objects(), gc.get_referrers)
    print([len(i) for i in sccs])
    import objgraph
    objs = objgraph.at_addrs(sccs[0])
    print(objgraph.typestats(objs))
| Python | 0 | |
bf7ad11cc32af83aab6496ac7d7b911bea3d7876 | Use new API. | l10n_it_pec/model/partner.py | l10n_it_pec/model/partner.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Associazione Odoo Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import models, fields
class ResPartner(models.Model):
    """Extend the base partner with a certified e-mail (PEC) address,
    used for Italian certified electronic mail."""
    _inherit = "res.partner"

    # Certified e-mail address (Posta Elettronica Certificata).
    pec_mail = fields.Char(string='PEC Mail')
| # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2014 Associazione Odoo Italia
# (<http://www.openerp-italia.org>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, orm
class ResPartner(orm.Model):
_inherit = "res.partner"
_columns = {
'pec_mail': fields.char(
'PEC Mail'
),
}
| Python | 0 |
2c590b82b716ecfca9b683afa1181a8368b6cb41 | Add some helper methods to deal with sqlite migrations | gertty/dbsupport.py | gertty/dbsupport.py | # Copyright 2014 Mirantis Inc.
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
import uuid
from alembic import op
import sqlalchemy
def sqlite_alter_columns(table_name, column_defs):
    """Implement alter columns for SQLite.

    The ALTER COLUMN command isn't supported by SQLite specification.
    Instead of calling ALTER COLUMN it uses the following workaround:

    * create temp table '{table_name}_{rand_uuid}', with some column
      defs replaced;
    * copy all data to the temp table;
    * drop old table;
    * rename temp table to the old table name.
    """
    connection = op.get_bind()
    meta = sqlalchemy.MetaData(bind=connection)
    meta.reflect()
    changed_columns = {}
    indexes = []
    for col in column_defs:
        # If we are to have an index on the column, don't create it
        # immediately, instead, add it to a list of indexes to create
        # after the table rename.
        if col.index:
            indexes.append(('ix_%s_%s' % (table_name, col.name),
                            table_name,
                            [col.name],
                            col.unique))
            col.unique = False
            col.index = False
        changed_columns[col.name] = col
    # construct lists of all columns and their names
    old_columns = []
    new_columns = []
    column_names = []
    for column in meta.tables[table_name].columns:
        column_names.append(column.name)
        old_columns.append(column)
        if column.name in changed_columns.keys():
            # Use the caller-supplied replacement definition.
            new_columns.append(changed_columns[column.name])
        else:
            col_copy = column.copy()
            new_columns.append(col_copy)
    # Carry over foreign key constraints unchanged.
    for key in meta.tables[table_name].foreign_keys:
        constraint = key.constraint
        con_copy = constraint.copy()
        new_columns.append(con_copy)
    for index in meta.tables[table_name].indexes:
        # If this is a single column index for a changed column, don't
        # copy it because we may already be creating a new version of
        # it (or removing it).
        idx_columns = [col.name for col in index.columns]
        if len(idx_columns)==1 and idx_columns[0] in changed_columns.keys():
            continue
        # Otherwise, recreate the index.
        indexes.append((index.name,
                        table_name,
                        [col.name for col in index.columns],
                        index.unique))
    # create temp table
    tmp_table_name = "%s_%s" % (table_name, six.text_type(uuid.uuid4()))
    op.create_table(tmp_table_name, *new_columns)
    meta.reflect()
    try:
        # copy data from the old table to the temp one
        sql_select = sqlalchemy.sql.select(old_columns)
        connection.execute(sqlalchemy.sql.insert(meta.tables[tmp_table_name])
                           .from_select(column_names, sql_select))
    except Exception:
        # Clean up the temp table on any copy failure, then re-raise.
        op.drop_table(tmp_table_name)
        raise
    # drop the old table and rename temp table to the old table name
    op.drop_table(table_name)
    op.rename_table(tmp_table_name, table_name)
    # (re-)create indexes
    for index in indexes:
        op.create_index(op.f(index[0]), index[1], index[2], unique=index[3])
| Python | 0 | |
489d883af246e7de727ea14e01ae4a0cd17f88eb | fix emoji on python3.4 | limbo/plugins/emoji.py | limbo/plugins/emoji.py | """!emoji <n> will return n random emoji"""
import re
import random
from emojicodedict import emojiCodeDict
def randomelt(dic):
    """Return the value stored under a uniformly random key of *dic*.

    Uses random.choice instead of manual randint/index arithmetic — same
    uniform distribution, fewer moving parts.  Raises IndexError for an
    empty dict (the original raised ValueError from randint(0, -1)).
    """
    return dic[random.choice(list(dic))]
def emoji(n=1):
    """Return a string made of *n* randomly chosen emoji characters."""
    return "".join(randomelt(emojiCodeDict) for _ in range(n))
def on_message(msg, server):
    """Slack handler: reply with n random emoji to "!emoji <n>" messages."""
    text = msg.get("text", "")
    # NOTE(review): the (\d+)* quantifier repeats the capture group; only
    # the last repetition is kept — (\d+)? was probably intended.
    match = re.findall(r"(!emoji)\s*(\d+)*", text)
    if not match:
        return
    # Second group holds the optional count; default to a single emoji.
    n = 1 if not match[0][1] else int(match[0][1])
    return emoji(n)
| """!emoji <n> will return n random emoji"""
import re
import random
from emojicodedict import emojiCodeDict
def randomelt(dic):
keys = dic.keys()
i = random.randint(0, len(keys) - 1)
return dic[keys[i]]
def emoji(n=1):
emoji = []
for i in range(n):
emoji.append(randomelt(emojiCodeDict))
return "".join(emoji)
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"(!emoji)\s*(\d+)*", text)
if not match:
return
n = 1 if not match[0][1] else int(match[0][1])
return emoji(n)
| Python | 0 |
b35908d8ed8257bfde75953c360112f87c0eccd3 | add api/urls.py | django_comments_xtd/api/urls.py | django_comments_xtd/api/urls.py | from django.urls import path, re_path
from .views import (
CommentCount, CommentCreate, CommentList,
CreateReportFlag, ToggleFeedbackFlag,
preview_user_avatar,
)
# Route table for the django-comments-xtd REST API.
urlpatterns = [
    # POST a new comment.
    path('comment/', CommentCreate.as_view(),
         name='comments-xtd-api-create'),
    # Preview the avatar of the commenting user.
    path('preview/', preview_user_avatar,
         name='comments-xtd-api-preview'),
    # content_type is "app-model"; object_pk identifies the target object.
    re_path(r'^(?P<content_type>\w+[-]{1}\w+)/(?P<object_pk>[-\w]+)/$',
            CommentList.as_view(), name='comments-xtd-api-list'),
    re_path(
        r'^(?P<content_type>\w+[-]{1}\w+)/(?P<object_pk>[-\w]+)/count/$',
        CommentCount.as_view(), name='comments-xtd-api-count'),
    # Like/dislike toggle and abuse-report endpoints.
    path('feedback/', ToggleFeedbackFlag.as_view(),
         name='comments-xtd-api-feedback'),
    path('flag/', CreateReportFlag.as_view(),
         name='comments-xtd-api-flag'),
]
| Python | 0 | |
8e422c867f25424fbc2d95e4a11cb76ea4de66ac | Create land.py | objects/land.py | objects/land.py | {"rows":[
{"C": [
{"tile_start": "6C","tile_end": "21C", "side": NULL},
{"tile_start": "26C","tile_end": "57C", "side": NULL},
]},
{"D": [
{"tile_start": "20D","tile_end": "20D", "side": "USA"},
{"tile_start": "38D","tile_end": "42D", "side": "USA"},
{"tile_start": "44D","tile_end": "56D", "side": "USA"}
]},
{"E": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"F": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"G": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"H": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"I": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"J": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"K": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"L": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"M": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"N": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"O": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"P": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"Q": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"R": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"S": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"T": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"U": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"V": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"W": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]},
{"X": [
{"tile_start": "3W","tile_end": "16W", "side": "USA"}
]}
]}
| Python | 0.000001 | |
4a9a844353a565a596148e31c17dad6b57cda081 | Add text encoding functionality. | txtencoding.py | txtencoding.py | #!/usr/bin/env python3
from chardet.universaldetector import UniversalDetector
class TxtEncoding:
    """Detect the character encoding of text files using chardet."""

    def __init__(self):
        # inspired by https://chardet.readthedocs.org/en/latest/usage.html#example-detecting-encodings-of-multiple-files
        self.detector = UniversalDetector()

    def detectEncoding(self, fname):
        '''Detect the encoding of file fname.

        Returns a dictionary with {'encoding', 'confidence'} fields.'''
        # Reset so the detector can be reused across multiple files.
        self.detector.reset()
        with open(fname, 'rb') as f:
            for line in f:
                self.detector.feed(line)
                # Stop early once the detector is confident.
                if self.detector.done: break
        self.detector.close()
        return self.detector.result
| Python | 0.000001 | |
ff7d96204d528e65faec8312e98fd727bd163d08 | Save and load files. | scr/model/model.py | scr/model/model.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
Model implementation
"""
import json
from pathlib import Path
# File extension used for all saved/loaded data files.
_EXTENSION = '.json'


def save(data, file_name, folder='', home_path=Path.home()):
    """Serialize *data* as JSON to home_path/folder/file_name + '.json'.

    Prompts interactively (via input()) when the target folder does not
    exist or the target file already exists.

    NOTE(review): the ``home_path=Path.home()`` default is evaluated once
    at import time; kept as-is for interface compatibility.
    """
    fp = Path(home_path, folder)
    while not fp.exists():
        user_input = input("This directory doesn't exist. Do you want create it? [yes]/no: ")
        if _user_decision(user_input):
            fp.mkdir()
        else:
            folder = input('Write new name: ')
            fp = Path(home_path, folder)

    fp = Path(home_path, folder, file_name + _EXTENSION)
    while fp.exists():
        user_input = input('This file already exists. Do you want rename it? [yes]/no: ')
        if _user_decision(user_input):
            name = input('Write new file name: ')
            # Bug fix: the original built the renamed path without the
            # '.json' extension, so renamed files could not be found by
            # load() later.
            fp = Path(home_path, folder, name + _EXTENSION)
        else:
            break

    print('File saved in: ', fp)
    # Context manager guarantees the file is closed even if dump() raises.
    with fp.open('w') as stream:
        json.dump(data, stream, indent=4, ensure_ascii=False, sort_keys=True)
    print('Save successfully!')
def load(file_name, folder='', home_path=Path.home()):
    """Read home_path/folder/file_name + '.json' and return the parsed data.

    Prints a diagnostic and returns an empty dict when the path is
    missing, points at a directory, or is otherwise invalid.
    """
    target = Path(home_path, folder, file_name + _EXTENSION)
    if target.exists() and target.is_file():
        with target.open('r') as stream:
            loaded = json.load(stream)
        print(file_name, 'loaded successfully')
        return loaded

    bare_path = Path(home_path, folder, file_name)
    if target.exists():
        print('Invalid path')
    elif bare_path.is_dir():
        print("It's a folder, not a file")
    else:
        print("This file doesn't exist")
    print('Empty data is loaded')
    return {}
def _user_decision(answer, default_answer='yes'):
if answer is '':
answer = default_answer
if answer == 'yes':
return True
elif answer == 'no':
return False
else:
print('Invalid answer')
answer = input('Please repeat the answer:')
return _user_decision(answer, default_answer)
| Python | 0 | |
24210f31a5b54adf1b3b038fdad73b679656217c | fix mr_unit.py so that it records test failures properly | scripts/mr_unit.py | scripts/mr_unit.py | import sys, os
import csv
import MySQLdb
import traceback
def add_perf_results_to_mr_unit(args):
mr_unit = MySQLdb.connect(host='mr-0x8', user='root', passwd=args[1], db='mr_unit')
mr_unit.autocommit(False)
cursor = mr_unit.cursor()
try:
for row in csv.reader(file(os.path.join(args[2], "perf.csv"))):
row = [r.strip() for r in row]
row[3] = row[3].split("/")[-1]
cursor.execute('INSERT INTO perf(date, build_id, git_hash, git_branch, machine_ip, test_name, start_time, '
'end_time, pass, ncpu, os, job_name) VALUES("{0}", "{1}", "{2}", "{3}", "{4}", "{5}", "{6}"'
', "{7}", "{8}", "{9}", "{10}", "{11}")'.format(*row))
mr_unit.commit()
except:
traceback.print_exc()
mr_unit.rollback()
assert False, "Failed to add performance results to mr_unit!"
if __name__ == '__main__':
add_perf_results_to_mr_unit(sys.argv) | import sys, os
import csv
import MySQLdb
import traceback
def add_perf_results_to_mr_unit(args):
    """Insert rows from <args[2]>/perf.csv into the mr_unit.perf MySQL table.

    args is an argv-style list: args[1] is the database password, args[2]
    the directory containing perf.csv.  Commits on success, rolls back and
    fails an assertion on any error.  (Python 2 code: uses file().)
    """
    mr_unit = MySQLdb.connect(host='mr-0x8', user='root', passwd=args[1], db='mr_unit')
    mr_unit.autocommit(False)
    cursor = mr_unit.cursor()
    try:
        for row in csv.reader(file(os.path.join(args[2], "perf.csv"))):
            row = [r.strip() for r in row]
            # Keep only the leaf of the git branch ref (e.g. refs/heads/x -> x).
            row[3] = row[3].split("/")[-1]
            # Convert the pass flag to an unquoted SQL boolean literal so
            # failures are recorded correctly.
            row[8] = "TRUE" if row[8] == "1" else "FALSE"
            # NOTE(review): values are interpolated via str.format rather
            # than parameterized queries — SQL injection risk if perf.csv
            # content is not trusted.
            cursor.execute('INSERT INTO perf(date, build_id, git_hash, git_branch, machine_ip, test_name, start_time, '
                           'end_time, pass, ncpu, os, job_name) VALUES("{0}", "{1}", "{2}", "{3}", "{4}", "{5}", "{6}"'
                           ', "{7}", {8}, "{9}", "{10}", "{11}")'.format(*row))
        mr_unit.commit()
    except:
        traceback.print_exc()
        mr_unit.rollback()
        assert False, "Failed to add performance results to mr_unit!"
if __name__ == '__main__':
add_perf_results_to_mr_unit(sys.argv) | Python | 0.000001 |
f424001f409fd35b0e62be9a82d62b21b438e082 | Add missing comma | onetime/urls.py | onetime/urls.py | from django.conf.urls.defaults import *
from django.views.generic.simple import redirect_to
from onetime.views import cleanup, login
urlpatterns = patterns('',
(r'^cleanup/$', cleanup),
(r'^(?P<key>[a-z0-9+])$', login),
(r'^$', redirect_to, {'url': None}),
)
| from django.conf.urls.defaults import *
from django.views.generic.simple import redirect_to
from onetime.views import cleanup, login
urlpatterns = patterns(''
(r'^cleanup/$', cleanup),
(r'^(?P<key>[a-z0-9+])$', login),
(r'^$', redirect_to, {'url': None}),
)
| Python | 0.999998 |
6d9bf98b5c077421b2cdaca7ae9adf39f4ed475c | Add a migration to rename "type" field to "status" | src/kanboard/migrations/0002_type_to_status.py | src/kanboard/migrations/0002_type_to_status.py | from south.db import db
from django.db import models
from kanboard.models import *
class Migration:
def forwards(self, orm):
db.rename_column('kanboard_phase', 'type', 'status')
def backwards(self, orm):
db.rename_column('kanboard_phase', 'status', 'type')
models = {
'kanboard.board': {
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.card': {
'backlogged_at': ('django.db.models.fields.DateTimeField', [], {}),
'blocked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'blocked_because': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cards'", 'to': "orm['kanboard.Board']"}),
'color': ('django.db.models.fields.CharField', [], {'max_length': '7', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'done_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cards'", 'to': "orm['kanboard.Phase']"}),
'ready': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'size': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'started_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.phase': {
'board': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'phases'", 'to': "orm['kanboard.Board']"}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'limit': ('django.db.models.fields.SmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'order': ('django.db.models.fields.SmallIntegerField', [], {}),
'status': ('django.db.models.fields.CharField', [], {'default': "'progress'", 'max_length': '25'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '80'})
},
'kanboard.phaselog': {
'Meta': {'unique_together': "(('phase', 'date'),)"},
'count': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
'date': ('django.db.models.fields.DateField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phase': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'logs'", 'to': "orm['kanboard.Phase']"})
}
}
complete_apps = ['kanboard']
| Python | 0 | |
59f49535cc5f6ad1dbcb579e723d69a9aa1b2304 | add signal_comparison.py | metaseq/integration/signal_comparison.py | metaseq/integration/signal_comparison.py | """
Module for working with comparisons between two genomic_signal objects, e.g.,
genome-wide correlation
"""
import sys
import pybedtools
import itertools
import numpy as np
def compare(signal1, signal2, features, outfn, comparefunc=np.subtract,
        batchsize=5000, array_kwargs=None, verbose=False):
    """
    Compare two genomic signal objects over `features` and write the result
    as a bedGraph file.

    Features are consumed `batchsize` at a time; for each batch a normalized
    (reads-per-million-mapped-reads) array is built from each signal, the two
    arrays are combined with `comparefunc`, and every nonzero bin of every
    feature is written as one bedGraph line in `outfn`.

    The bin count in `array_kwargs` determines the output resolution: for
    windows of `windowsize` bp, use ``bins=windowsize/10`` for 10-bp bins or
    ``bins=windowsize`` for single-bp resolution (large files!).

    :param signal1: a genomic_signal object
    :param signal2: another genomic_signal object
    :param features: iterable of pybedtools.Interval objects; up to
        `batchsize` of them are held in memory at a time
    :param outfn: string filename for the output bedGraph
    :param comparefunc: binary function applied to the two normalized arrays
        (default ``np.subtract``); e.g. a log2-fold-change function
        ``lambda x, y: np.log2(x / y)`` also works
    :param batchsize: number of features to process per round
    :param array_kwargs: kwargs passed straight to genomic_signal.array;
        include `processes` and `chunksize` for parallel construction
    :param verbose: print progress to stdout
    """
    if array_kwargs is None:
        # BUG FIX: the original passed **None straight into .array(),
        # raising TypeError whenever the default was used.
        array_kwargs = {}

    fout = open(outfn, 'w')
    fout.write('track type=bedGraph\n')

    def _process_batch(batch):
        # Build normalized arrays for this batch and emit bedGraph lines.
        if verbose:
            print('working on batch of %s' % len(batch))
            sys.stdout.flush()
        arr1 = signal1.array(batch, **array_kwargs).astype(float)
        arr2 = signal2.array(batch, **array_kwargs).astype(float)
        arr1 /= signal1.million_mapped_reads()
        arr2 /= signal2.million_mapped_reads()
        compared = comparefunc(arr1, arr2)
        for feature, row in zip(batch, compared):
            # BUG FIX: the original tested ``sum(row) == 0``, which also
            # skips rows whose nonzero values cancel out (e.g. [1, -1]).
            if not np.any(row):
                continue
            start = feature.start
            # Integer bin width, as in the original Python-2 division.
            binsize = len(feature) // len(row)
            for score in row:
                stop = start + binsize
                if score != 0:
                    fout.write('\t'.join([
                        feature.chrom,
                        str(start),
                        str(stop),
                        str(score)]) + '\n')
                start = stop

    this_batch = []
    for feature in features:
        this_batch.append(feature)
        if len(this_batch) >= batchsize:
            # BUG FIX: the original dropped the feature that triggered the
            # flush and silently discarded the final partial batch.
            _process_batch(this_batch)
            this_batch = []
    if this_batch:
        _process_batch(this_batch)
    fout.close()
if __name__ == "__main__":
import metaseq
ip_bam = metaseq.genomic_signal(
metaseq.example_filename(
'wgEncodeUwTfbsK562CtcfStdAlnRep1.bam'), 'bam')
control_bam = metaseq.genomic_signal(
metaseq.example_filename(
'wgEncodeUwTfbsK562InputStdAlnRep1.bam'), 'bam')
BINSIZE = 10
WINDOWSIZE=10000
BINS = WINDOWSIZE / BINSIZE
features = pybedtools.BedTool()\
.window_maker(genome='hg19', w=WINDOWSIZE)\
.filter(lambda x: x.chrom=='chr19')
result = compare(
signal1=ip_bam,
signal2=control_bam,
features=features,
outfn='diffed.bedgraph',
array_kwargs=dict(bins=BINS, processes=6, chunksize=50), verbose=True)
| Python | 0.000003 | |
8d88bf0808c5249d2c1feace5b8a1db1679e44b6 | Create tests_unit.py | tests_unit.py | tests_unit.py | """
Nmeta2 Unit Tests
Uses pytest, install with:
sudo apt-get install python-pytest
To run test, type in:
py.test tests_unit.py
"""
#*** Testing imports:
import mock
import unittest
#*** Ryu imports:
from ryu.base import app_manager # To suppress cyclic import
from ryu.controller import controller
from ryu.controller import handler
from ryu.ofproto import ofproto_v1_3_parser
from ryu.ofproto import ofproto_v1_2_parser
from ryu.ofproto import ofproto_v1_0_parser
from ryu.app.wsgi import ControllerBase
from ryu.app.wsgi import WSGIApplication
from ryu.app.wsgi import route
#*** JSON imports:
import json
from json import JSONEncoder
#*** nmeta2 imports:
import switch_abstraction
import config
import api
#*** Instantiate Config class:
# Shared nmeta2 configuration object, passed to every object under test.
_config = config.Config()
#======================== tc_policy.py Unit Tests ============================
#*** Instantiate class:
# Module-level fixtures: a Switches abstraction plus mock socket/address
# objects used below to construct fake Ryu datapaths.
switches = switch_abstraction.Switches(_config)
sock_mock = mock.Mock()
addr_mock = mock.Mock()
#*** Test Switches and Switch classes that abstract OpenFlow switches:
def test_switches():
    """Exercise Switches.add() and DPID lookup against a fake datapath."""
    # Patch out set_state so creating a Datapath performs no handshake I/O.
    with mock.patch('ryu.controller.controller.Datapath.set_state'):
        fake_dp = controller.Datapath(sock_mock, addr_mock)
        # Registering the datapath reports one known switch.
        assert switches.add(fake_dp) == 1
        # The same object must come back when queried by its DPID.
        assert switches.datapath(fake_dp.id) == fake_dp
#======================== api.py Unit Tests ============================
class _TestController(ControllerBase):
    """Minimal WSGI controller that verifies init data is passed through."""
    def __init__(self, req, link, data, **config):
        super(_TestController, self).__init__(req, link, data, **config)
        # BUG FIX: the original called nose's ``eq_``, which is never
        # imported in this module and raised NameError at runtime; a plain
        # assert performs the same check.
        assert data['test_param'] == 'foo'
class Test_wsgi(unittest.TestCase):
    """
    Test case for running WSGI controller for API testing
    """
    def setUp(self):
        # Fresh WSGI application per test.
        wsgi = WSGIApplication()
        #*** Instantiate API class:
        # NOTE(review): the first positional argument handed to api.Api is
        # this TestCase instance - presumably Api expects the owning app
        # object here; confirm against api.Api's signature.
        self.api = api.Api(self, _config, wsgi)
def test_decode_JSON():
    """Check api.JSON_Body against well-formed and malformed input."""
    # Well-formed JSON decodes cleanly and supports key lookup, with a
    # zero default for missing keys:
    valid = api.JSON_Body('{\"foo\": \"123\"}')
    assert not valid.error
    assert valid.error == ""
    assert valid.json == {'foo': '123'}
    assert valid['foo'] == '123'
    assert valid['bar'] == 0
    # Malformed input yields an empty dict plus a JSON error message:
    broken = api.JSON_Body("foo, bar=99")
    assert broken.json == {}
    assert broken.error == '{\"Error\": \"Bad JSON\"}'
| Python | 0.000003 | |
cba429780061bcdafde6f2bc799e74106e2cc336 | Create textevolve.py | textevolve.py | textevolve.py | '''
Evolve a piece of text with a simple evolutionary algorithm
Author: Saquib
7/27/13
'''
import random
def fitness(source, target):
    """Return the sum of squared character-code differences.

    Lower is better; 0 means `source` matches `target` over the length
    of `source`.
    """
    total = 0
    for idx in range(len(source)):
        total += (ord(target[idx]) - ord(source[idx])) ** 2
    return total
def mutate(source):
    """Return a copy of `source` with one random character shifted by -1, 0, or +1."""
    chars = list(source)
    pos = random.randint(0, len(chars) - 1)
    chars[pos] = chr(ord(chars[pos]) + random.randint(-1, 1))
    return ''.join(chars)
source = ";wql* opqlq"
target = "hello world"
fitval = fitness(source, target)
i = 0
while True:
i += 1
m = mutate(source)
fitval_m = fitness(m, target)
if fitval_m < fitval:
fitval = fitval_m
source = m
print "%5i %5i %14s" % (i, fitval_m, m)
if fitval == 0:
break
| Python | 0.000001 | |
ff98bdf9ce263648de784183ad5984864f9d387a | Add ref create api test | tests/api/test_refs.py | tests/api/test_refs.py | async def test_create(spawn_client, test_random_alphanumeric, static_time):
    # Authenticated client whose session carries the ref-creation permission.
    client = await spawn_client(authorize=True, permissions=["create_ref"])
    # Request payload for the new reference.
    data = {
        "name": "Test Viruses",
        "description": "A bunch of viruses used for testing",
        "data_type": "genome",
        "organism": "virus",
        "public": True
    }
    resp = await client.post("/api/refs", data)
    # Creation must report 201 and point at the newly minted resource id
    # (taken from the deterministic id-generator fixture).
    assert resp.status == 201
    assert resp.headers["Location"] == "/api/refs/" + test_random_alphanumeric.history[0]
    # Response document echoes the payload plus server-populated fields:
    # id, creation timestamp, creating user, and the initial rights list
    # granting the creator full control.
    assert await resp.json() == dict(
        data,
        id=test_random_alphanumeric.history[0],
        created_at=static_time.iso,
        user={
            "id": "test"
        },
        users=[{
            "build": True,
            "id": "test",
            "modify": True,
            "modify_kind": True,
            "remove": True
        }]
    )
| Python | 0.000001 | |
7d21b55f2de7cd2c34cd3cd985824178d382398d | add 'stages' code | util/stages.py | util/stages.py | from astrometry.util.file import *
class CallGlobal(object):
def __init__(self, pattern, *args, **kwargs):
self.pat = pattern
self.args = args
self.kwargs = kwargs
def __call__(self, stage, kwargs):
func = self.pat % stage
kwa = self.kwargs.copy()
kwa.update(kwargs)
return func(*self.args, **kwa)
def runstage(stage, picklepat, stagefunc, force=None, prereqs=None,
    **kwargs):
    """Run stage `stage` of a pickle-cached pipeline and return its result.

    Each stage's result is cached in ``picklepat % stage``; if that file
    exists (and the stage is not listed in `force`) the cached result is
    returned without recomputation.  Otherwise the prerequisite stage --
    ``prereqs.get(stage, stage - 1)``, where ``None`` means "no
    prerequisite" -- is run first and its result dict is passed to
    `stagefunc` as keyword arguments.

    :param stage: integer stage number; stages <= 0 have no prerequisite
    :param picklepat: filename pattern with one '%'-style slot for the stage
    :param stagefunc: callable ``stagefunc(stage, **prereq_result)``
    :param force: stages to recompute even when a pickle exists
    :param prereqs: optional {stage: prerequisite-stage-or-None} overrides
    :param kwargs: forwarded to the recursive prerequisite calls
    """
    # BUG FIX: the original used mutable defaults (force=[], prereqs={});
    # normalize None here instead so no state is shared between calls.
    force = [] if force is None else force
    prereqs = {} if prereqs is None else prereqs
    print('Runstage %s' % stage)
    pfn = picklepat % stage
    if os.path.exists(pfn):
        if stage in force:
            print('Ignoring pickle %s and forcing stage %s' % (pfn, stage))
        else:
            print('Reading pickle %s' % pfn)
            return unpickle_from_file(pfn)
    if stage <= 0:
        P = {}
    else:
        prereq = prereqs.get(stage, stage - 1)
        if prereq is None:
            # Explicit "no prerequisite" marker.  (The flattened original
            # had a duplicated, unreachable `else:` here; this conditional
            # is the intended structure.)
            P = {}
        else:
            P = runstage(prereq, picklepat, stagefunc,
                         force=force, prereqs=prereqs, **kwargs)
    print('Running stage %s' % stage)
    R = stagefunc(stage, **P)
    print('Stage %s finished' % stage)
    print('Saving pickle %s' % pfn)
    pickle_to_file(R, pfn)
    print('Saved %s' % pfn)
    return R
| Python | 0.000123 | |
6a9447b6fb92369496178b1a379c724dfa9eb7aa | add management command to bootstrap Twilio gateway fees for incoming messages | corehq/apps/smsbillables/management/commands/bootstrap_twilio_gateway_incoming.py | corehq/apps/smsbillables/management/commands/bootstrap_twilio_gateway_incoming.py | import logging
from django.core.management.base import LabelCommand
from corehq.apps.accounting.models import Currency
from corehq.apps.twilio.models import TwilioBackend
from corehq.apps.sms.models import INCOMING
from corehq.apps.smsbillables.models import SmsGatewayFee, SmsGatewayFeeCriteria
logger = logging.getLogger('accounting')
def bootstrap_twilio_gateway_incoming(orm):
currency_class = orm['accounting.Currency'] if orm else Currency
sms_gateway_fee_class = orm['smsbillables.SmsGatewayFee'] if orm else SmsGatewayFee
sms_gateway_fee_criteria_class = orm['smsbillables.SmsGatewayFeeCriteria'] if orm else SmsGatewayFeeCriteria
# https://www.twilio.com/sms/pricing/us
SmsGatewayFee.create_new(
TwilioBackend.get_api_id(),
INCOMING,
0.0075,
country_code=None,
currency=currency_class.objects.get(code="USD"),
fee_class=sms_gateway_fee_class,
criteria_class=sms_gateway_fee_criteria_class,
)
logger.info("Updated INCOMING Twilio gateway fees.")
class Command(LabelCommand):
help = "bootstrap incoming Twilio gateway fees"
args = ""
label = ""
def handle(self, *args, **options):
bootstrap_twilio_gateway_incoming(None)
| Python | 0 | |
ce2eb871b852fafa58c0d92761b38d158b8bbf1e | Add some tests for MultiQuerySet. | utils/tests.py | utils/tests.py | # -*- coding: utf-8 -*-
# Universal Subtitles, universalsubtitles.org
#
# Copyright (C) 2011 Participatory Culture Foundation
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see
# http://www.gnu.org/licenses/agpl-3.0.html.
from django.test import TestCase
from videos.models import Video
from utils.multi_query_set import MultiQuerySet
class MultiQuerySetTest(TestCase):
fixtures = ['test.json']
def test_full(self):
self.assertEqual(list(Video.objects.all()),
list(MultiQuerySet(Video.objects.all())),
"Full, single MQS didn't match full QS.")
self.assertEqual(list(Video.objects.all()),
list(MultiQuerySet(Video.objects.none(),
Video.objects.all(),
Video.objects.none())),
"Full MQS with blanks didn't match full QS.")
self.assertEqual(list(Video.objects.all()) + list(Video.objects.all()),
list(MultiQuerySet(Video.objects.none(),
Video.objects.all(),
Video.objects.none(),
Video.objects.all())),
"Double MQS with blanks didn't match double full QS.")
def test_slice(self):
qs = Video.objects.all()
mqs = MultiQuerySet(Video.objects.all())
self.assertEqual(list(qs[0:1]),
list(mqs[0:1]),
"MQS[:1] failed.")
self.assertEqual(list(qs[0:2]),
list(mqs[0:2]),
"MQS[:2] failed.")
self.assertEqual(list(qs[0:3]),
list(mqs[0:3]),
"MQS[:3] (out-of-bounds endpoint) failed.")
self.assertEqual(list(qs[1:3]),
list(mqs[1:3]),
"MQS[1:3] failed.")
self.assertEqual(list(qs[2:3]),
list(mqs[2:3]),
"MQS[2:3] failed.")
self.assertEqual(list(qs[1:1]),
list(mqs[1:1]),
"MQS[1:1] (empty slice) failed.")
def test_slice_multiple(self):
qs = list(Video.objects.all())
qs = qs + qs + qs
mqs = MultiQuerySet(Video.objects.all(),
Video.objects.all(),
Video.objects.all())
self.assertEqual(qs[0:3],
list(mqs[0:3]),
"MQS[:3] failed.")
self.assertEqual(qs[0:6],
list(mqs[0:6]),
"MQS[:6] (entire range) failed.")
self.assertEqual(qs[0:7],
list(mqs[0:7]),
"MQS[:7] (out-of-bounds endpoint) failed.")
self.assertEqual(qs[1:3],
list(mqs[1:3]),
"MQS[1:3] failed.")
self.assertEqual(qs[1:6],
list(mqs[1:6]),
"MQS[1:6] (entire range) failed.")
self.assertEqual(qs[1:7],
list(mqs[1:7]),
"MQS[1:7] (out-of-bounds endpoint) failed.")
self.assertEqual(qs[3:3],
list(mqs[3:3]),
"MQS[3:3] failed.")
self.assertEqual(qs[3:6],
list(mqs[3:6]),
"MQS[3:6] (entire range) failed.")
self.assertEqual(qs[3:7],
list(mqs[3:7]),
"MQS[3:7] (out-of-bounds endpoint) failed.")
| Python | 0 | |
adee3f0763a1119cfac212ce0eca88a08f7c65fa | Create masterStock.py | masterStock.py | masterStock.py | import requests
from bs4 import BeautifulSoup
import json
def loadMasterStock():
url = "http://www.supremenewyork.com/mobile_stock.json"
user = {"User-Agent": "Mozilla/5.0 (iPhone; CPU iPhone OS 10_2_1 like Mac OS X) AppleWebKit/602.4.6 (KHTML, like Gecko) Version/10.0 Mobile/14D27 Safari/602.1"}
# user = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"}
r = requests.get(url, headers=user)
masterStock = json.loads(r.text)
with open("masterstock.txt", 'w') as outfile:
json.dump(masterStock, outfile, indent=4, sort_keys=True)
print("Saved to masterstock.txt")
if __name__ == '__main__':
loadMasterStock()
| Python | 0 | |
8397cd87fc05949f2f1b8d24505ae2b817f5dda1 | Add unittest for classification_summary | tests/chainer_tests/functions_tests/evaluation_tests/test_classification_summary.py | tests/chainer_tests/functions_tests/evaluation_tests/test_classification_summary.py | import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import type_check
def recall(preds, ts, dtype, label_num):
tp = numpy.zeros((label_num,), dtype=numpy.int32)
support = numpy.zeros((label_num,), dtype=numpy.int32)
for p, t in zip(preds.ravel(), ts.ravel()):
support[t] += 1
if p == t:
tp[t] += 1
return dtype(tp) / support
def precision(preds, ts, dtype, label_num):
tp = numpy.zeros((label_num,), dtype=numpy.int32)
relevant = numpy.zeros((label_num,), dtype=numpy.int32)
for p, t in zip(preds.ravel(), ts.ravel()):
relevant[p] += 1
if p == t:
tp[p] += 1
return dtype(tp) / relevant
def f1_score(precision, recall, beta=1.0):
beta_square = beta * beta
return (1 + beta_square) * precision * recall / (beta_square * precision + recall)
def support(ts, dtype, label_num):
ret = numpy.zeros((label_num,), dtype=numpy.int32)
for t in ts.ravel():
ret[t] += 1
return ret
@testing.parameterize(
*testing.product_dict(
[{'y_shape': (30, 3), 't_shape': (30,)},
{'y_shape': (30, 3, 5), 't_shape': (30, 5)}],
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64}],
[{'beta': 1.0},
{'beta': 2.0}]
)
)
class TestClassificationSummary(unittest.TestCase):
def setUp(self):
self.label_num = 3
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.t = numpy.random.randint(0, self.label_num, self.t_shape).astype(numpy.int32)
self.check_forward_options = {}
if self.dtype == numpy.float16:
self.check_forward_options = {'atol': 1e-4, 'rtol': 1e-3}
def check_forward(self, xp):
y = chainer.Variable(xp.asarray(self.y))
t = chainer.Variable(xp.asarray(self.t))
p_acutual, r_acutual, f1_actual, s_actual = F.classification_summary(
y, t, self.label_num, self.beta)
pred = self.y.argmax(axis=1).reshape(self.t.shape)
p_expect = precision(pred, self.t, self.dtype, self.label_num)
r_expect = recall(pred, self.t, self.dtype, self.label_num)
f1_expect = f1_score(p_expect, r_expect, self.beta)
s_expect = support(self.t, self.dtype, self.label_num)
chainer.testing.assert_allclose(f1_actual.data, f1_expect,
**self.check_forward_options)
def test_forward_cpu(self):
self.check_forward(numpy)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.cupy)
| Python | 0.000034 | |
570fdc71697fba6180787b9309d3a2d49f512ed2 | Add queueing python script | scripts/queue_sync_to_solr.py | scripts/queue_sync_to_solr.py | #! /bin/env python
# -*- coding: utf-8 -*-
import sys
import os
import logbook
from harvester.config import config as config_harvest
from redis import Redis
from rq import Queue
EMAIL_RETURN_ADDRESS = os.environ.get('EMAIL_RETURN_ADDRESS',
'example@example.com')
# csv delim email addresses
EMAIL_SYS_ADMIN = os.environ.get('EMAIL_SYS_ADMINS', None)
IMAGE_HARVEST_TIMEOUT = 144000
def def_args():
import argparse
parser = argparse.ArgumentParser(description='Harvest a collection')
parser.add_argument('rq_queue', type=str, help='RQ Queue to put job in')
parser.add_argument(
'collection_key',
type=int,
help='URL for the collection Django tastypie api resource')
return parser
def queue_image_harvest(redis_host,
redis_port,
redis_password,
redis_timeout,
rq_queue,
collection_key,
url_couchdb=None,
object_auth=None,
get_if_object=False,
harvest_timeout=IMAGE_HARVEST_TIMEOUT):
rQ = Queue(
rq_queue,
connection=Redis(
host=redis_host,
port=redis_port,
password=redis_password,
socket_connect_timeout=redis_timeout))
job = rQ.enqueue_call(
func=harvester.scripts.sync_couch_collection_to_solr.main
kwargs=dict(
collection_key=collection_key,
)
return job
def main(user_email,
collection_key,
log_handler=None,
mail_handler=None,
config_file='akara.ini',
rq_queue=None,
**kwargs):
'''Runs a UCLDC sync to solr for collection key'''
emails = [user_email]
if EMAIL_SYS_ADMIN:
emails.extend([u for u in EMAIL_SYS_ADMIN.split(',')])
if not mail_handler:
mail_handler = logbook.MailHandler(
EMAIL_RETURN_ADDRESS, emails, level='ERROR', bubble=True)
mail_handler.push_application()
config = config_harvest(config_file=config_file)
if not log_handler:
log_handler = logbook.StderrHandler(level='DEBUG')
log_handler.push_application()
print config
# the image_harvest should be a separate job, with a long timeout
job = queue_image_harvest(
config['redis_host'],
config['redis_port'],
config['redis_password'],
config['redis_connect_timeout'],
rq_queue=rq_queue,
collection_key=collection_key,
**kwargs)
log_handler.pop_application()
mail_handler.pop_application()
if __name__ == '__main__':
parser = def_args()
args = parser.parse_args(sys.argv[1:])
if not args.user_email or not args.url_api_collection:
parser.print_help()
sys.exit(27)
kwargs = {}
if args.timeout:
kwargs['harvest_timeout'] = int(args.timeout)
if args.get_if_object:
kwargs['get_if_object'] = args.get_if_object
main(
args.collection_key,
rq_queue=args.rq_queue,
**kwargs)
# Copyright ยฉ 2016, Regents of the University of California
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the University of California nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
| Python | 0.000001 | |
d06b3b41f786fc7cd2c05a6215fed026eef1cb8f | Add misc plugin. | curiosity/plugins/misc.py | curiosity/plugins/misc.py | import curio
import sys
import curious
from curious.commands import command
from curious.commands.context import Context
from curious.commands.plugin import Plugin
from curious.dataclasses.embed import Embed
class Misc(Plugin):
"""
Miscellaneous commands.
"""
@command()
async def info(self, ctx: Context):
"""
Shows info about the bot.
"""
em = Embed(title=ctx.guild.me.user.name, description="The official bot for the curious library")
em.add_field(name="Curious version", value=curious.__version__)
em.add_field(name="Curio version", value=curio.__version__)
em.add_field(name="CPython version", value="{}.{}.{}".format(*sys.version_info[0:3]))
# bot stats
em.add_field(name="Shard ID", value=ctx.event_context.shard_id)
em.add_field(name="Shard count", value=ctx.event_context.shard_count)
em.add_field(name="Heartbeats", value=ctx.bot._gateways[ctx.event_context.shard_id].heartbeats)
await ctx.channel.send(embed=em)
| Python | 0 | |
3d7a1ad963a11c8fc425c7d82f5e0f8f877dc861 | Add Python benchmark | lib/node_modules/@stdlib/math/base/special/atan2/benchmark/python/benchmark.py | lib/node_modules/@stdlib/math/base/special/atan2/benchmark/python/benchmark.py | #!/usr/bin/env python
"""Benchmark atan2."""
import timeit
name = "atan2"
repeats = 3
iterations = 1000000
def print_version():
"""Print the TAP version."""
print("TAP version 13")
def print_summary(total, passing):
"""Print the benchmark summary.
# Arguments
* `total`: total number of tests
* `passing`: number of passing tests
"""
print("#")
print("1.." + str(total)) # TAP plan
print("# total " + str(total))
print("# pass " + str(passing))
print("#")
print("# ok")
def print_results(elapsed):
"""Print benchmark results.
# Arguments
* `elapsed`: elapsed time (in seconds)
# Examples
``` python
python> print_results(0.131009101868)
```
"""
rate = iterations / elapsed
print(" ---")
print(" iterations: " + str(iterations))
print(" elapsed: " + str(elapsed))
print(" rate: " + str(rate))
print(" ...")
def benchmark():
"""Run the benchmark and print benchmark results."""
setup = "from math import atan2; from random import random;"
stmt = "y = atan2(100.0*random()-0.0, 100.0*random()-0.0)"
t = timeit.Timer(stmt, setup=setup)
print_version()
for i in xrange(3):
print("# python::" + name)
elapsed = t.timeit(number=iterations)
print_results(elapsed)
print("ok " + str(i+1) + " benchmark finished")
print_summary(repeats, repeats)
def main():
"""Run the benchmark."""
benchmark()
if __name__ == "__main__":
main()
| Python | 0.000138 | |
14160c8ee729a094b6a980ed7c94b37d11f6dfba | Create xor_recursive.py | tests/xor_recursive.py | tests/xor_recursive.py | import sys
def xor(*store):
print("---------------recursive call----------------")
print(len(store))
if(len(store) == 2):
print("lowest level")
b = store[0]
a = store[1]
print(b)
print(a)
return bool((a or b) and not(a and b))
else:
print("middle level")
b = store[0]
remaining = store[1:]
print(b)
print(remaining)
return bool((xor(*remaining) or b) and not(xor(*remaining) and b))
if __name__ == '__main__':
print("This is a testfile only, not to be used in production.")
sys.exit()
print("Expecting False: %s"%xor(0, 0, 0, 0)) # False
print("Expecting True : %s"%xor(0, 0, 0, 1)) # True
print("Expecting True : %s"%xor(0, 0, 1, 0)) # True
print("Expecting False: %s"%xor(0, 0, 1, 1)) # False
print("Expecting True : %s"%xor(0, 1, 0, 0)) # True
print("Expecting False: %s"%xor(0, 1, 0, 1)) # False
print("Expecting False: %s"%xor(0, 1, 1, 0)) # False
print("Expecting True : %s"%xor(0, 1, 1, 1)) # True
print("Expecting True : %s"%xor(1, 0, 0, 0)) # True
print("Expecting False: %s"%xor(1, 0, 0, 1)) # False
print("Expecting False: %s"%xor(1, 0, 1, 0)) # False
print("Expecting True : %s"%xor(1, 0, 1, 1)) # True
print("Expecting False: %s"%xor(1, 1, 0, 0)) # False
print("Expecting True : %s"%xor(1, 1, 0, 1)) # True
print("Expecting True : %s"%xor(1, 1, 1, 0)) # True
print("Expecting False: %s"%xor(1, 1, 1, 1)) # False
| Python | 0 | |
d003babe55d8b7a202a50bc6eeb2e1113ef8247f | Add oeis plugin | plugins/oeis.py | plugins/oeis.py | import requests
import re
class Plugin:
limit = 5
def on_command(self, bot, msg, stdin, stdout, reply):
session = smartbot.utils.web.requests_session()
url = "http://oeis.org/search"
payload = {
"fmt": "text",
"q": " ".join(msg["args"][1:]),
}
response = session.get(url, params=payload)
if response.status_code == 200:
self.i = -1
# only process lines starting with a percent symbol
for line in filter(lambda l: l.startswith("%"), response.text.split("\n")):
# content default is set to None
flag, identifier, content, *_ = line.split(" ", 2) + [None]
# process the line
self.process(flag, identifier, content, stdout)
# stop when limit is reached
if self.i >= self.limit:
print("...", file=stdout)
break
def process(self, flag, identifier, content, stdout):
# increase the sequence number
if flag[1] == "I":
self.i += 1
# print formatted sequence
elif flag[1] == "S":
sequence = re.sub(",", ", ", content)
print("[{}] {}: {}...".format(self.i, identifier, sequence), file=stdout)
# print sequence name
elif flag[1] == "N":
print(content, file=stdout)
def on_help(self):
return "Usage: oeis <query> (see https://oeis.org/hints.html)"
| Python | 0 | |
c122db5ceda59d786bd550f586ea87d808595ab6 | Add a script to reimport the LGA boundaries from the GADM.org data | pombola/nigeria/management/commands/nigeria_update_lga_boundaries_from_gadm.py | pombola/nigeria/management/commands/nigeria_update_lga_boundaries_from_gadm.py | from django.contrib.gis.gdal import DataSource
from django.core.management import BaseCommand
from django.db import transaction
from mapit.management.command_utils import save_polygons, fix_invalid_geos_geometry
from mapit.models import Area, Type
class Command(BaseCommand):
help = "Update the Nigeria boundaries from GADM"
args = '<SHP FILENAME>'
def get_lga_area(self, lga_name, state_name):
lga_name_in_db = {
'Eastern Obolo': 'Eastern O bolo',
}.get(lga_name, lga_name)
# print "state:", state_name
kwargs = {
'type': self.lga_type,
'name__iexact': lga_name_in_db,
'parent_area__name': state_name,
}
try:
area = Area.objects.get(**kwargs)
except Area.DoesNotExist:
del kwargs['parent_area__name']
area = Area.objects.get(**kwargs)
return area
def fix_geometry(self, g):
# Make a GEOS geometry only to check for validity:
geos_g = g.geos
if not geos_g.valid:
geos_g = fix_invalid_geos_geometry(geos_g)
if geos_g is None:
print "The geometry was invalid and couldn't be fixed"
g = None
else:
g = geos_g.ogr
return g
def handle(self, filename, **options):
with transaction.atomic():
self.lga_type = Type.objects.get(code='LGA')
ds = DataSource(filename)
layer = ds[0]
for feature in layer:
lga_name = unicode(feature['NAME_2'])
state_name = unicode(feature['NAME_1'])
print "Updating LGA {0} in state {1}".format(
lga_name, state_name
)
area = self.get_lga_area(lga_name, state_name)
g = feature.geom.transform('4326', clone=True)
g = self.fix_geometry(g)
if g is None:
continue
poly = [g]
save_polygons({area.id: (area, poly)})
| Python | 0.000022 | |
8c176349d064db3dbc4db505cdc8a2d6a162dd56 | Create a consolidated broker initialization script | dataactcore/scripts/initialize.py | dataactcore/scripts/initialize.py | import argparse
import logging
import os
from flask_bcrypt import Bcrypt
from dataactvalidator.app import createApp
from dataactbroker.scripts.setupEmails import setupEmails
from dataactcore.models.userModel import User
from dataactcore.interfaces.function_bag import createUserWithPassword
from dataactcore.scripts.setupAllDB import setupAllDB
from dataactbroker.handlers.aws.session import SessionTable
from dataactcore.interfaces.db import GlobalDB
from dataactcore.config import CONFIG_BROKER, CONFIG_DB
from dataactvalidator.scripts.loadTas import loadTas
from dataactvalidator.filestreaming.sqlLoader import SQLLoader
from dataactvalidator.filestreaming.schemaLoader import SchemaLoader
from dataactvalidator.scripts.loadFile import loadDomainValues
from dataactvalidator.scripts.loadSf133 import loadAllSf133
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
basePath = CONFIG_BROKER["path"]
validator_config_path = os.path.join(basePath, "dataactvalidator", "config")
def setupDB():
"""Set up broker database and initialize data."""
logger.info('Setting up databases')
print('setting up db')
setupAllDB()
setupEmails()
def createAdmin():
"""Create initial admin user."""
logger.info('Creating admin user')
adminEmail = CONFIG_BROKER['admin_email']
adminPass = CONFIG_BROKER['admin_password']
with createApp().app_context():
sess = GlobalDB.db().session
user = sess.query(User).filter(User.email == adminEmail).one_or_none()
if not user:
# once the rest of the setup scripts are updated to use
# GlobalDB instead of databaseSession, move the app_context
# creation up to initialize()
user = createUserWithPassword(
adminEmail, adminPass, Bcrypt(), permission=2)
return user
def setupSessionTable():
"""Create Dynamo session table."""
logger.info('Setting up DynamoDB session table')
SessionTable.createTable(CONFIG_BROKER['local'], CONFIG_DB['dynamo_port'])
def loadTasLookup():
"""Load/update the TAS table to reflect the latest list."""
logger.info('Loading TAS')
loadTas()
def loadSqlRules():
"""Load the SQL-based validation rules."""
logger.info('Loading SQL-based validation rules')
SQLLoader.loadSql("sqlRules.csv")
def loadDomainValueFiles(basePath):
"""Load domain values (e.g., CGAC codes, object class, SF-133)."""
logger.info('Loading domain values')
loadDomainValues(basePath)
def loadSf133():
logger.info('Loading SF-133')
# Unlike other domain value files, SF 133 data is stored
# on S3. If the application's 'use_aws' option is turned
# off, tell the SF 133 load to look for files in the
# validator's local config file instead
if CONFIG_BROKER['use_aws']:
loadAllSf133()
else:
loadAllSf133(validator_config_path)
def loadValidatorSchema():
"""Load file-level .csv schemas into the broker database."""
logger.info('Loading validator schemas')
SchemaLoader.loadAllFromPath(validator_config_path)
parser = argparse.ArgumentParser(description='Initialize the DATA Act Broker.')
parser.add_argument('-db', '--setup_db', help='Create broker database and helper tables', action='store_true')
parser.add_argument('-a', '--create_admin', help='Create an admin user', action='store_true')
parser.add_argument('-r', '--load_rules', help='Load SQL-based validation rules', action='store_true')
parser.add_argument('-t', '--update_tas', help='Update broker TAS list', action='store_true')
parser.add_argument('-s', '--update_sf133', help='Update broker SF-133 reports', action='store_true')
parser.add_argument('-v', '--update_validator', help='Update validator schema', action='store_true')
args = parser.parse_args()
print(args)
if args.setup_db:
logger.info('Setting up databases')
setupAllDB()
setupEmails()
setupSessionTable()
if args.create_admin:
createAdmin()
if args.load_rules:
loadSqlRules()
if args.update_tas:
loadTas()
if args.update_sf133:
loadSf133()
if args.update_validator:
loadValidatorSchema()
| Python | 0 | |
11df88564a4c3aed330c84031cceaecace867d41 | Add cython version of temporal_daily | pds/temporal_daily_cython.py | pds/temporal_daily_cython.py | import os
import cPickle
import glob
import datetime as dt
import math
import time
import zlib
import bitarray
import numpy as np
from bloomfilter_cython import BloomFilter
from pycassa import NotFoundException
from pycassa.pool import ConnectionPool
from pycassa.system_manager import SystemManager, SIMPLE_STRATEGY
from pycassa.columnfamily import ColumnFamily
class DailyTemporalBloomFilter(BloomFilter):
"""Long Range Temporal BloomFilter using a daily resolution.
For really high value of expiration (like 60 days) with low requirement on precision.
    The actual error of this BF will be the native error of the BF plus the error related
    to the coarse granularity of the expiration, since we no longer expire information precisely.
    Also, as opposed to a classic Bloom Filter, this one will also have false positives (reporting membership for a non-member)
    AND false negatives (reporting non-membership for a member).
    The upper bound of the temporal_error can be theoretically quite high. However, if the
items of the set are uniformly distributed over time, the avg error will be something like 1.0 / expiration
"""
def __new__(cls, capacity, error_rate, expiration, name, cassandra_session):
return super(DailyTemporalBloomFilter, cls).__new__(cls, capacity=capacity, error_rate=error_rate)
def __init__(self, capacity, error_rate, expiration, name, cassandra_session):
filename = ""
super(DailyTemporalBloomFilter, self).__init__(capacity=capacity, error_rate=error_rate)
self.bf_name = name
self.expiration = expiration
self.initialize_period()
self.cassandra_session = cassandra_session
self.cassandra_columns_family = "temporal_bf"
self.keyspace = 'parsely'
self.uncommited_keys = []
self.commit_batch = 1000
self.columnfamily = None
self.ensure_cassandra_cf()
def ensure_cassandra_cf(self):
s = SystemManager()
if self.keyspace not in s.list_keyspaces():
s.create_keyspace(self.keyspace, SIMPLE_STRATEGY, {'replication_factor': '1'})
if self.cassandra_columns_family not in s.get_keyspace_column_families(self.keyspace):
s.create_column_family(self.keyspace, self.cassandra_columns_family)
self.columnfamily = ColumnFamily(self.cassandra_session, self.cassandra_columns_family)
def archive_bf_key(self, bf_key):
self.uncommited_keys.append(bf_key)
if len(self.uncommited_keys) >= self.commit_batch:
current_period_hour = dt.datetime.now().strftime('%Y-%m-%d:%H')
self.columnfamily.insert('%s_%s' % (self.bf_name, current_period_hour), {k:'' for k in self.uncommited_keys})
self.uncommited_keys = []
def _hour_range(self, start, end, reverse=False, inclusive=True):
"""Generator that gives us all the hours between a start and end datetime
(inclusive)."""
def total_seconds(td):
return (td.microseconds + (td.seconds + td.days * 24.0 * 3600.0) * 10.0**6) / 10.0**6
hours = int(math.ceil(total_seconds(end - start) / (60.0 * 60.0)))
if inclusive:
hours += 1
for i in xrange(hours):
if reverse:
yield end - dt.timedelta(hours=i)
else:
yield start + dt.timedelta(hours=i)
def _drop_archive(self):
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
hours = self._hour_range(last_period, dt.datetime.now())
for hour in hours:
try:
row = "%s_%s" % (self.bf_name, hour.strftime('%Y-%m-%d:%H'))
nbr_keys = self.columnfamily.get_count(row)
keys = self.columnfamily.remove(row)
except:
pass
def rebuild_from_archive(self):
"""Rebuild the BF using the archived items"""
#self.initialize_bitarray()
last_period = self.current_period - dt.timedelta(days=self.expiration-1)
hours = self._hour_range(last_period, dt.datetime.now())
rows = []
for i,hour in enumerate(hours):
row = "%s_%s" % (self.bf_name, hour.strftime('%Y-%m-%d:%H'))
rows.append(row)
rows_content = self.columnfamily.multiget(rows, column_count=1E6)
for row_content in rows_content.values():
for k in row_content.keys():
self.add(k, rebuild_mode=True)
def add(self, key, rebuild_mode=False):
if not rebuild_mode:
self.archive_bf_key(key)
result = super(DailyTemporalBloomFilter, self).add(key)
return result
def initialize_period(self, period=None):
"""Initialize the period of BF.
:period: datetime.datetime for setting the period explicity.
"""
if not period:
self.current_period = dt.datetime.now()
else:
self.current_period = period
self.current_period = dt.datetime(self.current_period.year, self.current_period.month, self.current_period.day)
self.date = self.current_period.strftime("%Y-%m-%d")
| Python | 0.000001 | |
e60f13ab304c04e17af91bc87edc1891948a6f7a | Add validation function for python schema events | parsely_raw_data/validate.py | parsely_raw_data/validate.py | from __future__ import print_function
import logging
import pprint
from collections import defaultdict
from six import string_types
from .schema import SCHEMA
"""
Data Pipeline validation functions
"""
SCHEMA_DICT = None
REQ_FIELDS = None
CHECKS = {'req': 'Fields "{}" are required. ({} are present)',
'size': 'Field "{}" is too large (size limit {})',
'type': 'Field "{}" should be {}',
'not_in_schema': 'Field "{}" not in schema. {}'}
log = logging.getLogger(__name__)
def _create_schema_dict():
global SCHEMA_DICT, REQ_FIELDS
SCHEMA_DICT = defaultdict(dict)
for field in SCHEMA:
conditions = {k: field.get(k) for k, _ in CHECKS.items()}
if conditions['type'] == object:
conditions['type'] = dict
if conditions['type'] == str:
conditions['type'] = string_types
SCHEMA_DICT[field['key']] = conditions
REQ_FIELDS = set([k for k, v in SCHEMA_DICT.items() if v['req']])
_create_schema_dict()
def _handle_warning(check_type, field, value, cond, raise_error=True):
"""If raise, raise an error. Otherwise just log."""
msg = CHECKS[check_type].format(field, cond)
if raise_error:
raise ValueError(msg, value, type(value))
else:
log.warn(msg, value, type(value))
return False
def validate(event, raise_error=True):
"""Checks whether an event matches the given schema.
:param raise_error: let errors/exceptions bubble up.
"""
present = REQ_FIELDS.intersection(set(event.keys()))
if len(present) != len(REQ_FIELDS):
return _handle_warning('req', list(REQ_FIELDS), '', list(present), raise_error=raise_error)
for field, value in event.items():
try:
field_reqs = SCHEMA_DICT[field]
check_type = field_reqs['type']
check_size = field_reqs['size']
# verify type based on schema
if value is not None and not isinstance(value, check_type):
return _handle_warning('type',
field,
value,
check_type,
raise_error=raise_error)
# verify size of string values
if isinstance(value, string_types) and check_size is not None and len(value) > check_size:
return _handle_warning('size',
field,
value,
check_size,
raise_error=raise_error)
except KeyError as exc:
return _handle_warning('not_in_schema', field, value, '', raise_error=raise_error)
return True # event passes tests
if __name__ == "__main__":
log.warn = print
# non schema fields
d = {k: "test" for k in REQ_FIELDS}
d['test'] = "test"
assert validate(d, raise_error=False) != True
# fields too long
d = {k: "test" for k in REQ_FIELDS}
d['utm_term'] = 'd' * 90
assert validate(d, raise_error=False) != True
# fields wrong type
d = {k: "test" for k in REQ_FIELDS}
d['timestamp_info_nginx_ms'] = 123456
d['extra_data'] = "not a dict"
assert validate(d, raise_error=False) != True
d['visitor'] = "true"
assert validate(d, raise_error=False) != True
d['ip_lat'] = 4
assert validate(d, raise_error=False) != True
# not all required fields
d = {}
assert validate(d, raise_error=False) != True
# error catching
d = {}
err = False
try:
validate(d)
except Exception as e:
err = True
assert err == True
| Python | 0.000001 | |
271be0bf16692aae2736d40e96447262e75c4a0f | add missing web.py | zmq/web.py | zmq/web.py | #-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
raise ImportError('\n'.join([
"zmq.web is now maintained separately as zmqweb,",
"which can be found at https://github.com/ellisonbg/zmqweb"
]))
| Python | 0 | |
0a7f1695f9155bbe10b933e47637e4df0e2e31d4 | Create HttpAndWeb.py | day3/HttpAndWeb.py | day3/HttpAndWeb.py | import requests
def Get(url, PostId):
try:
isinstance(int(PostId), int)
if int(PostId) <= 100 and int(PostId) > 0:
r = requests.get(url + PostId)
return r
else:
print("Number must be between 1 and 100")
except ValueError as err:
raise(err)
return "No Results"
def Post(PostUrl,title, body, userId=11):
Post= {
'title': title,
'body': body,
'userId': userId
}
request = requests.post(PostUrl, data=Postdata)
return request
def main():
print("Python HTTP API command line app %s\n" %("-"*31))
print("Simple Python HTTP API command line app")
url = "https://jsonplaceholder.typicode.com/posts/"
PostId = input("Enter a number between 1 and 100: ")
get = Get(url,PostId)
print("GET Response data\n\t%s\n%s\n\tStatus code\n\t%s\n%s\n\tHeaders\n\t%s\n%s" %
("-"*17,get.text, "-"*11, get.status_code,"-"*7, get.headers))
title = input("Enter a title for your post: ")
body = input("Enter a body for your post: ")
post = Post(url,title,body)
print("\tPOST Response data\n\t%s\n%s\n\tStatus code\n\t%s\n%s\n\tHeaders\n\t%s\n%s" %
("-"*17,post.text, "-"*11, post.status_code,"-"*7, post.headers))
if __name__ == '__main__':
main()
| Python | 0.000002 | |
9fa6ec498d70afdb4f28410d4ac7c29780c60861 | Add first stab at origen submodule | transmutagen/origen.py | transmutagen/origen.py | from subprocess import run
from pyne.origen22 import (nlibs, write_tape5_irradiation, write_tape4,
parse_tape9, merge_tape9, write_tape9)
from pyne.material import from_atom_frac
if __name__ == '__main__':
ORIGEN = '/home/origen22/code/o2_therm_linux.exe'
xs_TAPE9 = "/Users/aaronmeurer/Documents/origen/C371ALLCP.03/CCC371.03/origen22/libs/pwru50.lib"
decay_TAPE9 = "/Users/aaronmeurer/Documents/origen/C371ALLCP.03/CCC371.03/origen22/libs/decay.lib"
parsed_xs_tape9 = parse_tape9(xs_TAPE9)
parsed_decay_tape9 = parse_tape9(decay_TAPE9)
merged_tape9 = merge_tape9([parsed_decay_tape9, parsed_xs_tape9])
# Can set outfile to change directory, but the file name needs to be
# TAPE9.INP.
write_tape9(merged_tape9)
xsfpy_nlb = nlibs(parsed_xs_tape9)
time = 2.6e6
# Can set outfile, but the file name should be called TAPE5.INP.
write_tape5_irradiation("IRF", time/(60*60*24), 4e14, xsfpy_nlb=xsfpy_nlb, cut_off=0)
M = from_atom_frac({"": 1}, mass=1, atoms_per_molecule=1)
write_tape4(M)
run(ORIGEN)
data = parse_tape6()
print(data)
| Python | 0.000001 | |
72f9d74fe6503de45e7251460d5419eebcabfb7e | Add files via upload | old/hefesto_temp_fix.py | old/hefesto_temp_fix.py | import os
def replace_temp(inputfile_folder):
os.chdir(inputfile_folder)
home_dir = os.getcwd()
for i in os.listdir(os.getcwd()):
if os.path.isdir(i):
os.chdir(i)
print("In folder: {}".format(os.getcwd()))
for z in os.listdir(os.getcwd()):
if '.txt' in z:
with open(z, 'r') as infile:
with open("temp.txt", 'w') as outfile:
print("\nChanging string in file: {}".format(z))
infile_text = infile.read()
s = infile_text.replace(",20,80,1200,0,-2,0", "0,20,80,1600,0,-2,0")
outfile.write(s)
os.remove(z)
os.rename("temp.txt", z)
infile.close()
print("Success! Replaced string in file: {}".format(z))
os.chdir(home_dir)
def initialization():
print("\n\n\n\nPlease specify your HeFESTo input file folder (in Exoplanet Pocketknife format):")
in1 = input("\n>>> ")
if in1 in os.listdir(os.getcwd()):
replace_temp(inputfile_folder=in1)
else:
initialization()
initialization() | Python | 0 | |
816872186966186eb463d1fd45bea3a4c6f68e00 | Add new sanity test for demoproject views | demoproject/tests_demo.py | demoproject/tests_demo.py | from demoproject.urls import urlpatterns
from django.test import Client, TestCase
class DemoProject_TestCase(TestCase):
def setUp(self):
self.client = Client()
def test_all_views_load(self):
"""
A simple sanity test to make sure all views from demoproject
still continue to load!
"""
for url in urlpatterns:
address = url._regex
if address.startswith('^'):
address = '/' + address[1:]
if address.endswith('$'):
address = address[:-1]
response = self.client.get(address)
self.assertEqual(response.status_code, 200)
| Python | 0 | |
2cdf9728bd185fa7a32e4a7f758311594245fae0 | Add proc_suffixes_file.py | file_path/proc_suffixes_file.py | file_path/proc_suffixes_file.py | #!/usr/bin/env python
import os
import re
import sys
SUFFIX_PAT = re.compile(r'(?P<FILE>[a-zA-z0-9]+)_\d+\b')
SUFFIXED_LIST = []
def is_suffixed_file(dir_path, file_name):
base_name, ext_name = os.path.splitext(file_name)
match_obj = SUFFIX_PAT.match(base_name)
if not match_obj:
return False
no_suffixed_file = os.path.join(dir_path, match_obj.group('FILE') + ext_name)
if not os.path.exists(no_suffixed_file):
return False
return True
def collect_suffixed_file(dir_path, file_name):
if not is_suffixed_file(dir_path, file_name):
return
suffix_file = os.path.join(dir_path, file_name)
SUFFIXED_LIST.append(suffix_file)
def remove_files():
if not SUFFIXED_LIST:
print 'No suffixes file.'
return
SUFFIXED_LIST.sort()
for name in SUFFIXED_LIST:
print name
input_str = raw_input('Do you want to remove this files: [Y/N]')
if input_str.upper() != 'Y':
return
for name in SUFFIXED_LIST:
try:
os.remove(name)
print '%s removed.' % name
except OSError, e:
print e
def main():
if len(sys.argv) < 2:
print 'Please a directory.'
return
if not os.path.isdir(sys.argv[1]):
print 'Please input valid path - %s' % sys.argv[1]
return
for dir_path, dir_list, file_list in os.walk(sys.argv[1]):
for file_name in file_list:
collect_suffixed_file(dir_path, file_name)
remove_files()
if __name__ == '__main__':
main()
| Python | 0.000001 | |
286c9c8a6618fc0a87dbe1b50787331986155940 | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
959aecd612f66eee22e179f985227dbb6e63202a | Move buckling calcs to continuum_analysis | __init__.py | __init__.py | from abaqus_model import *
from abaqus_postproc import *
from continuum_analysis import *
from rayleighritz import RayleighRitzDiscrete
from stiffcalc import *
| Python | 0 | |
6d2735035d7230e6a709f66be93b760531a42868 | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
662b0754ea73ef9dc19c50ac8d9b3e2aaa7fbb02 | Create __init__.py | __init__.py | __init__.py | Python | 0.000429 | ||
05a6080eed951f80da3b6f7ee4962101884f328e | add testing utility for checking term lookback windows | zipline/pipeline/factors/testing.py | zipline/pipeline/factors/testing.py | import numpy as np
from zipline.testing.predicates import assert_equal
from .factor import CustomFactor
class IDBox(object):
"""A wrapper that hashs to the id of the underlying object and compares
equality on the id of the underlying.
Parameters
----------
ob : any
The object to wrap.
Attributes
----------
ob : any
The object being wrapped.
Notes
-----
This is useful for storing non-hashable values in a set or dict.
"""
def __init__(self, ob):
self.ob = ob
def __hash__(self):
return id(self)
def __eq__(self, other):
if not isinstance(other, IDBox):
return NotImplemented
return id(self.ob) == id(other.ob)
class CheckWindowsFactor(CustomFactor):
"""A custom factor that makes assertions about the lookback windows that
it gets passed.
Parameters
----------
input_ : Term
The input term to the factor.
window_length : int
The length of the lookback window.
expected_windows : dict[int, dict[pd.Timestamp, np.ndarray]]
For each asset, for each day, what the expected lookback window is.
Notes
-----
The output of this factor is the same as ``Latest``. Any assets or days
not in ``expected_windows`` are not checked.
"""
params = ('expected_windows',)
def __new__(cls, input_, window_length, expected_windows):
return super(CheckWindowsFactor, cls).__new__(
cls,
inputs=[input_],
dtype=input_.dtype,
window_length=window_length,
expected_windows=frozenset(
(k, IDBox(v)) for k, v in expected_windows.items()
),
)
def compute(self, today, assets, out, input_, expected_windows):
for asset, expected_by_day in expected_windows:
expected_by_day = expected_by_day.ob
col_ix = np.searchsorted(assets, asset)
if assets[col_ix] != asset:
raise AssertionError('asset %s is not in the window' % asset)
try:
expected = expected_by_day[today]
except KeyError:
pass
else:
expected = np.array(expected)
actual = input_[:, col_ix]
assert_equal(actual, expected)
# output is just latest
out[:] = input_[-1]
| Python | 0 | |
d3a684b06d2d61f2a498346f78a5cbbabd7828e7 | Create elastic_search.py | misc/elastic_search.py | misc/elastic_search.py | import requests
import json
import pprint
es = 'http://hostt:9200/'
query = '''
{'fields': ['field1', 'field2',],
'filter': {'bool': {'must': [{'terms': {'field1': [1,
2]}},
{'bool': {'should': [{'term': {'field2': 'p'}},
{'bool': {'must': [{'term': {'field3': 'interesting'}},
]
}
}
]
}
}
]
}
}
'from': 0,
'query': {'match_all': {}},
'size': 100,
'search_type: 'scan',
}
index = '/index-name'
method = '/_search'
payload = json.dumps(query)
res = requests.get(es + index + method, data=payload)
pprint.pprint(res.json())
| Python | 0.000057 | |
7bf376c57cc989f382f6a1cdc6a5f956b2c73fd6 | Add pixels_with_value() | ml/img/segmentation.py | ml/img/segmentation.py | import numpy as np
def pixels_with_value(img, val):
return np.all(img==np.array(val), axis=2)
| Python | 0 | |
13c40d631c5d0e6035ea143a68e45201691b46a5 | Create 0303_restaurant_plural_foods.py | 2019/0303_restaurant_plural_foods.py | 2019/0303_restaurant_plural_foods.py | # -*- coding: utf-8 -*-
"""
NPR 2019-03-03
https://www.npr.org/2019/03/03/699735287/sunday-puzzle-in-this-game-a-chance-to-claim-vic-tor-y
Name a popular restaurant chain in two words.
Its letters can be rearranged to spell some things to eat and some things to drink.
Both are plural words. What things are these, and what's the chain?
"""
import sys
sys.path.append('..')
import nprcommontools as nct
import json
#%%
# Get a list of restaurants
restaurants = nct.wikipedia_category_members('Restaurant_chains_in_the_United_States',3)
# Two-word restaurants
good_restaurants = set(x for x in restaurants if x.count(' ') == 1)
#%%
# Food and drink are both under the category 'food' in Wordnet
food_and_drink = nct.get_category_members('food')
#%%
# Get plurals of foods
with open(r'../plurals.json','r') as fid:
plurals1 = json.load(fid)
plurals = set()
for word,pls in plurals1.items():
if word in food_and_drink:
for pl in pls:
plurals.add(pl)
#%%
# All sorted strings consisting of two plurals
plural_dict = dict()
plurals_list = list(plurals)
for i in range(len(plurals_list)):
for j in range(i+1,len(plurals_list)):
plural_dict[nct.sort_string(nct.alpha_only(plurals_list[i]+plurals_list[j]))] = (plurals_list[i],plurals_list[j])
#%%
for r in good_restaurants:
r_sorted = nct.sort_string(nct.alpha_only(r.lower()))
if r_sorted in plural_dict:
print(r,plural_dict[r_sorted])
| Python | 0.999927 | |
bfdac16ca4e0ae30e345b221c7754f19669a55da | update full version of criteria module. | biokit/stats/criteria.py | biokit/stats/criteria.py | <<<<<<< HEAD
# -*- coding: utf-8 -*-
import math
__all__ = ['AIC', 'AICc', 'BIC']
def AIC(L, k):
"""Return Akaike information criterion (AIC)
:param int k: number of parameters
:param float L: maximised value of the likelihood function
Suppose that we have a statistical model of some data, from which we computed
its likelihood function and let k be the number of parameters in the model
(i.e. degrees of freedom). Then the AIC value is ::
:math:`\mathrm{AIC} = 2k - 2\ln(L)`
Given a set of candidate models for the data, the preferred model is the one
with the minimum AIC value. Hence AIC rewards goodness of fit (as assessed
by the likelihood function), but it also includes a penalty that is an
increasing function of the number of estimated parameters. The penalty
discourages overfitting.
Suppose that there are R candidate models AIC1, AIC2, AIC3, AICR.
Let AICmin be the minimum of those values. Then, exp((AICmin - AICi)/2)
can be interpreted as the relative probability that the ith model
minimizes the (estimated) information loss.
Suppose that there are three candidate models, whose AIC values are 100,
102, and 110. Then the second model is exp((100 - 102)/2) = 0.368 times
as probable as the first model to minimize the information loss. Similarly,
the third model is exp((100โฏ-โฏ110)/2) = 0.007 times as probable as
the first model, which can therefore be discarded.
With the remaining two models, we can (1) gather more data, (2) conclude
that the data is insufficient to support selecting one model from among
the first two (3) take a weighted average of the first two models,
with weights 1 and 0.368.
The quantity exp((AICminโฏ-โฏAICi)/2) is the relative likelihood of model i.
If all the models in the candidate set have the same number of parameters,
then using AIC might at first appear to be very similar to using the
likelihood-ratio test. There are, however, important distinctions.
In particular, the likelihood-ratio test is valid only for nested models,
whereas AIC (and AICc) has no such restriction.
Reference: Burnham, K. P.; Anderson, D. R. (2002), Model Selection and
Multimodel Inference: A Practical Information-Theoretic Approach (2nd ed.),
Springer-Verlag, ISBN 0-387-95364-7.
"""
return 2*k -2 * math.log(L)
def AICc(L, k, n):
"""AICc criteria
:param int k: number of parameters
:param int n: sample size
:param float L: maximised value of the likelihood function
AIC with a correction for finite sample sizes.
The formula for AICc depends upon the statistical model.
Assuming that the model is univariate, linear, and has normally-distributed
residuals (conditional upon regressors), the formula for AICc is as follows:
AICc is essentially AIC with a greater penalty for extra parameters.
Using AIC, instead of AICc, when n is not many times larger than k2, increases
the probability of selecting models that have too many parameters, i.e. of
overfitting. The probability of AIC overfitting can be substantial, in some cases.
"""
res = AIC(L, k) + 2*k*(k+1.) / (n-k-1.)
return res
def BIC(L, k, n):
"""Bayesian information criterion
Given any two estimated models, the model with the lower value of BIC is the one to be preferred.
"""
res = -2 * math.log(L) + k * (math.log(n) - math.log(2 * math.pi))
# For large n
#res = -2 * math.log(L) + k * math.log(n)
return res
=======
import math
def AIC(L, k):
return 2*k - 2 * math.log(L)
def AICc(L, k, n):
return AIC(L, k) + 2*k*(k+1.)/(n-k-1.)
def BIC(L, k, n):
return -2 * math.log(L) + k * (math.log(n) - math.log(2*math.pi))
>>>>>>> 514a04b5ffa7c9e3ede068c860933e9a404e6063
| Python | 0 | |
ed46c3887c7b51cd75d46523af7b901b79eb92fc | add import script for Milton Keynes (closes #863) | polling_stations/apps/data_collection/management/commands/import_milton_keynes.py | polling_stations/apps/data_collection/management/commands/import_milton_keynes.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E06000042'
addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017 (1).tsv'
elections = ['parl.2017-06-08']
csv_delimiter = '\t'
| Python | 0 | |
4f042e64e3155abfc4b86f61623a4d999dad0f89 | Move tinyHttpServer.py | tinyHttpServer.py | tinyHttpServer.py | import SimpleHTTPServer
import SocketServer
PORT = 8080
try:
Handler = SimpleHTTPServer.SimpleHTTPRequestHandler
httpd = SocketServer.TCPServer(("", PORT), Handler)
print("serving at port %d" % (PORT))
print("Type Ctrl+C to quit")
httpd.serve_forever()
except KeyboardInterrupt as e:
print("\nserver stopped\nBye...")
| Python | 0.000004 | |
5fb6b31ea928162c5185d66381ae99c7454d33c0 | Add comb sort algorithm | sorts/comb_sort.py | sorts/comb_sort.py | """
Comb sort is a relatively simple sorting algorithm originally designed by Wlodzimierz Dobosiewicz in 1980.
Later it was rediscovered by Stephen Lacey and Richard Box in 1991. Comb sort improves on bubble sort.
This is pure python implementation of counting sort algorithm
For doctests run following command:
python -m doctest -v comb_sort.py
or
python3 -m doctest -v comb_sort.py
For manual testing run:
python comb_sort.py
"""
def comb_sort(data):
"""Pure implementation of comb sort algorithm in Python
:param collection: some mutable ordered collection with heterogeneous
comparable items inside
:return: the same collection ordered by ascending
Examples:
>>> comb_sort([0, 5, 3, 2, 2])
[0, 2, 2, 3, 5]
>>> comb_sort([])
[]
>>> comb_sort([-2, -5, -45])
[-45, -5, -2]
"""
shrink_factor = 1.3
gap = len(data)
swapped = True
i = 0
while gap > 1 or swapped:
# Update the gap value for a next comb
gap = int(float(gap) / shrink_factor)
swapped = False
i = 0
while gap + i < len(data):
if data[i] > data[i+gap]:
# Swap values
data[i], data[i+gap] = data[i+gap], data[i]
swapped = True
i += 1
return data
if __name__ == '__main__':
try:
raw_input # Python 2
except NameError:
raw_input = input # Python 3
user_input = raw_input('Enter numbers separated by a comma:\n').strip()
unsorted = [int(item) for item in user_input.split(',')]
print(comb_sort(unsorted))
| Python | 0.000008 | |
2ed853301e8cedb72c7c07367d58e55cac23aa7c | add PI to arduino | raspi-pl2303.py | raspi-pl2303.py | #!/usr/bin/env python
import serial
print 'RaspberryPi PL2303(USB2SerialTTL) communicate.'
print "Copyright (c) 2015 winlin(winlin@vip.126.com)"
f = serial.Serial('/dev/ttyUSB0', 115200)
while True:
v = "Hello, Arduino, this is RaspberryPi 2.0~"
f.write(v)
print 'PI: %s'%(v)
r = ''
for i in v:
r += f.read()
print 'Arduino: %s'%(r) | Python | 0.999473 | |
bb7a6ecc72c007ba3c13069d7f3bddfa8c8309f4 | Remove structures_dir for now, to be developed later | ssbio/core/complex.py | ssbio/core/complex.py | """
Complex
=======
"""
import os.path as op
import logging
import ssbio.utils
from ssbio.core.object import Object
from ssbio.core.protein import Protein
from cobra.core import DictList
log = logging.getLogger(__name__)
class Complex(Object):
"""Store information about a protein complex, a generic representation of a 3D oligomeric complex composed of
individual protein subunits.
The main utilities of this class are to:
* Allow as input a name for the complex and a dictionary of its subunit composition
* Map each single subunit to its available experimental structures and homology models using methods in the
:class:`~ssbio.core.protein.Protein` class
* Map experimental structures and homology models to their available oliogmeric states
* Select a single :attr:`~ssbio.core.complex.Complex.representative_complex` to which best represents the 3D
oligomeric structure that best matches the defined subunit composition
* Calculate, store, and access properties related to this complex
* Provide summaries of available structures and selection details for the representative complex
Args:
ident (str): Unique identifier for this protein
subunits (dict): Subunit composition defined as ``{protein_subunit_id: number_of_times_used_in_complex}``
description (str): Optional description for this complex
root_dir (str): Path to where the folder named by this complex's ID will be created.
Default is current working directory.
pdb_file_type (str): ``pdb``, ``mmCif``, ``xml``, ``mmtf`` - file type for files downloaded from the PDB
"""
def __init__(self, ident, subunits, description=None, root_dir=None, pdb_file_type='mmtf'):
Object.__init__(self, id=ident, description=description)
self.subunit_dict = subunits
"""dict: Subunit composition defined as ``{protein_subunit_id: number_of_times_used_in_complex}``"""
self._subunits = None
self._oligomeric_state = None
self.pdb_file_type = pdb_file_type
"""str: ``pdb``, ``pdb.gz``, ``mmcif``, ``cif``, ``cif.gz``, ``xml.gz``, ``mmtf``, ``mmtf.gz`` - choose a file
type for files downloaded from the PDB"""
# Create directories
self._root_dir = None
if root_dir:
self.root_dir = root_dir
@property
def root_dir(self):
"""str: Path to where the folder named by this complex's ID will be created. Default is current working
directory."""
return self._root_dir
@root_dir.setter
def root_dir(self, path):
if not path:
raise ValueError('No path specified')
if not op.exists(path):
raise ValueError('{}: folder does not exist'.format(path))
if self._root_dir:
log.debug('Changing root directory of Complex "{}" from {} to {}'.format(self.id, self.root_dir, path))
if not op.exists(op.join(path, self.id)) and not op.exists(op.join(path, self.id) + '_complex'):
raise IOError('Complex "{}" does not exist in folder {}'.format(self.id, path))
self._root_dir = path
for d in [self.complex_dir]:
ssbio.utils.make_dir(d)
@property
def complex_dir(self):
"""str: Complex folder"""
if self.root_dir:
# Add a _complex suffix to the folder if it has the same name as its root folder
folder_name = self.id
if folder_name == op.basename(self.root_dir):
folder_name = self.id + '_complex'
return op.join(self.root_dir, folder_name)
else:
log.warning('Root directory not set')
return None
@property
def oligomeric_state(self):
"""Return the oligomeric state based on the contents of the :attr:`~ssbio.core.complex.Complex.subunit_dict`
dictionary.
Returns:
str: Oligomeric state of the complex, currently can be ``monomer``, ``homomer``, or ``heteromer``
"""
# TODO: [VizRecon]
# Check the number of keys in the self.subunit_dict dictionary
# Set as monomer if there is one key, and the value == 1
# Set as homomer if there is one key and value > 1
# Set as heteromer otherwise
return None
@property
def subunits(self):
"""DictList: Subunits represented as a DictList of Protein objects"""
# TODO: [VizRecon]
# TODO: will need to adapt this to allow for input of previously created Protein objects
subunits = DictList()
for s in self.subunit_dict:
subunits.append(Protein(ident=s, description='Subunit of complex {}'.format(self.id),
root_dir=self.complex_dir, pdb_file_type=self.pdb_file_type))
return subunits
def map_subunit_to_sequence_and_structures(self, subunit_id):
"""Run the sequence and structure mapping code for a specified protein subunit.
This stores the mapping information directly inside the Protein subunit object itself
Args:
subunit_id (str): ID of protein subunit to run mapping code for
"""
# TODO: Nathan
def set_representative_complex(self):
"""Set the representative 3D structure for this complex based on coverage of subunits.
Args:
Returns:
"""
# TODO: [VizRecon]
if self.oligomeric_state == 'monomer':
pass
elif self.oligomeric_state == 'homomer':
pass
elif self.oligomeric_state == 'heteromer':
pass
def get_structure_stoichiometry(structure_file):
"""Parse a structure file and return a chain stoichiometry dictionary.
Args:
structure_file (str): Path to protein structure file (experimental or homology model
Returns:
dict: Dictionary of ``{chain_id: number_of_times_used}``
"""
# TODO: [VizRecon]
# TODO: most likely to be moved to another module, need to brainstorm
pass | Python | 0 | |
30412406b354f510a7321c3b3a159df6d7743668 | Add a database file for Loren to complete | utils/database.py | utils/database.py | import database_setup
assert(database_setup.is_table_set_up())
# TODO: Create the stubs for database files
# Assignee: Loren
| Python | 0 | |
a24cdefd6e29a651d69db1a264a30fcea4ad6141 | Add LiteLogger class. | forge/lite_logger.py | forge/lite_logger.py | #!/usr/bin/env python
from inspect import getframeinfo
from inspect import stack
from threading import Lock
from threading import Event
from threading import Thread
from time import clock
from time import sleep
from weakref import ref
from colorama import Fore
from colorama import Style
from colorama import init as colorama_init
from enum import IntEnum
__copyright__ = "Copyright 2015 The Paladin's Forge"
__license__ = "MIT"
__email__ = "ThePaladinsForge@gmail.com"
__version__ = "1.0"
__status__ = "Development"
class LogLevel(IntEnum):
VERBOSE = 6,
DEBUG = 5,
INFO = 4,
WARNING = 3,
ERROR = 2,
CRITICAL = 1,
OFF = 0
class _LiteLog(object):
def __init__(self, logger_ref, log_level=LogLevel.INFO):
self._logger_ref = logger_ref
self._output_level = log_level
def verbose(self, msg):
if self._output_level >= LogLevel.VERBOSE:
self._enqueue(LogLevel.VERBOSE, clock(), LiteLogger.get_caller(), msg)
def debug(self, msg):
if self._output_level >= LogLevel.DEBUG:
self._enqueue(LogLevel.DEBUG, clock(), LiteLogger.get_caller(), msg)
def info(self, msg):
if self._output_level >= LogLevel.INFO:
self._enqueue(LogLevel.INFO, clock(), LiteLogger.get_caller(), msg)
def warning(self, msg):
if self._output_level >= LogLevel.WARNING:
self._enqueue(LogLevel.WARNING, clock(), LiteLogger.get_caller(), msg)
def error(self, msg):
if self._output_level >= LogLevel.ERROR:
self._enqueue(LogLevel.ERROR, clock(), LiteLogger.get_caller(), msg)
def critical(self, msg):
if self._output_level >= LogLevel.CRITICAL:
self._enqueue(LogLevel.CRITICAL, clock(), LiteLogger.get_caller(), msg)
def _enqueue(self, log_level, ts, caller, msg):
logger = self._logger_ref()
if logger:
logger.enqueue_msg(log_level, ts, caller, msg)
class LiteLogger(object):
    """Weak-referenced singleton that queues log records and prints them.

    Producers obtain a :class:`_LiteLog` facade via :meth:`get_log`; records
    are buffered in ``_msg_queue`` and flushed either explicitly through
    :meth:`write_messages` or by a daemon thread when threading is enabled.
    """

    # Weak reference to the single live instance, or None when no instance
    # exists (also reset by shutdown()).
    INSTANCE = None

    def __new__(cls, *args, **kwargs):
        if cls.INSTANCE is not None:
            instance = cls.INSTANCE()  # INSTANCE holds a weakref
            if instance is not None:
                return instance
            # FIX: the previous singleton was garbage collected without a
            # shutdown(); reset the slot so a fresh instance is built instead
            # of returning None.
            cls.INSTANCE = None
        assert cls == LiteLogger, "%s can not derive from LiteLogger class." % cls.__name__
        # FIX: object.__new__ must not receive the constructor arguments --
        # under Python 3 the extra args raise TypeError.
        return object.__new__(cls)

    def __init__(self, enable_threading=False):
        # Guard: when __new__ handed back the existing instance, skip
        # re-initialisation (note: enable_threading is ignored in that case).
        if LiteLogger.INSTANCE is None:
            colorama_init()
            LiteLogger.INSTANCE = ref(self)
            self._lock = Lock()
            from random import random
            self.id = random()  # random token identifying this instance
            self._msg_queue = []
            self._color = False
            self._output_level = LogLevel.INFO
            self._thread_shutdown = Event()
            self._thread = Thread(target=self._thread_write_messages)
            if enable_threading:
                self.set_threading(True)

    def __repr__(self):
        # FIX: the original returned str() (an empty string), which made the
        # logger invisible in debug output.
        return "{}(id={!r})".format(type(self).__name__, self.id)

    def shutdown(self):
        # Ensure threading is off and drop the weakref so the class can be
        # cleanly re-instantiated later.
        self.set_threading(False)
        LiteLogger.INSTANCE = None

    @staticmethod
    def get_caller():
        """Return ``<file>:<line>`` of the code that invoked a _LiteLog level
        wrapper, right-justified or ellipsised into a fixed 25-char field.

        NOTE: ``stack()[2]`` hard-codes the call depth (wrapper ->
        get_caller), so this must only be called from those wrappers.
        """
        s = stack()[2]
        parse_len = 25
        caller = getframeinfo(s[0])
        full_file_name = "{}:{}".format(caller.filename, caller.lineno)
        if len(full_file_name) > parse_len:
            # Keep the tail (most specific part) and mark the truncation.
            file_name = "...{}".format(full_file_name[-parse_len + 3:])
        else:
            fmt_str = "{{0:>{}}}".format(parse_len)
            file_name = fmt_str.format(full_file_name)
        return file_name

    def get_log(self, log_level=LogLevel.INFO):
        """Return a _LiteLog facade bound (weakly) to this logger."""
        return _LiteLog(ref(self), log_level)

    def set_output_level(self, level=LogLevel.INFO):
        # Global ceiling applied in enqueue_msg, on top of each log's level.
        self._output_level = level

    def set_threading(self, active):
        """Start or stop the background flush thread."""
        if active is True:
            self._enable_threading()
        else:
            if self._thread.is_alive():
                self._disable_threading()

    def set_color(self, active):
        """Enable or disable ANSI colouring of the level name."""
        if active is True:
            self._enable_color()
        else:
            self._disable_color()

    def write_messages(self):
        """Flush the queue, printing every pending record."""
        # Swap the queue out under the lock, then format/print outside it so
        # producers are never blocked on I/O.
        with self._lock:
            msg_list, self._msg_queue = self._msg_queue, []
        for msg in msg_list:
            print(self._format(*msg))

    def enqueue_msg(self, log_level, ts, caller, msg):
        """Buffer one record if it passes the logger-wide output level."""
        if log_level <= self._output_level:
            msg_obj = (log_level, ts, caller, msg)
            with self._lock:
                self._msg_queue.append(msg_obj)

    def _thread_write_messages(self):
        # Flush loop; the short sleep keeps CPU usage down while still
        # draining the queue promptly.
        while not self._thread_shutdown.is_set():
            sleep(.0001)
            self.write_messages()

    def _enable_threading(self):
        self._thread_shutdown.clear()
        self._thread = Thread(target=self._thread_write_messages)
        self._thread.daemon = True  # do not block interpreter exit
        self._thread.start()

    def _disable_threading(self):
        self._thread_shutdown.set()
        self._thread.join()
        self.write_messages()  # drain anything left after the thread stopped

    def _enable_color(self):
        self._color = True

    def _disable_color(self):
        self._color = False

    def _format(self, log_level, time_stamp, caller, msg):
        """Render one queued record into its printable form.

        ``time_stamp`` is currently unused but kept for interface stability.
        Continuation lines of multi-line messages are indented to align with
        the first message column.
        """
        msg = str(msg)
        # Lookup table replaces the original repetitive if/elif ladder; the
        # rendered output is identical.
        styles = {
            LogLevel.VERBOSE: ("VERBOSE", Fore.WHITE),
            LogLevel.DEBUG: ("DEBUG", Fore.WHITE),
            LogLevel.INFO: ("INFO", Fore.WHITE),
            LogLevel.WARNING: ("WARNING", Fore.YELLOW),
            LogLevel.ERROR: ("ERROR", Fore.RED),
            LogLevel.CRITICAL: ("CRITICAL", Fore.RED + Style.BRIGHT),
        }
        log_name, color = styles.get(log_level, ("INVALID", None))
        if self._color and color is not None:
            log_name_prefix = color
            log_name_suffix = Style.RESET_ALL
        else:
            log_name_prefix = ""
            log_name_suffix = ""
        header = "{}{:<8}{} : {} : {{}}".format(log_name_prefix, log_name, log_name_suffix, caller)
        next_line = "\n{}{{}}".format(" " * len("{:<8} : {} : ".format(log_name, caller)))
        output = ""
        for idx, msg_part in enumerate(msg.split("\n")):
            if idx == 0:
                output += header.format(msg_part)
            else:
                output += next_line.format(msg_part)
        return output
| Python | 0 | |
929abedc5f971a58dfb54b706c66548609351835 | Create fair_warning.py | google-code-jam/fair_warning.py | google-code-jam/fair_warning.py | """
https://code.google.com/codejam/contest/433101/dashboard#s=p1
"""
def gcd(a, b):
    """Return the greatest common divisor of a and b (Euclid's algorithm)."""
    # Iterative translation of the recursive formulation: order the pair so
    # the larger value comes first, then reduce until the remainder is zero.
    while True:
        if b > a:
            a, b = b, a
        if b == 0:
            return a
        a, b = b, a % b
def big_gcd(a):
    """Fold gcd across every number in the sequence ``a``."""
    # reduce(gcd, a) is exactly the original lambda wrapper, minus the wrapper.
    return reduce(gcd, a)
def solve(nums):
    """Return the smallest wait y >= 0 after which all events coincide.

    The events repeat with period T = gcd of the pairwise differences of the
    (sorted) last-occurrence times; the answer lifts the earliest time up to
    the next multiple of T.
    """
    nums = sorted(nums)
    # Pairwise differences via zip instead of index arithmetic; this also
    # removes the Python-2-only xrange call, so the core logic now runs on
    # both Python 2 and 3.
    diffs = [b - a for a, b in zip(nums, nums[1:])]
    T = big_gcd(diffs)
    n = nums[0]
    return 0 if n % T == 0 else T - (n % T)
def main():
    """Read Code Jam input from stdin and print one answer per case.

    NOTE: Python 2 only -- uses the print statement and raw_input().
    """
    C = int(raw_input())
    for c in xrange(1, C+1):
        # First value on each case line is the count of numbers; skip it
        # with nums[1:] when handing the times to solve().
        nums = map(int, raw_input().strip().split())
        print 'Case #{}: {}'.format(c, solve(nums[1:]))

if __name__ == '__main__':
    main()
| Python | 0.002125 | |
b7a019b41cbfac78ff48fe604d401921786d7459 | Add size_continuous_layer helper tests | test/viz/helpers/test_size_continuous_layer.py | test/viz/helpers/test_size_continuous_layer.py | import unittest
from unittest.mock import Mock
from cartoframes.viz import helpers, Source
class TestSizeContinuousLayerHelper(unittest.TestCase):
def test_helpers(self):
"should be defined"
self.assertNotEqual(helpers.size_continuous_layer, None)
def test_size_continuous_layer(self):
"should create a layer with the proper attributes"
layer = helpers.size_continuous_layer(
source='sf_neighborhoods',
value='name'
)
self.assertNotEqual(layer.style, None)
self.assertEqual(layer.style._style['point']['width'], 'ramp(linear(sqrt($name), sqrt(globalMin($name)), sqrt(globalMax($name))), [2, 50])')
self.assertEqual(layer.style._style['line']['width'], 'ramp(linear($name), [1, 10])')
self.assertEqual(layer.style._style['point']['color'], 'opacity(#F46D43, 0.8)')
self.assertEqual(layer.style._style['line']['color'], 'opacity(#4CC8A3, 0.8)')
self.assertNotEqual(layer.popup, None)
self.assertEqual(layer.popup._hover, [{
'title': 'name',
'value': '$name'
}])
self.assertNotEqual(layer.legend, None)
self.assertEqual(layer.legend._type, 'size-continuous')
self.assertEqual(layer.legend._title, 'name')
self.assertEqual(layer.legend._description, '')
def test_size_continuous_layer_point(self):
"should create a point type layer"
layer = helpers.size_continuous_layer(
'sf_neighborhoods',
'name',
'Neighborhoods',
size=[10, 20],
color='blue'
)
self.assertEqual(
layer.style._style['point']['width'],
'ramp(linear(sqrt($name), sqrt(globalMin($name)), sqrt(globalMax($name))), [10, 20])'
)
self.assertEqual(
layer.style._style['point']['color'],
'opacity(blue, 0.8)'
)
def test_size_continuous_layer_line(self):
"should create a line type layer"
Source._get_geom_type = Mock(return_value='line')
layer = helpers.size_continuous_layer(
'sf_neighborhoods',
'name',
'Neighborhoods',
size=[10, 20],
color='blue'
)
self.assertEqual(
layer.style._style['line']['width'],
'ramp(linear($name), [10, 20])'
)
self.assertEqual(
layer.style._style['line']['color'],
'opacity(blue, 0.8)'
)
| Python | 0.000001 | |
e79445de75721b0d0b8ab1b6c8e24f036bf35a11 | make qsub | nexus_obj/ascii_txt.py | nexus_obj/ascii_txt.py | import os
def qsub_file(fnames, nmpi=64, title='title', hours=2):
    """Build the text of a Titan PBS batch script running one QMCPACK job
    per input file, all in parallel, followed by a `wait`.

    Parameters
    ----------
    fnames : list of str
        paths to the QMCPACK input files; each aprun happens in the
        file's own directory.
    nmpi : int
        MPI ranks per run; the node request assumes 2 ranks per node,
        i.e. nodes = len(fnames) * nmpi // 2.
    title : str
        PBS job name.
    hours : int
        requested walltime, in whole hours.

    Returns
    -------
    str
        the complete job-script text.
    """
    header = """#!/bin/bash
#PBS -N %s
#PBS -l walltime=%02d:00:00
#PBS -l nodes=%d
#PBS -A mat158
#PBS -j oe
#PBS -k n
cd ${PBS_O_WORKDIR}
export OMP_NUM_THREADS=8
BIN=~/soft/kylin_qmcpack/qmcpack_cpu_comp\n\n""" % (
        title,
        hours,                    # FIX: %02d replaces '0%d', which mangled
                                  # any walltime of 10 hours or more.
        len(fnames) * nmpi // 2   # FIX: // keeps the node count an integer
                                  # under Python 3 true division.
    )
    body = 'cwd=`pwd`\n'
    for floc in fnames:
        fname = os.path.basename(floc)
        rundir = os.path.dirname(floc)
        move_cmd = 'cd ' + rundir
        # Background each run so all inputs execute concurrently.
        run_cmd = 'aprun -n %d -d 8 -S 1 $BIN ' % nmpi + fname + ' > out 2> err&'
        body += '\n'.join([move_cmd, run_cmd, 'cd $cwd']) + '\n'
    # end for fname
    body += '\nwait'
    text = header + body
    return text
# end def qsub_file
| Python | 0.999677 | |
430c5301d7db50b153b0ae33f5c281506948099c | Add new package | plasTeX/Packages/afterpage.py | plasTeX/Packages/afterpage.py | #!/usr/bin/env python
from plasTeX import Command, Environment
class afterpage(Command):
    """The \\afterpage command from the LaTeX ``afterpage`` package.

    NOTE(review): the ':nox' modifier presumably grabs the argument without
    expansion -- confirm against the plasTeX argument-spec documentation.
    """
    # Single argument, stored on the node itself ('self').
    args = 'self:nox'

    def invoke(self, tex):
        # Parse the argument as usual, then contribute no output nodes:
        # deferring material to the next page break has no meaning in
        # plasTeX's rendered output.
        super(afterpage, self).invoke(tex)
        return []
| Python | 0.000001 | |
b40eb5723eeab38edb2440d04d65f1c5be4ad4c0 | Create solution.py | data_structures/linked_list/problems/anagrams/py/solution.py | data_structures/linked_list/problems/anagrams/py/solution.py | import LinkedList
# Problem description:
# Solution time complexity:
# Comments:
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def AreAnagrams(left: 'LinkedList.Node', right: 'LinkedList.Node') -> bool:
    """Return True when the two singly linked lists are anagrams.

    Two lists are anagrams when they hold exactly the same multiset of node
    values, irrespective of order.  Runs in O(n + m) time and O(n + m) extra
    space; an empty list (None head) is an anagram only of another empty
    list.  Annotations are forward references so this module does not need
    LinkedList resolved at definition time.
    """
    from collections import Counter

    def _value_counts(node):
        # Tally node values by walking the chain through the .nxt links.
        counts = Counter()
        while node is not None:
            counts[node.val] += 1
            node = node.nxt
        return counts

    return _value_counts(left) == _value_counts(right)
| Python | 0.000018 | |
e9a71173eae28b378052ddce4e0fe8a3d3313c4e | Disable screenshot_sync_tests on Mac. | content/test/gpu/gpu_tests/screenshot_sync.py | content/test/gpu/gpu_tests/screenshot_sync.py | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
data_path = os.path.join(
util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
class _ScreenshotSyncValidator(page_test.PageTest):
def CustomizeBrowserOptions(self, options):
options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')
def ValidatePage(self, page, tab, results):
test_success = tab.EvaluateJavaScript('window.__testSuccess')
if not test_success:
message = tab.EvaluateJavaScript('window.__testMessage')
raise page_test.Failure(message)
# NOTE(review): this disable decorator sits on the Page class rather than
# the Test class below -- confirm telemetry honors page-level disables.
@test.Disabled('mac')
class ScreenshotSyncPage(page.Page):
  """Page object driving content/test/data/gpu/screenshot_sync.html."""

  def __init__(self, page_set, base_dir):
    super(ScreenshotSyncPage, self).__init__(
        url='file://screenshot_sync.html',
        page_set=page_set,
        base_dir=base_dir,
        name='ScreenshotSync')
    self.user_agent_type = 'desktop'

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # The page sets window.__testComplete when it has finished its
    # screenshot round-trips; allow up to two minutes before timing out.
    action_runner.WaitForJavaScriptCondition(
        'window.__testComplete', timeout_in_seconds=120)
class ScreenshotSyncProcess(test.Test):
  """Tests that screenshots are properly synchronized with the frame on
  which they were requested."""
  # Validator run against every page in the set.
  test = _ScreenshotSyncValidator

  def CreateExpectations(self, page_set):
    return expectations.ScreenshotSyncExpectations()

  def CreatePageSet(self, options):
    # Single-page set served from the GPU test data directory.
    ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
    ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
    return ps
| # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import screenshot_sync_expectations as expectations
from telemetry import test
from telemetry.core import util
from telemetry.page import page
from telemetry.page import page_set
from telemetry.page import page_test
# pylint: disable=W0401,W0614
from telemetry.page.actions.all_page_actions import *
# Directory holding the GPU test pages that are served as file:// URLs.
data_path = os.path.join(
    util.GetChromiumSrcDir(), 'content', 'test', 'data', 'gpu')
class _ScreenshotSyncValidator(page_test.PageTest):
  """Checks that the harness page flagged success via window.__testSuccess."""

  def CustomizeBrowserOptions(self, options):
    # The test page relies on JS hooks exposed by GPU benchmarking.
    options.AppendExtraBrowserArgs('--enable-gpu-benchmarking')

  def ValidatePage(self, page, tab, results):
    test_success = tab.EvaluateJavaScript('window.__testSuccess')
    if not test_success:
      # Surface the page's own diagnostic string as the failure reason.
      message = tab.EvaluateJavaScript('window.__testMessage')
      raise page_test.Failure(message)
class ScreenshotSyncPage(page.Page):
  """Page object driving content/test/data/gpu/screenshot_sync.html."""

  def __init__(self, page_set, base_dir):
    super(ScreenshotSyncPage, self).__init__(
        url='file://screenshot_sync.html',
        page_set=page_set,
        base_dir=base_dir,
        name='ScreenshotSync')
    self.user_agent_type = 'desktop'

  def RunNavigateSteps(self, action_runner):
    action_runner.NavigateToPage(self)
    # The page sets window.__testComplete when it has finished its
    # screenshot round-trips; allow up to two minutes before timing out.
    action_runner.WaitForJavaScriptCondition(
        'window.__testComplete', timeout_in_seconds=120)
class ScreenshotSyncProcess(test.Test):
  """Tests that screenshots are properly synchronized with the frame on
  which they were requested."""
  # Validator run against every page in the set.
  test = _ScreenshotSyncValidator

  def CreateExpectations(self, page_set):
    return expectations.ScreenshotSyncExpectations()

  def CreatePageSet(self, options):
    # Single-page set served from the GPU test data directory.
    ps = page_set.PageSet(file_path=data_path, serving_dirs=[''])
    ps.AddPage(ScreenshotSyncPage(ps, ps.base_dir))
    return ps
| Python | 0.000006 |
48e19852c6e1f5a0f2792a62adeb560121d77d11 | Create __init__.py | crispy/__init__.py | crispy/__init__.py | Python | 0.000429 | ||
bad7bfee43ef7d560b186d3f41af2aa35fe0c4af | allow FactorScheduler factor=1 | python/mxnet/lr_scheduler.py | python/mxnet/lr_scheduler.py | """
learning rate scheduler, which adaptive changes the learning rate based on the
progress
"""
import logging
class LRScheduler(object):
    """Base class for learning rate schedulers.

    Subclasses implement ``__call__`` to map the number of weight updates
    performed so far to the learning rate to use.
    """

    def __init__(self):
        # Initial learning rate; optimizers may overwrite base_lr directly.
        self.base_lr = 0.01

    def __call__(self, num_update):
        """Return the learning rate for the given update count.

        ``num_update`` is the upper bound of the number of updates applied
        to any single weight; it is non-decreasing and normally grows by at
        most one per minibatch (see
        https://github.com/dmlc/mxnet/issues/625).

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        raise NotImplementedError("must override this")
class FactorScheduler(LRScheduler):
    """Reduce the learning rate by a constant factor every ``step`` updates.

    After n updates the learning rate is

        base_lr * factor^(floor(n / step))

    Parameters
    ----------
    step: int
        schedule learning rate after n updates
    factor: float
        the factor for reducing the learning rate; must be <= 1
        (factor == 1 keeps the rate constant)
    """

    def __init__(self, step, factor=1):
        super(FactorScheduler, self).__init__()
        if step < 1:
            raise ValueError("Schedule step must be greater or equal than 1 round")
        if factor > 1.0:
            raise ValueError("Factor must be no more than 1 to make lr reduce")
        self.step = step
        self.factor = factor
        self.count = 0  # update count at which the rate was last reduced

    def __call__(self, num_update):
        """Return the learning rate for ``num_update`` weight updates.

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        # FIX: `while` instead of `if` so the rate catches up even when
        # num_update advances by more than one between calls (e.g. when
        # resuming from a checkpoint); this keeps the actual rate consistent
        # with the documented base_lr * factor^(floor(n/step)) formula.
        while num_update > self.count + self.step:
            self.count += self.step
            self.base_lr *= self.factor
            logging.info("Update[%d]: Change learning rate to %0.5e",
                         num_update, self.base_lr)
        return self.base_lr
class MultiFactorScheduler(LRScheduler):
    """Reduce the learning rate by ``factor`` at each milestone in ``step``.

    After n updates the learning rate is

        base_lr * factor^(number of milestones already passed)

    Parameters
    ----------
    step: list of int
        strictly increasing update counts at which to reduce the rate
    factor: float
        the factor for reducing the learning rate; must be <= 1
    """

    def __init__(self, step, factor=1):
        super(MultiFactorScheduler, self).__init__()
        assert isinstance(step, list) and len(step) >= 1
        for i, _step in enumerate(step):
            if i != 0 and step[i] <= step[i-1]:
                raise ValueError("Schedule step must be an increasing integer list")
            if _step < 1:
                raise ValueError("Schedule step must be greater or equal than 1 round")
        if factor > 1.0:
            raise ValueError("Factor must be no more than 1 to make lr reduce")
        self.step = step
        self.cur_step_ind = 0  # index of the next pending milestone
        self.factor = factor
        self.count = 0         # update count of the last applied milestone

    def __call__(self, num_update):
        """Return the learning rate for ``num_update`` weight updates.

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        # FIX: loop (instead of a single `if`) so every milestone already
        # passed is applied, keeping the rate correct even when num_update
        # jumps by more than one between calls.
        while self.cur_step_ind <= len(self.step) - 1:
            if num_update > self.step[self.cur_step_ind]:
                self.count = self.step[self.cur_step_ind]
                self.cur_step_ind += 1
                self.base_lr *= self.factor
                logging.info("Update[%d]: Change learning rate to %0.5e",
                             num_update, self.base_lr)
            else:
                return self.base_lr
        return self.base_lr
| """
learning rate scheduler, which adaptive changes the learning rate based on the
progress
"""
import logging
class LRScheduler(object):
    """Abstract scheduler mapping an update count to a learning rate."""

    def __init__(self):
        # Starting learning rate; callers may overwrite base_lr directly.
        self.base_lr = 0.01

    def __call__(self, num_update):
        """Return the learning rate after ``num_update`` weight updates.

        ``num_update`` is the upper bound of updates applied to any single
        weight; it never decreases and normally grows by at most one per
        minibatch (see https://github.com/dmlc/mxnet/issues/625).

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        raise NotImplementedError("must override this")
class FactorScheduler(LRScheduler):
    """Multiply the learning rate by ``factor`` once every ``step`` updates.

    After n updates the learning rate equals
    base_lr * factor^(floor(n/step)).

    NOTE(review): the default factor=1 fails the < 1 validation below, so
    callers must always pass an explicit factor -- confirm this is intended.

    Parameters
    ----------
    step: int
        schedule learning rate after n updates
    factor: float
        the factor for reducing the learning rate (must be < 1)
    """

    def __init__(self, step, factor=1):
        super(FactorScheduler, self).__init__()
        if step < 1:
            raise ValueError("Schedule step must be greater or equal than 1 round")
        if factor >= 1.0:
            raise ValueError("Factor must be less than 1 to make lr reduce")
        self.step = step
        self.factor = factor
        self.count = 0  # update count at which the rate was last reduced

    def __call__(self, num_update):
        """Return the learning rate for ``num_update`` updates.

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        # Guard-clause form of the original nested `if`; at most one
        # reduction is applied per call.
        if num_update <= self.count + self.step:
            return self.base_lr
        self.count += self.step
        self.base_lr *= self.factor
        logging.info("Update[%d]: Change learning rate to %0.5e",
                     num_update, self.base_lr)
        return self.base_lr
class MultiFactorScheduler(LRScheduler):
    """Multiply the learning rate by ``factor`` at each milestone in ``step``.

    After n updates the learning rate equals
    base_lr * factor^(number of milestones already passed).

    Parameters
    ----------
    step: list of int
        schedule learning rate after n updates
    factor: float
        the factor for reducing the learning rate (must be < 1)
    """

    def __init__(self, step, factor=1):
        super(MultiFactorScheduler, self).__init__()
        assert isinstance(step, list) and len(step) >= 1
        # Validate element by element, monotonicity before positivity, to
        # keep the original error-reporting order.
        for i, milestone in enumerate(step):
            if i != 0 and step[i] <= step[i - 1]:
                raise ValueError("Schedule step must be an increasing integer list")
            if milestone < 1:
                raise ValueError("Schedule step must be greater or equal than 1 round")
        if factor >= 1.0:
            raise ValueError("Factor must be less than 1 to make lr reduce")
        self.step = step
        self.cur_step_ind = 0  # index of the next pending milestone
        self.factor = factor
        self.count = 0         # update count of the last applied milestone

    def __call__(self, num_update):
        """Return the learning rate for ``num_update`` updates.

        Parameters
        ----------
        num_update: int
            the maximal number of updates applied to a weight.
        """
        idx = self.cur_step_ind
        # At most one milestone is consumed per call; callers advance
        # num_update by at most one between calls.
        if idx < len(self.step) and num_update > self.step[idx]:
            self.count = self.step[idx]
            self.cur_step_ind = idx + 1
            self.base_lr *= self.factor
            logging.info("Update[%d]: Change learning rate to %0.5e",
                         num_update, self.base_lr)
        return self.base_lr
| Python | 0.000005 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.