commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
1d7fa31d9f4ce42586fb33bea98d5af87bd95f3a | Allow setup.py install | setup.py | setup.py | from setuptools import setup
setup(name='multifil',
version='0.2',
description='A spatial half-sarcomere model and the means to run it',
url='https://github.com/cdw/multifil',
author='C David Williams',
author_email='cdave@uw.edu',
license='MIT',
packages=['multifil'],
install_requires=['numpy', 'boto']
)
| Python | 0 | |
30220f57bc5052cb05ed5c7e3dc01c763152d175 | Add setup for python installation | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='lqrrt',
version='1.0',
description='Kinodynamic RRT Implementation',
author='Jason Nezvadovitz',
packages=['lqrrt'],
)
| Python | 0 | |
0c7ec853c97a71eacc838be925c46ac0c26d1518 | Create setup.py | setup.py | setup.py | from distutils.core import setup
setup(
name = 'ratio-merge',
packages = ['ratio-merge'],
version = '0.1',
description = 'A small utility function for merging two lists by some ratio',
author = 'Adam Lev-Libfeld',
author_email = 'adam@tamarlabs.com',
url = 'https://github.com/daTokenizer/ratio-merge-python',
download_url = 'https://github.com/daTokenizer/ratio-merge-python/archive/0.1.tar.gz',
keywords = ['merge', 'ratio', 'lists'], # arbitrary keywords
classifiers = [],
)
| Python | 0.000001 | |
a29b7195af2550e5646f3aac581cbaf47244e8f4 | Create setup.py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# setup.py
"""
Setup files
Copyright (c) 2020, David Hoffman
"""
import setuptools
# read in long description
with open("README.md", "r") as fh:
long_description = fh.read()
# get requirements
with open("requirements.txt", "r") as fh:
requirements = [line.strip() for line in fh]
setuptools.setup(
name="py-otf",
version="0.0.1",
author="David Hoffman",
author_email="dave.p.hoffman@gmail.com",
description="A python library for simulating and analyzing microscope point spread functions (PSFs)",
long_description=long_description,
long_description_content_type="text/markdown",
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering",
],
python_requires=">=3",
install_requires=requirements,
)
| Python | 0.000001 | |
45d734cb495e7f61c5cbbac2958e220868033a9d | Add setup.py for RTD | setup.py | setup.py | from distutils.core import setup
setup(
name='mayatools',
version='0.1-dev',
description='Collection of general tools and utilities for working in and with Maya.',
url='https://github.com/westernx/mayatools',
packages=['mayatools'],
author='Mike Boers',
author_email='mayatools@mikeboers.com',
license='BSD-3',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
| Python | 0 | |
1ac147a2a9f627cccd917006f61cdda7b25ccc06 | Add setup.py | setup.py | setup.py | from distutils.core import setup
setup(
name='applied-sims',
version='0.1',
classifiers=[
'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
'Programming Language :: Python :: 3',
'Topic :: Scientific/Engineering :: Physics',
'Intended Audience :: Other Audience',
],
packages=['polymer_states'],
url='http://github.com/szabba/applied-sims',
license='MPL-2.0',
author='Karol Marcjan',
author_email='karol.marcjan@gmail.com',
description=''
)
| Python | 0.000001 | |
e6e96d9fa725ec28028b090c900086474e69cdb8 | Add basic setup.py | setup.py | setup.py |
from distutils.core import setup
setup(
name='litemap',
version='1.0a',
description='Mapping class which stores in SQLite database.',
url='http://github.com/mikeboers/LiteMap',
py_modules=['litemap'],
author='Mike Boers',
author_email='litemap@mikeboers.com',
license='New BSD License',
)
| Python | 0.000002 | |
479ff810c07ebe5c309bb4c9f712e689e831945e | Add setup.py | setup.py | setup.py | import os
from setuptools import setup
this_dir = os.path.dirname(__file__)
long_description = "\n" + open(os.path.join(this_dir, 'README.rst')).read()
setup(
name='ansible_role_apply',
version='0.0.0',
description='Apply a single Ansible role to host(s) easily',
long_description=long_description,
keywords='ansible',
author='Marc Abramowitz',
author_email='mmsabramo@gmail.com',
url='https://github.com/msabramo/ansible-role-apply',
py_modules=['ansible-role-apply'],
zip_safe=False,
install_requires=[
'ansible',
'click',
],
entry_points="""\
[console_scripts]
ansible-role-apply = ansible_role_apply:ansible_role_apply
""",
license='MIT',
classifiers=[
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Topic :: Software Development :: Testing',
'Natural Language :: English',
'Intended Audience :: Developers',
],
)
| Python | 0.000001 | |
9d12617170982fc1b6b01d109d986f5cd45e0552 | Update setup.py. | setup.py | setup.py | from setuptools import setup,find_packages
setup (
name = 'pymatgen',
version = '1.0.1',
packages = find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires = ['numpy','scipy','matplotlib','PyCIFRW'],
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Dan Gunter',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, dkgunter@lbl.gov',
summary = 'The Materials Project Python Library',
url = 'www.materialsproject.org',
license = '',
long_description= 'pymatgen is a Python library for the Materials Project. It includes core structure definition and utilities, electronic structure objects, and convenient IO from VASP and CIF files.',
# could also include long_description, download_url, classifiers, etc.
)
| from setuptools import setup,find_packages
setup (
name = 'pymatgen',
version = '1.0.1',
packages = find_packages(),
# Declare your packages' dependencies here, for eg:
install_requires = ['numpy','matplotlib','pymongo','PyCIFRW','psycopg2'],
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Dan Gunter',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, dkgunter@lbl.gov',
summary = 'The Materials Project Python Library',
url = 'www.materialsproject.org',
license = '',
long_description= 'pymatgen is a Python library for the Materials Project. It includes core structure definition and utilities, electronic structure objects, database access APIs, and convenient IO from VASP and CIF files.',
# could also include long_description, download_url, classifiers, etc.
)
| Python | 0 |
4a7234d4592166a1a13bc6b8e8b3b201019df23b | Create prims_minimum_spanning.py | algorithms/graph/prims_minimum_spanning.py | algorithms/graph/prims_minimum_spanning.py | import heapq # for priority queue
# input number of nodes and edges in graph
n, e = map (int,input().split())
# initializing empty graph as a dictionary (of the form {int:list})
g = dict (zip ([i for i in range(1,n+1)],[[] for i in range(n)]))
# input graph data
for i in range(e):
a, b, c = map (int,input().split())
g[a].append([c,b])
g[b].append([c,a])
vis = []
s = [[0,1]]
prim = []
mincost = 0
# prim's algo. to find weight of minimum spanning tree
while (len(s)>0):
v = heapq.heappop(s)
x = v[1]
if (x in vis):
continue
mincost += v[0]
prim.append(x)
vis.append(x)
for j in g[x]:
i = j[-1]
if(i not in vis):
heapq.heappush(s,j)
print(mincost)
| Python | 0.000041 | |
24f6cbdcf2f4261a651d058934c65c3696988586 | add setup.py to document deps | setup.py | setup.py | from setuptools import setup
setup(
name='gentle',
version='0.1',
description='Robust yet lenient forced-aligner built on Kaldi.',
url='http://lowerquality.com/gentle',
author='Robert M Ochshorn',
license='MIT',
packages=['gentle'],
install_requires=['twisted'],
)
| Python | 0 | |
654bd46a8226ea97000a1263132a37f7bf130718 | ADD setup.py | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='kernel_regression',
version='1.0',
description='Implementation of Nadaraya-Watson kernel regression with automatic bandwidth selection compatible with sklearn.',
author='Jan Hendrik Metzen',
author_email='jhm@informatik.uni-bremen.de',
url='https://github.com/jmetzen/kernel_regression',
py_modules = ['kernel_regression']
)
| Python | 0.000001 | |
1707306cdee6442e78fe9eaee1d472a0248f75d5 | make license consistent | setup.py | setup.py | # -*- coding: utf-8 -*-
"""
argcomplete
~~~~
Argcomplete provides easy and extensible automatic tab completion of arguments and options for your Python script.
It makes two assumptions:
- You're using bash as your shell
- You're using argparse to manage your command line options
See AUTODOCS_LINK for more info.
"""
from setuptools import setup, find_packages
setup(
name='argcomplete',
version='0.1.0',
url='https://github.com/kislyuk/argcomplete',
license='GPL',
author='Andrey Kislyuk',
author_email='kislyuk@gmail.com',
description='Bash tab completion for argparse',
long_description=__doc__,
packages = find_packages(),
scripts = ['scripts/register-python-argcomplete'],
zip_safe=False,
include_package_data=True,
platforms=['MacOS X', 'Posix'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| # -*- coding: utf-8 -*-
"""
argcomplete
~~~~
Argcomplete provides easy and extensible automatic tab completion of arguments and options for your Python script.
It makes two assumptions:
- You're using bash as your shell
- You're using argparse to manage your command line options
See AUTODOCS_LINK for more info.
"""
from setuptools import setup, find_packages
setup(
name='argcomplete',
version='0.1.0',
url='https://github.com/kislyuk/argcomplete',
license='BSD',
author='Andrey Kislyuk',
author_email='kislyuk@gmail.com',
description='Bash tab completion for argparse',
long_description=__doc__,
packages = find_packages(),
scripts = ['scripts/register-python-argcomplete'],
zip_safe=False,
include_package_data=True,
platforms=['MacOS X', 'Posix'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: MacOS :: MacOS X',
'Operating System :: POSIX',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| Python | 0.000004 |
1f1096046e11067c4d42235d3b1aadbfec869bff | Remove setuptools from install_requires | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-polymorphic',
version=find_version('polymorphic', '__init__.py'),
license='BSD',
description='Seamless Polymorphic Inheritance for Django Models',
long_description=read('README.rst'),
url='https://github.com/django-polymorphic/django-polymorphic',
author='Bert Constantin',
author_email='bert.constantin@gmx.de',
maintainer='Christopher Glass',
maintainer_email='tribaal@gmail.com',
packages=find_packages(),
package_data={
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
from os import path
import codecs
import os
import re
import sys
def read(*parts):
file_path = path.join(path.dirname(__file__), *parts)
return codecs.open(file_path, encoding='utf-8').read()
def find_version(*parts):
version_file = read(*parts)
version_match = re.search(r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M)
if version_match:
return str(version_match.group(1))
raise RuntimeError("Unable to find version string.")
setup(
name='django-polymorphic',
version=find_version('polymorphic', '__init__.py'),
license='BSD',
description='Seamless Polymorphic Inheritance for Django Models',
long_description=read('README.rst'),
url='https://github.com/django-polymorphic/django-polymorphic',
author='Bert Constantin',
author_email='bert.constantin@gmx.de',
maintainer='Christopher Glass',
maintainer_email='tribaal@gmail.com',
packages=find_packages(),
package_data={
'polymorphic': [
'templates/admin/polymorphic/*.html',
],
},
install_requires=['setuptools'],
test_suite='runtests',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Framework :: Django',
'Framework :: Django :: 1.4',
'Framework :: Django :: 1.5',
'Framework :: Django :: 1.6',
'Framework :: Django :: 1.7',
'Framework :: Django :: 1.8',
'Topic :: Software Development :: Libraries :: Python Modules',
]
)
| Python | 0 |
260911a0a46601092aa75882c806ca921a0cbf6d | Add setup.py file so we can install | setup.py | setup.py | from __future__ import with_statement
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test as TestCommand
version = "0.0.1-dev"
def readme():
with open('README.md') as f:
return f.read()
reqs = [line.strip() for line in open('requirements.txt')]
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = []
self.test_suite = True
def run_tests(self):
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
setup(
name = "pyaxiom",
version = version,
description = "A library to manage various Axiom assets using Python",
long_description = readme(),
license = 'LGPLv3',
author = "Kyle Wilcox",
author_email = "kyle@axiomalaska.com",
url = "https://git.axiom/axiom/pyncml",
packages = find_packages(),
install_requires = reqs,
tests_require = ['pytest'],
cmdclass = {'test': PyTest},
classifiers = [
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Topic :: Scientific/Engineering',
],
include_package_data = True,
)
| Python | 0 | |
b1d87a8f96fb6a019bc7ebab71fe8e0c5921d80f | Include setup.py | setup.py | setup.py | from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = ['distance', 'tensorflow', 'numpy', 'six']
setup(
name='attentionocr',
url='https://github.com/emedvedev/attention-ocr',
author_name='Ed Medvedev',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='''Optical character recognition model
for Tensorflow based on Visual Attention.'''
)
| Python | 0 | |
6ded510fa9c694e8a836302131157604859d40b1 | add setup settings | setup.py | setup.py | from setuptools import setup
setup(name='uc-numero-alumno',
version='0.1.0',
description='Valida un número de alumno de la UC ',
url='https://github.com/mrpatiwi/uc-numero-alumno-python',
author='Patricio López',
author_email='patricio@lopezjuri.com',
license='MIT',
packages=['ucnumber'],
zip_safe=False)
| Python | 0.000001 | |
414c5d0f9e7e92772cf65be976791889e96e2799 | Package with setuptools | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
classifiers = [
'Development Status :: 5 - Production/Stable',
'Framework :: Twisted',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Natural Language :: English',
'Topic :: Software Development :: Libraries :: Python Modules'
]
setup(
name='npyscreenreactor',
version='1.1',
license='MIT',
classifiers=classifiers,
author='Mark Tearle',
author_email='mark@tearle.com',
description = 'Twisted reactor for npyscreen',
long_description = 'npyscreenreactor is a Twisted reactor for the npyscreen curses library',
url='https://github.com/mtearle/npyscreenreactor',
packages=find_packages(),
keywords=['npyscreen', 'twisted'],
install_requires=['twisted', 'npyscreen']
)
| Python | 0 | |
01caadc16b58305d52c12517cd89300b9f0f80ae | Add forgotten file | nipy/modalities/fmri/tests/test_iterators.py | nipy/modalities/fmri/tests/test_iterators.py | #TODO the iterators are deprecated
import numpy as np
from nipy.testing import *
from nipy.core.api import Image
import nipy.core.reference.coordinate_map as coordinate_map
from nipy.modalities.fmri.api import FmriImageList
"""
Comment out since these are slated for deletion and currently are broken.
Keep for reference until generators are working.
class test_Iterators(TestCase):
def setUp(self):
spacetime = ['time', 'zspace', 'yspace', 'xspace']
im = Image(np.zeros((3,4,5,6)),
coordinate_map = coordinate_map.CoordinateMap.identity((3,4,5,6), spacetime))
self.img = FmriImageList(im)
def test_fmri_parcel(self):
parcelmap = np.zeros(self.img.shape[1:])
parcelmap[0,0,0] = 1
parcelmap[1,1,1] = 1
parcelmap[2,2,2] = 1
parcelmap[1,2,1] = 2
parcelmap[2,3,2] = 2
parcelmap[0,1,0] = 2
parcelseq = (0, 1, 2, 3)
expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0]
iterator = parcel_iterator(self.img, parcelmap, parcelseq)
for i, slice_ in enumerate(iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
iterator = parcel_iterator(self.img, parcelmap)
for i, slice_ in enumerate(iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
def test_fmri_parcel_write(self):
parcelmap = np.zeros(self.img.shape[1:])
parcelmap[0,0,0] = 1
parcelmap[1,1,1] = 1
parcelmap[2,2,2] = 1
parcelmap[1,2,1] = 2
parcelmap[2,3,2] = 2
parcelmap[0,1,0] = 2
parcelseq = (0, 1, 2, 3)
expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0]
iterator = parcel_iterator(self.img, parcelmap, parcelseq, mode='w')
for i, slice_ in enumerate(iterator):
value = np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])])
slice_.set(value)
iterator = parcel_iterator(self.img, parcelmap, parcelseq)
for i, slice_ in enumerate(iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
assert_equal(slice_, np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])]))
iterator = parcel_iterator(self.img, parcelmap, mode='w')
for i, slice_ in enumerate(iterator):
value = np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])])
slice_.set(value)
iterator = parcel_iterator(self.img, parcelmap)
for i, slice_ in enumerate(iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
assert_equal(slice_, np.asarray([np.arange(expected[i]) for _ in range(self.img.shape[0])]))
def test_fmri_parcel_copy(self):
parcelmap = np.zeros(self.img.shape[1:])
parcelmap[0,0,0] = 1
parcelmap[1,1,1] = 1
parcelmap[2,2,2] = 1
parcelmap[1,2,1] = 2
parcelmap[2,3,2] = 2
parcelmap[0,1,0] = 2
parcelseq = (0, 1, 2, 3)
expected = [np.product(self.img.shape[1:]) - 6, 3, 3, 0]
iterator = parcel_iterator(self.img, parcelmap, parcelseq)
tmp = FmriImageList(self.img[:] * 1., self.img.coordmap)
new_iterator = iterator.copy(tmp)
for i, slice_ in enumerate(new_iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
iterator = parcel_iterator(self.img, parcelmap)
for i, slice_ in enumerate(new_iterator):
self.assertEqual((self.img.shape[0], expected[i],), slice_.shape)
def test_fmri_sliceparcel(self):
parcelmap = np.asarray([[[0,0,0,1,2,2]]*5,
[[0,0,1,1,2,2]]*5,
[[0,0,0,0,2,2]]*5])
parcelseq = ((1, 2), 0, 2)
iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq)
for i, slice_ in enumerate(iterator):
pm = parcelmap[i]
ps = parcelseq[i]
try:
x = len([n for n in pm.flat if n in ps])
except TypeError:
x = len([n for n in pm.flat if n == ps])
self.assertEqual(x, slice_.shape[1])
self.assertEqual(self.img.shape[0], slice_.shape[0])
def test_fmri_sliceparcel_write(self):
parcelmap = np.asarray([[[0,0,0,1,2,2]]*5,
[[0,0,1,1,2,2]]*5,
[[0,0,0,0,2,2]]*5])
parcelseq = ((1, 2), 0, 2)
iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq, mode='w')
for i, slice_ in enumerate(iterator):
pm = parcelmap[i]
ps = parcelseq[i]
try:
x = len([n for n in pm.flat if n in ps])
except TypeError:
x = len([n for n in pm.flat if n == ps])
value = [i*np.arange(x) for i in range(self.img.shape[0])]
slice_.set(value)
iterator = slice_parcel_iterator(self.img, parcelmap, parcelseq)
for i, slice_ in enumerate(iterator):
pm = parcelmap[i]
ps = parcelseq[i]
try:
x = len([n for n in pm.flat if n in ps])
except TypeError:
x = len([n for n in pm.flat if n == ps])
value = [i*np.arange(x) for i in range(self.img.shape[0])]
self.assertEqual(x, slice_.shape[1])
self.assertEqual(self.img.shape[0], slice_.shape[0])
assert_equal(slice_, value)
"""
| Python | 0.000001 | |
8aada38d951d039e11e03a6bae9445c784bb4cce | Write a brief demo using nltk | parse-demo.py | parse-demo.py | #!/usr/bin/python3
import sys, os
import nltk
if len(sys.argv) < 2:
print("Please supply a filename.")
sys.exit(1)
filename = sys.argv[1]
with open(filename, 'r') as f:
data = f.read()
# Break the input down into sentences, then into words, and position tag
# those words.
sentences = [nltk.pos_tag(nltk.word_tokenize(sentence)) \
for sentence in nltk.sent_tokenize(data)]
# Define a grammar, and identify the noun phrases in the sentences.
chunk_parser = nltk.RegexpParser(r"NP: {<DT>?<JJ>*<NN>}")
trees = [chunk_parser.parse(sentence) for sentence in sentences]
for tree in trees:
print(tree)
#for subtree in tree.subtrees(filter = lambda t: t.label() == 'NP'):
#print(subtree)
| Python | 0.000001 | |
89fa937d218bef113d2bcc681cb4dbd547940c45 | Add setup.py | setup.py | setup.py | from distutils.core import setup
setup(
name = 'koofr',
packages = ['koofr'], # this must be the same as the name above
install_requires=['requests'],
version = '0.1',
description = 'Python SDK for Koofr',
author = 'Andraz Vrhovec',
author_email = 'andraz@koofr.net',
url = 'https://github.com/koofr/python-koofr', # use the URL to the github repo
download_url = 'https://github.com/koofr/python-koofr/tarball/0.1', # I'll explain this in a second
keywords = ['api', 'koofr', 'cloud'], # arbitrary keywords
classifiers = [],
)
| Python | 0.000001 | |
8ecfe73916fbca42b9a1b47fb2758bb561b76eec | Remove print. | setup.py | setup.py | import os
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.md')
long_description = open(README).read() + '\n\n'
setup (
name = 'pymatgen',
version = '1.2.4',
packages = find_packages(),
install_requires = ['numpy', 'scipy', 'matplotlib', 'PyCIFRW'],
package_data = {'pymatgen.core': ['*.json'], 'pymatgen.io': ['*.cfg']},
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Will Richards, Dan Gunter, Vincent L Chevrier, Rickard Armiento',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, geoffroy.hautier@uclouvain.be, wrichard@mit.edu, dkgunter@lbl.gov, vincentchevrier@gmail.com, armiento@mit.edu',
maintainer = 'Shyue Ping Ong',
url = 'https://github.com/CederGroupMIT/pymatgen_repo/',
license = 'MIT',
description = "pymatgen is the Python library powering the Materials Project (www.materialsproject.org).",
long_description = long_description,
keywords = ["vasp", "materials", "project", "electronic", "structure"],
classifiers = [
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules",
],
download_url = "https://github.com/CederGroupMIT/pymatgen_repo/tarball/master",
test_suite = 'nose.collector',
test_requires = ['nose']
)
| import os
from setuptools import setup, find_packages
README = os.path.join(os.path.dirname(__file__), 'README.md')
long_description = open(README).read() + '\n\n'
print find_packages()
setup (
name = 'pymatgen',
version = '1.2.4',
packages = find_packages(),
install_requires = ['numpy', 'scipy', 'matplotlib', 'PyCIFRW'],
package_data = {'pymatgen.core': ['*.json'], 'pymatgen.io': ['*.cfg']},
author = 'Shyue Ping Ong, Anubhav Jain, Michael Kocher, Geoffroy Hautier, Will Richards, Dan Gunter, Vincent L Chevrier, Rickard Armiento',
author_email = 'shyue@mit.edu, anubhavj@mit.edu, mpkocher@lbnl.gov, geoffroy.hautier@uclouvain.be, wrichard@mit.edu, dkgunter@lbl.gov, vincentchevrier@gmail.com, armiento@mit.edu',
maintainer = 'Shyue Ping Ong',
url = 'https://github.com/CederGroupMIT/pymatgen_repo/',
license = 'MIT',
description = "pymatgen is the Python library powering the Materials Project (www.materialsproject.org).",
long_description = long_description,
keywords = ["vasp", "materials", "project", "electronic", "structure"],
classifiers = [
"Programming Language :: Python :: 2.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules",
],
download_url = "https://github.com/CederGroupMIT/pymatgen_repo/tarball/master",
test_suite = 'nose.collector',
test_requires = ['nose']
)
| Python | 0.000001 |
fe8cc65832b389314ee6e83c76371809e40cc5d1 | Bump to 0.1.1 | setup.py | setup.py | from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='Kivy Garden',
version='0.1.1',
license='MIT',
packages=['garden'],
scripts=['bin/garden', 'bin/garden.bat'],
install_requires=['requests'],
)
| from ez_setup import use_setuptools
use_setuptools()
from setuptools import setup
setup(
name='Kivy Garden',
version='0.1',
license='MIT',
packages=['garden'],
scripts=['bin/garden', 'bin/garden.bat'],
install_requires=['requests'],
)
| Python | 0.000683 |
9a0d2a8d207d9f8a105795eb97bdeaac0c30ddec | add ping_interval property | aiohttp_sse/__init__.py | aiohttp_sse/__init__.py | import asyncio
from aiohttp import hdrs
from aiohttp.protocol import Response as ResponseImpl
from aiohttp.web import StreamResponse
from aiohttp.web import HTTPMethodNotAllowed
__version__ = '0.0.1'
__all__ = ['EventSourceResponse']
class EventSourceResponse(StreamResponse):
DEFAULT_PING_INTERVAL = 15
def __init__(self, *, status=200, reason=None, headers=None):
super().__init__(status=status, reason=reason)
if headers is not None:
self.headers.extend(headers)
self.headers['Content-Type'] = 'text/event-stream'
self.headers['Cache-Control'] = 'no-cache'
self.headers['Connection'] = 'keep-alive'
self._loop = None
self._finish_fut = None
self._ping_interval = self.DEFAULT_PING_INTERVAL
self._ping_task = None
def send(self, data, id=None, event=None, retry=None):
if id is not None:
self.write('id: {0}\n'.format(id).encode('utf-8'))
if event is not None:
self.write('event: {0}\n'.format(event).encode('utf-8'))
for chunk in data.split('\n'):
self.write('data: {0}\n'.format(chunk).encode('utf-8'))
if retry is not None:
self.write('retry: {0}\n'.format(retry).encode('utf-8'))
self.write(b'\n')
def start(self, request):
if request.method != 'GET':
raise HTTPMethodNotAllowed(request.method, ['GET'])
self._loop = request.app.loop
self._finish_fut = asyncio.Future(loop=self._loop)
self._finish_fut.add_done_callback(self._cancel_ping)
resp_impl = self._start_pre_check(request)
if resp_impl is not None:
return resp_impl
self._req = request
self._keep_alive = True
resp_impl = self._resp_impl = ResponseImpl(
request._writer,
self._status,
request.version,
not self._keep_alive,
self._reason)
self._copy_cookies()
if self._compression:
if (self._compression_force or
'deflate' in request.headers.get(
hdrs.ACCEPT_ENCODING, '')):
resp_impl.add_compression_filter()
if self._chunked:
resp_impl.enable_chunked_encoding()
if self._chunk_size:
resp_impl.add_chunking_filter(self._chunk_size)
headers = self.headers.items()
for key, val in headers:
resp_impl.add_header(key, val)
resp_impl.send_headers()
self._ping_task = asyncio.Task(self._ping(), loop=self._loop)
return resp_impl
@property
def ping_interval(self):
return self._ping_interval
@ping_interval.setter
def ping_interval(self, value):
if not isinstance(value, int):
raise TypeError("ping interval must be int")
if value < 0:
raise ValueError("ping interval must be greater then 0")
self._ping_interval = value
def _cancel_ping(self, fut):
self._ping_task.cancel()
def wait(self):
if not self._finish_fut:
raise RuntimeError('Response is not started')
return self._finish_fut
def stop_streaming(self):
if not self._finish_fut:
raise RuntimeError('Response is not started')
self._finish_fut.set_result(None)
@asyncio.coroutine
def _ping(self):
while True:
yield from asyncio.sleep(self._ping_interval, loop=self._loop)
if self._finish_fut.done():
break
self.write(b':ping\n\n')
| import asyncio
from aiohttp import hdrs
from aiohttp.protocol import Response as ResponseImpl
from aiohttp.web import StreamResponse
from aiohttp.web import HTTPMethodNotAllowed
__version__ = '0.0.1'
__all__ = ['EventSourceResponse']
class EventSourceResponse(StreamResponse):
    """aiohttp response implementing Server-Sent Events (text/event-stream).

    NOTE(review): ``start()`` relies on private aiohttp internals
    (``request._writer``, ``_start_pre_check``, ``_resp_impl``) -- verify
    against the pinned aiohttp version before upgrading.
    """

    # Seconds between automatic ":ping" keep-alive comments.
    PING_TIME = 15

    def __init__(self, *, status=200, reason=None, headers=None):
        super().__init__(status=status, reason=reason)
        if headers is not None:
            self.headers.extend(headers)
        # Mandatory SSE headers: event-stream content type, no caching,
        # and a persistent connection.
        self.headers['Content-Type'] = 'text/event-stream'
        self.headers['Cache-Control'] = 'no-cache'
        self.headers['Connection'] = 'keep-alive'
        self._loop = None        # event loop, set in start()
        self._finish_fut = None  # resolved by stop_streaming()
        self._ping_task = None   # background keep-alive task

    def send(self, data, id=None, event=None, retry=None):
        """Write one SSE event.

        *data* is split on newlines into multiple ``data:`` lines, per the
        SSE wire format; ``id``/``event``/``retry`` fields are optional.
        """
        if id is not None:
            self.write('id: {0}\n'.format(id).encode('utf-8'))
        if event is not None:
            self.write('event: {0}\n'.format(event).encode('utf-8'))
        for chunk in data.split('\n'):
            self.write('data: {0}\n'.format(chunk).encode('utf-8'))
        if retry is not None:
            self.write('retry: {0}\n'.format(retry).encode('utf-8'))
        # Blank line terminates the event.
        self.write(b'\n')

    def start(self, request):
        """Begin streaming: send headers and launch the ping task.

        Only GET is allowed for EventSource endpoints.
        """
        if request.method != 'GET':
            raise HTTPMethodNotAllowed()

        self._loop = request.app.loop
        self._finish_fut = asyncio.Future(loop=self._loop)
        self._finish_fut.add_done_callback(self._cancel_ping)

        # Returns the existing impl if the response was already started.
        resp_impl = self._start_pre_check(request)
        if resp_impl is not None:
            return resp_impl

        self._req = request
        self._keep_alive = True

        resp_impl = self._resp_impl = ResponseImpl(
            request._writer,
            self._status,
            request.version,
            not self._keep_alive,
            self._reason)

        self._copy_cookies()

        if self._compression:
            if (self._compression_force or
                    'deflate' in request.headers.get(
                        hdrs.ACCEPT_ENCODING, '')):
                resp_impl.add_compression_filter()

        if self._chunked:
            resp_impl.enable_chunked_encoding()
        if self._chunk_size:
            resp_impl.add_chunking_filter(self._chunk_size)

        headers = self.headers.items()
        for key, val in headers:
            resp_impl.add_header(key, val)

        resp_impl.send_headers()
        # Keep-alive pings run for the lifetime of the stream.
        self._ping_task = asyncio.Task(self._ping(), loop=self._loop)
        return resp_impl

    def _cancel_ping(self, fut):
        # Done-callback for the finish future: stop the ping task.
        self._ping_task.cancel()

    def wait(self):
        """Return the future that completes when streaming is stopped."""
        if not self._finish_fut:
            raise RuntimeError('Response is not started')
        return self._finish_fut

    def stop_streaming(self):
        """Resolve the finish future, ending the stream."""
        if not self._finish_fut:
            raise RuntimeError('Response is not started')
        self._finish_fut.set_result(None)

    @asyncio.coroutine
    def _ping(self):
        # Emit an SSE comment every PING_TIME seconds so the connection
        # is not dropped as idle.
        while True:
            yield from asyncio.sleep(self.PING_TIME, loop=self._loop)
            if self._finish_fut.done():
                break
            self.write(b':ping\n\n')
| Python | 0.000001 |
b35affdf2183fa81e628f03a904ce80beb165de2 | Fix quote output | bertil.py | bertil.py | # -*- coding: utf-8 -*-
import sys
import datetime
import time
import urllib
import json
import socket
import re
import random
from slackbot.bot import Bot, listen_to, respond_to
from tinydb import TinyDB
db = TinyDB('/home/simon/bertil/quotes.json')
def get_food(day):
    """Return the IKSU menu for *day* (ISO date string) as a newline-joined
    string, or a placeholder message when no data is available.
    """
    # Weekly menu published as JSON; top-level keys are ISO dates.
    URL = 'http://www.hanssonohammar.se/veckansmeny.json'
    response = urllib.urlopen(URL)  # Python 2 urllib API
    try:
        data = json.loads(response.read().decode('utf-8'))
    finally:
        # Fixed: the URL handle was previously never closed (leaked socket).
        response.close()

    if day not in data:
        return "(no mat " + str(day) + ")"

    mat_today = data[day][0]

    if 'IKSU' not in mat_today:
        return "(no IKSU today)"

    return "\n".join(mat_today['IKSU'])
@listen_to(r'^compile (.*)$')
def compile(message, code):
    # Legacy command: compilation support was removed; just apologize.
    # NOTE(review): shadows the builtin ``compile`` at module scope.
    message.reply(u"Jag klarar inte av sånt längre :'(")
@listen_to(r'^run (.*)$')
def run(message, code):
    # Legacy command: code execution support was removed; just apologize.
    message.reply(u"Jag klarar inte av sånt längre :'(")
@listen_to(r'^mat(\+*)$')
def mat(message, plus):
    # Each trailing "+" in the command moves the requested menu one day
    # ahead (86400 seconds per day).
    date = datetime.date.fromtimestamp(time.time() + (86400 * len(plus)))
    try:
        message.reply(u"```IKSU - {}\n{}```".format(str(date), get_food(str(date))))
    except Exception as e:
        # NOTE(review): ``e.message`` is Python 2 only -- breaks on Python 3.
        message.reply(u"Kom inte åt maten 😞 ({what})".format(what=e.message))
@listen_to(ur'^[e\u00E4\u00C4]r.*fredag.*\?', re.IGNORECASE)
def fredag(message):
    """Answer whether today is Friday (``weekday() == 4`` means Friday)."""
    is_friday = datetime.datetime.today().weekday() == 4
    answer = (u"Japp, idag är det fredag! :kreygasm:"
              if is_friday
              else u"Nej, idag är det INTE fredag! :qq::gun:")
    message.reply(answer)
@listen_to(r'^temp(\+*)$')
def temp(message, plus):
    """Reply with the current outdoor temperature from temp.acc.umu.se.

    Trailing "+" signs (asking for future days) are rejected -- the
    sensor only reports the present.
    """
    if len(plus) > 0:
        message.reply(u"Jag kan inte se in i framtiden... :qq::gun:")
    else:
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect(('temp.acc.umu.se', 2345))
            raw = s.recv(1024)
        finally:
            # Fixed: the socket leaked if connect()/recv() raised.
            s.close()
        # Payload looks like "<time>=<temp>\n"; drop the trailing newline.
        # Renamed locals: the old code shadowed the ``time`` module and
        # the ``temp`` function itself.
        timestamp, temperature = raw[:-1].split('=')
        message.reply(u"{} C klockan {}".format(temperature, timestamp))
@listen_to(r'^quote add (.*)$')
def quote_add(message, quote):
    """Store *quote* in the TinyDB quote database and confirm.

    NOTE(review): the parameter shadows the module-level ``quote`` command
    handler defined below.
    """
    db.insert({'quote': quote})
    message.reply(u"Quote inlagd!")
@listen_to(r'^quote$')
def quote(message):
    """Reply with a random stored quote, or a notice when none exist."""
    quotes = db.all()
    if not quotes:
        message.reply(u"Inga quotes inlagda...")
        return
    picked = random.choice(quotes)
    message.reply(u"{}".format(picked['quote']))
def main():
    # Entry point: construct the slackbot and block in its event loop.
    bot = Bot()
    bot.run()
if __name__ == '__main__':
main()
| # -*- coding: utf-8 -*-
import sys
import datetime
import time
import urllib
import json
import socket
import re
import random
from slackbot.bot import Bot, listen_to, respond_to
from tinydb import TinyDB
db = TinyDB('/home/simon/bertil/quotes.json')
def get_food(day):
# Get JSON
URL = 'http://www.hanssonohammar.se/veckansmeny.json'
response = urllib.urlopen(URL)
data = json.loads(response.read().decode('utf-8'))
if day not in data:
return "(no mat " + str(day) + ")"
mat_today = data[day][0]
if 'IKSU' not in mat_today:
return "(no IKSU today)"
return "\n".join(mat_today['IKSU'])
@listen_to(r'^compile (.*)$')
def compile(message, code):
message.reply(u"Jag klarar inte av sånt längre :'(")
@listen_to(r'^run (.*)$')
def run(message, code):
message.reply(u"Jag klarar inte av sånt längre :'(")
@listen_to(r'^mat(\+*)$')
def mat(message, plus):
date = datetime.date.fromtimestamp(time.time() + (86400 * len(plus)))
try:
message.reply(u"```IKSU - {}\n{}```".format(str(date), get_food(str(date))))
except Exception as e:
message.reply(u"Kom inte åt maten 😞 ({what})".format(what=e.message))
@listen_to(ur'^[e\u00E4\u00C4]r.*fredag.*\?', re.IGNORECASE)
def fredag(message):
if datetime.datetime.today().weekday() == 4:
message.reply(u"Japp, idag är det fredag! :kreygasm:")
else:
message.reply(u"Nej, idag är det INTE fredag! :qq::gun:")
@listen_to(r'^temp(\+*)$')
def temp(message, plus):
if len(plus) > 0:
message.reply(u"Jag kan inte se in i framtiden... :qq::gun:")
else:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(('temp.acc.umu.se', 2345))
tmp = s.recv(1024)
s.close()
time, temp = tmp[:len(tmp) - 1].split('=')
message.reply(u"{} C klockan {}".format(temp, time))
@listen_to(r'^quote add (.*)$')
def quote_add(message, quote):
db.insert({'quote': quote})
message.reply(u"Quote inlagd!")
@listen_to(r'^quote$')
def quote(message):
quotes = db.all()
if len(quotes) == 0:
message.reply(u"Inga quotes inlagda...")
else:
quote = random.choice(quotes)
message.reply(u">{}".format(quote['quote']))
def main():
bot = Bot()
bot.run()
if __name__ == '__main__':
main()
| Python | 0.999424 |
ce7914dd35e66820248cb82760b50a31bc8a625b | Add setup.py script to install whip-neustar cli script | setup.py | setup.py | from setuptools import setup
# Package metadata for the whip-neustar tool.
setup(
    name='whip-neustar',
    version='0.1',
    packages=['whip_neustar'],
    # Install the ``whip-neustar`` console script pointing at cli.main.
    entry_points={
        'console_scripts': [
            'whip-neustar = whip_neustar.cli:main',
        ],
    }
)
| Python | 0 | |
a72bc73aab4b696113bee16f5f7f9da1540bc02f | Create playerlist.py | playerlist.py | playerlist.py | import config
class players:
    """Accessor for the newline-separated player registry file.

    Each record is one line of the form ``name;uuid;entity_id;level``.
    """

    def __init__(self):
        # NOTE(review): the Windows-style relative path is appended
        # verbatim to ``config.install_path`` -- confirm the separator.
        self.path = config.install_path + "reg\\N_NOW_RUNNING\\PLAYERS\\LIST.nreg"

    def get_names_str(self, level):
        """Return the raw registry contents as a single string."""
        # Fixed: use a context manager so the handle is always closed.
        with open(self.path, "r") as f:
            return f.read()

    def get_names_list(self, level):
        """Return the registry as a list of lines (newline-terminated).

        Fixed: the file handle was previously never closed.
        """
        with open(self.path, "r") as f:
            return f.readlines()

    def add(self, name, uuid, level, entity_id):
        """Append one player record.

        Fixed: records are now newline-terminated; previously successive
        adds were concatenated onto a single line, which also made
        ``remove`` raise ValueError for those records.
        """
        with open(self.path, "a") as f:
            f.write("%s;%s;%s;%s\n" % (name, uuid, entity_id, level))

    def remove(self, name, uuid, level, entity_id):
        """Remove the record for the given player and rewrite the file.

        Matching ignores trailing newlines, so records written with or
        without a terminator are both handled; a missing record is a
        no-op (previously this raised ValueError).
        """
        target = "%s;%s;%s;%s" % (name, uuid, entity_id, level)
        with open(self.path, "r") as f:
            lines = f.readlines()
        kept = [line for line in lines if line.rstrip("\n") != target]
        with open(self.path, "w") as f:
            f.writelines(kept)
| Python | 0.000001 | |
7fa6d8beb2637bed6b31cf1cea5fdafffc6049bf | add tests | tests/test_dfg.py | tests/test_dfg.py | #!/usr/bin/env python
import logging
import time
import sys
from os.path import join, dirname, realpath
l = logging.getLogger("angr.tests.test_dfg")
l.setLevel(logging.DEBUG)
import nose
import angr
import pyvex
test_location = str(join(dirname(realpath(__file__)), "../../binaries/tests"))
def perform_one(binary_path):
    """Build a CFG and DFG for *binary_path* and sanity-check the DFGs.

    Checks: every DFG maps to a CFG node, ignored statement kinds never
    appear as nodes, and edge endpoints respect the dependency rules.
    """
    proj = angr.Project(join(test_location, binary_path),
                        load_options={'auto_load_libs': False},
                        )
    start = time.time()
    cfg = proj.analyses.CFG(context_sensitivity_level=2)
    end = time.time()
    duration = end - start
    l.info("CFG generated in %f seconds.", duration)

    dfg = proj.analyses.DFG(cfg=cfg)
    # There is at most one DFG per CFG node.
    nose.tools.assert_true(len(dfg.dfgs) <= len(cfg.nodes()))
    for addr, d in dfg.dfgs.items():
        nose.tools.assert_true(cfg.get_any_node(addr) is not None)
        # We check there is not node that we ignored
        for n in d.nodes():
            nose.tools.assert_not_equal(n.tag, 'Ist_IMark')
            nose.tools.assert_not_equal(n.tag, 'Ist_AbiHint')
            nose.tools.assert_not_equal(n.tag, 'Ist_Exit')
            if n.tag == 'Ist_Put':
                # Writes to the instruction pointer are excluded.
                nose.tools.assert_not_equal(n.offset, n.arch.ip_offset)
        for (a, b) in d.edges():
            if isinstance(a, pyvex.IRExpr.IRExpr):
                # We check that there is no edge between two expressions/const
                nose.tools.assert_false(isinstance(b, pyvex.IRExpr.IRExpr))
                # If there is an edge coming from an expr/const it should be in
                # the dependencies of the other node
                # FIXME
                # Impossible to check because of the Unop optimization in the
                # DFG...
                # nose.tools.assert_true(a in b.expressions)
            elif hasattr(a, 'tmp'):
                # If there is an edge between a tmp and another node
                # be sure that this tmp is in the dependencies of this node
                tmps = [ ]
                for e in b.expressions:
                    if hasattr(e, 'tmp'):
                        tmps.append(e.tmp)

                nose.tools.assert_true(a.tmp in tmps)
def test_dfg_isalnum():
    # DFG smoke test on a small x86 binary.
    perform_one("i386/isalnum")
def test_dfg_counter():
    # DFG smoke test on a small x86 binary.
    perform_one("i386/counter")
def test_dfg_cfg_0():
    # DFG smoke test on an x86-64 binary.
    perform_one("x86_64/cfg_0")
def test_dfg_fauxware():
    # DFG smoke test on a MIPS binary.
    perform_one("mips/fauxware")
def run_all():
    """Run every module-level ``test_*`` callable in name order.

    Fixed: the old tuple-unpacking lambda (``lambda (k, v): ...``) is
    Python 2-only syntax; a dict comprehension works on both 2 and 3.
    """
    all_functions = {
        name: func
        for name, func in globals().items()
        if name.startswith('test_') and callable(func)
    }
    for name in sorted(all_functions):
        all_functions[name]()
if __name__ == "__main__":
    # Verbose DFG logging when run directly.
    logging.getLogger("angr.analyses.dfg").setLevel(logging.DEBUG)
    if len(sys.argv) > 1:
        # Run a single case, e.g. ``python test_dfg.py dfg_isalnum``.
        globals()['test_' + sys.argv[1]]()
    else:
        run_all()
| Python | 0.000001 | |
c9e7f66aa1715720e37e62ca4bd5618df6adf2a8 | Add tdb/upload subclass for elife titers. | tdb/elife_upload.py | tdb/elife_upload.py | import os, re, time, datetime, csv, sys, json
from upload import upload
import rethinkdb as r
from Bio import SeqIO
import argparse
from parse import parse
sys.path.append('') # need to import from base
from base.rethink_io import rethink_io
from vdb.flu_upload import flu_upload
parser = argparse.ArgumentParser()
parser.add_argument('-db', '--database', default='tdb', help="database to upload to")
parser.add_argument('-v', '--virus', default='flu', help="virus table to interact with, ie Flu")
parser.add_argument('--subtype', default=None, help="subtype of virus, ie h1n1pdm, vic, yam, h3n2")
parser.add_argument('--host', default='human', help="host of virus, ie human, swine")
parser.add_argument('--path', default=None, help="path to fasta file, default is \"data/virus/\"")
parser.add_argument('--fstem', help="input file stem")
parser.add_argument('--ftype', default='flat', help="input file format, default \"flat\", other is \"tables\"")
parser.add_argument('--overwrite', default=False, action="store_true", help ="Overwrite fields that are not none")
parser.add_argument('--exclusive', default=True, action="store_false", help ="download all docs in db to check before upload")
parser.add_argument('--replace', default=False, action="store_true", help ="If included, delete all documents in table")
parser.add_argument('--rethink_host', default=None, help="rethink host url")
parser.add_argument('--auth_key', default=None, help="auth_key for rethink database")
parser.add_argument('--local', default=False, action="store_true", help ="connect to local instance of rethinkdb database")
parser.add_argument('--preview', default=False, action="store_true", help ="If included, preview a virus document to be uploaded")
class elife_upload(upload):
    """Uploader for eLife titer measurements into the titer database (tdb).

    Specializes the generic ``upload`` pipeline with eLife-specific
    formatting: strain-name fixes, date/passage normalization and
    per-source disambiguation.
    """

    def __init__(self, **kwargs):
        upload.__init__(self, **kwargs)
        # eLife titers carry no ferret identifier; stored as the string
        # "None" (not the None object) to match the table schema.
        self.ferret_id = "None"

    def upload(self, ftype='flat', preview=False, **kwargs):
        '''
        format virus information, then upload to database
        '''
        print("Uploading Viruses to TDB")
        measurements = self.parse(ftype, **kwargs)
        print('Formatting documents for upload')
        self.format_measurements(measurements, **kwargs)
        measurements = self.filter(measurements)
        measurements = self.create_index(measurements)
        self.check_uniqueness(measurements)
        self.adjust_tdb_strain_names(measurements)
        # NOTE(review): ``self.indexes`` is presumably populated by
        # create_index() in the base class -- confirm.
        print('Total number of indexes', len(self.indexes), 'Total number of measurements', len(measurements))
        if not preview:
            self.upload_documents(self.table, measurements, index='index', **kwargs)
        else:
            print("Titer Measurements:")
            print(json.dumps(measurements[0], indent=1))
            print("Remove \"--preview\" to upload documents")
            print("Printed preview of viruses to be uploaded to make sure fields make sense")

    def format_measurements(self, measurements, **kwargs):
        '''
        format virus information in preparation to upload to database table
        '''
        # Load the strain/location fix tables once, then normalize each
        # measurement in place.
        self.fix_whole_name = self.define_strain_fixes(self.strain_fix_fname)
        self.fix_whole_name.update(self.define_strain_fixes(self.HI_strain_fix_fname))
        self.HI_ref_name_abbrev =self.define_strain_fixes(self.HI_ref_name_abbrev_fname)
        self.define_location_fixes("source-data/flu_fix_location_label.tsv")
        self.define_countries("source-data/geo_synonyms.tsv")
        for meas in measurements:
            meas['ferret_id'] = self.ferret_id
            # fix_name returns (fixed, original) -- keep both.
            meas['virus_strain'], meas['original_virus_strain'] = self.fix_name(self.HI_fix_name(meas['virus_strain'], serum=False))
            meas['serum_strain'], meas['original_serum_strain'] = self.fix_name(self.HI_fix_name(meas['serum_strain'], serum=True))
            self.test_location(meas['virus_strain'])
            self.test_location(meas['serum_strain'])
            self.add_attributes(meas, **kwargs)
            self.format_date(meas)
            # Assay date is unknown for this source.
            meas['assay_date'] = "XXXX-XX-XX"
            self.format_passage(meas, 'serum_passage', 'serum_passage_category')
            self.format_passage(meas, 'virus_passage', 'virus_passage_category')
            self.format_id(meas)
            self.format_ref(meas)
            self.format_titer(meas)
            self.format_serum_sample(meas)
            # Track reference vs. test strains for later reporting.
            if meas['ref'] == True:
                self.ref_serum_strains.add(meas['serum_strain'])
                self.ref_virus_strains.add(meas['virus_strain'])
            if meas['ref'] == False:
                self.test_virus_strains.add(meas['virus_strain'])
            self.rethink_io.check_optional_attributes(meas, self.optional_fields)
        if len(self.new_different_date_format) > 0:
            print("Found files that had a different date format, need to add to self.different_date_format")
            print(self.new_different_date_format)
        self.check_strain_names(measurements)
        self.disambiguate_sources(measurements)
        return measurements

    def disambiguate_sources(self, measurements):
        # Make duplicate 'source' values unique by appending a counter:
        # the first occurrence keeps its name, later ones get _1, _2, ...
        sources = {}
        for meas in measurements:
            src = meas['source']
            if src not in sources.keys():
                sources[src] = 0
            else:
                sources[src] += 1
                new_src = src + '_' + str(sources[src])
                meas['source'] = new_src

    def check_uniqueness(self, measurements):
        """Report duplicate index tuples; return True iff all are unique.

        NOTE(review): index fields are concatenated without a separator,
        so distinct tuples like ('ab','c') and ('a','bc') collide; the
        list membership test is also O(n) per lookup (a set would be
        O(1)).  Python 2 print statements below.
        """
        indices = []
        unique = 0
        nonunique = 0
        uniq = True
        for meas in measurements:
            index_string = ''
            for field in meas['index']:
                index_string = index_string + str(field)
            if index_string in indices:
                print "Nonunique index field: ", index_string
                nonunique += 1
                uniq = False
            else:
                indices.append(index_string)
                unique += 1
        print "Unique fields: ", unique
        print "Nonunique fields: ", nonunique
        return uniq
if __name__=="__main__":
    args = parser.parse_args()
    # Default data directory; created on demand.
    if args.path is None:
        args.path = "data/"
    if not os.path.isdir(args.path):
        os.makedirs(args.path)
    connTDB = elife_upload(**args.__dict__)
    connTDB.upload(**args.__dict__)
| Python | 0 | |
d7a0962a817e1a7e530fcd84a11dc51be82574a6 | Create get_qpf_f012.py | get_qpf_f012.py | get_qpf_f012.py | import sys
import os
import urllib2
import datetime
import time
import psycopg2
from subprocess import call, Popen
# pull the last hours worth of precip data
# NOTE(review): os.system/shell strings could be subprocess lists; fine
# here because all arguments are hard-coded (no untrusted input).
os.system("wget http://www.srh.noaa.gov/ridge2/Precip/qpfshp/latest/latest_rqpf_f012.tar.gz -O latest_rqpf_f012.tar.gz")
os.system("mv latest_rqpf_f012.tar.gz latest_rqpf_f012.tar")
os.system("tar xvf latest_rqpf_f012.tar")

latest_rqpf_f012_shp = './latest/latest_rqpf_f012.shp'

# Load the QPF shapefile into PostGIS (reprojected to EPSG:4326),
# replacing the previous contents of the latest_rqpf_f012 table.
last_hr_shp2pgsql = 'ogr2ogr -f "PostgreSQL" PG:"user=postgres dbname=hamlet password=password" {} -t_srs EPSG:4326 -nln latest_rqpf_f012 -overwrite'.format(latest_rqpf_f012_shp)
print last_hr_shp2pgsql
call(last_hr_shp2pgsql, shell = True)
# NOTE(review): credentials are hard-coded here and in the ogr2ogr call
# above -- move them to environment variables / a pgpass file.
conn_string = "dbname='hamlet' user=postgres port='5432' host='127.0.0.1' password='password'"

print "Connecting to database..."
try:
    conn = psycopg2.connect(conn_string)
except Exception as e:
    # Connection failure is fatal for this batch script.
    print str(e)
    sys.exit()
print "Connected!\n"
drop_cur = conn.cursor()

# Drop last run's derived tables so the CREATE TABLE AS statements below
# can recreate them from scratch.
#creating views that show where the roads are potentially flooded or exposed to icy conditions
drop_cur.execute("""drop table if exists roads_flooded_bunco cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_heavy cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_moderate cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_light cascade;""")
drop_cur.execute("""drop table if exists roads_flooded_se_drizzle cascade;""")
conn.commit()
drop_cur.close()
flooded_cur = conn.cursor()

# Aggregate precipitation points within ~0.025 degrees of each road polygon.
# Fixed: the four se_* queries below were missing the comma after ``gid``
# (so ``street_nam`` silently became an alias of gid) and omitted
# street_nam from GROUP BY, which PostgreSQL rejects.  They now match the
# structure of the bunco query.
# NOTE(review): these read from ``last_hr_prcp`` although this script only
# loads ``latest_rqpf_f012`` -- confirm the precip table is populated by
# another job.  ``conterlines_poly`` also looks like a typo of
# "centerlines" -- confirm against the schema.
flooded_cur.execute("""
create table roads_flooded_bunco as
select
 a.gid,
 street_nam,
 sum(b.globvalue),
 a.geom
from conterlines_poly as a
 inner join last_hr_prcp as b
 on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
group by a.gid, a.street_nam, a.geom;""")

# Thresholded variants over the SE road polygons (inches of precip).
flooded_cur.execute("""create table roads_flooded_se_heavy as
select
 a.gid,
 a.street_nam,
 sum(b.globvalue),
 a.geom
from se_road_polys as a
 inner join last_hr_prcp as b
 on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= 1
group by a.gid, a.street_nam, a.geom;""")

flooded_cur.execute("""create table roads_flooded_se_moderate as
select
 a.gid,
 a.street_nam,
 sum(b.globvalue),
 a.geom
from se_road_polys as a
 inner join last_hr_prcp as b
 on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .5
group by a.gid, a.street_nam, a.geom;
""")

flooded_cur.execute("""create table roads_flooded_se_light as
select
 a.gid,
 a.street_nam,
 sum(b.globvalue),
 a.geom
from se_road_polys as a
 inner join last_hr_prcp as b
 on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .25
group by a.gid, a.street_nam, a.geom;""")

flooded_cur.execute("""create table roads_flooded_se_drizzle as
select
 a.gid,
 a.street_nam,
 sum(b.globvalue),
 a.geom
from se_road_polys as a
 inner join last_hr_prcp as b
 on st_dwithin(a.geom::geometry(MULTIpolygon, 4326), b.wkb_geometry::geometry(point, 4326), 0.025)
where b.globvalue >= .1 and b.globvalue <= .25
group by a.gid, a.street_nam, a.geom;""")

conn.commit()
flooded_cur.close()
| Python | 0.000001 | |
504612eb0c3c6ec210dd6e555941c13523333f12 | install without cython | setup.py | setup.py | from setuptools import setup, Extension
from glob import glob

# The C++ primesieve library, built and linked alongside the extension.
library = ('primesieve', dict(
    sources=glob("lib/primesieve/src/primesieve/*.cpp"),
    include_dirs=["lib/primesieve/include"],
    language="c++",
    ))

# Cython is optional: without it we build from the pre-generated .cpp.
try:
    from Cython.Build import cythonize
except ImportError:
    cythonize = None

extension = Extension(
        "primesieve",
        ["primesieve/primesieve.pyx"] if cythonize else ["primesieve/primesieve.cpp"],
        include_dirs = ["lib/primesieve/include"],
        language="c++",
        )

ext_modules = [extension]
if cythonize:
    # Fixed: cythonize() returns a *list* of Extension objects; the old
    # code then wrapped that list in another list, passing
    # ext_modules=[[Extension]] to setup().
    ext_modules = cythonize(ext_modules)

setup(
    name='primesieve',
    url = "https://github.com/hickford/primesieve-python",
    license = "MIT",
    libraries = [library],
    ext_modules = ext_modules,
    )
| from setuptools import setup, Extension
from Cython.Build import cythonize
from glob import glob
library = ('primesieve', dict(
sources=glob("lib/primesieve/src/primesieve/*.cpp"),
include_dirs=["lib/primesieve/include"],
language="c++",
))
extension = Extension(
"primesieve",
["primesieve/primesieve.pyx"],
include_dirs = ["lib/primesieve/include"],
language="c++",
)
setup(
name='primesieve',
url = "https://github.com/hickford/primesieve-python",
license = "MIT",
libraries = [library],
ext_modules = cythonize(extension),
)
| Python | 0 |
42ca323888dc13246fa7f6a01a6e29efcdb2d5c5 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from setuptools import setup
import molvs
# Use the README as the long description when present (e.g. in a source
# checkout); fall back to an empty string otherwise.
if os.path.exists('README.rst'):
    long_description = open('README.rst').read()
else:
    long_description = ''''''

setup(
    name='MolVS',
    # Metadata is sourced from the package itself to avoid duplication.
    version=molvs.__version__,
    author=molvs.__author__,
    author_email=molvs.__email__,
    license=molvs.__license__,
    url='https://github.com/mcs07/MolVS',
    packages=['molvs'],
    description='',
    long_description=long_description,
    keywords='chemistry cheminformatics rdkit',
    zip_safe=False,
    test_suite='nose.collector',
    # ``molvs`` console script maps to the CLI entry point.
    entry_points={'console_scripts': ['molvs = molvs.cli:main']},
    classifiers=[
        'Environment :: Console',
        'Intended Audience :: Developers',
        'Intended Audience :: Healthcare Industry',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Topic :: Scientific/Engineering',
        'Topic :: Scientific/Engineering :: Bio-Informatics',
        'Topic :: Scientific/Engineering :: Chemistry',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| Python | 0.000001 | |
e91b1c56b252ddc3073a15209e38e73424911b62 | Remove unused import. | setup.py | setup.py | #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, Extension
from Cython.Build import cythonize
import numpy as np
# Cython extension modules; all need NumPy headers at build time.
ext_modules = [
    Extension(
        'zipline.assets._assets',
        ['zipline/assets/_assets.pyx'],
        include_dirs=[np.get_include()],
    ),
    Extension(
        'zipline.lib.adjusted_array',
        ['zipline/lib/adjusted_array.pyx'],
        include_dirs=[np.get_include()],
    ),
    Extension(
        'zipline.lib.adjustment',
        ['zipline/lib/adjustment.pyx'],
        include_dirs=[np.get_include()],
    ),
    Extension(
        'zipline.data.ffc.loaders._us_equity_pricing',
        ['zipline/data/ffc/loaders/_us_equity_pricing.pyx'],
        include_dirs=[np.get_include()],
    ),
]
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=['zipline'],
ext_modules=cythonize(ext_modules),
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=[
'Logbook',
'pytz',
'requests',
'numpy',
'pandas',
'six',
'Cython',
],
extras_require={
'talib': ["talib"],
},
url="http://zipline.io"
)
| #!/usr/bin/env python
#
# Copyright 2014 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages, Extension
from Cython.Build import cythonize
import numpy as np
ext_modules = [
Extension(
'zipline.assets._assets',
['zipline/assets/_assets.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjusted_array',
['zipline/lib/adjusted_array.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.lib.adjustment',
['zipline/lib/adjustment.pyx'],
include_dirs=[np.get_include()],
),
Extension(
'zipline.data.ffc.loaders._us_equity_pricing',
['zipline/data/ffc/loaders/_us_equity_pricing.pyx'],
include_dirs=[np.get_include()],
),
]
setup(
name='zipline',
version='0.8.0rc1',
description='A backtester for financial algorithms.',
author='Quantopian Inc.',
author_email='opensource@quantopian.com',
packages=['zipline'],
ext_modules=cythonize(ext_modules),
scripts=['scripts/run_algo.py'],
include_package_data=True,
license='Apache 2.0',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.3',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Office/Business :: Financial',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: System :: Distributed Computing',
],
install_requires=[
'Logbook',
'pytz',
'requests',
'numpy',
'pandas',
'six',
'Cython',
],
extras_require={
'talib': ["talib"],
},
url="http://zipline.io"
)
| Python | 0 |
d139ced0b482fa65720a3c8f268d71dbf25119fb | add jsonsoup plugin | commonplugs/jsonsoup.py | commonplugs/jsonsoup.py | # commonplugs/jsonsoup.py
#
#
## gozerlib imports
from gozerlib.callbacks import callbacks
from gozerlib.utils.url import posturl, getpostdata
from gozerlib.persistconfig import PersistConfig
from gozerlib.commands import cmnds
from gozerlib.socket.irc.monitor import outmonitor
from gozerlib.socket.rest.server import RestServer, RestRequestHandler
from gozerlib.eventbase import EventBase
from gozerlib.utils.exception import handle_exception
from gozerlib.examples import examples
## simplejson imports
from simplejson import dumps
## basic imports
import logging
import re
import socket
## VARS
outurl = "http://jsonsoup.appspot.com/soup/"
state = PersistConfig()
if not state.data:
state.data = {}
if not state.data.has_key('relay'):
state.data['relay'] = []
cfg = PersistConfig()
cfg.define('enable', 0)
cfg.define('host' , socket.gethostbyname(socket.getfqdn()))
cfg.define('name' , socket.getfqdn())
cfg.define('port' , 10102)
cfg.define('disable', [])
waitre = re.compile(' wait (\d+)', re.I)
hp = "%s:%s" % (cfg.get('host'), cfg.get('port'))
url = "http://%s" % hp
## callbacks
def preremote(bot, event):
    # Precondition for the relay callbacks: only relay channels that were
    # enabled via ``soup-on``.  Returns None (falsy) otherwise.
    if event.channel in state.data['relay']:
        return True
def handle_doremote(bot, event):
    # Don't re-relay events that already arrived from the network -- that
    # would cause an event loop between bots.
    if event.isremote:
        return
    posturl(outurl, {}, {'event': event.tojson() })
callbacks.add('PRIVMSG', handle_doremote, preremote, threaded=True)
callbacks.add('OUTPUT', handle_doremote, preremote, threaded=True)
callbacks.add('MESSAGE', handle_doremote, preremote, threaded=True)
callbacks.add('BLIP_SUBMITTED', handle_doremote, preremote, threaded=True)
outmonitor.add('soup', handle_doremote, preremote, threaded=True)
## server part
server = None
def soup_POST(server, request):
    """REST handler for POST /soup/: decode the posted event container,
    replay it through the local callbacks and acknowledge.

    Returns a JSON list: ``['ok']`` on success, an error message otherwise.
    """
    try:
        # Renamed: the old local ``input`` shadowed the builtin.
        postdata = getpostdata(request)
        container = postdata['container']
    except (KeyError, AttributeError):
        # Fixed: ``except KeyError, AttributeError:`` only caught KeyError
        # (binding it to the name AttributeError); both must be caught,
        # which requires the parenthesized tuple form.
        logging.warn("soup - %s - can't determine eventin" % request.ip)
        return dumps(["can't determine eventin"])
    event = EventBase()
    event.load(container)
    callbacks.check(event)
    return dumps(['ok',])
def soup_GET(server, request):
    """REST handler for GET /soup/: the event container is carried after
    '#' in the request path.  Always acknowledges with ['ok'] unless the
    path has no fragment.
    """
    try:
        path, container = request.path.split('#', 1)
    except ValueError:
        # No '#' in the path -> nothing to decode.
        logging.warn("soup - %s - can't determine eventin" % request.ip)
        return dumps(["can't determine eventin", ])
    try:
        event = EventBase()
        event.load(container)
        callbacks.check(event)
    except Exception, ex:
        # Broad on purpose: a bad remote event must not kill the server;
        # handle_exception logs the traceback.  (Python 2 except syntax.)
        handle_exception()
    return dumps(['ok', ])
def startserver():
try:
import google
return
except ImportError:
pass
global server
try:
server = RestServer((cfg.get('host'), cfg.get('port')), RestRequestHandler)
if server:
server.start()
logging.warn('soup - running at %s:%s' % (cfg.get('host'), cfg.get('port')))
server.addhandler('/soup/', 'POST', soup_POST)
server.addhandler('/soup/', 'GET', soup_GET)
for mount in cfg.get('disable'):
server.disable(mount)
else:
logging.error('soup - failed to start server at %s:%s' % (cfg.get('host'), cfg.get('port')))
except socket.error, ex:
logging.warn('soup - start - socket error: %s', (request.ip, str(ex)))
except Exception, ex:
handle_exception()
def stopserver():
    # Shut the REST server down; safe to call when it never started.
    try:
        if not server:
            logging.warn('soup - server is already stopped')
            return
        server.shutdown()
    except Exception, ex:
        # Best-effort shutdown: log and continue.
        handle_exception()
        pass
## plugin init
def init():
    # Plugin hook: start the server only when explicitly enabled in config.
    if cfg['enable']:
        startserver()
def shutdown():
    # Plugin hook: mirror init() -- stop the server if it was enabled.
    if cfg['enable']:
        stopserver()
def handle_soup_on(bot, event):
    # soup-on [<channel>]: enable relaying for the given channel
    # (defaults to the current one) and persist the setting.
    if not event.rest:
        target = event.channel
    else:
        target = event.rest
    if not target in state.data['relay']:
        state.data['relay'].append(target)
        state.save()
    event.done()
cmnds.add('soup-on', handle_soup_on, 'OPER')
examples.add('soup-on', 'enable relaying of the channel to the JSONBOT event network (jsonsoup)', 'soup-on')
def handle_soup_off(bot, event):
    # soup-off [<channel>]: disable relaying for the given channel
    # (defaults to the current one) and persist the setting.
    if not event.rest:
        target = event.channel
    else:
        target = event.rest
    if target in state.data['relay']:
        state.data['relay'].remove(target)
        state.save()
    event.done()
cmnds.add('soup-off', handle_soup_off, 'OPER')
examples.add('soup-off', 'disable relaying of channel to the JSONBOT event network (jsonsoup)', 'soup-off')
def handle_soup_startserver(bot, event):
    # soup-startserver: persist the enable flag and start the server now.
    cfg['enable'] = 1
    cfg.save()
    startserver()
    event.done()
cmnds.add('soup-startserver', handle_soup_startserver, 'OPER')
examples.add('soup-startserver', 'start the JSONBOT event network server', 'soup-startserver')
def handle_soup_stopserver(bot, event):
    # soup-stopserver: persist the disable flag and stop the server now.
    cfg['enable'] = 0
    cfg.save()
    stopserver()
    event.done()
cmnds.add('soup-stopserver', handle_soup_stopserver, 'OPER')
examples.add('soup-stopserver', 'stop the JSONBOT event network server', 'soup-startserver')
| Python | 0 | |
52cd79d7045a69ff5073af7ed14e9ed774de7a39 | Add setup.py. | setup.py | setup.py | from setuptools import setup
# NOTE(review): author/email/license are empty and the 'License :: '
# classifier is truncated -- fill these in before publishing.
setup(
    name='pySUMO',
    version='0.0.0a1',
    description='A graphical IDE for Ontologies written in SUO-Kif',
    long_description='A graphical IDE for Ontologies written in SUO-Kif',
    url='',
    author='',
    author_email='',
    license='',
    classifiers=['Development Status :: 3 - Alpha',
                 'Intended Audience :: Developers',
                 'License :: ',
                 'Programming Language :: Python :: 3.4',
                 ],
    keywords='SUMO Ontologies SUO-Kif',
    package_dir={'':'src'},
    packages=['pysumo', 'pysumo.logger', 'pySUMOQt', 'pySUMOQt.Designer', 'pySUMOQt.Widget'],
    install_requires=['pyside'],
    extras_require={'test' : ['pytest']},
    # Ship the KIF ontologies plus the four WordNet sense-data files.
    data_files=[('data', ['data/Merge.kif', 'data/MILO.kif']),
                ('data/wordnet', [''.join(['data/wordnet/sdata.', x]) for x in
                                  ['adj', 'adv', 'noun', 'verb']]),],
    entry_points={'gui_scripts': ['pySUMOQt = pySUMOQt.MainWindow:main']},
)
| Python | 0 | |
7354dc674a4551169fb55bfcec208256e956d14e | Add skeleton class for conditions | components/condition.py | components/condition.py | """A class to store conditions (eg. WHERE [cond])."""
class SgConditionSimple:
    """A simple condition: two operands joined by one operator.

    Fixed: the original used hyphenated identifiers (``operand-l``,
    ``self._op-l``), which are a SyntaxError in Python; underscores are
    used instead.
    """

    def __init__(self, operand_l, operator, operand_r):
        self._op_l = operand_l   # left operand
        self._op = operator      # comparison operator, e.g. '='
        self._op_r = operand_r   # right operand
class SgCondition:
    """A class to store a (complex) condition.

    Currently a stub: the expression is stored verbatim and Evaluate()
    unconditionally returns True (i.e. every row matches).
    """
    def __init__(self, expr):
        self._expr = expr
        self._conds = []  # simple conditions
        self._conns = []  # connectors (eg. and, or)
        # TODO(lnishan): parse expr into _conds and _conns.

    def Evaluate(self, fields, row):
        # TODO(lnishan): Evaluate the (complex) condition.
        return True
| Python | 0 | |
8e3de37e14013dc371064eec5102f682b32d0cfc | modify cwd so setup.py can be run from anywhere | setup.py | setup.py | #!/usr/bin/python
import os
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
def generate_sources(dir_files_tuples):
    """Flatten (directory, [filenames]) pairs into one list of joined paths.

    Args:
        dir_files_tuples: iterable of (directory, filenames) pairs.

    Returns:
        List of os.path.join(directory, filename) strings, in input order.
    """
    # The original used `dir` and `file` as loop variables, shadowing the
    # builtins of the same names; renamed and flattened into one
    # comprehension.
    return [
        os.path.join(directory, filename)
        for directory, filenames in dir_files_tuples
        for filename in filenames
    ]
# make sure cwd is correct
this_file = os.path.abspath(__file__)
this_dir = os.path.split(this_file)[0]
os.chdir(this_dir)
# locations
pyx_src_dir = 'crosscat/cython_code'
cpp_src_dir = 'cpp_code/src'
include_dirs = ['cpp_code/include/CrossCat']
# specify sources
ContinuousComponentModel_pyx_sources = ['ContinuousComponentModel.pyx']
ContinuousComponentModel_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'ComponentModel.cpp',
'ContinuousComponentModel.cpp',
]
ContinuousComponentModel_sources = generate_sources([
(pyx_src_dir, ContinuousComponentModel_pyx_sources),
(cpp_src_dir, ContinuousComponentModel_cpp_sources),
])
#
MultinomialComponentModel_pyx_sources = ['MultinomialComponentModel.pyx']
MultinomialComponentModel_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'ComponentModel.cpp',
'MultinomialComponentModel.cpp',
]
MultinomialComponentModel_sources = generate_sources([
(pyx_src_dir, MultinomialComponentModel_pyx_sources),
(cpp_src_dir, MultinomialComponentModel_cpp_sources),
])
#
State_pyx_sources = ['State.pyx']
State_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'DateTime.cpp',
'View.cpp',
'Cluster.cpp',
'ComponentModel.cpp',
'MultinomialComponentModel.cpp',
'ContinuousComponentModel.cpp',
'State.cpp',
]
State_sources = generate_sources([
(pyx_src_dir, State_pyx_sources),
(cpp_src_dir, State_cpp_sources),
])
# create exts
ContinuousComponentModel_ext = Extension(
"crosscat.cython_code.ContinuousComponentModel",
libraries = ['boost_random'],
extra_compile_args = [],
sources=ContinuousComponentModel_sources,
include_dirs=include_dirs,
language="c++")
MultinomialComponentModel_ext = Extension(
"crosscat.cython_code.MultinomialComponentModel",
libraries = ['boost_random'],
extra_compile_args = [],
sources=MultinomialComponentModel_sources,
include_dirs=include_dirs,
language="c++")
State_ext = Extension(
"crosscat.cython_code.State",
libraries = ['boost_random'],
extra_compile_args = [],
sources=State_sources,
include_dirs=include_dirs,
language="c++")
#
ext_modules = [
ContinuousComponentModel_ext,
MultinomialComponentModel_ext,
State_ext,
]
packages = ['crosscat', 'crosscat.utils', 'crosscat.convergence_analysis', 'crosscat.jsonrpc_http']
setup(
name='CrossCat',
version='0.1',
author='MIT.PCP',
url='TBA',
long_description='TBA.',
packages=packages,
package_dir={'crosscat':'crosscat/'},
ext_modules=ext_modules,
cmdclass = {'build_ext': build_ext}
)
| #!/usr/bin/python
import os
# old crosscat setup.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
# venture setup.py
# from distutils.core import setup, Extension
def generate_sources(dir_files_tuples):
sources = []
for dir, files in dir_files_tuples:
full_files = [
os.path.join(dir, file)
for file in files
]
sources.extend(full_files)
return sources
# locations
pyx_src_dir = 'crosscat/cython_code'
cpp_src_dir = 'cpp_code/src'
include_dirs = ['cpp_code/include/CrossCat']
# specify sources
ContinuousComponentModel_pyx_sources = ['ContinuousComponentModel.pyx']
ContinuousComponentModel_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'ComponentModel.cpp',
'ContinuousComponentModel.cpp',
]
ContinuousComponentModel_sources = generate_sources([
(pyx_src_dir, ContinuousComponentModel_pyx_sources),
(cpp_src_dir, ContinuousComponentModel_cpp_sources),
])
#
MultinomialComponentModel_pyx_sources = ['MultinomialComponentModel.pyx']
MultinomialComponentModel_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'ComponentModel.cpp',
'MultinomialComponentModel.cpp',
]
MultinomialComponentModel_sources = generate_sources([
(pyx_src_dir, MultinomialComponentModel_pyx_sources),
(cpp_src_dir, MultinomialComponentModel_cpp_sources),
])
#
State_pyx_sources = ['State.pyx']
State_cpp_sources = [
'utils.cpp',
'numerics.cpp',
'RandomNumberGenerator.cpp',
'DateTime.cpp',
'View.cpp',
'Cluster.cpp',
'ComponentModel.cpp',
'MultinomialComponentModel.cpp',
'ContinuousComponentModel.cpp',
'State.cpp',
]
State_sources = generate_sources([
(pyx_src_dir, State_pyx_sources),
(cpp_src_dir, State_cpp_sources),
])
# create exts
ContinuousComponentModel_ext = Extension(
"crosscat.cython_code.ContinuousComponentModel",
libraries = ['boost_random'],
extra_compile_args = [],
sources=ContinuousComponentModel_sources,
include_dirs=include_dirs,
language="c++")
MultinomialComponentModel_ext = Extension(
"crosscat.cython_code.MultinomialComponentModel",
libraries = ['boost_random'],
extra_compile_args = [],
sources=MultinomialComponentModel_sources,
include_dirs=include_dirs,
language="c++")
State_ext = Extension(
"crosscat.cython_code.State",
libraries = ['boost_random'],
extra_compile_args = [],
sources=State_sources,
include_dirs=include_dirs,
language="c++")
#
ext_modules = [
ContinuousComponentModel_ext,
MultinomialComponentModel_ext,
State_ext,
]
packages = ['crosscat', 'crosscat.utils', 'crosscat.convergence_analysis', 'crosscat.jsonrpc_http']
setup(
name='CrossCat',
version='0.1',
author='MIT.PCP',
url='TBA',
long_description='TBA.',
packages=packages,
package_dir={'crosscat':'crosscat/'},
ext_modules=ext_modules,
cmdclass = {'build_ext': build_ext}
)
| Python | 0 |
a0607d0f9b7c08ddcf81459868b33761d8ed5bb2 | Set up the dependency | setup.py | setup.py | # Copyright 2021 The KerasNLP Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Setup script."""
from setuptools import find_packages
from setuptools import setup
setup(
    name="keras-nlp",
    description="High-level NLP libraries based on Keras",
    url="https://github.com/keras-team/keras-nlp",
    author="Keras team",
    author_email="keras-nlp@google.com",
    license="Apache License 2.0",
    # NOTE(review): the original comment here claimed tensorflow is
    # deliberately NOT a dependency (so users can choose the gpu or cpu
    # build themselves), yet "tensorflow" IS listed in install_requires
    # below.  The comment and the code disagree — confirm which behavior
    # is intended before releasing.
    install_requires=["packaging", "tensorflow", "numpy"],
    # Dev/test tooling, installed via `pip install keras-nlp[tests]`.
    extras_require={"tests": ["flake8", "isort", "black",],},
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.7",
        "Operating System :: Unix",
        "Operating System :: Microsoft :: Windows",
        "Operating System :: MacOS",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "Topic :: Software Development",
    ],
    # Ship every package except the tests.
    packages=find_packages(exclude=("tests",)),
) | Python | 0.000008 | |
d9be2b8a61a88f0ee228c08d1f277770602840b1 | Add python version for compress | compression/compress.py | compression/compress.py |
def compress(uncompressed):
    """Run-length encode a string: "aaab" -> "a3b1"; "" -> ""."""
    if not uncompressed:
        return ""
    pieces = []
    run_char = uncompressed[0]
    run_len = 0
    for ch in uncompressed:
        if ch == run_char:
            run_len += 1
        else:
            # run ended: emit "<char><count>" and start a new run
            pieces.append("{}{}".format(run_char, run_len))
            run_char = ch
            run_len = 1
    # emit the final run
    pieces.append("{}{}".format(run_char, run_len))
    return "".join(pieces)
if __name__ == "__main__":
print(compress("aaabbbccccd")) | Python | 0.000002 | |
d480c2738bb4d0ae72643fc9bc1f911cb630539c | add 12-list.py | python/12-list.py | python/12-list.py | #!/usr/bin/env python
import math
# Demo of basic Python 2 list operations: indexing, slicing, in-place
# update, deletion, membership test and iteration.
# NOTE(review): the name `list` shadows the builtin list type for the
# rest of this script; prefer another name in real code.
list = ['physics', 'chemistry', 1997, 2001];
# read access: a single element and a slice (end index is exclusive)
print "list[2] = ", list[2]
print "list[1:3] = ", list[1:3]
# replace an element in place
list[2] = "math";
print "update, list[2] = ", list[2]
# remove the element at index 2; later items shift left by one
del list[2]
# after the delete, index 2 now holds what used to be at index 3 (2001)
print "delete, list[2] = ", list[2]
print "length of delete:", len(list)
# membership test with the `in` operator
if ('physics' in list):
    print "physics is in list"
else:
    print "physics is not in list"
# iterate over the remaining elements
for elem in list:
    print "elem :", elem
| Python | 0.000003 | |
240b22d0b078951b7d1f0df70156b6e2041a530f | fix setup.py dor pypi. | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <benoitc@e-engura.org>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import os
import sys
from setuptools import setup
data_files = []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('app-template'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = 'Couchapp',
version = '0.1.4',
url = 'http://github.com/benoitc/couchapp/tree/master',
license = 'Apache License 2',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
description = 'Standalone CouchDB Application Development Made Simple.',
long_description = """CouchApp is a set of helpers and a jQuery plugin
that conspire to get you up and running on CouchDB quickly and
correctly. It brings clarity and order to the freedom of CouchDB's
document-based approach.""",
keywords = 'couchdb couchapp',
platforms = 'any',
zip_safe = False,
packages= ['couchapp'],
package_dir={'couchapp': 'python/couchapp'},
data_files = data_files,
include_package_data = True,
scripts = ['python/couchapp/bin/couchapp'],
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Database',
'Topic :: Utilities',
],
setup_requires = [
'setuptools>=0.6c9',
'couchdb',
],
)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2009 Benoit Chesneau <benoitc@e-engura.org>
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
import os
import sys
from setuptools import setup
data_files = []
root_dir = os.path.dirname(__file__)
if root_dir != '':
os.chdir(root_dir)
for dirpath, dirnames, filenames in os.walk('app-template'):
for i, dirname in enumerate(dirnames):
if dirname.startswith('.'): del dirnames[i]
data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])
setup(
name = 'Couchapp',
version = '0.1.4',
url = 'http://github.com/benoitc/couchapp/tree/master',
license = 'Apache License 2',
author = 'Benoit Chesneau',
author_email = 'benoitc@e-engura.org',
description = 'Standalone CouchDB Application Development Made Simple.',
long_description = """CouchApp is a set of helpers and a jQuery plugin
that conspire to get you up and running on CouchDB quickly and
correctly. It brings clarity and order to the freedom of CouchDB’s
document-based approach.""",
keywords = 'couchdb couchapp',
platforms = 'any',
zip_safe = False,
packages= ['couchapp'],
package_dir={'couchapp': 'python/couchapp'},
data_files = data_files,
include_package_data = True,
scripts = ['python/couchapp/bin/couchapp'],
classifiers = [
'License :: OSI Approved :: Apache Software License',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Development Status :: 4 - Beta',
'Programming Language :: Python',
'Operating System :: OS Independent',
'Topic :: Database',
'Topic :: Utilities',
],
setup_requires = [
'setuptools>=0.6c9',
'couchdb>=0.5dev',
]
)
| Python | 0 |
588750832d8bfb1047bb2c56f335cb70f6b2ff5f | add QualityControl | qualitycontrol.py | qualitycontrol.py | import os,sys
from optparse import OptionParser
import time
import fastq
maxLen = 200  # maximum supported read length; positions beyond this are not tracked
allbases = ("A", "T", "C", "G")

########################### QualityControl
class QualityControl:
    """Per-position base statistics for FASTQ reads, used to pick trim lengths.

    Workflow: statFile() samples reads and fills counts/percents, then
    autoTrim() returns (front, tail) — how many cycles to cut at each end,
    based on where the base composition drifts from the stable middle of
    the read.
    """

    def __init__(self):
        # BUG FIX: these used to be class-level mutable attributes, so all
        # instances shared (and kept mutating) the same dicts and lists.
        # They are now per-instance state.
        self.readLen = 0
        self.readCount = 0
        self.counts = {}    # base -> per-position occurrence counts
        self.percents = {}  # base -> per-position fraction of sampled reads
        self.qualities = [0 for x in range(maxLen)]
        for base in allbases:
            self.counts[base] = [0 for x in range(maxLen)]
            self.percents[base] = [0.0 for x in range(maxLen)]

    def statRead(self, read):
        """Accumulate base counts for one read; read[1] is the sequence."""
        seq = read[1]
        for i in range(len(seq)):
            b = seq[i]
            if b in allbases:
                self.counts[b][i] += 1

    def calcReadLen(self):
        """Set self.readLen to the first position with no observed bases."""
        for pos in range(maxLen):
            hasData = False
            for base in allbases:
                if self.counts[base][pos] > 0:
                    hasData = True
            if not hasData:
                self.readLen = pos
                break

    def calcPercents(self):
        """Convert raw counts into per-position base fractions."""
        # calc percents of each base
        for pos in range(self.readLen):
            total = 0
            for base in allbases:
                total += self.counts[base][pos]
            for base in allbases:
                self.percents[base][pos] = float(self.counts[base][pos]) / float(total)

    def statFile(self, filename):
        """Sample up to 5000 reads from a FASTQ file and compute statistics."""
        reader = fastq.Reader(filename)
        # sample up to maxSample reads for stat
        maxSample = 5000
        while True:
            read = reader.nextRead()
            # NOTE: readCount also counts the final None/over-limit read,
            # so it can exceed the number of reads actually sampled by one.
            self.readCount += 1
            if read is None or self.readCount > maxSample:
                break
            self.statRead(read)
        self.calcReadLen()
        self.calcPercents()

    def autoTrim(self):
        """Return (front, tail): number of cycles to trim at each read end.

        Starts from a small window around the center of the read, expands
        outward while per-position base composition stays within `threshold`
        of the window's mean, then scans the remaining head/tail for the
        first position whose small look-ahead window is entirely unbiased.
        """
        # use (center-5, center+5) as initial good segment
        center = int(self.readLen / 2)
        left = center - 5
        right = center + 5

        threshold = 0.05
        lastStepIsLeft = False
        leftFinished = False
        rightFinished = False
        current = -1

        # expand the good segment one position at a time, alternating sides
        meanPercents = {}
        while not (leftFinished and rightFinished):
            for base in allbases:
                meanPercents[base] = 0.0
                for pos in range(left, right):
                    meanPercents[base] += self.percents[base][pos]
                meanPercents[base] /= (right - left)

            if leftFinished:
                current = right + 1
                lastStepIsLeft = False
            elif rightFinished:
                current = left - 1
                lastStepIsLeft = True
            elif lastStepIsLeft:
                current = right + 1
                lastStepIsLeft = False
            else:
                current = left - 1
                lastStepIsLeft = True

            # total absolute deviation of this position from the segment mean
            percentBias = 0.0
            for base in allbases:
                percentBias += abs(meanPercents[base] - self.percents[base][current])

            if percentBias > threshold:
                if lastStepIsLeft:
                    leftFinished = True
                else:
                    rightFinished = True
            else:
                if lastStepIsLeft:
                    left = current
                    if left == 0:
                        leftFinished = True
                else:
                    right = current
                    if right == self.readLen - 1:
                        rightFinished = True

        # find the bad segment from front, considering a small window
        # if any in the window is bad, it is bad
        trimFront = left
        window = 3
        for pos in range(0, left):
            isGood = True
            # use `window` here (the original hardcoded the literal 3)
            for posInWindow in range(pos, min(pos + window, self.readLen)):
                percentBias = 0.0
                for base in allbases:
                    percentBias += abs(meanPercents[base] - self.percents[base][posInWindow])
                if percentBias > threshold:
                    isGood = False
            if isGood:
                trimFront = pos
                break

        # find the bad segment from tail, considering a small window
        # if any in the window is bad, it is bad
        trimTail = right
        for pos in range(self.readLen - 1, right, -1):
            isGood = True
            for posInWindow in range(pos, max(pos - window, 0), -1):
                percentBias = 0.0
                for base in allbases:
                    # BUG FIX: the original read the undefined global
                    # `percents`; it must use the instance attribute.
                    percentBias += abs(meanPercents[base] - self.percents[base][posInWindow])
                if percentBias > threshold:
                    isGood = False
            if isGood:
                trimTail = pos
                break

        # cap trims at 10% (front) / 5% (tail) of the read length
        trimFront = min(self.readLen * 0.1, trimFront)
        trimTail = min(self.readLen * 0.05, self.readLen - 1 - trimTail)
        # the last base should be definitely trimmed for illumina sequencer output
        trimTail = max(1, trimTail)
        return (int(trimFront), int(trimTail))
if __name__ == "__main__":
    # Manual smoke test: profile the sample FASTQ file next to this script.
    controller = QualityControl()
    controller.statFile("R1.fq")
| Python | 0 | |
3ada80358a059b3a5ee4dd4ceed572f933a1ec67 | Create setup.py | setup.py | setup.py | from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()
setup(
    name='compare-with-remote',
    version='0.1',
    description=' Compare local script output with remote script output',
    long_description=long_description,
    url='https://github.com/guettli/compare-with-remote/',
    author='Thomas Guettler',
    author_email='info.compare-with-remote@thomas-guettler.de',
    license='Apache2',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',
        # Indicate who your project is intended for
        'Intended Audience :: Developers',
        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: Apache2',
        # Specify the Python versions you support here. In particular, ensure
        # that you indicate whether you support Python 2, Python 3 or both.
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
    entry_points={
        'console_scripts': [
            # BUG FIX: the original spec
            #   'sample=compare-with-remote:compare_with_remote/compare_with_remote:main'
            # is not a valid entry point.  The format is
            #   <script-name>=<importable.module>:<callable>
            # and the module path may not contain '-' or '/'.
            'compare-with-remote=compare_with_remote:main',
        ],
    },
)
| Python | 0.000001 | |
606853d904c1967b41b30d828940c4aa7ab4c0ab | add setup.py | setup.py | setup.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from distutils.core import setup
setup(name="fusion",
version="0.1.0",
author="kgiusti",
author_email="kgiusti@apache.org",
packages=["fusion"],
package_dir={"fusion": "python"},
license="Apache Software License")
| Python | 0.000001 | |
90ec011ebec93f4c0b0e93fc831b0f782be1b13e | Add the setup.py PIP install config file. | setup.py | setup.py | from setuptools import setup
setup(
name='SedLex',
version='0.1',
install_requires=[
'html5lib',
'beautifulsoup4',
'requests',
'jinja2',
'python-gitlab'
]
)
| Python | 0 | |
fa88dac9c35fc473ebfea05926e0200926251d9d | Create setup.py | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='RPiProcessRig',
      version='1.0',
      description='A simple industrial rig that can be used for experimentation with a variety of different control algortithms',
      author='Alexander Leech',
      author_email='alex.leech@talktalk.net',
      license='MIT',
      keywords="Raspberry Pi Process Control Industrial Rig Hardware Experimentation",
      url='https://github.com/FlaminMad/RPiProcessRig',
      # BUG FIX: the original ended with a dangling `py_modules=` (no
      # value), which is a SyntaxError.  It also listed third-party
      # dependencies under `packages=`; `packages=` must name packages
      # *provided by this project*, whereas dependencies to be installed
      # by pip belong in install_requires.
      install_requires=['yaml', 'pymodbus', 'spidev', 'RPi.GPIO'],
      )
| Python | 0 | |
c0989ce01ee62367a92eb48855a42c3c4986de84 | Add setup.py. | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
from setuptools import find_packages, setup
def read(file_name):
file_path = os.path.join(os.path.dirname(__file__), file_name)
return codecs.open(file_path, encoding='utf-8').read()
PACKAGE = "add_another"
NAME = "django-add-another"
DESCRIPTION = "'Add another' functionality outside Django admin"
AUTHOR = "Karim Amzil"
AUTHOR_EMAIL = "djkartsa@gmail.com"
URL = "https://github.com/djkartsa/django-add-another"
VERSION = __import__(PACKAGE).__version__
setup(
name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=read("README.md"),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license="LICENSE.txt",
url=URL,
packages=find_packages(),
include_package_data=True,
classifiers=[
"Development Status :: 3 - Alpha",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 3",
"Framework :: Django",
],
install_requires=[
'Django',
],
zip_safe=False,
)
| Python | 0 | |
7634b58b1bd0fc2eee121bad2a20b61077a48d7b | Update setup.py | setup.py | setup.py | #!/usr/bin/env python
import sys
from distutils.core import setup
try:
import fontTools
except:
print "*** Warning: defcon requires FontTools, see:"
print " fonttools.sf.net"
try:
import robofab
except:
print "*** Warning: defcon requires RoboFab, see:"
print " robofab.com"
#if "sdist" in sys.argv:
# import os
# import subprocess
# import shutil
# docFolder = os.path.join(os.getcwd(), "documentation")
# # remove existing
# doctrees = os.path.join(docFolder, "build", "doctrees")
# if os.path.exists(doctrees):
# shutil.rmtree(doctrees)
# # compile
# p = subprocess.Popen(["make", "html"], cwd=docFolder)
# p.wait()
# # remove doctrees
# shutil.rmtree(doctrees)
setup(name="defconAppKit",
version="0.1",
description="A set of interface objects for working with font data.",
author="Tal Leming",
author_email="tal@typesupply.com",
url="https://github.com/typesupply/defconAppKit",
license="MIT",
packages=[
"defconAppKit",
"defconAppKit.controls",
"defconAppKit.representationFactories",
"defconAppKit.tools",
"defconAppKit.windows"
],
package_dir={"":"Lib"}
)
| #!/usr/bin/env python
import sys
from distutils.core import setup
try:
import fontTools
except:
print "*** Warning: defcon requires FontTools, see:"
print " fonttools.sf.net"
try:
import robofab
except:
print "*** Warning: defcon requires RoboFab, see:"
print " robofab.com"
#if "sdist" in sys.argv:
# import os
# import subprocess
# import shutil
# docFolder = os.path.join(os.getcwd(), "documentation")
# # remove existing
# doctrees = os.path.join(docFolder, "build", "doctrees")
# if os.path.exists(doctrees):
# shutil.rmtree(doctrees)
# # compile
# p = subprocess.Popen(["make", "html"], cwd=docFolder)
# p.wait()
# # remove doctrees
# shutil.rmtree(doctrees)
setup(name="defconAppKit",
version="0.1",
description="A set of interface objects for working with font data.",
author="Tal Leming",
author_email="tal@typesupply.com",
url="http://code.typesupply.com",
license="MIT",
packages=[
"defconAppKit",
"defconAppKit.controls",
"defconAppKit.representationFactories",
"defconAppKit.tools",
"defconAppKit.windows"
],
package_dir={"":"Lib"}
) | Python | 0 |
3c5802bda34ed9c772f7bb2e33b29f265440f286 | Add a simple setup.py. | setup.py | setup.py | import os
from setuptools import setup, find_packages
README_PATH = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'README.markdown')
description = 'django-goodfields makes creating good form fields easy.'
long_description = os.path.exists(README_PATH) and open(README_PATH).read() or description
setup(
name='django-goodfields',
version='0.0.1',
description=description,
long_description=long_description,
author='Steve Losh',
author_email='steve@stevelosh.com',
url='http://bitbucket.org/dwaiter/django-goodfields/',
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Operating System :: OS Independent',
'Framework :: Django',
'Programming Language :: Python',
'Programming Language :: Python :: 2.6',
],
)
| Python | 0 | |
26cc1c4ff2b5c0de8b83bb9bd088d80f5650dda1 | Create setup.py | setup.py | setup.py | __author__ = 'Alumne'
from distutils.core import setup
setup(name='PEACHESTORE',
      # NOTE(review): 'python 3' is not a valid version string; distutils
      # expects something like '1.0' — confirm the intended release number.
      version='python 3',
      author='albert cuesta',
      author_email='albert_cm_91@hotmail.com',
      url='https://github.com/albertcuesta/PEACHESTORE',
      description='es una tienda online de aplicaciones moviles similar a google play',
      # BUG FIX: `packager` is not a setup() keyword (distutils only emits
      # "Unknown distribution option" and ignores it, so nothing was
      # packaged); the intended option is `packages`.
      packages=['PEACHSTORE']
      )
| Python | 0.000001 | |
456babd37b63e36b1041472aa6bb913c90e46816 | install of python interface via setup.py | setup.py | setup.py | # see https://stackoverflow.com/questions/42585210/extending-setuptools-extension-to-use-cmake-in-setup-py
import os
import pathlib
import re
import sys
import sysconfig
import platform
import subprocess
import shutil
from distutils.command.install_data import install_data
from setuptools import find_packages, setup, Extension
from setuptools.command.build_ext import build_ext
from setuptools.command.install_lib import install_lib
from setuptools.command.install_scripts import install_scripts
class CMakeExtension(Extension):
"""
An extension to run the cmake build
This simply overrides the base extension class so that setuptools
doesn't try to build your sources for you
"""
def __init__(self, name, sourcedir=''):
Extension.__init__(self, name, sources=[])
self.sourcedir = os.path.abspath(sourcedir)
class InstallCMakeLibsData(install_data):
"""
Just a wrapper to get the install data into the egg-info
Listing the installed files in the egg-info guarantees that
all of the package files will be uninstalled when the user
uninstalls your package through pip
"""
def run(self):
"""
Outfiles are the libraries that were built using cmake
"""
# There seems to be no other way to do this; I tried listing the
# libraries during the execution of the InstallCMakeLibs.run() but
# setuptools never tracked them, seems like setuptools wants to
# track the libraries through package data more than anything...
# help would be appriciated
self.outfiles = self.distribution.data_files
class InstallCMakeLibs(install_lib):
"""
Get the libraries from the parent distribution, use those as the outfiles
Skip building anything; everything is already built, forward libraries to
the installation step
"""
def run(self):
"""
Copy libraries from the bin directory and place them as appropriate
"""
self.announce("Moving library files", level=3)
# We have already built the libraries in the previous build_ext step
self.skip_build = True
build_dir = self.build_dir
# Depending on the files that are generated from your cmake
# build chain, you may need to change the below code, such that
# your files are moved to the appropriate location when the installation
# is run
libs = [os.path.join(build_dir, _lib) for _lib in
os.listdir(build_dir) if
os.path.isfile(os.path.join(build_dir, _lib)) and
os.path.splitext(_lib)[1] in [".dll", ".so", ".dylib"]]
for lib in libs:
shutil.move(lib, os.path.join(self.build_dir,
os.path.basename(lib)))
# Mark the libs for installation, adding them to
# distribution.data_files seems to ensure that setuptools' record
# writer appends them to installed-files.txt in the package's egg-info
#
# Also tried adding the libraries to the distribution.libraries list,
# but that never seemed to add them to the installed-files.txt in the
# egg-info, and the online recommendation seems to be adding libraries
# into eager_resources in the call to setup(), which I think puts them
# in data_files anyways.
#
# What is the best way?
# These are the additional installation files that should be
# included in the package, but are resultant of the cmake build
# step; depending on the files that are generated from your cmake
# build chain, you may need to modify the below code
self.distribution.data_files = [os.path.join(self.install_dir,
os.path.basename(lib))
for lib in libs]
# Must be forced to run after adding the libs to data_files
self.distribution.run_command("install_data")
if (sys.version_info < (3, 0)):
install_lib.run(self)
else:
super().run()
class BuildCMakeExt(build_ext):
"""
Builds using cmake instead of the python setuptools implicit build
"""
def run(self):
"""
Perform build_cmake before doing the 'normal' stuff
"""
try:
out = subprocess.check_output(['cmake', '--version'])
except OSError:
raise RuntimeError(
"CMake must be installed to build the following extensions: " +
", ".join(e.name for e in self.extensions))
if platform.system() == "Windows":
cmake_version = LooseVersion(re.search(r'version\s*([\d.]+)',
out.decode()).group(1))
if cmake_version < '3.1.0':
raise RuntimeError("CMake >= 3.1.0 is required on Windows")
for ext in self.extensions:
self.build_extension(ext)
def build_extension(self, ext):
"""
The steps required to build the extension
"""
self.announce("Preparing the build environment", level=3)
cwd = pathlib.Path().absolute()
# these dirs will be created in build_py, so if you don't have
# any python sources to bundle, the dirs will be missing
build_temp = pathlib.Path(self.build_temp)
extdir = pathlib.Path(self.get_ext_fullpath(ext.name))
if (sys.version_info < (3, 0)):
if not os.path.exists(self.build_temp):
os.makedirs(self.build_temp)
if not os.path.exists(str(build_temp.parent)):
os.makedirs(str(extdir.parent))
else:
build_temp.mkdir(parents=True, exist_ok=True)
extdir.mkdir(parents=True, exist_ok=True)
# Now that the necessary directories are created, build
self.announce("Configuring cmake project", level=3)
# Change your cmake arguments below as necessary
# os.chdir(str(build_temp))
# print(os.getcwd())
self.spawn(['cmake',str(cwd),
'-B'+self.build_temp,
"-DCMAKE_LIBRARY_OUTPUT_DIRECTORY="+ str(extdir.parent.absolute()),
"-DHTOOL_WITH_PYTHON_INTERFACE=True"])
self.spawn(['cmake', '--build', self.build_temp,"--target", "htool_shared",
"--config", "Release"])
# os.chdir(str(cwd))
# cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir]
# cfg = 'Debug' if self.debug else 'Release'
# build_args = ['--config', cfg]
# if platform.system() == "Windows":
# cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(
# cfg.upper(),
# extdir)]
# if sys.maxsize > 2**32:
# cmake_args += ['-A', 'x64']
# build_args += ['--', '/m']
# else:
# cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg]
# build_args += ['--', '-j2']
# env = os.environ.copy()
# env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(
# env.get('CXXFLAGS', ''),
# self.distribution.get_version())
# if not os.path.exists(self.build_temp):
# os.makedirs(self.build_temp)
# subprocess.check_call(['cmake', ext.sourcedir] + cmake_args,
# cwd=self.build_temp, env=env)
# subprocess.check_call(['cmake', '--build', '.'] + build_args,
# cwd=self.build_temp)
setup(
name='Htool',
version='1.0',
package_dir = {'': 'interface'},
py_modules=['htool'],
ext_modules=[CMakeExtension('htool')],
cmdclass={
'build_ext': BuildCMakeExt,
'install_data': InstallCMakeLibsData,
'install_lib': InstallCMakeLibs,
# 'install_scripts': InstallCMakeScripts
}
) | Python | 0 | |
d64367eda03772997af21792e82a2825848c1ae6 | add tests for splat utils | astroquery/splatalogue/tests/test_utils.py | astroquery/splatalogue/tests/test_utils.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from ... import splatalogue
from astropy import units as u
import numpy as np
from .test_splatalogue import patch_post
from .. import utils
def test_clean(patch_post):
    """clean_column_headings should rename 'Resolved QNs' to 'QNs'."""
    table = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    cleaned = utils.clean_column_headings(table)
    assert 'Resolved QNs' not in cleaned.colnames
    assert 'QNs' in cleaned.colnames
def test_merge(patch_post):
    """merge_frequencies should add a strictly positive 'Freq' column."""
    table = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    merged = utils.merge_frequencies(table)
    assert 'Freq' in merged.colnames
    assert np.all(merged['Freq'] > 0)
def test_minimize(patch_post):
    """minimize_table should both merge frequencies and clean headings."""
    table = splatalogue.Splatalogue.query_lines(114*u.GHz,116*u.GHz,chemical_name=' CO ')
    minimized = utils.minimize_table(table)
    assert 'Freq' in minimized.colnames
    assert np.all(minimized['Freq'] > 0)
    assert 'Resolved QNs' not in minimized.colnames
    assert 'QNs' in minimized.colnames
| Python | 0 | |
4420556002c32f512f1afc9ea49ba8f01818f08a | add script to generate all jobs | scripts/generate_all_jobs.py | scripts/generate_all_jobs.py | #!/usr/bin/env python3
import argparse
import os
import subprocess
import sys
from ros_buildfarm.argument import add_argument_config_url
from ros_buildfarm.config import get_index
from ros_buildfarm.config import get_release_build_files
from ros_buildfarm.config import get_source_build_files
from ros_buildfarm.jenkins import connect
def main(argv=None):
    """Generate all Jenkins jobs configured in the buildfarm index.

    :param argv: command line arguments; defaults to ``sys.argv[1:]``.
        The default is now resolved at call time -- the previous
        ``argv=sys.argv[1:]`` default froze the value at import time.
    """
    if argv is None:
        argv = sys.argv[1:]
    parser = argparse.ArgumentParser(
        description='Generate all jobs on Jenkins')
    add_argument_config_url(parser)
    parser.add_argument(
        '--ros-distro-names',
        nargs='*',
        metavar='ROS_DISTRO_NAME',
        default=[],
        help='The list of ROS distribution names if not generating all')
    args = parser.parse_args(argv)

    config = get_index(args.config_url)
    ros_distro_names = config.distributions.keys()

    # reject any requested distro name which is not in the buildfarm index
    invalid_ros_distro_name = [
        n for n in args.ros_distro_names if n not in ros_distro_names]
    if invalid_ros_distro_name:
        parser.error(
            'The following ROS distribution names are not part of the ' +
            'buildfarm index: ' + ', '.join(sorted(invalid_ros_distro_name)))

    # try to connect to Jenkins master
    connect(config.jenkins_url)

    generate_dashboard_job(args.config_url)

    selected_ros_distro_names = [
        n for n in ros_distro_names
        if not args.ros_distro_names or n in args.ros_distro_names]
    for ros_distro_name in sorted(selected_ros_distro_names):
        print(ros_distro_name)
        generate_rosdistro_cache_job(args.config_url, ros_distro_name)

        # release: one status page + maintenance jobs per release build file
        release_build_files = get_release_build_files(config, ros_distro_name)
        for release_build_name in release_build_files.keys():
            generate_release_status_page_job(
                args.config_url, ros_distro_name, release_build_name)
            generate_release_maintenance_jobs(
                args.config_url, ros_distro_name, release_build_name)

        # devel: maintenance jobs per source build file
        source_build_files = get_source_build_files(config, ros_distro_name)
        for source_build_name in source_build_files.keys():
            generate_devel_maintenance_jobs(
                args.config_url, ros_distro_name, source_build_name)

        generate_repos_status_page_jobs(
            args.config_url, config, ros_distro_name)
def generate_dashboard_job(config_url):
    """Invoke the script which (re)configures the buildfarm dashboard job."""
    _check_call([
        'misc/generate_dashboard_job.py',
        config_url,
    ])
def generate_rosdistro_cache_job(config_url, ros_distro_name):
    """Invoke the script which (re)configures the rosdistro cache job."""
    _check_call([
        'misc/generate_rosdistro_cache_job.py',
        config_url,
        ros_distro_name,
    ])
def generate_release_status_page_job(
        config_url, ros_distro_name, release_build_name):
    """Invoke the script which (re)configures one release status page job."""
    _check_call([
        'status/generate_release_status_page_job.py',
        config_url,
        ros_distro_name,
        release_build_name,
    ])
def generate_repos_status_page_jobs(
        config_url, config, ros_distro_name):
    """Create one repos status page job per unique '/building' target repo."""
    # collect all target repositories (building) and their targets
    # from all release build files
    # maps target repository url -> ordered list of 'os_code_name:arch' labels
    target_repositories = {}
    release_build_files = get_release_build_files(config, ros_distro_name)
    for release_build_file in release_build_files.values():
        target_repository = release_build_file.target_repository
        if target_repository not in target_repositories:
            target_repositories[target_repository] = []
        # TODO support other OS names
        if 'ubuntu' in release_build_file.targets:
            targets = release_build_file.targets['ubuntu']
            for os_code_name in sorted(targets.keys()):
                # a pseudo 'source' target precedes the binary arch targets
                target = '%s:source' % os_code_name
                if target not in target_repositories[target_repository]:
                    target_repositories[target_repository].append(target)
                for arch in sorted(targets[os_code_name].keys()):
                    target = '%s:%s' % (os_code_name, arch)
                    if target not in target_repositories[target_repository]:
                        target_repositories[target_repository].append(target)
    # generate a repos status page for each unique building repo
    # using all targets listed in any release build file with that target repo
    for i, target_repository in enumerate(sorted(target_repositories.keys())):
        if not target_repository.endswith('/building'):
            # NOTE(review): the concatenation below yields a double space in
            # the message ("...'%s'  because...").
            print("Skipped target repository '%s' " % target_repository +
                  " because it does not end with '/building'", file=sys.stderr)
            continue
        os_code_name_and_arch_tuples = target_repositories[target_repository]
        # derive testing and main urls from building url
        base_url = os.path.dirname(target_repository)
        testing_url = os.path.join(base_url, 'testing')
        main_url = os.path.join(base_url, 'main')
        # first page is '<distro>_repos', later ones get a numeric suffix
        # starting at 2 ('<distro>_repos2', ...)
        output_name = '%s_repos' % ros_distro_name
        if i > 0:
            output_name += str(i + 1)
        generate_repos_status_page_job(
            config_url,
            [target_repository, testing_url, main_url],
            os_code_name_and_arch_tuples,
            output_name)
def generate_repos_status_page_job(
        config_url, debian_repository_urls, os_code_name_and_arch_tuples,
        output_name):
    """Invoke the script which (re)configures a single repos status page job."""
    invocation = ['status/generate_repos_status_page_job.py', config_url]
    invocation.extend(debian_repository_urls)
    invocation.append('--os-code-name-and-arch-tuples')
    invocation.extend(os_code_name_and_arch_tuples)
    invocation.extend(['--output-name', output_name])
    _check_call(invocation)
def generate_release_maintenance_jobs(
        config_url, ros_distro_name, release_build_name):
    """Invoke the script which (re)configures the release maintenance jobs."""
    _check_call([
        'release/generate_release_maintenance_jobs.py',
        config_url,
        ros_distro_name,
        release_build_name,
    ])
def generate_devel_maintenance_jobs(
        config_url, ros_distro_name, release_build_name):
    """Invoke the script which (re)configures the devel maintenance jobs."""
    # NOTE(review): the caller in main() passes a *source* build name here;
    # the parameter name 'release_build_name' looks like a copy/paste
    # leftover -- consider renaming (kept for keyword-compatibility).
    cmd = [
        'devel/generate_devel_maintenance_jobs.py',
        config_url,
        ros_distro_name,
        release_build_name,
    ]
    _check_call(cmd)
def _check_call(cmd):
    """Run ``cmd`` with cmd[0] resolved relative to this script's directory.

    Prints the invocation (with the unresolved script path, matching the
    previous output) and raises CalledProcessError on a non-zero exit.
    Unlike the previous implementation this does not mutate the caller's
    ``cmd`` list.
    """
    print('')
    print("Invoking '%s'" % ' '.join(cmd))
    print('')
    basepath = os.path.dirname(__file__)
    # build a new list instead of assigning to cmd[0] in place
    resolved_cmd = [os.path.join(basepath, cmd[0])] + list(cmd[1:])
    subprocess.check_call(resolved_cmd)
    print('')
if __name__ == '__main__':
    # script entry point
    main()
| Python | 0.000001 | |
b72f8a9b0d9df7d42c43c6a294cc3aab2cb91641 | Add missing migrations for limit_choices_to on BlogPage.author | blog/migrations/0002_auto_20190605_1104.py | blog/migrations/0002_auto_20190605_1104.py | # Generated by Django 2.2.2 on 2019-06-05 08:04
import blog.abstract
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration adding ``limit_choices_to`` on BlogPage.author.

    Do not hand-edit the field definition: it must match the model state.
    """
    dependencies = [
        ('blog', '0001_squashed_0006_auto_20180206_2239'),
    ]
    operations = [
        migrations.AlterField(
            model_name='blogpage',
            name='author',
            field=models.ForeignKey(blank=True, limit_choices_to=blog.abstract.limit_author_choices, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='author_pages', to=settings.AUTH_USER_MODEL, verbose_name='Author'),
        ),
    ]
| Python | 0.000001 | |
7fdf796440c3a4ed84ffcb4343cd92f0013c8b1f | add current client, supports basic chatting | slack.py | slack.py | from slackclient import SlackClient
def get_client(token='4577027817.4577075131'):
    """Return a SlackClient authenticated with *token*.

    NOTE(review): the default token is hard-coded in source; it should be
    moved to configuration or an environment variable.
    """
    client = SlackClient(token)
    return client
print get_client().api_call('api.test')  # Python 2: smoke-test the API connection at import time
| Python | 0 | |
f1c1206af29ee0f7be8b7477cd409f2844c816b3 | add Todo generator | todo/generator.py | todo/generator.py | # coding=utf8
"""
Generator from todo object to todo format string
"""
from models import Task
from models import Todo
class Generator(object):
    """
    Render a todo object as a human readable string.
    """

    newline = "\n"

    def gen_task_id(self, task_id):
        """Format a task id, e.g. 12 => '12.'"""
        return "{0}.".format(task_id)

    def gen_task_done(self, done):
        """Format the done flag, e.g. True => '[x]' (spaces otherwise)."""
        return '[x]' if done is True else '   '

    def gen_task_content(self, content):
        """Format task content (returned unchanged)."""
        return content

    def gen_name(self, name):
        """Format the todo name, underlined with one dash per character."""
        return name + self.newline + '-' * len(name)

    def gen_task(self, task):
        """Format one task, e.g. Task(1, "Write email", True) => '1. [x] Write email'"""
        parts = (
            self.gen_task_id(task.id),
            self.gen_task_done(task.done),
            self.gen_task_content(task.content),
        )
        return " ".join(parts)

    def generate(self, todo):
        """Render a whole todo object: header first, then one task per line."""
        head = self.gen_name(todo.name) if todo.name else ""
        rendered = [head]
        rendered.extend(self.gen_task(task) for task in todo.tasks)
        return self.newline.join(rendered)
generator = Generator()  # module-level singleton shared by importers of this module
| Python | 0 | |
3c290803bbd6d7401903506b3a27cf2c9ebad0b4 | Add ChatInfoFormatter | bot/action/standard/info/formatter/chat.py | bot/action/standard/info/formatter/chat.py | from bot.action.standard.info.formatter import ApiObjectInfoFormatter
from bot.action.util.format import ChatFormatter
from bot.api.api import Api
from bot.api.domain import ApiObject
class ChatInfoFormatter(ApiObjectInfoFormatter):
    """Formats a chat's information (title, id, admins, ...) for display."""
    def __init__(self, api: Api, chat: ApiObject, bot_user: ApiObject, user: ApiObject):
        # bot_user is this bot's own user; user is the one requesting the
        # info.  Both are only used for the "Am I / Are you admin" checks.
        super().__init__(api, chat)
        self.bot_user = bot_user
        self.user = user
    def format(self, full_info: bool = False):
        """
        :param full_info: If True, adds more info about the chat. Please, note that this additional info requires
        to make THREE synchronous api calls.
        """
        chat = self.api_object
        if full_info:
            self.__format_full(chat)
        else:
            self.__format_simple(chat)
    def __format_full(self, chat: ApiObject):
        # api call 1/3: refresh the chat via getChat and read the extended
        # fields from that fuller response
        chat = self.api.getChat(chat_id=chat.id)
        description = chat.description
        invite_link = self._invite_link(chat.invite_link)
        pinned_message = self._pinned_message(chat.pinned_message)
        sticker_set_name = self._group_sticker_set(chat.sticker_set_name)
        # api calls 2/3 and 3/3: member count and the administrator list
        member_count = self.api.getChatMembersCount(chat_id=chat.id)
        admins = self.api.getChatAdministrators(chat_id=chat.id)
        admin_count = len(admins)
        me_admin = self._yes_no(self._is_admin(self.bot_user, admins))
        you_admin = self._yes_no(self._is_admin(self.user, admins))
        # emit the basic fields first, then the extended ones (order matters
        # for the rendered output)
        self.__format_simple(chat)
        self._add_info("Description", description)
        self._add_info("Invite link", invite_link)
        self._add_info("Pinned message", pinned_message)
        self._add_info("Group sticker set", sticker_set_name)
        self._add_info("Members", member_count)
        self._add_info("Admins", admin_count, "(not counting other bots)")
        self._add_info("Am I admin", me_admin)
        self._add_info("Are you admin", you_admin)
    def __format_simple(self, chat: ApiObject):
        # basic fields available on the chat object itself (no api calls)
        full_data = ChatFormatter(chat).full_data
        title = chat.title
        username = self._username(chat.username)
        _type = chat.type
        _id = chat.id
        all_members_are_admins = self._yes_no(chat.all_members_are_administrators)
        self._add_title(full_data)
        self._add_empty()
        self._add_info("Title", title)
        self._add_info("Username", username)
        self._add_info("Type", _type)
        self._add_info("Id", _id)
        self._add_info("All members are admins", all_members_are_admins)
| Python | 0 | |
1ad56e631c29869d127931b555d0b366f7e75641 | Add test for fftpack. | numpy/fft/tests/test_fftpack.py | numpy/fft/tests/test_fftpack.py | import sys
from numpy.testing import *
set_package_path()
from numpy.fft import *
restore_path()
class test_fftshift(NumpyTestCase):
    # Regression test: fft() must reject an invalid transform length.
    def check_fft_n(self):
        # n=0 is not a valid FFT length, so fft([1, 2, 3], 0) must raise.
        self.failUnlessRaises(ValueError,fft,[1,2,3],0)
if __name__ == "__main__":
    # allow running this test module standalone via numpy's old test runner
    NumpyTest().run()
| Python | 0 | |
ab6fa9717b092f3b8eea4b70920a1d7cef042b69 | Return disappeared __main__ | certchecker/__main__.py | certchecker/__main__.py | import click
from certchecker import CertChecker
@click.command()
@click.option(
    '--profile',
    default='default',
    help="Section name in your boto config file"
)
def main(profile):
    """Run the certificate check for *profile* and print the result."""
    checker = CertChecker(profile)
    print(checker.result)
if __name__ == "__main__":
    # NOTE(review): click commands normally handle their own exit, so the
    # surrounding print() likely never receives a return value -- confirm.
    print(main())
| Python | 0.000169 | |
b9feeb2a37f0596b48f9582e8953d29485167fc8 | Add an event-driven recording tool | tools/sofa-edr.py | tools/sofa-edr.py | #!/usr/bin/env python3
import subprocess
import time
import argparse
if __name__ == '__main__':
    # One-shot flags: each pipeline stage (BWA, BQSR/SMB, HaplotypeCaller)
    # should be profiled at most once.
    bwa_is_recorded = False
    smb_is_recorded = False
    htvc_is_recorded = False
    parser = argparse.ArgumentParser(description='A SOFA wrapper which supports event-driven recording.')
    parser.add_argument('--trace-points', default='', metavar='Comma-separated string list for interested keywords, e.g., "keyword1,keyword2"')
    # NOTE(review): the parsed arguments are not used yet; --trace-points is
    # a placeholder for configurable keywords.
    args = parser.parse_args()
    # Poll the log every 3 seconds; trigger a SOFA recording when a stage
    # keyword first appears.
    while True:
        time.sleep(3)
        print(time.time())
        with open('/home/ubuntu/pbrun_error.log') as f:
            lines = f.readlines()
            lc = 0
            for line in lines:
                lc = lc + 1
                if lc < 6:
                    # skip the first lines of the log (header/preamble)
                    continue
                # BUG FIX: this previously tested `not smb_is_recorded`, so
                # BWA could be recorded repeatedly (and never once SMB had
                # been recorded).
                if line.find('BWA') != -1 and not bwa_is_recorded:
                    bwa_is_recorded = True
                    print('BWA begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-bwa ', shell=True)
                    break
                if line.find('BQSR') != -1 and not smb_is_recorded:
                    smb_is_recorded = True
                    print('SMB begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-smb ', shell=True)
                    break
                if line.find('HaplotypeCaller') != -1 and not htvc_is_recorded:
                    htvc_is_recorded = True
                    print('HTVC begins at ', time.time())
                    time.sleep(120)
                    subprocess.call('sofa record "sleep 20" --profile_all_cpus --logdir=sofalog-htvc ', shell=True)
                    break
            # all three stages recorded: stop polling
            if bwa_is_recorded and smb_is_recorded and htvc_is_recorded:
                print("Tracing is done.")
                break
| Python | 0.000002 | |
3fc58964cc6291698f92cff51f9f0e00f1263357 | Task b | project2/b.py | project2/b.py | from sklearn.feature_extraction import text
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import Pipeline
from pandas import DataFrame
import nltk
from nltk import word_tokenize
from nltk.stem import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
from nltk.tokenize import RegexpTokenizer
# Uncomment if the machine is missing punkt, wordnet or stopwords modules.
# nltk.download('punkt')
# nltk.download('wordnet')
# nltk.download('stopwords')
# RegExpTokenizer reduces term count from 29k to 25k
class StemTokenizer(object):
    """Callable tokenizer for CountVectorizer: \\w+ word split + Snowball stemming."""
    def __init__(self):
        self.wnl = WordNetLemmatizer()
        self.snowball_stemmer = SnowballStemmer("english", ignore_stopwords=True)
        self.regex_tokenizer = RegexpTokenizer(r'\w+')
    def __call__(self, doc):
        # stem every regex-extracted token of the document
        return [self.snowball_stemmer.stem(token)
                for token in self.regex_tokenizer.tokenize(doc)]
# NOTE: this file targets Python 2 (see the bare print statement below).
stop_words = text.ENGLISH_STOP_WORDS
# the eight comp.* and rec.* newsgroups used for this task
categories=['comp.graphics','comp.os.ms-windows.misc','comp.sys.ibm.pc.hardware','comp.sys.mac.hardware','rec.autos','rec.motorcycles','rec.sport.baseball','rec.sport.hockey']
newsgroups_train = fetch_20newsgroups(subset='train',categories=categories)
print("%d documents" % len(newsgroups_train.filenames))
print("%d categories" % len(newsgroups_train.target_names))
# Ignore words appearing in less than 2 documents or more than 99% documents.
# min_df reduces from 100k to 29k
vectorizer = CountVectorizer(analyzer='word',stop_words=stop_words,ngram_range=(1, 1), tokenizer=StemTokenizer(),
                             lowercase=True,max_df=0.99, min_df=2)
# test_corpus = [
#     'This is the first document.',
#     'This is the second second document.',
#     'And the third one.',
#     'Is this the first document?',
# ]
# term-count matrix for the training documents
vectorized_newsgroups_train = vectorizer.fit_transform(newsgroups_train.data)
#print "All terms:", vectorizer.get_feature_names()
# l2-normalized TF-IDF weighting on top of the raw counts
tfidf_transformer = TfidfTransformer(norm='l2')
train_idf = tfidf_transformer.fit_transform(vectorized_newsgroups_train)
print "Number of terms in TF-IDF representation:",train_idf.shape[1]
# pipeline = Pipeline([
# ('vect', CountVectorizer()),
# ('tfidf', TfidfTransformer()),
# ])
# parameters = {
# 'vect__max_df': (0.5, 0.75, 1.0),
# #'vect__max_features': (None, 5000, 10000, 50000),
# 'vect__ngram_range': ((1, 1)), # unigrams only
# 'analyzer': 'word',
# 'stop_words': stop_words,
# #'tfidf__use_idf': (True, False),
# #'tfidf__norm': ('l1', 'l2'),
# }
# multiprocessing requires the fork to happen in a __main__ protected
# block
# find the best parameters for feature extraction -> use later to identify if bigrams help majorly
#grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1, verbose=1)
# print("Performing grid search...")
# print("pipeline:", [name for name, _ in pipeline.steps])
# print("parameters:")
# pprint(parameters)
# t0 = time()
# grid_search.fit(data.data, data.target)
# print("done in %0.3fs" % (time() - t0))
# print()
# print("Best score: %0.3f" % grid_search.best_score_)
# print("Best parameters set:")
# best_parameters = grid_search.best_estimator_.get_params()
# for param_name in sorted(parameters.keys()):
# print("\t%s: %r" % (param_name, best_parameters[param_name])) | Python | 1 | |
0bd65e0e20911e7ac87aba3ef076b327f57b2f6f | Add get-aixdzs.py | get-aixdzs.py | get-aixdzs.py | #!/usr/bin/env python3
import argparse
import html.parser
from typing import List, Tuple
import urllib.request
class AixdzsHTMLParser(html.parser.HTMLParser):
    """Extract the episode title, body text and "next episode" link from one
    episode page of the site."""

    def __init__(self):
        super().__init__()
        self.last_url: str = ''      # href of the most recently seen <a>
        self.next_url: str = ''      # href of the link labelled 下一章[→]
        self.is_in_content_tag: bool = False
        self.content_tag_nested_count: int = 0
        self.content: str = ''       # accumulated episode text
        self.is_in_episode_name_tag: bool = False
        self.episode_name_tag_nested_count: int = 0
        self.episode_name: str = ''

    def handle_starttag(self, tag: str, attrs: List[Tuple[str, str]]):
        attributes = dict(attrs)
        if tag == 'a':
            href = attributes.get('href')
            if href is not None:
                self.last_url = href
        elif tag == 'div':
            # count nested divs inside the content div so the matching
            # close tag can be identified
            if self.is_in_content_tag:
                self.content_tag_nested_count += 1
            if attributes.get('class') == 'content':
                self.is_in_content_tag = True
        elif tag == 'h1':
            if self.is_in_episode_name_tag:
                self.episode_name_tag_nested_count += 1
            else:
                self.is_in_episode_name_tag = True

    def handle_endtag(self, tag: str):
        if tag == 'div':
            if self.content_tag_nested_count > 0:
                self.content_tag_nested_count -= 1
            elif self.is_in_content_tag:
                self.is_in_content_tag = False
        elif tag == 'h1':
            if self.episode_name_tag_nested_count > 0:
                self.episode_name_tag_nested_count -= 1
            elif self.is_in_episode_name_tag:
                self.is_in_episode_name_tag = False

    def handle_data(self, data: str):
        if data == '下一章[→]':
            # text of the "next chapter" link: remember its href
            self.next_url = self.last_url
        elif self.is_in_content_tag:
            self.content += data
        elif self.is_in_episode_name_tag:
            self.episode_name = data
            self.content += '\n' + data + '\n'
class TxtDownloader:
    """Download consecutive episodes starting at *begin_url*, collecting the
    text into ``self.content`` and visited urls into ``self.episode_urls``."""

    def __init__(self, begin_url: str, num_of_episodes_to_get: int):
        self.begin_url: str = begin_url
        self.episode_urls: List[str] = []
        self.num_of_episodes_to_get: int = num_of_episodes_to_get
        self.content: str = ''

    def start(self):
        """Fetch up to ``num_of_episodes_to_get`` pages, following each
        page's "next episode" link until it runs out."""
        url = self.begin_url
        for _episode in range(self.num_of_episodes_to_get):
            page = urllib.request.urlopen(url).read().decode()
            page_parser = AixdzsHTMLParser()
            page_parser.feed(page)
            self.content += page_parser.content
            if not page_parser.next_url:
                break
            url = urllib.parse.urljoin(self.begin_url, page_parser.next_url)
            self.episode_urls.append(url)
def parse_args() -> argparse.Namespace:
    """Parse command line arguments: a starting episode URL and a count."""
    arg_parser = argparse.ArgumentParser(
        description='Download episodes from 愛下電子書 https://tw.aixdzs.com/')
    arg_parser.add_argument('begin_url', help='the URL of the begin episode')
    arg_parser.add_argument(
        'number_of_episodes', type=int,
        help='the number of the episodes to download')
    return arg_parser.parse_args()
if __name__ == '__main__':
    # Guard the script body so importing this module (e.g. for testing) does
    # not trigger argument parsing and network downloads.
    args: argparse.Namespace = parse_args()
    tdler: TxtDownloader = TxtDownloader(args.begin_url, args.number_of_episodes)
    tdler.start()
    print(tdler.content)
| Python | 0.000001 | |
42ab52b6d077443fac20ea872b503589f6ddb3f7 | Create pyPostings.py | pyPostings.py | pyPostings.py | import re
import string
def posting(corpus):
    """Return a flat posting: a list of [token, position] pairs.

    Positions are 1-based and follow token order in *corpus*.
    e.g. posting("Hello, world!") => [['Hello', 1], ['world', 2]]
    """
    # enumerate(start=1) replaces the manual index arithmetic; the local no
    # longer shadows the function name
    return [[token, position]
            for position, token in enumerate(tokenize(corpus), start=1)]


def posting_list(corpus):
    """Return an inverted posting list: token -> list of 1-based positions.

    e.g. posting_list("a b a") => {'a': [1, 3], 'b': [2]}
    """
    postings = {}
    for position, token in enumerate(tokenize(corpus), start=1):
        # setdefault replaces the explicit membership test of the original
        postings.setdefault(token, []).append(position)
    return postings


def tokenize(corpus):
    """Split *corpus* on whitespace and strip one leading and one trailing
    non-word character from each token.

    Raises AssertionError if *corpus* is not a string (unchanged contract).
    NOTE(review): only a single punctuation character is stripped per side
    ('world!!' -> 'world!'); use \\W+ if full stripping is intended.
    """
    assert isinstance(corpus, str), 'Corpus must be a string of characters.'
    # split
    tokens = corpus.split()
    # normalize: raw strings avoid the invalid-escape warning of '\W'
    return [re.sub(r'\A\W', '', re.sub(r'\W\Z', '', token)) for token in tokens]


def not_string(a):
    """Return True unless *a* is the empty string or a single space."""
    return a != " " and a != ""
| Python | 0 | |
ee39e69fe5d6e93844f47eaff0d9547622600fa7 | make parsing times easier | py/phlsys_strtotime.py | py/phlsys_strtotime.py | #!/usr/bin/env python
# encoding: utf-8
"""A poor substitute for PHP's strtotime function."""
import datetime
def describeDurationStringToTimeDelta():
    """Return a human readable description of the duration string format."""
    # adjacent string literals concatenate; the str() wrapper was redundant
    return (
        'time can be specified like "5 hours 20 minutes", use '
        'combinations of seconds, minutes, hours, days, weeks. '
        'each unit should only appear once. you may use floating '
        'point numbers and negative numbers. '
        'e.g. "1 weeks -1.5 days".')
def durationStringToTimeDelta(s):
    """Return a datetime.timedelta based on the supplied string 's'.

    Usage examples:
    >>> str(durationStringToTimeDelta("1 seconds"))
    '0:00:01'
    >>> str(durationStringToTimeDelta("2 minutes"))
    '0:02:00'
    >>> str(durationStringToTimeDelta("2 hours 2 minutes"))
    '2:02:00'
    >>> str(durationStringToTimeDelta("1 days 2 hours 2 minutes"))
    '1 day, 2:02:00'
    >>> str(durationStringToTimeDelta("1.5 days"))
    '1 day, 12:00:00'
    >>> str(durationStringToTimeDelta("1 days -1 hours"))
    '23:00:00'
    >>> str(durationStringToTimeDelta("1 milliseconds"))
    '0:00:00.001000'

    :s: a string in the appropriate time format
    :returns: a datetime.timedelta

    """
    clauses = s.split()
    if len(clauses) % 2:
        raise ValueError("odd number of clauses: " + s)
    # materialize the pairs: under Python 3 zip() returns a one-shot
    # iterator, so the previous code (which called len() on it and consumed
    # it twice) only worked on Python 2
    pairs = list(zip(clauses[::2], clauses[1::2]))
    d = {unit: float(amount) for amount, unit in pairs}
    # a duplicated unit collapses in the dict, so the lengths would differ
    if len(d) != len(pairs):
        raise ValueError("duplicated clauses: " + s)
    return datetime.timedelta(**d)
#------------------------------------------------------------------------------
# Copyright (C) 2012 Bloomberg L.P.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#------------------------------- END-OF-FILE ----------------------------------
| Python | 0.000002 | |
2cf2a89bf3c7ccf667e4bcb623eeb6d0e1ea37bb | print sumthing pr1 | python/py1.py | python/py1.py | #!/usr/bin/env python3
"""
If we list all the natural numbers below 10 that are multiples of 3 or 5, we get 3, 5, 6 and 9. The sum of these multiples is 23.
Find the sum of all the multiples of 3 or 5 below 1000.
"""
# Collect every multiple of 3 or 5 below 1000, then print their sum.
thing = [candidate for candidate in range(1, 1000)
         if candidate % 5 == 0 or candidate % 3 == 0]
print(sum(thing))
| Python | 0.999948 | |
784fd8b08ee0f268350a2003a9c06522c0678874 | Add python code for doing tensor decomposition with scikit-tensor. | python/run.py | python/run.py | import logging
import numpy
from numpy import genfromtxt
from sktensor import sptensor, cp_als
# Set logging to DEBUG to see CP-ALS information
logging.basicConfig(level=logging.DEBUG)
# NOTE: Python 2 script (bare print statements below).
data = genfromtxt('../datasets/movielens-synthesized/ratings-synthesized-50k.csv', delimiter=',')
# we need to convert data into two lists; subscripts/coordinates and values
# the first two CSV columns are the coordinates of the first two tensor
# modes; every row is duplicated with tube index 0 and 1 so the two rating
# columns become the third mode
n = len(data)
subs_1 = numpy.append(data[:,:2], numpy.zeros((n, 1)), 1)
subs_2 = numpy.append(data[:,:2], numpy.ones((n, 1)), 1)
subs = numpy.vstack([subs_1, subs_2])
subs = subs.astype(int)
# column 2 values pair with tube 0, column 3 values with tube 1 (same order
# as the vstack above)
vals = numpy.hstack([data[:,2], data[:, 3]])
vals = vals.flatten()
# convert subs tuple of arrays (rows, cols, tubes)
subs = (subs[:,0], subs[:,1], subs[:,2])
# load into sparse tensor
T = sptensor(subs, vals)
# Decompose tensor using CP-ALS
P, fit, itr, exectimes = cp_als(T, 500, init='random')
P = P.totensor()
# spot-check reconstructed entries; expected values noted inline
print P[1,1193,0] # 5
print P[1,661, 0] # 3
print P[1,594, 1] # 1.6
print P[1,1193, 1] # 2.2
#print numpy.allclose(T, P)
#print P.U[0].shape
#print "-------"
##print P.U[1].shape
#print "-------"
#print P.U[2].shape
| Python | 0 | |
00413958a12607aab942c98581b1a9e6d682ef28 | Create Single-Prime.py | python/Single-Prime.py | python/Single-Prime.py | #By Isabelle.
#Checks a single number and lists all of its factors (except 1 and itself)
import math
num = int(input("Pick a number to undergo the primality test!\n"))
root = int(round(math.sqrt(num)))
# BUG FIX: 0 and 1 are not prime; the original reported them as prime
# because the trial-division loop never runs for them.
prime = num >= 2
for looper in range(2, root + 1):
    # BUG FIX: the original tested `num % 2/3/5 == 0` unconditionally on the
    # first iteration, which wrongly reported the primes 2, 3 and 5
    # themselves as composite.
    if looper in (2, 3, 5) and num % looper == 0 and num != looper:
        print("{} is divisible by a prime number from 2 and 5. Silly you, stop wasting my time.".format(num))
        prime = False
        break
    elif looper % 2 == 0 or looper % 3 == 0 or looper % 5 == 0:
        # multiples of 2, 3 and 5 never divide num unless 2/3/5 already did
        continue
    elif num % looper == 0:
        print("{} can be divided by {}.".format(num, looper))
        prime = False
        break
    else:
        print("{} cannot be divided by {}.".format(num, looper))
if prime:
    print("{} is prime".format(num))
else:
    print("{} is not prime.".format(num))
| Python | 0.000003 | |
1cb8df64d4f6f257d0bd03caaaddb33ad11a5c2c | Add or_gate | python/ch02/or_gate.py | python/ch02/or_gate.py | import numpy as np
def OR(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
if __name__ == '__main__':
for xs in [(0, 0), (1, 0), (0, 1), (1, 1)]:
y = OR(xs[0], xs[1])
print(str(xs) + " -> " + str(y))
| Python | 0.000001 | |
a58a31a6037babdc607593196da2841f13791bfa | Revert "去掉camelcase和underscore的转换, 直接用三方的" | railguns/utils/text.py | railguns/utils/text.py | """
https://github.com/tomchristie/django-rest-framework/issues/944
"""
import re
first_cap_re = re.compile('(.)([A-Z][a-z]+)')
all_cap_re = re.compile('([a-z0-9])([A-Z])')
def camelcase_to_underscore(name):
s1 = first_cap_re.sub(r'\1_\2', name)
return all_cap_re.sub(r'\1_\2', s1).lower()
def underscore_to_camelcase(name, lower_first=True):
result = name.title().replace('_', '')
if lower_first:
return result[0].lower() + result[1:]
else:
return result
def recursive_key_map(function, data):
if isinstance(data, dict):
new_dict = {}
for key, value in data.items():
if isinstance(key, str):
new_key = function(key)
new_dict[new_key] = recursive_key_map(function, value)
return new_dict
elif isinstance(data, (list, tuple)):
return [recursive_key_map(function, value) for value in data]
else:
return data
| Python | 0 | |
cd727a5e17cabcc4ee03f2973775f30b7c8b5a26 | add terrible copypasta'd watchdog-using piece of shit for test running | tasks.py | tasks.py | import sys
import time
from invocations.docs import docs, www
from invocations.testing import test, coverage
from invocations.packaging import vendorize, release
from invoke import ctask as task, Collection, Context
@task(help=test.help)
def integration(c, module=None, runner=None, opts=None):
"""
Run the integration test suite. May be slow!
"""
opts = opts or ""
opts += " --tests=integration/"
test(c, module, runner, opts)
@task
def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# Turn warnings into errors, emit warnings about missing references.
# This gives us a maximally noisy docs build.
# Also enable tracebacks for easier debuggage.
opts = "-W -n -T"
# This is super lolzy but we haven't actually tackled nontrivial in-Python
# task calling yet, so...
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
docs['build'](docs_c, opts=opts)
www['build'](www_c, opts=opts)
@task
def watch_docs(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
"""
try:
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
except ImportError:
sys.exit("If you want to use this, 'pip install watchdog' first.")
class APIBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**docs.configuration())
docs['build'](my_c)
class WWWBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**www.configuration())
www['build'](my_c)
# Readme & WWW triggers WWW
www_handler = WWWBuildHandler(
regexes=['\./README.rst', '\./sites/www'],
ignore_regexes=['.*/\..*\.swp', '\./sites/www/_build'],
)
# Code and docs trigger API
api_handler = APIBuildHandler(
regexes=['\./invoke/', '\./sites/docs'],
ignore_regexes=['.*/\..*\.swp', '\./sites/docs/_build'],
)
# Run observer loop
observer = Observer()
# TODO: Find parent directory of tasks.py and use that.
for x in (www_handler, api_handler):
observer.schedule(x, '.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
@task
def watch_tests(c, module=None):
"""
Watch source tree and test tree for changes, rerunning tests as necessary.
"""
try:
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
except ImportError:
sys.exit("If you want to use this, 'pip install watchdog' first.")
class BuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
test(c, module=module)
# Code and docs trigger API
handler = BuildHandler(
regexes=['\./invoke/', '\./tests'],
ignore_regexes=['.*/\..*\.swp'],
)
# Run observer loop
observer = Observer()
# TODO: Find parent directory of tasks.py and use that.
observer.schedule(handler, '.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
ns = Collection(
test, coverage, integration, vendorize, release, www, docs, sites,
watch_docs, watch_tests
)
ns.configure({'coverage': {'package': 'invoke'}})
| import sys
import time
from invocations.docs import docs, www
from invocations.testing import test, coverage
from invocations.packaging import vendorize, release
from invoke import ctask as task, Collection, Context
@task(help=test.help)
def integration(c, module=None, runner=None, opts=None):
"""
Run the integration test suite. May be slow!
"""
opts = opts or ""
opts += " --tests=integration/"
test(c, module, runner, opts)
@task
def sites(c):
"""
Build both doc sites w/ maxed nitpicking.
"""
# Turn warnings into errors, emit warnings about missing references.
# This gives us a maximally noisy docs build.
# Also enable tracebacks for easier debuggage.
opts = "-W -n -T"
# This is super lolzy but we haven't actually tackled nontrivial in-Python
# task calling yet, so...
docs_c = Context(config=c.config.clone())
www_c = Context(config=c.config.clone())
docs_c.update(**docs.configuration())
www_c.update(**www.configuration())
docs['build'](docs_c, opts=opts)
www['build'](www_c, opts=opts)
@task
def watch(c):
"""
Watch both doc trees & rebuild them if files change.
This includes e.g. rebuilding the API docs if the source code changes;
rebuilding the WWW docs if the README changes; etc.
"""
try:
from watchdog.observers import Observer
from watchdog.events import RegexMatchingEventHandler
except ImportError:
sys.exit("If you want to use this, 'pip install watchdog' first.")
class APIBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**docs.configuration())
docs['build'](my_c)
class WWWBuildHandler(RegexMatchingEventHandler):
def on_any_event(self, event):
my_c = Context(config=c.config.clone())
my_c.update(**www.configuration())
www['build'](my_c)
# Readme & WWW triggers WWW
www_handler = WWWBuildHandler(
regexes=['\./README.rst', '\./sites/www'],
ignore_regexes=['.*/\..*\.swp', '\./sites/www/_build'],
)
# Code and docs trigger API
api_handler = APIBuildHandler(
regexes=['\./invoke/', '\./sites/docs'],
ignore_regexes=['.*/\..*\.swp', '\./sites/docs/_build'],
)
# Run observer loop
observer = Observer()
# TODO: Find parent directory of tasks.py and use that.
for x in (www_handler, api_handler):
observer.schedule(x, '.', recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
ns = Collection(
test, coverage, integration, vendorize, release, www, docs, sites, watch
)
ns.configure({'coverage': {'package': 'invoke'}})
| Python | 0 |
a723c70a0ae9da0f2207dd9278c619be323bda4a | move test parts to avnav_test | avnav_test/avn_debug.py | avnav_test/avn_debug.py | import sys
sys.path.append(r'/home/pi/avnav/pydev')
import pydevd
from avnav_server import *
pydevd.settrace(host='10.222.10.45',stdoutToServer=True, stderrToServer=True)
main(sys.argv)
| Python | 0 | |
aa1b39b455f7145848c287ee9ee85507f5b66de0 | Add Meduza | collector/rss/meduza.py | collector/rss/meduza.py | # coding=utf-8
import feedparser
import logging
from util import date, tags
SOURCE_NAME = 'Meduza'
FEED_URL = 'https://meduza.io/rss/all'
log = logging.getLogger('app')
def parse():
feed = feedparser.parse(FEED_URL)
data = []
for entry in feed['entries']:
data.append({
'title': entry['title'],
'description': entry['description'],
'link': entry['link'],
'published': date.utc_format(entry['published']),
'source_name': SOURCE_NAME,
'source_title': feed['feed']['title'],
'source_link': feed['feed']['link'],
'tags': tags.string_format('world', 'no_tech', 'meduza'),
})
log.info('%s: got %d documents', SOURCE_NAME, len(data))
return data
if __name__ == '__main__':
print parse()
| Python | 0 | |
d50814603217ca9ea47324a0ad516ce7418bc9bf | Add script to generate a standalone timeline view. | build/generate_standalone_timeline_view.py | build/generate_standalone_timeline_view.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import optparse
import parse_deps
import sys
import os
srcdir = os.path.abspath(os.path.join(os.path.dirname(__file__), "../src"))
js_warning_message = """/**
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
css_warning_message = """/**
/* Copyright (c) 2012 The Chromium Authors. All rights reserved.
* Use of this source code is governed by a BSD-style license that can be
* found in the LICENSE file. */
* WARNING: This file is generated by generate_standalone_timeline_view.py
*
* Do not edit directly.
*/
"""
def generate_css(filenames):
  """Flatten every module's style sheets into one CSS blob, warning header first."""
  modules = parse_deps.calc_load_sequence(filenames)

  chunks = [css_warning_message, '\n']
  chunks.extend("""%s\n""" % sheet.timeline_view
                for module in modules
                for sheet in module.style_sheets)

  return ''.join(chunks)
def generate_js(filenames):
  """Concatenate all modules into one JS blob: warning header, the FLATTENED
  registry, then each module's code in dependency order."""
  modules = parse_deps.calc_load_sequence(filenames)

  pieces = [js_warning_message, '\n', "window.FLATTENED = {};\n"]
  pieces.extend("window.FLATTENED['%s'] = true;\n" % module.name
                for module in modules)
  for module in modules:
    pieces.extend((module.timeline_view, "\n"))

  return ''.join(pieces)
def main(args):
  """Parse --js/--css options and write the flattened output file(s).

  Returns 0 on success, 1 when neither output was requested.
  (Python 2 script: note the print statement below.)
  """
  parser = optparse.OptionParser()
  parser.add_option("--js", dest="js_file",
                    help="Where to place generated javascript file")
  parser.add_option("--css", dest="css_file",
                    help="Where to place generated css file")
  options, args = parser.parse_args(args)

  # At least one output target is required.
  if not options.js_file and not options.css_file:
    print "Must specify one, or both of --js and --css"
    return 1

  # The standalone timeline view is rooted at these two entry-point modules.
  input_filenames = [os.path.join(srcdir, f)
                     for f in ['base.js', 'timeline_view.js']]

  if options.js_file:
    with open(options.js_file, 'w') as f:
      f.write(generate_js(input_filenames))

  if options.css_file:
    with open(options.css_file, 'w') as f:
      f.write(generate_css(input_filenames))

  return 0


if __name__ == "__main__":
  sys.exit(main(sys.argv))
93df464ec396774cb161b51d4988773e4ce95e44 | Create lfu-cache.py | Python/lfu-cache.py | Python/lfu-cache.py | # Time: O(1), per operation
# Space: O(k), k is the capacity of cache
# Design and implement a data structure for Least Frequently Used (LFU) cache.
# It should support the following operations: get and put.
#
# get(key) - Get the value (will always be positive) of the key
# if the key exists in the cache, otherwise return -1.
# put(key, value) - Set or insert the value if the key is not already present.
# When the cache reaches its capacity,
# it should invalidate the least frequently used item before inserting a new item.
# For the purpose of this problem, when there is a tie
# (i.e., two or more keys that have the same frequency),
# the least recently used key would be evicted.
#
# Follow up:
# Could you do both operations in O(1) time complexity?
#
# Example:
#
# LFUCache cache = new LFUCache( 2 /* capacity */ );
#
# cache.put(1, 1);
# cache.put(2, 2);
# cache.get(1); // returns 1
# cache.put(3, 3); // evicts key 2
# cache.get(2); // returns -1 (not found)
# cache.get(3); // returns 3.
# cache.put(4, 4); // evicts key 1.
# cache.get(1); // returns -1 (not found)
# cache.get(3); // returns 3
# cache.get(4); // returns 4
class ListNode(object):
    """A doubly-linked-list node holding one cache key."""

    def __init__(self, key):
        self.key = key
        # Detached until LinkedList.append() wires these up.
        self.prev = self.next = None
class LinkedList(object):
    """Minimal doubly linked list (head = oldest entry, tail = newest)."""

    def __init__(self):
        self.head = self.tail = None

    def append(self, node):
        """Attach *node* at the tail end."""
        if self.head is not None:
            node.prev = self.tail
            self.tail.next = node
        else:
            self.head = node
        self.tail = node

    def delete(self, node):
        """Unlink *node*, patching head/tail when it sits at either end."""
        before, after = node.prev, node.next
        if before is not None:
            before.next = after
        else:
            self.head = after
        if after is not None:
            after.prev = before
        else:
            self.tail = before
        del node
class LFUCache(object):
    """LFU cache with LRU tie-breaking among equally-frequent keys.

    Bookkeeping:
      __key_to_val_freq: key -> [value, frequency]
      __freq_to_nodes:   frequency -> LinkedList of keys (the list's head is
                         the least recently used key at that frequency)
      __key_to_node:     key -> its ListNode inside the frequency list
      __min_freq:        smallest frequency currently present (eviction bucket)

    NOTE(review): this file references collections.defaultdict but never
    imports the collections module -- add `import collections` at the top.
    """

    def __init__(self, capacity):
        """
        :type capacity: int
        """
        self.__capa = capacity
        self.__size = 0
        self.__min_freq = 0
        self.__freq_to_nodes = collections.defaultdict(LinkedList)
        self.__key_to_node = {}
        self.__key_to_val_freq = {}

    def get(self, key):
        """
        :type key: int
        :rtype: int
        """
        if key not in self.__key_to_val_freq:
            return -1

        # Promote the key from its current frequency bucket to freq+1.
        self.__freq_to_nodes[self.__key_to_val_freq[key][1]].delete(self.__key_to_node[key])
        if not self.__freq_to_nodes[self.__key_to_val_freq[key][1]].head:
            # Bucket emptied: drop it, and bump min_freq if it was the minimum.
            del self.__freq_to_nodes[self.__key_to_val_freq[key][1]]

            if self.__min_freq == self.__key_to_val_freq[key][1]:
                self.__min_freq += 1
        self.__key_to_val_freq[key][1] += 1
        # Appending at the tail keeps the bucket's head as its LRU key.
        self.__freq_to_nodes[self.__key_to_val_freq[key][1]].append(ListNode(key))
        self.__key_to_node[key] = self.__freq_to_nodes[self.__key_to_val_freq[key][1]].tail

        return self.__key_to_val_freq[key][0]

    def put(self, key, value):
        """
        :type key: int
        :type value: int
        :rtype: void
        """
        if self.__capa <= 0:
            return

        # Existing key: get() already promoted its frequency; just set value.
        if self.get(key) != -1:
            self.__key_to_val_freq[key][0] = value
            return

        if self.__size == self.__capa:
            # Evict the LRU key of the minimum-frequency bucket (its head).
            del self.__key_to_val_freq[self.__freq_to_nodes[self.__min_freq].head.key]
            del self.__key_to_node[self.__freq_to_nodes[self.__min_freq].head.key]
            self.__freq_to_nodes[self.__min_freq].delete(self.__freq_to_nodes[self.__min_freq].head)
            if not self.__freq_to_nodes[self.__min_freq].head:
                del self.__freq_to_nodes[self.__min_freq]
            self.__size -= 1

        # A brand-new key always starts at frequency 1.
        self.__min_freq = 1
        self.__key_to_val_freq[key] = [value, self.__min_freq]
        self.__freq_to_nodes[self.__min_freq].append(ListNode(key))
        self.__key_to_node[key] = self.__freq_to_nodes[self.__min_freq].tail
        self.__size += 1
# Your LFUCache object will be instantiated and called as such:
# obj = LFUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
| Python | 0.000001 | |
f6ef8e0c31163f95fa0c62873a7195ab51f65cf1 | Add cw_are_they_the_same.py | cw_are_they_the_same.py | cw_are_they_the_same.py | """Codewars: Are they the "same"?
6 kyu
URL: https://www.codewars.com/kata/550498447451fbbd7600041c
Given two arrays a and b write a function comp(a, b) (compSame(a, b) in Clojure)
that checks whether the two arrays have the "same" elements, with the same
multiplicities. "Same" means, here, that the elements in b are the elements in
a squared, regardless of the order.
Examples
Valid arrays
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a, b) returns true because in b 121 is the square of 11, 14641 is the
square of 121, 20736 the square of 144, 361 the square of 19, 25921 the
square of 161, and so on. It gets obvious if we write b's elements in terms of
squares:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [11*11, 121*121, 144*144, 19*19, 161*161, 19*19, 144*144, 19*19]
Invalid arrays
If we change the first number to something else, comp may not return true
anymore:
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [132, 14641, 20736, 361, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 132 is not the square of any number of a.
a = [121, 144, 19, 161, 19, 144, 19, 11]
b = [121, 14641, 20736, 36100, 25921, 361, 20736, 361]
comp(a,b) returns false because in b 36100 is not the square of any number of a.
Remarks
- a or b might be [] (all languages except R, Shell). a or b might be nil or null
or None or nothing (except in Haskell, Elixir, C++, Rust, R, Shell, PureScript).
- If a or b are nil (or null or None), the problem doesn't make sense so return false.
- If a or b are empty then the result is self-evident.
- a or b are empty or not empty lists.
"""
def comp(array1, array2):
    """Return True when array2 holds exactly the squares of array1's elements.

    Order is ignored but multiplicities must match.  Per the kata rules a
    None on either side makes the comparison meaningless, so return False.
    (The original stub had no body, which is a SyntaxError in Python.)
    """
    if array1 is None or array2 is None:
        return False
    # Comparing the sorted multisets checks membership AND multiplicity.
    return sorted(x * x for x in array1) == sorted(array2)
def main():
    # Placeholder entry point; the kata only requires comp().
    pass


if __name__ == '__main__':
    main()
| Python | 0.01265 | |
64ea416a335d9c1a8946411c2b3b1a67cd450131 | Add first pass at reconstructed targets module. | vizard/targets.py | vizard/targets.py | import viz
import vizact
import vizshape
import vrlab
class Target:
'''A target is a single cube in the motion-capture space.
Subjects are tasked with touching the cubes during the experiment.
'''
def __init__(self, index, x, y, z):
self.center = x, y, z
self.sphere = vizshape.addSphere(
0.7, center=self.center, color=viz.WHITE)
self.sound = viz.addAudio('{:02d}.wav'.format(index))
self.signal = vizact.Signal()
self.sensor = vizproximity.addBoundingSphereSensor(self.sphere, scale=1)
def activate(self, prox):
prox.clearSensors()
prox.addSensor(self.sensor)
prox.onEnter(self.sensor, lambda e: vrlab.sounds.drip.play())
prox.onEnter(self.sensor, lambda e: self.sphere.color(viz.BLUE))
prox.onEnter(self.sensor, self.signal.send)
prox.onExit(self.sensor, lambda e: self.sphere.color(viz.WHITE))
NUMBERED = (
Target( 0, -1.98, 0.05, -1.86),
Target( 1, -1.72, 1.83, 2.26),
Target( 2, 0.00, 0.05, 1.86),
Target( 3, 1.73, 0.05, -1.79),
Target( 4, 1.89, 0.99, 2.26),
Target( 5, -2.14, 0.93, 0.10),
Target( 6, -0.24, 0.90, -1.76),
Target( 7, 1.51, 1.81, -1.76),
Target( 9, 1.79, 0.05, 0.00),
Target(10, 0.10, 1.89, 0.10),
Target(11, -0.24, 1.86, 2.26),
)
CIRCUITS = (
(10, 0, 1, 3, 8, 4, 11, 7, 9, 6, 5, 2),
(7, 1, 0, 11, 9, 2, 8, 3, 6, 4, 10, 5),
(3, 0, 8, 11, 5, 10, 6, 1, 4, 2, 9, 7),
(11, 8, 7, 3, 4, 6, 9, 5, 0, 2, 1, 10),
(4, 7, 8, 5, 6, 0, 3, 1, 9, 10, 2, 11),
(10, 3, 9, 1, 2, 4, 5, 7, 11, 0, 6, 8),
)
| Python | 0 | |
f1f57561c4ebb5a374b168cd5e6274cbb854611d | change except lines | wakatime/queue.py | wakatime/queue.py | # -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import sqlite3
import traceback
from time import sleep
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error:
log.error(traceback.format_exc())
def pop(self):
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
'is_write': True if row[6] is 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error:
log.debug(traceback.format_exc())
return action
| # -*- coding: utf-8 -*-
"""
wakatime.queue
~~~~~~~~~~~~~~
Queue for offline time logging.
http://wakatime.com
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import logging
import os
import sqlite3
import traceback
from time import sleep
log = logging.getLogger(__name__)
class Queue(object):
DB_FILE = os.path.join(os.path.expanduser('~'), '.wakatime.db')
def connect(self):
conn = sqlite3.connect(self.DB_FILE)
c = conn.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS action (
file text,
time real,
project text,
language text,
lines integer,
branch text,
is_write integer,
plugin text)
''')
return (conn, c)
def push(self, data, plugin):
try:
conn, c = self.connect()
action = {
'file': data.get('file'),
'time': data.get('time'),
'project': data.get('project'),
'language': data.get('language'),
'lines': data.get('lines'),
'branch': data.get('branch'),
'is_write': 1 if data.get('is_write') else 0,
'plugin': plugin,
}
c.execute('INSERT INTO action VALUES (:file,:time,:project,:language,:lines,:branch,:is_write,:plugin)', action)
conn.commit()
conn.close()
except sqlite3.Error, e:
log.error(str(e))
def pop(self):
tries = 3
wait = 0.1
action = None
try:
conn, c = self.connect()
except sqlite3.Error, e:
log.debug(traceback.format_exc())
return None
loop = True
while loop and tries > -1:
try:
c.execute('BEGIN IMMEDIATE')
c.execute('SELECT * FROM action LIMIT 1')
row = c.fetchone()
if row is not None:
values = []
clauses = []
index = 0
for row_name in ['file', 'time', 'project', 'language', 'lines', 'branch', 'is_write']:
if row[index] is not None:
clauses.append('{0}=?'.format(row_name))
values.append(row[index])
else:
clauses.append('{0} IS NULL'.format(row_name))
index += 1
if len(values) > 0:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)), values)
else:
c.execute('DELETE FROM action WHERE {0}'.format(u' AND '.join(clauses)))
conn.commit()
if row is not None:
action = {
'file': row[0],
'time': row[1],
'project': row[2],
'language': row[3],
'lines': row[4],
'branch': row[5],
'is_write': True if row[6] is 1 else False,
'plugin': row[7],
}
loop = False
except sqlite3.Error, e:
log.debug(traceback.format_exc())
sleep(wait)
tries -= 1
try:
conn.close()
except sqlite3.Error, e:
log.debug(traceback.format_exc())
return action
| Python | 0.000137 |
58626e757b463f2aec6751e04fbaf0e83cf0adf9 | Create Bigram.py | src/3-trained-classifier/Bigram.py | src/3-trained-classifier/Bigram.py | __author__ = 'Atef Bellaaj'
__author__ = 'Bellaaj'
import collections
import nltk.metrics
import nltk.classify.util
from nltk.classify import NaiveBayesClassifier
from nltk.corpus import movie_reviews
neg_ids = movie_reviews.fileids('neg')
pos_ids = movie_reviews.fileids('pos')
import itertools
from nltk.collocations import BigramCollocationFinder
from nltk.metrics import BigramAssocMeasures
def bigram_word_feats(words, score_fn=BigramAssocMeasures.chi_sq, n=200):
    """Feature dict mapping each word and each top-n collocation bigram to True."""
    top_bigrams = BigramCollocationFinder.from_words(words).nbest(score_fn, n)
    return {ngram: True for ngram in itertools.chain(words, top_bigrams)}
# Build labelled feature sets: one (features, label) pair per review document.
neg_feats = [(bigram_word_feats(movie_reviews.words(fileids=[f])), 'neg') for f in neg_ids]
pos_feats = [(bigram_word_feats(movie_reviews.words(fileids=[f])), 'pos') for f in pos_ids]

# 75/25 train/test split per class.  NOTE: relies on Python 2 integer
# division ("3/4"); under Python 3 this yields a float and breaks slicing.
neg_limit = len(neg_feats)*3/4
pos_limit = len(pos_feats)*3/4

trainfeats = neg_feats[:neg_limit] + pos_feats[:pos_limit]
testfeats = neg_feats[neg_limit:] + pos_feats[pos_limit:]
print 'train on %d instances, test on %d instances' % (len(trainfeats), len(testfeats))
print neg_feats[1]

classifier = NaiveBayesClassifier.train(trainfeats)

# Persist the trained model so other scripts can reuse it without retraining.
import pickle
f = open('bigram_classifier.pickle', 'wb')
pickle.dump(classifier, f)
f.close()

print 'accuracy:', nltk.classify.util.accuracy(classifier, testfeats)
classifier.show_most_informative_features()

# Collect reference vs. predicted document ids per label so precision,
# recall and F-measure can be computed from set overlaps.
refsets = collections.defaultdict(set)
testsets = collections.defaultdict(set)

for i, (feats, label) in enumerate(testfeats):
    refsets[label].add(i)
    observed = classifier.classify(feats)
    testsets[observed].add(i)

print 'pos precision:', nltk.metrics.precision(refsets['pos'], testsets['pos'])
print 'pos recall:', nltk.metrics.recall(refsets['pos'], testsets['pos'])
print 'pos F-measure:', nltk.metrics.f_measure(refsets['pos'], testsets['pos'])
print 'neg precision:', nltk.metrics.precision(refsets['neg'], testsets['neg'])
print 'neg recall:', nltk.metrics.recall(refsets['neg'], testsets['neg'])
print 'neg F-measure:', nltk.metrics.f_measure(refsets['neg'], testsets['neg'])
| Python | 0.000001 | |
527e9270f599b0bd574a8c2d2fd762c73ad78fb8 | Add migration to changes to upload all historical data | fellowms/migrations/0029_auto_20160714_1435.py | fellowms/migrations/0029_auto_20160714_1435.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-07-14 14:35
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('fellowms', '0028_auto_20160713_1301'),
]
operations = [
migrations.RemoveField(
model_name='event',
name='budget_approve',
),
migrations.AddField(
model_name='event',
name='budget_approved',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10, null=True),
),
migrations.AddField(
model_name='expense',
name='amount_claimed',
field=models.DecimalField(decimal_places=2, default=0.0, max_digits=10),
),
migrations.AddField(
model_name='fellow',
name='application_year',
field=models.IntegerField(default=2016),
),
migrations.AddField(
model_name='fellow',
name='research_area_code',
field=models.CharField(default='', max_length=4),
preserve_default=False,
),
migrations.AddField(
model_name='fellow',
name='selected',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='event',
name='budget_request_attendance_fees',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='budget_request_catering',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='budget_request_others',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='budget_request_subsistence_cost',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='budget_request_travel',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='budget_request_venue_hire',
field=models.DecimalField(blank=True, decimal_places=2, default=0.0, max_digits=10),
),
migrations.AlterField(
model_name='event',
name='end_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='event',
name='fellow',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='fellowms.Fellow'),
),
migrations.AlterField(
model_name='event',
name='justification',
field=models.TextField(blank=True),
),
migrations.AlterField(
model_name='event',
name='location',
field=models.CharField(blank=True, max_length=120),
),
migrations.AlterField(
model_name='event',
name='name',
field=models.CharField(blank=True, max_length=120),
),
migrations.AlterField(
model_name='event',
name='start_date',
field=models.DateField(blank=True, null=True),
),
migrations.AlterField(
model_name='event',
name='url',
field=models.CharField(blank=True, max_length=120),
),
migrations.AlterField(
model_name='expense',
name='proof',
field=models.FileField(blank=True, null=True, upload_to='expenses/'),
),
migrations.AlterField(
model_name='fellow',
name='email',
field=models.EmailField(max_length=254),
),
migrations.AlterField(
model_name='fellow',
name='phone',
field=models.CharField(max_length=120),
),
migrations.AlterField(
model_name='fellow',
name='photo',
field=models.FileField(blank=True, null=True, upload_to='photos/'),
),
migrations.AlterField(
model_name='fellow',
name='research_area',
field=models.TextField(blank=True, null=True),
),
migrations.AlterUniqueTogether(
name='fellow',
unique_together=set([]),
),
migrations.RemoveField(
model_name='fellow',
name='inauguration_year',
),
]
| Python | 0 | |
ad0a1c1404c53f1565ef728a747d5d5f319f1992 | Add tests for Enterprise | auth0/v2/test/authentication/test_enterprise.py | auth0/v2/test/authentication/test_enterprise.py | import unittest
import mock
from ...authentication.enterprise import Enterprise
class TestEnterprise(unittest.TestCase):
@mock.patch('auth0.v2.authentication.enterprise.Enterprise.get')
def test_saml_metadata(self, mock_get):
e = Enterprise('my.domain.com')
e.saml_metadata('cid')
mock_get.assert_called_with(
url='https://my.domain.com/samlp/metadata/cid'
)
@mock.patch('auth0.v2.authentication.enterprise.Enterprise.get')
def test_wsfed_metadata(self, mock_get):
e = Enterprise('my.domain.com')
e.wsfed_metadata()
mock_get.assert_called_with(
url='https://my.domain.com/wsfed/FederationMetadata' \
'/2007-06/FederationMetadata.xml'
)
| Python | 0 | |
8780243a88f505c06962247fdcc6e4bc4abb2912 | add prototype at python | prototype.py | prototype.py | #!/usr/bin/env python
import copy
class Manager:
def __init__(self):
self.showcase = {}
def register(self, name, obj):
self.showcase[name] = obj
def clone(self, name):
return copy.deepcopy(self.showcase[name])
class MessageBox:
def __init__(self, deco_char):
self.deco_char = deco_char
def display(self, message):
print(self.deco_char * (len(message) + len(self.deco_char) * 2 + 2))
print('{0} {1} {0}'.format(self.deco_char, message))
print(self.deco_char * (len(message) + len(self.deco_char) * 2 + 2))
if __name__ == '__main__':
manager = Manager()
box1 = MessageBox('*')
manager.register('ast', box1)
box2 = manager.clone('ast')
print(id(box1))
print(id(box2))
box1.display('hogehoge')
box2.display('hogehoge')
| Python | 0 | |
c61452cb7358c3000992e593349158a0e24a5f51 | Add migration | allseasons/convert/migrations/0004_message.py | allseasons/convert/migrations/0004_message.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-07-28 14:05
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('convert', '0003_auto_20170714_1421'),
]
operations = [
migrations.CreateModel(
name='Message',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sender', models.CharField(max_length=100)),
('receiver', models.CharField(max_length=100)),
('date', models.DateTimeField(auto_now=True)),
('mtype', models.CharField(choices=[('email', 'email')], max_length=100)),
('event', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='convert.EventOfInterest')),
],
options={
'ordering': ('date',),
},
),
]
| Python | 0.000002 | |
c68d2492b8dcc6fbd7fc91e784994ef9cf43db0f | Create LORA_Repeater_logger.py | LORA_Repeater/LORA_Repeater_logger.py | LORA_Repeater/LORA_Repeater_logger.py | from datetime import datetime
NOME_FILE = "LORA_LOG.txt"
import serial
ser = serial.Serial('/dev/ttyACM0', 9600)
while ser.inWaiting()!=0:
trash = ser.readline()
while(True):
while ser.inWaiting()!=0:
incoming = ser.readline().decode("utf-8")
#print(incoming)
parsed = str(incoming).split(",")
time = datetime.now().strftime("%H:%M:%S")
data = parsed[1] +"," + parsed[2] +"," + parsed[3] + "," + time + "\n"
print(data)
with open(NOME_FILE, "a+") as f:
f.write(data)
| Python | 0 | |
399af52c20a5c490471f8e98c4c72aa6e99466df | fix a import typo | src/diamond/handler/mysql.py | src/diamond/handler/mysql.py | # coding=utf-8
"""
Insert the collected values into a mysql table
"""
from Handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
Implements the abstract Handler class, sending data to a mysql table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException, e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
# Attempt to restablish connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
| # coding=utf-8
"""
Insert the collected values into a mysql table
"""
from handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
Implements the abstract Handler class, sending data to a mysql table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
except BaseException, e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
# Attempt to restablish connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
| Python | 0.999997 |
1553863d25eb3053fdf558a290e2eb0a1fae28c0 | Add debug tests. | tests/test_debug.py | tests/test_debug.py | #!/usr/bin/env python
# Test Inform debug functions
try: # python3
import builtins
except ImportError: # python2
import __builtin__ as builtins
# Imports {{{1
from inform import Inform, aaa, ddd, ppp, sss, vvv
from textwrap import dedent
# Test cases {{{1
def test_anglicize(capsys):
Inform(colorscheme=None, prog_name=False)
ppp()
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 18, test_debug.test_anglicize()
''').lstrip()
def test_grouch(capsys):
Inform(colorscheme=None, prog_name=False)
a = 0
b = 'b'
ppp('hey now!', a, b)
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 28, test_debug.test_grouch(): hey now! 0 b
''').lstrip()
def test_salver(capsys):
Inform(colorscheme=None, prog_name=False)
a = 0
b = 'b'
c = [a, b]
d = {a, b}
e = {a:b}
ddd('hey now!', a, b, c, d, e)
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 41, test_debug.test_salver():
'hey now!'
0
'b'
[0, 'b']
{0, 'b'}
{0: 'b'}
''').lstrip()
def test_daiquiri(capsys):
Inform(colorscheme=None, prog_name=False)
a = 0
b = 'b'
c = [a, b]
d = {a, b}
e = {a:b}
ddd(s='hey now!', a=a, b=b, c=c, d=d, e=e)
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 60, test_debug.test_daiquiri():
a = 0
b = 'b'
c = [0, 'b']
d = {0, 'b'}
e = {0: 'b'}
s = 'hey now!'
''').lstrip()
class Info:
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
ddd(self=self, **kwargs)
def test_prude(capsys):
Inform(colorscheme=None, prog_name=False)
Info(email='ted@ledbelly.com')
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 75, test_debug.Info.__init__():
email = 'ted@ledbelly.com'
self = Info object containing {'email': 'ted@ledbelly.com'}
''').lstrip()
def test_update(capsys):
Inform(colorscheme=None, prog_name=False)
a = 0
b = 'b'
c = [a, b]
d = {a, b}
e = {a:b}
vvv()
out, err = capsys.readouterr()
out = '\n'.join(l for l in out.split('\n') if 'capsys' not in l)
assert out == dedent('''
DEBUG: test_debug.py, 94, test_debug.test_update():
a = 0
b = 'b'
c = [0, 'b']
d = {0, 'b'}
e = {0: 'b'}
''').lstrip()
def test_shear(capsys):
Inform(colorscheme=None, prog_name=False)
a = 0
b = 'b'
c = [a, b]
d = {a, b}
e = {a:b}
vvv(a, b, c, d, e)
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 113, test_debug.test_shear():
a = 0
b = 'b'
c = [0, 'b']
d = {0, 'b'}
e = {0: 'b'}
''').lstrip()
def test_prostrate(capsys):
Inform(colorscheme=None, prog_name=False)
sss()
out, err = capsys.readouterr()
out = out.strip().split('\n')
assert out[0] == 'DEBUG: test_debug.py, 126, test_debug.test_prostrate():'
assert out[-2] == " File '/home/ken/src/inform/tests/test_debug.py', line 126, in test_prostrate,"
assert out[-1] == ' sss()'
def test_rubber(capsys):
Inform(colorscheme=None, prog_name=False)
a = aaa('a')
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 135, test_debug.test_rubber(): 'a'
''').lstrip()
assert a == 'a'
b = aaa(b = 'b')
out, err = capsys.readouterr()
assert out == dedent('''
DEBUG: test_debug.py, 142, test_debug.test_rubber(): b: 'b'
''').lstrip()
assert b == 'b'
| Python | 0 | |
60c10a781501b0a467b55a599d835bdc760c8891 | Add test_utils | tests/test_utils.py | tests/test_utils.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-watchman
------------
Tests for `django-watchman` decorators module.
"""
from __future__ import unicode_literals
import unittest
from watchman.utils import get_checks
class TestWatchman(unittest.TestCase):
    """Exercise get_checks()'s default, check_list and skip_list behaviour."""

    def setUp(self):
        pass

    @staticmethod
    def _names(**kwargs):
        """Return the __name__ of every check yielded by get_checks(**kwargs)."""
        return [check.__name__ for check in get_checks(**kwargs)]

    def test_get_checks_returns_all_available_checks_by_default(self):
        self.assertEqual(
            self._names(),
            ['caches_status', 'email_status', 'databases_status'])

    def test_get_checks_with_check_list_returns_union(self):
        names = self._names(check_list=['watchman.checks.caches_status'])
        self.assertEqual(names, ['caches_status'])

    def test_get_checks_with_skip_list_returns_difference(self):
        names = self._names(skip_list=['watchman.checks.caches_status'])
        self.assertEqual(names, ['databases_status', 'email_status'])

    def test_get_checks_with_matching_check_and_skip_list_returns_empty_list(self):
        names = self._names(check_list=['watchman.checks.caches_status'],
                            skip_list=['watchman.checks.caches_status'])
        self.assertEqual(names, [])

    def test_get_checks_with_check_and_skip_list(self):
        names = self._names(
            check_list=['watchman.checks.caches_status',
                        'watchman.checks.databases_status'],
            skip_list=['watchman.checks.caches_status'])
        self.assertEqual(names, ['databases_status'])
| Python | 0.000006 | |
52219c4d55c7b80b4a2185887675615c4d427298 | Add is_sequence util function | lib/ansible/module_utils/common/collections.py | lib/ansible/module_utils/common/collections.py | # Copyright (c), Sviatoslav Sydorenko <ssydoren@redhat.com> 2018
# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause)
"""Collection of low-level utility functions."""
from __future__ import absolute_import, division, print_function
__metaclass__ = type
from ..six import binary_type, text_type
from ._collections_compat import Sequence
def is_string(seq):
    """Identify whether the input has a string-like type (including bytes)."""
    return isinstance(seq, (text_type, binary_type))
def is_sequence(seq, include_strings=False):
    """Identify whether the input is a sequence.

    Strings and bytes are not sequences here,
    unless ``include_strings`` is ``True``.

    Non-indexable things are never of a sequence type.
    """
    if is_string(seq) and not include_strings:
        return False
    return isinstance(seq, Sequence)
| Python | 0.999999 | |
973c2098eec88c9656fe858d4815bd7925d532f6 | add Memento pattern | memento/Memento.py | memento/Memento.py | #
# Python Design Patterns: Memento
# Author: Jakub Vojvoda [github.com/JakubVojvoda]
# 2016
#
# Source code is licensed under MIT License
# (for more details see LICENSE)
#
import sys
#
# Memento
# stores internal state of the Originator object and protects
# against access by objects other than the originator
#
class Memento:
    """Holds a snapshot of the Originator's internal state.

    Only the originator is meant to read the snapshot back; other
    objects should treat a Memento as opaque.
    """

    def __init__(self, state):
        self._snapshot = state

    def setState(self, state):
        self._snapshot = state

    def getState(self):
        return self._snapshot
#
# Originator
# creates a memento containing a snapshot of its current internal
# state and uses the memento to restore its internal state
#
class Originator:
    """Owns some internal state and can checkpoint/restore it via mementos."""

    def __init__(self):
        self._current = 0

    def setState(self, state):
        print("Set state to " + str(state) + ".")
        self._current = state

    def getState(self):
        return self._current

    def setMemento(self, memento):
        # Restore from a previously captured snapshot.
        self._current = memento.getState()

    def createMemento(self):
        # Capture the current state in a fresh snapshot.
        return Memento(self._current)
#
# CareTaker
# is responsible for the memento's safe keeping
#
class CareTaker:
    """Keeps the originator's mementos and can roll back to the latest one."""

    def __init__(self, originator):
        self._originator = originator
        self._mementos = []

    def save(self):
        print("Save state.")
        self._mementos.append(self._originator.createMemento())

    def undo(self):
        print("Undo state.")
        # Restore the most recent snapshot, then discard it from the history.
        latest = self._mementos[-1]
        self._originator.setMemento(latest)
        self._mementos.pop()
if __name__ == "__main__":
    # Demo: checkpoint after states 1 and 2, move to 3, then roll back.
    originator = Originator()
    caretaker = CareTaker(originator)

    for state in (1, 2):
        originator.setState(state)
        caretaker.save()

    originator.setState(3)
    caretaker.undo()

    print("Actual state is " + str(originator.getState()) + ".")
| Python | 0.000001 | |
cbbf4ec62bc8b8ed2c375e9e60939f932d2034e8 | Create jogovelha.py | src/jogovelha.py | src/jogovelha.py | Python | 0.000002 | ||
0e12011edc31f964db8ce419d2f64b6d525be641 | Create delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | delete_occurrences_of_an_element_if_it_occurs_more_than_n_times.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Delete occurrences of an element if it occurs more than n times
#Problem level: 6 kyu
def delete_nth(order, max_e):
    """Drop occurrences of each element beyond its first max_e appearances.

    The list is modified in place (relative order of the survivors is kept)
    and also returned.  If max_e <= 0 every element is removed, matching the
    original behaviour.  Elements must be hashable.

    The original implementation re-counted the processed prefix for every
    element (O(n^2)); this version keeps running counts instead (O(n)).
    """
    seen = {}
    kept = []
    for elem in order:
        seen[elem] = seen.get(elem, 0) + 1
        if seen[elem] <= max_e:
            kept.append(elem)
    order[:] = kept  # mutate in place, like the original pop()-based loop
    return order
| Python | 0.00002 | |
06451bdb55faaa7fd22f7bac403d00dda0018c5d | Create setup.py | setup.py | setup.py | from distutils.core import setup
from setuptools import find_packages

# BUG FIX: nhlscrapi.__version__ is referenced below, but the package was
# never imported, so running this setup.py raised NameError.
import nhlscrapi

setup(
    name="nhlscrapi",
    version=nhlscrapi.__version__,
    description='NHL Scrapr API for Python',
    author='Rob Howley',
    author_email='howley.robert@gmail.com',
    url='https://github.com/robhowley/nhlscrapi',
    packages=find_packages(),
    include_package_data=True,
    license="Apache Software License version 2.0",
    platforms='any',
    zip_safe=False,
    keywords='nhlscrapi',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        # NOTE(review): this classifier says BSD but the license field above
        # says Apache 2.0 -- confirm which one is intended.
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Operating System :: OS Independent',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    test_suite='tests',
    # Dependent packages (distributions)
    install_requires=[],
)
| Python | 0.000001 | |
f1d277c58f80a352b3715c145ce55a4030a4ab6a | add setup.py | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
from setuptools import find_packages
# Collect the distribution metadata in one mapping, then hand it to setup().
_metadata = dict(
    name='Fake Zato',
    version='0.1.0',
    description='Fake Zato',
    author='Zetaops',
    author_email='aliriza@zetaops.io',
    url='https://github.com/zetaops/fake_zato',
    packages=find_packages(),
)

setup(**_metadata)
| Python | 0.000001 | |
a262aeda8b706848b33d30353a9f269daf3acb0d | Bump version | setup.py | setup.py | # Copyright (C) 2011-2012 Yaco Sistemas <lgs@yaco.es>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the text of the file at *rnames, relative to this file's directory.

    The original left the file object for the garbage collector to close;
    a context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as fp:
        return fp.read()
setup(
    name='djangosaml2',
    version='0.13.1',
    description='pysaml2 integration in Django',
    # README provides the overview, CHANGES the release history.
    long_description='\n\n'.join([read('README'), read('CHANGES')]),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Security",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
    ],
    keywords="django,pysaml2,saml2,federated authentication,authentication",
    author="Yaco Sistemas",
    author_email="lgs@yaco.es",
    url="https://bitbucket.org/lgs/djangosaml2",
    license='Apache 2.0',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Exact pins: the releases this package was tested against.
    install_requires=[
        'pysaml2==2.2.0',
        'python-memcached==1.48',
    ],
)
| # Copyright (C) 2011-2012 Yaco Sistemas <lgs@yaco.es>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from setuptools import setup, find_packages
def read(*rnames):
    """Return the text of the file at *rnames, relative to this file's directory.

    The original left the file object for the garbage collector to close;
    a context manager closes it deterministically.
    """
    with open(os.path.join(os.path.dirname(__file__), *rnames)) as fp:
        return fp.read()
setup(
    name='djangosaml2',
    version='0.13.0',
    description='pysaml2 integration in Django',
    # README provides the overview, CHANGES the release history.
    long_description='\n\n'.join([read('README'), read('CHANGES')]),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Environment :: Web Environment",
        "Intended Audience :: Developers",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: WSGI",
        "Topic :: Security",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
    ],
    keywords="django,pysaml2,saml2,federated authentication,authentication",
    author="Yaco Sistemas",
    author_email="lgs@yaco.es",
    url="https://bitbucket.org/lgs/djangosaml2",
    license='Apache 2.0',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    # Exact pins: the releases this package was tested against.
    install_requires=[
        'pysaml2==2.2.0',
        'python-memcached==1.48',
    ],
)
| Python | 0 |
9eacc3c3b81002c721cb24a1641583bf49bc3a53 | bump version number | setup.py | setup.py | # setup.py inspired by the PyPA sample project:
# https://github.com/pypa/sampleproject/blob/master/setup.py
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
def get_long_description():
    """Read and return README.rst (next to this setup.py) as text.

    BUG FIX: the original assigned the contents to a local variable and
    implicitly returned None, so setup() received long_description=None.
    """
    here = path.abspath(path.dirname(__file__))
    with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        return f.read()
setup(
    name = 'pymtl',
    version = '1.4alpha0', # https://www.python.org/dev/peps/pep-0440/
    description = 'Python-based hardware modeling framework',
    long_description = get_long_description(),
    url = 'https://github.com/cornell-brg/pymtl',
    author = 'Derek Lockhart',
    author_email = 'lockhart@csl.cornell.edu',
    # BSD 3-Clause License:
    # - http://choosealicense.com/licenses/bsd-3-clause
    # - http://opensource.org/licenses/BSD-3-Clause
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
    ],
    # Ship the library only; helper scripts and test/benchmark suites stay out.
    packages = find_packages(
        exclude=['scripts', 'tests', 'ubmark', 'perf_tests']
    ),
    # Non-Python templates needed by the Verilog/C++ translation tools.
    package_data={
        'pymtl': [
            'tools/translation/verilator_wrapper.templ.c',
            'tools/translation/verilator_wrapper.templ.py',
            'tools/translation/cpp_wrapper.templ.py',
        ],
    },
    install_requires = [
        'cffi',
        'greenlet',
        'pytest',
        'pytest-xdist',
        # Note: leaving out numpy due to pypy incompatibility
        #'numpy==1.9.0',
    ],
)
| # setup.py inspired by the PyPA sample project:
# https://github.com/pypa/sampleproject/blob/master/setup.py
from setuptools import setup, find_packages
from codecs import open # To use a consistent encoding
from os import path
def get_long_description():
    """Read and return README.rst (next to this setup.py) as text.

    BUG FIX: the original assigned the contents to a local variable and
    implicitly returned None, so setup() received long_description=None.
    """
    here = path.abspath(path.dirname(__file__))
    with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
        return f.read()
setup(
    name = 'pymtl',
    version = '1.3alpha3', # https://www.python.org/dev/peps/pep-0440/
    description = 'Python-based hardware modeling framework',
    long_description = get_long_description(),
    url = 'https://github.com/cornell-brg/pymtl',
    author = 'Derek Lockhart',
    author_email = 'lockhart@csl.cornell.edu',
    # BSD 3-Clause License:
    # - http://choosealicense.com/licenses/bsd-3-clause
    # - http://opensource.org/licenses/BSD-3-Clause
    license='BSD',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 2.7',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: POSIX :: Linux',
    ],
    # Ship the library only; helper scripts and test/benchmark suites stay out.
    packages = find_packages(
        exclude=['scripts', 'tests', 'ubmark', 'perf_tests']
    ),
    # Non-Python templates needed by the Verilog/C++ translation tools.
    package_data={
        'pymtl': [
            'tools/translation/verilator_wrapper.templ.c',
            'tools/translation/verilator_wrapper.templ.py',
            'tools/translation/cpp_wrapper.templ.py',
        ],
    },
    install_requires = [
        'cffi',
        'greenlet',
        'pytest',
        'pytest-xdist',
        # Note: leaving out numpy due to pypy incompatibility
        #'numpy==1.9.0',
    ],
)
| Python | 0.000001 |
c99b5e564252aff55f14dd63c9cdef1728026561 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import twid
from setuptools import setup, find_packages
setup(
    name = 'twid',
    # Single-source the version: read it from the package imported above.
    version = twid.__version__,
    description = "The relevant functions about Taiwan Identification Card system.",
    author = "Plenty Su",
    author_email = "plenty.su@gmail.com",
    license = "MIT",
    packages = find_packages()
)
| Python | 0.000001 | |
f16a21776eafc7fc373b9c43d5db74cea213c897 | Create SoftwareCategory.py | SoftwareCategory.py | SoftwareCategory.py | from lxml import etree
class SoftwareCategory:
    """Represents a <SoftwareCategory> entry attached to a <Feature> element."""

    def __init__(self, parent, category, unlock, scan=False):
        """
        * Parameter: parent (etree element -Tag- 'Feature')
        * Parameter: category (str) -- value of the Category attribute
        * Parameter: unlock (str) -- element text
        * Parameter: scan (bool) -- when True, record the values without
          creating a new XML element (e.g. when scanning an existing tree)
        """
        self.software = category
        self.feature = unlock
        if not scan:
            self.create_software_category(parent, category, unlock)

    @classmethod
    def delete_category(cls, feature, software_category):
        """
        * Parameter: feature (etree element -Tag- 'Feature')
        * Parameter: software_category (str) -- the unlock text of the entry
        * Remove the first <SoftwareCategory> child of *feature* whose text
          equals *software_category*, if any.
          (The original docstring called this a SoftwareCategory object, but
          it is compared against the element text, i.e. the unlock string.)
        """
        for child in feature:
            if child.tag == 'SoftwareCategory' and child.text == software_category:
                feature.remove(child)
                break

    def create_software_category(self, parent, category, unlock):
        """
        * Parameter: parent (etree element -Tag- 'Feature')
        * Parameter: category (str)
        * Parameter: unlock (str)
        * Create an etree subElement with a Tag "SoftwareCategory", an
          attribute Category equal to *category*, and text set to *unlock*.
        * Return the new etree element.
          (BUG FIX: the original promised to return the element but
          returned None; callers ignoring the return value are unaffected.)
        """
        element = etree.SubElement(parent, "SoftwareCategory", Category=category)
        element.text = unlock
        return element
| Python | 0 | |
c54bd0cf16891bbc8b82dd2cb2af1455795325a2 | add setup.py | setup.py | setup.py | import os
import sys
from setuptools import setup
# Execute dsplice/version.py to pick up `version` without importing the
# package (which would pull in its runtime dependencies at install time).
# A context manager closes the file deterministically; the original
# exec(open(...).read()) leaked the handle.
with open('dsplice/version.py') as version_file:
    exec(version_file.read())

setup(name='dsplice',
      version=version,
      packages=['dsplice'],
      description='Docker image merge tool',
      author='Bradley Cicenas',
      author_email='bradley@vektor.nyc',
      url='https://github.com/bcicen/dsplice',
      install_requires=['docker-py>=1.7.2'],
      license='http://opensource.org/licenses/MIT',
      classifiers=(
          'License :: OSI Approved :: MIT License ',
          'Natural Language :: English',
          'Programming Language :: Python',
          'Programming Language :: Python :: 2',
          'Programming Language :: Python :: 3',
      ),
      keywords='docker image merge devops',
      entry_points = {
          'console_scripts' : ['dsplice = dsplice.cli:main']
      }
)
| Python | 0.000001 | |
5d9ac40273f9dae541ffa20b8767ae289b743b95 | Add loader calls in main | nose2/main.py | nose2/main.py | import os
from nose2.compat import unittest
from nose2 import loader, session
class PluggableTestProgram(unittest.TestProgram):
    """A ``unittest.TestProgram`` whose behaviour is extended through a
    Session (config files + plugins) and a pluggable test loader.

    Subclasses may override ``sessionClass``/``loaderClass`` to substitute
    their own implementations.
    """

    sessionClass = session.Session
    loaderClass = loader.PluggableTestLoader

    # XXX override __init__ to warn that testLoader and testRunner are ignored?
    def parseArgs(self, argv):
        """Parse ``argv`` in two passes.

        Pass 1 handles bootstrap options (config file paths, plugin
        loading, verbosity); pass 2 -- run after config and plugins are
        loaded -- handles plugin options and the positional test names.
        """
        self.session = self.sessionClass()
        self.argparse = self.session.argparse # for convenience

        # XXX force these? or can it be avoided?
        self.testLoader = self.loaderClass(self.session)

        # Parse initial arguments like config file paths, verbosity
        self.setInitialArguments()
        cfg_args, argv = self.argparse.parse_args(argv)
        self.handleCfgArgs(cfg_args)

        # Parse arguments for plugins (if any) and test names
        self.argparse.add_argument('testNames', nargs='*')
        args, argv = self.argparse.parse_args(argv)
        if argv:
            self.argparse.error("Unrecognized arguments: %s" % ' '.join(argv))
        self.handleArgs(args)
        self.createTests()

    def setInitialArguments(self):
        """Register the bootstrap options needed before plugins load."""
        self.argparse.add_argument('--config', '-c', nargs='?', action='append',
            default=['unittest.cfg', 'nose2.cfg'])
        self.argparse.add_argument('--no-user-config', action='store_const',
            dest='user_config', const=False, default=True)
        self.argparse.add_argument('--no-plugins', action='store_const',
            dest='load_plugins', const=False, default=True)
        self.argparse.add_argument('--verbose', '-v', action='count')
        self.argparse.add_argument('--quiet', action='store_const',
            dest='verbose', const=0)

    def handleCfgArgs(self, cfg_args):
        """Load config files and (unless suppressed) the plugin set."""
        self.session.loadConfigFiles(*self.findConfigFiles(cfg_args))
        if cfg_args.load_plugins:
            self.loadPlugins()
        # FIXME set verbosity

    def findConfigFiles(self, cfg_args):
        """Return the command-line config paths plus, unless
        --no-user-config was given, the per-user config candidates."""
        filenames = cfg_args.config[:]
        if cfg_args.user_config:
            opts = ('unittest.cfg', 'nose2.cfg', '.unittest.cfg', '.nose2.cfg')
            # NOTE(review): expanduser() is a no-op on names that do not start
            # with '~' -- these were perhaps intended as '~/...'; confirm.
            for fn in opts:
                filenames.append(os.path.expanduser(fn))
        return filenames

    def handleArgs(self, args):
        """Record the positional test names for createTests()."""
        # FIXME activate or deactivate plugins,
        # pass arguments to plugins that want them
        self.testNames = args.testNames

    def loadPlugins(self):
        """Delegate plugin discovery and loading to the session."""
        # FIXME pass in plugins set via __init__ args
        self.session.loadPlugins()

    def createTests(self):
        """Build self.test from the module or from the named tests."""
        # fire plugin hook
        # NOTE(review): with nargs='*', absent test names parse as [] rather
        # than None, so the module branch below may be unreachable -- confirm.
        if self.testNames is None:
            self.test = self.testLoader.loadTestsFromModule(self.module)
        else:
            self.test = self.testLoader.loadTestsFromNames(self.testNames)

    def runTests(self):
        """Not implemented yet."""
        # fire plugin hook
        pass
main_ = PluggableTestProgram
| import os
from nose2.compat import unittest
from nose2 import loader, session
class PluggableTestProgram(unittest.TestProgram):
    """A ``unittest.TestProgram`` whose behaviour is extended through a
    Session (config files + plugins) and a pluggable test loader.

    Several hooks (handleArgs, createTests, runTests) are still stubs in
    this version.
    """

    sessionClass = session.Session
    loaderClass = loader.PluggableTestLoader

    # XXX override __init__ to warn that testLoader and testRunner are ignored?
    def parseArgs(self, argv):
        """Parse ``argv`` in two passes: bootstrap options first, then
        plugin options and the positional test names."""
        self.session = self.sessionClass()
        self.argparse = self.session.argparse # for convenience

        # XXX force these? or can it be avoided?
        self.testLoader = self.loaderClass(self.session)

        # Parse initial arguments like config file paths, verbosity
        self.setInitialArguments()
        cfg_args, argv = self.argparse.parse_args(argv)
        self.handleCfgArgs(cfg_args)

        # Parse arguments for plugins (if any) and test names
        self.argparse.add_argument('testNames', nargs='*')
        args, argv = self.argparse.parse_args(argv)
        if argv:
            self.argparse.error("Unrecognized arguments: %s" % ' '.join(argv))
        self.handleArgs(args)
        self.createTests()

    def setInitialArguments(self):
        """Register the bootstrap options needed before plugins load."""
        self.argparse.add_argument('--config', '-c', nargs='?', action='append',
            default=['unittest.cfg', 'nose2.cfg'])
        self.argparse.add_argument('--no-user-config', action='store_const',
            dest='user_config', const=False, default=True)
        self.argparse.add_argument('--no-plugins', action='store_const',
            dest='load_plugins', const=False, default=True)
        self.argparse.add_argument('--verbose', '-v', action='count')
        self.argparse.add_argument('--quiet', action='store_const',
            dest='verbose', const=0)

    def handleCfgArgs(self, cfg_args):
        """Load config files and (unless suppressed) the plugin set."""
        self.session.loadConfigFiles(*self.findConfigFiles(cfg_args))
        if cfg_args.load_plugins:
            self.loadPlugins()
        # FIXME set verbosity

    def findConfigFiles(self, cfg_args):
        """Return the command-line config paths plus, unless
        --no-user-config was given, the per-user config candidates."""
        filenames = cfg_args.config[:]
        if cfg_args.user_config:
            opts = ('unittest.cfg', 'nose2.cfg', '.unittest.cfg', '.nose2.cfg')
            # NOTE(review): expanduser() is a no-op on names that do not start
            # with '~' -- these were perhaps intended as '~/...'; confirm.
            for fn in opts:
                filenames.append(os.path.expanduser(fn))
        return filenames

    def handleArgs(self, args):
        """Not implemented yet."""
        # FIXME activate or deactivate plugins,
        # pass arguments to plugins that want them
        pass

    def loadPlugins(self):
        """Delegate plugin discovery and loading to the session."""
        # FIXME pass in plugins set via __init__ args
        self.session.loadPlugins()

    def createTests(self):
        """Not implemented yet."""
        # fire plugin hook
        pass

    def runTests(self):
        """Not implemented yet."""
        # fire plugin hook
        pass
main_ = PluggableTestProgram
| Python | 0.000001 |
3f66dbc15cb0564b22d304e09ed3c0b673d59476 | Add setup.py | setup.py | setup.py | from distutils.core import setup
setup(name='fbmq',
      version='1.0.1',
      # BUG FIX: 'json' was listed here, but json is part of the Python
      # standard library, not a PyPI distribution, so pip could not
      # satisfy the requirement and installation failed.
      install_requires=['requests>=2.0']
      )
| Python | 0.000001 | |
a1f17cf4b56edf861c9b650ccd18049ecf168e03 | Add setup.py | setup.py | setup.py | import os
import re
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils when setuptools is unavailable.
    from distutils.core import setup

PACKAGE_NAME = "humanizepy"

HERE = os.path.abspath(os.path.dirname(__file__))

# The README doubles as the long description shown on PyPI.
with open(os.path.join(HERE, "README.md")) as fp:
    README = fp.read()

# Single-source the version: scrape __version__ from the package __init__.
with open(os.path.join(HERE, PACKAGE_NAME, "__init__.py")) as fp:
    VERSION = re.search("__version__ = \"([^\"]+)\"", fp.read()).group(1)

setup(
    name=PACKAGE_NAME,
    version=VERSION,
    author="James \"clug\"",
    author_email="pip@clug.xyz",
    maintainer="James \"clug\"",
    maintainer_email="pip@clug.xyz",
    url="https://github.com/clugg/humanizepy",
    description=("Humanize values that are readable only for developers."),
    long_description=README,
    classifiers=["Development Status :: 5 - Production/Stable",
                 "Environment :: Console",
                 "Intended Audience :: Developers",
                 "License :: OSI Approved :: MIT License",
                 "Natural Language :: English",
                 "Operating System :: OS Independent",
                 "Programming Language :: Python",
                 "Programming Language :: Python :: 2.7",
                 "Programming Language :: Python :: 3.3",
                 "Programming Language :: Python :: 3.4",
                 "Programming Language :: Python :: 3.5",
                 "Topic :: Utilities"],
    license="MIT",
    keywords="humanize values roman numeral binary",
    packages=[PACKAGE_NAME]
)
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.