commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13
values | lang stringclasses 23
values |
|---|---|---|---|---|---|---|---|---|
35f19e61df4c14a2766709f6ab88f08e7fab9756 | add run_test | ericdill/databroker,ericdill/databroker | run_tests.py | run_tests.py | #!/usr/bin/env python
import sys
import pytest
if __name__ == '__main__':
# show output results from every test function
args = ['-v']
# show the message output for skipped and expected failure tests
args.append('-vrxs')
if len(sys.argv) > 1:
args.extend(sys.argv[1:])
print('pytest arguments: {}'.format(args))
# call pytest and exit with the return code from pytest so that
# travis will fail correctly if tests fail
sys.exit(pytest.main(args))
| bsd-3-clause | Python | |
a91b633ba88a01b12305fdfafd570c0b3776b42d | Add a tool script to print errors statistics in output JSON files. | jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts,jdhp-sap/sap-cta-data-pipeline,jdhp-sap/data-pipeline-standalone-scripts | utils/print_num_errors.py | utils/print_num_errors.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Make statistics on score files (stored in JSON files).
"""
import argparse
import json
import numpy as np
def parse_json_file(json_file_path):
with open(json_file_path, "r") as fd:
json_data = json.load(fd)
return json_data
def extract_data_list(json_dict):
io_list = json_dict["io"]
success_list = [image_dict for image_dict in io_list if "error" not in image_dict]
aborted_list = [image_dict for image_dict in io_list if "error" in image_dict]
return success_list, aborted_list
if __name__ == '__main__':
# PARSE OPTIONS ###########################################################
parser = argparse.ArgumentParser(description="Make statistics on score files (JSON files).")
parser.add_argument("fileargs", nargs=1, metavar="FILE",
help="The JSON file to process")
args = parser.parse_args()
json_file_path = args.fileargs[0]
# FETCH SCORE #############################################################
json_data = parse_json_file(json_file_path)
success_list, aborted_list = extract_data_list(json_data)
print("{} images".format(len(success_list) + len(aborted_list)))
print("{} succeeded".format(len(success_list)))
print("{} failed".format(len(aborted_list)))
if len(aborted_list) > 0:
error_message_dict = {}
for image_dict in aborted_list:
error_message = image_dict["error"]["message"]
if error_message in error_message_dict:
error_message_dict[error_message] += 1
else:
error_message_dict[error_message] = 1
for error_message, count in error_message_dict.items():
print("-> {}: {}".format(error_message, count))
| mit | Python | |
4a06723a475fb6312196ea4e0a5ee47414a2c157 | add power_line, power_minor_line | mapzen/vector-datasource,mapzen/vector-datasource,mapzen/vector-datasource | integration-test/232-power-lines.py | integration-test/232-power-lines.py | from . import FixtureTest
class PowerLines(FixtureTest):
def test_power_line(self):
self.load_fixtures(['http://www.openstreetmap.org/way/29399873'])
self.assert_has_feature(
14, 2621, 6338, 'landuse',
{'id': 29399873, 'kind': 'power_line', 'min_zoom': 14, 'sort_rank': 272})
def test_power_line(self):
self.load_fixtures(['http://www.openstreetmap.org/way/444660087'])
self.assert_has_feature(
16, 10485, 25335, 'landuse',
{'id': 444660087, 'kind': 'power_minor_line', 'min_zoom': 17, 'sort_rank': 271})
| mit | Python | |
e2d72a20b241fe2dca1dad1eb391350460c06060 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/4efe74c2473de1220989199258b7a4924dd2a679. | tensorflow/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,gautam1858/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,gautam1858/tensorflow,paolodedios/tensorflow,karllessard/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,frreiss/tensorflow-fred,Intel-tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-experimental_link_static_libraries_once,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,frreiss/tensorflow-fred,tens
orflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,gautam1858/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,gautam1858/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,Intel-tensorflow/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,gautam1858/tensorflow,Intel-tensorflow/tensorflow,Intel-Corporation/tensorflow,frreiss/tensorflow-fred,frreiss/tensorflow-fred,karllessard/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,karllessard/tensorflow,paolodedios/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,paolodedios/tensorflow,yongtang/tensorflow,karllessard/tensorflow,Intel-tensorflow/tensorflow,gautam1858/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-tensorflow/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "4efe74c2473de1220989199258b7a4924dd2a679"
TFRT_SHA256 = "76e8b79220d0d68362782b1877b279dacdaa0a9c4b3a80004cf70cca8131414f"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "ab36dc9c9829c8574b7760e98de18cc4d2b2eaf3"
TFRT_SHA256 = "afe45af9014fdf72e5a58cbfcea2c17522c7ce1a082c1a33b8bcf3db3fac5fd1"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| apache-2.0 | Python |
0b47397b91fec94910f18ea1711184ecfd0f6bf0 | Add tests for file storage engine | prophile/jacquard,prophile/jacquard | jacquard/storage/tests/test_file.py | jacquard/storage/tests/test_file.py | from jacquard.storage.file import FileStore
def test_get_nonexistent_key():
# Just test this works without errors
store = FileStore(':memory:')
assert store.get('test') is None
def test_simple_write():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['test'] = "Bees"
with storage.transaction() as store:
assert store['test'] == "Bees"
def test_enumerate_keys():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo1'] = "Bees"
store['foo2'] = "Faces"
with storage.transaction() as store:
assert set(store.keys()) == set(('foo1', 'foo2'))
def test_update_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
store['foo'] = "Eyes"
with storage.transaction() as store:
assert store['foo'] == "Eyes"
def test_delete_key():
storage = FileStore(':memory:')
with storage.transaction() as store:
store['foo'] = "Bees"
with storage.transaction() as store:
del store['foo']
with storage.transaction() as store:
assert 'foo' not in store
def test_exceptions_back_out_writes():
storage = FileStore(':memory:')
try:
with storage.transaction() as store:
store['foo'] = "Blah"
raise RuntimeError()
except RuntimeError:
pass
with storage.transaction() as store:
assert 'foo' not in store
| mit | Python | |
d56387ee3edb05aee87bb732fb60b9d3a5e8a94b | Add a simple setup.py script | rtyler/greendns | setup.py | setup.py | #!/usr/bin/env python
USE_SETUPTOOLS = False
try:
from setuptools import setup, Extension
USE_SETUPTOOLS = True
except ImportError:
from distutils.core import setup, Extension
setup_kwargs = dict(
name='greendns',
description='''A module for providing greened DNS access via dnspython ''',
version='0.1.0',
author='R. Tyler Ballance',
author_email='tyler@monkeypox.org',
py_modules=['greendns',],
url='http://rtyler.github.com/greendns')
if USE_SETUPTOOLS:
setup_kwargs.update({'test_suite' : 'test_greendns'})
setup(**setup_kwargs)
| bsd-3-clause | Python | |
daf53c1d5564942651e5efac0d1daa9dbd7248f2 | Create setup.py | cganterh/lechat | setup.py | setup.py | """Setup script for lechat."""
from setuptools import setup
setup()
| mit | Python | |
f02d51237443ce239ab44ef7bb38fb625cd0fac1 | Add generic main function setup to test env | HKuz/Test_Code | setup.py | setup.py | #!/Applications/anaconda/envs/Python3/bin
def main():
print("Hello, World!")
if __name__ == '__main__':
main()
| mit | Python | |
7938b647951bb83604c34ebf0932200e13913e35 | Bump version. | skorokithakis/django-loginas,jarcoal/django-loginas,stochastic-technologies/django-loginas,skorokithakis/django-loginas,stochastic-technologies/django-loginas,jarcoal/django-loginas,topletal/django-loginas,intellisense/django-loginas,intellisense/django-loginas,topletal/django-loginas | setup.py | setup.py | #!/usr/bin/env python
import sys
assert sys.version >= '2.5', "Requires Python v2.5 or above."
from distutils.core import setup
from setuptools import find_packages
setup(
name="django-loginas",
version="0.1.3",
author="Stochastic Technologies",
author_email="info@stochastictechnologies.com",
url="https://github.com/stochastic-technologies/django-loginas/",
description="""An app to add a "Log in as user" button in the Django user admin page.""",
long_description="A short Django app that adds a button in the Django user admin page. "
"When a superuser clicks the button, they are instantly logged in as that "
"user.",
license="BSD",
keywords="django",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
test_suite='runtests.run_tests',
tests_require=['Django>=1.4'],
)
| #!/usr/bin/env python
import sys
assert sys.version >= '2.5', "Requires Python v2.5 or above."
from distutils.core import setup
from setuptools import find_packages
setup(
name="django-loginas",
version="0.1.2",
author="Stochastic Technologies",
author_email="info@stochastictechnologies.com",
url="https://github.com/stochastic-technologies/django-loginas/",
description="""An app to add a "Log in as user" button in the Django user admin page.""",
long_description="A short Django app that adds a button in the Django user admin page. "
"When a superuser clicks the button, they are instantly logged in as that "
"user.",
license="BSD",
keywords="django",
zip_safe=False,
include_package_data=True,
packages=find_packages(),
test_suite='runtests.run_tests',
tests_require=['Django>=1.4'],
)
| bsd-3-clause | Python |
162c3cd9b12e559242215d395ae85fc75d8ba37d | Create pl_list.py | dancwilliams/Prefix_List_Script,dancwilliams/Prefix_List_Script | pl_list.py | pl_list.py | __author__ = "Dan C Williams"
__version__ = "0.2"
__date__ = "Jul-24-2016"
__email__ = "dan.c.williams@gmail.com"
__status__ = "Development"
__version__ = "Python 3.5"
import netaddr
import collections
raw_lines = [line.rstrip('\n') for line in open('TEST_PL_DATA.txt')]
blackList = ['0.0.0.0/0', 'description', '!']
split_list = []
ip_list =[]
pl_list = []
pl_dict = collections.defaultdict(list)
pl_dict_final = {}
del_list = []
temp_list = []
split_list = []
def main():
temp_list = []
for i, line in enumerate(raw_lines): # IDs DESCRIPTIONS & DEFAULT ROUTES
for j in blackList:
if j in line:
temp_list.append(line)
for del_lines in temp_list: # CLEARS OUT DESCRIPTIONS & DEFAULT ROUTES
raw_lines.remove(del_lines)
for i, line in enumerate(raw_lines):
temp_list = line.split() #SPLITTING LINES INTO LIST
split_list.append(temp_list) #ADDING LIST TO LIST OF LIST
temp_list = [] #Reset TEMP List
for i, line in enumerate(split_list): #Grab PL name and network and place them
temp_list.append(line[2]) #in a list of lists for further processing
temp_list.append(line[6])
pl_list.append(temp_list)
temp_list = []
for key, value in pl_list: #create pl_dict using the key and network
pl_dict[key].append(netaddr.IPNetwork(value))
for key, value in pl_dict.items():
value = netaddr.cidr_merge(value)
value.sort()
pl_dict_final[key] = value
target = open('test_output.txt', 'w')
d = collections.OrderedDict(sorted(pl_dict_final.items()))
for key, value in d.items():
seq_num = 5
target.write("ip prefix-list " + str(key) + " description Permit Networks Assigned to the Aviation BU\n")
for i, ip_address in enumerate(value):
if str(ip_address.netmask) == '255.255.255.255':
target.write("ip prefix-list " + str(key) + " seq " + str(seq_num) + " permit " + str(ip_address) + ("\n"))
else:
target.write("ip prefix-list " + str(key) + " seq " + str(seq_num) + " permit " + str(ip_address) + " le 32\n")
seq_num += 5
target.write("ip prefix-list " + str(key) + " seq 500000 deny 0.0.0.0/0 le 32\n")
target.write("!\n")
target.close()
print('COMPLETE')
if __name__ == "__main__":
main()
| mit | Python | |
dc1d43acb5730bd9b555b63aa589b0eeceb14e52 | Add a test case to exercise the 'target stop-hook add' command without relying on pexpect to spawn an lldb child command. The test is not "correct" in that the '** Stop Hooks **' message emitted by the Target implementation is invoked asynchronously and is using a separate: | apple/swift-lldb,llvm-mirror/lldb,llvm-mirror/lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,llvm-mirror/lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb | test/stop-hook/TestStopHookCmd.py | test/stop-hook/TestStopHookCmd.py | """
Test lldb target stop-hook command.
"""
import os
import unittest2
import lldb
import pexpect
from lldbtest import *
class StopHookCmdTestCase(TestBase):
mydir = "stop-hook"
@unittest2.skipUnless(sys.platform.startswith("darwin"), "requires Darwin")
def test_with_dsym(self):
"""Test a sequence of target add-hook commands."""
self.buildDsym()
self.stop_hook_cmd_sequence()
def test_with_dwarf(self):
"""Test a sequence of target add-hook commands."""
self.buildDwarf()
self.stop_hook_cmd_sequence()
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line numbers inside main.cpp.
self.begl = line_number('main.cpp', '// Set breakpoint here to test target stop-hook.')
self.endl = line_number('main.cpp', '// End of the line range for which stop-hook is to be run.')
self.line = line_number('main.cpp', '// Another breakpoint which is outside of the stop-hook range.')
def stop_hook_cmd_sequence(self):
"""Test a sequence of target stop-hook commands."""
exe = os.path.join(os.getcwd(), "a.out")
self.runCmd("file " + exe, CURRENT_EXECUTABLE_SET)
self.expect('breakpoint set -f main.cpp -l %d' % self.begl,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 1: file ='main.cpp', line = %d" %
self.begl)
self.expect('breakpoint set -f main.cpp -l %d' % self.line,
BREAKPOINT_CREATED,
startstr = "Breakpoint created: 2: file ='main.cpp', line = %d" %
self.line)
self.runCmd("target stop-hook add -f main.cpp -l %d -e %d -o 'expr ptr'" % (self.begl, self.endl))
self.runCmd('target stop-hook list')
# Now run the program, expect to stop at the the first breakpoint which is within the stop-hook range.
#self.expect('run', 'Stop hook fired',
# substrs = '** Stop Hooks **')
self.runCmd('run')
self.runCmd('thread step-over')
self.expect('thread step-over', 'Stop hook fired again',
substrs = '** Stop Hooks **')
# Now continue the inferior, we'll stop at another breakpoint which is outside the stop-hook range.
self.runCmd('process continue')
# Verify that the 'Stop Hooks' mechanism is NOT BEING fired off.
self.expect('thread step-over', 'Stop hook should not be fired', matching=False,
substrs = '** Stop Hooks **')
if __name__ == '__main__':
import atexit
lldb.SBDebugger.Initialize()
atexit.register(lambda: lldb.SBDebugger.Terminate())
unittest2.main()
| apache-2.0 | Python | |
250e0d2d0e2264b83a82548df3b30dbc784a4fe5 | Add some example client code | twaugh/docker-registry-client,yodle/docker-registry-client | docker-registry-show.py | docker-registry-show.py | """
Copyright 2015 Red Hat, Inc
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import argparse
from docker_registry_client import DockerRegistryClient
import logging
import requests
class CLI(object):
def __init__(self):
self.parser = argparse.ArgumentParser()
excl_group = self.parser.add_mutually_exclusive_group()
excl_group.add_argument("-q", "--quiet", action="store_true")
excl_group.add_argument("-v", "--verbose", action="store_true")
self.parser.add_argument('--verify-ssl', dest='verify_ssl',
action='store_true')
self.parser.add_argument('--no-verify-ssl', dest='verify_ssl',
action='store_false')
self.parser.add_argument('registry', metavar='REGISTRY', nargs=1,
help='registry URL (including scheme)')
self.parser.add_argument('repository', metavar='REPOSITORY', nargs='?')
self.parser.set_defaults(verify_ssl=True)
def run(self):
args = self.parser.parse_args()
basic_config_args = {}
if args.verbose:
basic_config_args['level'] = logging.DEBUG
elif args.quiet:
basic_config_args['level'] = logging.WARNING
logging.basicConfig(**basic_config_args)
client = DockerRegistryClient(args.registry[0],
verify_ssl=args.verify_ssl)
if args.repository:
self.show_tags(client, args.repository)
else:
self.show_repositories(client)
def show_repositories(self, client):
try:
repositories = client.repositories()
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Catalog/Search not supported")
else:
raise
else:
print("Repositories:")
for repository in repositories.keys():
print(" - {0}".format(repository))
def show_tags(self, client, repository):
try:
repo = client.repository(repository)
except requests.HTTPError as e:
if e.response.status_code == requests.codes.not_found:
print("Repository {0} not found".format(repository))
else:
raise
else:
print("Tags in repository {0}:".format(repository))
for tag in repo.tags():
print(" - {0}".format(tag))
if __name__ == '__main__':
try:
cli = CLI()
cli.run()
except KeyboardInterrupt:
pass
| apache-2.0 | Python | |
e7cbd3e2d5a21b003c6ee392da5b8ebe70d279a8 | add lockfile dependency | cloudify-cosmo/cloudify-script-plugin,aria-tosca/cloudify-script-plugin | setup.py | setup.py | #########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
setup(
name='cloudify-script-plugin',
version='1.1a1',
author='Gigaspaces',
author_email='cloudify@gigaspaces.com',
packages=['script_runner'],
description='Plugin for running scripts',
install_requires=[
'cloudify-plugins-common==3.1a1',
'pyzmq==14.3.1',
'bottle==0.12.7',
'lockfile==0.9.1'
],
license='LICENSE',
entry_points={
'console_scripts': [
'ctx = script_runner.ctx_proxy:main',
'ctx-server = script_runner.ctx_server:main'
]
}
)
| #########
# Copyright (c) 2014 GigaSpaces Technologies Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
from setuptools import setup
setup(
name='cloudify-script-plugin',
version='1.1a1',
author='Gigaspaces',
author_email='cloudify@gigaspaces.com',
packages=['script_runner'],
description='Plugin for running scripts',
install_requires=[
'cloudify-plugins-common==3.1a1',
'pyzmq==14.3.1',
'bottle==0.12.7'
],
license='LICENSE',
entry_points={
'console_scripts': [
'ctx = script_runner.ctx_proxy:main',
'ctx-server = script_runner.ctx_server:main'
]
}
)
| apache-2.0 | Python |
83bcb62c98c406e2aa6ce6a9a98750d0b565f750 | Add tests for generic hash | mindw/libnacl,johnttan/libnacl,cachedout/libnacl,coinkite/libnacl,saltstack/libnacl,RaetProtocol/libnacl | tests/unit/test_raw_generichash.py | tests/unit/test_raw_generichash.py | # Import nacl libs
import libnacl
# Import python libs
import unittest
class TestGenericHash(unittest.TestCase):
'''
Test sign functions
'''
def test_keyless_generichash(self):
msg1 = b'Are you suggesting coconuts migrate?'
msg2 = b'Not at all, they could be carried.'
chash1 = libnacl.crypto_generichash(msg1)
chash2 = libnacl.crypto_generichash(msg2)
self.assertNotEqual(msg1, chash1)
self.assertNotEqual(msg2, chash2)
self.assertNotEqual(chash2, chash1)
| apache-2.0 | Python | |
ff814e3dff10ffa54a0569868f32056d37babff6 | Create test1.py | SygtOpenSoftWareTeam/electricwaverecorder | test1.py | test1.py | import this
| mit | Python | |
064124d09973dc58a444d22aa7c47acf94f8fa81 | Add a script to generate JSON bigram frequencies for English | Kitware/clique,Kitware/clique,XDATA-Year-3/clique,XDATA-Year-3/clique,Kitware/clique,XDATA-Year-3/clique | data/bigramfreq.py | data/bigramfreq.py | import json
import lxml.html
from lxml.cssselect import CSSSelector
import requests
import sys
def main():
raw = requests.get("http://norvig.com/mayzner.html")
if not raw:
print >>sys.stderr, "Request failed with code %d" % (raw.status_code)
return 1
tree = lxml.html.fromstring(raw.text)
sel = CSSSelector("td")
freq = {key[:-1].lower(): float(value[:-2]) / 100 for key, value, _ in map(lambda x: x.get("title").split(), filter(lambda y: y.get("title") is not None, sel(tree)))}
print json.dumps(freq)
return 0
if __name__ == "__main__":
sys.exit(main())
| apache-2.0 | Python | |
b9b6b8a9337888fdcfcf15af1dedc758d0662dd0 | add nexted evalute=False test | Arafatk/sympy,abhiii5459/sympy,jaimahajan1997/sympy,bukzor/sympy,AunShiLord/sympy,Sumith1896/sympy,Designist/sympy,yukoba/sympy,kaushik94/sympy,Vishluck/sympy,kumarkrishna/sympy,shipci/sympy,kaushik94/sympy,dqnykamp/sympy,jamesblunt/sympy,beni55/sympy,toolforger/sympy,kaichogami/sympy,moble/sympy,postvakje/sympy,Arafatk/sympy,Arafatk/sympy,grevutiu-gabriel/sympy,sunny94/temp,bukzor/sympy,dqnykamp/sympy,vipulroxx/sympy,farhaanbukhsh/sympy,grevutiu-gabriel/sympy,shipci/sympy,yashsharan/sympy,iamutkarshtiwari/sympy,cswiercz/sympy,farhaanbukhsh/sympy,jamesblunt/sympy,toolforger/sympy,garvitr/sympy,oliverlee/sympy,ahhda/sympy,jaimahajan1997/sympy,Titan-C/sympy,beni55/sympy,Curious72/sympy,pandeyadarsh/sympy,bukzor/sympy,emon10005/sympy,iamutkarshtiwari/sympy,VaibhavAgarwalVA/sympy,hargup/sympy,souravsingh/sympy,Vishluck/sympy,sampadsaha5/sympy,madan96/sympy,atsao72/sympy,drufat/sympy,MridulS/sympy,lindsayad/sympy,jbbskinny/sympy,maniteja123/sympy,MechCoder/sympy,hargup/sympy,postvakje/sympy,Titan-C/sympy,toolforger/sympy,kevalds51/sympy,abloomston/sympy,garvitr/sympy,madan96/sympy,wanglongqi/sympy,hargup/sympy,chaffra/sympy,AunShiLord/sympy,pbrady/sympy,souravsingh/sympy,AkademieOlympia/sympy,shikil/sympy,kumarkrishna/sympy,drufat/sympy,pandeyadarsh/sympy,wanglongqi/sympy,yukoba/sympy,abhiii5459/sympy,garvitr/sympy,Shaswat27/sympy,Designist/sympy,atsao72/sympy,emon10005/sympy,Gadal/sympy,asm666/sympy,Mitchkoens/sympy,sahmed95/sympy,mafiya69/sympy,atreyv/sympy,jaimahajan1997/sympy,VaibhavAgarwalVA/sympy,emon10005/sympy,abloomston/sympy,yashsharan/sympy,madan96/sympy,cccfran/sympy,sahilshekhawat/sympy,sahmed95/sympy,jamesblunt/sympy,meghana1995/sympy,jerli/sympy,pbrady/sympy,asm666/sympy,Designist/sympy,shipci/sympy,rahuldan/sympy,jbbskinny/sympy,Shaswat27/sympy,oliverlee/sympy,vipulroxx/sympy,sahilshekhawat/sympy,mafiya69/sympy,AkademieOlympia/sympy,abhiii5459/sympy,dqnykamp/sympy,souravsingh/sympy
,chaffra/sympy,pandeyadarsh/sympy,MridulS/sympy,Sumith1896/sympy,shikil/sympy,liangjiaxing/sympy,Davidjohnwilson/sympy,maniteja123/sympy,lindsayad/sympy,saurabhjn76/sympy,skidzo/sympy,ChristinaZografou/sympy,cswiercz/sympy,skidzo/sympy,wanglongqi/sympy,kevalds51/sympy,skirpichev/omg,ga7g08/sympy,chaffra/sympy,jbbskinny/sympy,beni55/sympy,mafiya69/sympy,farhaanbukhsh/sympy,MechCoder/sympy,yukoba/sympy,ChristinaZografou/sympy,rahuldan/sympy,wyom/sympy,pbrady/sympy,aktech/sympy,grevutiu-gabriel/sympy,wyom/sympy,Davidjohnwilson/sympy,meghana1995/sympy,moble/sympy,atreyv/sympy,MechCoder/sympy,Vishluck/sympy,skidzo/sympy,AunShiLord/sympy,kaushik94/sympy,ahhda/sympy,ahhda/sympy,lindsayad/sympy,kaichogami/sympy,diofant/diofant,maniteja123/sympy,debugger22/sympy,mcdaniel67/sympy,ChristinaZografou/sympy,Mitchkoens/sympy,moble/sympy,asm666/sympy,kaichogami/sympy,rahuldan/sympy,oliverlee/sympy,cswiercz/sympy,saurabhjn76/sympy,kevalds51/sympy,aktech/sympy,Davidjohnwilson/sympy,abloomston/sympy,sunny94/temp,Shaswat27/sympy,debugger22/sympy,MridulS/sympy,sahmed95/sympy,atsao72/sympy,shikil/sympy,cccfran/sympy,debugger22/sympy,Mitchkoens/sympy,Gadal/sympy,wyom/sympy,Curious72/sympy,sampadsaha5/sympy,liangjiaxing/sympy,ga7g08/sympy,mcdaniel67/sympy,sunny94/temp,Titan-C/sympy,vipulroxx/sympy,Curious72/sympy,liangjiaxing/sympy,jerli/sympy,ga7g08/sympy,meghana1995/sympy,AkademieOlympia/sympy,sahilshekhawat/sympy,mcdaniel67/sympy,drufat/sympy,postvakje/sympy,aktech/sympy,saurabhjn76/sympy,Gadal/sympy,sampadsaha5/sympy,iamutkarshtiwari/sympy,VaibhavAgarwalVA/sympy,yashsharan/sympy,jerli/sympy,Sumith1896/sympy,cccfran/sympy,atreyv/sympy,kumarkrishna/sympy | sympy/core/tests/test_evaluate.py | sympy/core/tests/test_evaluate.py | from sympy.abc import x, y
from sympy.core.evaluate import evaluate
from sympy.core import Mul, Add
def test_add():
with evaluate(False):
expr = x + x
assert isinstance(expr, Add)
assert expr.args == (x, x)
assert isinstance(x + x, Mul)
def test_nested():
with evaluate(False):
expr = (x + x) + (y + y)
assert expr.args == ((x + x), (y + y))
assert expr.args[0].args == (x, x)
| bsd-3-clause | Python | |
6330db1c6d3261d7a22046fa11cc1c661ba57484 | Add basic code for uploading videos to youtube. | punchagan/gg2yt | yt.py | yt.py | """ Parse messages and post links to youtube. """
from __future__ import print_function
import email
import logging
from os.path import join
import re
def configure_logger():
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
fh = logging.FileHandler('yt.log')
fh.setLevel(logging.DEBUG)
# create console handler with a higher log level
ch = logging.StreamHandler()
ch.setLevel(logging.ERROR)
# create formatter and add it to the handlers
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
logger.addHandler(fh)
logger.addHandler(ch)
return logger
LOGGER = configure_logger()
URL_RE = re.compile('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+')
VID_RE = re.compile('(v=(?P<query>[^&]*))|(youtu.be/(?P<path>.*))')
def get_message_text(message):
msg = email.message_from_string(message)
for part in msg.walk():
content_type = part.get_content_type()
if content_type == 'text/plain':
text = part.get_payload()
if 'X-Google-Groups:' in text: # email has double headers!
text = get_message_text(text)
break
else:
raise RuntimeError('No text for email ...')
return text
def get_urls(text):
urls = set()
for line in text.splitlines():
if not line.startswith('>'):
urls.update(set(URL_RE.findall(line)))
for url in urls:
yield url
def get_video_id(url):
m = VID_RE.search(url)
if m is not None:
return m.groupdict()['query'] or m.groupdict()['path']
def get_yt_client(username, password, developer_key):
from gdata.youtube.service import YouTubeService
yt = YouTubeService()
yt.ClientLogin(username, password, 'gg2yt')
yt.developer_key = developer_key
return yt
def add_video_to_playlist(yt_client, playlist_id, video_id):
playlist_uri = 'http://gdata.youtube.com/feeds/api/playlists/%s' % playlist_id
try:
yt_client.AddPlaylistVideoEntryToPlaylist(playlist_uri, video_id)
except Exception as e:
LOGGER.error('Failed to upload %s with error %s' % (video_id, e.message))
if __name__ == '__main__':
    from newschimp.social.gg import WebSession
    from settings import (group_id, topic_id, playlist_id, username, password, developer_key)
    yt_client = get_yt_client(username, password, developer_key)
    # Walk every page of the topic and upload each linked video.
    # NOTE(review): the page count 33 is hard-coded -- confirm it matches
    # the actual topic length.
    for page_number in range(1, 33):
        session = WebSession(username, password)
        for msg_id, message in session.get_messages_in_page(group_id, topic_id, page_number):
            url_counter = 0
            # Local cache path of this message, used only for log context.
            message_path = join(session.cache_dir, group_id, topic_id, str(page_number), msg_id)
            LOGGER.info('Processing message - %s' % message_path)
            text = get_message_text(message)
            for url in get_urls(text):
                url_counter += 1
                video_id = get_video_id(url)
                if video_id is not None:
                    add_video_to_playlist(yt_client, playlist_id, video_id)
                    LOGGER.info('Uploading %s' % video_id)
                else:
                    LOGGER.error('Failed to parse url: %s' % url)
            if url_counter == 0:
                LOGGER.warn('No urls found in: %s' % message_path)
        session.close()
| mit | Python | |
9f551b236a5d4052f2371cc11613e84808f43dee | Add setup.py | sgaynetdinov/py-vkontakte | setup.py | setup.py | #!/usr/bin/env python
from distutils.core import setup
# Packaging metadata for pyvk, a Python wrapper around the vk.com API.
setup(
    name='pyvk',
    version='2016.08',
    packages=['pyvk'],
    url='https://github.com/sgaynetdinov/pyvk',
    license='MIT License',
    author='Sergey Gaynetdinov',
    author_email='s.gaynetdinov@gmail.com',
    description='Python API wrapper around vk.com API',
    classifiers=[
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7'
    ],
    install_requires=[
        'requests',
    ],
)
| mit | Python | |
fcd027a115fd3c690f042468c303eeedc74774fa | Use setuptools instead of distribute. | Aloomaio/facebook-sdk,mobolic/facebook-sdk | setup.py | setup.py | #!/usr/bin/env python
from setuptools import setup
# Packaging metadata for the facebook-sdk Graph API client.
setup(
    name='facebook-sdk',
    version='0.4.0',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    maintainer='Martey Dodoo',
    maintainer_email='facebook-sdk@marteydodoo.com',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=open("README.rst").read(),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    install_requires=[
        'requests',
    ],
)
| #!/usr/bin/env python
# Previous revision: identical metadata, but built on distutils instead of
# setuptools.
from distutils.core import setup
setup(
    name='facebook-sdk',
    version='0.4.0',
    description='This client library is designed to support the Facebook '
                'Graph API and the official Facebook JavaScript SDK, which '
                'is the canonical way to implement Facebook authentication.',
    author='Facebook',
    maintainer='Martey Dodoo',
    maintainer_email='facebook-sdk@marteydodoo.com',
    url='https://github.com/pythonforfacebook/facebook-sdk',
    license='Apache',
    py_modules=[
        'facebook',
    ],
    long_description=open("README.rst").read(),
    classifiers=[
        'License :: OSI Approved :: Apache Software License',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
    ],
    install_requires=[
        'requests',
    ],
)
| apache-2.0 | Python |
923c6b5a4181dfd9ae3f3c26ba411a63e27a79fe | add test for custom argument parser | FunTimeCoding/python-utility,FunTimeCoding/python-utility | tests/test_custom_argument_parser.py | tests/test_custom_argument_parser.py | import pytest
from python_utility.custom_argument_parser import CustomArgumentParser
def test_custom_argument_parser() -> None:
    """CustomArgumentParser.error must abort the program via SystemExit."""
    with pytest.raises(SystemExit):
        CustomArgumentParser().error(message='test')
| mit | Python | |
220671d4bc2300983cf200cc6f7834efef458ff1 | add a new factors module | svimanet/IRC-Bob | modules/factors.py | modules/factors.py | # finding all factors for a given number
def find_factors(x):
    """Print a header line followed by every positive divisor of x."""
    print("The factors of ", x, " are: ")
    divisors = (d for d in range(1, x + 1) if x % d == 0)
    for d in divisors:
        print(d)
def print_factors():
    """Prompt for an integer and print the factors of its absolute value.

    Bug fix: input() returns a string on Python 3, so the original
    comparison ``num >= 0`` raised TypeError; parse to int first and
    report invalid input (the original "valid number" branch was
    unreachable).
    """
    try:
        num = int(input("Enter a number to print its factors: "))
    except ValueError:
        print("Please enter a valid number.")
        return
    # abs() merges the original's positive and negative branches.
    find_factors(abs(num))
| unlicense | Python | |
291117e0c56fb00fd27c93a95b883784cf69c9bc | add dataset parser | Superjom/NeuralNetworks,Superjom/NeuralNetworks | apps/paper/dataset.py | apps/paper/dataset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on March 7, 2014
@author: Chunwei Yan @ PKU
@mail: yanchunwei@outlook.com
'''
from __future__ import division
class DUC(object):
    """Reader for a single DUC document file."""

    def __init__(self, path):
        # Path to the SGML-like document on disk.
        self.path = path

    def get_text(self):
        """Return the stripped lines found between <TEXT> and </TEXT>."""
        lines = []
        inside = False
        with open(self.path) as handle:
            for raw in handle.readlines():
                if '<TEXT>' in raw:
                    inside = True
                elif '</TEXT>' in raw:
                    break
                elif inside:
                    lines.append(raw.strip())
        return lines

    def get_sentences(self):
        """Split the document body into sentences on '.'.

        :return:
            list of sentences(str)
        """
        return ' '.join(self.get_text()).split('.')
if __name__ == '__main__':
    # Ad-hoc smoke test against a local DUC 2005 document; adjust the
    # hard-coded path before running elsewhere.
    duc = DUC('/home/chunwei/Lab/NeuralNetworks/apps/paper/data/duc2005_docs/d301i/FT921-10162')
    duc.get_sentences()
| apache-2.0 | Python | |
070d3f780ffab6e866fb3d1d7fc21bd77fb31ae6 | Add momento pattern | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | momento-pattern.py | momento-pattern.py | #!/usr/bin/python
import copy
class Originator(object):
    """Memento pattern: owner of mutable state that can snapshot/restore it."""

    class Memento(object):
        """Opaque snapshot of an Originator's state."""

        def __init__(self, mstate):
            self.mstate = mstate

        def rollback_state(self):
            return self.mstate

    def set_state(self, state):
        print ('Originator: setup state to: {0}'.format(state))
        self.state = state

    def get_state(self):
        print ('Originator: reading state to: {0}'.format(self.state))

    def save_state(self):
        print ('Originator: saving state')
        # Deep-copy so later mutations cannot leak into the snapshot.
        return self.Memento(copy.deepcopy(self))

    def rollback_state(self, memento):
        # Bug fix: the original did ``self = memento.rollback_state()``,
        # which only rebinds the local name and never restores the caller's
        # object.  Copy the snapshot's attributes back onto this instance
        # (deep-copied again so the memento stays reusable).
        self.__dict__.update(copy.deepcopy(memento.rollback_state().__dict__))
        print ('Originator: rollbac to state: {0}'.format(self.state))
if __name__ == '__main__':
    # Demo: mutate state twice, snapshot, mutate again, then roll back.
    orig = Originator()
    orig.set_state('State 1')
    orig.get_state()
    orig.set_state('State 2')
    orig.get_state()
    saved_state = orig.save_state()
    orig.set_state('State 3')
    orig.get_state()
    orig.rollback_state(saved_state)
| mit | Python | |
8e3e1883b9aa25091b6a9a1b4684128bd56659f7 | Add new test | phuonghuynh/compressor,phuonghuynh/compressor,phuonghuynh/compressor,phuonghuynh/compressor,phuonghuynh/compressor | setup.py | setup.py | """
Finix Python client library.
See ``README.md`` for usage advice.
"""
import os
import re
try:
    import setuptools
except ImportError:
    import distutils.core
    setup = distutils.core.setup
else:
    setup = setuptools.setup
    # NOTE(review): PACKAGE is only bound on this branch; if setuptools is
    # missing, the VERSION lookup below raises NameError.
    PACKAGE = next((str(s) for s in setuptools.find_packages('.', exclude=("tests", "tests.*"))), None)
# Absolute directory containing this setup.py.
PWD = os.path.abspath(os.path.dirname(__file__))
# Scrape __version__ out of the package's __init__.py.
VERSION = (
    re
    .compile(r".*__version__ = '(.*?)'", re.S)
    .match(open(os.path.join(PWD, PACKAGE, "__init__.py")).read())
    .group(1)
)
with open(os.path.join(PWD, "README.md")) as f:
    README = f.read()
requires = [
    "coreapi==1.20.0",
    "finix-wac==0.31"
]
extras_require = {
    "tests": [
    ]
}
scripts = [
    # 'bin/citadel'
]
setup(
    name=PACKAGE,
    version=VERSION,
    url='https://finixpayments.com/',
    license='MIT License',
    author='Finix Payments',
    author_email='dev@finixpayments.com',
    description='Payments API',
    long_description=README,
    packages=[PACKAGE],
    test_suite='nose.collector',
    install_requires=requires,
    tests_require=extras_require['tests'],
    dependency_links=[],
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
    include_package_data=True,
    zip_safe=False,
    scripts=scripts,
    extras_require=extras_require,
    setup_requires=['nose>=1.3.7']
)
| apache-2.0 | Python | |
0fe1d3eb78ef3d2c5dbbd5a662829309ab808a6f | Add setup.py | diogo149/treeano,jagill/treeano,diogo149/treeano,jagill/treeano,nsauder/treeano,diogo149/treeano,nsauder/treeano,nsauder/treeano,jagill/treeano | setup.py | setup.py | from setuptools import setup
treeano_version = '0.0.1'
# Minimal distribution metadata; ships both the treeano and canopy packages.
setup(
    name="treeano",
    version=treeano_version,
    packages=["treeano", "canopy"]
)
| apache-2.0 | Python | |
529d9328570febd037077cfe865feedb841a1162 | Create setup.py | WeiXuanChan/autoD | setup.py | setup.py | from distutils.core import setup
# Distribution metadata for autoD (forward automatic differentiation).
setup(
  name = 'autoD',         # How you named your package folder (MyLib)
  packages = ['autoD'],   # Chose the same as "name"
  version = '3.7.0',      # Start with a small number and increase it with every change you make
  license='MIT',        # Chose a license from here: https://help.github.com/articles/licensing-a-repository
  description = 'Forward automatic differentiation',   # Give a short description about your library
  author = 'Wei Xuan Chan',                   # Type in your name
  author_email = 'w.x.chan1986@gmail.com',      # Type in your E-Mail
  url = 'https://github.com/WeiXuanChan/autoD',   # Provide either the link to your github or to your website
  download_url = 'https://github.com/WeiXuanChan/autoD/archive/v3.7.0.tar.gz',    # I explain this later on
  keywords = ['automatic', 'differentiation'],   # Keywords that define your package best
  install_requires=['numpy'],
  classifiers=[
    'Development Status :: 5 - Production/Stable',      # Chose either "3 - Alpha", "4 - Beta" or "5 - Production/Stable" as the current state of your package
    'Intended Audience :: Developers',      # Define that your audience are developers
    'Topic :: Software Development :: Build Tools',
    'License :: OSI Approved :: MIT License',   # Again, pick a license
    'Programming Language :: Python :: 3',      # Specify which python versions that you want to support
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
  ],
)
| mit | Python | |
20b31aa5faa155639df8c206de2864af80924254 | add setup.py script | thomlake/pytorch-attention | setup.py | setup.py | from distutils.core import setup
# Distribution metadata for the PyTorch attention helper.
setup(
    name='attention',
    version='0.1.0',
    author='tllake',
    author_email='thom.l.lake@gmail.com',
    packages=['attention'],
    description='An attention function for PyTorch.',
    long_description=open('README.md').read())
5df5a19cba3bd543bcadd92d57fdd07d84b38339 | update project page link in setup script | dementrock/pycparser,fjalex/pycparser,strazzere/py010parser,bowlofstew/pycparser,fjalex/pycparser,jorik041/pycparser,Nairolf21/pycparser,dementrock/pycparser,bowlofstew/pycparser,dubslow/pycparser,jorik041/pycparser,sideeffects/pycparser,keulraesik/pycparser,CtheSky/pycparser,fjalex/pycparser,sideeffects/pycparser,dubslow/pycparser,Nairolf21/pycparser,jorik041/pycparser,dubslow/pycparser,strazzere/py010parser,Nairolf21/pycparser,bowlofstew/pycparser,keulraesik/pycparser,sideeffects/pycparser,sideeffects/pycparser,dementrock/pycparser,CtheSky/pycparser,keulraesik/pycparser,CtheSky/pycparser | setup.py | setup.py | import os, sys
from distutils.core import setup
setup(
# metadata
name='pycparser',
description='C parser in Python',
long_description="""
pycparser is a complete parser of the C language, written in
pure Python using the PLY parsing library.
It parses C code into an AST and can serve as a front-end for
C compilers or analysis tools.
""",
license='BSD',
version='2.09',
author='Eli Bendersky',
maintainer='Eli Bendersky',
author_email='eliben@gmail.com',
url='https://bitbucket.org/eliben/pycparser',
platforms='Cross Platform',
classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',],
packages=['pycparser'],
package_data={'pycparser': ['*.cfg']},
)
| import os, sys
from distutils.core import setup
setup(
# metadata
name='pycparser',
description='C parser in Python',
long_description="""
pycparser is a complete parser of the C language, written in
pure Python using the PLY parsing library.
It parses C code into an AST and can serve as a front-end for
C compilers or analysis tools.
""",
license='BSD',
version='2.09',
author='Eli Bendersky',
maintainer='Eli Bendersky',
author_email='eliben@gmail.com',
url='http://code.google.com/p/pycparser/',
platforms='Cross Platform',
classifiers = [
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',],
packages=['pycparser'],
package_data={'pycparser': ['*.cfg']},
)
| bsd-3-clause | Python |
229c54fa4122f9c08aae9b31dc6720e78daaf90d | add setup | cdimascio/py-readability-metrics | setup.py | setup.py | #!/user/bin/env python
from setuptools import setup
setup(
    name='py-readability',
    version='0.0.1',
    description='Calculate readability scores. e.g. Gunning Fog',
    # NOTE(review): 'DiMAscio' and the misspelled 'readabilitiy'/'readabiliity'
    # names below look like typos; confirm the intended package/author names.
    author='Carmine DiMAscio',
    url='https://github.com/cdimascio/py-readability',
    packages=['py-readabilitiy-metrics'],
    install_requires=['nltk>=3.3'],
    package_data={'readability': [], '': ['README.md', 'LICENSE']},
    package_dir={'readabiliity': 'readabiliity'},
    include_package_data=True,
    author_email='cdimascio@gmail.com',
    license='MIT',
    zip_safe=False,
)
| mit | Python | |
e3cd2d3880dbc00e254ac503d5f5c84ab77edc4f | Add Invoke tasks for cleaning and building docs. | audreyr/binaryornot,hackebrot/binaryornot,0k/binaryornot,pombredanne/binaryornot,pombredanne/binaryornot,hackebrot/binaryornot,0k/binaryornot,pombredanne/binaryornot,audreyr/binaryornot,audreyr/binaryornot,hackebrot/binaryornot | tasks.py | tasks.py | from invoke import task, run
@task
def clean():
    # Remove the generated Sphinx build output.
    run("rm -rf docs/_build")
@task('clean')
def build():
    # Rebuild the HTML docs; the 'clean' pre-task runs first.
    run("sphinx-build docs docs/_build")
| bsd-3-clause | Python | |
75131bdf806c56970f3160de3e6d476d9ecbc3a7 | Add problem delete note in a linked list | guozengxin/myleetcode,guozengxin/myleetcode | python/deleteNodeInALinkedList.py | python/deleteNodeInALinkedList.py | # https://leetcode.com/problems/delete-node-in-a-linked-list/
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    """LeetCode 237: delete a node given access only to that node."""

    def deleteNode(self, node):
        """
        :type node: ListNode
        :rtype: void Do not return anything, modify node in-place instead.
        """
        # Tail node: nothing we can do without the predecessor (the
        # original loop was likewise a no-op here).
        if node.next is None:
            return
        # Without the predecessor we cannot unlink `node` itself, so copy
        # the successor's value into it and splice the successor out.
        # O(1) instead of the original O(n) shift of every later value.
        node.val = node.next.val
        node.next = node.next.next
1c59296f6819c5d8e6222c237afa9146ddf6a56b | add new status poller | hep-gc/cloudscheduler,hep-gc/cloudscheduler,hep-gc/cloudscheduler,hep-gc/cloudscheduler | data_collectors/general/csstatus.py | data_collectors/general/csstatus.py | import multiprocessing
from multiprocessing import Process
import logging
import time
import sys
import os
from cloudscheduler.lib.csv2_config import Config
from cloudscheduler.lib.poller_functions import \
start_cycle, \
wait_cycle
import htcondor
import classad
from sqlalchemy import create_engine
from sqlalchemy.orm import Session
from sqlalchemy.ext.automap import automap_base
def _service_msg(service_name):
    """Return systemd's 'Active:' line (label stripped) for *service_name*."""
    command = "service " + service_name + " status | grep 'Active' | cut -c12-"
    return os.popen(command).read()
def status_poller():
    """Poll local service health forever and upsert one status row in csv2.

    Relies on the module-level ``config`` created in ``__main__`` before
    this process is started.
    """
    multiprocessing.current_process().name = "Status Poller"
    # Services whose systemd state is mirrored into csv2_system_status.
    services = ["csv2-main", "csv2-openstack", "csv2-jobs", "csv2-machines", "mariadb", "condor"]

    # Initialize database objects
    Base = automap_base()
    db_engine = create_engine(
        'mysql://%s:%s@%s:%s/%s' % (
            config.db_user,
            config.db_password,
            config.db_host,
            str(config.db_port),
            config.db_name
            )
        )
    Base.prepare(db_engine, reflect=True)
    STATUS = Base.classes.csv2_system_status
    db_session = Session(db_engine)

    cycle_start_time = 0
    new_poll_time = 0
    poll_time_history = [0, 0, 0, 0]

    try:
        while True:
            new_poll_time, cycle_start_time = start_cycle(new_poll_time, cycle_start_time)

            # id is always zero because the table holds exactly one row.
            system_dict = {'id': 0}
            for service in services:
                # Bug fix: the helper is named _service_msg; the original
                # call to undefined service_msg raised NameError.
                system_dict[service + "_msg"] = _service_msg(service)
                if "running" in system_dict[service + "_msg"]:
                    system_dict[service + "_status"] = 1
                else:
                    system_dict[service + "_status"] = 0

            new_status = STATUS(**system_dict)
            try:
                db_session.merge(new_status)
                db_session.commit()
            except Exception:
                logging.exception("Failed to merge and commit status update exiting")
                exit(1)

            wait_cycle(cycle_start_time, poll_time_history, config.sleep_interval_status)
    except Exception:
        # Bug fix: the original outer ``try:`` had no handler at all
        # (a SyntaxError).  Log and re-raise so the supervisor in
        # __main__ notices the death and restarts this poller.
        logging.exception("Status poller failed")
        raise
if __name__ == '__main__':

    config = Config(os.path.basename(sys.argv[0]))

    logging.basicConfig(
        filename=config.log_file,
        level=config.log_level,
        format='%(asctime)s - %(processName)-14s - %(levelname)s - %(message)s')

    logging.info("**************************** starting csstatus *********************************")

    # Live Process objects keyed by name; dead ones are restarted below.
    processes = {}
    process_ids = {
        'status': status_poller,
        }

    # Supervise the pollers until interrupted from the keyboard.
    try:
        while True:
            for process in sorted(process_ids):
                if process not in processes or not processes[process].is_alive():
                    if process in processes:
                        logging.error("%s process died, restarting...", process)
                        del processes[process]
                    else:
                        logging.info("Restarting %s process", process)
                    processes[process] = Process(target=process_ids[process])
                    processes[process].start()
                    time.sleep(config.sleep_interval_main_short)
            time.sleep(config.sleep_interval_main_long)
    except (SystemExit, KeyboardInterrupt):
        logging.error("Caught KeyboardInterrupt, shutting down threads and exiting...")
        # Bug fix: iterate the Process objects, not the dict keys (strings) --
        # the original called str.join() on each key, which raised TypeError,
        # and then str has no .name inside the handler either.
        for process in processes.values():
            try:
                process.join()
            except Exception:
                logging.error("failed to join process %s", process.name)
1a8c361d90243c44a877ebdc4ae92fbfb3226b40 | add test file for words | mcdickenson/python-washu-2014 | day1/words_test.py | day1/words_test.py | import unittest
import words
class TestWordsCode(unittest.TestCase):
def test_has_no_e(self):
self.assertEqual(words.has_no_e("bet"), False)
self.assertEqual(words.has_no_e("bit"), True)
def test_uses_only(self):
self.assertEqual(words.uses_only("ababab", "a"), False)
self.assertEqual(words.uses_only("ababab", "ab"), True)
def test_uses_all(self):
self.assertEqual(words.uses_all("ababab", "abc"), False)
self.assertEqual(words.uses_all("ababab", "ab"), True)
def test_is_abecedarian(self):
self.assertEqual(words.is_abecedarian("abcxyz"), True)
self.assertEqual(words.is_abecedarian("abczyx"), False)
| mit | Python | |
6b92d9fe24fe682c357e3f5a5e6c19f1569bd29e | Add riak backend | disqus/nydus | nydus/db/backends/riak.py | nydus/db/backends/riak.py | """
nydus.db.backends.riak
~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2011 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
from __future__ import absolute_import
import socket
import httplib
from riak import RiakClient, RiakError
from nydus.db.backends import BaseConnection
class Riak(BaseConnection):
    """Nydus connection backend for a single Riak node reached over HTTP."""

    # Exceptions that may be transient and are therefore safe to retry.
    retryable_exceptions = frozenset([socket.error, httplib.HTTPException, RiakError])
    supports_pipelines = False

    def __init__(self, host='127.0.0.1', port=8098, prefix='riak', mapred_prefix='mapred', client_id=None, **options):
        self.host = host
        self.port = port
        self.prefix = prefix
        self.mapred_prefix = mapred_prefix
        self.client_id = client_id
        super(Riak, self).__init__(**options)

    @property
    def identifier(self):
        # e.g. "http://127.0.0.1:8098/riak"
        return "http://%(host)s:%(port)s/%(prefix)s" % vars(self)

    def connect(self):
        """Create a fresh RiakClient for this node."""
        return RiakClient(
            host=self.host,
            port=self.port,
            prefix=self.prefix,
            mapred_prefix=self.mapred_prefix,
            client_id=self.client_id,
        )

    def disconnect(self):
        # Nothing to tear down for this client.
        pass
| apache-2.0 | Python | |
6b630687336de18bb0c9179b7002d310772b6871 | Add corpwiki/iptool | kevinxw/namebench,Forgen/namebench,evelynmitchell/namebench,ulaskaraoren/namebench,thatchristoph/namebench,cloudcache/namebench,feardax/namebench,Trinitaria/namebench,movermeyer/namebench,doadin/namebench,leeoo/namebench,felipsmartins/namebench,sbalun/namebench,asolfre/namebench,beermix/namebench,jjoaonunes/namebench,MANICX100/namebench,jakeylube95/namebench,bluemask2001/namebench,razrichter/namebench,aman-tugnawat/namebench,AgentN/namebench,santoshsahoo/namebench,RichardWilliamPearse/namebench,CookiesandCake/namebench,thanhuwng/namebench,rubasben/namebench,watchamakulit02/namebench,hwuiwon/namebench,Xeleste/namebench,ZuluPro/namebench,RomanHargrave/namebench,phy0/namebench,KingPsychopath/namebench,xubayer786/namebench,Bandito43/namebench,michaeldavidcarr/namebench,vishnunuk/namebench,jtrag/namebench,seshin/namebench,takuya/namebench,qbektrix/namebench,omerhasan/namebench,etxc/namebench,palimadra/namebench,edmilson19/namebench,alexlovelltroy/namebench,stefrobb/namebench,Jeff-Lewis/namebench,TheNite/namebench,AdamHull/namebench,imranrony/namebench,ItsAGeekThing/namebench,petabytekr/namebench,siripuramrk/namebench,CrazeeIvan/namebench,tushevorg/namebench,shannonjlove/namebench,richardgroves/namebench,gdbdzgd/namebench,illAdvised/namebench,isoriss123/namebench,arjun372/namebench,mspringett/namebench,renanrodm/namebench,deeb230/namebench,eladelad/namebench,webhost/namebench,benklaasen/namebench,Arrowofdarkness/namebench,HerlonNascimento/namebench,when30/namebench,el-lumbergato/namebench,melissaihrig/namebench,kristi29091988/namebench,nt1st/namebench,chosen1/namebench,jaechankim/namebench,pombreda/namebench,cvanwie/namebench,cartersgenes/namebench,hashem78/namebench,FatBumbleee/namebench,cah0211/namebench,renatogames2/namebench,souzainf3/namebench,Spindletop16/namebench,rosemead/namebench,manaure/namebench,dsjr2006/namebench,sund/namebench,mystique1029/namebench,nishad/namebench,MarnuLombard/namebench,Danie
lAttia/namebench,ajitsonlion/namebench,doantranhoang/namebench,unreal666/namebench,rbenjamin/namebench,Ritvik1512/namebench,uwevil/namebench,yiyuandao/namebench,ericmckean/namebench,perrytm/namebench,jackjshin/namebench,fbidu/namebench,gavinfaux/namebench,teknix/namebench,woozzoom/namebench,xeoron/namebench,wluizguedes/namebench,ronzohan/namebench,techsd/namebench,GLMeece/namebench,corruptnova/namebench,xxhank/namebench,antar2801/namebench,chamakov/namebench,accomac/namebench,TorpedoXL/namebench,LavyshAlexander/namebench,jimb0616/namebench,BeZazz/lamebench,repomain/namebench,trulow/namebench,antsant/namebench,hypnotika/namebench,cyranodb/namebench,tcffisher/namebench,hitrust/namebench,deepak5/namebench,LegitSavage/namebench,donavoncade/namebench,erasilva/namebench,edesiocs/namebench,Kudeshido/namebench,AViisiion/namebench,KibaAmor/namebench,Hazer/namebench,jevgen/namebench,ajs124/namebench,ran0101/namebench,dimazalfrianz/namebench,snailbob/namebench,iamang/namebench,thiagomagero/namebench,kiseok7/namebench,llaera/namebench,edumatos/namebench,wa111/namebench,pacav69/namebench,crocleco/namebench,bgammill/namebench,MicroWorldwide/namebench,Max-Vader/namebench,lukasfenix/namebench,21winner/namebench,sushifant/namebench,jlobaton/namebench,danieljl/namebench,pyshcoder/namebench,jaded44/namebench,PyroShark/namebench,Jasoning/namebench,nadeemat/namebench,fevangelou/namebench,alebcay/namebench,skuarch/namebench,tectronics/namebench | tools/check_nameserver_popularity.py | tools/check_nameserver_popularity.py | #!/usr/bin/env python
import os
import sys
import pickle
import time
import traceback
import yahoo.search
from yahoo.search.web import WebSearch
APP_ID = 'P5ihFKzV34G69QolFfb3nN7p0rSsYfC9tPGq.IUS.NLWEeJ14SG9Lei0rwFtgwL8cDBrA6Egdw--'
QUERY_MODIFIERS = '-site:txdns.net -site:sitedossier.com -mx -site:dataopedia.com -site:l0t3k.net -syslog -"4.2.2.1" -site:cqcounter.com -site:flow.nttu.edu.tw -site:websiteoutlook.com -site:ipgeolocator.com -site:tdyndns.org -site:ebrara.com -site:onsamehost.com -site:ipaddresscentral.com -site:quia.jp -inetnum -site:domaintools.com -site:domainbyip.com -site:pdos.csail.mit.edu -statistics -"country name" -"Q_RTT" -site:botsvsbrowsers.com -"ptr record" -site:ip-db.com -site:chaip.com.cn -site:lookup365.com -"IP Country" -site:iptoolboxes.com -"Unknown Country" -"Q_RTT" -amerika -whois -Mozilla -site:domaincrawler.com -site:geek-tools.org -site:visualware.com -site:robtex.com -site:domaintool.se -site:opendns.se -site:ungefiltert-surfen.de -site:datakitteh.org -"SLOVAKIA (SK)" -"IP Search" -site:www.medicore.com.ua -site:dig.similarbase.com -site:ipcorporationwiki.com -site:coolwhois.com -site:corporationwiki.com -site:iptool.us'
CACHE_DIR = os.getenv('HOME') + '/.ycache'
def CheckPopularity(ip):
cache_path = os.path.join(CACHE_DIR, ip) + '.pickle'
if os.path.exists(cache_path):
f = open(cache_path)
return pickle.load(f)
else:
try:
query = '"%s" %s' % (ip, QUERY_MODIFIERS)
srch = WebSearch(APP_ID, query=query, results=50)
results = srch.parse_results()
pf = open(cache_path + '.pickle', 'w')
pickle.dump(results.results, pf)
pf.close()
return results
except yahoo.search.SearchError:
print "%s failed" % (ip)
return []
if __name__ == "__main__":
for ip in sys.argv[1:]:
print '%s = %s' % (ip, total)
for result in results.results:
try:
print ' - %s: %s' % (result['Url'], result['Title'])
except UnicodeEncodeError:
print ' - %s' % result['Url']# print results.results
time.sleep(0.5)
| #!/usr/bin/env python
import os
import sys
import pickle
import time
import traceback
import yahoo.search
from yahoo.search.web import WebSearch
APP_ID = 'P5ihFKzV34G69QolFfb3nN7p0rSsYfC9tPGq.IUS.NLWEeJ14SG9Lei0rwFtgwL8cDBrA6Egdw--'
QUERY_MODIFIERS = '-site:txdns.net -site:sitedossier.com -mx -site:dataopedia.com -site:l0t3k.net -syslog -"4.2.2.1" -site:cqcounter.com -site:flow.nttu.edu.tw -site:websiteoutlook.com -site:ipgeolocator.com -site:tdyndns.org -site:ebrara.com -site:onsamehost.com -site:ipaddresscentral.com -site:quia.jp -inetnum -site:domaintools.com -site:domainbyip.com -site:pdos.csail.mit.edu -statistics -"country name" -"Q_RTT" -site:botsvsbrowsers.com -"ptr record" -site:ip-db.com -site:chaip.com.cn -site:lookup365.com -"IP Country" -site:iptoolboxes.com -"Unknown Country" -"Q_RTT" -amerika -whois -Mozilla -site:domaincrawler.com -site:geek-tools.org -site:visualware.com -site:robtex.com -site:domaintool.se -site:opendns.se -site:ungefiltert-surfen.de -site:datakitteh.org -"SLOVAKIA (SK)" -"IP Search" -site:www.medicore.com.ua -site:dig.similarbase.com -site:ipcorporationwiki.com -site:coolwhois.com'
CACHE_DIR = os.getenv('HOME') + '/.ycache'
def CheckPopularity(ip):
    # Return cached results (a plain list) or a fresh SearchResults object.
    # NOTE(review): the two branches return different types.
    cache_path = os.path.join(CACHE_DIR, ip) + '.pickle'
    if os.path.exists(cache_path):
        f = open(cache_path)
        return pickle.load(f)
    else:
        try:
            query = '"%s" %s' % (ip, QUERY_MODIFIERS)
            srch = WebSearch(APP_ID, query=query, results=50)
            results = srch.parse_results()
            # NOTE(review): cache_path already ends with '.pickle', so this
            # writes '<ip>.pickle.pickle' and the cache check above never hits.
            pf = open(cache_path + '.pickle', 'w')
            pickle.dump(results.results, pf)
            pf.close()
            return results
        except yahoo.search.SearchError:
            print "%s failed" % (ip)
            return []
if __name__ == "__main__":
    for ip in sys.argv[1:]:
        # NOTE(review): neither 'total' nor 'results' is ever assigned and
        # CheckPopularity(ip) is never called, so this block raises NameError.
        print '%s = %s' % (ip, total)
        for result in results.results:
            try:
                print ' - %s: %s' % (result['Url'], result['Title'])
            except UnicodeEncodeError:
                print ' - %s' % result['Url']# print results.results
        time.sleep(0.5)
| apache-2.0 | Python |
dc15986b0ff890250d21a36350b689809d535f44 | Create KMP.py | saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA,saru95/DSA | KMP.py | KMP.py | # Github username : yatingupta10
# Website : http://www.yatingupta.me/
# Find occurrences of pattern as a contiguous subsequence of the text.
# For the KMP versions the pattern must be a list or string, because we
# perform array indexing into it, but the text can be anything that can
# be used in a for-loop. The naive match shown first requires the text
# to be a list or string as well.
from __future__ import generators
# Naive matcher: O(p*t) worst case, e.g. pattern='a'*(p-1)+'b', text='a'*t.
def naiveMatch(pattern, text):
    """Return the start of the first occurrence of pattern in text, or None.

    Robustness fix: the empty pattern now matches at index 0 instead of
    raising IndexError as the original while-loop did.
    """
    for start in range(len(text) - len(pattern) + 1):
        if all(pattern[k] == text[start + k] for k in range(len(pattern))):
            return start
# Find and return starting position of first match, or None if no match exists.
#
# Time analysis: every pass through either loop increases the quantity
# 2*startPos + matchLen, which starts at 0 and is bounded by 2*t + p,
# so both loops together do O(t + p) work.
def kmpFirstMatch(pattern, text):
    """Return the start of the first occurrence of pattern in text, or None."""
    shifts = computeShifts(pattern)
    start = 0
    matched = 0
    for ch in text:
        # Slide the pattern right until ch extends the current match, or
        # the pattern has been pushed completely past this character.
        while matched >= 0 and pattern[matched] != ch:
            start += shifts[matched]
            matched -= shifts[matched]
        matched += 1
        if matched == len(pattern):
            return start
# Generator variant of the matcher above, yielding every start position
# (Python 2.2-era 'yield' -- hence the __future__ import at the top).
# Same O(t + p) analysis as kmpFirstMatch.
def kmpAllMatches(pattern, text):
    """Yield the start index of every occurrence of pattern in text."""
    shifts = computeShifts(pattern)
    start = 0
    matched = 0
    for ch in text:
        while matched >= 0 and pattern[matched] != ch:
            start += shifts[matched]
            matched -= shifts[matched]
        matched += 1
        if matched == len(pattern):
            yield start
            # Resume as if this match had just failed, so overlapping
            # occurrences are still reported.
            start += shifts[matched]
            matched -= shifts[matched]
# Construct the shift table used by the KMP matchers above.
# Each loop iteration increases shift + pos, a quantity bounded by 2*p,
# so the table is built in O(p) total time.
def computeShifts(pattern):
    """Return the KMP shift table for pattern (length len(pattern) + 1)."""
    table = [None] * (len(pattern) + 1)
    slide = 1
    for matched in range(len(pattern) + 1):
        while slide < matched and pattern[matched - 1] != pattern[matched - slide - 1]:
            slide += table[matched - slide - 1]
        table[matched] = slide
    return table
return shifts
if __name__ == "__main__":
n = raw_input("Enter the text: ")
print (computeShifts(n))
| mit | Python | |
51642c95ce9d7c7d95648952340d90f4ef2254f3 | Add test for record_panel | khchine5/opal,khchine5/opal,khchine5/opal | opal/tests/test_panels.py | opal/tests/test_panels.py | """
Tests create_singletons command
"""
from opal.core.test import OpalTestCase
from opal.templatetags import panels
from opal.tests.models import Demographics
class RecordPanelTestCase(OpalTestCase):
def test_record_panel(self):
expected = dict(
name='demographics',
singleton=True,
title='Demographics',
detail_template='records/demographics.html',
icon=None,
editable=1,
angular_filter=None,
noentries=None,
only_display_if_exists=False
)
result = panels.record_panel(Demographics())
self.assertEqual(expected, result)
| agpl-3.0 | Python | |
d9959b9a8e38fc5c6b23618fdbd8a67423302e4e | include forgotten exceptions.py file | Duke-GCB/DukeDSClient,Duke-GCB/DukeDSClient | ddsc/exceptions.py | ddsc/exceptions.py | class DDSUserException(Exception):
"""
Exception with an error message to be displayed to the user on the command line.
"""
pass
| mit | Python | |
45215b36e544f8d7a9ac21a825807d6e49d2ade9 | Add binarySearch function | iandmyhand/python-utils | DataStructuresAndAlgorithmsInPython/BinarySearch.py | DataStructuresAndAlgorithmsInPython/BinarySearch.py | ##-*- coding: utf-8 -*-
#!/usr/bin/python
""" Returns either the index of the location in the array,
or -1 if the array did not contain the targetValue
"""
import math
def binarySearch(array, targetValue):
    """Return the index of targetValue in the sorted array, or -1.

    Also prints each probe and the total probe count, as before.
    """
    minimum = 0
    maximum = len(array) - 1
    guessesCount = 0
    while maximum >= minimum:
        guessesCount += 1
        # Floor division keeps the midpoint an int (the original's '/'
        # plus math.floor produced a float first on Python 3); the
        # original Python 2 'print' statements were a SyntaxError there.
        guess = (minimum + maximum) // 2
        print("A number of guess: " + str(guess))
        if array[guess] == targetValue:
            print("Total number of guesses: " + str(guessesCount))
            return guess
        elif array[guess] < targetValue:
            minimum = guess + 1
        else:
            maximum = guess - 1
    return -1
# Smoke test: search a sorted list of the first 25 primes.
primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37,
          41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97]

result = binarySearch(primes, 73)
# Python 3 print function (the original 'print "..."' statement was a
# SyntaxError under Python 3).
print("Found prime at index " + str(result))

# '==' instead of 'is': identity comparison of ints only worked by
# accident (CPython small-int caching) and warns on Python 3.8+.
assert binarySearch(primes, 7) == 3
assert binarySearch(primes, 13) == 5
assert binarySearch(primes, 73) == 20
b423ea140a8f041bca84390ef698d13789a128df | Convert Numbers to words | yoda-yoda/numbers-to-words | number_to_words.py | number_to_words.py | """Convert Numbers to Words.
1001 - One thousand and One
"""
import math
class NumbersToWord(object):
    """Convert numbers to English words, e.g. 1001 -> 'one thousand and one'."""

    hyphen = '-'
    conjunction = ' and '
    separator = ', '
    negative = 'negative '
    decimal = ' point '
    space = ' '

    dictionary = {
        0: 'zero',
        1: 'one',
        2: 'two',
        3: 'three',
        4: 'four',
        5: 'five',
        6: 'six',
        7: 'seven',
        8: 'eight',
        9: 'nine',
        10: 'ten',
        11: 'eleven',
        12: 'twelve',
        13: 'thirteen',
        14: 'fourteen',
        15: 'fifteen',
        16: 'sixteen',
        17: 'seventeen',
        18: 'eighteen',
        19: 'nineteen',
        20: 'twenty',
        30: 'thirty',
        40: 'forty',  # bug fix: was misspelled 'fourty'
        50: 'fifty',
        60: 'sixty',
        70: 'seventy',
        80: 'eighty',
        90: 'ninety',
        100: 'hundred',
        1000: 'thousand',
        1000000: 'million',
        1000000000: 'billion',
        1000000000000: 'trillion',
        1000000000000000: 'quadrillion',
        1000000000000000000: 'quintillion'
    }

    def number_to_words(self, number):
        """Return *number* spelled out in English words.

        Returns False for non-numeric input.  A fractional part is
        rendered digit-by-digit after the word 'point'.
        """
        if not isinstance(number, (int, float)):
            return False
        if number < 0:
            return self.negative + self.number_to_words(abs(number))

        # Split off any fractional digits, keeping them as a string so
        # leading zeros are preserved (int() used to drop them).
        fraction = None
        if "." in str(number):
            whole, fraction = str(number).split('.')
            number = int(whole)

        if number < 21:
            string = self.dictionary[number]
        elif number < 100:
            # Floor division: the original '/' produced floats on Python 3
            # (e.g. 35 -> dictionary[35.0] -> KeyError).
            tens = (number // 10) * 10
            units = number % 10
            string = self.dictionary[tens]
            if units:
                string += self.hyphen + self.dictionary[units]
        elif number < 1000:
            hundreds = number // 100
            remainder = number % 100
            string = self.dictionary[hundreds] + self.space + \
                self.dictionary[100]
            if remainder:
                string += self.conjunction + self.number_to_words(remainder)
        else:
            # Find the largest power of 1000 <= number with pure-int
            # arithmetic.  The original math.pow() float made
            # 1001 % 1000.0 == 1.0, which re-entered the fraction branch
            # and appended a spurious ' point zero'.
            base_unit = 1000
            while base_unit * 1000 <= number:
                base_unit *= 1000
            num_base_units = number // base_unit
            remainder = number % base_unit
            string = self.number_to_words(num_base_units) + self.space + \
                self.dictionary[base_unit]
            if remainder:
                string += self.conjunction if remainder < 100 else \
                    self.separator
                string += self.number_to_words(remainder)

        if fraction:
            string += self.decimal
            string += ''.join(
                self.dictionary[int(digit)] + self.space for digit in fraction)

        return string
| mit | Python | |
0e43fce67c2c53fe2a7dbf233df86c042501e477 | Move explain_sam_flags.py to public repository | alecw/picard,nh13/picard,broadinstitute/picard,alecw/picard,broadinstitute/picard,nh13/picard,annkupi/picard,broadinstitute/picard,alecw/picard,nh13/picard,alecw/picard,annkupi/picard,nh13/picard,annkupi/picard,broadinstitute/picard,annkupi/picard,broadinstitute/picard | src/scripts/explain_sam_flags.py | src/scripts/explain_sam_flags.py | #!/usr/bin/env python
# The Broad Institute
# SOFTWARE COPYRIGHT NOTICE AGREEMENT
# This software and its documentation are copyright 2008 by the
# Broad Institute/Massachusetts Institute of Technology. All rights are
# reserved.
# This software is supplied without any warranty or guaranteed support
# whatsoever. Neither the Broad Institute nor MIT can be responsible for its
# use, misuse, or functionality.
# $Header$
"""usage %prog decimal-flag [decimal-flag...]
Explain each flag on the command line in plain English
"""
from __future__ import division
import sys
# (human-readable description, FLAG bit mask) in SAM specification order.
lstFlags = [
    ("read paired", 0x1),
    ("read mapped in proper pair", 0x2),
    ("read unmapped", 0x4),
    ("mate unmapped", 0x8),
    ("read reverse strand", 0x10),
    ("mate reverse strand", 0x20),
    ("first in pair", 0x40),
    ("second in pair", 0x80),
    ("not primary alignment", 0x100),
    ("read fails platform/vendor quality checks", 0x200),
    ("read is PCR or optical duplicate", 0x400)
]


def explain_sam_flags(iFlags):
    """Print iFlags followed by one indented line per flag bit set in it."""
    # "%s :" reproduces the exact output of the original Python-2-only
    # `print iFlags, ":"` statement, while also working on Python 3.
    print("%s :" % iFlags)
    for strFlagName, iMask in lstFlags:
        if iFlags & iMask:
            print("\t" + strFlagName)
def main(argv=None):
    """Explain each decimal flag supplied on the command line.

    Returns None, so ``sys.exit(main())`` exits with status 0.
    """
    args = sys.argv if argv is None else argv
    for token in args[1:]:
        explain_sam_flags(int(token))
if __name__ == "__main__":
    # main() returns None, so this exits with status 0 on success.
    sys.exit(main())
| mit | Python | |
5352740a1cc508a6b902f447a80960fa237414aa | Add ProgressPathView | controversial/ui2 | ui2/view_classes/ProgressPathView.py | ui2/view_classes/ProgressPathView.py | from objc_util import *
import ui
def _get_CGColor(color):
    """Get a CGColor from a wide range of formats."""
    # ui.parse_color normalizes any supported spec to (r, g, b, a) floats.
    red, green, blue, alpha = ui.parse_color(color)
    objc_color = UIColor.colorWithRed_green_blue_alpha_(red, green, blue, alpha)
    return objc_color.CGColor()
class ProgressPathView(ui.View):
    """A view class which can turn a ui.Path into a progress bar.

    This allows you not only to create linear and circular progress bars,
    but to create progress bars of any shape.  The path is rendered on a
    CAShapeLayer; progress is expressed by how much of the stroke is drawn.
    """
    def __init__(self, path, width=5, color="#21abed"):
        # Bridge this ui.View to its underlying ObjC UIView so a sublayer
        # can be attached directly.
        self._objc = ObjCInstance(self)
        # Set up the layer on which the path is rendered
        self._layer = ObjCClass("CAShapeLayer").new()
        self._layer.setPath_(ObjCInstance(path).CGPath())
        self._objc.layer().addSublayer_(self._layer)
        self._layer.setFillColor_(UIColor.clearColor().CGColor()) # No fill
        self.tint_color = color
        self.stroke_width = width
        self.progress = 0 # Progress starts at 0
    @property
    def progress(self):
        # Fraction of the path stroked, 0.0-1.0 (CAShapeLayer strokeEnd).
        return self._layer.strokeEnd()
    @progress.setter
    def progress(self, value):
        self._layer.setStrokeEnd_(value)
    @property
    def stroke_width(self):
        # Stroke width in points (CAShapeLayer lineWidth).
        return self._layer.lineWidth()
    @stroke_width.setter
    def stroke_width(self, width):
        self._layer.setLineWidth_(width)
    @property
    def tint_color(self):
        # Returns the stroke color as an (r, g, b, a) tuple of floats.
        color = UIColor.colorWithCGColor_(self._layer.strokeColor())
        return color.red(), color.green(), color.blue(), color.alpha()
    @tint_color.setter
    def tint_color(self, color):
        self._layer.setStrokeColor_(_get_CGColor(color))
if __name__ == "__main__":
    # Demo: a triangular progress path presented in a sheet, advanced to
    # 0.7 after 0.75s and to 1.0 after 1.5s via ui.delay timers.
    p = ui.Path()
    p.move_to(10, 10)
    p.line_to(50, 10)
    p.line_to(50, 50)
    p.close()
    a = ProgressPathView(p)
    b = ui.View()
    b.add_subview(a)
    b.present("sheet")
    a.progress = 0.1
    def advance():
        a.progress = 0.7
    ui.delay(advance, 0.75)
    def advance2():
        a.progress = 1
    ui.delay(advance2, 1.5)
| mit | Python | |
8738accb2a612a3c1e41cc00aa337d0be890f4a0 | add problem 052 | smrmkt/project_euler | problem_052.py | problem_052.py | #!/usr/bin/env python
#-*-coding:utf-8-*-
'''
'''
import timeit
def loop(n):
    """Project Euler 52: smallest x whose multiples share its digits.

    Returns the smallest positive integer x such that x, 2x, ..., nx all
    contain exactly the same digits, searching x < 10**7.  Returns None if
    no such x exists below the bound.
    """
    for i in range(10, 10000000):
        # Cheap pre-filter: i*n must have the same digit count as i,
        # otherwise the multiples cannot be digit permutations of i.
        # (The original skipped every i not starting with '1', which is
        # only sound for n >= 5 and could miss answers for smaller n.)
        if len(str(i)) != len(str(i * n)):
            continue
        digits = sorted(str(i))
        # all() short-circuits on the first mismatching multiple instead
        # of materialising a full list as the original did.
        if all(sorted(str(i * j)) == digits for j in range(2, n + 1)):
            return i
    return None
if __name__ == '__main__':
    # Print the answer, then time one fresh run of the search.
    print loop(6)
    print timeit.Timer('problem_052.loop(6)', 'import problem_052').timeit(1)
| mit | Python | |
88f0e5ba8a404f0fcdaaaacc69109775182b7213 | Add squashed migrations | francbartoli/dj-experiment,francbartoli/dj-experiment | dj_experiment/migrations/0002_auto_20170802_1206_squashed_0004_auto_20170802_1230.py | dj_experiment/migrations/0002_auto_20170802_1206_squashed_0004_auto_20170802_1230.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-02 17:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated squashed migration: stands in for migrations
    # 0002-0004 of dj_experiment.  Avoid hand-editing the operations;
    # Django relies on them matching the squashed history exactly.

    replaces = [('dj_experiment', '0002_auto_20170802_1206'), ('dj_experiment', '0003_auto_20170802_1708'), ('dj_experiment', '0004_auto_20170802_1230')]

    dependencies = [
        ('dj_experiment', '0001_initial_squashed_0006_auto_20170802_0904'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='catalog',
            name='xperiment',
        ),
        migrations.AddField(
            model_name='catalog',
            name='xperiments',
            field=models.ForeignKey(default='', on_delete=django.db.models.deletion.CASCADE, related_name='catalogs', to='dj_experiment.Experiment'),
            preserve_default=False,
        ),
        # NOTE(review): each field below is altered twice (text vs bytes
        # defaults) because the squash preserved both historical states of
        # the replaced migrations -- confirm before cleaning up.
        migrations.AlterField(
            model_name='experiment',
            name='data_dir',
            field=models.CharField(default='./', max_length=250),
        ),
        migrations.AlterField(
            model_name='experiment',
            name='separator',
            field=models.CharField(default='.', max_length=1),
        ),
        migrations.AlterField(
            model_name='experiment',
            name='data_dir',
            field=models.CharField(default=b'RCM data', max_length=250),
        ),
        migrations.AlterField(
            model_name='experiment',
            name='separator',
            field=models.CharField(default=b'_', max_length=1),
        ),
    ]
| mit | Python | |
26eba1f16c44ed6693b2a575a6a2c5ebef9401b5 | Create Movie object city_lights | vishallama/udacity-fullstack-movie-trailer,vishallama/udacity-fullstack-movie-trailer | entertainment_center.py | entertainment_center.py | # entertainment_center.py
import media
__author__ = 'vishal lama'

# A media.Movie instance for Chaplin's "City Lights" (1931).  Positional
# arguments, in order: title, plot summary, poster URL, trailer URL, cast,
# release date, rating, language, runtime, studio, trivia notes.
# NOTE(review): the exact argument meanings are defined by media.Movie's
# constructor -- confirm against media.py.
city_lights = media.Movie(
    "City Lights",
    "A tramp falls in love with a beautiful blind girl. Her family is in "
    "financial trouble. The tramp's on-and-off friendship with a wealthy "
    "man allows him to be the girl's benefactor and suitor.",
    "https://upload.wikimedia.org/wikipedia/en/f/f2/City_Lights_film.jpg",
    "https://www.youtube.com/watch?v=b2NTUnujk1I",
    ["Charles Chaplin", "Virginia Cherrill",
     "Florence Lee", "Harry Myers",
     "Al Ernest Garcia", "Hank Mann"
     ],
    "7 March 1931",
    "G",
    "English",
    "87 min",
    "Charles Chaplin Productions",
    ["When the film opened on 31 January 1931, Albert Einstein joined "
     "Charles Chaplin at the theater. When the film opened in England, "
     "George Bernard Shaw joined him.",
     "Charles Chaplin's personal favorite of all his films.",
     "Russian director Andrei Tarkovsky cited this as his favorite film. "
     "Woody Allen also calls it 'Chaplin's best picture'.",
     "In 2008, this film was voted #1 on AFI's list over the ten best "
     "romantic comedies of all time.",
     "Virginia Cherrill was cast on a whim when Charles Chaplin spotted "
     "her at a boxing match."
     ]
    )
| mit | Python | |
714537e1cff4009a5e8ba93da94954b84536127a | Add Teli API | tuxxy/SMIRCH | api.py | api.py | import requests
class Teli:
    """Minimal client for the teleapi.net SMS/MMS send endpoints."""

    TOKEN = ""
    API = ""

    def __init__(self, TOKEN):
        self.TOKEN = TOKEN
        self.API = "https://sms.teleapi.net/{}/send"

    def send_sms(self, src, dest, message):
        """POST a text message; returns the HTTP status code."""
        payload = {
            'token': self.TOKEN,
            'source': src,
            'destination': dest,
            'message': message,
        }
        response = requests.post(self.API.format("sms"), data=payload)
        return response.status_code

    def send_mms(self, src, dest, file_name=None, file_data=None, file_url=None):
        """POST a media message (by name, data, or URL); returns the HTTP
        status code."""
        payload = {
            'token': self.TOKEN,
            'source': src,
            'destination': dest,
            'file_name': file_name,
            'file_data': file_data,
            'file_url': file_url,
        }
        response = requests.post(self.API.format("mms"), data=payload)
        return response.status_code
| agpl-3.0 | Python | |
8bdf94c29418a3826e5c6fd3a76f96051326bfe6 | Add management command extract votes #126 | MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets,MTG/freesound-datasets | datasets/management/commands/extract_votes.py | datasets/management/commands/extract_votes.py | from django.core.management.base import BaseCommand
import json
from datasets.models import CandidateAnnotation, Vote, TaxonomyNode, Dataset
class Command(BaseCommand):
    # NOTE(review): the two literals below are concatenated without a
    # separating space ("...votesUsage:"), and "dataset_shor_name" looks
    # like a typo for "dataset_short_name" -- confirm before changing this
    # user-facing help text.
    help = 'Extract user votes' \
           'Usage: python manage.py extract_votes <dataset_shor_name> <output_file>'

    def add_arguments(self, parser):
        # Positional args: which dataset to export, and the JSON output path.
        parser.add_argument('dataset_short_name', type=str)
        parser.add_argument('output_file', type=str)

    def handle(self, *args, **options):
        dataset_short_name = options['dataset_short_name']
        output_file = options['output_file']
        dataset = Dataset.objects.get(short_name=dataset_short_name)
        nodes = TaxonomyNode.objects.all()
        # One bucket per taxonomy node: freesound ids grouped by vote value
        # (PP/PNP/NP/U) plus every candidate annotation for that node.
        votes_dict = {node_id: {'PP': list(),
                                'PNP': list(),
                                'NP': list(),
                                'U': list(),
                                'candidates': list()} for node_id in nodes.values_list('node_id', flat=True)}
        # Numeric vote values as stored: 1=PP, 0.5=PNP, -1=NP, 0=U.
        vote_value_to_letter = {1: 'PP', 0.5: 'PNP', -1: 'NP', 0: 'U'}
        votes_with_info = Vote.objects.filter(candidate_annotation__sound_dataset__dataset=dataset)\
            .values('vote', 'candidate_annotation__taxonomy_node__node_id',
                    'candidate_annotation__sound_dataset__sound__freesound_id')
        candidate_annotations = CandidateAnnotation.objects.filter(sound_dataset__dataset=dataset)\
            .values('taxonomy_node__node_id', 'sound_dataset__sound__freesound_id')
        # Bucket each vote's sound id under its node and vote letter.
        for vote in votes_with_info:
            votes_dict[vote['candidate_annotation__taxonomy_node__node_id']]\
                [vote_value_to_letter[vote['vote']]]\
                .append(vote['candidate_annotation__sound_dataset__sound__freesound_id'])
        # Record every candidate annotation's sound id under its node.
        for candidate_annotation in candidate_annotations:
            votes_dict[candidate_annotation['taxonomy_node__node_id']]['candidates']\
                .append(candidate_annotation['sound_dataset__sound__freesound_id'])
        # NOTE(review): the file handle is never explicitly closed; consider
        # `with open(output_file, 'w') as fd: json.dump(votes_dict, fd)`.
        json.dump(votes_dict, open(output_file, 'w'))
| agpl-3.0 | Python | |
1fe2fea7f7f35c61bb63c641042b9bf12f896fca | add module oslo_policy.common.sql | darren-wang/op | oslo_policy/common/sql.py | oslo_policy/common/sql.py | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQL backends for the various services.
Before using this module, call initialize(). This has to be done before
CONF() because it sets up configuration options.
"""
import contextlib

from oslo_db.sqlalchemy import models
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_serialization import jsonutils
import six
import sqlalchemy as sql
from sqlalchemy.ext import declarative
from sqlalchemy.orm.attributes import InstrumentedAttribute
from sqlalchemy import types as sql_types

from oslo_policy import exception
LOG = log.getLogger(__name__)
ModelBase = declarative.declarative_base()

# For exporting to other modules
# These re-exports let consumers of this module avoid importing
# sqlalchemy directly.
Column = sql.Column
String = sql.String
ForeignKey = sql.ForeignKey
NotFound = sql.orm.exc.NoResultFound
Boolean = sql.Boolean
Text = sql.Text
UniqueConstraint = sql.UniqueConstraint
def initialize(conf):
    """Register the default database connection for the policy backend.

    Must be called before CONF() so the 'policy_conn' option default exists
    in the 'oslo_policy' group when the configuration is evaluated.
    """
    connection = "sqlite:///keystone.db"
    conf.set_default('policy_conn', connection, group='oslo_policy')
def initialize_decorator(init):
    """Ensure string fields do not exceed their declared column length.

    Wraps a model ``__init__`` so that any keyword argument mapped to a
    length-limited String column is validated, raising StringLengthExceeded
    when too long.  Decoration is used instead of inheritance because the
    declarative metaclass checks __tablename__, primary key columns, etc.
    at class-definition time.

    Fix: ``InstrumentedAttribute`` was referenced here without ever being
    imported, so every oversize check raised NameError at runtime; it is
    now imported from sqlalchemy.orm.attributes at the top of the module.
    """
    def initialize(self, *args, **kwargs):
        cls = type(self)
        for k, v in kwargs.items():
            if hasattr(cls, k):
                attr = getattr(cls, k)
                # Only mapped columns carry length metadata.
                if isinstance(attr, InstrumentedAttribute):
                    column = attr.property.columns[0]
                    if isinstance(column.type, String):
                        # Normalize to text before measuring length.
                        if not isinstance(v, six.text_type):
                            v = six.text_type(v)
                        if column.type.length and column.type.length < len(v):
                            raise exception.StringLengthExceeded(
                                string=v, type=k, length=column.type.length)
        init(self, *args, **kwargs)
    return initialize


ModelBase.__init__ = initialize_decorator(ModelBase.__init__)
# Special Fields
class JsonBlob(sql_types.TypeDecorator):
    # Stores arbitrary JSON-serialisable structures in a TEXT column,
    # (de)serialising transparently on bind and result.
    impl = sql.Text

    def process_bind_param(self, value, dialect):
        # Python object -> JSON string on the way into the database.
        return jsonutils.dumps(value)

    def process_result_value(self, value, dialect):
        # JSON string -> Python object on the way out.
        return jsonutils.loads(value)
class DictBase(models.ModelBase):
    """Mixin that maps model rows to and from plain dictionaries.

    Keys listed in ``attributes`` are real model attributes; everything
    else round-trips through the ``extra`` blob.
    """

    attributes = []

    @classmethod
    def from_dict(cls, d):
        """Build an instance from ``d``, routing unknown keys into extra."""
        known = d.copy()
        spare = {}
        for key in d:
            if key not in cls.attributes and key != 'extra':
                spare[key] = known.pop(key)
        known['extra'] = spare
        return cls(**known)

    def to_dict(self, include_extra_dict=False):
        """Returns the model's attributes as a dictionary.

        If include_extra_dict is True, 'extra' attributes are literally
        included in the resulting dictionary twice, for backwards-
        compatibility with a broken implementation.
        """
        result = self.extra.copy()
        for name in type(self).attributes:
            result[name] = getattr(self, name)
        if include_extra_dict:
            result['extra'] = self.extra.copy()
        return result

    def __getitem__(self, key):
        # Extra values shadow real attributes of the same name.
        return self.extra[key] if key in self.extra else getattr(self, key)
# Process-wide cached EngineFacade; built lazily on first use.
_engine_facade = None


def _get_engine_facade(conf):
    # NOTE(review): the cache ignores `conf` on subsequent calls, so a
    # changed configuration has no effect until cleanup() is called.
    global _engine_facade

    if not _engine_facade:
        _engine_facade = db_session.EngineFacade(conf.oslo_policy.policy_conn)
    return _engine_facade


def cleanup():
    # Drop the cached facade so the next call rebuilds it (used by tests).
    global _engine_facade
    _engine_facade = None


def get_engine(conf):
    # SQLAlchemy Engine bound to the configured policy connection.
    return _get_engine_facade(conf).get_engine()


def get_session(conf, expire_on_commit=False):
    # A new Session from the shared facade.
    return _get_engine_facade(conf).get_session(expire_on_commit=expire_on_commit)


@contextlib.contextmanager
def transaction(conf, expire_on_commit=False):
    """Return a SQLAlchemy session in a scoped transaction."""
    session = get_session(conf, expire_on_commit=expire_on_commit)
    with session.begin():
        yield session
1ede9bd211cd8ea6aac4db6f8818804cb778a022 | Add a view that serves a single static file | chrisseto/dinosaurs.sexy,chrisseto/dinosaurs.sexy | dinosaurs/views.py | dinosaurs/views.py | import os
import tornado.web
import tornado.ioloop
class SingleStatic(tornado.web.StaticFileHandler):
    """Static file handler that always serves one specific file."""

    def initialize(self, path):
        # Split the configured path; the parent handler serves from the
        # directory while get() pins the request to the one filename.
        directory, name = os.path.split(path)
        self.dirname = directory
        self.filename = name
        super(SingleStatic, self).initialize(directory)

    def get(self, path=None, include_body=True):
        # Ignore the requested path and serve the configured file instead.
        super(SingleStatic, self).get(self.filename, include_body)
| mit | Python | |
3f7a03baad15da817e81a8524b87f32c9ca79c1b | Add image service tests | CptSpaceToaster/memegen,joshfriend/memegen,DanLindeman/memegen,DanLindeman/memegen,DanLindeman/memegen,joshfriend/memegen,DanLindeman/memegen,CptSpaceToaster/memegen,joshfriend/memegen,CptSpaceToaster/memegen,joshfriend/memegen | memegen/test/test_services_image.py | memegen/test/test_services_image.py | from unittest.mock import Mock
import pytest
class TestImageService:
    """Unit tests for template lookup on the image service fixture."""

    def test_find_template(self, image_service):
        expected = Mock()
        image_service.template_store.read.return_value = expected

        result = image_service.find_template('my_key')

        assert result is expected
        assert image_service.template_store.read.called

    def test_find_template_not_found(self, image_service):
        # An unknown key surfaces as KeyError rather than returning None.
        image_service.template_store.read.return_value = None

        with pytest.raises(KeyError):
            image_service.find_template('unknown_key')
| mit | Python | |
33a439d5b52036bb272c8866017b973bef18237d | Create tests.py | bskinn/opan,bskinn/opan | tests.py | tests.py | #...
| mit | Python | |
234897a36cdf5a5cf5b7550f6d176f4168d7a6c7 | add basic test suite | kua-hosi-GRUp/Flask-Bones,Urumasi/Flask-Bones,cburmeister/flask-bones,cburmeister/flask-bones,kua-hosi-GRUp/Flask-Bones,kua-hosi-GRUp/Flask-Bones,Urumasi/Flask-Bones,Urumasi/Flask-Bones,cburmeister/flask-bones | tests.py | tests.py | import os
import app
import unittest
class TestCase(unittest.TestCase):
    """Smoke tests for the Flask application."""

    def setUp(self):
        # Build a test client for issuing requests against the app.
        # (Removed a dangling ``self.`` statement here that made the
        # original file a syntax error.)
        self.app = app.app.test_client()

    def tearDown(self):
        # Nothing to clean up; the test client holds no external resources.
        pass

    def test_index(self):
        resp = self.app.get('/')
        # resp.data is bytes on Python 3, so compare against a bytes
        # literal (b'...' is equivalent to '...' on Python 2).
        assert b'Hello World!' in resp.data
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
| mit | Python | |
7e283316050dd4e33f1f0a7182c13eef18c82039 | Create AmbyByeBye.py | MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,sstocker46/pyrobotlab,MyRobotLab/pyrobotlab,MyRobotLab/pyrobotlab | home/AdolphSmith/AmbyByeBye.py | home/AdolphSmith/AmbyByeBye.py | arduino = Runtime.createAndStart("arduino","Arduino")
arduino.connect("COM9")
# NOTE(review): mouth is created with Runtime.create(), not
# createAndStart(), even though speakBlocking() is used below -- confirm
# the Speech service is started elsewhere.
mouth = Runtime.create("mouth","Speech")
s8 = Runtime.createAndStart("s8","Servo")
s9 = Runtime.createAndStart("s9","Servo")
s10 = Runtime.createAndStart("s10","Servo")
s11 = Runtime.createAndStart("s11","Servo")
s13 = Runtime.createAndStart("s13","Servo")
s14 = Runtime.createAndStart("s14","Servo")
# s15 = Runtime.createAndStart("s15","Servo")
s16 = Runtime.createAndStart("s16","Servo")
s17 = Runtime.createAndStart("s17","Servo")
# s34 = Runtime.createAndStart("s43","Servo")
# s35 = Runtime.createAndStart("s35","Servo")
# s30 = Runtime.createAndStart("s30","Servo")
# s31 = Runtime.createAndStart("s31","Servo")
# s36 = Runtime.createAndStart("s36","Servo")
# s37 = Runtime.createAndStart("s37","Servo")
# s38 = Runtime.createAndStart("s38","Servo")
# s39 = Runtime.createAndStart("s39","Servo")
# s42 = Runtime.createAndStart("s42","Servo")
# s43 = Runtime.createAndStart("s43","Servo")
# s26 = Runtime.createAndStart("s26","Servo")
s8.setRest(12) # Left Elbow
s9.setRest(75) # left arm Turn
s10.setRest(103) # left Shaulder up
s11.setRest(123) # Left Omniplate
s13.setRest(81) # Neck Turn
s14.setRest(93) # Right Arm Up
# s15.setRest(63) # Right Omniplate
s16.setRest(79) # Right Arm Turn
s17.setRest(138) # Right Elbow
# s34.setRest(66) # Left Hip
# s35.setRest(126) # Right Hip
# s30.setRest(97) # Left Leg Turn
# s31.setRest(120) # Right Leg Turn
# s36.setRest(54) # Left Leg Up
# s37.setRest(46) # Right Leg up
# s38.setRest(75) # Knee Left
# s39.setRest(75) # Knee Right
# s42.setRest(132) # Ankle Left
# s43.setRest(89) # Ankle Right
# s26.setRest(71) # Mouth
s8.attach("arduino",8)
s9.attach("arduino",9)
s10.attach("arduino",10)
s11.attach("arduino",11)
s13.attach("arduino",13)
# Fixed: the three lines below previously attached to the misspelled
# service name "ardiuno", which does not match the "arduino" service
# created above, leaving s14/s16/s17 unattached.
s17.attach("arduino",17)
s16.attach("arduino",16)
s14.attach("arduino",14)
def rest():
    # Drive each servo back to its configured rest position, in the same
    # order as before: left arm (s8-s10), neck (s13), omniplate (s11),
    # then right arm (s17, s16, s14).
    for servo in (s8, s9, s10, s13, s11, s17, s16, s14):
        servo.rest()
def wave():
    # Raise the left arm into waving position...
    for servo, angle in ((s8, 9), (s9, 80), (s8, 7), (s8, 105),
                         (s9, 157), (s10, 6), (s8, 149)):
        servo.moveTo(angle)
    # ...swing the elbow back and forth (pause before each move)...
    for angle in (60, 149, 60):
        sleep(0.4)
        s8.moveTo(angle)
    sleep(0.4)
    # ...then lower the arm back down.
    for servo, angle in ((s8, 135), (s10, 102), (s9, 68), (s8, 1)):
        servo.moveTo(angle)
def bye():
    # Raise the right arm into waving position...
    for servo, angle in ((s17, 139), (s16, 81), (s17, 53), (s16, 157),
                         (s14, 0), (s17, 5), (s17, 74)):
        servo.moveTo(angle)
    # ...swing the elbow back and forth (pause before each move)...
    for angle in (1, 74, 1):
        sleep(0.4)
        s17.moveTo(angle)
    sleep(0.4)
    # ...then lower the arm back down.
    for servo, angle in ((s17, 32), (s14, 98), (s16, 83), (s17, 136)):
        servo.moveTo(angle)
def attach():
    # Energize every servo (same order as before: s8-s11, s13, s14, s16, s17).
    for servo in (s8, s9, s10, s11, s13, s14, s16, s17):
        servo.attach()
def detach():
    # De-energize every servo (same order as attach()).
    for servo in (s8, s9, s10, s11, s13, s14, s16, s17):
        servo.detach()
# Run the full demo routine three times: greet, wave, look around, sign off.
# NOTE(review): the spoken strings contain typos ("thrue mirobotlab"), but
# changing them would change the robot's speech output, so they are kept.
for x in range(0, 3):
    attach()
    # do a gesture
    rest()
    mouth.speakBlocking("I want to show you what i can do")
    mouth.speakBlocking("I am the new Amby bot made by Adolph")
    mouth.speakBlocking("Hello every one")
    wave()
    sleep(1)
    rest()
    sleep(1)
    # wait for a second
    sleep(1)
    # do another gesture
    mouth.speakBlocking("I think you are going to like me")
    s13.moveTo(36) # Neck Turn
    mouth.speakBlocking("I am alive")
    sleep(0.5)
    s13.moveTo(109) # Neck Turn
    sleep(0.5)
    mouth.speakBlocking("I am alive thrue mirobotlab")
    s13.moveTo(62) # Neck Turn
    sleep(0.5)
    mouth.speakBlocking("I like you all")
    s13.moveTo(36) # Neck Turn
    sleep(0.2)
    s13.moveTo(109) # Neck Turn
    sleep(0.2)
    s13.rest()
    mouth.speakBlocking("You all are nice")
    mouth.speakBlocking("Bye Bye Guys")
    wave()
    sleep(1)
    rest()
    sleep(1)
| apache-2.0 | Python | |
4d3ed1ff13cde88abe695c724d7c8946578cde21 | Add py-docopt package (#8236) | LLNL/spack,mfherbst/spack,matthiasdiener/spack,iulian787/spack,krafczyk/spack,tmerrick1/spack,tmerrick1/spack,LLNL/spack,mfherbst/spack,iulian787/spack,iulian787/spack,tmerrick1/spack,iulian787/spack,matthiasdiener/spack,LLNL/spack,matthiasdiener/spack,mfherbst/spack,iulian787/spack,mfherbst/spack,krafczyk/spack,LLNL/spack,krafczyk/spack,mfherbst/spack,krafczyk/spack,krafczyk/spack,matthiasdiener/spack,tmerrick1/spack,LLNL/spack,tmerrick1/spack,matthiasdiener/spack | var/spack/repos/builtin/packages/py-docopt/package.py | var/spack/repos/builtin/packages/py-docopt/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyDocopt(PythonPackage):
    """Command-line interface description language."""

    homepage = "http://docopt.org/"
    url = "https://pypi.io/packages/source/d/docopt/docopt-0.6.2.tar.gz"

    # Modules Spack imports to sanity-check the installation.
    import_modules = ['docopt']

    version('0.6.2', '4bc74561b37fad5d3e7d037f82a4c3b1')

    # Pure-Python package; only needs setuptools at build time.
    depends_on('py-setuptools', type='build')
| lgpl-2.1 | Python | |
889b6254526b5b49cd27d2f7bf7603a60f4f64fe | Add py-geeadd package (#12366) | iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack,LLNL/spack,iulian787/spack,iulian787/spack,iulian787/spack,LLNL/spack,LLNL/spack | var/spack/repos/builtin/packages/py-geeadd/package.py | var/spack/repos/builtin/packages/py-geeadd/package.py | # Copyright 2013-2019 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyGeeadd(PythonPackage):
    """Google Earth Engine Batch Assets Manager with Addons."""

    homepage = "https://github.com/samapriya/gee_asset_manager_addon"
    url = "https://pypi.io/packages/source/g/geeadd/geeadd-0.3.0.tar.gz"

    version('0.3.0', sha256='591e6ff2847122598ed5b0452a892a76e332ce227d4ba75e4d03eca2c7a4beea')

    # Run-time dependency versions mirror the upstream setup.py minimums.
    depends_on('py-setuptools', type='build')
    depends_on('py-earthengine-api@0.1.87:', type=('build', 'run'))
    depends_on('py-requests@2.10.0:', type=('build', 'run'))
    depends_on('py-poster@0.8.1:', type=('build', 'run'))
    depends_on('py-retrying@1.3.3:', type=('build', 'run'))
    depends_on('py-clipboard@0.0.4:', type=('build', 'run'))
    depends_on('py-beautifulsoup4@4.5.1:', type=('build', 'run'))
    depends_on('py-requests-toolbelt@0.7.0:', type=('build', 'run'))
    depends_on('py-pytest@3.0.0:', type=('build', 'test'))
    depends_on('py-future@0.16.0:', type=('build', 'run'))
    depends_on('py-google-cloud-storage@1.1.1:', type=('build', 'run'))
    depends_on('py-oauth2client@4.1.3:', type=('build', 'run'))
| lgpl-2.1 | Python | |
06e7dd815a77739089b2ad0aed5cb9f01a194967 | Add script to normalize image using Ops | bic-kn/imagej-scripts | Normalize_Image.py | Normalize_Image.py | # @Dataset data
# @OpService ops
# @OUTPUT Img normalized
# Create normalized image to the [0, 1] range.
#
# Stefan Helfrich (University of Konstanz), 03/10/2016
from net.imglib2.type.numeric.real import FloatType
from net.imglib2.type.numeric.integer import ByteType
from net.imagej.ops import Ops
# Duplicate the input image to hold the normalized result.
normalized = ops.create().imgPlus(data.getImgPlus(), data.getImgPlus());
normalized.setName("normalized");
# Op mapping the source value range onto [0.0, 1.0].
normalizeOp = ops.op(Ops.Image.Normalize, normalized, data.getImgPlus(), None, None, FloatType(0.0), FloatType(1.0));
# Apply the op over axes [0, 1] (XY planes) of every slice.
ops.slicewise(normalized, data.getImgPlus(), normalizeOp, [0,1], False);
| bsd-2-clause | Python | |
b4fdb95ef8a88cfd2d283698ac005ce8d9ec3468 | Create fetch-wms-urls.py | VirtualWatershed/vw-py,VirtualWatershed/vw-py,tri-state-epscor/wcwave_adaptors,tri-state-epscor/wcwave_adaptors | scripts/fetch-wms-urls.py | scripts/fetch-wms-urls.py | #!/usr/bin/python
import requests
import json
# Query the Virtual Watershed search API for vis outputs of one model run.
url = "http://129.24.196.43/apps/my_app/search/datasets.json?version=3&model_run_uuid=20f303cd-624d-413d-b485-6113319003d4&model_set=outputs&model_set_type=vis"
r = requests.get(url)
data = json.loads(r.text)
# Print the WMS endpoint of the first service entry for every result.
for i in data["results"]:
    full = i["services"][0]["wms"]
    print full
| bsd-2-clause | Python | |
6d4efa0bd1199bbe900a8913b829ca7201dde6ab | Add migration to add new Juniper SASS vars to sites | appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform,appsembler/edx-platform | openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py | openedx/core/djangoapps/appsembler/sites/migrations/0003_add_juniper_new_sass_vars.py | # -*- coding: utf-8 -*-
import json
from django.db import migrations, models
def add_juniper_new_sass_vars(apps, schema_editor):
    """Backfill the Tahoe Juniper SASS variables onto every existing site.

    Each variable is appended to the site's sass_variables as
    ``[name, [value, value]]``, but only when the site does not already
    define a variable of that name.  Every site is saved, changed or not,
    matching the original behaviour.
    """
    new_sass_var_keys = {
        "$base-container-width": "calcRem(1200)",
        "$base-learning-container-width": "calcRem(1000)",
        "$courseware-content-container-side-padding": "calcRem(100)",
        "$courseware-content-container-sidebar-width": "calcRem(240)",
        "$courseware-content-container-width": "$base-learning-container-width",
        "$site-nav-width": "$base-container-width",
        "$inline-link-color": "$brand-primary-color",
        "$light-border-color": "#dedede",
        "$font-size-base-courseware": "calcRem(18)",
        "$line-height-base-courseware": "200%",
        "$in-app-container-border-radius": "calcRem(15)",
        "$login-register-container-width": "calcRem(480)",
    }
    SiteConfiguration = apps.get_model('site_configuration', 'SiteConfiguration')
    for site in SiteConfiguration.objects.all():
        defined = set(pair[0] for pair in site.sass_variables)
        for name, value in new_sass_var_keys.items():
            if name not in defined:
                site.sass_variables.append([name, [value, value]])
        site.save()
class Migration(migrations.Migration):

    # Must run after the site_configuration table exists, since the data
    # migration above reads and writes SiteConfiguration rows.
    dependencies = [
        ('appsembler_sites', '0001_initial'),
        ('appsembler_sites', '0002_add_hide_linked_accounts_sass_var'),
        ('site_configuration', '0004_auto_20161120_2325'),
    ]

    operations = [
        # Data-only migration; no reverse function is supplied, so it is
        # irreversible.
        migrations.RunPython(add_juniper_new_sass_vars),
    ]
| agpl-3.0 | Python | |
46351669c279764e1b070943366d7c0ea84a243a | Build pipeline directly in build/action_maketokenizer.py. Review URL: http://codereview.chromium.org/67086 | robclark/chromium,hgl888/chromium-crosswalk,dednal/chromium.src,bright-sparks/chromium-spacewalk,TheTypoMaster/chromium-crosswalk,axinging/chromium-crosswalk,markYoungH/chromium.src,Fireblend/chromium-crosswalk,Jonekee/chromium.src,hujiajie/pa-chromium,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,mogoweb/chromium-crosswalk,ondra-novak/chromium.src,rogerwang/chromium,Fireblend/chromium-crosswalk,Chilledheart/chromium,zcbenz/cefode-chromium,timopulkkinen/BubbleFish,ondra-novak/chromium.src,robclark/chromium,ltilve/chromium,dednal/chromium.src,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,Jonekee/chromium.src,M4sse/chromium.src,Just-D/chromium-1,rogerwang/chromium,keishi/chromium,zcbenz/cefode-chromium,ChromiumWebApps/chromium,jaruba/chromium.src,robclark/chromium,hgl888/chromium-crosswalk-efl,pozdnyakov/chromium-crosswalk,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,Just-D/chromium-1,littlstar/chromium.src,keishi/chromium,pozdnyakov/chromium-crosswalk,axinging/chromium-crosswalk,Jonekee/chromium.src,pozdnyakov/chromium-crosswalk,ChromiumWebApps/chromium,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,ChromiumWebApps/chromium,M4sse/chromium.src,pozdnyakov/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,timopulkkinen/BubbleFish,anirudhSK/chromium,hgl888/chromium-crosswalk,mogoweb/chromium-crosswalk,keishi/chromium,chuan9/chromium-crosswalk,Jonekee/chromium.src,anirudhSK/chromium,Pluto-tv/chromium-crosswalk,fujunwei/chromium-crosswalk,timopulkkinen/BubbleFish,mohamed--abdel-maksoud/chromium.src,M4sse/chromium.src,junmin-zhu/chromium-rivertrail,zcbenz/cefode-chromium,jaruba/chromium.src,Fireblend/chromium-crosswalk,dednal/chromium.src,crosswalk-project/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,junmin-zhu/chromium-rivertrail,dushu1203/chromium.src
,anirudhSK/chromium,mogoweb/chromium-crosswalk,Chilledheart/chromium,bright-sparks/chromium-spacewalk,ltilve/chromium,hujiajie/pa-chromium,bright-sparks/chromium-spacewalk,dushu1203/chromium.src,jaruba/chromium.src,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,hgl888/chromium-crosswalk-efl,zcbenz/cefode-chromium,anirudhSK/chromium,ondra-novak/chromium.src,Chilledheart/chromium,Jonekee/chromium.src,hgl888/chromium-crosswalk,Jonekee/chromium.src,littlstar/chromium.src,Jonekee/chromium.src,littlstar/chromium.src,timopulkkinen/BubbleFish,patrickm/chromium.src,Chilledheart/chromium,junmin-zhu/chromium-rivertrail,rogerwang/chromium,Pluto-tv/chromium-crosswalk,hgl888/chromium-crosswalk-efl,ltilve/chromium,ondra-novak/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,dushu1203/chromium.src,axinging/chromium-crosswalk,jaruba/chromium.src,rogerwang/chromium,ltilve/chromium,patrickm/chromium.src,bright-sparks/chromium-spacewalk,junmin-zhu/chromium-rivertrail,chuan9/chromium-crosswalk,bright-sparks/chromium-spacewalk,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,anirudhSK/chromium,markYoungH/chromium.src,hgl888/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,keishi/chromium,zcbenz/cefode-chromium,keishi/chromium,dednal/chromium.src,zcbenz/cefode-chromium,axinging/chromium-crosswalk,markYoungH/chromium.src,robclark/chromium,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,TheTypoMaster/chromium-crosswalk,hgl888/chromium-crosswalk,timopulkkinen/BubbleFish,PeterWangIntel/chromium-crosswalk,Just-D/chromium-1,keishi/chromium,krieger-od/nwjs_chromium.src,pozdnyakov/chromium-crosswalk,hujiajie/pa-chromium,robclark/chromium,rogerwang/chromium,robclark/chromium,ltilve/chromium,anirudhSK/chromium,nacl-webkit/chrome_deps,nacl-webkit/chrome_deps,rogerwang/chromium,junmin-zhu/chromium-rivertrail,mohamed--abdel-maksoud/chromium.src,pozdnyakov/chromium-crosswalk,bright-sparks/chromium-spacewalk,Pluto-tv/chromium-crossw
alk,junmin-zhu/chromium-rivertrail,bright-sparks/chromium-spacewalk,Chilledheart/chromium,ChromiumWebApps/chromium,fujunwei/chromium-crosswalk,dednal/chromium.src,chuan9/chromium-crosswalk,jaruba/chromium.src,fujunwei/chromium-crosswalk,axinging/chromium-crosswalk,robclark/chromium,hgl888/chromium-crosswalk-efl,PeterWangIntel/chromium-crosswalk,keishi/chromium,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,patrickm/chromium.src,krieger-od/nwjs_chromium.src,jaruba/chromium.src,axinging/chromium-crosswalk,Just-D/chromium-1,M4sse/chromium.src,jaruba/chromium.src,markYoungH/chromium.src,zcbenz/cefode-chromium,Pluto-tv/chromium-crosswalk,patrickm/chromium.src,hgl888/chromium-crosswalk-efl,M4sse/chromium.src,zcbenz/cefode-chromium,M4sse/chromium.src,hgl888/chromium-crosswalk-efl,Jonekee/chromium.src,Chilledheart/chromium,littlstar/chromium.src,M4sse/chromium.src,Jonekee/chromium.src,jaruba/chromium.src,PeterWangIntel/chromium-crosswalk,keishi/chromium,fujunwei/chromium-crosswalk,nacl-webkit/chrome_deps,ChromiumWebApps/chromium,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,patrickm/chromium.src,junmin-zhu/chromium-rivertrail,ChromiumWebApps/chromium,ltilve/chromium,littlstar/chromium.src,timopulkkinen/BubbleFish,hujiajie/pa-chromium,Fireblend/chromium-crosswalk,Just-D/chromium-1,crosswalk-project/chromium-crosswalk-efl,axinging/chromium-crosswalk,Fireblend/chromium-crosswalk,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,Just-D/chromium-1,M4sse/chromium.src,ltilve/chromium,timopulkkinen/BubbleFish,dushu1203/chromium.src,robclark/chromium,pozdnyakov/chromium-crosswalk,krieger-od/nwjs_chromium.src,keishi/chromium,zcbenz/cefode-chromium,nacl-webkit/chrome_deps,rogerwang/chromium,keishi/chromium,bright-sparks/chromium-spacewalk,hgl888/chromium-crosswalk,chuan9/chromium-crosswalk,hgl888/chromium-crosswalk,markYoungH/chromium.src,patrickm/chromium.src,fujunwei/chromium-crosswalk,dednal/chromium.src,Jonekee/chro
mium.src,jaruba/chromium.src,ondra-novak/chromium.src,chuan9/chromium-crosswalk,fujunwei/chromium-crosswalk,dushu1203/chromium.src,crosswalk-project/chromium-crosswalk-efl,TheTypoMaster/chromium-crosswalk,junmin-zhu/chromium-rivertrail,patrickm/chromium.src,anirudhSK/chromium,ondra-novak/chromium.src,ChromiumWebApps/chromium,dushu1203/chromium.src,dednal/chromium.src,M4sse/chromium.src,Just-D/chromium-1,timopulkkinen/BubbleFish,hgl888/chromium-crosswalk-efl,Chilledheart/chromium,ChromiumWebApps/chromium,krieger-od/nwjs_chromium.src,dednal/chromium.src,Chilledheart/chromium,krieger-od/nwjs_chromium.src,dushu1203/chromium.src,chuan9/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,littlstar/chromium.src,TheTypoMaster/chromium-crosswalk,TheTypoMaster/chromium-crosswalk,dushu1203/chromium.src,pozdnyakov/chromium-crosswalk,mogoweb/chromium-crosswalk,markYoungH/chromium.src,Just-D/chromium-1,anirudhSK/chromium,axinging/chromium-crosswalk,robclark/chromium,ChromiumWebApps/chromium,mohamed--abdel-maksoud/chromium.src,nacl-webkit/chrome_deps,dednal/chromium.src,PeterWangIntel/chromium-crosswalk,ondra-novak/chromium.src,hujiajie/pa-chromium,hujiajie/pa-chromium,pozdnyakov/chromium-crosswalk,markYoungH/chromium.src,mogoweb/chromium-crosswalk,hgl888/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,Fireblend/chromium-crosswalk,Pluto-tv/chromium-crosswalk,Fireblend/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,dednal/chromium.src,krieger-od/nwjs_chromium.src,hgl888/chromium-crosswalk-efl,ondra-novak/chromium.src,chuan9/chromium-crosswalk,littlstar/chromium.src,rogerwang/chromium,fujunwei/chromium-crosswalk,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,anirudhSK/chromium,rogerwang/chromium,Just-D/chromium-1,Pluto-tv/chromium-crosswalk,timopulkkinen/BubbleFish,nacl-webkit/chrome_deps,PeterWangIntel/chromium-crosswalk,crosswalk-project/chromium-crosswalk-efl,robclark/chromium,dushu1203/chromium.src,axinging/chromium-crosswa
lk,markYoungH/chromium.src,ChromiumWebApps/chromium,mogoweb/chromium-crosswalk,junmin-zhu/chromium-rivertrail,PeterWangIntel/chromium-crosswalk,hujiajie/pa-chromium,TheTypoMaster/chromium-crosswalk,zcbenz/cefode-chromium,mogoweb/chromium-crosswalk,markYoungH/chromium.src,PeterWangIntel/chromium-crosswalk,junmin-zhu/chromium-rivertrail,bright-sparks/chromium-spacewalk,ondra-novak/chromium.src,mohamed--abdel-maksoud/chromium.src,markYoungH/chromium.src,anirudhSK/chromium,mohamed--abdel-maksoud/chromium.src,Chilledheart/chromium,pozdnyakov/chromium-crosswalk,mohamed--abdel-maksoud/chromium.src,mohamed--abdel-maksoud/chromium.src,Pluto-tv/chromium-crosswalk,hujiajie/pa-chromium,hgl888/chromium-crosswalk,nacl-webkit/chrome_deps,krieger-od/nwjs_chromium.src,nacl-webkit/chrome_deps,krieger-od/nwjs_chromium.src,hujiajie/pa-chromium,ChromiumWebApps/chromium,M4sse/chromium.src,ltilve/chromium,jaruba/chromium.src,TheTypoMaster/chromium-crosswalk,keishi/chromium,jaruba/chromium.src,mogoweb/chromium-crosswalk,ltilve/chromium,Fireblend/chromium-crosswalk,rogerwang/chromium,chuan9/chromium-crosswalk,axinging/chromium-crosswalk,timopulkkinen/BubbleFish,patrickm/chromium.src,crosswalk-project/chromium-crosswalk-efl,mogoweb/chromium-crosswalk,littlstar/chromium.src,krieger-od/nwjs_chromium.src,Pluto-tv/chromium-crosswalk,anirudhSK/chromium,Fireblend/chromium-crosswalk,Jonekee/chromium.src,junmin-zhu/chromium-rivertrail,fujunwei/chromium-crosswalk,hujiajie/pa-chromium,markYoungH/chromium.src,timopulkkinen/BubbleFish | webkit/build/action_maketokenizer.py | webkit/build/action_maketokenizer.py | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: action_maketokenizer.py OUTPUTS -- INPUTS
#
# Multiple INPUTS may be listed. The sections are separated by -- arguments.
#
# OUTPUTS must contain a single item: a path to tokenizer.cpp.
#
# INPUTS must contain exactly two items. The first item must be the path to
# maketokenizer. The second item must be the path to tokenizer.flex.
import os
import subprocess
import sys
def SplitArgsIntoSections(args):
  """Split *args* into sections separated by '--' markers.

  Each '--' starts a new section, so a trailing '--' produces a final
  empty section.  An empty argument list yields no sections at all.
  """
  if not args:
    return []
  sections = [[]]
  for arg in args:
    if arg == '--':
      sections.append([])
    else:
      sections[-1].append(arg)
  return sections
def main(args):
  """Generate tokenizer.cpp by piping tokenizer.flex through flex and the
  maketokenizer perl script.

  args: argv-style list; items before '--' are outputs (one path,
  tokenizer.cpp) and items after are inputs (maketokenizer script, then
  tokenizer.flex).  Returns 0 on success; raises RuntimeError if either
  tool exits non-zero.
  """
  sections = SplitArgsIntoSections(args[1:])
  assert len(sections) == 2
  (outputs, inputs) = sections

  assert len(outputs) == 1
  output = outputs[0]

  assert len(inputs) == 2
  maketokenizer = inputs[0]
  flex_input = inputs[1]

  # Run "flex -t INPUT | perl maketokenizer > output" as a direct pipeline.
  # The with-statement guarantees the output file is closed (the previous
  # version leaked the handle), and explicit return-code checks replace
  # asserts, which disappear under python -O.
  with open(output, 'wb') as outfile:
    flex = subprocess.Popen(['flex', '-t', flex_input],
                            stdout=subprocess.PIPE)
    perl = subprocess.Popen(['perl', maketokenizer], stdin=flex.stdout,
                            stdout=outfile)
    # Drop our copy of the pipe so perl sees EOF if flex exits early.
    flex.stdout.close()
    flex_rc = flex.wait()
    perl_rc = perl.wait()
  if flex_rc != 0 or perl_rc != 0:
    raise RuntimeError('tokenizer generation failed: flex=%d perl=%d'
                       % (flex_rc, perl_rc))
  return 0
if __name__ == '__main__':
  # Propagate main's status so the build fails when generation fails.
  sys.exit(main(sys.argv))
| #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: action_maketokenizer.py OUTPUTS -- INPUTS
#
# Multiple INPUTS may be listed. The sections are separated by -- arguments.
#
# OUTPUTS must contain a single item: a path to tokenizer.cpp.
#
# INPUTS must contain exactly two items. The first item must be the path to
# maketokenizer. The second item must be the path to tokenizer.flex.
import os
import subprocess
import sys
def SplitArgsIntoSections(args):
  """Split *args* into sections separated by '--' markers.

  A trailing '--' yields a final empty section; an empty *args* yields no
  sections at all.
  """
  sections = []
  while len(args) > 0:
    if not '--' in args:
      # If there is no '--' left, everything remaining is an entire section.
      dashes = len(args)
    else:
      dashes = args.index('--')
    sections.append(args[:dashes])
    # Next time through the loop, look at everything after this '--'.
    if dashes + 1 == len(args):
      # If the '--' is at the end of the list, we won't come back through the
      # loop again. Add an empty section now corresponding to the nothingness
      # following the final '--'.
      args = []
      sections.append(args)
    else:
      args = args[dashes + 1:]
  return sections
def main(args):
  """Generate tokenizer.cpp by piping tokenizer.flex through flex and the
  maketokenizer perl script.

  args: argv-style list; items before '--' are outputs (one path) and
  items after are inputs (maketokenizer script, then tokenizer.flex).
  Returns 0 on success; raises RuntimeError if either tool exits non-zero.
  """
  sections = SplitArgsIntoSections(args[1:])
  assert len(sections) == 2
  (outputs, inputs) = sections

  assert len(outputs) == 1
  output = outputs[0]

  assert len(inputs) == 2
  maketokenizer = inputs[0]
  flex_input = inputs[1]

  # Build the pipeline directly instead of handing the shell a single
  # string: this resolves the old TODO, avoids quoting bugs for paths
  # containing spaces or shell metacharacters, and lets us check each
  # stage's exit status individually.
  with open(output, 'wb') as outfile:
    flex = subprocess.Popen(['flex', '-t', flex_input],
                            stdout=subprocess.PIPE)
    perl = subprocess.Popen(['perl', maketokenizer], stdin=flex.stdout,
                            stdout=outfile)
    # Drop our copy of the pipe so perl sees EOF if flex exits early.
    flex.stdout.close()
    flex_rc = flex.wait()
    perl_rc = perl.wait()
  if flex_rc != 0 or perl_rc != 0:
    raise RuntimeError('tokenizer generation failed: flex=%d perl=%d'
                       % (flex_rc, perl_rc))
  return 0
if __name__ == '__main__':
  # Propagate main's status so the build fails when generation fails.
  sys.exit(main(sys.argv))
| bsd-3-clause | Python |
163c214f8d714e3f1dc08324f9d48a34f813d9fe | Add agency creation script. | sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper,sunlightlabs/regulations-scraper | regscrape/regscrape_lib/commands/create_agencies.py | regscrape/regscrape_lib/commands/create_agencies.py | def run():
from regscrape_lib.util import get_db
from regscrape_lib.search import get_agencies
from pymongo.errors import DuplicateKeyError
db = get_db()
new = 0
print 'Fetching agencies...'
agencies = get_agencies()
print 'Saving agencies...'
stop_words = ['the', 'and', 'of', 'on', 'in', 'for']
for agency in agencies:
name_parts = agency.name.split(' ')
capitalized_parts = [name_parts[0].title()] + [word.title() if word.lower() not in stop_words else word.lower() for word in name_parts[1:]]
name = ' '.join(capitalized_parts)
record = {
'_id': agency.abbr,
'name': name
}
result = db.agencies.update(
{
'_id': record['_id']
},
{
'$set': {'name': record['name']}
},
upsert=True,
safe=True
)
new += 1 if 'updatedExisting' in result and not result['updatedExisting'] else 0
print 'Iterated over %s agencies, of which %s were new.' % (len(agencies), new)
return {'total': len(agencies), 'new': new}
| bsd-3-clause | Python | |
392125f2b3fae38b4f4d32877ad2abaa60ea6ffd | Add pony/orm/examples/demo.py | ponyorm/pony,gwecho/pony,gwecho/pony,gwecho/pony,ponyorm/pony,ponyorm/pony,ponyorm/pony | pony/orm/examples/demo.py | pony/orm/examples/demo.py | from decimal import Decimal
from pony.orm import *
# SQLite-backed database for the demo; the file is created on first run.
db = Database("sqlite", "demo.sqlite", create_db=True)
class Customer(db.Entity):
    # Auto-incrementing surrogate primary key.
    id = PrimaryKey(int, auto=True)
    name = Required(unicode)
    # Unique constraint: one customer per email address.
    email = Required(unicode, unique=True)
    # One-to-many: reverse side of Order.customer.
    orders = Set("Order")
class Order(db.Entity):
    id = PrimaryKey(int, auto=True)
    # Stored order total; not derived automatically from the line items.
    total_price = Required(Decimal)
    customer = Required(Customer)
    # One-to-many: reverse side of OrderItem.order.
    items = Set("OrderItem")
class Product(db.Entity):
    id = PrimaryKey(int, auto=True)
    name = Required(unicode)
    # Unit price; OrderItem references this product per order.
    price = Required(Decimal)
    # One-to-many: reverse side of OrderItem.product.
    items = Set("OrderItem")
class OrderItem(db.Entity):
    quantity = Required(int, default=1)
    order = Required(Order)
    product = Required(Product)
    # Composite key: each product appears at most once per order.
    PrimaryKey(order, product)
sql_debug(True)  # echo all generated SQL to stdout

# Map entities to tables, creating any missing tables in demo.sqlite.
db.generate_mapping(create_tables=True)
# db.generate_mapping(check_tables=True)
def populate_database():
    """Create the demo data set: five customers, six products and five
    orders with their line items, committed in a single transaction.

    NOTE(review): several Order.total_price values do not equal the sum of
    their items' prices (e.g. o1: 284.00 + 2 * 9.98 = 303.96, not 292.00) —
    confirm whether these discounts are intentional sample data.
    """
    c1 = Customer(name='John Smith', email='john@example.com')
    c2 = Customer(name='Matthew Reed', email='matthew@example.com')
    c3 = Customer(name='Chuan Qin', email='chuanqin@example.com')
    c4 = Customer(name='Rebecca Lawson', email='rebecca@example.com')
    c5 = Customer(name='Oliver Blakey', email='oliver@example.com')

    p1 = Product(name='Kindle Fire HD', price=Decimal('284.00'))
    p2 = Product(name='Apple iPad with Retina Display', price=Decimal('478.50'))
    p3 = Product(name='SanDisk Cruzer 16 GB USB Flash Drive', price=Decimal('9.99'))
    p4 = Product(name='Kingston DataTraveler 16GB USB 2.0', price=Decimal('9.98'))
    p5 = Product(name='Samsung 840 Series 120GB SATA III SSD', price=Decimal('98.95'))
    p6 = Product(name='Crucial m4 256GB SSD SATA 6Gb/s', price=Decimal('188.67'))

    o1 = Order(customer=c1, total_price=Decimal('292.00'))
    OrderItem(order=o1, product=p1)
    OrderItem(order=o1, product=p4, quantity=2)

    o2 = Order(customer=c1, total_price=Decimal('478.50'))
    OrderItem(order=o2, product=p2)

    o3 = Order(customer=c2, total_price=Decimal('680.50'))
    OrderItem(order=o3, product=p2)
    OrderItem(order=o3, product=p4, quantity=2)
    OrderItem(order=o3, product=p6)

    o4 = Order(customer=c3, total_price=Decimal('99.80'))
    OrderItem(order=o4, product=p4, quantity=10)

    o5 = Order(customer=c4, total_price=Decimal('722.00'))
    OrderItem(order=o5, product=p1)
    OrderItem(order=o5, product=p2)

    commit()
| apache-2.0 | Python | |
d5ed0cf979fa393d45e2f719d3096618e0f723aa | Add utils.py file for util functions | sonph/gdaxcli,sonph/gdaxcli | utils.py | utils.py | """Utilities."""
import logging
def configure_logging(to_stderr=True, to_file=True, file_name='main.log'):
    """Attach stderr and/or file handlers to the root logger at INFO level.

    Handlers are added in order: stream first, then file, both sharing the
    same "time - level - message" format.
    """
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')

    handlers = []
    if to_stderr:
        handlers.append(logging.StreamHandler())
    if to_file:
        handlers.append(logging.FileHandler(file_name))
    for handler in handlers:
        handler.setFormatter(formatter)
        logger.addHandler(handler)
| mit | Python | |
f2b97f029e61bd70b9f4ef5d79c875132907e45e | add missing file. | alex/gunicorn,GitHublong/gunicorn,wong2/gunicorn,prezi/gunicorn,ccl0326/gunicorn,malept/gunicorn,zhoucen/gunicorn,ammaraskar/gunicorn,beni55/gunicorn,jamesblunt/gunicorn,gtrdotmcs/gunicorn,ccl0326/gunicorn,alex/gunicorn,WSDC-NITWarangal/gunicorn,urbaniak/gunicorn,zhoucen/gunicorn,urbaniak/gunicorn,urbaniak/gunicorn,gtrdotmcs/gunicorn,malept/gunicorn,keakon/gunicorn,mvaled/gunicorn,1stvamp/gunicorn,wong2/gunicorn,tempbottle/gunicorn,ccl0326/gunicorn,jamesblunt/gunicorn,1stvamp/gunicorn,ephes/gunicorn,1stvamp/gunicorn,mvaled/gunicorn,tejasmanohar/gunicorn,jamesblunt/gunicorn,wong2/gunicorn,z-fork/gunicorn,MrKiven/gunicorn,gtrdotmcs/gunicorn,zhoucen/gunicorn,malept/gunicorn,prezi/gunicorn,mvaled/gunicorn,harrisonfeng/gunicorn,elelianghh/gunicorn,prezi/gunicorn,alex/gunicorn | gunicorn/monkey.py | gunicorn/monkey.py | # -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
def patch_django():
    """ monkey patch django.

    This patch make sure that we use real threads to get the ident which
    is going to happen if we are using gevent or eventlet.

    Replaces BaseDatabaseWrapper.__init__ and validate_thread_sharing with
    copies that capture the real OS thread ident via ``thread.get_ident``
    at patch time, so greenlet-based ident functions are bypassed.
    """
    try:
        from django.db import DEFAULT_DB_ALIAS
        from django.db.backends import BaseDatabaseWrapper, DatabaseError

        # Only Django versions that enforce per-thread connection sharing
        # define validate_thread_sharing; older versions need no patch.
        if "validate_thread_sharing" in BaseDatabaseWrapper.__dict__:
            import thread
            # Bind the real (unpatched) thread ident function once, before
            # gevent/eventlet monkey patching can replace it.
            _get_ident = thread.get_ident

            # Re-implementation of BaseDatabaseWrapper.__init__ that records
            # the creating thread using the real ident.
            def _init(self, settings_dict, alias=DEFAULT_DB_ALIAS,
                    allow_thread_sharing=False):
                self.connection = None
                self.queries = []
                self.settings_dict = settings_dict
                self.alias = alias
                self.use_debug_cursor = None

                # Transaction related attributes
                self.transaction_state = []
                self.savepoint_state = 0
                self._dirty = None
                self._thread_ident = _get_ident()
                self.allow_thread_sharing = allow_thread_sharing

            # Same check as Django's original, but compared against the real
            # thread ident captured above.
            def _validate_thread_sharing(self):
                if (not self.allow_thread_sharing
                    and self._thread_ident != _get_ident()):
                    raise DatabaseError("DatabaseWrapper objects created in a "
                        "thread can only be used in that same thread. The object "
                        "with alias '%s' was created in thread id %s and this is "
                        "thread id %s."
                        % (self.alias, self._thread_ident, _get_ident()))

            BaseDatabaseWrapper.__init__ = _init
            BaseDatabaseWrapper.validate_thread_sharing = _validate_thread_sharing

    except ImportError, e:
        # Django is not installed; nothing to patch.
        # NOTE(review): this assignment looks like dead leftover code — the
        # name is never read anywhere; confirm it can be removed.
        patch_django_db_backends = None
| mit | Python | |
b0f8c27325c9b4cbc5cd5bc83ece6f3d7569f7da | Add gzip stream | voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts | gzipinputstream.py | gzipinputstream.py | import zlib
import string
BLOCK_SIZE = 16384
"""Read block size"""

# The "16 +" offset tells zlib to expect a gzip (not raw zlib) header/trailer.
WINDOW_BUFFER_SIZE = 16 + zlib.MAX_WBITS
"""zlib window buffer size, set to gzip's format"""
class GzipInputStream(object):
    """
    Simple class that allow streaming reads from GZip files.

    Python 2.x gzip.GZipFile relies on .seek() and .tell(), so it
    doesn't support this (@see: http://bo4.me/YKWSsL).

    Adapted from: http://effbot.org/librarybook/zlib-example-4.py

    Internally keeps a decompressed lookahead buffer (self._data); all read
    operations consume from that buffer, refilling it from the underlying
    file as needed.  Only forward seeking is supported.
    """
    def __init__(self, fileobj):
        """
        Initialize with the given file-like object.

        @param fileobj: file-like object,
        """
        self._file = fileobj
        self._zip = zlib.decompressobj(WINDOW_BUFFER_SIZE)
        self._offset = 0 # position in unzipped stream
        self._data = ""
    def __fill(self, num_bytes):
        """
        Fill the internal buffer with 'num_bytes' of data.

        @param num_bytes: int, number of bytes to read in (0 = everything)
        """
        # _zip becomes None once the compressed stream is exhausted.
        if not self._zip:
            return
        while not num_bytes or len(self._data) < num_bytes:
            data = self._file.read(BLOCK_SIZE)
            if not data:
                # End of the compressed input: flush whatever zlib buffered.
                self._data = self._data + self._zip.flush()
                self._zip = None # no more data
                break
            self._data = self._data + self._zip.decompress(data)
    def __iter__(self):
        return self
    def seek(self, offset, whence=0):
        # Only SEEK_SET (0) and SEEK_CUR (1) are supported; seeking is
        # emulated by reading and discarding, so it must be forward-only.
        if whence == 0:
            position = offset
        elif whence == 1:
            position = self._offset + offset
        else:
            raise IOError("Illegal argument")
        if position < self._offset:
            raise IOError("Cannot seek backwards")
        # skip forward, in blocks
        while position > self._offset:
            if not self.read(min(position - self._offset, BLOCK_SIZE)):
                break
    def tell(self):
        # Position within the *decompressed* stream.
        return self._offset
    def read(self, size=0):
        # size == 0 means "read everything remaining".
        self.__fill(size)
        if size:
            data = self._data[:size]
            self._data = self._data[size:]
        else:
            data = self._data
            self._data = ""
        self._offset = self._offset + len(data)
        return data
    def next(self):
        # Python 2 iterator protocol: one line per iteration.
        line = self.readline()
        if not line:
            raise StopIteration()
        return line
    def readline(self):
        # make sure we have an entire line
        while self._zip and "\n" not in self._data:
            self.__fill(len(self._data) + 512)
        # pos is one past the newline; <= 0 means no newline before EOF,
        # in which case the rest of the stream is the final line.
        pos = string.find(self._data, "\n") + 1
        if pos <= 0:
            return self.read()
        return self.read(pos)
    def readlines(self):
        # Materialize all remaining lines (keeps trailing newlines).
        lines = []
        while True:
            line = self.readline()
            if not line:
                break
            lines.append(line)
        return lines
| mit | Python | |
95e5b80117b090ae0458df18e062bad50b0c0b5a | add module init file | Berling/project-zombye-exporter | io_exporter_zombye/__init__.py | io_exporter_zombye/__init__.py | # The MIT License (MIT)
#
# Copyright (c) 2015 Georg Schäfer
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Blender add-on metadata, read by Blender's add-on manager at install time.
bl_info = {
    "name" : "Zombye Model Exporter",
    "author" : "Georg Schäfer",
    "version" : (0, 1),
    # NOTE(review): Blender version tuples are usually (major, minor, patch)
    # such as (2, 75, 0); confirm (2, 7, 5) is the intended minimum version.
    "blender" : (2, 7, 5),
    "location" : "File > Export > Zombye Model (.zmdl)",
    "description" : "The script exports meshes, armatures and animations to the Zombye Model format (zmdl)",
    "category" : "Import-Export"
}
| mit | Python | |
19df1ece66f815d0aaaae5e7273117b2da9541ac | Create mpu9250_i2c_modi.py | mikechan0731/RaspBerryPi_MPU9250_data_read,mikechan0731/GY-91_and_PiCamera_RaspberryPi | mpu9250_i2c_modi.py | mpu9250_i2c_modi.py | import smbus
import time,timeit
#import RPi.GPIO as GPIO
# Global variables
i2c = smbus.SMBus(1)  # I2C bus 1 (Raspberry Pi header pins)
addr = 0x68  # MPU9250 I2C address

c_t0 = time.clock()  # CPU-time reference for sample timestamps
t_t0 = time.time()  # wall-clock reference for sample timestamps

try:
    # Read the device-ID register and report the connection.
    device_id = i2c.read_byte_data(addr,0x75)
    print "Device ID:" + str(hex(device_id))
    print "MPU9250 I2C Connected."
except:
    # NOTE(review): bare except hides the actual I2C error; the script
    # still continues and will fail on the writes below if the bus is absent.
    print "Connect failed"

# NOTE(review): register setup — presumably puts the MPU9250 into bypass
# mode so the AK8963 magnetometer (address 0x0c) is reachable, then
# triggers a magnetometer measurement; confirm against the register map.
i2c.write_byte_data(0x68, 0x6a, 0x00)
i2c.write_byte_data(0x68, 0x37, 0x02)
i2c.write_byte_data(0x0c, 0x0a, 0x16)

# Open File
f = open("IMU_LOG_9axis.txt", "w")

# Loop area
def read_write_mpu9250():
    """Log 500 samples of raw accel/gyro/mag registers (plus offsets and
    timestamps) to the already-open log file *f*, then write the shutdown
    value to register 0x6A and close the file.

    Bug fix: the original wrapped the sensor reads in a nested function
    ``smbus_data_get``, so every sampled value (and c_t1/t_t1) was local to
    that function and the ``print >> f`` lines below raised NameError on
    the first iteration.  The reads are now inlined into the loop body.
    """
    count = 1
    while True:
        if count <= 500:
            # Re-trigger a magnetometer measurement for this sample.
            i2c.write_byte_data(0x0c, 0x0a, 0x16)
            #temp_out = i2c.read_i2c_block_data(addr, 0x41, 2)
            xyz_g_offset = i2c.read_i2c_block_data(addr, 0x13, 6)
            xyz_a_out = i2c.read_i2c_block_data(addr, 0x3B, 6)
            xyz_g_out = i2c.read_i2c_block_data(addr, 0x43, 6)
            xyz_a_offset = i2c.read_i2c_block_data(addr, 0x77, 6)
            xyz_mag = i2c.read_i2c_block_data(0x0c, 0x03, 6)
            # Timestamps relative to the module-level references.
            c_t1 = time.clock() - c_t0
            t_t1 = time.time() - t_t0

            print >> f, count
            print >> f, c_t1, t_t1
            print >> f, xyz_a_out
            print >> f, xyz_a_offset
            print >> f, xyz_g_out
            print >> f, xyz_g_offset
            print >> f, xyz_mag

            count += 1
        else:
            f.close()
            # NOTE(review): confirm 0x07 written to register 0x6A is the
            # intended shutdown/reset value for this sensor configuration.
            i2c.write_byte_data(addr, 0x6A, 0x07)
            break
# Time one full 500-sample acquisition run and report its duration.
print timeit.timeit(read_write_mpu9250, number = 1)
print "Process End"
| mit | Python | |
7acd91331d97a9a4c2190c7d6c8844bd4b7ccfe3 | add cache to diagnostics/__init__.py | jakirkham/dask,mikegraham/dask,pombredanne/dask,cowlicks/dask,vikhyat/dask,gameduell/dask,clarkfitzg/dask,wiso/dask,dask/dask,clarkfitzg/dask,mraspaud/dask,chrisbarber/dask,ssanderson/dask,mrocklin/dask,pombredanne/dask,jcrist/dask,cpcloud/dask,vikhyat/dask,jcrist/dask,dask/dask,mraspaud/dask,ContinuumIO/dask,ssanderson/dask,blaze/dask,mrocklin/dask,PhE/dask,ContinuumIO/dask,blaze/dask,jakirkham/dask,wiso/dask,PhE/dask | dask/diagnostics/__init__.py | dask/diagnostics/__init__.py | from .profile import Profiler
from .progress import ProgressBar
from .cache import Cache
| from .profile import Profiler
from .progress import ProgressBar
| bsd-3-clause | Python |
e383dc5c52db12aee5327743e26301b4d0f48af9 | Add files via upload | wiseman/dramabot | nearest_chat_bot.py | nearest_chat_bot.py |
import gensim
import pandas
import numpy as np
from sklearn.neighbors import NearestNeighbors
import pickle
def get_soap_data(location=None):
    """Return the lines of the soap-opera transcript corpus.

    location: path to a text file with one utterance per line; defaults to
        a hard-coded local path.  Lines keep their trailing newlines.
    """
    if location is None:  # was type(location)==type(None); `is None` is the idiom
        location = "D:\dialogue_agent\dr_word2vec_code\input_data\soaps_all.txt"  # TODO: make configurable
    # with-statement closes the handle even if readlines() raises (the
    # original leaked the file object on error).
    with open(location, mode="r") as f:
        return f.readlines()
def average_vector(doc, embedding):
    """Return the mean word2vec vector of the whitespace-separated words in
    *doc*, skipping out-of-vocabulary words.

    doc: string of words.
    embedding: word2vec model exposing ``layer1_size`` and ``model[word]``.
    Returns a numpy array of length ``embedding.layer1_size``; the zero
    vector if no word is in the vocabulary.
    Raises Exception if *embedding* is None.

    Bug fix: the accumulator was a Python list, so ``full_model += ary``
    *extended* the list with the word vector's elements instead of adding
    element-wise, producing ever-growing, ragged vectors.  A numpy zeros
    array makes ``+=`` element-wise as intended.
    """
    if embedding is None:  # was `== None`; identity check is the correct idiom
        raise Exception("in average vector function, w2v embedding does not exist or has not been loaded ")
    words = doc.rstrip().split()
    matched = 0
    total = np.zeros(embedding.layer1_size)
    for word in words:
        try:
            vector = embedding[word]
        except KeyError:  # out-of-vocabulary word: skip (was a bare except)
            continue
        matched += 1
        total += vector
    if matched != 0:
        total = total / float(matched)
    # Always an ndarray now (the original returned a plain list when no
    # word matched, giving an inconsistent return type).
    return total
def _get_w2v_embedding(name=None):
    """Load a word2vec model from *name*, defaulting to a hard-coded local
    copy of the GoogleNews binary vectors.  Tries the word2vec binary
    format first, then falls back to gensim's native save format.
    """
    if name == None:
        name="D:\dialogue_agent\GoogleNews-vectors-negative300.bin" #change directory
    try:
        embedding = gensim.models.word2vec.Word2Vec.load_word2vec_format(name, binary=True)
    except: # maybe it was saved in a different format
        embedding = gensim.models.word2vec.Word2Vec.load(name)
    return embedding
def quick_save(name, embedded_data):
    """Pickle *embedded_data* to ``<name>.p`` and immediately read it back
    as a round-trip sanity check.

    Bug fix: the original passed ``open(...)`` results directly to pickle
    and never closed them, leaking both file handles; ``with`` closes them
    deterministically.
    """
    with open(name + ".p", "wb") as out:
        pickle.dump(embedded_data, out)
    with open(name + ".p", "rb") as check:
        pickle.load(check)  # raises if the file is unreadable/corrupt
if __name__ == '__main__':
    # Load the transcript corpus and the word2vec embedding.
    text=get_soap_data()
    print len(text)
    embedding=_get_w2v_embedding()
    # Embed (up to) the first 2M lowercased utterances as mean word vectors.
    data=pandas.DataFrame()
    data["Transcript"]=text[0:2000000]
    data["Transcript"]=data["Transcript"].str.lower()
    data["index_value"]=data.index
    vals=data["Transcript"].values
    vector_rep=[average_vector(s, embedding) for s in vals]
    #quick_save("big_ver",vector_rep)
    # Index the utterance vectors for nearest-neighbour lookup.
    neighbors = NearestNeighbors(n_neighbors=10, metric="euclidean")
    neighbors.fit(vector_rep)
    # NOTE(review): `threshold` (and `best_response` below) are computed but
    # never used — the comment's "take the longest response" logic is not
    # implemented; confirm whether it was dropped intentionally.
    threshold=.6 #Of the top N, take the longest response
    # Interactive loop: embed the user's sentence, find its nearest
    # transcript line, and reply with the line that follows it.
    while True:
        sentence=raw_input("ENTER YOUR SENTENCE - THIS WOULD NORMALLY BE AN API CALL")
        sentence=sentence.lower()
        embedded= average_vector(sentence,embedding)
        distance,indices=neighbors.kneighbors([embedded])
        best=indices[0][0]
        indice= data.iloc[best].index_value #Get the correct location
        print distance
        print indice
        best_response=indice+1
        print data["Transcript"][indice+1]
print data["Transcript"][indice+1] | mit | Python | |
63318185d5477fbf99e570e5ccaba303ebe26493 | add testcases | yosida95/python-jsmapper | jsmapper/tests/test_mapping.py | jsmapper/tests/test_mapping.py | # -*- coding: utf-8 -*-
from nose.tools import (
eq_,
ok_,
)
from ..mapping import (
Mapping,
MappingProperty,
object_property,
)
from ..schema import JSONSchema
from ..types import (
Integer,
String,
)
def test_object_property():
    """The object_property decorator must wrap a schema factory in a
    MappingProperty carrying the given name and the factory's schema."""
    expected_schema = JSONSchema()

    @object_property(name='property')
    def prop():
        return expected_schema

    ok_(isinstance(prop, MappingProperty))
    eq_(prop.name, 'property')
    eq_(prop.schema, expected_schema)
class Base(Mapping):
    # Fixture: two schema-backed properties used by test_inheritance.
    foo = JSONSchema(type=Integer())
    bar = JSONSchema(type=Integer())
class Extended(Base):
    # Fixture: overrides Base.foo with a new schema and adds baz;
    # Base.bar is inherited unchanged.
    foo = JSONSchema(type=Integer())
    baz = JSONSchema(type=String())
def test_inheritance():
    """_properties() must include inherited schemas, with subclass
    overrides replacing the parent's versions."""
    base_schemas = {prop.schema for prop in Base._properties()}
    eq_({Base.foo, Base.bar}, base_schemas)

    extended_schemas = {prop.schema for prop in Extended._properties()}
    eq_({Extended.foo, Extended.bar, Extended.baz}, extended_schemas)
| mit | Python | |
190df1378844c6294c6f48ad6cb0272f2146fc48 | Add example of force https | timothycrosley/hug,MuhammadAlkarouri/hug,timothycrosley/hug,timothycrosley/hug,MuhammadAlkarouri/hug,MuhammadAlkarouri/hug | examples/force_https.py | examples/force_https.py | """An example of using a middleware to require HTTPS connections.
requires https://github.com/falconry/falcon-require-https to be installed via
pip install falcon-require-https
"""
import hug
from falcon_require_https import RequireHTTPS
# Install the middleware on this module's hug API so every request must
# arrive over HTTPS.
hug.API(__name__).http.add_middleware(RequireHTTPS())


@hug.get()
def my_endpoint():
    """Trivial endpoint; only reachable once the HTTPS requirement passes."""
    return 'Success!'
| mit | Python | |
c7e8f255d5ad85dc03f5f302f49295d491ac11a1 | Create app.py | mailmevj/apiai-flightassistant-webhook-sample | app.py | app.py | #!/usr/bin/env python
from __future__ import print_function
from future.standard_library import install_aliases
# Make the Python 3 urllib module layout importable under Python 2 as well.
install_aliases()
from urllib.parse import urlparse, urlencode
from urllib.request import urlopen, Request
from urllib.error import HTTPError
import json
import os
from flask import Flask
from flask import request
from flask import make_response
# Module-level Flask application, as expected by WSGI servers that import
# ``app`` from this module.
app = Flask(__name__)
@app.route('/webhook', methods=['POST'])
def webhook():
    """Fulfillment entry point: parse the API.AI request, delegate to
    processRequest, and return its result as a JSON response."""
    body = request.get_json(silent=True, force=True)

    print("Request:")
    print(json.dumps(body, indent=4))

    payload = json.dumps(processRequest(body), indent=4)

    response = make_response(payload)
    response.headers['Content-Type'] = 'application/json'
    return response
def processRequest(req):
    """Handle one fulfillment request.

    Only the ``yahooWeatherForecast`` action is supported; anything else
    (or a request without a city) yields an empty dict.  Otherwise the
    Yahoo YQL weather service is queried and its response converted into
    a webhook result.
    """
    if req.get("result").get("action") != "yahooWeatherForecast":
        return {}

    query = makeYqlQuery(req)
    if query is None:
        return {}

    base_url = "https://query.yahooapis.com/v1/public/yql?"
    full_url = base_url + urlencode({'q': query}) + "&format=json"
    raw = urlopen(full_url).read()

    return makeWebhookResult(json.loads(raw))
def makeYqlQuery(req):
    """Build the YQL weather query for the city named in the request, or
    return None when no ``geo-city`` parameter is present."""
    city = req.get("result").get("parameters").get("geo-city")
    if city is None:
        return None
    # NOTE(review): *city* is user-supplied and interpolated unescaped into
    # the YQL string; a quote in the input breaks/alters the query.
    return ("select * from weather.forecast where woeid in "
            "(select woeid from geo.places(1) where text='" + city + "')")
def makeWebhookResult(data):
    """Turn a Yahoo weather query result into the API.AI webhook response.

    Returns an empty dict whenever any expected piece of the nested
    structure is missing.
    """
    query = data.get('query')
    result = query.get('results') if query is not None else None
    channel = result.get('channel') if result is not None else None
    if channel is None:
        return {}

    item = channel.get('item')
    location = channel.get('location')
    units = channel.get('units')
    if (location is None) or (item is None) or (units is None):
        return {}

    condition = item.get('condition')
    if condition is None:
        return {}

    speech = ("Today in " + location.get('city') + ": " + condition.get('text')
              + ", the temperature is " + condition.get('temp')
              + " " + units.get('temperature'))

    print("Response:")
    print(speech)

    return {
        "speech": speech,
        "displayText": speech,
        "source": "apiai-weather-webhook-sample"
    }
if __name__ == '__main__':
    # Port is injected by the platform (e.g. Heroku) via $PORT; default 5000.
    port = int(os.getenv('PORT', 5000))

    print("Starting app on port %d" % port)

    # Bind on all interfaces so the host/container can route traffic in.
    app.run(debug=False, port=port, host='0.0.0.0')
| apache-2.0 | Python | |
2b74fccbed0a63a503d59ac46fe90d0916abe39c | Add sublime script | chrisdone/bdo,chrisdone/bdo | bdo.py | bdo.py | import sublime, sublime_plugin, subprocess, threading, time
class Bdo(sublime_plugin.TextCommand):
    # Sublime Text 2 command (Python 2): prompts for a bdo command, pipes it
    # to the local bdo daemon and shows any reply in a new buffer.
    def run(self, cmd):
        # Pre-fill the input panel with the most common command, "update".
        sublime.active_window().show_input_panel("bdo ", "update", self.execute, None, None)

    def execute(self, cmd):
        # Send the command to the daemon on localhost:9090 via netcat.
        # NOTE(review): cmd is interpolated into a shell string unescaped --
        # acceptable for a trusted local prompt, but worth confirming.
        output = subprocess.Popen(
            "echo " + cmd + " | nc -w 10 localhost 9090",
            shell=True, stdout=subprocess.PIPE).stdout.read()
        print output
        if len(output) > 0:
            # Show the response in a fresh buffer (ST2 begin_edit/end_edit API).
            view = sublime.active_window().new_file()
            edit = view.begin_edit()
            view.insert(edit, 0, output)
            view.end_edit(edit)
| bsd-3-clause | Python | |
876365a7f19a3786db15dc7debbd2686fa5d02ef | Add WmataError class and start of Wmata class. | ExperimentMonty/py3-wmata | wmata.py | wmata.py | import datetime
import urllib
import json
class WmataError(Exception):
    """Raised for errors originating from the WMATA API client."""
    pass
class Wmata(object):
    """Minimal client scaffold for the WMATA JSON API."""

    # URL template: fill in the service name and endpoint.
    base_url = 'http://api.wmata.com/%(svc)s.svc/json/%(endpoint)s'

    # By default, we'll use the WMATA demonstration key.
    api_key = 'kfgpmgvfgacx98de9q3xazww'

    def __init__(self, api_key=None):
        """Remember a caller-supplied API key; keep the demo key otherwise."""
        if api_key is None:
            return
        self.api_key = api_key
| mit | Python | |
51f02779f306c516bbd6d9cd1550e25c972932cf | Create base.py | jleeothon/urlmodel | base.py | base.py | from django.db import models
from django.core.urlresolvers import reverse
class UrlModelMixin(object):
    """
    Provides methods for building the URLs of model actions such as
    searching, creating, inspecting (detail), updating and deleting.

    If a model instance is identified by pk, ``slug_field_name`` should be
    left blank; otherwise give the name of the slug field.  The name of the
    URL keyword argument that carries the slug (or pk) is set in
    ``slug_kwarg_name``; usually both are set to the same value.  A pk
    specified by an anonymous URL argument is not supported.

    URL names are derived from the model name and the action.  By default
    ``action_url_formatter`` produces ``'<modelname>-<action>'`` (e.g.
    'pokemon-detail').  A subclass may instead define::

        @classmethod
        def format_action(cls, modelname, action):
            ...

    which takes precedence over ``action_url_formatter``.

    Class-level URLs (create, search, ...) are built with ``get_class_url``
    / ``get_class_action_url``; per-instance URLs with ``get_instance_url``
    / ``get_instance_action_url``.  All URLs are resolved on demand via
    ``reverse``.
    """

    # Name of the URL kwarg that identifies the instance (defaults to 'pk').
    slug_kwarg_name = None
    # Name of the model field whose value fills that kwarg (defaults to 'pk').
    slug_field_name = None

    # Default URL-name formatter: '<modelname>-<action>'.  Wrapped in
    # staticmethod so class-level access always yields a plain callable
    # (a bare lambda stored on the class is an unbound method on Python 2).
    action_url_formatter = staticmethod(
        lambda model, action: '%s-%s' % (model, action))

    @classmethod
    def get_action_url_formatter(cls, action):
        """Return the URL name for *action* on this model."""
        if hasattr(cls, 'format_action'):
            # Bug fix: previously referenced the undefined name 'self'
            # inside this classmethod, raising NameError.
            return cls.format_action(cls._meta.model_name, action)
        elif cls.action_url_formatter:
            return cls.action_url_formatter(cls._meta.model_name, action)

    @classmethod
    def get_class_url(cls, url_name, *args, **kwargs):
        """Reverse *url_name* with the given args/kwargs (class-level URL)."""
        url = reverse(url_name, args=args, kwargs=kwargs)
        return url

    @classmethod
    def get_class_action_url(cls, action, *args, **kwargs):
        """Reverse the URL for *action* (e.g. 'create') on this model."""
        url_name = cls.get_action_url_formatter(action)
        return cls.get_class_url(url_name, *args, **kwargs)

    def get_instance_url(self, url_name, *args, **kwargs):
        """Reverse *url_name*, injecting this instance's identifying kwarg."""
        slug_kwarg = self.slug_kwarg_name or 'pk'
        slug_field = self.slug_field_name or 'pk'
        kwargs[slug_kwarg] = getattr(self, slug_field)
        url = reverse(url_name, args=args, kwargs=kwargs)
        return url

    def get_instance_action_url(self, action, *args, **kwargs):
        """Reverse the URL for *action* (e.g. 'detail') on this instance."""
        url_name = self.get_action_url_formatter(action)
        # Bug fix: the raw action name was previously passed instead of the
        # formatted url_name, bypassing the formatter entirely.
        return self.get_instance_url(url_name, *args, **kwargs)
class UrlModel(UrlModelMixin, models.Model):
    """
    A class for any basic model that will only implement the UrlModelMixin.
    """
    # NOTE(review): this is a concrete model, so Django will create a table
    # for it -- confirm whether it was meant to be Meta.abstract.
class CrudUrlModelMixin(UrlModelMixin):
    """
    Provides implementation for a non-lazy evaluation of six 'action' URLs:

    - list (classmethod)
    - search (classmethod)
    - create (classmethod)
    - detail (instance method)
    - update (instance method)
    - delete (instance method)

    Each helper simply delegates to UrlModelMixin's action-URL machinery
    with the corresponding action name.
    """

    def get_absolute_url(self):
        """
        Returns the URL for the detail view assuming that it does not receive
        any other arguments or keywords. The `detail_url` could be overriden
        to make this also work.
        """
        return self.detail_url()

    @classmethod
    def list_url(cls, *args, **kwargs):
        """URL of the list view for this model."""
        return cls.get_class_action_url('list', *args, **kwargs)

    @classmethod
    def search_url(cls, *args, **kwargs):
        """URL of the search view for this model."""
        return cls.get_class_action_url('search', *args, **kwargs)

    @classmethod
    def create_url(cls, *args, **kwargs):
        """URL of the create view for this model."""
        return cls.get_class_action_url('create', *args, **kwargs)

    def detail_url(self, *args, **kwargs):
        """URL of this instance's detail view."""
        return self.get_instance_action_url('detail', *args, **kwargs)

    def update_url(self, *args, **kwargs):
        """URL of this instance's update view."""
        return self.get_instance_action_url('update', *args, **kwargs)

    def delete_url(self, *args, **kwargs):
        """URL of this instance's delete view."""
        return self.get_instance_action_url('delete', *args, **kwargs)
class CrudUrlModel(CrudUrlModelMixin, models.Model):
    """
    This class should be enough for most applications. But you might want
    evaluation of urls to be lazy. Consider using `LazyCrudUrlModel`.
    """
    # NOTE(review): concrete model (creates a table) -- confirm whether
    # Meta.abstract was intended, as for UrlModel.
| mit | Python | |
a4ab01d64c505b786e6fef217829fb56c3d6b6ce | Add management script to generate hansard appearance scores. | geoffkilpin/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,Hutspace/odekro,mysociety/pombola,geoffkilpin/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,patricmutwiri/pombola,hzj123/56th,hzj123/56th,Hutspace/odekro,geoffkilpin/pombola,mysociety/pombola,geoffkilpin/pombola,Hutspace/odekro,hzj123/56th,patricmutwiri/pombola,Hutspace/odekro,ken-muturi/pombola,ken-muturi/pombola,hzj123/56th,mysociety/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola,Hutspace/odekro,patricmutwiri/pombola,patricmutwiri/pombola,geoffkilpin/pombola,ken-muturi/pombola,geoffkilpin/pombola,ken-muturi/pombola | mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py | mzalendo/scorecards/management/commands/scorecard_update_person_hansard_appearances.py | import datetime
from django.core.management.base import NoArgsCommand
from django.core.exceptions import ImproperlyConfigured
class Command(NoArgsCommand):
    """Create or update the 'hansard-appearances' scorecard entry for all MPs.

    Each MP is scored -1/0/+1 based on how many Hansard entries they have
    in roughly the last six months (183 days).
    """
    help = 'Create/update hansard scorecard entry for all mps'
    args = ''

    def handle_noargs(self, **options):
        # Imports are here to avoid an import loop created when the Hansard
        # search indexes are checked
        from core.models import Person
        from scorecards.models import Category, Entry

        # create the category
        try:
            category = Category.objects.get(slug="hansard-appearances")
        except Category.DoesNotExist:
            raise ImproperlyConfigured("Please create a scorecard category with the slug 'hansard-appearances'")

        # Find all the people we should score for
        people = Person.objects.all().is_mp()

        # Roughly six months ago.
        lower_limit = datetime.date.today() - datetime.timedelta(183)

        for person in people:
            # NOTE: We could certainly do all this in a single query.
            hansard_count = person.hansard_entries.filter(sitting__start_date__gte=lower_limit).count()

            # Reuse the existing entry for this person/category if present.
            try:
                entry = person.scorecard_entries.get(category=category)
            except Entry.DoesNotExist:
                entry = Entry(content_object=person, category=category)

            # Bucket the count into a score with a human-readable remark.
            if hansard_count < 6:
                entry.score = -1
                entry.remark = "Hardly ever speaks in parliament"
            elif hansard_count < 60:
                entry.score = 0
                entry.remark = "Sometimes speaks in parliament"
            else:
                entry.score = 1
                entry.remark = "Frequently speaks in parliament"

            entry.date = datetime.date.today()
            entry.save()
| agpl-3.0 | Python | |
4a404709081515fa0cc91683b5a9ad8f6a68eae6 | Add a migration to drop mandatory assessment methods from brief data | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | migrations/versions/630_remove_mandatory_assessment_methods_.py | migrations/versions/630_remove_mandatory_assessment_methods_.py | """Remove mandatory assessment methods from briefs
Revision ID: 630
Revises: 620
Create Date: 2016-06-03 15:26:53.890401
"""
# revision identifiers, used by Alembic.
revision = '630'
down_revision = '620'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import table, column
from sqlalchemy.dialects import postgresql
briefs = table(
'briefs',
column('id', sa.Integer),
column('lot_id', sa.Integer),
column('data', postgresql.JSON),
)
def upgrade():
    """Strip the mandatory assessment methods from each brief's evaluationType.

    'Work history' and 'Written proposal' become implicit, so only the
    optional methods remain stored; the key is removed when nothing is left.
    Only rows that actually change are written back.
    """
    conn = op.get_bind()
    for brief in conn.execute(briefs.select()):
        if brief.data.get('evaluationType') is None:
            continue

        # Keep only the methods that are not mandatory.
        optional_methods = list([
            method for method in brief.data['evaluationType']
            if method not in ['Work history', 'Written proposal']
        ])

        if brief.data['evaluationType'] != optional_methods:
            if optional_methods:
                brief.data['evaluationType'] = optional_methods
            else:
                brief.data.pop('evaluationType')
            conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
                data=brief.data
            ))
def downgrade():
    """Re-add the previously mandatory assessment method for each lot."""
    conn = op.get_bind()
    for brief in conn.execute(briefs.select()):
        # Add written proposal to all outcomes and research participants briefs
        if brief.lot_id in [5, 8]:
            brief.data['evaluationType'] = ['Written proposal'] + brief.data.get('evaluationType', [])
        # Add work history to all specialists briefs
        elif brief.lot_id == 6:
            brief.data['evaluationType'] = ['Work history'] + brief.data.get('evaluationType', [])

        conn.execute(briefs.update().where(briefs.c.id == brief.id).values(
            data=brief.data
        ))
| mit | Python | |
d44fc89f27be0e618d02202b5d067466079be16d | add tool to download and extract latest firmware | zsquareplusc/wipy-environment | download-mcuimg.py | download-mcuimg.py | #! /usr/bin/env python3
import urllib.request
from pprint import pprint
import zipfile
import json
# Fetch metadata for the latest WiPy firmware release from GitHub.
print('Downloading release info..')
release_info = json.loads(urllib.request.urlopen('https://api.github.com/repos/wipy/wipy/releases/latest').read().decode('utf-8'))
# Keep a human-readable dump of the release metadata for reference.
with open('mcuimg.txt', 'w') as f:
    pprint(release_info, f)
print('TAG: {}'.format(release_info['tag_name']))
print('NAME: {}'.format(release_info['name']))
# assumes the first release asset is the firmware ZIP -- TODO confirm
zip_url = release_info['assets'][0]['browser_download_url']
print('Downloading ZIP from: {}'.format(zip_url))
with open('Binaries.zip', 'wb') as f:
    f.write(urllib.request.urlopen(zip_url).read())
# Pull just the firmware image out of the archive.
print('Extracting mcuimg.bin...')
with zipfile.ZipFile('Binaries.zip', 'r') as archive:
    with open('mcuimg.bin', 'wb') as f:
        f.write(archive.open('mcuimg.bin').read())
print('perform firmware upgrade with "wipy-ftp.py upgrade"...')
| bsd-3-clause | Python | |
373bdc41b35f75a15430eb2f9a03a8ab38d401e8 | Test for upcast with parent unbound method. | pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython,pfalcon/micropython | tests/basics/subclass_native6.py | tests/basics/subclass_native6.py | # Calling native base class unbound method with subclass instance.
class mylist(list):
    # Subclass of the native list type; adds nothing of its own.
    pass

l = mylist((1, 2, 3))
assert type(l) is mylist
print(l)
# The unbound native method must accept the subclass instance as 'self'.
list.append(l, 4)
print(l)
| mit | Python | |
cd69cf46d0d40e3f70c9757c981d8a9b75aab9de | Create run_zaspe.py | rabrahm/zaspe,rabrahm/zaspe | run_zaspe.py | run_zaspe.py | import new2
import numpy as np
import pyfits
import time
# Read the run configuration (whitespace-separated key/value pairs)
# from zaspe.pars.  Python 2 script.
f = open('zaspe.pars','r')
lines = f.readlines()
for line in lines:
    cos = line.split()
    if len(cos)==2:
        if cos[0] == 'mod':
            mod = cos[1]
        elif cos[0] == 'spec':
            spec = cos[1]
        elif cos[0] == 'RV0':
            RV0 = float(cos[1])
        elif cos[0] == 'vsini':
            guess_vsini = float(cos[1])
        elif cos[0] == 'RESI':
            RESI = float(cos[1])
        elif cos[0] == 'ncores':
            ncores = int(cos[1])
        elif cos[0] == 'trunc':
            trunc = int(cos[1])
        elif cos[0] == 'nit':
            nit = int(cos[1])
        elif cos[0] == 'nsim':
            nsim = int(cos[1])
        elif cos[0] == 'fixG':
            fixG = float(cos[1])
        elif cos[0] == 'efixG':
            efixG = float(cos[1])
        elif cos[0] == 'T':
            T = float(cos[1])
        elif cos[0] == 'G':
            G = float(cos[1])
        elif cos[0] == 'Z':
            Z = float(cos[1])
        elif cos[0] == 'R':
            R = float(cos[1])
        elif cos[0] == 'V':
            V = float(cos[1])

# Load the spectrum: either directly as FITS, or from a plain-text table
# (order, wave, flux columns) repacked into a temporary FITS cube with
# wavelengths in plane 0 and fluxes in plane 3.
isfits = True
try:
    sc = pyfits.getdata(spec)
    ttemp = spec
except:
    # NOTE(review): bare except also hides unrelated errors -- confirm.
    isfits = False
    d = np.loadtxt(spec)
    ords = np.unique(d[:,0]).astype('int')
    first = True
    for o in ords:
        I = np.where(d[:,0]==o)[0]
        if first:
            wave = d[:,1][I]
            flux = d[:,2][I]
            first = False
        else:
            wave = np.vstack((wave,d[:,1][I]))
            flux = np.vstack((flux,d[:,2][I]))
    sc = np.zeros((4,len(ords),wave.shape[1]))
    sc[0] = wave
    sc[3] = flux
    hdu = pyfits.PrimaryHDU(sc)
    rtemp = 'temp_zaspe_spectra.fits'
    hdu.writeto(rtemp)
    ttemp = spec
    spec = rtemp

print isfits
# Mode 'P': search for the optimal stellar parameters.
if 'P' in mod:
    # NOTE(review): this bare string is a no-op -- presumably meant to be
    # printed; confirm.
    'Performing the search of the optimal parameters ...'
    pars = new2.get_rough_pars(spec,RV0=RV0,guess_vsini=guess_vsini,RESI=RESI,ncores=ncores,\
        trunc=trunc,printing=True,use_masks=True,fixG=fixG,nit=nit)
    print 'ZAPE parameters:'
    print 'Teff=', pars[0], 'log(g)=', pars[1], '[Fe/H]=',pars[2], 'vsin(i)=', pars[3], 'RV=', pars[4]
else:
    # Parameters were not computed, so they must all be given in zaspe.pars.
    if T!=-1 and G!=-1 and Z!=-1 and R!=-1 and V!=-1:
        pars = np.array([T,G,Z,R,V])
    else:
        raise ValueError('You have not requested the computation of the stellar parameters and you have not provided them')

# Mode 'E': Monte Carlo simulation for the parameter uncertainties.
if 'E' in mod:
    print 'Performing the Monte Carlo simulation to obtain the covariance in the parameters ...'
    mc = new2.get_precise_parameters(spec,pars,RESI=RESI,ncores=ncores,trunc=trunc,fixG=fixG,nsim=nsim,efixG=efixG)
    print 'Simulation done.'
    # One-sigma errors from the variance of each sampled parameter column.
    et = np.around(np.sqrt(np.var(mc[:,0])))
    eg = np.around(np.sqrt(np.var(mc[:,1])),3)
    ez = np.around(np.sqrt(np.var(mc[:,2])),3)
    er = np.around(np.sqrt(np.var(mc[:,3])),3)
    ev = np.around(np.sqrt(np.var(mc[:,4])),3)
    print 'ZAPE parameters:'
    print 'Teff=',pars[0], '+/-', et
    print 'log(g)=',pars[1], '+/-', eg
    print '[Fe/H]=',pars[2], '+/-', ez
    print 'vsin(i)=',pars[3], '+/-', er
    print 'RV=', pars[4], '+/-', ev
    # Persist the parameters (row 0) plus all Monte Carlo samples to FITS,
    # naming the output file with a timestamp suffix.
    out = np.vstack((pars,mc))
    hdu = pyfits.PrimaryHDU(out)
    date = str(time.localtime()).split('=')
    # NOTE(review): date[4] is used twice (seconds field never used) --
    # confirm whether date[5] was intended.
    odat = '_'+date[1].split(',')[0]+'_'+date[2].split(',')[0]+'_'+date[3].split(',')[0]+\
        '_'+date[4].split(',')[0]+'_'+date[4].split(',')[0]+'_'+date[6].split(',')[0] + '_'
    rout = ttemp.split('/')[-1][:-5] + odat + 'zaspe_out.fits'
    hdu.header.update('DATE',odat)
    hdu.header.update('SPEC',ttemp)
    hdu.header.update('RES',RESI)
    hdu.header.update('TRUNC',trunc)
    hdu.header.update('FIXG',False)
    if fixG != -1:
        hdu.header['FIXG'] = True
    hdu.header.update('TEFF',pars[0])
    hdu.header.update('LOGG',pars[1])
    hdu.header.update('FEH',pars[2])
    hdu.header.update('VSINI',pars[3])
    hdu.header.update('RV',pars[4])
    hdu.header.update('ETEFF',et)
    hdu.header.update('ELOGG',eg)
    hdu.header.update('EFEH',ez)
    hdu.header.update('EVSINI',er)
    hdu.header.update('ERV',ev)
    # NOTE(review): 'library', 'wi' and 'wf' are never defined in this
    # script, so reaching these lines raises NameError -- confirm.
    hdu.header.update('LIB',library)
    hdu.header.update('WI',wi)
    hdu.header.update('WF',wf)
    hdu.writeto(rout)

# Clean up the temporary FITS file created for text-format input.
# NOTE(review): 'os' is used here but never imported -- confirm.
if not isfits:
    os.system('rm ' + spec)
| mit | Python | |
9ffa7abeccbce24b037a644612681fd397e9d13a | add dict example | devlights/try-python | trypython/basic/dict_preserved_insert_order_py37.py | trypython/basic/dict_preserved_insert_order_py37.py | """
Python 3.7 で 辞書の挿入順序が保持されることを確認するサンプルです。
REFERENCES:: http://bit.ly/2VIggXP
http://bit.ly/2VySRIe
http://bit.ly/2VFhjI4
http://bit.ly/2VEq058
http://bit.ly/2VBKrzK
"""
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    def exec(self):
        """Demonstrate order-preserving de-duplication (idea from http://bit.ly/2VBKrzK)."""
        langs = ['Python', 'Ruby', 'Perl', 'Python', 'JavaScript']

        # Order-preserving de-duplication via sorted + set + list.index
        # (from http://bit.ly/2VBKrzK).
        pr('sorted + set + list.index', sorted(set(langs), key=langs.index))

        # Since Python 3.7 dict insertion order is part of the language
        # spec, so dict.fromkeys keeps order while dropping duplicates.
        pr('dict (python 3.7)', list(dict.fromkeys(langs)))

        # A plain set removes duplicates but guarantees no ordering.
        pr('set (python 3.7)', list(set(langs)))
def go():
    """Create a Sample instance and execute it."""
    Sample().exec()
if __name__ == '__main__':
    # Run the demo when executed as a script.
    go()
| mit | Python | |
f1b91a52b52dfab3b350191ede23731f0a30f4c4 | Add pythonrc | EnTeQuAk/dotfiles,EnTeQuAk/dotfiles,EnTeQuAk/dotfiles,EnTeQuAk/dotfiles,EnTeQuAk/dotfiles | python/pythonrc.py | python/pythonrc.py | #!/usr/bin/env python
# Inspired by https://github.com/dag/dotfiles/blob/master/python/.pythonrc
import os
import readline
# Enable tab completion in the interactive interpreter (Python 2 file).
readline.parse_and_bind('tab: complete')

# Load any previously saved interpreter history.
history = os.path.expanduser("~/.pythonhist")
if os.path.exists(history):
    try:
        readline.read_history_file(history)
    except IOError, e:
        print "Failed to read %r: %s" % (history, e)

# Keep a generous number of history entries.
readline.set_history_length(1024 * 5)
def write_history(history):
    """Return a zero-argument callable that saves readline history to *history*."""
    def _save():
        # Imported lazily so the callable works even if readline state
        # changes between registration and interpreter exit.
        import readline
        readline.write_history_file(history)
    return _save
import atexit
# Persist the in-memory history to disk when the interpreter exits.
atexit.register(write_history(history))
| unlicense | Python | |
12ad56d1360d6140093f2871c32593751b8ae052 | Add modeset_event.py | tomba/kmsxx,tomba/kmsxx,tomba/kmsxx,tomba/kmsxx | py/tests/modeset_event.py | py/tests/modeset_event.py | #!/usr/bin/python3
import pykms
import selectors
import sys
def readdrm(fileobj, mask):
    """Selector callback: drain pending DRM events and invoke their handlers."""
    # assumes the commit stored the handler callable in ev.data -- TODO confirm
    for ev in card.read_events():
        ev.data(ev)
def waitevent(sel):
    """Wait up to one second for a selector event and dispatch its callback.

    Each ready key's ``data`` attribute is treated as a callable taking
    ``(fileobj, mask)``.
    """
    ready = sel.select(1)
    if not ready:
        print("Error: timeout receiving event")
        return
    for key, mask in ready:
        key.data(key.fileobj, mask)
def eventhandler(event):
    """Log a successfully received DRM event (type, sequence, timestamp)."""
    msg = "Received %s event successfully (seq %d time %f)" % (
        event.type, event.seq, event.time)
    print(msg)
card = pykms.Card()

# Watch the DRM fd for incoming events (page-flip completion etc.).
sel = selectors.DefaultSelector()
sel.register(card.fd, selectors.EVENT_READ, readdrm)

res = pykms.ResourceManager(card)
conn = res.reserve_connector()
crtc = res.reserve_crtc(conn)
pplane = res.reserve_primary_plane(crtc)
mode = conn.get_default_mode()
modeb = mode.to_blob(card)

# Pick the first plane format that is either XRGB8888 or RGB565 (plane
# order decides).  NOTE(review): if neither is supported, 'format' silently
# ends up as the last entry -- confirm that is intended.
for format in pplane.formats:
    if format == pykms.PixelFormat.XRGB8888:
        break
    if format == pykms.PixelFormat.RGB565:
        break

fb = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb);

# Disable request
card.disable_planes()

print("Setting %s to %s using %s" % (conn.fullname, mode.name, format))

# Build one atomic commit: connector -> CRTC, mode, and a fullscreen
# primary plane (SRC_* are 16.16 fixed point).
req = pykms.AtomicReq(card)

req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
        "MODE_ID": modeb.id})
req.add(pplane, {"FB_ID": fb.id,
        "CRTC_ID": crtc.id,
        "SRC_X": 0 << 16,
        "SRC_Y": 0 << 16,
        "SRC_W": mode.hdisplay << 16,
        "SRC_H": mode.vdisplay << 16,
        "CRTC_X": 0,
        "CRTC_Y": 0,
        "CRTC_W": mode.hdisplay,
        "CRTC_H": mode.vdisplay})

# Dry-run the commit first so failures are reported before touching hardware.
ret = req.test(True)
if ret != 0:
    print("Atomic test failed: %d" % ret)
    sys.exit()

req.commit(eventhandler, allow_modeset = True)
waitevent(sel)

input("press enter to exit\n")
| mpl-2.0 | Python | |
2ab8680c1a5e420de3f6b82db9a994eaeace164f | Add a snippet. | jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets,jeremiedecock/snippets | python/unicode/unicode.py | python/unicode/unicode.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013 Jérémie DECOCK (http://www.jdhp.org)
# Python 2 demo of str vs unicode behaviour.
# NOTE(review): the non-ASCII literals below appear double-encoded
# (mojibake) in this copy -- confirm the original file's encoding.

# DEFINE
str1 = "Hello!"
unicode_obj1 = u"¡Buenos días!"
unicode_obj2 = u"你好!"

# PRINT
print
print str1
print unicode_obj1
print unicode_obj2

# CONCAT (mixing str and unicode promotes the result to unicode)
print
print str1 + " " + unicode_obj1 + " " + unicode_obj2,
print type(str1 + " " + unicode_obj1 + " " + unicode_obj2)

# PRINT TYPE
print
print str1, type(str1)
print unicode_obj1, type(unicode_obj1)
print unicode_obj2, type(unicode_obj2)

# LENGTH (len of a unicode object counts code points, not bytes)
print
print "len(", unicode_obj2, ") = ", len(unicode_obj2)
print u"len({0}) = {1}".format(unicode_obj2, len(unicode_obj2))
print u"len(%s) = %s" % (unicode_obj2, len(unicode_obj2))

# UNICODE TO ASCII (你好! -> )
# ASCII TO UNICODE ( -> 你好!)

# UNICODE TO HEX ASCII (你好! -> hex)
print
hex_str = unicode_obj2.encode("utf-8").encode("hex")
print "{0} {1} (len: {2})".format(hex_str, type(hex_str), len(hex_str))

hex_list = [unicode_char.encode("utf-8").encode("hex") for unicode_char in unicode_obj2]
print(hex_list)
| mit | Python | |
f6acf955904765f57ba15837fd6440a524590268 | add migrations | Ilhasoft/ureport,xkmato/ureport,Ilhasoft/ureport,rapidpro/ureport,rapidpro/ureport,eHealthAfrica/ureport,rapidpro/ureport,auduaboki/ureport,Ilhasoft/ureport,Ilhasoft/ureport,eHealthAfrica/ureport,rapidpro/ureport,xkmato/ureport,xkmato/ureport,eHealthAfrica/ureport,auduaboki/ureport,auduaboki/ureport | ureport/polls/migrations/0024_auto_20160118_0934.py | ureport/polls/migrations/0024_auto_20160118_0934.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Alters Poll.poll_date to a plain (non-null) DateTimeField, after
    # migration 0023 populated it for existing rows.

    dependencies = [
        ('polls', '0023_populate_flow_date'),
    ]

    operations = [
        migrations.AlterField(
            model_name='poll',
            name='poll_date',
            field=models.DateTimeField(),
        ),
    ]
| agpl-3.0 | Python | |
5390abc3f53f18515cd9a658d6286ac8a9b09d81 | Create parrot_trouble.py | dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey,dvt32/cpp-journey | Python/CodingBat/parrot_trouble.py | Python/CodingBat/parrot_trouble.py | # http://codingbat.com/prob/p166884
def parrot_trouble(talking, hour):
    """Return True when we are in trouble: the parrot is talking before
    7:00 or after 20:00 (hour is in 0..23)."""
    # Return the boolean expression directly instead of the redundant
    # if/else that returned literal True/False.
    return bool(talking and (hour < 7 or hour > 20))
| mit | Python | |
7c4df6bfa4d8d2370c96ffd9efe0017447629a5d | add dep-free baseclass for typing | graphistry/pygraphistry,graphistry/pygraphistry | graphistry/Plottable.py | graphistry/Plottable.py | from typing import Iterable, List, Optional, Union
from typing_extensions import Protocol
import pandas as pd
class Plottable(Protocol):
    """Structural interface for graph objects that carry node/edge tables
    and a fluent binding API."""

    @property
    def _point_title(self) -> Optional[str]:
        # Bound column name for node titles; None when not bound.
        return None

    @property
    def _point_label(self) -> Optional[str]:
        # Bound column name for node labels; None when not bound.
        return None

    @property
    def _nodes(self) -> Optional[pd.DataFrame]:
        # Node table; None until nodes() has been called.
        return None

    @property
    def _edges(self) -> Optional[pd.DataFrame]:
        # Edge table; None until edges() has been called.
        return None

    def nodes(self, nodes: pd.DataFrame, node: Optional[str]) -> 'Plottable':
        # Fluent setter for the node table; returns the (possibly new) object.
        return self

    def edges(self, nodes: pd.DataFrame, source: Optional[str], destination: Optional[str]) -> 'Plottable':
        # Fluent setter for the edge table; returns the (possibly new) object.
        return self

    def bind(self, **kwargs) -> 'Plottable':
        # Fluent setter for arbitrary visual bindings.
        return self
| bsd-3-clause | Python | |
14a9296056c4dede324465791052119890f40725 | add a TransactionTestCase to cover the flush command | novafloss/django-north,novafloss/django-north | tests/functionals/test_transactiontestcase.py | tests/functionals/test_transactiontestcase.py | from django.test import TransactionTestCase
from tests.north_app.models import Author
from tests.north_app.models import Book
class BookTestCase(TransactionTestCase):
    """Exercise Book deletion inside a TransactionTestCase (real
    transactions, so the flush path is covered)."""

    def setUp(self):
        # One author with two books; each test starts from this fixture.
        self.author = Author.objects.create(name="George R. R. Martin")
        self.book1 = Book.objects.create(
            author=self.author,
            title="A Game of Thrones",
            pages=1234)
        self.book2 = Book.objects.create(
            author=self.author,
            title="A Clash of Kings",
            pages=1235)

    def test_delete_book(self):
        """Deleting one book leaves the author with exactly one book."""
        self.book1.delete()
        # assertEquals is a deprecated alias; assertEqual is the
        # canonical unittest spelling.
        self.assertEqual(self.author.book_set.count(), 1)
| mit | Python | |
9b345bba13b572ebdd52c6dca534a7cf95e11335 | Add examples | cujomalainey/matrixtoolkit | examples/colors.py | examples/colors.py | from PIL import Image, ImageDraw
from time import sleep
OFF_TARGET = True
if OFF_TARGET:
from matrixtoolkit import Adafruit_RGBmatrix
else:
from rgbmatrix import Adafruit_RGBmatrix
class drawer():
    """
    handles controls what is being drawn
    """
    def __init__(self):
        # this config switch is optional as scale is by default 6
        if OFF_TARGET:
            # Off-target simulator accepts an extra display scale factor.
            self.matrix = Adafruit_RGBmatrix(32, 4, scale=5)
        else:
            self.matrix = Adafruit_RGBmatrix(32, 4)
        self.alive = True

    def run(self):
        # The simulator drives the loop itself via start(); on hardware we
        # run the drawing loop directly.
        if OFF_TARGET:
            self.matrix.start(self.main, self.kill)
        else:
            self.main()

    def main(self):
        # NOTE(review): 'draw' is created but never used -- confirm intent.
        self.image = Image.new('RGB', (64, 32))
        draw = ImageDraw.Draw(self.image)
        try:
            # Fill the whole panel green until kill() flips the flag.
            while self.alive:
                self.matrix.Fill((0, 255, 0))
        except KeyboardInterrupt:
            # hook in to make sure any future deconstructors are called
            self.kill()

    def updateMatrix(self, image):
        # The simulator takes a PIL image; hardware wants the raw image id.
        self.matrix.SetImage(image if OFF_TARGET else
                image.im.id, 0, 0)

    def kill(self):
        # Stop the drawing loop on the next iteration.
        self.alive = False
if __name__ == '__main__':
    # Run the demo when executed as a script.
    d = drawer()
    d.run()
| mit | Python | |
6fdf7cc68e05ce6e8e18306eca7d8e36d1a166ea | Add Client class to abstract from different datbase clients | wearhacks/hackathon_hotline | hotline/db/db_client.py | hotline/db/db_client.py | import importlib
import os
class DBClient:
    """Thin facade that dispatches to a backend-specific client module.

    The backend module is resolved dynamically as ``db.db_<type>`` and is
    expected to expose a ``<Type>Client`` class taking the connection URL.
    """

    # Default connection URLs per supported backend.
    db_defaults = {'mongo': 'mongodb://localhost:27017/',
                   'redis': 'redis://localhost:6379',
                   'postgresql': 'postgresql://localhost:5432'
                   }

    def __init__(self, url=None, db_type=None, db_name=None):
        # NOTE(review): db_name is accepted but never used -- confirm intent.
        self.db_type = db_type
        self.url = url or DBClient.db_defaults[db_type]
        db_module = importlib.import_module('db.db_{0}'.format(db_type))
        # e.g. db.db_mongo -> MongoClient(url)
        self.client = getattr(db_module, '{0}Client'.format(db_type.capitalize()))(self.url)

    def connect(self):
        # Placeholder; concrete connection logic lives in the backend client.
        pass
# Update later to remove default db_type 'mongo'
# Module-level singleton used by the rest of the hotline package.
db_client = DBClient(db_type='mongo')
db_client.connect()
| mit | Python | |
aac6b16b3c532d74d788cbad942af6a147a06f4b | add broadcast org | alphagov/notifications-api,alphagov/notifications-api | migrations/versions/0331_add_broadcast_org.py | migrations/versions/0331_add_broadcast_org.py | """
Revision ID: 0331_add_broadcast_org
Revises: 0330_broadcast_invite_email
Create Date: 2020-09-23 10:11:01.094412
"""
from alembic import op
import sqlalchemy as sa
import os
revision = '0331_add_broadcast_org'
down_revision = '0330_broadcast_invite_email'
environment = os.environ['NOTIFY_ENVIRONMENT']
organisation_id = '38e4bf69-93b0-445d-acee-53ea53fe02df'
def upgrade():
    """Create the shared 'Broadcast Services' organisation and attach every
    service holding the 'broadcast' permission to it (non-production only)."""
    # we've already done this manually on production
    if environment != "production":
        insert_sql = """
            INSERT INTO organisation
            (
                id,
                name,
                active,
                created_at,
                agreement_signed,
                crown,
                organisation_type
            )
            VALUES (
                :id,
                :name,
                :active,
                current_timestamp,
                :agreement_signed,
                :crown,
                :organisation_type
            )
        """
        update_service_set_broadcast_org_sql = """
            UPDATE services
            SET organisation_id = :organisation_id
            WHERE id in (
                SELECT service_id
                FROM service_permissions
                WHERE permission = 'broadcast'
            )
        """
        conn = op.get_bind()
        # Name includes the environment so the org is identifiable per stage.
        conn.execute(
            sa.text(insert_sql),
            id=organisation_id,
            name=f'Broadcast Services ({environment})',
            active=True,
            agreement_signed=None,
            crown=None,
            organisation_type='central',
        )
        conn.execute(
            sa.text(update_service_set_broadcast_org_sql),
            organisation_id=organisation_id
        )
def downgrade():
    """Detach all services from the broadcast organisation, then delete it."""
    update_service_remove_org_sql = """
        UPDATE services
        SET organisation_id = NULL, updated_at = current_timestamp
        WHERE organisation_id = :organisation_id
    """
    delete_sql = """
        DELETE FROM organisation
        WHERE id = :organisation_id
    """
    conn = op.get_bind()
    # Services must be detached first to satisfy the FK to organisation.
    conn.execute(sa.text(update_service_remove_org_sql), organisation_id=organisation_id)
    conn.execute(sa.text(delete_sql), organisation_id=organisation_id)
| mit | Python | |
353edcdcfae15f06b998a4ad1481b3ad99e514bd | Remove easeventuid migration. | closeio/nylas,closeio/nylas,Eagles2F/sync-engine,PriviPK/privipk-sync-engine,ErinCall/sync-engine,closeio/nylas,nylas/sync-engine,PriviPK/privipk-sync-engine,Eagles2F/sync-engine,Eagles2F/sync-engine,jobscore/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,ErinCall/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,nylas/sync-engine,PriviPK/privipk-sync-engine,nylas/sync-engine,gale320/sync-engine,wakermahmud/sync-engine,nylas/sync-engine,wakermahmud/sync-engine,closeio/nylas,EthanBlackburn/sync-engine,jobscore/sync-engine,EthanBlackburn/sync-engine,EthanBlackburn/sync-engine,wakermahmud/sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,ErinCall/sync-engine,jobscore/sync-engine,gale320/sync-engine,PriviPK/privipk-sync-engine,EthanBlackburn/sync-engine,Eagles2F/sync-engine,jobscore/sync-engine,Eagles2F/sync-engine | migrations/versions/127_remove_easeventuid.py | migrations/versions/127_remove_easeventuid.py | """remove easeventuid
Revision ID: 581e91bd7141
Revises: 262436681c4
Create Date: 2015-01-10 00:57:50.944460
"""
# revision identifiers, used by Alembic.
revision = '581e91bd7141'
down_revision = '262436681c4'
from alembic import op
def upgrade():
    """Drop the easeventuid table (and its foreign keys) if it still exists."""
    # presumably a local import to avoid import-time side effects -- confirm
    from inbox.ignition import main_engine
    engine = main_engine()

    # Some deployments never had the table; nothing to do there.
    if not engine.has_table('easeventuid'):
        return

    # Foreign keys must go before the table itself can be dropped.
    op.drop_constraint('easeventuid_ibfk_1', 'easeventuid', type_='foreignkey')
    op.drop_constraint('easeventuid_ibfk_2', 'easeventuid', type_='foreignkey')
    op.drop_constraint('easeventuid_ibfk_3', 'easeventuid', type_='foreignkey')

    op.drop_table('easeventuid')
def downgrade():
    # Dropped data cannot be restored; this migration is deliberately one-way.
    raise Exception('No going back.')
| agpl-3.0 | Python | |
5f7344b8a99880bec7195b951b495970116f0b0d | Initialize P2_blankRowInserter | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P2_blankRowInserter.py | books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P2_blankRowInserter.py | # Create a program blankRowInserter.py that takes two integers and a filename
# string as command line arguments. Let’s call the first integer N and the second
# integer M. Starting at row N, the program should insert M blank rows into the
# spreadsheet.
| mit | Python | |
37f5ddd7e8802b5d5213b5cadb905c39abe92dfc | Add test.. | Clinical-Genomics/scout,Clinical-Genomics/scout,Clinical-Genomics/scout | tests/adapter/mongo/test_case_group_handling.py | tests/adapter/mongo/test_case_group_handling.py | import pytest
import copy
import pymongo
import logging
logger = logging.getLogger(__name__)
def test_init_case_group(adapter, institute_obj):
    """init_case_group should return a truthy handle for a new group."""
    # given a database and an institute
    owner = institute_obj["_id"]
    # when attempting to create a case group
    result = adapter.init_case_group(owner)
    # the result is ok
    assert result
def test_remove_case_group(adapter, institute_obj):
    """A freshly created case group can be removed again."""
    # given a database and an institute
    owner = institute_obj["_id"]
    # when successfully creating a case group
    resulting_id = adapter.init_case_group(owner)
    assert resulting_id
    # when removing it again
    result = adapter.remove_case_group(resulting_id)
    # the result is ok
    assert result
| bsd-3-clause | Python | |
28ad4d2770921c7d148b00ed0533b9051fb08122 | enable utils.get to get any url with or without selector/username/password | kissiel/btc,bittorrent/btc | utils.py | utils.py | #! /usr/bin/env python
import httplib, mimetypes, base64
def encode_multipart_formdata(fields, files):
    """
    Encode form data as a multipart/form-data request body.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files
    Return (content_type, body) ready for httplib.HTTP instance
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename, value) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        # Bug fix: previously the fallback applied to the already-formatted
        # string ('Content-Type: None' is truthy), so unknown file types
        # never fell back to application/octet-stream.
        file_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
        L.append('Content-Type: %s' % file_type)
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def post_multipart(host, selector, fields, files, username, password):
    """
    Post fields and files to an http host as multipart/form-data.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be
    uploaded as files.
    Authenticates with HTTP Basic auth using username/password.
    Return the server's response page.

    NOTE: relies on Python 2 APIs (httplib, base64.encodestring).
    """
    # encodestring appends a trailing newline; [:-1] strips it so the
    # Authorization header value stays on one line.
    base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    content_type, body = encode_multipart_formdata(fields, files)
    h = httplib.HTTPConnection(host)
    headers = { 'Authorization': 'Basic %s' % base64string,
                'Content-Type': content_type,
                'Content-Length': str(len(body)) }
    h.request('POST', selector, body, headers)
    return h.getresponse().read()
def get(host, selector="", username=None, password=None):
    """HTTP GET host/selector; when a username is supplied, send HTTP
    Basic auth credentials.  Returns the raw response body."""
    headers = {}
    if username:
        # encodestring appends a newline; strip it for the header value.
        token = base64.encodestring('%s:%s' % (username, password))[:-1]
        headers['Authorization'] = 'Basic %s' % token
    conn = httplib.HTTPConnection(host)
    conn.request('GET', selector, "", headers)
    return conn.getresponse().read()
| #! /usr/bin/env python
import httplib, mimetypes, base64
def encode_multipart_formdata(fields, files):
    """
    Build a multipart/form-data request body.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be
    uploaded as files.
    Return (content_type, body) ready for an httplib.HTTP(Connection).
    """
    BOUNDARY = '----------ThIs_Is_tHe_bouNdaRY_$'
    CRLF = '\r\n'
    L = []
    for (key, value) in fields:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"' % key)
        L.append('')
        L.append(value)
    for (key, filename, value) in files:
        L.append('--' + BOUNDARY)
        L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
        # BUG FIX: '%' binds tighter than 'or', so the original expression
        # could never fall back and produced "Content-Type: None" for
        # unknown extensions.  Parenthesise so the fallback applies to
        # the guessed MIME type itself.
        L.append('Content-Type: %s' % (mimetypes.guess_type(filename)[0] or 'application/octet-stream'))
        L.append('')
        L.append(value)
    L.append('--' + BOUNDARY + '--')
    L.append('')
    body = CRLF.join(L)
    content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
    return content_type, body
def post_multipart(host, selector, fields, files, username, password):
    """
    Post fields and files to an http host as multipart/form-data.

    fields is a sequence of (name, value) elements for regular form fields.
    files is a sequence of (name, filename, value) elements for data to be
    uploaded as files.
    Authenticates with HTTP Basic auth using username/password.
    Return the server's response page.

    NOTE: relies on Python 2 APIs (httplib, base64.encodestring).
    """
    # encodestring appends a trailing newline; [:-1] strips it.
    base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    content_type, body = encode_multipart_formdata(fields, files)
    h = httplib.HTTPConnection(host)
    headers = { 'Authorization': 'Basic %s' % base64string,
                'Content-Type': content_type,
                'Content-Length': str(len(body)) }
    h.request('POST', selector, body, headers)
    return h.getresponse().read()
def get(host, selector, username, password):
    """Perform an HTTP GET on host/selector with HTTP Basic auth and
    return the raw response body.

    NOTE: relies on Python 2 APIs (httplib, base64.encodestring).
    """
    # encodestring appends a trailing newline; [:-1] strips it.
    base64string = base64.encodestring('%s:%s' % (username, password))[:-1]
    h = httplib.HTTPConnection(host)
    headers = { 'Authorization': 'Basic %s' % base64string }
    h.request('GET', selector, "", headers)
    return h.getresponse().read()
| mit | Python |
ecbc691307c43ad06d7f539f008fccbff690d538 | Add unit tests for the precomputed_io module | HumanBrainProject/neuroglancer-scripts | unit_tests/test_precomputed_io.py | unit_tests/test_precomputed_io.py | # Copyright (c) 2018 CEA
# Author: Yann Leprince <yann.leprince@cea.fr>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import numpy as np
import pytest
from neuroglancer_scripts.accessor import get_accessor_for_url
from neuroglancer_scripts.chunk_encoding import InvalidInfoError
from neuroglancer_scripts.precomputed_io import (
get_IO_for_existing_dataset,
get_IO_for_new_dataset,
)
# Minimal Neuroglancer "precomputed" dataset descriptor shared by the
# tests below: one uint16 image scale of size 8x3x15 voxels, stored as
# raw-encoded chunks of 8x8x8 under the scale key "key".
DUMMY_INFO = {
    "type": "image",
    "data_type": "uint16",
    "num_channels": 1,
    "scales": [
        {
            "key": "key",
            "size": [8, 3, 15],
            "resolution": [1e6, 1e6, 1e6],
            "voxel_offset": [0, 0, 0],
            "chunk_sizes": [[8, 8, 8]],
            "encoding": "raw",
        }
    ]
}
def test_precomputed_IO_chunk_roundtrip(tmpdir):
    """A chunk written through the IO layer reads back identically,
    both on the writing IO object and on a fresh IO object re-opened
    from the on-disk info file."""
    accessor = get_accessor_for_url(str(tmpdir))
    # Minimal info file
    io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    # Chunk data shaped (channel, Z, Y, X) = (1, 7, 3, 8), matching the
    # extents of chunk_coords (xmin, xmax, ymin, ymax, zmin, zmax) below.
    dummy_chunk = np.arange(8 * 3 * 7, dtype="uint16").reshape(1, 7, 3, 8)
    chunk_coords = (0, 8, 0, 3, 8, 15)
    io.write_chunk(dummy_chunk, "key", chunk_coords)
    assert np.array_equal(io.read_chunk("key", chunk_coords), dummy_chunk)
    # Re-open the dataset from disk: info and chunk data must round-trip.
    io2 = get_IO_for_existing_dataset(accessor)
    assert io2.info == DUMMY_INFO
    assert np.array_equal(io2.read_chunk("key", chunk_coords), dummy_chunk)
def test_precomputed_IO_info_error(tmpdir):
    """An unparseable info file must raise InvalidInfoError on open."""
    # Plant a deliberately corrupt info file in the dataset directory.
    info_path = tmpdir / "info"
    with info_path.open("w") as fobj:
        fobj.write("invalid JSON")
    accessor = get_accessor_for_url(str(tmpdir))
    with pytest.raises(InvalidInfoError):
        get_IO_for_existing_dataset(accessor)
def test_precomputed_IO_validate_chunk_coords(tmpdir):
    """Chunk coordinates are validated against the scale's chunk layout."""
    accessor = get_accessor_for_url(str(tmpdir))
    # Minimal info file
    io = get_IO_for_new_dataset(DUMMY_INFO, accessor)
    aligned_coords = (0, 8, 0, 3, 0, 8)      # matches DUMMY_INFO's layout
    misaligned_coords = (0, 8, 1, 4, 0, 8)   # Y range shifted off-grid
    assert io.validate_chunk_coords("key", aligned_coords) is True
    assert io.validate_chunk_coords("key", misaligned_coords) is False
| mit | Python | |
de4f3d3b31b5336cb541c0e6d17f198799c4dc53 | Remove unnecessary argument | ProsperWorks/graphite-pager,seatgeek/graphite-pager,ProsperWorks/graphite-pager,seatgeek/graphite-pager | graphitepager/config.py | graphitepager/config.py | import os
import yaml
from alerts import Alert
def contents_of_file(filename):
    """Return the entire text content of *filename*.

    Uses a ``with`` block so the file handle is closed even if the read
    raises (the previous version leaked the handle on error).
    """
    with open(filename) as open_file:
        return open_file.read()
def get_config(path):
    """Factory helper: load the YAML config file at *path* into a Config."""
    return Config(path)
class Config(object):
    """Wrapper around the YAML alert configuration file.

    Lookup precedence for ``get``/``has``: environment variable first
    (exact key), then the YAML document (lower-cased key).
    """

    def __init__(self, path):
        # Parse the YAML config file once at construction time.
        alert_yml = contents_of_file(path)
        self._data = yaml.load(alert_yml)

    def data(self, key):
        """Raw access to the YAML document; raises KeyError when absent."""
        return self._data[key]

    def get(self, key, default=None):
        """Environment variable *key*, else YAML value for the lower-cased
        key, else *default*."""
        return os.environ.get(key, self._data.get(key.lower(), default))

    def has(self, key):
        """True only when *key* maps to a non-None, non-empty value in the
        YAML document or the environment."""
        value = None
        _key = key.lower()
        if _key in self._data:
            value = self._data[_key]
        elif key in os.environ:
            value = os.environ.get(key, None)
        return value is not None and value != ''

    def get_alerts(self):
        """Build Alert objects from the 'alerts' section of the config.

        BUG FIX: previously read ``self.config.data(...)``, but Config
        never defines a ``config`` attribute, so this always raised
        AttributeError; the data lives on ``self`` itself.
        """
        alerts = []
        doc_url = self.data('docs_url')
        for alert_string in self.data('alerts'):
            alerts.append(Alert(alert_string, doc_url))
        return alerts

    def has_keys(self, keys):
        """True only if every key in *keys* satisfies ``has``."""
        for key in keys:
            if self.has(key) is False:
                return False
        return True
| import os
import yaml
from alerts import Alert
def contents_of_file(filename):
    """Return the entire text content of *filename*.

    Uses a ``with`` block so the file handle is closed even if the read
    raises (the previous version leaked the handle on error).
    """
    with open(filename) as open_file:
        return open_file.read()
def get_config(path):
    """Factory helper: load the YAML config file at *path* into a Config."""
    return Config(path)
class Config(object):
    """Wrapper around the YAML alert configuration; lookups fall back
    from environment variables to the (lower-cased) YAML keys."""

    def __init__(self, path):
        # Parse the YAML config file once at construction time.
        alert_yml = contents_of_file(path)
        self._data = yaml.load(alert_yml)

    def data(self, key):
        # Raw access to the YAML document; raises KeyError when absent.
        return self._data[key]

    def get(self, key, default=None):
        # Environment variable wins; otherwise the lower-cased YAML key.
        return os.environ.get(key, self._data.get(key.lower(), default))

    def has(self, key):
        # A key "exists" only when it maps to a non-None, non-empty value.
        value = None
        _key = key.lower()
        if _key in self._data:
            value = self._data[_key]
        elif key in os.environ:
            value = os.environ.get(key, None)
        return value is not None and value != ''

    def get_alerts(self, config):
        # NOTE(review): the `config` argument is unused, and `self.config`
        # is never defined on this class, so this method raises
        # AttributeError as written -- it presumably should call
        # `self.data(...)` directly.  TODO confirm and fix.
        alerts = []
        doc_url = self.config.data('docs_url')
        for alert_string in self.config.data('alerts'):
            alerts.append(Alert(alert_string, doc_url))
        return alerts

    def has_keys(self, keys):
        # True only when every key in `keys` passes `has`.
        for key in keys:
            if self.has(key) is False:
                return False
        return True
| bsd-2-clause | Python |
3ce64bd781b59fffe42a59155a6f81f641647653 | add package information | neuropower/neurodesign | source/src/info.py | source/src/info.py | # -*- coding: utf-8 -*-
"""
Base module variables
"""
__version__ = '0.2.00'
__author__ = 'Joke Durnez'
__license__ = 'MIT'
__email__ = 'joke.durnez@gmail.com'
__status__ = 'Prototype'
__url__ = 'https://www.neuropowertools.org'
__packagename__ = 'neurodesign'
| mit | Python | |
117ddac033b0b337ced9589851e74056740cdb3e | patch to create workflow for existing leave applications | gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext,gsnbng/erpnext | erpnext/patches/v10_0/workflow_leave_application.py | erpnext/patches/v10_0/workflow_leave_application.py | # Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
def execute():
    """Patch: migrate Leave Application approval to a Frappe Workflow.

    Creates an "Open" Workflow State and a "Leave Approval" Workflow
    (Open -> Approved / Rejected, restricted to the Leave Approver role),
    copies the legacy ``status`` column into the new ``workflow_state``
    field, then drops ``status``.
    """
    # Make sure the latest doctype schemas are loaded before inserting.
    frappe.reload_doc("hr", "doctype", "leave_application")
    frappe.reload_doc("workflow", "doctype", "workflow")
    # New "Open" state rendered with the Warning indicator style.
    doc = frappe.get_doc({
        'doctype': 'Workflow State',
        'workflow_state_name': 'Open',
        'style': 'Warning'
    }).insert(ignore_permissions=True)
    # Workflow definition: doc_status 0 (draft, editable by the Employee)
    # while Open; doc_status 1 (submitted) once Approved or Rejected.
    doc = frappe.get_doc({
        'doctype': 'Workflow',
        'workflow_name': 'Leave Approval',
        'document_type': 'Leave Application',
        'is_active': 1,
        'workflow_state_field': 'workflow_state',
        'states': [{
            "state": 'Open',
            "doc_status": 0,
            "allow_edit": 'Employee'
        }, {
            "state": 'Approved',
            "doc_status": 1,
            "allow_edit": 'Leave Approver'
        }, {
            "state": 'Rejected',
            "doc_status": 1,
            "allow_edit": 'Leave Approver'
        }],
        'transitions': [{
            "state": 'Open',
            "action": 'Approve',
            "next_state": 'Approved',
            "allowed": 'Leave Approver'
        },
        {
            "state": 'Open',
            "action": 'Reject',
            "next_state": 'Rejected',
            "allowed": 'Leave Approver'
        }]
    }).insert(ignore_permissions=True)
    # Seed the new field from the legacy status values, then drop the
    # now-redundant column.
    frappe.db.sql("""update `tabLeave Application` set workflow_state = status""")
    frappe.db.sql("""alter table `tabLeave Application` drop column status""")
| agpl-3.0 | Python | |
3aaa64c7ca9721e74fd52d3274a91fdd4c4cb678 | add initial test cron | mozilla/service-map | cron.py | cron.py | import boto3
import credstash
import gspread
import json
from oauth2client.service_account import ServiceAccountCredentials
from oauth2client import file, client, tools
from models.v1.assets.asset import Asset
from models.v1.asset_groups.asset_group import AssetGroup
from models.v1.services.service import Service
def event(event, context):
    """Lambda/cron handler: fetch the RRA register spreadsheet from
    Google Drive and print each data row as JSON.

    Reads the Google service-account credentials from credstash,
    authorizes gspread, then walks the "RRA3" worksheet: row 1 supplies
    the field names (lower-cased, spaces -> underscores) and each later
    row is zipped against those names.
    """
    print('event: {}'.format(event))
    # get our gdrive creds
    # and auth to google
    gcreds_json=credstash.getSecret(
        name="serviceapi.gdrive",
        context={'app': 'serviceapi'},
        region="us-east-1"
    )
    # NOTE(review): trailing space in the drive.file scope URL below --
    # looks unintentional; confirm Google accepts it.
    scopes = ['https://www.googleapis.com/auth/drive.metadata.readonly',
            'https://www.googleapis.com/auth/drive.file ',
            'https://www.googleapis.com/auth/drive']
    credentials = ServiceAccountCredentials.from_json_keyfile_dict(json.loads(gcreds_json),scopes)
    gs = gspread.authorize(credentials)
    # get rras
    rras=gs.open("Mozilla Information Security Risk Register").worksheet("RRA3")
    heading_keys=[]
    for r in range(1,rras.row_count):
        if r==1:
            row_keys=rras.row_values(r)
            for key in row_keys:
                #lowercase and underscore the keys to fields
                heading_keys.append(key.lower().replace(' ','_'))
        elif r >88:
            # NOTE(review): rows 2-88 are skipped entirely by this
            # condition; looks like a leftover debugging offset --
            # confirm the intended starting row.
            row=rras.row_values(r)
            if len(row)==0:
                # Stop at the first empty row (end of data).
                break
            else:
                print (json.dumps(dict(zip(heading_keys, row)),indent=4))
| mpl-2.0 | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.