commit stringlengths 40 40 | subject stringlengths 4 1.73k | repos stringlengths 5 127k | old_file stringlengths 2 751 | new_file stringlengths 2 751 | new_contents stringlengths 1 8.98k | old_contents stringlengths 0 6.59k | license stringclasses 13 values | lang stringclasses 23 values |
|---|---|---|---|---|---|---|---|---|
8f15a2964c1cbbd85ed8301997c05a38268c79a7 | add script used during position comparison | ekg/vg,ekg/vg,ekg/vg | scripts/pos_compare.py | scripts/pos_compare.py | #!/usr/bin/python
import sys
threshold = 100
for line in sys.stdin:
fields = line.split(' ')
aln_name = fields[0]
true_chr = fields[1]
true_pos = int(fields[2])
aln_chr = fields[3]
aln_pos = int(fields[4])
aln_mapq = int(fields[5])
aln_correct = 1 if aln_chr == true_chr and abs(true_pos - aln_pos) < threshold else 0
print aln_name, aln_correct, aln_mapq
| mit | Python | |
bb2fa19aa09e5687e13dedf40da1c7a2507c4c62 | add script to replace `@` and `.` in slugsin for input to gr1x | johnyf/gr1experiments | examples/slugsin_chars_for_gr1x.py | examples/slugsin_chars_for_gr1x.py | import argparse
def main():
p = argparse.ArgumentParser()
p.add_argument('source', type=str,
help='input file')
p.add_argument('target', type=str,
help='output file')
args = p.parse_args()
with open(args.source, 'r') as f:
s = f.read()
snew = s.replace('.', 'dot')
snew = snew.replace('@', 'at')
with open(args.target, 'w') as f:
f.write(snew)
if __name__ == '__main__':
main()
| bsd-3-clause | Python | |
1c615be1d3da720d2d0a1974808e3856cbd9d498 | Create Virgil highlevel api implementation | VirgilSecurity/virgil-sdk-python | virgil_sdk/api/virgil_api.py | virgil_sdk/api/virgil_api.py | # Copyright (C) 2016 Virgil Security Inc.
#
# Lead Maintainer: Virgil Security Inc. <support@virgilsecurity.com>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# (1) Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# (2) Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# (3) Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ''AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
from virgil_sdk.api import IdentitiesManager
from virgil_sdk.api import VirgilContext
from virgil_sdk.api.card_manager import CardManager
from virgil_sdk.api.key_manager import KeyManager
class Virgil(object):
"""The Virgil class is a high-level API that provides easy access to
Virgil Security services and allows to perform cryptographic operations by using two domain entities
VirgilKey and VirgilCard. Where the VirgilKey is an entity
that represents a user's Private key, and the VirgilCard is the entity that represents
user's identity and a Public key."""
def __init__(
self,
access_token=None, # type: str
context=None # type: VirgilContext
):
# type: (...) -> None
self.__access_token = access_token
self._context = context
self.keys = KeyManager(self.__context)
self.cards = CardManager(self.__context)
self.identities = IdentitiesManager(self.__context)
@property
def __context(self):
# type: () -> VirgilContext
"""Gets context for further use in api"""
if not self._context:
self._context = VirgilContext(self.__access_token)
return self._context
| bsd-3-clause | Python | |
cfc37e2556dc018f1150e647253f377c3e8b75ae | Add subtree_sim.py | kdmurray91/kwip-experiments,kdmurray91/kwip-experiments,kdmurray91/kwip-experiments | scripts/subtree_sim.py | scripts/subtree_sim.py | import numpy as np
from cogent import LoadTree
CLI = """
USAGE:
random_subtree <tree> <n>
Subsamples <n> taxa from the Newick tree in <tree>, preserving the branch
lengths of subsampled taxa.
"""
def main(treefile, n):
n = int(n)
tree = LoadTree(
with open(treefile) as trees:
for tree in trees:
tree = ete.Tree(tree.strip())
leaves = tree.get_leaf_names()
subsample = [leaves[i] for i in np.random.choice(n, size=len(tree))]
tree.prune(subsample, preserve_branch_length=True)
print(tree.write())
if __name__ == "__main__":
import docopt
opts = docopt.docopt(CLI)
main(opts['<tree>'], int(opts['<n>']))
| mit | Python | |
e81bfc0ddcfdc321af0608d553b123fa2188de38 | Consolidate implementations | llllllllll/datashape,ContinuumIO/datashape,quantopian/datashape,quantopian/datashape,llllllllll/datashape,blaze/datashape,cpcloud/datashape,cowlicks/datashape,cowlicks/datashape,blaze/datashape,cpcloud/datashape,ContinuumIO/datashape | datashape/user.py | datashape/user.py | from __future__ import print_function, division, absolute_import
from datashape.dispatch import dispatch
from .coretypes import *
from .predicates import isdimension
from .util import dshape
import sys
from datetime import date, time, datetime
__all__ = ['validate', 'issubschema']
basetypes = np.generic, int, float, str, date, time, datetime
@dispatch(np.dtype, basetypes)
def validate(schema, value):
return np.issubdtype(type(value), schema)
@dispatch(CType, basetypes)
def validate(schema, value):
return validate(to_numpy_dtype(schema), value)
@dispatch(DataShape, (tuple, list))
def validate(schema, value):
head = schema[0]
return ((len(schema) == 1 and validate(head, value))
or (isdimension(head)
and (isinstance(head, Var) or int(head) == len(value))
and all(validate(DataShape(*schema[1:]), item) for item in value)))
@dispatch(DataShape, object)
def validate(schema, value):
if len(schema) == 1:
return validate(schema[0], value)
@dispatch(Record, dict)
def validate(schema, d):
return all(validate(sch, d.get(k)) for k, sch in schema.parameters[0])
@dispatch(Record, (tuple, list))
def validate(schema, seq):
return all(validate(sch, item) for (k, sch), item
in zip(schema.parameters[0], seq))
@dispatch(str, object)
def validate(schema, value):
return validate(dshape(schema), value)
@dispatch(type, object)
def validate(schema, value):
return isinstance(value, schema)
@dispatch(tuple, object)
def validate(schemas, value):
return any(validate(schema, value) for schema in schemas)
@dispatch(object, object)
def validate(schema, value):
return False
@validate.register(String, str)
@validate.register(Time, time)
@validate.register(Date, date)
@validate.register(DateTime, datetime)
def validate_always_true(schema, value):
return True
@dispatch(DataShape, np.ndarray)
def validate(schema, value):
return issubschema(from_numpy(value.shape, value.dtype), schema)
@dispatch(object, object)
def issubschema(a, b):
return issubschema(dshape(a), dshape(b))
@dispatch(DataShape, DataShape)
def issubschema(a, b):
if a == b:
return True
# TODO, handle cases like float < real
# TODO, handle records {x: int, y: int, z: int} < {x: int, y: int}
return None # We don't know, return something falsey
| from __future__ import print_function, division, absolute_import
from datashape.dispatch import dispatch
from .coretypes import *
from .predicates import isdimension
from .util import dshape
import sys
from datetime import date, time, datetime
__all__ = ['validate', 'issubschema']
basetypes = np.generic, int, float, str, date, time, datetime
@dispatch(np.dtype, basetypes)
def validate(schema, value):
return np.issubdtype(type(value), schema)
@dispatch(CType, basetypes)
def validate(schema, value):
return validate(to_numpy_dtype(schema), value)
@dispatch(DataShape, (tuple, list))
def validate(schema, value):
head = schema[0]
return ((len(schema) == 1 and validate(head, value))
or (isdimension(head)
and (isinstance(head, Var) or int(head) == len(value))
and all(validate(DataShape(*schema[1:]), item) for item in value)))
@dispatch(DataShape, object)
def validate(schema, value):
if len(schema) == 1:
return validate(schema[0], value)
@dispatch(Record, dict)
def validate(schema, d):
return all(validate(sch, d.get(k)) for k, sch in schema.parameters[0])
@dispatch(String, str)
def validate(schema, value):
return True
@dispatch(Record, (tuple, list))
def validate(schema, seq):
return all(validate(sch, item) for (k, sch), item
in zip(schema.parameters[0], seq))
@dispatch(str, object)
def validate(schema, value):
return validate(dshape(schema), value)
@dispatch(type, object)
def validate(schema, value):
return isinstance(value, schema)
@dispatch(tuple, object)
def validate(schemas, value):
return any(validate(schema, value) for schema in schemas)
@dispatch(object, object)
def validate(schema, value):
return False
@dispatch(Time, time)
def validate(schema, value):
return True
@dispatch(Date, date)
def validate(schema, value):
return True
@dispatch(DateTime, datetime)
def validate(schema, value):
return True
@dispatch(DataShape, np.ndarray)
def validate(schema, value):
return issubschema(from_numpy(value.shape, value.dtype), schema)
@dispatch(object, object)
def issubschema(a, b):
return issubschema(dshape(a), dshape(b))
@dispatch(DataShape, DataShape)
def issubschema(a, b):
if a == b:
return True
# TODO, handle cases like float < real
# TODO, handle records {x: int, y: int, z: int} < {x: int, y: int}
return None # We don't know, return something falsey
| bsd-2-clause | Python |
5d0c16c877fb445114d2b77ee7a4d14686320688 | Add Python solution for day 19 | Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015 | day19/solution.py | day19/solution.py | import re
data = open("data", "r").read()
possibleReplacements = {}
possibleReverseReplacements = {}
for replacement in data.split("\n"):
lhs, rhs = replacement.split(" => ")
if lhs in possibleReplacements:
if rhs not in possibleReplacements[lhs]:
possibleReplacements[lhs].append(rhs)
else:
possibleReplacements[lhs] = [rhs];
if rhs in possibleReverseReplacements:
if lhs not in possibleReverseReplacements[rhs]:
possibleReverseReplacements[rhs].append(lhs)
else:
possibleReverseReplacements[rhs] = [lhs];
inputMolecule = "CRnSiRnCaPTiMgYCaPTiRnFArSiThFArCaSiThSiThPBCaCaSiRnSiRnTiTiMgArPBCaPMgYPTiRnFArFArCaSiRnBPMgArPRnCaPTiRnFArCaSiThCaCaFArPBCaCaPTiTiRnFArCaSiRnSiAlYSiThRnFArArCaSiRnBFArCaCaSiRnSiThCaCaCaFYCaPTiBCaSiThCaSiThPMgArSiRnCaPBFYCaCaFArCaCaCaCaSiThCaSiRnPRnFArPBSiThPRnFArSiRnMgArCaFYFArCaSiRnSiAlArTiTiTiTiTiTiTiRnPMgArPTiTiTiBSiRnSiAlArTiTiRnPMgArCaFYBPBPTiRnSiRnMgArSiThCaFArCaSiThFArPRnFArCaSiRnTiBSiThSiRnSiAlYCaFArPRnFArSiThCaFArCaCaSiThCaCaCaSiRnPRnCaFArFYPMgArCaPBCaPBSiRnFYPBCaFArCaSiAl"
inputAtoms = re.findall(r'[A-Z][a-z]?', inputMolecule)
synthesizeable = []
for inputAtom in inputAtoms:
if inputAtom in possibleReplacements:
synthesizeable.append([inputAtom] + possibleReplacements[inputAtom])
else:
synthesizeable.append([inputAtom])
def getCombinations(array):
combinations = []
firstElements = map(lambda a: a[0], array)
for index, current in enumerate(array):
before = "".join(firstElements[:index])
after = "".join(firstElements[index + 1:])
for x in range(1, len(current)):
value = current[x]
combinations.append(before + value + after)
return combinations
print "Distinct molecules after one replacement:", len(set(getCombinations(synthesizeable)))
# This solution is credited to /u/askalski ; https://www.reddit.com/r/adventofcode/comments/3xflz8/day_19_solutions/cy4etju
print "Replacements to get to medicine molecule from 'e':", \
len(inputAtoms) - \
len(filter(lambda a: a == "Rn" or a == "Ar", inputAtoms)) - \
2 * len(filter(lambda a: a == "Y", inputAtoms)) - 1
| mit | Python | |
cdfdf0646151c54001ccbc80eca5c0e8f83ff38a | add tests for Compose class | red-hat-storage/rhcephcompose,red-hat-storage/rhcephcompose | rhcephcompose/tests/test_compose.py | rhcephcompose/tests/test_compose.py | import os
import time
from rhcephcompose.compose import Compose
from kobo.conf import PyConfigParser
TESTS_DIR = os.path.dirname(os.path.abspath(__file__))
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
class TestCompose(object):
conf_file = os.path.join(FIXTURES_DIR, 'basic.conf')
conf = PyConfigParser()
conf.load_from_file(conf_file)
def test_constructor(self):
c = Compose(self.conf)
assert isinstance(c, Compose)
assert c.target == 'trees'
def test_output_dir(self, tmpdir, monkeypatch):
monkeypatch.chdir(tmpdir)
c = Compose(self.conf)
compose_date = time.strftime('%Y%m%d')
expected = 'trees/Ceph-2-Ubuntu-x86_64-%s.t.0' % compose_date
assert c.output_dir == expected
| mit | Python | |
885bf944b7839e54a83ce0737b05ff11fa7d3d86 | Create selfTest.py | christopher-henderson/Experiments,christopher-henderson/Experiments,christopher-henderson/Experiments,christopher-henderson/Experiments,christopher-henderson/Experiments | searchSort/selfTest.py | searchSort/selfTest.py | from random import randrange, shuffle
def quick_sort(collection, low, high):
if low < high:
p = partition(collection, low, high)
quick_sort(collection, low, p)
quick_sort(collection, p + 1, high)
def partition(collection, low, high):
pivot = collection[(low + high) // 2]
low -= 1
high += 1
while True:
high -= 1
while collection[high] > pivot:
high -= 1
low += 1
while collection[low] < pivot:
low += 1
if low < high:
temp = collection[low]
collection[low] = collection[high]
collection[high] = temp
else:
return high
def bubble_sort(collection):
for _ in range(len(collection)):
for index in range(len(collection) - 1):
if collection[index] > collection[index + 1]:
temp = collection[index]
collection[index] = collection[index + 1]
collection[index + 1] = temp
def main():
test = [randrange(100) for _ in range(100)]
fixture = sorted(test)
quick_sort(test, 0, len(test) - 1)
assert test == fixture
shuffle(test)
bubble_sort(test)
assert test == fixture
shuffle(test)
main()
| mit | Python | |
20d1a1784f0831c14e6e03bbb86f5b8dd5ae49ea | Create learn.py | cn04/smalltwo | smalltwo/learn.py | smalltwo/learn.py | cc0-1.0 | Python | ||
6090dc1539bd0701381c73128a5ca0606adc09e4 | Add SSDP unit test case (init) | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | tests/utils/test_ssdp.py | tests/utils/test_ssdp.py | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Bo Maryniuk <bo@suse.de>`
'''
from __future__ import absolute_import, print_function, unicode_literals
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch)
# Import Salt libs
import salt.exceptions
import salt.state
try:
import pytest
except ImportError as err:
pytest = None
@skipIf(NO_MOCK, NO_MOCK_REASON)
@skipIf(pytest is None, 'PyTest is missing')
class SSDPTestCase(TestCase):
'''
TestCase for SSDP-related parts.
'''
def test_ssdp_base(self):
'''
Test SSDP base class main methods.
:return:
'''
| apache-2.0 | Python | |
b6d3ab372f57ad9e3c8427ba8bc211136bc1037b | Set version to 0.1.9a1 | gnotaras/django-powerdns-manager,d9pouces/django-powerdns-manager,d9pouces/django-powerdns-manager,kumina/django-powerdns-manager,gnotaras/django-powerdns-manager,kumina/django-powerdns-manager | src/powerdns_manager/__init__.py | src/powerdns_manager/__init__.py | # -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Scheme: <major>.<minor>.<maintenance>.<maturity>.<revision>
# maturity: final/beta/alpha
VERSION = (0, 1, 9, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2] is not None:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3] != 'final':
if VERSION[4] > 0:
version = '%s%s%s' % (version, VERSION[3][0], VERSION[4])
else:
version = '%s%s' % (version, VERSION[3][0])
return version
__version__ = get_version()
def get_status_classifier():
if VERSION[3] == 'final':
return 'Development Status :: 5 - Production/Stable'
elif VERSION[3] == 'beta':
return 'Development Status :: 4 - Beta'
elif VERSION[3] == 'alpha':
return 'Development Status :: 3 - Alpha'
raise NotImplementedError
| # -*- coding: utf-8 -*-
#
# This file is part of django-powerdns-manager.
#
# django-powerdns-manager is a web based PowerDNS administration panel.
#
# Development Web Site:
# - http://www.codetrax.org/projects/django-powerdns-manager
# Public Source Code Repository:
# - https://source.codetrax.org/hgroot/django-powerdns-manager
#
# Copyright 2012 George Notaras <gnot [at] g-loaded.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Scheme: <major>.<minor>.<maintenance>.<maturity>.<revision>
# maturity: final/beta/alpha
VERSION = (0, 1, 8, 'alpha', 1)
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
if VERSION[2] is not None:
version = '%s.%s' % (version, VERSION[2])
if VERSION[3] != 'final':
if VERSION[4] > 0:
version = '%s%s%s' % (version, VERSION[3][0], VERSION[4])
else:
version = '%s%s' % (version, VERSION[3][0])
return version
__version__ = get_version()
def get_status_classifier():
if VERSION[3] == 'final':
return 'Development Status :: 5 - Production/Stable'
elif VERSION[3] == 'beta':
return 'Development Status :: 4 - Beta'
elif VERSION[3] == 'alpha':
return 'Development Status :: 3 - Alpha'
raise NotImplementedError
| apache-2.0 | Python |
70b4afc095873ad226947edc757cbc4d29daf44a | Add test_incomplete_write.py test to reproduce #173 | akumuli/Akumuli,akumuli/Akumuli,akumuli/Akumuli,akumuli/Akumuli | functests/test_incomplete_write.py | functests/test_incomplete_write.py | from __future__ import print_function
import os
import sys
import socket
import datetime
import time
import akumulid_test_tools as att
import json
try:
from urllib2 import urlopen
except ImportError:
from urllib import urlopen
import traceback
import itertools
import math
HOST = '127.0.0.1'
TCPPORT = 8282
HTTPPORT = 8181
def main(path):
akumulid = att.create_akumulid(path)
# Reset database
akumulid.delete_database()
akumulid.create_database()
# start ./akumulid server
print("Starting server...")
akumulid.serve()
time.sleep(5)
try:
chan = att.TCPChan(HOST, TCPPORT)
# fill data in
invalid_sample = "+cpuload host=machine1\r\n:1418224205000000000\r\r+25.0\r\n" # reportd in issue#173
chan.send(invalid_sample)
time.sleep(5) # wait untill all messagess will be processed
query = {"select":"cpuload","range": {"from":1418224205000000000, "to":1418224505000000000}}
queryurl = "http://{0}:{1}/api/query".format(HOST, HTTPPORT)
response = urlopen(queryurl, json.dumps(query))
# response should be empty
for line in response:
print("Unexpected response: {0}".format(line))
raise ValueError("Unexpected response")
except:
traceback.print_exc()
sys.exit(1)
finally:
print("Stopping server...")
akumulid.stop()
time.sleep(5)
if __name__ == '__main__':
if len(sys.argv) < 2:
print("Not enough arguments")
sys.exit(1)
main(sys.argv[1])
else:
raise ImportError("This module shouldn't be imported")
| apache-2.0 | Python | |
67328984667246325244c0eaba75de7413c3079f | add fibonacci example in redid-example | python-cache/python-cache | example/redis-example/Fibonacci.py | example/redis-example/Fibonacci.py | # import redis driver
import redis
import pymongo
# import python-cache pycache package
from pycache.Adapter import RedisItemPool
from pycache.Adapter import MongoItemPool
from pycache import cached
client = redis.Redis(host='192.168.99.100', port=32771)
pool = RedisItemPool(client)
mongo_client = pymongo.MongoClient(host="192.168.99.100", port=27017)
mongo_pool = MongoItemPool(mongo_client)
def fib(num):
if num == 1 or num == 0:
return num
return fib(num-1) + fib(num-2)
@cached(CacheItemPool=pool)
def cached_fib(num):
if num == 1 or num == 0:
return num
return cached_fib(num - 1) + cached_fib(num - 2)
@cached(CacheItemPool=mongo_pool)
def mongo_cached_fib(num):
if num == 1 or num == 0:
return num
return mongo_cached_fib(num - 1) + mongo_cached_fib(num - 2)
from datetime import datetime
for i in range(1, 50):
cur = datetime.utcnow()
fib(i)
diff = (datetime.utcnow() - cur)
nocached = float(diff.seconds) + float(diff.microseconds) / 1000000
cur = datetime.utcnow()
cached_fib(i)
diff = (datetime.utcnow() - cur)
cached = float(diff.seconds) + float(diff.microseconds) / 1000000
pool.clear()
cur = datetime.utcnow()
mongo_cached_fib(i)
diff = (datetime.utcnow() - cur)
mongo_cached = float(diff.seconds) + float(diff.microseconds) / 1000000
mongo_pool.clear()
print i, nocached, cached, mongo_cached | mit | Python | |
e39415fbe6a325894abb8d098504150b1c515b57 | Create split-linked-list-in-parts.py | kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,kamyu104/LeetCode,kamyu104/LeetCode,kamyu104/LeetCode,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015,tudennis/LeetCode---kamyu104-11-24-2015 | Python/split-linked-list-in-parts.py | Python/split-linked-list-in-parts.py | # Time: O(n)
# Space: O(n)
# Given a chemical formula (given as a string), return the count of each atom.
#
# An atomic element always starts with an uppercase character,
# then zero or more lowercase letters, representing the name.
#
# 1 or more digits representing the count of that element may follow if the count is greater than 1.
# If the count is 1, no digits will follow. For example, H2O and H2O2 are possible, but H1O2 is impossible.
#
# Two formulas concatenated together produce another formula. For example, H2O2He3Mg4 is also a formula.
#
# A formula placed in parentheses, and a count (optionally added) is also a formula.
# For example, (H2O2) and (H2O2)3 are formulas.
#
# Given a formula, output the count of all elements as a string in the following form:
# the first name (in sorted order), followed by its count (if that count is more than 1),
# followed by the second name (in sorted order),
# followed by its count (if that count is more than 1), and so on.
#
# Example 1:
# Input:
# formula = "H2O"
# Output: "H2O"
# Explanation:
# The count of elements are {'H': 2, 'O': 1}.
#
# Example 2:
# Input:
# formula = "Mg(OH)2"
# Output: "H2MgO2"
# Explanation:
# The count of elements are {'H': 2, 'Mg': 1, 'O': 2}.
#
# Example 3:
# Input:
# formula = "K4(ON(SO3)2)2"
# Output: "K4N2O14S4"
# Explanation:
# The count of elements are {'K': 4, 'N': 2, 'O': 14, 'S': 4}.
# Note:
#
# All atom names consist of lowercase letters, except for the first character which is uppercase.
# The length of formula will be in the range [1, 1000].
# formula will only consist of letters, digits, and round parentheses,
# and is a valid formula as defined in the problem.
class Solution(object):
def countOfAtoms(self, formula):
"""
:type formula: str
:rtype: str
"""
parse = re.findall(r"([A-Z][a-z]*)(\d*)|(\()|(\))(\d*)", formula)
stk = [collections.Counter()]
for name, m1, left_open, right_open, m2 in parse:
if name:
stk[-1][name] += int(m1 or 1)
if left_open:
stk.append(collections.Counter())
if right_open:
top = stk.pop()
for k, v in top.iteritems():
stk[-1][k] += v * int(m2 or 1)
return "".join(name + (str(stk[-1][name]) if stk[-1][name] > 1 else '') \
for name in sorted(stk[-1]))
| mit | Python | |
4709a38f67c80c1516b4eae6eb7d8f54cdd985e2 | Create songsched.py | AbrhmSanchez/Songsched | Songsched/songsched.py | Songsched/songsched.py | mit | Python | ||
9beaa2052b4e47a2fd075f4d9b7988b03a38a8ad | Create main.py | gappleto97/Senior-Project | main.py | main.py | import optparse, settings
def main():
parser = optparse.OptionParser()
parser.add_option('-c',
'--charity',
dest='charity',
default=None,
action="store_true",
help='Sets whether you accept rewardless bounties')
parser.add_option('-l',
'--latency',
dest='accept_latency',
default=None,
help='Maximum acceptable latency from a server')
parser.add_option('-f',
'--propagation-factor',
dest='propagate-factor',
default=None,
help='Minimum funds:reward ratio you'll propagate bounties at')
| mit | Python | |
c36f36555b8fba183220456e51e04ccbaa08bb60 | add a data file to play with. | cellnopt/cellnopt,cellnopt/cellnopt | cno/data/ToyMMB/__init__.py | cno/data/ToyMMB/__init__.py | from cellnopt.core import XMIDAS, CNOGraph
pknmodel = CNOGraph("PKN-ToyMMB.sif")
data = XMIDAS("MD-ToyMMB.csv")
description = open("README.rst").read()
| bsd-2-clause | Python | |
c1ef15e895d4f79a9ef5c83aa13964d30bc8dbff | Add main loop | smpcole/tic-tac-toe | main.py | main.py | from Board import *
from Player import *
def main():
board = Board()
players = (HumanPlayer('x', board), HumanPlayer('o', board))
turnNum = 0
currentPlayer = None
while not board.endGame():
currentPlayer = players[turnNum % 2]
print "%s's turn" % currentPlayer
currentPlayer.takeTurn(board)
print board
turnNum += 1
if board.winner != None:
print "%ss win!" % board.winner
else:
print "It's a tie!"
if __name__ == "__main__":
main()
| mit | Python | |
f31b42ae43e7cd2af53a504c1cc2ab398bf7810d | Add api call for Premier League standings | conormag94/pyscores | main.py | main.py | import json
import requests
from tabulate import tabulate
BASE_URL = "http://api.football-data.org/alpha/"
soccer_seasons = "soccerseasons/"
epl_current_season = "soccerseasons/398/"
league_table = "leagueTable/"
def print_standings(table):
standings = []
for team in table:
entry = [team['position'], team['teamName'], team['points']]
standings.append(entry)
print tabulate(standings, headers=['Pos', 'Club', 'Points'], tablefmt="rst")
def main():
resp = requests.get(BASE_URL + epl_current_season + league_table)
data = resp.json()
league_standings = data['standing']
print_standings(league_standings)
if __name__ == '__main__':
main()
| mit | Python | |
c1d5dca7c487075229b384585e0eb11cd91bbef8 | add import script for Chesterfield | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_chesterfield.py | polling_stations/apps/data_collection/management/commands/import_chesterfield.py | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = 'E07000034'
addresses_name = 'May 2017/ChesterfieldDemocracy_Club__04May2017a.txt'
stations_name = 'May 2017/ChesterfieldDemocracy_Club__04May2017a.txt'
elections = ['local.derbyshire.2017-05-04']
csv_delimiter = '\t'
| bsd-3-clause | Python | |
04155a80531e58422253e22635f1e496a27a5647 | add import script for South Ribble | DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_south_ribble.py | polling_stations/apps/data_collection/management/commands/import_south_ribble.py | from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseHalaroseCsvImporter
class Command(BaseHalaroseCsvImporter):
council_id = 'E07000126'
addresses_name = 'Properties.csv'
stations_name = 'Polling Stations.csv'
elections = ['local.lancashire.2017-05-04']
# South Ribble use Halarose, but they've split the standard export up into
# 2 files and removed some columns. They've also added grid refs for the
# stations :) We need to customise a bit..
station_address_fields = [
'pollingstationname',
'pollingstationaddress_1',
'pollingstationaddress_2',
'pollingstationaddress_3',
'pollingstationaddress_4',
]
def get_station_hash(self, record):
raise NotImplementedError
def get_station_point(self, record):
return Point(
float(record.easting),
float(record.northing),
srid=27700
)
def get_residential_address(self, record):
def replace_na(text):
if text.strip() == 'n/a':
return ''
return text.strip()
address_line_1 = replace_na(record.housenumber)
street_address = replace_na(record.streetname)
address_line_1 = address_line_1 + ' ' + street_address
address = "\n".join([
address_line_1.strip(),
replace_na(record.locality),
replace_na(record.town),
replace_na(record.adminarea),
])
while "\n\n" in address:
address = address.replace("\n\n", "\n").strip()
return address
| bsd-3-clause | Python | |
e0d728519292377915983385285a3560d3207b19 | Create main.py | commagere/commagere.com,commagere/commagere.com,commagere/commagere.com | main.py | main.py | import webapp2
class MainHandler(webapp2.RequestHandler):
def get(self):
self.response.write('Hello world!')
app = webapp2.WSGIApplication([
('/', MainHandler)
], debug=True)
| mit | Python | |
99b4a6fa8eb96f9228635142d2686b9601f293b5 | Put main.py back | baylee-d/cos.io,baylee-d/cos.io,baylee-d/cos.io,baylee-d/cos.io | main.py | main.py | import wysiweb
import shutil
try:
shutil.rmtree('./frozen')
except:
pass
# site_path: Where are the files that will build the website
# static_path: where are the static files? has to be within site_path
# static_route: what is the route to access static files: <a href="/static/logo.jpg"> for instance.
w = wysiweb.WYSIWeb(
site_path= './www',
static_path='./www/static',
static_route='/static',
input_encoding='utf-8'
)
app = w.app # Needed if you want to serve with, say, nginx
if __name__ == "__main__":
# app.run(port=4000, debug=True) # Ready to serve Flask app from the templates in www
# or
static_app = w.freeze('./frozen')
#static_app.run(port=5000, debug=False) # Ready to serve Flask app that mimics a static server
| apache-2.0 | Python | |
62df41780706161c4e25f854de0b6cc5d2664a39 | Add test for templates in include_router path (#349) | tiangolo/fastapi,tiangolo/fastapi,tiangolo/fastapi | tests/test_router_prefix_with_template.py | tests/test_router_prefix_with_template.py | from fastapi import APIRouter, FastAPI
from starlette.testclient import TestClient
# Application under test: a router mounted under a *templated* prefix.
app = FastAPI()
router = APIRouter()
@router.get("/users/{id}")
def read_user(segment: str, id: str):
    # `segment` comes from the router prefix template below, `id` from the
    # route's own path; both arrive as ordinary path parameters.
    return {"segment": segment, "id": id}
# The prefix itself contains a path variable ("/{segment}").
app.include_router(router, prefix="/{segment}")
client = TestClient(app)
def test_get():
    """Both the prefix variable and the route parameter reach the endpoint."""
    response = client.get("/seg/users/foo")
    assert response.status_code == 200
    assert response.json() == {"segment": "seg", "id": "foo"}
| mit | Python | |
9123b90f21fc341cbb2e333eb53c5149dfda8e3b | Add Taiwan time transformed. | toomore/goristock | cttwt.py | cttwt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011 Toomore Chiang, http://toomore.net/
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import time,datetime
class TWTime(object):
    ''' Transform localtime to Taiwan time in UTC+8 '''
    def __init__(self, tz=8):
        """Store the target UTC offset in hours (truncated to int).

        Falls back to UTC+8 (Taiwan) when ``tz`` is not convertible to an
        integer; the original caught *every* exception with a bare except.
        """
        try:
            self.TimeZone = int(tz)
        except (TypeError, ValueError):
            self.TimeZone = 8
    @property
    def now(self):
        ''' Display Taiwan Time now '''
        # Derive from UTC instead of localtime + time.timezone: the old
        # arithmetic ignored daylight-saving time, so it was off by an hour
        # whenever the local zone was in DST.
        return datetime.datetime.utcnow() + datetime.timedelta(hours=self.TimeZone)
    @property
    def localtime(self):
        ''' Display localtime now '''
        return datetime.datetime.now()
| mit | Python | |
4b7fb1ad3cd03e0593ecd3d0626bca385fb2800d | Add scroller example. | arminha/python-aosd | examples/scroller/scroller.py | examples/scroller/scroller.py | # coding=utf8
#
# python-aosd -- python bindings for libaosd
#
# Copyright (C) 2010 Armin Häberling <armin.aha@gmail.com>
#
# Based on the scroller example from libaosd.
#
import sys
import aosd
def scroll(osd, width, height):
    """Scroll an OSD of the given size up from the bottom, pause, then back down.

    The text grows from the bottom edge in `step`-pixel slices, stays visible
    for two seconds, and shrinks away again. Blocks until the animation ends.
    (Stray C-style semicolons from the original were removed; logic unchanged.)
    """
    pos = 8   # aosd position index used for the initial placement
    step = 3  # pixels revealed/hidden per animation frame
    osd.set_position(pos, width, height)
    (x, y, _, _) = osd.get_geometry()
    osd.set_position_offset(width, height)
    osd.show()
    # Anchor the window just above the bottom edge before animating.
    x -= 1
    y += height - 1
    # Reveal the text one slice at a time, ~20ms per frame.
    for i in range(1, height + 1, step):
        osd.loop_for(20)
        y -= step
        osd.set_geometry(x, y, width, i)
    osd.set_position(pos, width, height)
    osd.set_position_offset(-1, -1)
    (x, y, _, _) = osd.get_geometry()
    # Hold the fully revealed text for two seconds.
    osd.loop_for(2000)
    # Hide it again by shrinking the visible slice back down.
    for i in range(height, 0, -step):
        y += step
        osd.set_geometry(x, y, width, i)
        osd.loop_for(20)
    osd.hide()
def setup():
    """Create and style the AosdText widget used by the scroller.

    Prefers composite transparency, falling back to none when the X server
    does not support it. Returns the configured widget.
    """
    osd = aosd.AosdText()
    osd.set_transparency(aosd.TRANSPARENCY_COMPOSITE)
    # The setter silently ignores unsupported modes, so read the value back
    # to detect whether composite transparency actually took effect.
    if osd.get_transparency() != aosd.TRANSPARENCY_COMPOSITE:
        osd.set_transparency(aosd.TRANSPARENCY_NONE)
    osd.geom_x_offset = 10
    osd.geom_y_offset = 10
    osd.back_color = "white"
    osd.back_opacity = 80
    osd.shadow_color = "black"
    osd.shadow_opacity = 127
    osd.shadow_x_offset = 2
    osd.shadow_y_offset = 2
    osd.fore_color = "green"
    osd.fore_opacity = 255
    osd.set_font("Times New Roman Italic 24")
    osd.wrap = aosd.PANGO_WRAP_WORD_CHAR
    osd.alignment = aosd.PANGO_ALIGN_RIGHT
    # Wrap lines at the screen width so long messages stay on screen.
    osd.set_layout_width(osd.get_screen_wrap_width())
    return osd
def set_string(osd, text):
    """Set the scroller text and return its rendered (width, height)."""
    osd.set_text(text)
    return osd.get_text_size()
def main(argv):
    """Scroll each command-line argument across the screen, one at a time."""
    osd = setup()
    for text in argv[1:]:
        width, height = set_string(osd, text)
        scroll(osd, width, height)
if __name__ == "__main__":
    main(sys.argv)
| mit | Python | |
a9fee0cab6899effa865c73c38baf73e5272d87e | Create main.py | meagtan/harmonizer | main.py | main.py | ### Command line interface for functions
import freqs
import harmonize
import error
import enharmonic
import voice
# 1. Take input from file or user
# 2. Call desired function on input, specifying type of output (show, save to file, etc.)
# 3. Display result
| mit | Python | |
370d6eb7fc4adb2f2769bdf94f56df239760ef0c | Create quiz2.py | mdmirabal/uip-prog3 | laboratorio-f/quiz2.py | laboratorio-f/quiz2.py | print("OFERTAS El Emperador")
ofer1 = 0.30
ofer2 = 0.20
ofer3 = 0.10
while clientes <5:
monto = int(input("Ingrese monto: "))
clientes += 1
if monto >= 500:
subtotal = monto * ofer1
total = monto - subtotal
print("El total es {0}: ".format(total)
if monto < 500 or monto > 200
subtotal = monto * ofer2
total = monto + subtotal
print("El total es {0}: ".format(total)
if monto < 200: or monto > 100:
subtotal = monto * ofer3
total = monto + subtotal
print("El total es {0}: ".format(total)
else
print("No hay descuento")
print("gracias por su compra")
break
| mit | Python | |
c8e6fce132d1eaa9d789dd0c7bd2e5c53e4e5424 | Add python user exception function example (#2333) | nkurihar/pulsar,jai1/pulsar,yahoo/pulsar,yahoo/pulsar,ArvinDevel/incubator-pulsar,ArvinDevel/incubator-pulsar,yahoo/pulsar,jai1/pulsar,nkurihar/pulsar,merlimat/pulsar,massakam/pulsar,nkurihar/pulsar,yahoo/pulsar,massakam/pulsar,nkurihar/pulsar,massakam/pulsar,nkurihar/pulsar,jai1/pulsar,nkurihar/pulsar,massakam/pulsar,merlimat/pulsar,jai1/pulsar,jai1/pulsar,ArvinDevel/incubator-pulsar,merlimat/pulsar,yahoo/pulsar,merlimat/pulsar,ArvinDevel/incubator-pulsar,jai1/pulsar,jai1/pulsar,jai1/pulsar,jai1/pulsar,ArvinDevel/incubator-pulsar,nkurihar/pulsar,ArvinDevel/incubator-pulsar,ArvinDevel/incubator-pulsar,ArvinDevel/incubator-pulsar,nkurihar/pulsar,yahoo/pulsar,massakam/pulsar,merlimat/pulsar,massakam/pulsar,merlimat/pulsar,nkurihar/pulsar | pulsar-functions/python-examples/user_exception.py | pulsar-functions/python-examples/user_exception.py | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from pulsar import Function
# Example function that throws an excpetion
class UserException(Function):
    """Example Pulsar Function whose process() always raises.

    Exercises the runtime's user-exception handling/accounting path.
    """
    def __init__(self):
        pass
    def process(self, input, context):
        # Deliberately fail so the framework's error handling can be observed.
        raise Exception('this will not work') | apache-2.0 | Python |
3aaee714d59650f36e3918e184905048a83d1cfc | Add new filesystem_stat module. | mk23/snmpy,mk23/snmpy | lib/filesystem_stat.py | lib/filesystem_stat.py | import os
import stat
import snmpy
import time
class filesystem_stat(snmpy.plugin):
    """snmpy plugin that publishes lstat() metadata for the configured path."""

    def s_type(self, obj):
        """Map an st_mode value to a human-readable file-type label."""
        if stat.S_ISDIR(obj):
            return 'directory'
        if stat.S_ISCHR(obj):
            return 'character special device'
        if stat.S_ISBLK(obj):
            return 'block special device'
        if stat.S_ISREG(obj):
            return 'regular file'
        if stat.S_ISFIFO(obj):
            return 'named pipe'
        if stat.S_ISLNK(obj):
            return 'symbolic link'
        if stat.S_ISSOCK(obj):
            return 'socket'
        return 'unknown'

    def s_time(self, obj):
        """Convert an absolute stat timestamp to an age in whole seconds."""
        return int(time.time() - obj)

    def s_pass(self, obj):
        """Identity formatter: publish the raw stat value unchanged."""
        return obj

    def gather(self, obj):
        """Refresh the value column ('2.x') from a fresh lstat() of self.name."""
        try:
            info = os.lstat(self.name)
            for k, v in self.stat.items():
                # BUG FIX: this used to call info.getattr(...), which raised
                # AttributeError (os.stat_result has no such method) and, via
                # the old bare except, silently forced every poll down the
                # fallback branch. getattr() defaults to st_mode for
                # pseudo-fields like 'type', whose s_type formatter expects
                # the mode bits.
                # NOTE(review): key '1' ('name') has no st_name attribute, so
                # this publishes s_pass(st_mode) over the static name set in
                # create() -- confirm the intended behaviour for that row.
                self.data['2.%s' % k] = v[2](getattr(info, 'st_%s' % v[0], info.st_mode))
        except OSError:
            # Path missing or unreadable: publish each field's default.
            for k, v in self.stat.items():
                self.data['2.%s' % k] = v[1]

    def create(self):
        # Table of OID suffix -> (stat field name, default value, formatter).
        self.stat = {
            '1': ('name', self.name, self.s_pass),
            '2': ('type', 'missing', self.s_type),
            '3': ('atime', -1, self.s_time),
            '4': ('mtime', -1, self.s_time),
            '5': ('ctime', -1, self.s_time),
            '6': ('nlink', -1, self.s_pass),
            '7': ('size', -1, self.s_pass),
            '8': ('ino', -1, self.s_pass),
            '9': ('uid', -1, self.s_pass),
            '10': ('gid', -1, self.s_pass),
        }
        for k, v in self.stat.items():
            # Column 1 carries the field label, column 2 the typed value;
            # every value row except the static name re-gathers on each poll.
            snmp_type = 'integer' if type(v[1]) == int else 'string'
            self.data['1.%s' % k] = 'string', v[0]
            if k == '1':
                self.data['2.%s' % k] = snmp_type, v[1]
            else:
                self.data['2.%s' % k] = snmp_type, v[1], {'run': self.gather}
| mit | Python | |
7ac6007e28740e0aff89925b10ae09fa6c0b63d3 | add tests for g.current_user | PnEcrins/GeoNature,PnEcrins/GeoNature,PnEcrins/GeoNature,PnEcrins/GeoNature | backend/geonature/tests/test_users_login.py | backend/geonature/tests/test_users_login.py | import pytest
from flask import g, url_for, current_app
from geonature.utils.env import db
from pypnusershub.db.models import User, Application, AppUser, UserApplicationRight, ProfilsForApp
from . import login, temporary_transaction
from .utils import logged_user_headers
@pytest.mark.usefixtures("client_class", "temporary_transaction")
class TestUsersLogin:
    """Checks that g.current_user is populated from the login headers."""
    @pytest.fixture
    def user(self, app):
        # Create a throwaway user and grant it a profile for this
        # application, all inside a nested (rolled-back) transaction.
        id_app = app.config['ID_APP']
        with db.session.begin_nested():
            user = User(groupe=False, active=True, identifiant='user', password='password')
            db.session.add(user)
            application = Application.query.get(id_app)
            # Take the profile with the highest id_profil for this app.
            profil = ProfilsForApp.query.filter_by(id_application=application.id_application) \
                .order_by(ProfilsForApp.id_profil.desc()).first().profil
            right = UserApplicationRight(role=user, id_profil=profil.id_profil, id_application=application.id_application)
            db.session.add(right)
        return user
    @pytest.fixture
    def app_user(self, app, user):
        # The AppUser row joining the created user to the application.
        return AppUser.query.filter_by(id_role=user.id_role, id_application=app.config['ID_APP']).one()
    def test_current_user(self, app, user, app_user):
        # Simulate an authenticated request and run Flask's request
        # preprocessing so the login machinery can set g.current_user.
        with app.test_request_context(headers=logged_user_headers(app_user)):
            app.preprocess_request()
            assert(g.current_user == user)
| bsd-2-clause | Python | |
205bbba27a89e6f89e26164dbf25ce9763865d36 | add ping.py | liuluheng/utils,liuluheng/utils | ping.py | ping.py | #!/usr/bin/env python
# -*- coding:utf8 -*-
import Queue
import threading
import subprocess
import re
import sys
# Serializes stdout access across the worker threads (Python 2 script).
lock = threading.Lock()
def getip(ip):
    """Expand a range spec like '192.168.1.1-254' into a list of addresses."""
    a = re.match(r'(.*\d+)\.(\d+)-(\d+)',ip)
    # NOTE(review): leftover debug output -- every run prints the parsed
    # groups, and a.groups() raises AttributeError on a malformed spec.
    print a.groups()
    start = int(a.group(2))
    end = int(a.group(3))+1
    iplist = []
    for i in range(start,end):
        iplist.append(a.group(1)+"."+str(i))
    return iplist
def ping(qlist):
    """Worker loop: ping addresses from the queue until it is drained."""
    while 1:
        if qlist.empty():
            # No more work: terminate this worker thread.
            sys.exit()
        ip = qlist.get()
        # Single-probe ping; output is discarded, only the exit status is used.
        ret=subprocess.call("ping -c 1 %s" \
            % ip,stdout=open('/dev/null','w'),\
            stderr=subprocess.STDOUT,shell=True)
        lock.acquire()
        if ret==0:
            # Exit status 0 means at least one echo reply was received.
            print "%s is Alive " % ip
        else:
            pass
        lock.release()
        qlist.task_done()
def main():
    """Fill the work queue from argv[1] and start argv[2] daemon workers."""
    queue = Queue.Queue()
    for i in getip(sys.argv[1]):
        queue.put(i)
    for q in range(int(sys.argv[2])):
        worker=threading.Thread(target=ping,args=(queue,))
        # Daemon threads die with the process once queue.join() returns.
        worker.setDaemon(True)
        worker.start()
    queue.join()
if __name__=="__main__":
    # Expect exactly two arguments: the IP range spec and the thread count.
    if len (sys.argv) != 3:
        print "usage %s IP段(192.168.1.1-254) 线程数" % sys.argv[0]
    else:
        main()
| mit | Python | |
1266fd79369634e2a0399e857107487ae589ea20 | add vpc vpc check script | zerOnepal/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,aryeguy/sensu-community-plugins,warmfusion/sensu-community-plugins,pkaeding/sensu-community-plugins,royalj/sensu-community-plugins,justanshulsharma/sensu-community-plugins,tuenti/sensu-community-plugins,nilroy/sensu-community-plugins,broadinstitute/sensu-community-plugins,alphagov/sensu-community-plugins,cmattoon/sensu-community-plugins,cotocisternas/sensu-community-plugins,aryeguy/sensu-community-plugins,thehyve/sensu-community-plugins,Seraf/sensu-community-plugins,gferguson-gd/sensu-community-plugins,new23d/sensu-community-plugins,pkaeding/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,khuongdp/sensu-community-plugins,julienba/sensu-community-plugins,jennytoo/sensu-community-plugins,lfdesousa/sensu-community-plugins,lenfree/sensu-community-plugins,aryeguy/sensu-community-plugins,jbehrends/sensu-community-plugins,cread/sensu-community-plugins,lfdesousa/sensu-community-plugins,alexhjlee/sensu-community-plugins,estately/sensu-community-plugins,optimizely/sensu-community-plugins,justanshulsharma/sensu-community-plugins,khuongdp/sensu-community-plugins,ideais/sensu-community-plugins,nagas/sensu-community-plugins,warmfusion/sensu-community-plugins,jameslegg/sensu-community-plugins,nagas/sensu-community-plugins,circleback/sensu-community-plugins,loveholidays/sensu-plugins,himyouten/sensu-community-plugins,intoximeters/sensu-community-plugins,petere/sensu-community-plugins,PerfectMemory/sensu-community-plugins,intoximeters/sensu-community-plugins,royalj/sensu-community-plugins,julienba/sensu-community-plugins,reevoo/sensu-community-plugins,jameslegg/sensu-community-plugins,emillion/sensu-community-plugins,Seraf/sensu-community-plugins,alexhjlee/sensu-community-plugins,lfdesousa/sensu-community-plugins,Squarespace/sensu-community-plugins,ideais/sensu-community-plugins,nilroy/sensu-community-plugins,mecavity/sensu-comm
unity-plugins,lenfree/sensu-community-plugins,plasticbrain/sensu-community-plugins,reevoo/sensu-community-plugins,yieldbot/sensu-community-plugins,justanshulsharma/sensu-community-plugins,JonathanHuot/sensu-community-plugins,cmattoon/sensu-community-plugins,broadinstitute/sensu-community-plugins,thehyve/sensu-community-plugins,loveholidays/sensu-plugins,madAndroid/sensu-community-plugins,JonathanHuot/sensu-community-plugins,jbehrends/sensu-community-plugins,cread/sensu-community-plugins,gferguson-gd/sensu-community-plugins,circleback/sensu-community-plugins,lfdesousa/sensu-community-plugins,yieldbot/sensu-community-plugins,maoe/sensu-community-plugins,jbehrends/sensu-community-plugins,jbehrends/sensu-community-plugins,nagas/sensu-community-plugins,leedm777/sensu-community-plugins,jameslegg/sensu-community-plugins,plasticbrain/sensu-community-plugins,justanshulsharma/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,leedm777/sensu-community-plugins,klangrud/sensu-community-plugins,estately/sensu-community-plugins,tuenti/sensu-community-plugins,shnmorimoto/sensu-community-plugins,nagas/sensu-community-plugins,lenfree/sensu-community-plugins,Seraf/sensu-community-plugins,JonathanHuot/sensu-community-plugins,madAndroid/sensu-community-plugins,maoe/sensu-community-plugins,klangrud/sensu-community-plugins,maoe/sensu-community-plugins,circleback/sensu-community-plugins,tuenti/sensu-community-plugins,petere/sensu-community-plugins,optimizely/sensu-community-plugins,circleback/sensu-community-plugins,giorgiosironi/sensu-community-plugins,Squarespace/sensu-community-plugins,ideais/sensu-community-plugins,shnmorimoto/sensu-community-plugins,alphagov/sensu-community-plugins,himyouten/sensu-community-plugins,pkaeding/sensu-community-plugins,jennytoo/sensu-community-plugins,gferguson-gd/sensu-community-plugins,tuenti/sensu-community-plugins,luisdalves/sensu-community-plugins,intoximeters/sensu-community-plugins,maoe/sensu-community-plugins,himyouten/sensu-community-
plugins,PerfectMemory/sensu-community-plugins,madAndroid/sensu-community-plugins,cread/sensu-community-plugins,alertlogic/sensu-community-plugins,cotocisternas/sensu-community-plugins,warmfusion/sensu-community-plugins,Seraf/sensu-community-plugins,giorgiosironi/sensu-community-plugins,cmattoon/sensu-community-plugins,giorgiosironi/sensu-community-plugins,jennytoo/sensu-community-plugins,Squarespace/sensu-community-plugins,zerOnepal/sensu-community-plugins,luisdalves/sensu-community-plugins,tuenti/sensu-community-plugins,shnmorimoto/sensu-community-plugins,JonathanHuot/sensu-community-plugins,plasticbrain/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,royalj/sensu-community-plugins,reevoo/sensu-community-plugins,yieldbot/sensu-community-plugins,cmattoon/sensu-community-plugins,emillion/sensu-community-plugins,emillion/sensu-community-plugins,PerfectMemory/sensu-community-plugins,royalj/sensu-community-plugins,alexhjlee/sensu-community-plugins,plasticbrain/sensu-community-plugins,luisdalves/sensu-community-plugins,cotocisternas/sensu-community-plugins,himyouten/sensu-community-plugins,loveholidays/sensu-plugins,aryeguy/sensu-community-plugins,intoximeters/sensu-community-plugins,warmfusion/sensu-community-plugins,new23d/sensu-community-plugins,luisdalves/sensu-community-plugins,petere/sensu-community-plugins,nilroy/sensu-community-plugins,estately/sensu-community-plugins,alphagov/sensu-community-plugins,new23d/sensu-community-plugins,lenfree/sensu-community-plugins,Squarespace/sensu-community-plugins,julienba/sensu-community-plugins,ideais/sensu-community-plugins,zerOnepal/sensu-community-plugins,madAndroid/sensu-community-plugins,klangrud/sensu-community-plugins,thehyve/sensu-community-plugins,pkaeding/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,PerfectMemory/sensu-community-plugins,julienba/sensu-community-plugins,shnmorimoto/sensu-community-plugins,optimizely/sensu-community-plugins,alertlogic/sensu-community-plugins,khuongdp/sens
u-community-plugins,reevoo/sensu-community-plugins,mecavity/sensu-community-plugins,gferguson-gd/sensu-community-plugins,leedm777/sensu-community-plugins,leedm777/sensu-community-plugins,alertlogic/sensu-community-plugins,petere/sensu-community-plugins,mecavity/sensu-community-plugins,zerOnepal/sensu-community-plugins,cotocisternas/sensu-community-plugins,loveholidays/sensu-plugins,emillion/sensu-community-plugins,FlorinAndrei/sensu-community-plugins,cread/sensu-community-plugins,alexhjlee/sensu-community-plugins,alertlogic/sensu-community-plugins,new23d/sensu-community-plugins,khuongdp/sensu-community-plugins,jennytoo/sensu-community-plugins,estately/sensu-community-plugins,giorgiosironi/sensu-community-plugins,klangrud/sensu-community-plugins,nilroy/sensu-community-plugins,broadinstitute/sensu-community-plugins,thehyve/sensu-community-plugins,rikaard-groupby/sensu-community-plugins,mecavity/sensu-community-plugins | plugins/aws/check_vpc_vpn.py | plugins/aws/check_vpc_vpn.py | #!/usr/bin/python
import argparse
import boto.ec2
from boto.vpc import VPCConnection
import sys
def main():
    """Check every VPN tunnel in the region, Nagios-style.

    Exit codes: 0 OK, 1 WARN (one tunnel down), 2 CRITICAL (more than one
    down), 3 UNKNOWN (could not connect). Reads the module-level ``args``
    parsed in the __main__ block below.
    """
    try:
        conn = boto.vpc.VPCConnection(aws_access_key_id=args.aws_access_key_id, aws_secret_access_key=args.aws_secret_access_key, region=boto.ec2.get_region(args.region))
    except:
        # NOTE(review): bare except hides the real connection failure, and
        # 'reqion' in the message is a typo (kept: changing output would
        # alter behaviour in a doc-only pass).
        print "UNKNOWN: Unable to connect to reqion %s" % args.region
        sys.exit(3)
    errors = []
    # Collect a description of every tunnel that is not in the 'UP' state.
    for vpn_connection in conn.get_all_vpn_connections():
        for tunnel in vpn_connection.tunnels:
            if tunnel.status != 'UP':
                errors.append("[gateway: %s connection: %s tunnel: %s status: %s]" % (vpn_connection.vpn_gateway_id, vpn_connection.id, tunnel.outside_ip_address, tunnel.status))
    if len(errors) > 1:
        print 'CRITICAL: ' + ' '.join(errors)
        sys.exit(2)
    elif len(errors) > 0:
        print 'WARN: ' + ' '.join(errors)
        sys.exit(1)
    else:
        print 'OK'
        sys.exit(0)
if __name__ == "__main__":
    # ``args`` is intentionally module-level so main() can read it.
    parser = argparse.ArgumentParser(description='Check status of all existing AWS VPC VPN Tunnels')
    parser.add_argument('-a', '--aws-access-key-id', required=True, dest='aws_access_key_id', help='AWS Access Key')
    parser.add_argument('-s', '--aws-secret-access-key', required=True, dest='aws_secret_access_key', help='AWS Secret Access Key')
    parser.add_argument('-r', '--region', required=True, dest='region', help='AWS Region')
    args = parser.parse_args()
main() | mit | Python | |
2d6d5c0a07a751a66c3f0495e3a3a67e4296dd77 | Create subreddits_with_zero_gildings.py | Statistica/reddit-gold | subreddits_with_zero_gildings.py | subreddits_with_zero_gildings.py | # Written by Jonathan Saewitz, released March 26th, 2016 for Statisti.ca
# Released under the MIT License (https://opensource.org/licenses/MIT)
import json, plotly.plotly as plotly, plotly.graph_objs as go
########################
# Config #
########################
graph_title="Largest Subreddits Who Have Given 0 Gold"
x_axis_title="Subreddit"
y_axis_title="Subreddit size (subscribers)"
filename="zero_gildings.json"
########################
# End Config #
########################
subreddits=[]
subscribers=[]
# Read the ten top entries (one JSON object per line) from the data file.
# `with` guarantees the file is closed even if a line fails to parse -- the
# original left it open on any exception -- and next(f) (instead of
# Python 2's f.next()) also works on Python 3.
with open(filename, 'r') as f:
    for i in range(10):
        cur_line = json.loads(next(f))
        subreddits.append(cur_line['url'])
        subscribers.append(int(cur_line['subscribers']))
# Bar chart of subreddit size keyed by subreddit URL.
trace = go.Bar(
    x = subreddits,
    y = subscribers
)
layout=go.Layout(
    title=graph_title,
    xaxis=dict(
        title=x_axis_title,
    ),
    yaxis=dict(
        title=y_axis_title,
    )
)
data=[trace]
fig = go.Figure(data=data, layout=layout)
# Upload the figure to the Plotly service.
plotly.plot(fig)
| mit | Python | |
29e18ed63177dbe8306a22e3c0583342f4591464 | Exit routine for a controlled exit from ample | linucks/ample,rigdenlab/ample,linucks/ample,rigdenlab/ample | python/ample_exit.py | python/ample_exit.py | '''
Created on Mar 18, 2015
@author: jmht
'''
import logging
import sys
import traceback
# external imports
try: import pyrvapi
except: pyrvapi=None
def exit(msg):
    """Log *msg* as a fatal AMPLE error, banner-framed, and terminate.

    Appends a pointer to the debug log file when a DEBUG-level file handler
    is attached to the root logger, records a traceback of the exit site at
    DEBUG level, flushes the pyrvapi GUI if present, and exits with code 1.
    """
    logger = logging.getLogger()

    # Assemble the banner exactly as before, just via named pieces.
    border = "*" * 70 + "\n"
    banner = "*" * 20 + " " * 10 + "AMPLE ERROR" + " " * 10 + "*" * 19 + "\n"
    header = border + banner + border + "\n"

    # Find the log file of a DEBUG-level file handler; as in the original
    # loop, the last matching handler wins.
    debug_logs = [getattr(handler, 'baseFilename') for handler in logger.handlers
                  if hasattr(handler, 'baseFilename') and handler.level == logging.DEBUG]
    debug_log = debug_logs[-1] if debug_logs else None

    parts = ["\n\n" + "*" * 70 + "\n\n"]
    if debug_log:
        parts.append("More information may be found in the debug log file: {0}\n".format(debug_log))
    parts.append("\nIf you believe that this is an error with AMPLE, please email: ccp4@stfc.ac.uk\n")
    parts.append("providing as much information as you can about how you ran the program.\n")
    if debug_log:
        parts.append("\nPlease include the debug logfile with your email: {0}\n".format(debug_log))
    footer = "".join(parts)

    logger.critical(header + msg + footer)

    # Record where the exit was triggered for post-mortem debugging.
    logger.debug("AMPLE EXITING AT...")
    logger.debug("".join(traceback.format_list(traceback.extract_stack())))

    # Make sure the error widget is updated before the process dies.
    if pyrvapi: pyrvapi.rvapi_flush()
    sys.exit(1)
| bsd-3-clause | Python | |
fa3450a44621fab4a9a2f2ed1599d08f66860f70 | Integrate densities to check normalization | jarthurgross/bloch_distribution | integrate_density.py | integrate_density.py | import argparse
import numpy as np
import h5py
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Integrate probability '
                                     'densities to verify that they are '
                                     'normalized')
    parser.add_argument('data_filenames', metavar='files', nargs='+',
                        help='List of hdf5 files containing densities')
    args = parser.parse_args()

    epsilons = []
    Density_meshes = []
    Phi_meshes = []
    Theta_meshes = []
    for data_filename in args.data_filenames:
        # Read everything we need up front so each HDF5 file can be closed
        # immediately; the original kept every file open until interpreter
        # exit.
        with h5py.File(data_filename, 'r') as data_file:
            epsilons.append(data_file['densities'].attrs['epsilon'])
            Density_meshes.append(data_file['densities'][:])
            Phi_meshes.append(data_file['Phi'][:])
            # Recover theta from the stored R mesh (same inversion as the
            # original: theta = -2*arccos(R/2) + pi).
            Theta_meshes.append(-2 * np.arccos(data_file['R'][:] / 2) + np.pi)

    Total_probs = []
    for Density_mesh, Phi_mesh, Theta_mesh in zip(Density_meshes, Phi_meshes,
                                                  Theta_meshes):
        # Scale Density_mesh by sin(theta) so the spherical integral becomes
        # a plain double integral over the (theta, phi) rectangle.
        Scaled_density_mesh = Density_mesh * np.sin(Theta_mesh)
        Total_probs.append(np.trapz(np.trapz(Scaled_density_mesh, Phi_mesh),
                                    Theta_mesh[:, 0]))

    # Report the total probability per input file; each should be ~1.
    for epsilon, prob in zip(epsilons, Total_probs):
        print(epsilon, prob)
| mit | Python | |
72a573c24d5234003b9eeb9e0cc487d174908a2e | Add a Trie for storage of data string tokens. | geekofalltrades/quora-coding-challenges | typeahead_search/trie.py | typeahead_search/trie.py | """A Trie (prefix tree) class for use in typeahead search.
Every node in the TypeaheadSearchTrie is another TypeaheadSearchTrie instance.
"""
from weakref import WeakSet
class TypeaheadSearchTrie(object):
    def __init__(self):
        # The children of this node, keyed by the next character. Ordered
        # traversals are not needed, so a plain dictionary suffices.
        self.children = {}
        # Data entries associated with the word ending at this node. A
        # WeakSet lets entries disappear automatically when the data
        # entries are deleted elsewhere.
        self.entries = WeakSet()

    def add(self, word, entry):
        """Adds the given data entry to the given Trie word.

        The word is created in the Trie if it doesn't already exist.
        """
        if word:
            self.children.setdefault(
                word[0],
                TypeaheadSearchTrie()
            ).add(word[1:], entry)
        else:
            self.entries.add(entry)

    def search(self, word):
        """Return a set of all data entries represented by prefix `word`.

        Returns an empty set if this prefix is not in the Trie.
        """
        if word:
            try:
                return self.children[word[0]].search(word[1:])
            except KeyError:
                return set()
        else:
            return self.gather_entries()

    def gather_entries(self):
        """Gather all data entries stored in this node and its children."""
        # BUG FIX: the original returned set(...).update(...), which is
        # always None (set.update mutates in place and returns nothing),
        # and handing update() a generator of sets would have tried to add
        # the (unhashable) sets themselves. Union the child sets instead.
        entries = set(self.entries)
        # .values() (not the Python-2-only .itervalues()) keeps this
        # working on both Python versions.
        for child in self.children.values():
            entries |= child.gather_entries()
        return entries
| mit | Python | |
e5dd1722911e580caca136fda9b81bb53221c65c | add table widget | Ubuntu-Solutions-Engineering/ubuntu-tui-widgets | ubuntui/widgets/table.py | ubuntui/widgets/table.py | # Copyright 2014, 2015 Canonical, Ltd.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from urwid import (Columns, Pile, Divider)
class Cols:
    """Builder for a single urwid Columns row."""
    def __init__(self):
        # Accumulated column entries, in display order.
        self.columns = []
    def add(self, widget, width=None):
        """ Add a widget to a columns list
        Arguments:
        widget: widget
        width: width of column (flexible sizing when None)
        """
        if width is None:
            self.columns.append(widget)
        else:
            # urwid fixed-width sizing tuple: (sizing, width, widget).
            self.columns.append(('fixed', width, widget))
    def render(self):
        """ Renders columns with proper spacing
        """
        return Columns(self.columns)
class Table:
    """Builder for a vertical table of widgets separated by rule dividers."""
    def __init__(self):
        # Backing urwid Pile that holds the table rows.
        self.rows = Pile([])
    def append(self, item):
        """ Appends widget to Pile
        """
        # NOTE(review): urwid's Pile does not expose a list-style append()
        # in the releases I know of; this may need
        # self.rows.contents.append(...) -- confirm against the urwid
        # version this project pins.
        self.rows.append((item, self.rows.options()))
        # Follow each row with a horizontal rule divider.
        self.rows.append((
            Divider("\N{BOX DRAWINGS LIGHT HORIZONTAL}"),
            self.rows.options()))
    def render(self):
        """Return the underlying Pile for embedding in a larger layout."""
        return self.rows
| agpl-3.0 | Python | |
51fc613214f20738270f37280fb465aea84ed065 | test the wsgi logging | slimta/python-slimta,fisele/slimta-abusix | test/test_slimta_logging_wsgi.py | test/test_slimta_logging_wsgi.py |
import unittest
from testfixtures import log_capture
from slimta.logging import getWsgiLogger
class TestWsgiLogger(unittest.TestCase):
    """The WSGI logger emits one DEBUG line per request and per response."""
    def setUp(self):
        self.log = getWsgiLogger('test')
        # Stand-in WSGI environ; its id() is embedded in every log line.
        self.environ = {'var': 'val'}
    @log_capture()
    def test_request(self, l):
        self.log.request(self.environ)
        l.check(('test', 'DEBUG', 'http:{0}:request environ={{\'var\': \'val\'}}'.format(id(self.environ))))
    @log_capture()
    def test_response(self, l):
        self.log.response(self.environ, '200 OK', [('Header', 'Value')])
        l.check(('test', 'DEBUG', 'http:{0}:response headers=[(\'Header\', \'Value\')] status=\'200 OK\''.format(id(self.environ))))
# vim:et:fdm=marker:sts=4:sw=4:ts=4
| mit | Python | |
8af510b18a3f0f8298f9a992bffdccc9aee2c8c2 | add sandbox file | erdincay/gmvault,guaycuru/gmvault,gaubert/gmvault,gaubert/gmvault,gaubert/gmvault,guaycuru/gmvault,guaycuru/gmvault,erdincay/gmvault,erdincay/gmvault | src/gmv/sandbox.py | src/gmv/sandbox.py | '''
Created on Jan 30, 2012
@author: guillaume.aubert@gmail.com
'''
from cmdline_utils import CmdLineParser
if __name__ == '__main__':
    # Scratch entry point: build the project's command-line parser and turn
    # off interspersed-argument parsing (options must precede positionals).
    global_parser = CmdLineParser()
    global_parser.disable_interspersed_args()
| agpl-3.0 | Python | |
65f149c33c1ec6e7d7262092def4b175aa52fe54 | Create BinTreeRightSideView_001.py | Chasego/codi,cc13ny/algo,Chasego/cod,Chasego/codi,cc13ny/algo,Chasego/codi,cc13ny/algo,cc13ny/Allin,cc13ny/Allin,cc13ny/Allin,Chasego/codirit,Chasego/cod,Chasego/codirit,Chasego/codirit,cc13ny/algo,Chasego/codi,Chasego/codirit,cc13ny/algo,Chasego/cod,Chasego/cod,Chasego/codi,cc13ny/Allin,Chasego/codirit,cc13ny/Allin,Chasego/cod | leetcode/199-Binary-Tree-Right-Side-View/BinTreeRightSideView_001.py | leetcode/199-Binary-Tree-Right-Side-View/BinTreeRightSideView_001.py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    # @param root, a tree node
    # @return a list of integers
    def rightSideView(self, root):
        """Return the node values visible from the right side, top to bottom."""
        if root is None:
            return []
        left_view = self.rightSideView(root.left)
        right_view = self.rightSideView(root.right)
        # The right subtree dominates level by level; where the left subtree
        # is deeper, its extra levels show through on the right.
        if len(left_view) > len(right_view):
            right_view.extend(left_view[len(right_view):])
        # BUG FIX: the original returned [root.val].extend(rv), which is
        # always None because list.extend mutates in place.
        return [root.val] + right_view
| mit | Python | |
01c74cfea946eac098a0e144380314cd4676cf2f | Split lowpass filtering into another script. | lmjohns3/cube-experiment,lmjohns3/cube-experiment,lmjohns3/cube-experiment | analysis/04-lowpass.py | analysis/04-lowpass.py | #!/usr/bin/env python
from __future__ import division
import climate
import lmj.cubes
import pandas as pd
import scipy.signal
logging = climate.get_logger('lowpass')
def lowpass(df, freq=10., order=4):
    '''Filter marker data using a butterworth low-pass filter.

    This method alters the data in `df` in-place. Only columns whose name
    starts with ``marker`` and ends in ``x``, ``y`` or ``z`` are filtered.

    Parameters
    ----------
    df : pandas.DataFrame
        Motion-capture data indexed by time in seconds.
    freq : float, optional
        Use a butterworth filter with this cutoff frequency. Defaults to
        10Hz. ``None`` is also treated as the 10Hz default, so callers
        that forward an unset command-line option keep working.
    order : int, optional
        Order of the butterworth filter. Defaults to 4.

    Raises
    ------
    ValueError
        If the cutoff is not strictly between 0 and the Nyquist frequency.
    '''
    if freq is None:
        freq = 10.
    # The sampling rate is derived from the mean spacing of the time index.
    nyquist = 1 / (2 * pd.Series(df.index).diff().mean())
    if not 0 < freq < nyquist:
        # A real exception rather than assert: asserts vanish under -O.
        raise ValueError('cutoff frequency must lie in (0, {})'.format(nyquist))
    passes = 2  # filtfilt makes two passes over the data.
    correct = (2 ** (1 / passes) - 1) ** 0.25
    # Halve the order (filtfilt doubles the effective order) and widen the
    # cutoff so the two-pass response keeps the requested -3dB point.
    # Integer floor division keeps butter()'s order argument integral.
    b, a = scipy.signal.butter(order // passes, (freq / correct) / nyquist)
    for c in df.columns:
        if c.startswith('marker') and c[-1] in 'xyz':
            df.loc[:, c] = scipy.signal.filtfilt(b, a, df[c])
@climate.annotate(
    root='load data files from this directory tree',
    output='save smoothed data files to this directory tree',
    pattern=('process only trials matching this pattern', 'option'),
    freq=('lowpass filter at N Hz', 'option', None, float),
)
def main(root, output, pattern='*', freq=None):
    # Filter every matching trial in-place, then save it under the output
    # tree, preserving its path relative to the input root.
    # NOTE(review): when --freq is omitted this forwards freq=None to
    # lowpass(); confirm lowpass() accepts None rather than only relying on
    # its 10Hz signature default.
    for t in lmj.cubes.Experiment(root).trials_matching(pattern):
        lowpass(t.df, freq)
        t.save(t.root.replace(root, output))
if __name__ == '__main__':
    climate.call(main)
| mit | Python | |
0224a259c7fd61fbabdb8ab632471e68b7fd6b4a | Add script used to generate devstats repo groups | pwittrock/community,pwittrock/community,kubernetes/community,kubernetes/community,kubernetes/community | hack/generate-devstats-repo-sql.py | hack/generate-devstats-repo-sql.py | #!/usr/bin/env python3
# Copyright 2019 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Output devstats repo_groups.sql based on subproject defintions in sigs.yaml
This is likely missing a few repos because:
- some repos lack an owner (eg: kubernetes/kubernetes)
- it doesn't enumerate all repos from all kubernetes-owned orgs
- it ignores the fact that committees can own repos, only grouping by sig
The sql generated is NOT intended to overwrite/replace the file that lives at
github.com/cncf/devstats/scripts/kubernetes/repo_groups.sql, but instead aid a
human in doing some manual updates to the file. Future improvements to this
script could eliminate that part of the process, but it's where we are today.
"""
import argparse
import ruamel.yaml as yaml
import json
import re
import sys
update_gha_repos_template = """
update gha_repos set repo_group = 'SIG {}' where name in (
{}
);
"""
def repos_from_sig(sig):
    """Returns a list of org/repos given a sig"""
    # A repo belongs to the sig when one of its subprojects owns the repo's
    # top-level OWNERS file; collect the unique org/repo names.
    found = set()
    for subproject in sig.get('subprojects') or []:
        for uri in subproject['owners']:
            owners_path = re.sub(
                r"https://raw.githubusercontent.com/(.*)/master/(.*)",
                r"\1/\2", uri)
            path_parts = owners_path.split('/')
            # Only org/repo/OWNERS (the repo root) counts; deeper OWNERS
            # files mean the subproject owns a subtree, not the repo.
            if path_parts[2] == 'OWNERS':
                found.add('/'.join(path_parts[0:2]))
    return sorted(found)
def write_repo_groups_sql(sigs, fp):
    """Write one UPDATE statement per SIG that owns at least one repo.

    ``sigs`` is the parsed sigs.yaml mapping; ``fp`` is a writable text
    stream. SIGs with no owned repos produce no output at all.
    """
    for sig in sigs['sigs']:
        owned_repos = repos_from_sig(sig)
        if not owned_repos:
            continue
        quoted = [" '{}'".format(repo) for repo in owned_repos]
        statement = update_gha_repos_template.format(
            sig['name'], ',\n'.join(quoted))
        fp.write(statement)
def main(sigs_yaml, repo_groups_sql):
    """Load sigs.yaml and emit repo_groups SQL to a file or stdout.

    When ``repo_groups_sql`` is None the SQL goes to stdout; otherwise it
    is written to that path (overwriting any existing file).
    """
    with open(sigs_yaml) as src:
        sigs = yaml.round_trip_load(src)
    if repo_groups_sql is None:
        write_repo_groups_sql(sigs, sys.stdout)
    else:
        with open(repo_groups_sql, 'w') as dest:
            write_repo_groups_sql(sigs, dest)
if __name__ == '__main__':
    # CLI entry point: read sigs.yaml and emit devstats repo_groups SQL.
    PARSER = argparse.ArgumentParser(
        description='Do things with sigs.yaml')
    PARSER.add_argument(
        '--sigs-yaml',
        default='./sigs.yaml',
        help='Path to sigs.yaml')
    PARSER.add_argument(
        '--repo-groups-sql',
        help='Path to output repo_groups.sql if provided')
    ARGS = PARSER.parse_args()
    # When --repo-groups-sql is omitted, main() writes to stdout.
    main(ARGS.sigs_yaml, ARGS.repo_groups_sql)
| apache-2.0 | Python | |
6a6b9eff5e5d0d7c4a1a969b15a2a4583cf79855 | add game-of-throne-ii | EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonAlgorithms/HackerRank,zeyuanxy/hacker-rank,EdisonCodeKeeper/hacker-rank,EdisonAlgorithms/HackerRank | algorithms/strings/game-of-throne-ii/game-of-throne-ii.py | algorithms/strings/game-of-throne-ii/game-of-throne-ii.py | from collections import Counter
MOD = 10**9 + 7
def factMod(x):
    """Return x! modulo MOD (x! is 1 for x <= 1)."""
    result = 1
    for factor in range(2, x + 1):
        result = (result * factor) % MOD
    return result
def powMod(x, y):
    """Return (x ** y) % MOD for non-negative y.

    Replaces a hand-rolled recursive square-and-multiply with the
    three-argument form of the built-in pow(), which performs modular
    exponentiation natively (faster, and no recursion depth to manage).
    """
    return pow(x, y, MOD)
if __name__ == '__main__':
    # Python 2 script (uses raw_input and print statements).
    word = raw_input()
    counter = Counter(word)
    # numerator accumulates the total number of letter *pairs*; the answer
    # is the multinomial coefficient numerator! / prod((count_i // 2)!),
    # i.e. the number of distinct arrangements of the paired letters.
    denominator = 1
    numerator = 0
    for i in counter:
        numerator += counter[i] / 2
        denominator = (denominator * factMod(counter[i] / 2)) % MOD
    # Division mod a prime is done via Fermat's little theorem:
    # denominator^(MOD-2) is the modular inverse of denominator.
    answer = (factMod(numerator) * powMod(denominator, MOD - 2)) % MOD
    print answer
| mit | Python | |
6a9b6f0227b37d9c4da424c25d20a2b7e9397a9f | Make `publication_date` column not nullable. | gthank/pytips,gthank/pytips,gthank/pytips,gthank/pytips | alembic/versions/3800f47ba771_publication_date_not_nullable.py | alembic/versions/3800f47ba771_publication_date_not_nullable.py | """Make the `publication_date` column required.
Revision ID: 3800f47ba771
Revises: 17c1af634026
Create Date: 2012-12-13 21:14:19.363112
"""
# revision identifiers, used by Alembic.
revision = '3800f47ba771'
down_revision = '17c1af634026'
from alembic import op
import sqlalchemy as sa
def upgrade():
    # Forward migration: disallow NULLs in tip.publication_date.
    # Assumes no existing rows have a NULL publication_date -- TODO confirm,
    # otherwise this ALTER will fail on backends that enforce the constraint.
    op.alter_column('tip', 'publication_date', nullable=False)
def downgrade():
    # Reverse migration: make tip.publication_date optional again.
    op.alter_column('tip', 'publication_date', nullable=True)
| isc | Python | |
7bae0fdf5fb6c92548875d21d00daa01cfe86100 | Add test | dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq | corehq/motech/repeaters/expression/tests.py | corehq/motech/repeaters/expression/tests.py | import json
from datetime import datetime, timedelta
from django.test import TestCase
from casexml.apps.case.mock import CaseFactory
from corehq.apps.accounting.models import SoftwarePlanEdition
from corehq.apps.accounting.tests.utils import DomainSubscriptionMixin
from corehq.apps.accounting.utils import clear_plan_version_cache
from corehq.apps.domain.shortcuts import create_domain
from corehq.motech.models import ConnectionSettings
from corehq.motech.repeaters.dbaccessors import delete_all_repeat_records
from corehq.motech.repeaters.expression.repeaters import CaseExpressionRepeater
from corehq.motech.repeaters.models import RepeatRecord
from corehq.util.test_utils import flag_enabled
@flag_enabled('EXPRESSION_REPEATER')
class CaseExpressionRepeaterTest(TestCase, DomainSubscriptionMixin):
    """Tests case forwarding through CaseExpressionRepeater.

    The class-level fixture configures a repeater that forwards only
    cases of type 'forward-me' and builds its payload from a dict
    expression (the case_id plus a constant string).
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        cls.domain = 'test'
        cls.domain_obj = create_domain(cls.domain)
        cls.setup_subscription(cls.domain, SoftwarePlanEdition.PRO)
        cls.factory = CaseFactory(cls.domain)
        url = 'fake-url'
        cls.connection = ConnectionSettings.objects.create(domain=cls.domain, name=url, url=url)
        cls.repeater = CaseExpressionRepeater(
            domain=cls.domain,
            connection_settings_id=cls.connection.id,
            # Only cases whose 'type' property equals 'forward-me' match.
            configured_filter={
                "type": "boolean_expression",
                "expression": {
                    "type": "property_name",
                    "property_name": "type",
                },
                "operator": "eq",
                "property_value": "forward-me",
            },
            # Payload: {"case_id": <case id>, "a-constant": "foo"}.
            configured_expression={
                "type": "dict",
                "properties": {
                    "case_id": {
                        "type": "property_name",
                        "property_name": "case_id",
                    },
                    "a-constant": {
                        "type": "constant",
                        "constant": "foo",
                    }
                }
            }
        )
        cls.repeater.save()
    @classmethod
    def tearDownClass(cls):
        # Tear down in reverse order of creation before deferring to super().
        cls.repeater.delete()
        cls.connection.delete()
        cls.teardown_subscriptions()
        cls.domain_obj.delete()
        clear_plan_version_cache()
        super().tearDownClass()
    def tearDown(self):
        delete_all_repeat_records()
    @classmethod
    def repeat_records(cls, domain_name):
        # Enqueued repeat records have next_check set 48 hours in the future.
        later = datetime.utcnow() + timedelta(hours=48 + 1)
        return RepeatRecord.all(domain=domain_name, due_before=later)
    def test_filter_cases(self):
        # Only the 'forward-me' case should produce a repeat record.
        forwardable_case = self.factory.create_case(case_type='forward-me')
        unforwardable_case = self.factory.create_case(case_type='dont-forward-me')  # noqa
        repeat_records = self.repeat_records(self.domain).all()
        self.assertEqual(RepeatRecord.count(domain=self.domain), 1)
        self.assertEqual(repeat_records[0].payload_id, forwardable_case.case_id)
    def test_payload(self):
        # The payload is the JSON produced by the configured dict expression.
        forwardable_case = self.factory.create_case(case_type='forward-me')
        repeat_record = self.repeat_records(self.domain).all()[0]
        self.assertEqual(repeat_record.get_payload(), json.dumps({
            "case_id": forwardable_case.case_id,
            "a-constant": "foo",
        }))
| bsd-3-clause | Python | |
57e2776a59214318d335f2fa0e2cc1854c33d488 | Add lc0532_k_diff_pairs_in_an_array.py | bowen0701/algorithms_data_structures | lc0532_k_diff_pairs_in_an_array.py | lc0532_k_diff_pairs_in_an_array.py | """Leetcode 532. K-diff Pairs in an Array
Easy
URL: https://leetcode.com/problems/k-diff-pairs-in-an-array/
Given an array of integers and an integer k, you need to find the number
of unique k-diff pairs in the array. Here a k-diff pair is defined as an
integer pair (i, j), where i and j are both numbers in the array and
their absolute difference is k.
Example 1:
Input: [3, 1, 4, 1, 5], k = 2
Output: 2
Explanation: There are two 2-diff pairs in the array, (1, 3) and (3, 5).
Although we have two 1s in the input, we should only return the number
of unique pairs.
Example 2:
Input:[1, 2, 3, 4, 5], k = 1
Output: 4
Explanation: There are four 1-diff pairs in the array, (1, 2), (2, 3),
(3, 4) and (4, 5).
Example 3:
Input: [1, 3, 1, 5, 4], k = 0
Output: 1
Explanation: There is one 0-diff pair in the array, (1, 1).
Note:
- The pairs (i, j) and (j, i) count as the same pair.
- The length of the array won't exceed 10,000.
- All the integers in the given input belong to the range: [-1e7, 1e7].
"""
class Solution(object):
    def findPairs(self, nums, k):
        """Count unique k-diff pairs (i, j) in nums with |i - j| == k.

        Pairs are unordered and each distinct value pair counts once,
        regardless of how often the values repeat in the input.

        :type nums: List[int]
        :type k: int
        :rtype: int
        """
        from collections import Counter
        if k < 0:
            # An absolute difference can never be negative.
            return 0
        counts = Counter(nums)
        if k == 0:
            # A 0-diff pair needs the same value to occur at least twice.
            return sum(1 for c in counts.values() if c > 1)
        # For k > 0, each distinct value x pairs with x + k at most once.
        return sum(1 for x in counts if x + k in counts)
def main():
    # Placeholder driver -- no ad-hoc checks wired up yet.
    pass
if __name__ == '__main__':
    main()
| bsd-2-clause | Python | |
c5a736a742897874262259a5199674b7f949de75 | test coverage for templates | evansde77/dockerstache,evansde77/dockerstache,evansde77/dockerstache | test/unit/templates_tests.py | test/unit/templates_tests.py | #!/usr/bin/env python
"""
templates tests
"""
import json
import os
import shutil
import tempfile
import unittest

import dockerstache.templates as templ
class TemplatesTests(unittest.TestCase):
    """
    test coverage for templates module
    """
    def setUp(self):
        """Create a source tree containing two nested mustache templates."""
        self.tempdir = tempfile.mkdtemp()
        self.target_dir = tempfile.mkdtemp()
        self.subdirs1 = os.path.join(self.tempdir, 'dir1', 'dir2', 'dir3')
        self.subdirs2 = os.path.join(self.tempdir, 'dir1', 'dir4', 'dir5')
        os.makedirs(self.subdirs1)
        os.makedirs(self.subdirs2)
        self.template1 = os.path.join(
            self.tempdir, 'dir1', 'dir2', 'template1.json.mustache'
        )
        self.template2 = os.path.join(
            self.tempdir, 'dir1', 'dir4', 'dir5', 'template2.json.mustache'
        )
        with open(self.template1, 'w') as handle:
            json.dump({
                'template': 1,
                'value': '{{user}}'
            }, handle)
        with open(self.template2, 'w') as handle:
            json.dump({
                'template': 2,
                'value': '{{group}}'
            }, handle)

    def tearDown(self):
        """cleanup test data"""
        # shutil.rmtree instead of shelling out to 'rm -rf': portable and
        # safe for paths containing spaces or shell metacharacters.
        if os.path.exists(self.tempdir):
            shutil.rmtree(self.tempdir)
        if os.path.exists(self.target_dir):
            shutil.rmtree(self.target_dir)

    def test_replicate_directory_tree(self):
        """test replication of dir structure"""
        templ.replicate_directory_tree(self.tempdir, self.target_dir)
        expected_path1 = os.path.join(self.target_dir, 'dir1', 'dir2', 'dir3')
        expected_path2 = os.path.join(self.target_dir, 'dir1', 'dir4', 'dir5')
        self.assertTrue(os.path.exists(expected_path1))
        self.assertTrue(os.path.exists(expected_path2))

    def test_find_templates(self):
        """test find template function"""
        templates_found = templ.find_templates(self.tempdir)
        self.assertEqual(len(templates_found), 2)
        self.assertIn(self.template1, templates_found)
        self.assertIn(self.template2, templates_found)

    def test_render_template(self):
        """test render template call"""
        file_out = os.path.join(self.target_dir, 'template1.json')
        context = {'user': 'steve', 'group': 'vanhalen'}
        templ.render_template(self.template1, file_out, context)
        self.assertTrue(os.path.exists(file_out))
        with open(file_out, 'r') as handle:
            data = json.load(handle)
        self.assertIn('template', data)
        self.assertIn('value', data)
        self.assertEqual(data['template'], 1)
        self.assertEqual(data['value'], context['user'])

    def test_process_templates(self):
        """test end to end process templates call"""
        context = {'user': 'steve', 'group': 'vanhalen'}
        templ.process_templates(self.tempdir, self.target_dir, context)
        # The directory structure must be replicated...
        expected_path1 = os.path.join(self.target_dir, 'dir1', 'dir2', 'dir3')
        expected_path2 = os.path.join(self.target_dir, 'dir1', 'dir4', 'dir5')
        self.assertTrue(os.path.exists(expected_path1))
        self.assertTrue(os.path.exists(expected_path2))
        # ...and each template rendered next to its replicated location,
        # with the .mustache suffix stripped.
        expected_file1 = os.path.join(
            self.target_dir, 'dir1', 'dir2', 'template1.json'
        )
        expected_file2 = os.path.join(
            self.target_dir, 'dir1', 'dir4', 'dir5', 'template2.json'
        )
        self.assertTrue(os.path.exists(expected_file1))
        self.assertTrue(os.path.exists(expected_file2))
        with open(expected_file1, 'r') as handle:
            data1 = json.load(handle)
        with open(expected_file2, 'r') as handle:
            data2 = json.load(handle)
        self.assertIn('template', data1)
        self.assertIn('value', data1)
        self.assertEqual(data1['template'], 1)
        self.assertEqual(data1['value'], context['user'])
        self.assertIn('template', data2)
        self.assertIn('value', data2)
        self.assertEqual(data2['template'], 2)
        self.assertEqual(data2['value'], context['group'])
| apache-2.0 | Python | |
a66ce55c2abcb434168aadb195fd00b8df6f4fd1 | add scoreboard game test | jaebradley/nba_data | tests/test_scoreboardGame.py | tests/test_scoreboardGame.py | from unittest import TestCase
from datetime import datetime
from nba_data.data.scoreboard_game import ScoreboardGame
from nba_data.data.season import Season
from nba_data.data.team import Team
from nba_data.data.matchup import Matchup
class TestScoreboardGame(TestCase):
    def test_instantiation(self):
        """ScoreboardGame accepts valid field types and rejects bad ones.

        The constructor type-checks its arguments with assertions, so an
        int passed where a str/Season/datetime/Matchup is expected must
        raise AssertionError.
        """
        game_id_value = "1234"
        season_value = Season.season_2016
        start_time_value = datetime.now()
        matchup_value = Matchup(home_team=Team.atlanta_hawks, away_team=Team.boston_celtics)
        self.assertIsNotNone(ScoreboardGame(game_id=game_id_value, season=season_value, start_time=start_time_value,
                                            matchup=matchup_value))
        # Each positional slot in turn gets a wrongly-typed value.
        self.assertRaises(AssertionError, ScoreboardGame, 1234, season_value, start_time_value, matchup_value)
        self.assertRaises(AssertionError, ScoreboardGame, game_id_value, 1234, start_time_value, matchup_value)
        self.assertRaises(AssertionError, ScoreboardGame, game_id_value, season_value, 1234, matchup_value)
        self.assertRaises(AssertionError, ScoreboardGame, game_id_value, season_value, start_time_value, 1234)
| mit | Python | |
6b1be6883ead01cc226226499644adb7e99542f8 | Add functionality to load and test a saved model | harpribot/representation-music,harpribot/representation-music | Experiments/evaluate_model.py | Experiments/evaluate_model.py | # import os
import sys
import tensorflow as tf
# sys.path.append(os.path.abspath(os.path.dirname(__file__) + '/' + '../..'))
from Models.low_level_sharing_four_hidden import LowLevelSharingModel
from utils.data_utils.labels import Labels
from utils.data_utils.data_handler import fetch_data
class EvaluateModel(object):
    """Restores a trained multi-task model and evaluates it on the test set.

    Call order matters: load_data() must run before load_model(), because
    the model is constructed with the input/output dimensions derived from
    the test data.
    """
    def __init__(self, task_ids):
        self.x_test = None            # test inputs, set by load_data()
        self.y_test = {}              # per-task test labels, set by load_data()
        self.input_dimension = 0      # number of input features
        self.output_dimensions = {}   # task id -> output width
        self.task_ids = task_ids
        self.model = None             # set by load_model()
        self.sess = None              # TensorFlow session, set by load_model()
    def load_model(self, model_file, model_class):
        """
        Loads the model from the given checkpoint file.
        :param model_file: The checkpoint file from which the model should be loaded.
        :param model_class: The :class:`Model` class or any of its child classes.
        """
        self.sess = tf.Session()
        self.model = model_class(self.task_ids, self.input_dimension, self.output_dimensions)
        self.model.create_model()
        saver = tf.train.Saver()
        saver.restore(self.sess, model_file)
        sys.stderr.write("Model " + model_file + " loaded.\n")
    def load_data(self):
        """
        Loads the test dataset.
        """
        _, _, self.x_test, _, _, self.y_test = fetch_data(self.task_ids)
        self.input_dimension = self.x_test.shape[1]
        # NOTE(review): misnomer -- this counts *test* samples, not training
        # samples; it is derived from x_test.
        self.train_samples = self.x_test.shape[0]
        self.output_dimensions = {task_id: self.y_test[task_id].shape[1] for task_id in self.task_ids}
    def evaluate_model(self):
        """
        Returns a dictionary of errors indexed by task identifiers where each element denotes the error for that
        task on the test set.
        :return dictionary of test errors
        """
        # One feed_dict serves all tasks: inputs plus every task's labels.
        feed_dict = dict()
        feed_dict[self.model.get_layer('input')] = self.x_test
        for id_ in self.task_ids:
            feed_dict[self.model.get_layer(id_ + '-ground-truth')] = self.y_test[id_]
        errors = {}
        for task_id in self.task_ids:
            errors[task_id] = self.model.get_layer(task_id + '-loss').eval(session=self.sess, feed_dict=feed_dict)
        return errors
if __name__ == '__main__':
    # Usage: evaluate_model.py <checkpoint-file>
    model_file = sys.argv[1]
    model_class = LowLevelSharingModel
    task_ids = [Labels.hotness.value, Labels.duration.value, Labels.year.value]
    evaluation = EvaluateModel(task_ids)
    # Data must be loaded first: load_model needs the dimensions that
    # load_data derives from the test set.
    evaluation.load_data()
    evaluation.load_model(model_file, model_class)
    errors = evaluation.evaluate_model()
    sys.stderr.write(str(errors) + "\n")
| mit | Python | |
d883cfac71c9ec39abcd75e79b9bec0f53e7890d | Initialize transpositionHacker | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/CrackingCodesWithPython/Chapter12/transpositionHacker.py | books/CrackingCodesWithPython/Chapter12/transpositionHacker.py | # Transposition Cipher Hacker
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import pyperclip, detectEnglish, transpositionDecrypt
def main():
    """Brute-force the sample ciphertext and copy the result to the clipboard."""
    # You might want to copy & paste this text from the source code at
    # https://www.nostarch.com/crackingcodes/:
    myMessage = """AaKoosoeDe5 b5sn ma reno ora'lhlrrceey e enlh na indeit n uhoretrm au ieu v er Ne2 gmanw,forwnlbsya apor tE.no euarisfatt e mealefedhsppmgAnlnoe(c -or)alat r lw o eb nglom,Ain one dtes ilhetcdba. t tg eturmudg,tfl1e1 v nitiaicynhrCsaemie-sp ncgHt nie cetrgmnoa yc r,ieaa toesa- e a0m82e1w shcnth ekh gaecnpeutaaieetgn iodhso d ro hAe snrsfcegrt NCsLc b17m8aEheideikfr aBercaeu thllnrshicwsg etriebruaisss d iorr."""

    hackedMessage = hackTransposition(myMessage)

    if hackedMessage is None:
        print('Failed to hack encryption.')
    else:
        print('Copying hacked message to clipboard:')
        # Bug fix: print the decrypted text itself, not the literal
        # string 'hackedMessage'.
        print(hackedMessage)
        pyperclip.copy(hackedMessage)
def hackTransposition(message):
    """Brute-force the transposition-cipher key for message.

    Tries every key from 1 to len(message) - 1, scores each decryption
    with detectEnglish, and asks the user to confirm likely candidates.
    Returns the decrypted text, or None if no candidate is confirmed.
    """
    print('Hacking...')
    # Python programs can be stopped at any time by pressing
    # Ctrl-C (on Windows) or Ctrl-D (on macOS and Linux):
    print('(Press Ctl-C (on Windows) or Ctrl-D (on macOS and Linux) to quit at any time.)')
    # Brute-force by looping through every possible key:
    for key in range(1, len(message)):
        print('Trying key #%s...' % (key))
        decryptedText = transpositionDecrypt.decryptMessage(key, message)
        if detectEnglish.isEnglish(decryptedText):
            # Ask user if this is the correct decryption:
            print()
            print('Possible encryption hack:')
            # Show only the first 100 characters as a preview.
            print('Key %s: %s' % (key, decryptedText[:100]))
            print()
            print('Enter D if done, anything else to continue hacking:')
            response = input('> ')
            if response.strip().upper().startswith('D'):
                return decryptedText
    return None
if __name__ == '__main__':
main() | mit | Python | |
75805397dd62cfa00eb9a9d253259ea9c79f426b | Test Issue #605 | Gregory-Howard/spaCy,explosion/spaCy,explosion/spaCy,recognai/spaCy,oroszgy/spaCy.hu,Gregory-Howard/spaCy,raphael0202/spaCy,raphael0202/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,aikramer2/spaCy,honnibal/spaCy,honnibal/spaCy,raphael0202/spaCy,recognai/spaCy,Gregory-Howard/spaCy,banglakit/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,honnibal/spaCy,oroszgy/spaCy.hu,recognai/spaCy,spacy-io/spaCy,explosion/spaCy,aikramer2/spaCy,oroszgy/spaCy.hu,recognai/spaCy,banglakit/spaCy,aikramer2/spaCy,spacy-io/spaCy,explosion/spaCy,oroszgy/spaCy.hu,spacy-io/spaCy,banglakit/spaCy,Gregory-Howard/spaCy,explosion/spaCy,raphael0202/spaCy,spacy-io/spaCy,honnibal/spaCy,banglakit/spaCy,recognai/spaCy,banglakit/spaCy,aikramer2/spaCy,aikramer2/spaCy,Gregory-Howard/spaCy,raphael0202/spaCy,Gregory-Howard/spaCy,explosion/spaCy,spacy-io/spaCy,banglakit/spaCy,raphael0202/spaCy,recognai/spaCy | spacy/tests/regression/test_issue605.py | spacy/tests/regression/test_issue605.py | from ...attrs import LOWER, ORTH
from ...tokens import Doc
from ...vocab import Vocab
from ...matcher import Matcher
def return_false(doc, ent_id, label, start, end):
    # Acceptor callback that vetoes every candidate match.
    return False
def test_matcher_accept():
    """Regression test for issue #605: an acceptor that returns False
    must suppress every match for its entity."""
    doc = Doc(Vocab(), words=[u'The', u'golf', u'club', u'is', u'broken'])
    golf_pattern = [
        { ORTH: "golf"},
        { ORTH: "club"}
    ]
    matcher = Matcher(doc.vocab)
    # The pattern would match 'golf club', but the acceptor rejects it.
    matcher.add_entity('Sport_Equipment', acceptor=return_false)
    matcher.add_pattern("Sport_Equipment", golf_pattern)
    match = matcher(doc)
    assert match == []
| mit | Python | |
38dd3604918b2e0d7770e855f775db9ff6720de8 | Add initial DrugBank client | bgyori/indra,johnbachman/belpy,bgyori/indra,johnbachman/indra,sorgerlab/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/indra,sorgerlab/indra,johnbachman/belpy,bgyori/indra,johnbachman/indra,sorgerlab/belpy,sorgerlab/belpy,sorgerlab/indra | indra/databases/drugbank_client.py | indra/databases/drugbank_client.py | import os
from indra.util import read_unicode_csv
mappings_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
os.pardir, 'resources', 'drugbank_mappings.tsv')
def get_chebi_id(drugbank_id):
    """Return the CHEBI ID (e.g. 'CHEBI:1234') for a DrugBank ID, or None."""
    return drugbank_chebi.get(drugbank_id)
def get_chembl_id(drugbank_id):
    """Return the ChEMBL ID for a DrugBank ID, or None."""
    return drugbank_chembl.get(drugbank_id)
def get_drugbank_from_chebi(chebi_id):
    """Return the DrugBank ID for a CHEBI ID ('CHEBI:...'), or None."""
    return chebi_drugbank.get(chebi_id)
def get_drugbank_from_chembl(chembl_id):
    """Return the DrugBank ID for a ChEMBL ID, or None."""
    return chembl_drugbank.get(chembl_id)
def load_mappings():
    """Load DrugBank<->CHEBI and DrugBank<->ChEMBL mappings from the
    resource TSV.

    Returns a 4-tuple of dicts:
    (drugbank->chebi, chebi->drugbank, drugbank->chembl, chembl->drugbank).

    Raises ValueError if the resource contains a conflicting duplicate
    mapping. (Previously a duplicate dropped into an ipdb debugger
    session -- leftover debugging code that would hang non-interactive
    runs.)
    """
    drugbank_chebi = {}
    chebi_drugbank = {}
    drugbank_chembl = {}
    chembl_drugbank = {}
    for row in read_unicode_csv(mappings_file, delimiter='\t', skiprows=1):
        drugbank_id, db_ns, db_id, source = row
        if db_ns == 'CHEBI':
            chebi_id = 'CHEBI:%s' % db_id
            if drugbank_id in drugbank_chebi or chebi_id in chebi_drugbank:
                raise ValueError('Duplicate CHEBI mapping for %s / %s'
                                 % (drugbank_id, chebi_id))
            drugbank_chebi[drugbank_id] = chebi_id
            chebi_drugbank[chebi_id] = drugbank_id
        elif db_ns == 'CHEMBL':
            if drugbank_id in drugbank_chembl or db_id in chembl_drugbank:
                raise ValueError('Duplicate CHEMBL mapping for %s / %s'
                                 % (drugbank_id, db_id))
            drugbank_chembl[drugbank_id] = db_id
            chembl_drugbank[db_id] = drugbank_id
    return drugbank_chebi, chebi_drugbank, drugbank_chembl, chembl_drugbank
drugbank_chebi, chebi_drugbank, drugbank_chembl, chembl_drugbank = \
load_mappings()
| bsd-2-clause | Python | |
84fcbb34005c5bfa19d33e583ca48583b04baeb4 | Create mp3tag.py | WebShark025/TheZigZagProject,WebShark025/TheZigZagProject | plugins/mp3tag.py | plugins/mp3tag.py | .
| mit | Python | |
407e0b6596539a5f8fcac099c11f1fabc956ea26 | add plugin to show available package updates | bmbove/i3pybar | plugins/pacman.py | plugins/pacman.py | """
@author Brian Bove https://github.com/bmbove
"""
import re
import subprocess
from .base import PluginBase
class PacmanPlugin(PluginBase):
    """Status-bar plugin showing the number of pending pacman updates.

    Relies on the 'checkupdates' helper (pacman-contrib) being on PATH.
    NOTE(review): some checkupdates versions exit non-zero when there are
    no updates, which makes check_output raise -- confirm against the
    target distro.
    """

    def configure(self):
        """Return the default config; '{updates}' is filled in by update()."""
        defaults = {
            'format': 'pacman {updates}'
        }
        return defaults

    def get_update_count(self):
        """Return {'updates': <number of available package updates>}."""
        output = subprocess.check_output(["checkupdates"])
        output = output.decode('ascii').strip()
        # Empty output means zero updates; ''.split('\n') would have
        # counted it as one (off-by-one in the original).
        lines = output.splitlines() if output else []
        return {'updates': len(lines)}

    def update(self):
        res = self.get_update_count()
        if isinstance(res, dict):
            # str.format substitutes the '{updates}' placeholder. The old
            # code did `locals().update(res)` (mutates a throwaway copy)
            # and then used '%'-formatting, which never interpolates
            # '{...}' placeholders -- so the raw format string was shown.
            self.set_text(self.config['format'].format(**res))
        else:
            self.set_text(res)
| mit | Python | |
9f9e69ac19e982cd6cc577262704fa5c9f4ebdfc | Create test_logo_client.py (#90) | gijzelaerr/python-snap7 | test/test_logo_client.py | test/test_logo_client.py | import unittest
import logging
import time
#import mock
from subprocess import Popen
from os import path, kill
import snap7
logging.basicConfig(level=logging.WARNING)
ip = '127.0.0.1'
tcpport = 1102
db_number = 1
rack = 0x1000
slot = 0x2000
class TestLogoClient(unittest.TestCase):
    """Integration tests for the snap7 Logo client against a local
    snap7 demo server started once for the whole class."""
    @classmethod
    def setUpClass(cls):
        server_path = path.join(path.dirname(path.realpath(snap7.__file__)),
                                "bin/snap7-server.py")
        cls.server_pid = Popen([server_path]).pid
        time.sleep(2)  # wait for server to start

    @classmethod
    def tearDownClass(cls):
        # Signal 1 (SIGHUP) terminates the demo server process.
        kill(cls.server_pid, 1)

    def setUp(self):
        self.client = snap7.logo.Client()
        self.client.connect(ip, rack, slot, tcpport)

    def tearDown(self):
        self.client.disconnect()
        self.client.destroy()

    def test_read(self):
        # Round-trip: what we write to a VM address must read back.
        vm_address = "V40"
        value = 50
        self.client.write(vm_address, value)
        result = self.client.read(vm_address)
        self.assertEqual(value, result)

    def test_write(self):
        vm_address = "V20"
        value = 8
        self.client.write(vm_address, value)

    def test_read_area(self):
        area = snap7.snap7types.areas.DB
        dbnumber = 1
        amount = 1
        start = 1
        self.client.read_area(area, dbnumber, start, amount)

    def test_write_area(self):
        area = snap7.snap7types.areas.DB
        dbnumber = 1
        size = 1
        start = 20
        data = bytearray(size)
        self.client.write_area(area, dbnumber, start, data)

    def test_get_connected(self):
        self.client.get_connected()

    def test_set_param(self):
        values = (
            (snap7.snap7types.PingTimeout, 800),
            (snap7.snap7types.SendTimeout, 15),
            (snap7.snap7types.RecvTimeout, 3500),
            (snap7.snap7types.SrcRef, 128),
            (snap7.snap7types.DstRef, 128),
            (snap7.snap7types.SrcTSap, 128),
            (snap7.snap7types.PDURequest, 470),
        )
        for param, value in values:
            self.client.set_param(param, value)
        # RemotePort cannot be changed while connected.
        self.assertRaises(Exception, self.client.set_param,
                          snap7.snap7types.RemotePort, 1)

    def test_get_param(self):
        expected = (
            (snap7.snap7types.RemotePort, tcpport),
            (snap7.snap7types.PingTimeout, 750),
            (snap7.snap7types.SendTimeout, 10),
            (snap7.snap7types.RecvTimeout, 3000),
            (snap7.snap7types.SrcRef, 256),
            (snap7.snap7types.DstRef, 0),
            (snap7.snap7types.SrcTSap, 4096),
            (snap7.snap7types.PDURequest, 480),
        )
        for param, value in expected:
            self.assertEqual(self.client.get_param(param), value)
        non_client = snap7.snap7types.LocalPort, snap7.snap7types.WorkInterval,\
            snap7.snap7types.MaxClients, snap7.snap7types.BSendTimeout,\
            snap7.snap7types.BRecvTimeout, snap7.snap7types.RecoveryTime,\
            snap7.snap7types.KeepAliveTime
        # invalid param for client
        for param in non_client:
            # Bug fix: pass the loop variable, not the whole tuple -- the
            # original passed `non_client` itself, so each parameter was
            # never actually exercised individually.
            self.assertRaises(Exception, self.client.get_param, param)
class TestClientBeforeConnect(unittest.TestCase):
    """
    Test suite of items that should run without an open connection.
    """
    def setUp(self):
        # Deliberately no connect(): these calls must work offline.
        self.client = snap7.client.Client()
    def test_set_param(self):
        # All client-side parameters are settable before connecting.
        values = (
            (snap7.snap7types.RemotePort, 1102),
            (snap7.snap7types.PingTimeout, 800),
            (snap7.snap7types.SendTimeout, 15),
            (snap7.snap7types.RecvTimeout, 3500),
            (snap7.snap7types.SrcRef, 128),
            (snap7.snap7types.DstRef, 128),
            (snap7.snap7types.SrcTSap, 128),
            (snap7.snap7types.PDURequest, 470),
        )
        for param, value in values:
            self.client.set_param(param, value)
if __name__ == '__main__':
unittest.main()
| mit | Python | |
71b84632478d5767e742a178edb222745dbd3aa3 | Add tests for bson serialization functions | oneklc/dimod,oneklc/dimod | tests/test_serialization_bson.py | tests/test_serialization_bson.py | import unittest
import dimod
from dimod.serialization.bson import bqm_bson_decoder, bqm_bson_encoder
import numpy as np
try:
import bson
_bson_imported = True
except ImportError:
_bson_imported = False
class TestBSONSerialization(unittest.TestCase):
    """Round-trip and golden-value tests for the dimod BSON encoder.

    The byte-string fixtures below are little-endian float32 arrays of
    the linear/quadratic biases as produced by bqm_bson_encoder.
    """
    def test_empty_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_qubo({})
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': False,
            'linear': b'',
            'quadratic_vals': b'',
            'variable_type': 'BINARY',
            'offset': 0.0,
            'variable_order': [],
            'index_dtype': '<u2',
            'quadratic_head': b'',
            'quadratic_tail': b'',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)
        self.assertEqual(bqm, decoded)
    def test_single_variable_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_ising({"a": -1}, {})
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': False,
            # -1.0 as a little-endian float32.
            'linear': b'\x00\x00\x80\xbf',
            'quadratic_vals': b'',
            'variable_type': 'SPIN',
            'offset': 0.0,
            'variable_order': ['a'],
            'index_dtype': '<u2',
            'quadratic_head': b'',
            'quadratic_tail': b'',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)
        self.assertEqual(bqm, decoded)
    def test_small_bqm(self):
        bqm = dimod.BinaryQuadraticModel.from_ising(
            {"a": 1, "b": 3, "c": 4.5, "d": 0},
            {"ab": -3, "cd": 3.5, "ad": 2}
        )
        encoded = bqm_bson_encoder(bqm)
        expected_encoding = {
            'as_complete': True,
            'linear': b'\x00\x00\x80?\x00\x00@@\x00\x00\x90@\x00\x00\x00\x00',
            'quadratic_vals': b'\x00\x00@\xc0\x00\x00\x00\x00\x00\x00\x00@'
                              b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00`@',
            'variable_type': 'SPIN',
            'offset': 0.0,
            'variable_order': ['a', 'b', 'c', 'd'],
            'index_dtype': '<u2',
        }
        self.assertDictEqual(encoded, expected_encoding)
        decoded = bqm_bson_decoder(encoded)
        # no easy way to directly check if the bqm objects are equal (b/c float
        # precision, missing edges), so for now check if the qubo matrices are
        # the same
        var_order = sorted(bqm)
        np.testing.assert_almost_equal(bqm.to_numpy_matrix(var_order),
                                       decoded.to_numpy_matrix(var_order))
    @unittest.skipUnless(_bson_imported, "no pymongo bson installed")
    def test_bsonable(self):
        # The encoder's output must be accepted by the real BSON library.
        bqm = dimod.BinaryQuadraticModel.from_ising(
            {"a": 1, "b": 3, "c": 4.5, "d": 0},
            {"ab": -3, "cd": 3.5, "ad": 2}
        )
        encoded = bqm_bson_encoder(bqm)
        bson.BSON.encode(encoded)
| apache-2.0 | Python | |
3501462ebafa15b19ef436231a5a0d9e3b5d430a | Add first implementation of virtual ontology | johnbachman/indra,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,johnbachman/belpy,johnbachman/belpy,sorgerlab/indra,bgyori/indra,sorgerlab/belpy,bgyori/indra,sorgerlab/belpy,sorgerlab/indra,johnbachman/indra,johnbachman/indra | indra/ontology/virtual_ontology.py | indra/ontology/virtual_ontology.py | import requests
from .ontology_graph import IndraOntology
class VirtualOntology(IndraOntology):
    """An IndraOntology that delegates lookups to a remote REST service
    instead of holding the ontology graph in memory."""
    def __init__(self, url, ontology='bio'):
        super().__init__()
        self.url = url  # base URL of the ontology REST service
        self.ontology = ontology  # name of the server-side ontology to query
    def initialize(self):
        # No local graph to build; the remote service is assumed ready.
        self._initialized = True
    def _rel(self, ns, id, rel_types, direction):
        # direction is 'child' or 'parent', selecting the service endpoint.
        # NOTE(review): this sends a JSON body with a GET request; some
        # servers/proxies drop GET bodies -- confirm the service reads it.
        url = self.url + '/%s_rel' % direction
        res = requests.get(url,
                           json={'ns': ns,
                                 'id': id,
                                 'rel_types': rel_types,
                                 'ontology': self.ontology})
        return res.json()
    def child_rel(self, ns, id, rel_types):
        """Return relations of the given types going to children."""
        return self._rel(ns, id, rel_types, 'child')
    def parent_rel(self, ns, id, rel_types):
        """Return relations of the given types going to parents."""
        return self._rel(ns, id, rel_types, 'parent')
    def get_node_property(self, ns, id, property):
        """Fetch a single named property of the (ns, id) node."""
        url = self.url + '/get_node_property'
        res = requests.get(url,
                           json={'ns': ns,
                                 'id': id,
                                 'property': property,
                                 'ontology': self.ontology})
        return res.json()
| bsd-2-clause | Python | |
2154c816cdb3ff0f4a98980a2d590888f6819c81 | add signals | MrKiven/REST_ARCH,MrKiven/REST_ARCH | rest_arch/signals.py | rest_arch/signals.py | # -*- coding: utf-8 -*-
from blinker import signal
before_api_called = signal('before_api_called')
after_api_called = signal('after_api_called')
# TODO add more signals
| mit | Python | |
00c5dbbdeee045d9e474ce7b6094cd49df528b05 | add container tests | lmtierney/watir-snake | tests/container_tests.py | tests/container_tests.py | import pytest
from watir_snake.container import Container
class TestContainerExtractSelector(object):
    """Container._extract_selector accepts either exactly two positional
    args ('how', 'what') or keyword args, and normalizes both to a dict."""
    def test_converts_2_arg_selector_into_a_dict(self):
        assert Container()._extract_selector('how', 'what') == {'how': 'what'}
    def test_returns_the_kwargs_given(self):
        assert Container()._extract_selector(how='what') == {'how': 'what'}
    def test_returns_an_empty_dict_if_given_no_args(self):
        assert Container()._extract_selector() == {}
    def test_raises_correct_exception_if_given_1_arg(self):
        # An odd number of positional args is ambiguous.
        with pytest.raises(ValueError):
            Container()._extract_selector('how')
    def test_raises_correct_exception_if_given_over_2_args(self):
        with pytest.raises(ValueError):
            Container()._extract_selector('how', 'what', 'value')
| mit | Python | |
1aebdce5d2fb233927930175fe60e205bca50962 | Fix test :) | peterjanes/dosage,blade2005/dosage,webcomics/dosage,blade2005/dosage,webcomics/dosage,peterjanes/dosage | tests/test_comicnames.py | tests/test_comicnames.py | # -*- coding: utf-8 -*-
# Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2015-2016 Tobias Gruetzmacher
from __future__ import absolute_import, division, print_function
import re
from dosagelib import scraper
class TestComicNames(object):
    def test_names(self):
        """Every scraper name is at most 'Publisher/Comic' deep and the
        comic part contains only [0-9a-zA-Z_]."""
        for scraperclass in scraper.get_scraperclasses():
            name = scraperclass.getName()
            assert name.count('/') <= 1
            if '/' in name:
                comicname = name.split('/')[1]
            else:
                comicname = name
            # Stripping disallowed characters must be a no-op.
            assert re.sub("[^0-9a-zA-Z_]", "", comicname) == comicname
| # -*- coding: utf-8 -*-
# Copyright (C) 2012-2014 Bastian Kleineidam
# Copyright (C) 2016 Tobias Gruetzmacher
from dosagelib import scraper, util
class TestComicNames(object):
    def test_names(self):
        """Every scraper name is at most 'Publisher/Comic' deep and the
        comic part must survive ASCII-fication unchanged."""
        for scraperclass in scraper.get_scraperclasses():
            name = scraperclass.getName()
            assert name.count('/') <= 1
            if '/' in name:
                comicname = name.split('/')[1]
            else:
                comicname = name
            assert util.asciify(comicname) == comicname
| mit | Python |
8b55c8a524dd853be2c72951f3656db1a991d0bc | test for Experiment class | olivierverdier/odelab | tests/test_experiment.py | tests/test_experiment.py | #!/usr/bin/env python
# −*− coding: UTF−8 −*−
from __future__ import division
from odelab.solver import SingleStepSolver
from odelab.system import System
from odelab.scheme import ExplicitEuler
from odelab.experiment import Experiment
import numpy as np
import nose.tools as nt
def f(t,u):
    # Right-hand side of the ODE u' = -u (exponential decay).
    return -u
def test_experiment():
    """Run a small explicit-Euler experiment, then reload it by name and
    check the restored solver's type, scheme and step count."""
    params = {
        'family': 'tmpbank',
        'name': 'tmpexp',
        'system': System,
        'system_params': {'f': f},
        'solver': SingleStepSolver,
        'scheme': ExplicitEuler,
        'scheme_params': {},
        'initialize': {
            'u0' : np.array([1.]),
            'time': 1.,
            'h': .1,
        },
    }
    exp = Experiment(params)
    exp.run()
    s = Experiment.load('tmpbank', 'tmpexp')
    nt.assert_true(isinstance(s, SingleStepSolver))
    nt.assert_true(isinstance(s.scheme, ExplicitEuler))
    # 11 states: presumably the initial state plus 10 steps of h=0.1 over
    # time=1.0 -- verify against odelab's len() semantics.
    nt.assert_equal(len(s), 11)
| bsd-3-clause | Python | |
5e52a7551b20f74d0b08393e8da89463bb6b5366 | add new tests for busco | sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana,sequana/sequana | test/test_busco.py | test/test_busco.py | from sequana.busco import BuscoConfig, BuscoDownload
from sequana import sequana_data
from easydev import TempFile
def test_busco_config():
    """BuscoConfig can be built and its config file saved to a temp path."""
    bc = BuscoConfig("species", outpath="test", sample_name="test",
        conda_bin_path="test", tmp_path="test", hmmsearch_bin_path="itest",
        Rscript_bin_path=None)
    # Bug fix: TempFile must be instantiated -- `with TempFile as fh`
    # used the class object itself as a context manager, which fails.
    with TempFile() as fh:
        bc.save_config_file(fh.name)
def test_busco_download():
    # NOTE(review): this hits the network to fetch the odb9 dataset --
    # it will be slow and fail offline; consider marking it accordingly.
    bd = BuscoDownload()
    bd.filenames = ['proteobacteria_odb9']
    bd.download()
| bsd-3-clause | Python | |
a4ad209ba361ed07574de37598bcedd3ea499a0a | add test file for testing patches | grahamegee/diffr | test/test_patch.py | test/test_patch.py | # The positive cases of patch are extensively tested in test_diff.py because a
# sensible way to validate a diff of two objects is to check that when you apply
# the patch to the first object you get the second.
# Here the testing mainly focuses on patch operations which would fail and some
# of the obscure positive cases. For example you should be able to apply a patch
# to an object that isn't one of the ones involved in the diff under certain
# conditions.
import unittest
from collections import namedtuple, OrderedDict
from copy import deepcopy
from differ import diff, patch
from differ.patch import (
    patch_sequence,
    patch_named_tuple,
    patch_mapping,
    patch_ordered_mapping,
    patch_set)


class PatchSequenceTests(unittest.TestCase):
    # BUG FIX: these methods were named "patch_has_no_side_effects"
    # (missing the "test_" prefix), so unittest discovery never ran them.
    def test_patch_has_no_side_effects(self):
        a = [1, 2, 3]
        copy_of_a = deepcopy(a)
        b = [3, 2, 1]
        d = diff(a, b)
        self.assertEqual(patch(a, d), b)
        # Patching must not mutate the original object.
        self.assertEqual(a, copy_of_a)


class PatchNamedTupleTests(unittest.TestCase):
    def test_patch_has_no_side_effects(self):
        ThreeDPoint = namedtuple('ThreeDPoint', ('x', 'y', 'z'))
        a = ThreeDPoint(1, 2, 3)
        copy_of_a = deepcopy(a)
        b = ThreeDPoint(2, 3, 4)
        d = diff(a, b)
        self.assertEqual(patch(a, d), b)
        self.assertEqual(a, copy_of_a)
        # BUG FIX: removed a stray "self.assertTrue(False)" debugging
        # leftover that would have failed this test unconditionally.


class PatchMappingTests(unittest.TestCase):
    def test_patch_has_no_side_effects(self):
        pass  # TODO: implement


class PatchOrderedMappingTests(unittest.TestCase):
    def test_patch_has_no_side_effects(self):
        pass  # TODO: implement


class PatchSetTests(unittest.TestCase):
    def test_patch_has_no_side_effects(self):
        pass  # TODO: implement


class PatchTests(unittest.TestCase):
    def test_patch_has_no_side_effects(self):
        pass  # TODO: implement
| mit | Python | |
3e8dad480392cc654bca0b0fdf3ac27f4f4be3c6 | Add speed test script | hammerlab/mhcflurry,hammerlab/mhcflurry | test/test_speed.py | test/test_speed.py | import numpy
numpy.random.seed(0)   # deterministic peptides for reproducible timings

import time
import cProfile
import pstats
import pandas

from mhcflurry import Class1AffinityPredictor
from mhcflurry.common import random_peptides

# Number of random peptides used for the bulk-prediction benchmark.
NUM = 100000

# Loaded once at import time so model-loading cost is not timed below.
DOWNLOADED_PREDICTOR = Class1AffinityPredictor.load()


def test_speed(profile=False):
    """Benchmark first-call latency and bulk prediction throughput.

    When *profile* is True, a cProfile.Profile is attached to each timed
    section; the collected profiles are returned as pstats.Stats objects
    keyed by section name.
    """
    starts = {}
    timings = {}
    profilers = {}

    def start(name):
        # Begin timing (and optionally profiling) the named section.
        starts[name] = time.time()
        if profile:
            profilers[name] = cProfile.Profile()
            profilers[name].enable()

    def end(name):
        # Stop the named section's clock and profiler.
        timings[name] = time.time() - starts[name]
        if profile:
            profilers[name].disable()

    # The first prediction includes one-time setup costs.
    start("first")
    DOWNLOADED_PREDICTOR.predict(["SIINFEKL"], allele="HLA-A*02:01")
    end("first")

    peptides = random_peptides(NUM)
    start("pred_%d" % NUM)
    DOWNLOADED_PREDICTOR.predict(peptides, allele="HLA-A*02:01")
    end("pred_%d" % NUM)

    print("SPEED BENCHMARK")
    print("Results:\n%s" % str(pandas.Series(timings)))

    return dict(
        (key, pstats.Stats(value)) for (key, value) in profilers.items())


if __name__ == '__main__':
    # If run directly from python, do profiling and leave the user in a shell
    # to explore results.
    result = test_speed(profile=True)
    result["pred_%d" % NUM].sort_stats("cumtime").reverse_order().print_stats()

    # Leave in ipython
    locals().update(result)
    import ipdb ; ipdb.set_trace()
| apache-2.0 | Python | |
3283c9ac640112ab7a26ec3f82e051394ca72ecf | Add catapult presubmit with list of trybots. | catapult-project/catapult-csm,dstockwell/catapult,sahiljain/catapult,catapult-project/catapult,zeptonaut/catapult,catapult-project/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult-csm,modulexcite/catapult,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult-csm,dstockwell/catapult,catapult-project/catapult,sahiljain/catapult,SummerLW/Perf-Insight-Report,sahiljain/catapult,benschmaus/catapult,sahiljain/catapult,modulexcite/catapult,benschmaus/catapult,0x90sled/catapult,catapult-project/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,catapult-project/catapult,catapult-project/catapult,scottmcmaster/catapult,catapult-project/catapult-csm,SummerLW/Perf-Insight-Report,SummerLW/Perf-Insight-Report,modulexcite/catapult,danbeam/catapult,0x90sled/catapult,benschmaus/catapult,dstockwell/catapult,benschmaus/catapult,zeptonaut/catapult,zeptonaut/catapult,danbeam/catapult,sahiljain/catapult,benschmaus/catapult,danbeam/catapult,catapult-project/catapult-csm,catapult-project/catapult-csm,danbeam/catapult,benschmaus/catapult,0x90sled/catapult,scottmcmaster/catapult,scottmcmaster/catapult,benschmaus/catapult,dstockwell/catapult,sahiljain/catapult | PRESUBMIT.py | PRESUBMIT.py | # Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Top-level presubmit script for catapult.
See https://www.chromium.org/developers/how-tos/depottools/presubmit-scripts
for more details about the presubmit API built into depot_tools.
"""
def GetPreferredTryMasters(project, change):
  """Return the try masters and bots to use by default for catapult CLs."""
  bot_names = (
      'Catapult Linux Tryserver',
      'Catapult Mac Tryserver',
      'Catapult Windows Tryserver',
  )
  # Each bot runs the default test suite; build a fresh set per bot so
  # mutating one entry cannot affect the others.
  return {
      'tryserver.client.catapult': dict(
          (bot, set(['defaulttests'])) for bot in bot_names),
  }
e39abc889b27c5cebb4c098b2c3858f2a861a6d3 | test to build lstm ner model | dragoon/kilogram,dragoon/kilogram,dragoon/kilogram | kilogram/entity_types/test.py | kilogram/entity_types/test.py | import sys
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.recurrent import LSTM
from gensim.models import word2vec
def get_features(sentence, index):
    """Build a (6, 128) context matrix for the token at ``sentence[index]``.

    Rows are the word2vec embeddings of the three words before and three
    words after the target token; out-of-vocabulary or out-of-range
    positions contribute all-zero rows.  Relies on the module-global
    ``word2vec_model``.
    """
    vector = np.array([])
    vector = vector.reshape((0, 128))
    # get the context and create a training sample
    for j in range(index-3, index+4):
        if j != index:
            if j > 0 and j < len(sentence):
                # NOTE(review): "j > 0" also excludes index 0, so the first
                # word of the sentence is never used as context -- looks
                # like an off-by-one; confirm intent.
                try:
                    vector = np.append(vector, [word2vec_model[sentence[j]]], axis=0)
                except:
                    # Out-of-vocabulary word: substitute a zero embedding.
                    vector = np.append(vector, [[0]*128], axis=0)
            else:
                vector = np.append(vector, [[0]*128], axis=0)
    return vector
if __name__ == "__main__":
    # argv[1]: trained gensim word2vec model; argv[2]: annotated corpus in
    # which entities appear as "<dbpedia:...>" tokens.
    word2vec_model = word2vec.Word2Vec.load(sys.argv[1])

    # Binary classifier: LSTM over the 6-word context window.
    model = Sequential()
    model.add(LSTM(128, 128)) # try using a GRU instead, for fun
    model.add(Dropout(0.5))
    model.add(Dense(128, 1))
    model.add(Activation('sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy', optimizer='adam', class_mode="binary")

    NUM_SAMPLES = 200000

    import numpy as np
    X_train = np.empty((NUM_SAMPLES, 6, 128))
    y_train = np.empty((NUM_SAMPLES,))
    #y_train = np.empty((10000, 128))
    data = open(sys.argv[2])
    entity_index = 0
    for line in data:
        if entity_index > NUM_SAMPLES - 1:
            break
        words = line.split()
        for i, word in enumerate(words):
            if word.startswith('<dbpedia:'):
                if word not in word2vec_model:
                    continue
                true_vector = word2vec_model[word]
                X_train[entity_index] = get_features(words, i)
                #y_train = np.append(y_train, [true_vector], axis=0)
                # Label: 1 when the entity is a Person, 0 otherwise.
                y_train[entity_index] = int(word == '<dbpedia:Person>')
                entity_index += 1
    data.close()

    def balanced_subsample(x, y, subsample_size=1.0):
        """Return a class-balanced subsample of (x, y): at most
        min-class-count * subsample_size examples per class."""
        class_xs = []
        min_elems = None
        for yi in np.unique(y):
            elems = x[(y == yi)]
            class_xs.append((yi, elems))
            # NOTE(review): "== None" should be "is None" (PEP 8).
            if min_elems == None or elems.shape[0] < min_elems:
                min_elems = elems.shape[0]
        use_elems = min_elems
        if subsample_size < 1:
            use_elems = int(min_elems*subsample_size)
        xs = []
        ys = []
        for ci,this_xs in class_xs:
            # Shuffle before truncating so the kept subset is random.
            if len(this_xs) > use_elems:
                np.random.shuffle(this_xs)
            x_ = this_xs[:use_elems]
            y_ = np.empty(use_elems)
            y_.fill(ci)
            xs.append(x_)
            ys.append(y_)
        xs = np.concatenate(xs)
        ys = np.concatenate(ys)
        return xs, ys

    X_train, y_train = balanced_subsample(X_train, y_train)

    model.fit(X_train, y_train, batch_size=8, nb_epoch=30, validation_split=0.1, show_accuracy=True)
    model.save_weights('model_lstm.bin')
| apache-2.0 | Python | |
05a5599fd0cf08cf33c8a90673e8c71b4c1d6c36 | Test implementation of convex hull | tylerburnham42/ProgrammingTeam,MercerBinaryBears/Slides,tylerburnham42/ProgrammingTeam,MercerBinaryBears/Slides,MercerBinaryBears/Slides,MercerBinaryBears/Slides,MercerBinaryBears/Slides,tylerburnham42/ProgrammingTeam | slides/ComputationalGeometry/convex-hull.py | slides/ComputationalGeometry/convex-hull.py | import math
class Vector:
    """A 2-D vector with the small amount of geometry the hull code needs."""

    def __init__(self, x, y):
        self.x = x
        self.y = y
        # Pre-compute the polar angle so points can later be sorted by it.
        self.theta = math.atan2(y, x)

    def add(self, other):
        return Vector(other.x + self.x, other.y + self.y)

    def negate(self):
        return Vector(-1 * self.x, -1 * self.y)

    def subtract(self, other):
        flipped = other.negate()
        return self.add(flipped)

    def dot(self, other):
        return sum((self.x * other.x, self.y * other.y))

    def magnitude(self):
        return self.dot(self) ** 0.5

    def cross(self, other):
        # z-component of the 3-D cross product; its sign gives turn direction.
        return self.x * other.y - other.x * self.y

    def __repr__(self):
        # Render as "(x,y)" so vectors print readably.
        return "(%s,%s)" % (self.x, self.y)
def parse_point(raw_string):
    """Turn an "x,y" string into a Vector with integer components."""
    x_part, y_part = raw_string.split(',')
    return Vector(int(x_part), int(y_part))
def turn_direction(p1, p2, p3):
    """Cross product of (p2 - p1) and (p3 - p1).

    Positive means a left (counter-clockwise) turn, negative a right turn,
    zero collinear points.
    """
    first_leg = p2.subtract(p1)
    second_leg = p3.subtract(p1)
    return first_leg.cross(second_leg)
def convex_hull(points):
    """Return the points that lie on the convex boundary.

    Points are sorted by their polar angle and each one is kept only when
    the (previous, current, next) triple makes a non-clockwise turn.
    """
    # Sort the points by their angle theta.
    sorted_points = sorted(points, key=lambda p: p.theta)
    N = len(sorted_points)
    # BUG FIX: the hull must start empty.  It previously *aliased*
    # sorted_points, so every input point was returned plus appended
    # duplicates; the loop also ran N + 1 times, visiting index 0 twice.
    # A stray debug print was removed as well.
    hull = []
    for i in range(N):
        previous_point = sorted_points[(i + N - 1) % N]
        current_point = sorted_points[i]
        next_point = sorted_points[(i + 1) % N]
        if turn_direction(previous_point, current_point, next_point) >= 0:
            hull.append(current_point)
    return hull
# Read the number of points, then one "x,y" point per line from stdin.
point_count = int(input().strip())
points = []

for i in range(point_count):
    points.append(parse_point(input()))

hull = convex_hull(points)
# TODO: re-sort the hull points into boundary order before printing.
print(hull)
| mit | Python | |
e0597427d93f2260dfce35cfdd3e2714037fb0fb | Implement cheb_dif for getting 1D chebyshev grids and differentiation matrices. | dsteinmo/pysws,dsteinmo/pysws | src/spatial_discretizations/FourierChebyshevSpatialDiscretization.py | src/spatial_discretizations/FourierChebyshevSpatialDiscretization.py | import numpy as np
from numpy.fft import fft, ifft, fftshift, fft2, ifft2
from scipy.linalg import toeplitz
class FourierChebyshevSpatialDiscretization:
    """Fourier (x) / Chebyshev (y) spatial discretization parameters.

    Stores the domain lengths and grid sizes from ``config`` and provides
    ``cheb_dif`` for building 1-D Chebyshev collocation grids and
    differentiation matrices (the Weideman & Reddy ``chebdif`` algorithm:
    trigonometric identities plus the flipping trick for the off-diagonal
    entries and the negative-sum trick for the diagonal).
    """

    def __init__(self, config):
        self.length_x = config['length_x']
        self.length_y = config['length_y']
        self.num_points_x = config['num_points_x']
        self.num_points_y = config['num_points_y']
        # self.__build_grid__()
        # self.__build_wavenumbers__()
        # self.__build_filter__()

    def cheb_dif(self, N, M):
        """Return ``(x, DM)`` for an N-point Chebyshev grid on [-1, 1].

        ``x`` holds the Chebyshev extreme points (descending from 1 to -1)
        and ``DM[:, :, ell-1]`` is the ell-th derivative matrix, ell=1..M.
        """
        # BUG FIX: np.floor/np.ceil return floats, which NumPy (>= 1.12)
        # rejects as slice bounds; cast them to int.  (An unused identity
        # matrix allocation was also removed.)
        n1 = int(np.floor(N / 2))
        n2 = int(np.ceil(N / 2))

        k = np.arange(N).reshape(N, 1)  # column vector of grid indices
        th = k * np.pi / (N - 1)

        # Compute Chebyshev points via sine for better floating-point symmetry.
        vec = np.arange(N - 1, 1 - N - 1, -2)
        x = np.sin(np.pi * vec / (2 * (N - 1)))

        # Off-diagonal differences x_k - x_j, evaluated with the
        # trigonometric identity and the flipping trick for accuracy.
        T = np.tile(th / 2, (1, N))
        Tt = T.T
        DX = 2 * np.sin(Tt + T) * np.sin(Tt - T)
        DX = np.vstack([DX[0:n1, :], -np.flipud(np.fliplr(DX[0:n2, :]))])
        for i in range(N):
            DX[i, i] = 1.0  # placeholder so 1/DX stays finite on the diagonal

        # C[k, j] = c_k (-1)^(k+j) / c_j with c_0 = c_{N-1} = 2.
        C = toeplitz((-1.0) ** k)
        C[0, :] = C[0, :] * 2.0
        C[N - 1, :] = C[N - 1, :] * 2.0
        C[:, 0] = C[:, 0] / 2.0
        C[:, N - 1] = C[:, N - 1] / 2.0

        Z = 1.0 / DX
        for i in range(N):
            Z[i, i] = 0.0

        D = np.eye(N)
        DM = np.zeros([N, N, M])
        for ell in range(1, M + 1):
            # Recurrence producing the ell-th derivative matrix from the
            # (ell-1)-th one.
            D = ell * Z * (C * np.tile(np.array([np.diag(D)]).T, (1, N)) - D)
            # Negative-sum trick: each row of a differentiation matrix sums
            # to zero, which determines the diagonal more accurately.
            diag = -np.sum(D, 1)
            for i in range(N):
                D[i, i] = diag[i]
            DM[:, :, ell - 1] = D

        return (x, DM)
0cb320dee7336f7e68bc9cc5efe0ae88de5541fb | Add YCM config | roman-kashitsyn/libdocset,roman-kashitsyn/libdocset,roman-kashitsyn/libdocset | .ycm_extra_conf.py | .ycm_extra_conf.py | import os
import ycm_core
# These are the compilation flags that will be used in case there's no
# compilation database set (by default, one is not set).
# CHANGE THIS LIST OF FLAGS. YES, THIS IS THE DROID YOU HAVE BEEN LOOKING FOR.
flags = [
'-Wall',
'-Wextra',
'-Werror',
'-Wno-long-long',
'-Wno-variadic-macros',
'-DNDEBUG',
# THIS IS IMPORTANT! Without a "-std=<something>" flag, clang won't know which
# language to use when compiling headers. So it will guess. Badly. So C++
# headers will be compiled as C headers. You don't want that so ALWAYS specify
# a "-std=<something>".
# For a C project, you would set this to something like 'c99' instead of
# 'c++11'.
'-std=c++11',
# ...and the same thing goes for the magic -x option which specifies the
# language that the files to be compiled are written in. This is mostly
# relevant for c++ headers.
# For a C project, you would set this to 'c' instead of 'c++'.
'-x',
'c++',
'-Isrc',
]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''

if os.path.exists( compilation_database_folder ):
  # A compilation database was configured; flags will come from it.
  database = ycm_core.CompilationDatabase( compilation_database_folder )
else:
  database = None

# Source-file extensions tried when looking up flags for a header file
# (see GetCompilationInfoForFile below).
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def DirectoryOfThisScript():
  """Absolute path of the directory that contains this config file."""
  this_file = os.path.abspath( __file__ )
  return os.path.dirname( this_file )
def MakeRelativePathsInFlagsAbsolute( flags, working_directory ):
  """Return a copy of |flags| with relative include paths anchored at
  |working_directory|; with no working directory the flags are returned
  unchanged (as a new list)."""
  if not working_directory:
    return list( flags )
  path_flags = [ '-isystem', '-I', '-iquote', '--sysroot=' ]
  absolute_flags = []
  next_is_path = False
  for flag in flags:
    updated = flag

    if next_is_path:
      next_is_path = False
      if not flag.startswith( '/' ):
        updated = os.path.join( working_directory, flag )

    for prefix in path_flags:
      if flag == prefix:
        # The path arrives as the *following* flag.
        next_is_path = True
        break
      if flag.startswith( prefix ):
        # The path is glued onto the flag, e.g. "-Isrc".
        tail = flag[ len( prefix ): ]
        updated = prefix + os.path.join( working_directory, tail )
        break

    if updated:
      absolute_flags.append( updated )
  return absolute_flags
def IsHeaderFile( filename ):
  """True when |filename| carries a C/C++ header extension."""
  _root, extension = os.path.splitext( filename )
  return extension in [ '.hh', '.hpp', '.hxx', '.h' ]
def GetCompilationInfoForFile( filename ):
  """Look up compilation info for |filename| in the compilation database.

  Returns None when the database has no usable entry.
  """
  # The compilation_commands.json file generated by CMake does not have entries
  # for header files. So we do our best by asking the db for flags for a
  # corresponding source file, if any. If one exists, the flags for that file
  # should be good enough.
  if IsHeaderFile( filename ):
    basename = os.path.splitext( filename )[ 0 ]
    for extension in SOURCE_EXTENSIONS:
      replacement_file = basename + extension
      if os.path.exists( replacement_file ):
        compilation_info = database.GetCompilationInfoForFile(
          replacement_file )
        if compilation_info.compiler_flags_:
          return compilation_info
    # No sibling source file was found for this header.
    return None
  return database.GetCompilationInfoForFile( filename )
def FlagsForFile( filename, **kwargs ):
  """YCM entry point: return the compile flags to use for |filename|."""
  if database:
    # Bear in mind that compilation_info.compiler_flags_ does NOT return a
    # python list, but a "list-like" StringVec object
    compilation_info = GetCompilationInfoForFile( filename )
    if not compilation_info:
      return None
    final_flags = MakeRelativePathsInFlagsAbsolute(
      compilation_info.compiler_flags_,
      compilation_info.compiler_working_dir_ )
  else:
    # No database: fall back to the static flags, relative to this file.
    relative_to = DirectoryOfThisScript()
    final_flags = MakeRelativePathsInFlagsAbsolute( flags, relative_to )

  return {
    'flags': final_flags,
    'do_cache': True
  }
| mit | Python | |
def decor(func):
    """Return a wrapper that frames func's output between '===' lines."""
    def framed():
        print('===')
        func()
        print('===')
    return framed


def print_text():
    print('Text')


decorated = decor(print_text)
decorated()
| unlicense | Python | |
class TemplateProject(object):
    """Worktree project that holds the shared documentation templates."""

    def __init__(self, doc_worktree, worktree_project):
        self.doc_worktree = doc_worktree
        self.src = worktree_project.src
        self.path = worktree_project.path
        # Template projects have a fixed name and type.
        self.doc_type = "template"
        self.name = "template"
        ##
        # Add self.doxfile_in, self.sphinx_conf_in, etc.

    def __repr__(self):
        return "<TemplateProject in {0}>".format(self.src)
| class TemplateProject(object):
def __init__(self, doc_worktree, worktree_project):
    """Store the worktree handle plus the project's source and path."""
    self.src = worktree_project.src
    self.path = worktree_project.path
    self.doc_worktree = doc_worktree
    ##
    # Add self.doxfile_in, self.sphinx_conf_in, etc.

def __repr__(self):
    return "<TemplateProject in %s>" % self.src
| bsd-3-clause | Python |
2a1e09f99c5c1c80286048a27d6ba0c2ef7fc5b3 | Add none property store | trevor/calendarserver,trevor/calendarserver,trevor/calendarserver | txdav/base/propertystore/none.py | txdav/base/propertystore/none.py | # -*- test-case-name: txdav.base.propertystore.test.test_none,txdav.caldav.datastore,txdav.carddav.datastore -*-
##
# Copyright (c) 2010-2011 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Always-empty property store.
"""
from __future__ import absolute_import
__all__ = [
"PropertyStore",
]
from txdav.idav import PropertyChangeNotAllowedError
from txdav.base.propertystore.base import AbstractPropertyStore, validKey
class PropertyStore(AbstractPropertyStore):
    """
    Always-empty property store.

    Writing properties is not allowed.
    """
    def __init__(self, defaultuser, pathFactory):
        super(PropertyStore, self).__init__(defaultuser)
        # Drop the mutating accessors so this store is effectively
        # read-only.  NOTE(review): this assumes the base class installs
        # __setitem__/__delitem__ as *instance* attributes; "del" on a
        # purely class-level attribute would raise AttributeError --
        # verify against AbstractPropertyStore.
        del self.__setitem__
        del self.__delitem__

    #
    # Required implementations
    #

    def _getitem_uid(self, key, uid):
        # The store is always empty, so every lookup is a miss.
        validKey(key)
        raise KeyError(key)

    def _setitem_uid(self, key, value, uid):
        validKey(key)
        raise PropertyChangeNotAllowedError("Property store is read-only.", (key,))

    def _delitem_uid(self, key, uid):
        validKey(key)
        raise KeyError(key)

    def _keys_uid(self, uid):
        # No properties are ever stored.
        return ()

    #
    # I/O
    #

    def flush(self):
        # Nothing to persist.
        return None

    def abort(self):
        # Nothing to roll back.
        return None
| apache-2.0 | Python | |
d77dd62203e0898ab326092c410638a0274e53d9 | Initialize P02_errorExample | JoseALermaIII/python-tutorials,JoseALermaIII/python-tutorials | books/AutomateTheBoringStuffWithPython/Chapter10/P02_errorExample.py | books/AutomateTheBoringStuffWithPython/Chapter10/P02_errorExample.py | # This program raises an exception and automatically displays the traceback
def spam():
    # Indirection so the traceback shows a two-frame call chain.
    bacon()

def bacon():
    raise Exception("This is the error message.")

# The uncaught exception makes Python print the traceback automatically.
spam()
| mit | Python | |
61d0649925fae2d1eca1f512ec519f440f4a5528 | Create OutputNeuronGroup_multiple_outputs_2.py | ricardodeazambuja/BrianConnectUDP | examples/OutputNeuronGroup_multiple_outputs_2.py | examples/OutputNeuronGroup_multiple_outputs_2.py | '''
Example of a spike receptor (only receives spikes)
In this example spikes are received and processed creating a raster plot at the end of the simulation.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
# The main function with the NeuronGroup(s) and Synapse(s) must be named "main_NeuronGroup".
# It will receive two objects: input_Neuron_Group and the simulation_clock. The input_Neuron_Group
# will supply the input spikes to the network. The size of the spike train received equals NumOfNeuronsInput.
# The size of the output spike train equals NumOfNeuronsOutput and must be the same size of the NeuronGroup who is
# going to interface with the rest of the system to send spikes.
# The function must return all the NeuronGroup objects and all the Synapse objects this way:
# ([list of all NeuronGroups],[list of all Synapses])
# and the FIRST (index 0) NeuronGroup of the list MUST be the one where the OUTPUT spikes will be taken by the simulation.
#
# Here is also possible to use "dummy" NeuronGroups only to receive and/or send spikes.
my_neuron_input_number = 45
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    """Build the network: one relay neuron per input, wired one-to-one."""
    print "main_NeuronGroup!" #DEBUG!
    simclock = simulation_clock
    # Simple integrate-and-fire relay population (reset 0, threshold 0.5).
    Nr=NeuronGroup(my_neuron_input_number, model='v:1', reset=0, threshold=0.5, clock=simclock)
    Nr.v=0

    # SYNAPSES BETWEEN REAL NEURON NETWORK AND THE INPUT
    Syn_iNG_Nr=Synapses(input_Neuron_Group, Nr, model='w:1', pre='v+=w', clock=simclock)

    # One-to-one wiring: input neuron i drives relay neuron i.
    Syn_iNG_Nr[:,:]='i==j'

    print "Total Number of Synapses:", len(Syn_iNG_Nr) #DEBUG!

    # Weight 1 means a single input spike pushes v past the 0.5 threshold.
    Syn_iNG_Nr.w=1

    MExt=SpikeMonitor(Nr) # Spikes sent by UDP
    Mdummy=SpikeMonitor(input_Neuron_Group) # Spikes received by UDP

    return ([Nr],[Syn_iNG_Nr],[MExt,Mdummy])

def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)

    This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
    """
    pass  # NOTE(review): stray no-op; the statements below still execute.
    figure()
    raster_plot(simulation_MN[1])
    title("Spikes Received by UDP")
    show(block=True)
    # savefig('output.pdf')

if __name__=="__main__":
    # Run the UDP-coupled simulation: 45 inputs, 10 s total simulated time.
    my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsInput=my_neuron_input_number, post_simulation_function=post_simulation_function,
        input_addresses=[("127.0.0.1", 12121, 45)], simclock_dt=1, inputclock_dt=2, TotalSimulationTime=10000, sim_repetitions=0, brian_address=2)
| cc0-1.0 | Python | |
180a1cd82b02d23b824d706c44d4c6838eca0dd2 | Add from_nailgun.py manager | zen/solar,loles/solar,pigmej/solar,loles/solar,openstack/solar,zen/solar,pigmej/solar,Mirantis/solar,openstack/solar,Mirantis/solar,pigmej/solar,openstack/solar,loles/solar,Mirantis/solar,zen/solar,zen/solar,Mirantis/solar,loles/solar | f2s/resources/role_data/managers/from_nailgun.py | f2s/resources/role_data/managers/from_nailgun.py | #!/usr/bin/env python
import sys
import json

from fuelclient.objects.environment import Environment

# Manager arguments arrive as JSON on stdin; expects 'env' and 'uid' keys.
ARGS = json.loads(sys.stdin.read())

env = Environment(ARGS['env'])
# Fetch Nailgun's default deployment facts for the single node 'uid'
# and emit them as JSON on stdout.
facts = env.get_default_facts('deployment', [ARGS['uid']])
sys.stdout.write(json.dumps(facts))
| apache-2.0 | Python | |
cd444633870a83adc4220b0bc7025a4ee014ba69 | Add e2e testing python file | anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python,anirbanroydas/ci-testing-python | tests/e2e/test_e2e_identidock.py | tests/e2e/test_e2e_identidock.py | import sys
print(sys.path)  # show the interpreter's module search path
| mit | Python | |
d22e9e6c5c7bded0be5d5c90e86c8dd4ea9ba7d0 | add tests for tree plotting | simpeg/discretize,simpeg/discretize,simpeg/discretize | tests/tree/test_tree_plotting.py | tests/tree/test_tree_plotting.py | import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import unittest
from discretize import TreeMesh
matplotlib.use("Agg")
class TestOcTreePlotting(unittest.TestCase):
    """Smoke-test plot_slice on a refined 3-D TreeMesh (OcTree)."""

    def setUp(self):
        # 32^3 base mesh with a box refined to level 5.
        mesh = TreeMesh([32, 32, 32])
        mesh.refine_box([0.2, 0.2, 0.2], [0.5, 0.7, 0.8], 5)
        self.mesh = mesh

    def test_plot_slice(self):
        """Exercise every v_type/view combination; passing = no exception."""
        mesh = self.mesh
        plt.figure()
        ax = plt.subplot(111)
        mesh.plot_grid(faces=True, edges=True, nodes=True)

        # CC plot: complex-valued cell-centered model with some NaNs mixed in.
        mod_cc = np.random.rand(len(mesh)) + 1j*np.random.rand(len(mesh))
        mod_cc[np.random.rand(len(mesh))<0.2] = np.nan

        mesh.plot_slice(mod_cc, normal='X', grid=True)
        mesh.plot_slice(mod_cc, normal='Y', ax=ax)
        mesh.plot_slice(mod_cc, normal='Z', ax=ax)
        mesh.plot_slice(mod_cc, view='imag', ax=ax)
        mesh.plot_slice(mod_cc, view='abs', ax=ax)

        # Cell-centered vector model (3 components in 3-D).
        mod_ccv = np.random.rand(len(mesh), 3)
        mesh.plot_slice(mod_ccv, v_type='CCv', view='vec', ax=ax)

        # F plot tests
        mod_f = np.random.rand(mesh.n_faces)
        mesh.plot_slice(mod_f, v_type='Fx', ax=ax)
        mesh.plot_slice(mod_f, v_type='Fy', ax=ax)
        mesh.plot_slice(mod_f, v_type='Fz', ax=ax)
        mesh.plot_slice(mod_f, v_type='F', ax=ax)
        mesh.plot_slice(mod_f, v_type='F', view='vec', ax=ax)

        # E plot tests
        mod_e = np.random.rand(mesh.n_edges)
        mesh.plot_slice(mod_e, v_type='Ex', ax=ax)
        mesh.plot_slice(mod_e, v_type='Ey', ax=ax)
        mesh.plot_slice(mod_e, v_type='Ez', ax=ax)
        mesh.plot_slice(mod_e, v_type='E', ax=ax)
        mesh.plot_slice(mod_e, v_type='E', view='vec', ax=ax)

        # Nodes
        mod_n = np.random.rand(mesh.n_nodes)
        mesh.plot_slice(mod_n, v_type='N')

        plt.close()
class TestQuadTreePlotting(unittest.TestCase):
    """Smoke-test plot_image on a refined 2-D TreeMesh (QuadTree)."""

    def setUp(self):
        # 32^2 base mesh with a box refined to level 5.
        mesh = TreeMesh([32, 32])
        mesh.refine_box([0.2, 0.2], [0.5, 0.8], 5)
        self.mesh = mesh

    def test_plot_slice(self):
        """Exercise every v_type/view combination; passing = no exception."""
        mesh = self.mesh
        plt.figure()
        ax = plt.subplot(111)
        mesh.plot_grid(faces=True, edges=True, nodes=True)

        # CC plot: complex-valued cell-centered model with some NaNs mixed in.
        mod_cc = np.random.rand(len(mesh)) + 1j*np.random.rand(len(mesh))
        mod_cc[np.random.rand(len(mesh))<0.2] = np.nan

        mesh.plot_image(mod_cc)
        mesh.plot_image(mod_cc, ax=ax)
        mesh.plot_image(mod_cc, view='imag', ax=ax)
        mesh.plot_image(mod_cc, view='abs', ax=ax)

        # Cell-centered vector model (2 components in 2-D).
        mod_ccv = np.random.rand(len(mesh), 2)
        mesh.plot_image(mod_ccv, v_type='CCv', view='vec', ax=ax)

        # F plot tests
        mod_f = np.random.rand(mesh.n_faces)
        mesh.plot_image(mod_f, v_type='Fx', ax=ax)
        mesh.plot_image(mod_f, v_type='Fy', ax=ax)
        mesh.plot_image(mod_f, v_type='F', ax=ax)
        mesh.plot_image(mod_f, v_type='F', view='vec', ax=ax)

        # E plot tests
        mod_e = np.random.rand(mesh.n_edges)
        mesh.plot_image(mod_e, v_type='Ex', ax=ax)
        mesh.plot_image(mod_e, v_type='Ey', ax=ax)
        mesh.plot_image(mod_e, v_type='E', ax=ax)
        mesh.plot_image(mod_e, v_type='E', view='vec', ax=ax)

        # Nodes
        mod_n = np.random.rand(mesh.n_nodes)
        mesh.plot_image(mod_n, v_type='N', ax=ax)

        plt.close()
| mit | Python | |
475560d9e7320f93bf3e3d40506ffe2092e59d07 | check soft clip position | shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl,shengqh/ngsperl | lib/QC/bamSoftClipPosition.py | lib/QC/bamSoftClipPosition.py | import pysam
import argparse
import sys
import logging
import os
from asyncore import read
# Command-line interface.
parser = argparse.ArgumentParser(description="Build soft clip position distribution in BAM file.",
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)

# Flip DEBUG to True to run with the hard-coded paths below instead of CLI args.
DEBUG=False
NOT_DEBUG = not DEBUG

parser.add_argument('-i', '--input', action='store', nargs='?', help='Input BAM file', required=NOT_DEBUG)
parser.add_argument('--min-mapq', action='store', nargs='?', type=int, default=10, help="Minimum mapping quality of read")
parser.add_argument('--binsize', action='store', nargs='?', type=int, default=1000, help="Bin size of position")
parser.add_argument('--min-depth', action='store', nargs='?', type=int, default=100, help="Minimum depth for output")
parser.add_argument('-o', '--output', action='store', nargs='?', help="Output soft clip distribution file name", required=NOT_DEBUG)

# Show usage when invoked without any arguments (outside debug mode).
if NOT_DEBUG and len(sys.argv)==1:
    parser.print_help()
    sys.exit(1)

args = parser.parse_args()
if DEBUG:
    args.input = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.indel.recal.TP53.bam"
    args.output = "/scratch/cqs/shengq2/jennifer/20190906_lindsay_exomeseq_3772_hg38/softclip/P_175_06.softclip.position.tsv"

logger = logging.getLogger('bamSoftClipPosition')
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)-8s - %(message)s')
def filterReadQuality(read, min_mapq):
    """True when the alignment should be skipped: unmapped, low mapping
    quality, or flagged secondary / QC-fail / duplicate / supplementary."""
    if read.is_unmapped or read.mapping_quality < min_mapq:
        return True
    return (read.is_secondary or read.is_qcfail
            or read.is_duplicate or read.is_supplementary)
def hasSoftClip(read):
    """True when the read's CIGAR string contains a soft-clip (S) operation."""
    return read.cigarstring.find("S") >= 0
# chromosome -> { bin index -> [softclip_count, other_count] }
chrPositionMap = {}
processed = 0
logger.info("reading %s" % args.input)
with pysam.Samfile(args.input, "rb") as samfile:
    for read in samfile.fetch(until_eof=True):
        processed += 1
        if processed % 1000000 == 0:
            logger.info("processed %d" % processed)
            #break

        if filterReadQuality(read, args.min_mapq):
            continue

        # Skip non-canonical contigs (reference names longer than 5 chars).
        if len(read.reference_name) > 5:
            continue

        if not read.reference_name in chrPositionMap:
            chrPositionMap[read.reference_name] = {}
        positionMap = chrPositionMap[read.reference_name]
        # Bin reads by alignment start position.
        position = int(read.reference_start / args.binsize)
        if not position in positionMap:
            positionMap[position] = [0, 0]
        posvalues = positionMap[position]
        if hasSoftClip(read):
            posvalues[0] = posvalues[0] + 1
        else:
            posvalues[1] = posvalues[1] + 1

# Write one TSV row per sufficiently covered bin.
with open(args.output, "wt") as sw:
    sw.write("Chr\tStartPosition\tSoftClipRead\tOtherRead\tSoftClipPerc\n")
    for chr in chrPositionMap.keys():
        positionMap = chrPositionMap[chr]
        positions = sorted(positionMap.keys())
        for pos in positions:
            posvalues = positionMap[pos]
            sread = posvalues[0]
            oread = posvalues[1]
            allread = sread + oread
            # Only report bins that reach the minimum depth.
            if allread >= args.min_depth:
                sw.write("%s\t%d\t%d\t%d\t%.2f\n" % (chr, pos * args.binsize, sread, oread, sread * 1.0 / allread) )

logger.info("done.")
4c6de322d04504e4c0c2c46f686820d3d62b7dac | Add mdata grains as separate module | saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt,saltstack/salt | salt/grains/mdata.py | salt/grains/mdata.py | # -*- coding: utf-8 -*-
'''
test grains
'''
from __future__ import absolute_import
# Import python libs
import os
import logging
# Import salt libs
import salt.utils
# Solve the Chicken and egg problem where grains need to run before any
# of the modules are loaded and are generally available for any usage.
import salt.modules.cmdmod
__virtualname__ = 'mdata'
__salt__ = {
'cmd.run': salt.modules.cmdmod.run,
'cmd.run_all': salt.modules.cmdmod.run_all,
}
log = logging.getLogger(__name__)
def __virtual__():
    '''
    Figure out if we need to be loaded
    '''
    ## collect mdata grains in a SmartOS zone
    if salt.utils.is_smartos_zone():
        return __virtualname__
    ## collect mdata grains in a LX zone
    if salt.utils.is_linux() and 'BrandZ virtual linux' in os.uname():
        return __virtualname__
    return False


def mdata():
    '''
    Provide grains from the SmartOS metadata
    '''
    grains = {}
    mdata_list = salt.utils.which('mdata-list')
    mdata_get = salt.utils.which('mdata-get')

    # parse sdc metadata
    grains['hypervisor_uuid'] = __salt__['cmd.run']('{0} sdc:server_uuid'.format(mdata_get))
    # mdata-get reports failures on stdout; normalize them to "Unknown".
    if "FAILURE" in grains['hypervisor_uuid'] or "No metadata" in grains['hypervisor_uuid']:
        grains['hypervisor_uuid'] = "Unknown"
    grains['datacenter'] = __salt__['cmd.run']('{0} sdc:datacenter_name'.format(mdata_get))
    if "FAILURE" in grains['datacenter'] or "No metadata" in grains['datacenter']:
        grains['datacenter'] = "Unknown"

    # parse vmadm metadata
    for mdata_grain in __salt__['cmd.run'](mdata_list).splitlines():
        grain_data = __salt__['cmd.run']('{0} {1}'.format(mdata_get, mdata_grain))
        if mdata_grain == 'roles': # parse roles as roles grain
            grain_data = grain_data.split(',')
            grains['roles'] = grain_data
        else: # parse other grains into mdata
            if not mdata_grain.startswith('sdc:'):
                if 'mdata' not in grains:
                    grains['mdata'] = {}
                # sanitize key names: '-' and ':' are not grain-friendly
                mdata_grain = mdata_grain.replace('-', '_')
                mdata_grain = mdata_grain.replace(':', '_')
                grains['mdata'][mdata_grain] = grain_data

    return grains
| apache-2.0 | Python | |
ccdc943f4c0292d6046b32cacab410ba6cf1477a | Add the StatePass module | MarquisLP/Sidewalk-Champion | lib/game_states/state_pass.py | lib/game_states/state_pass.py | """This module contains the StatePass class which defines the data
object that will be passed between Game States.
"""
from pygame.mixer import Channel
from lib.custom_data.settings_data import SettingsData
class StatePass(object):
    """Stores common data that will be passed between Game States.

    All States should have reference to the same, singular StatePass
    object. In this way, when one State modifies the StatePass details,
    the details will automatically be updated for all other States as
    well.

    Attributes:
        announcer_channel: A PyGame Channel for playing announcer voice
            clips.
        ui_channel: A PyGame Channel for playing user interface sound
            effects.
        p1_channel_one: One of the two PyGame Channels that Player 1's
            character can use when playing action sounds.
        p1_channel_two: One of the two PyGame Channels that Player 1's
            character can use when playing action sounds.
        p2_channel_one: One of the two PyGame Channels that Player 2's
            character can use when playing action sounds.
        p2_channel_two: One of the two PyGame Channels that Player 2's
            character can use when playing action sounds.
        character_one: The integer line index for player one's
            character within the character list. Setting this to None
            means no character has been chosen yet.
        character_two: The integer line index for player two's
            character within the character list. Setting this to None
            means no character has been chosen yet.
        stage: The integer line index for the chosen battle Stage within
            the stage list. Setting this to None means no Stage has
            been chosen yet.
        battle_rounds: The number of rounds for the current
            battle. The possible values are 1, 3, and 5.
            Note that setting this to 0 means that Training Mode has
            been selected.
        time_limit: The amount of time, in seconds, allotted to each
            round in the upcoming battle. This can be 30, 60, or 99
            seconds.
            Note that setting this to 0 means that Training Mode has
            been selected.
        settings: A SettingsData object for various options that
            can be set by the players via the Settings Screen.
    """
    def __init__(self, settings_data):
        """Declare and initialize instance variables.

        Keyword arguments:
            settings_data: The SettingsData object that will be
                passed between all States.
        """
        # Mixer channels 0-5 are claimed in a fixed order: announcer,
        # UI, then two action channels for each player.
        self.announcer_channel = Channel(0)
        self.ui_channel = Channel(1)
        self.p1_channel_one = Channel(2)
        self.p1_channel_two = Channel(3)
        self.p2_channel_one = Channel(4)
        self.p2_channel_two = Channel(5)
        # None means "not chosen yet" for both characters and the stage.
        self.character_one = None
        self.character_two = None
        self.stage = None
        self.battle_rounds = 3
        self.time_limit = 99
        self.settings = settings_data
| unlicense | Python | |
7d09120e1122c5b9888368e7d98b41fe8fdedf87 | add script to send test messages to MNOs | myvoice-nigeria/myvoice,myvoice-nigeria/myvoice,myvoice-nigeria/myvoice,myvoice-nigeria/myvoice | scripts/test-MNOs.py | scripts/test-MNOs.py | #!/usr/bin/env python
import urllib2
import urllib
import sys
import datetime
import pytz

# One test handset per mobile network operator: (operator name, MSISDN).
test_phones = [
    ('MTN', '2348142235832'),
    ('Etisalat', '2348183273915'),
    ('Glo', '2348117159357'),
    ('Airtel', '2347010915898'),
]

# Timestamps are rendered in West Africa Time so they match local clocks.
wat = pytz.timezone('Africa/Lagos')

# Send one message from every operator's SMSC to every test handset, so
# each operator-to-operator route is exercised exactly once.
for via_operator, _ in test_phones:
    for phone_operator, phone in test_phones:
        now = datetime.datetime.now(wat).strftime('%H:%M:%S on %d/%m/%Y')
        params = {
            'username': 'rapidsms',
            'password': '', # XXX add password here
            'to': phone,
            'from': '55999',
            # Route the message through a specific operator's SMSC.
            'smsc': 'starfish-%s' % via_operator.lower(),
            'text': 'Test message to %s via %s. Sent at %s' % (phone_operator, via_operator, now),
        }
        data = urllib.urlencode(params)
        url = 'https://myvoice-testing.caktusgroup.com/sendsms?%s' % data
        print 'Loading %s' % url
        try:
            result = urllib2.urlopen(url)
        except urllib2.HTTPError, e:
            # Keep the error response; it still carries a status code/body.
            result = e
        print 'Status code and result for %s via %s: %s' % (phone_operator, via_operator, result.getcode())
        print result.read()
        print ''
| bsd-2-clause | Python | |
0eab290aa16a28a3efd82dadea1e545796b7ca68 | Add spider for UPS Store | iandees/all-the-places,iandees/all-the-places,iandees/all-the-places | locations/spiders/upsstore.py | locations/spiders/upsstore.py | import scrapy
import json
from locations.items import GeojsonPointItem
from locations.hours import OpeningHours
# Map the hours feed's upper-case day names to the two-letter
# abbreviations used by OSM-style opening_hours strings.
DAY_MAPPING = {
    "MONDAY": "Mo",
    "TUESDAY": "Tu",
    "WEDNESDAY": "We",
    "THURSDAY": "Th",
    "FRIDAY": "Fr",
    "SATURDAY": "Sa",
    "SUNDAY": "Su"
}
class UpsStoreSpider(scrapy.Spider):
    """Crawl theupsstore.com's location directory into GeojsonPointItems."""
    name = "upsstore"
    allowed_domains = ["theupsstore.com"]
    # Be polite: small delay between requests.
    download_delay = 0.1
    start_urls = (
        'https://locations.theupsstore.com/',
    )
    def parse_hours(self, hours):
        """
        Convert a location page's JSON hours blob to an opening_hours string.

        :param hours: JSON string containing a ``hours.days`` list
        :return: opening_hours string (empty if every day is closed)
        """
        hours = json.loads(hours)
        o = OpeningHours()

        for day in hours["hours"]["days"]:
            if not day["isClosed"]:
                # Only the first interval of each day is used.
                # NOTE(review): assumes interval start/end are numbers like
                # 900 / 1730 that parse with %H%M below -- confirm against
                # live site data.
                interval = day["intervals"][0]

                o.add_range(DAY_MAPPING[day["day"]],
                            open_time=str(interval["start"]),
                            close_time=str(interval["end"]),
                            time_format="%H%M")
        return o.as_opening_hours()
def parse_store(self, response):
properties = {
'name': response.xpath('//span[@class="LocationName-geo"]/text()').extract_first(),
'phone': response.xpath('//span[@itemprop="telephone"]/text()').extract_first(),
'addr_full': response.xpath('//meta[@itemprop="streetAddress"]/@content').extract_first(),
'city': response.xpath('//meta[@itemprop="addressLocality"]/@content').extract_first(),
'state': response.xpath('//abbr[@itemprop="addressRegion"]/text()').extract_first(),
'country': response.xpath('//abbr[@itemprop="addressCountry"]/text()').extract_first(),
'postcode': response.xpath('//span[@itemprop="postalCode"]/text()').extract_first(),
'ref': response.xpath('//input[@id="store_id"]/@value').extract_first(),
'website': response.url,
'lat': float(response.xpath('//meta[@itemprop="latitude"]/@content').extract_first()),
'lon': float(response.xpath('//meta[@itemprop="longitude"]/@content').extract_first()),
}
hours = response.xpath('//script[@id="location_info_hours"]/text()').extract_first()
try:
hours = self.parse_hours(hours)
if hours:
properties['opening_hours'] = hours
except:
pass
yield GeojsonPointItem(**properties)
    def parse(self, response):
        """Walk the state/city directory tree; leaf links are store pages."""
        urls = response.xpath('//a[@class="Directory-listLink"]/@href').extract()

        if urls:
            for url in urls:
                # Directory paths look like state/city/store-id; three
                # segments means the link points at an individual store.
                if len(url.split('/')) == 3:
                    callback = self.parse_store
                else:
                    callback = self.parse

                yield scrapy.Request(
                    response.urljoin(url),
                    callback=callback,
                )
        else:
            # Pages with a single location render plain "Link" anchors
            # instead of a directory listing.
            urls = response.xpath('//a[@class="Link"]/@href').extract()
            for url in urls:
                yield scrapy.Request(
                    response.urljoin(url),
                    callback=self.parse_store,
) | mit | Python | |
308fb5c3cb69966d7f7bf20ea1e4753d68d3fe4b | Add init | jeffreyliu3230/scrapi,fabianvf/scrapi,CenterForOpenScience/scrapi,mehanig/scrapi,fabianvf/scrapi,erinspace/scrapi,alexgarciac/scrapi,felliott/scrapi,CenterForOpenScience/scrapi,erinspace/scrapi,ostwald/scrapi,felliott/scrapi,mehanig/scrapi,icereval/scrapi | scrapi/consumers/cmu/__init__.py | scrapi/consumers/cmu/__init__.py | from consumer import consume, normalize | apache-2.0 | Python | |
79343dda0711e34ef577ff37bddfe3f83d0035f5 | add script to fetch NOMADS data | akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem,akrherz/iem | scripts/model/fetch_nomads_nc.py | scripts/model/fetch_nomads_nc.py | """
Download netcdf data from NOMADS Thredds service, run as
/usr/local/python/bin/python fetch_nomads_nc.py
"""
import mx.DateTime
import subprocess

# start time, GMT
sts = mx.DateTime.DateTime(2010,11,1)
# end time, GMT
ets = mx.DateTime.DateTime(2012,9,17)
# Interval between model initialisations (GFS runs every 6 hours)
interval = mx.DateTime.RelativeDateTime(hours=6)

now = sts
while now < ets:
    # Analysis plus forecasts out to 24 hours, in 6-hour steps.
    for F in ['000', '006', '012', '018', '024']:
        print 'Downloading', now.strftime("%Y-%m-%d %H"), ' Forecast Hr:', F
        # NCSS subset request: global Geopotential_height only, saved as
        # <init time>-<forecast hour>.nc in the current directory.
        command = now.strftime("wget -q -O '%Y%m%d%H-"+F+".nc' 'http://nomads.ncdc.noaa.gov/thredds/ncss/grid/gfs-004/%Y%m/%Y%m%d/gfs_4_%Y%m%d_%H00_"+F+".grb2?var=Geopotential_height&spatial=all&north=90.0000&west=0.0000&east=-0.5000&south=-90.0000&temporal=all&time_start=%Y-%m-%dT%H:00:00Z&time_end=%Y-%m-%dT%H:00:00Z&horizStride='")
        subprocess.call(command, shell=True)
now += interval | mit | Python | |
44f81107d829f76d9f6338a0ba2545a68539515e | Introduce Partial differences class. | microy/MeshToolkit,microy/PyMeshToolkit,microy/MeshToolkit,microy/PyMeshToolkit | Core/Difference.py | Core/Difference.py | # -*- coding:utf-8 -*-
#--
#
# Copyright (C) 2013-2014 Michaël Roy
#
#--
#--
#
# External dependencies
#
#--
#
from numpy import array
#--
#
# Difference
#
#--
#
# Defines a class representing partial differences on triangular mesh
#
class Difference :
    """Partial differences defined over a triangular mesh."""

    def __init__( self, mesh=None ) :
        # Base mesh. Expected to expose `vertices` and `neighbor_vertices`
        # attributes -- TODO confirm against the project's mesh class.
        self.mesh = mesh

    def Gradient( self ) :
        """Compute the per-edge gradient. Not implemented yet.

        BUG FIX: the original inner loop header had no body (a
        SyntaxError) and iterated over the nonexistent `self.vertices`;
        presumably the mesh's vertex list was intended.
        """
        for u in range(len( self.mesh.vertices )) :
            for v in self.mesh.neighbor_vertices[ u ] :
                # TODO: accumulate the partial difference between u and v.
                pass
| mit | Python | |
059a9b14e6db26f6131d41e758d1f14b33bc25b8 | add python script to jump to a random line | vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam,vmiklos/vmexam | vim/goto-random.py | vim/goto-random.py | import random
import vim
# Jumps to a random line inside the current buffer. Helpful if you have lots of
# testcases inside a single file and you want to minimize conflicts, i.e. just
# appending tests to the end of the file is a bad strategy.
def main():
    """Jump to a pseudo-random line in the current buffer."""
    # Add an entry to the jump list so Ctrl-O can return here.
    vim.command("normal! m'")
    # Pick a random 0-based buffer line. randrange() picks directly from
    # the interval instead of materialising the whole range the way
    # random.choice(range(...)) did on Python 2.
    line = random.randrange(len(vim.current.buffer))
    # cursor() is 1-based.
    vim.eval("cursor(" + str(line + 1) + ", 0)")
    # Move the cursor line to the center of the screen.
    vim.command("normal! zz")


if __name__ == '__main__':
    main()
# vim: set shiftwidth=4 softtabstop=4 expandtab:
| mit | Python | |
ae6eb7d4716cab50e8850a94a93c96167337c150 | add fourth tool of Ultimate family, Ultimate GemCutter | ultimate-pa/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,ultimate-pa/benchexec,ultimate-pa/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec,sosy-lab/benchexec | benchexec/tools/ultimategemcutter.py | benchexec/tools/ultimategemcutter.py | # This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2016-2021 Daniel Dietsch <dietsch@informatik.uni-freiburg.de>
# SPDX-FileCopyrightText: 2016-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
from . import ultimate
class Tool(ultimate.UltimateTool):
    """
    This is the tool info module for ULTIMATE GemCutter.

    You can download the latest release from GitHub or build the latest
    development snapshot by following the instructions at
    https://github.com/ultimate-pa/ultimate/wiki/Usage

    Please report any issues to our issue tracker at
    https://github.com/ultimate-pa/ultimate/issues

    Latest release: https://github.com/ultimate-pa/ultimate/releases/latest
    Git repository: https://github.com/ultimate-pa/ultimate.git
    """

    def name(self):
        # Human-readable tool name shown in BenchExec result tables/logs;
        # all other behaviour is inherited from UltimateTool.
        return "ULTIMATE GemCutter"
| apache-2.0 | Python | |
b24cc6048a07f1e0787cbd732c29583bcdf5ba3d | Add the roman.py module which docutils require. | sk-/python2.7-type-annotator,sk-/python2.7-type-annotator,sk-/python2.7-type-annotator | Doc/tools/roman.py | Doc/tools/roman.py | """Convert to and from Roman numerals"""
__author__ = "Mark Pilgrim (f8dy@diveintopython.org)"
__version__ = "1.4"
__date__ = "8 August 2001"
__copyright__ = """Copyright (c) 2001 Mark Pilgrim
This program is part of "Dive Into Python", a free Python tutorial for
experienced programmers. Visit http://diveintopython.org/ for the
latest version.
This program is free software; you can redistribute it and/or modify
it under the terms of the Python 2.1.1 license, available at
http://www.python.org/2.1.1/license.html
"""
import re
#Define exceptions
class RomanError(Exception): pass
class OutOfRangeError(RomanError): pass
class NotIntegerError(RomanError): pass
class InvalidRomanNumeralError(RomanError): pass

#Define digit mapping, ordered largest-first so conversion is greedy
romanNumeralMap = (('M', 1000),
                   ('CM', 900),
                   ('D', 500),
                   ('CD', 400),
                   ('C', 100),
                   ('XC', 90),
                   ('L', 50),
                   ('XL', 40),
                   ('X', 10),
                   ('IX', 9),
                   ('V', 5),
                   ('IV', 4),
                   ('I', 1))

def toRoman(n):
    """convert integer to Roman numeral"""
    if not (0 < n < 5000):
        # Raise-as-call is valid on both Python 2 and 3; the original used
        # the Python-2-only statement form `raise Class, "message"`.
        raise OutOfRangeError("number out of range (must be 1..4999)")
    if int(n) != n:  # `<>` was removed in Python 3; `!=` works everywhere
        raise NotIntegerError("decimals can not be converted")

    result = ""
    for numeral, integer in romanNumeralMap:
        while n >= integer:
            result += numeral
            n -= integer
    return result

#Define pattern to detect valid Roman numerals
romanNumeralPattern = re.compile("""
    ^                   # beginning of string
    M{0,4}              # thousands - 0 to 4 M's
    (CM|CD|D?C{0,3})    # hundreds - 900 (CM), 400 (CD), 0-300 (0 to 3 C's),
                        #            or 500-800 (D, followed by 0 to 3 C's)
    (XC|XL|L?X{0,3})    # tens - 90 (XC), 40 (XL), 0-30 (0 to 3 X's),
                        #        or 50-80 (L, followed by 0 to 3 X's)
    (IX|IV|V?I{0,3})    # ones - 9 (IX), 4 (IV), 0-3 (0 to 3 I's),
                        #        or 5-8 (V, followed by 0 to 3 I's)
    $                   # end of string
    """, re.VERBOSE)

def fromRoman(s):
    """convert Roman numeral to integer"""
    if not s:
        raise InvalidRomanNumeralError('Input can not be blank')
    if not romanNumeralPattern.search(s):
        raise InvalidRomanNumeralError('Invalid Roman numeral: %s' % s)

    result = 0
    index = 0
    for numeral, integer in romanNumeralMap:
        while s[index:index + len(numeral)] == numeral:
            result += integer
            index += len(numeral)
    return result
| mit | Python | |
772ebd24f21f69eacfaae2b1a6658b82031dbd75 | add import script for North Norfolk | chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations,DemocracyClub/UK-Polling-Stations,chris48s/UK-Polling-Stations | polling_stations/apps/data_collection/management/commands/import_north_norfolk.py | polling_stations/apps/data_collection/management/commands/import_north_norfolk.py | from django.contrib.gis.geos import Point
from data_collection.management.commands import BaseCsvStationsCsvAddressesImporter
from data_finder.helpers import geocode_point_only, PostcodeError
class Command(BaseCsvStationsCsvAddressesImporter):
    """Importer for North Norfolk (GSS code E07000147).

    Stations and addresses both come from the same council-supplied CSV;
    station locations are geocoded from the postcode because the file
    contains no coordinates.
    """
    council_id = 'E07000147'
    addresses_name = 'PropertyPostCodePollingStationWebLookup-2017-01-16.CSV'
    stations_name = 'PropertyPostCodePollingStationWebLookup-2017-01-16.CSV'
    elections = ['local.norfolk.2017-05-04']

    def get_station_hash(self, record):
        # Each station appears once per property row; dedupe on its id.
        return "-".join([
            record.pollingplaceid,
        ])

    def station_record_to_dict(self, record):
        # format address: join the six address fields, then squash the
        # blank lines left behind by empty fields.
        address = "\n".join([
            record.pollingplaceaddress1,
            record.pollingplaceaddress2,
            record.pollingplaceaddress3,
            record.pollingplaceaddress4,
            record.pollingplaceaddress5,
            record.pollingplaceaddress6,
        ])
        while "\n\n" in address:
            address = address.replace("\n\n", "\n").strip()

        # no points supplied, so attempt to attach them by geocoding the
        # station postcode (address field 7); fall back to no location.
        try:
            location_data = geocode_point_only(record.pollingplaceaddress7)
            location = Point(
                location_data['wgs84_lon'],
                location_data['wgs84_lat'],
                srid=4326)
        except PostcodeError:
            location = None

        return {
            'internal_council_id': record.pollingplaceid,
            'postcode' : record.pollingplaceaddress7,
            'address' : address,
            'location' : location
        }

    def address_record_to_dict(self, record):
        # Property number '0' (or blank) means "no number": street only.
        if record.propertynumber.strip() == '0' or record.propertynumber.strip() == '':
            address = record.streetname.strip()
        else:
            address = '%s %s' % (record.propertynumber.strip(), record.streetname.strip())

        return {
            'address' : address,
            'postcode' : record.postcode.strip(),
            'polling_station_id': record.pollingplaceid
        }
| bsd-3-clause | Python | |
912ec1162e18b6ffc05ecebaf74f0b946748fa00 | fix device/proxy functions arguments names to | caidongyun/pyzmq,dash-dash/pyzmq,caidongyun/pyzmq,swn1/pyzmq,swn1/pyzmq,dash-dash/pyzmq,ArvinPan/pyzmq,dash-dash/pyzmq,swn1/pyzmq,Mustard-Systems-Ltd/pyzmq,caidongyun/pyzmq,yyt030/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,Mustard-Systems-Ltd/pyzmq,yyt030/pyzmq,ArvinPan/pyzmq,ArvinPan/pyzmq | zmq/cffi_core/devices.py | zmq/cffi_core/devices.py | # coding: utf-8
from ._cffi import C, ffi, zmq_version_info
from .socket import Socket
from zmq.error import ZMQError
def device(device_type, frontend, backend):
    """Start a built-in 0MQ device forwarding between two sockets (blocks).

    Raises ZMQError (with the current zmq errno) on a non-zero return.
    """
    rc = C.zmq_device(device_type, frontend._zmq_socket, backend._zmq_socket)
    if rc != 0:
        raise ZMQError(C.zmq_errno())
    return rc
def proxy(frontend, backend, capture=None):
    """Start a 0MQ proxy between frontend and backend (blocks).

    If `capture` is a Socket, traffic is additionally mirrored to it;
    otherwise NULL is passed and nothing is captured.
    """
    if isinstance(capture, Socket):
        capture = capture._zmq_socket
    else:
        capture = ffi.NULL
    rc = C.zmq_proxy(frontend._zmq_socket, backend._zmq_socket, capture)
    if rc != 0:
        raise ZMQError(C.zmq_errno())
    return rc
__all__ = ['device', 'proxy']
| # coding: utf-8
from ._cffi import C, ffi, zmq_version_info
from .socket import Socket
from zmq.error import ZMQError
def device(device_type, isocket, osocket):
    """Start a built-in 0MQ device between the two sockets (blocks)."""
    rc = C.zmq_device(device_type, isocket.zmq_socket, osocket.zmq_socket)
    if rc != 0:
        raise ZMQError(C.zmq_errno())
    return rc

def proxy(isocket, osocket, msocket=None):
    """Start a 0MQ proxy; traffic is mirrored to `msocket` if it is a Socket."""
    if isinstance(msocket, Socket):
        msocket = msocket.zmq_socket
    else:
        msocket = ffi.NULL
    rc = C.zmq_proxy(isocket.zmq_socket, osocket.zmq_socket, msocket)
    if rc != 0:
        raise ZMQError(C.zmq_errno())
    return rc
__all__ = ['device', 'proxy']
| bsd-3-clause | Python |
a9348b49b6e91046941fb3af3a6b85edd072d7d9 | add a module for descriptors | SMFOSS/CheesePrism,sciyoshi/CheesePrism,whitmo/CheesePrism,sciyoshi/CheesePrism,SMFOSS/CheesePrism,whitmo/CheesePrism,whitmo/CheesePrism | cheeseprism/desc.py | cheeseprism/desc.py | class updict(dict):
"""
    A descriptor that updates its internal representation on set, and
    returns the dictionary to its original state on deletion.
"""
    def __init__(self, *args, **kw):
        super(updict, self).__init__(*args, **kw)
        # Snapshot of the initial contents; __delete__ restores this state.
        self.default = self.copy()

    def __get__(self, obj, objtype):
        # Class- and instance-level reads both return the shared dict itself.
        return self
def __set__(self, val):
self.update(val)
    def __delete__(self, obj):
        # `del instance.attr` resets the dict to its construction-time state.
        self.clear()
        self.update(self.default)
| bsd-2-clause | Python | |
dc477c7b1f0e0ffca01b934919cd32cbd635baab | Implement web scraper for GitHub repos | hackebrot/cibopath | cibopath/scraper.py | cibopath/scraper.py | # -*- coding: utf-8 -*-
import asyncio
import logging
import aiohttp
from cibopath import readme_parser, github_api
from cibopath.templates import Template
logger = logging.getLogger('cibopath')
JSON_STORE = 'templates.json'
# Base class for all cibopath errors; callers can catch it alone.
class CibopathError(Exception):
    """Custom error class for the app."""


class CookiecutterReadmeError(CibopathError):
    """Unable to retrieve readme from github.com/audreyr/cookiecutter."""


class UnableToFindTemplateLinks(CibopathError):
    """Cannot find links to templates in README."""
def fetch_template_data(username, token):
    """Yield one (name, author, repo, context, readme) tuple per template
    linked from the Cookiecutter README, fetched concurrently from GitHub.

    `username`/`token` are GitHub basic-auth credentials.

    Raises CookiecutterReadmeError if the Cookiecutter README cannot be
    fetched, and UnableToFindTemplateLinks if it contains no GitHub links.
    """
    # Cap concurrent GitHub requests at 10.
    semaphore = asyncio.Semaphore(10)
    loop = asyncio.get_event_loop()
    auth = aiohttp.BasicAuth(username, token)

    with aiohttp.ClientSession(loop=loop, auth=auth) as client:
        logger.debug('Load Cookiecutter readme')
        cookiecutter_readme = loop.run_until_complete(
            github_api.get_readme(semaphore, client, 'audreyr', 'cookiecutter')
        )
        if not cookiecutter_readme:
            raise CookiecutterReadmeError

        logger.debug('Find GitHub links in Cookiecutter readme')
        github_links, _ = readme_parser.read(cookiecutter_readme)
        if not github_links:
            raise UnableToFindTemplateLinks

        # One fetch task per template link, gathered in parallel.
        tasks = [
            github_api.get_template(semaphore, client, link)
            for link in github_links
        ]
        logger.debug('Fetch template data from links')
        results = loop.run_until_complete(asyncio.gather(*tasks))

        yield from filter(None, results)  # Ignore all invalid templates
def load_templates(username, token):
    """Return a list of Template objects for every valid template repo."""
    templates = []
    template_data = fetch_template_data(username, token)
    for name, author, repo, context, readme in template_data:
        # Tags are extracted from each template's own README.
        _, tags = readme_parser.read(readme)
        templates.append(Template(name, author, repo, context, tags))
    return templates
| bsd-3-clause | Python | |
549562247018e9c51e8cb8023972c1cf73fc84f4 | add gc01.py | devlights/try-python | trypython/stdlib/gc01.py | trypython/stdlib/gc01.py | # coding: utf-8
"""gcモジュールについてのサンプルです。"""
import gc
import secrets
import string
from trypython.common.commoncls import SampleBase, timetracer
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    """Sample demonstrating basic usage of the gc module."""

    def __init__(self) -> None:
        super().__init__()
        # Holds the generated password strings so they stay alive (and
        # therefore visible to gc) while exec() measures.
        self._data_list = None
        # Alphabet used for random password characters.
        self._characters = string.ascii_letters + string.digits

    def exec(self) -> None:
        # ------------------------------------------------
        # The gc module
        # ------------------------------------------------
        # As its name suggests, the gc module exposes
        # garbage-collection related operations.
        #
        # gc.get_objects() returns the list of objects the
        # interpreter is currently tracking.
        # -----------------------------------------------
        alive_objects = gc.get_objects()
        pr('gc.get_objects()', len(alive_objects))

        with timetracer('heavy proc'):
            self._heavy_proc()

        # The tracked-object count grows after the allocations above.
        alive_objects = gc.get_objects()
        pr('gc.get_objects()', len(alive_objects))

    def _heavy_proc(self, count=100000) -> None:
        # Allocate `count` password strings in one go.
        self._data_list = [self._generate_password() for _ in range(count)]

    def _generate_password(self, nbytes=32) -> str:
        # One random character at a time via the secrets CSPRNG.
        return ''.join(secrets.choice(self._characters) for _ in range(nbytes))
def go():
    # Entry point: build the sample and run it.
    obj = Sample()
    obj.exec()


if __name__ == '__main__':
    go()
| mit | Python | |
4593aa5edf05b014aa6c7fe9de8b239ab2fa91b8 | Add snapshot_framework_stats.py script | alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api,alphagov/digitalmarketplace-api | scripts/snapshot_framework_stats.py | scripts/snapshot_framework_stats.py | #!/usr/bin/env python
"""Change user password
Usage:
snapshot_framework_stats.py <framework_slug> <stage> <api_token>
Example:
./snapshot_framework_stats.py g-cloud-7 dev myToken
"""
import sys
import logging
logger = logging.getLogger('script')
logging.basicConfig(level=logging.INFO)
from docopt import docopt
from dmutils import apiclient
from dmutils.audit import AuditTypes
def get_api_endpoint_from_stage(stage):
    """Map a deployment stage name to its Data API base URL.

    Unknown stage names raise KeyError.
    """
    # Local development runs the API on localhost rather than a public host.
    if stage in ('local', 'dev', 'development'):
        return 'http://localhost:5000'

    prefix = {
        'preview': 'preview-api.development',
        'staging': 'staging-api',
        'production': 'api',
    }[stage]
    return "https://{}.digitalmarketplace.service.gov.uk".format(prefix)
def snapshot_framework_stats(api_endpoint, api_token, framework_slug):
    """Fetch current stats for a framework and persist them as an audit event.

    Exits the process with status 1 if either API call fails.
    """
    data_client = apiclient.DataAPIClient(api_endpoint, api_token)
    try:
        stats = data_client.get_framework_stats(framework_slug)
        # Attach the snapshot to the framework object so historical stats
        # can be reconstructed from the audit trail later.
        data_client.create_audit_event(
            AuditTypes.snapshot_framework_stats,
            data=stats,
            object_type='frameworks',
            object_id=framework_slug
        )
    except apiclient.APIError:
        sys.exit(1)
    logger.info("Framework stats snapshot saved")
if __name__ == '__main__':
arguments = docopt(__doc__)
snapshot_framework_stats(
get_api_endpoint_from_stage(arguments['<stage>']),
arguments['<api_token>'],
arguments['<framework_slug>'],
)
| mit | Python | |
dcd02e0a7b626111bc0fc344df9f6fff2de832ae | Add a (bad) example of missing method. | zeth/ainod,zeth/ainod,zeth/ainod | examples/missingmethod.py | examples/missingmethod.py | #!/usr/bin/python3
"""Send an invalid request with missing method member."""
from simpleclient import send_data_to_socket
# Request body that deliberately omits the required "method" member
# (see module docstring), to exercise the server's error handling.
EXAMPLE = {
    "params": {
        "filter": {
            'store': 'catalog',
            'schema': 'product',
            'id': '704e418e-682d-4ade-99be-710f2208102e'
        }
    }
}
def main():
    """Send the example to the simple client."""
    # Presumably the server answers with an "invalid request" error,
    # which is the point of this example -- confirm against the server.
    send_data_to_socket(EXAMPLE)


if __name__ == '__main__':
    main()
| lgpl-2.1 | Python | |
57b9fcfa5b200ec971f8f3070447cbc98026f5a5 | add example of variable-length array branch | ndawe/rootpy,ndawe/rootpy,ndawe/rootpy,kreczko/rootpy,rootpy/rootpy,rootpy/rootpy,rootpy/rootpy,kreczko/rootpy,kreczko/rootpy | examples/tree/vararray.py | examples/tree/vararray.py | #!/usr/bin/env python
"""
=================================
Trees with variable-length arrays
=================================
This example demonstrates how to create a tree with a variable-length array.
"""
print(__doc__)
from rootpy.tree import Tree, TreeModel, IntCol, FloatArrayCol
from rootpy.io import root_open
class Event(TreeModel):
    # `vals` is a float array branch whose filled length per entry is
    # given by the `num_vals` branch (the variable-length convention).
    num_vals = IntCol()
    vals = FloatArrayCol(10, length_name='num_vals')


rfile = root_open('test.root', 'w')
tree = Tree('events', model=Event)

# Fill entry i with the values 0..i (so entry i stores i + 1 values).
for i in range(10):
    tree.num_vals = i + 1
    for j in range(i + 1):
        tree.vals[j] = j
    tree.fill()
tree.write()

# Clear the array buffer and dump the in-memory tree as CSV.
tree.vals.reset()
tree.csv()
rfile.close()

print("===")
# CSV output from tree read from file should match above output
root_open('test.root', 'r').events.csv()
| bsd-3-clause | Python | |
8ab83988f66270c76b28f36e8263f029011e773b | use Task & Job, a Task has many Jobs | zws0932/farmer,zws0932/farmer,huoxy/farmer | farmer/models.py | farmer/models.py | #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Task(models.Model):
    """An ansible shell command run against a set of inventories.

    One Job row is created per matched host; results are collected from
    ansible's per-host JSON tree output (`-t`).
    """
    # hosts, like web_servers:host1 .
    inventories = models.TextField(null = False, blank = False)
    # 0, do not use sudo; 1, use sudo .
    sudo = models.BooleanField(default = True)
    # for example: ansible web_servers -m shell -a 'du -sh /tmp'
    # the 'du -sh /tmp' is cmd here
    cmd = models.TextField(null = False, blank = False)
    # return code of this job
    rc = models.IntegerField(null = True)
    start = models.DateTimeField(null = True)
    end = models.DateTimeField(null = True)

    @property
    def cmd_shell(self):
        """Full ansible command line: 20 forks, shell module, optional sudo."""
        option = self.sudo and '--sudo' or ''
        option += ' -f 20 -m shell'
        return 'ansible %s %s -a "%s"' % (self.inventories, option, self.cmd)

    def run(self):
        """Run the task in a forked child so the caller returns immediately."""
        if os.fork() == 0:
            self.start = datetime.now()
            self.save()

            # Initial jobs: one per host matched by the inventory pattern.
            cmd_shell = self.cmd_shell + ' --list-hosts'
            status, output = getstatusoutput(cmd_shell)
            hosts = map(str.strip, output.splitlines())
            for host in hosts:
                self.job_set.add(Job(host = host, cmd = self.cmd))

            # Run ansible, writing per-host JSON results into tmpdir.
            tmpdir = '/tmp/ansible_%s' % self.id
            os.mkdir(tmpdir)
            cmd_shell = self.cmd_shell + ' -t ' + tmpdir
            status, output = getstatusoutput(cmd_shell)
            # NOTE(review): getstatusoutput returns a wait()-style status,
            # not a plain exit code -- confirm intended meaning of `rc`.
            self.rc = status
            self.end = datetime.now()
            for f in os.listdir(tmpdir):
                result = json.loads(open(tmpdir + '/' + f).read())
                job = self.job_set.get(host = f)
                job.start = result.get('start')
                job.end = result.get('end')
                job.rc = result.get('rc')
                job.stdout = result.get('stdout')
                job.stderr = result.get('stderr')
                job.save()
            self.save()

            # clean tmp dir
            os.system('rm -rf ' + tmpdir)
            # BUG FIX: the child must terminate here. Without _exit() it
            # fell out of run() and kept executing the caller's control
            # flow in parallel with the parent process.
            os._exit(0)

    def __unicode__(self):
        return self.cmd_shell
class Job(models.Model):
    """Result of running a Task's command on a single host."""
    task = models.ForeignKey(Task)
    host = models.TextField(null = False, blank = False)
    cmd = models.TextField(null = False, blank = False)
    # Filled from ansible's per-host JSON output once the task has run.
    start = models.DateTimeField(null = True)
    end = models.DateTimeField(null = True)
    rc = models.IntegerField(null = True)
    stdout = models.TextField(null = True)
    stderr = models.TextField(null = True)

    def __unicode__(self):
        return self.host + ' : ' + self.cmd
| #coding=utf8
import os
import time
import json
from datetime import datetime
from commands import getstatusoutput
from django.db import models
class Job(models.Model):
    """An ansible shell command run against a set of inventories."""
    # hosts, like web_servers:host1 .
    inventories = models.TextField(null = False, blank = False)
    # 0, do not use sudo; 1, use sudo .
    sudo = models.BooleanField(default = True)
    # for example: ansible web_servers -m shell -a 'du -sh /tmp'
    # the 'du -sh /tmp' is cmd here
    cmd = models.TextField(null = False, blank = False)
    # return code of this job
    rc = models.IntegerField(null = True)
    result = models.TextField(null = True)
    start = models.DateTimeField(null = True)
    end = models.DateTimeField(null = True)

    @property
    def cmd_shell(self):
        # Full ansible invocation: 20 forks, shell module, optional sudo.
        option = self.sudo and '--sudo' or ''
        option += ' -f 20 -m shell'
        return 'ansible %s %s -a "%s"' % (self.inventories, option, self.cmd)

    def run(self):
        # Fork and do the work in the child so the caller returns at once.
        if os.fork() == 0:
            #if 0 == 0:
            tmpdir = '/tmp/ansible_%s' % time.time()
            os.mkdir(tmpdir)
            self.start = datetime.now()
            self.save()
            cmd_shell = self.cmd_shell + ' -t ' + tmpdir
            status, output = getstatusoutput(cmd_shell)
            self.end = datetime.now()
            # Gather ansible's per-host JSON output files (-t tree dir).
            result = {}
            for f in os.listdir(tmpdir):
                result[f] = json.loads(open(tmpdir + '/' + f).read())
            self.rc = status
            self.result = json.dumps(result)
            self.save()
            os.system('rm -rf ' + tmpdir)
            # NOTE(review): the forked child never calls os._exit(), so it
            # returns from run() and continues the caller's control flow.

    def __unicode__(self):
        return self.cmd_shell
| mit | Python |
d692508e9c6fba847f3bb179bbfd3684e6ebcef0 | Add py solution for 384. Shuffle an Array | ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode,ckclark/leetcode | py/shuffle-an-array.py | py/shuffle-an-array.py | from random import randint
class Solution(object):

    def __init__(self, nums):
        """
        :type nums: List[int]
        """
        self.nums = nums

    def reset(self):
        """
        Resets the array to its original configuration and return it.
        :rtype: List[int]
        """
        return self.nums

    def shuffle(self):
        """
        Returns a random shuffling of the array (Fisher-Yates).
        :rtype: List[int]
        """
        shuffled = self.nums[:]
        last = len(shuffled) - 1
        # Sweep left to right, swapping each slot with a uniformly chosen
        # slot at or after it; the original list is never mutated.
        for left in xrange(last):
            pick = randint(left, last)
            if pick != left:
                shuffled[left], shuffled[pick] = shuffled[pick], shuffled[left]
        return shuffled
# Your Solution object will be instantiated and called as such:
# obj = Solution(nums)
# param_1 = obj.reset()
# param_2 = obj.shuffle()
| apache-2.0 | Python | |
6c7b9a0315bf12fb3e40ddd49f43fe8bec5c6132 | Create 0001_0.py | Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Yrthgze/prueba-sourcetree2,Show-Me-the-Code/python,Show-Me-the-Code/python | pylyria/0001/0001_0.py | pylyria/0001/0001_0.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#第 0001 题:做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码(或者优惠券),使用 Python 如何生成 200 个激活码(或者优惠券)?
import random
import string
def activation_code(id, length=16):
    """Return a `length`-char code: hex(id) + 'V' marker + random padding.

    The hex prefix is lowercase while the padding is drawn from uppercase
    letters and digits, so the first 'V' unambiguously ends the id part.
    """
    prefix = hex(int(id))[2:] + 'V'
    length = length - len(prefix)
    chars = string.ascii_uppercase + string.digits
    return prefix + ''.join([random.choice(chars) for i in range(length)])

def get_id(code):
    """Recover the decimal id (as a string) from a code's hex prefix."""
    return str(int(code.upper(), 16))

if __name__ == '__main__':
    for i in range(10, 500, 23):
        code = activation_code(i)
        # BUG FIX: the separator appended above is 'V', not 'L'; splitting
        # on 'L' returned garbage whenever the random padding happened to
        # contain an 'L' (and included the 'V' marker otherwise).
        id_hex = code.split('V')[0]
        # print-as-a-call works on both Python 2 and 3.
        print('%s %s' % (code, get_id(id_hex)))
| mit | Python |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.