commit
stringlengths 40
40
| subject
stringlengths 1
1.49k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| new_contents
stringlengths 1
29.8k
| old_contents
stringlengths 0
9.9k
| lang
stringclasses 3
values | proba
float64 0
1
|
|---|---|---|---|---|---|---|---|
e8a5a97ea18120915dba74b9a73fdca4eb381568
|
Fix indentation level
|
tail/tests/test_tail.py
|
tail/tests/test_tail.py
|
"""
Tests for the tail implementation
"""
from tail import FileBasedTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch, Mock
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# The file check in the class returns no value upon a valid file
# the error states just raise exceptions.
mocked_file_validity_check = Mock()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
# We also need to patch the file checking because we are not dealing
# with an actual file in the filesystem in this unit test
with patch('tail.tail.check_file_validity', mocked_file_validity_check):
res = FileBasedTail('Test_filename.txt').tail(3)
mocked_file_validity_check.assert_called_once_with('Test_filename.txt')
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
def test_head_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch, Mock
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# The file check in the class returns no value upon a valid file
# the error states just raise exceptions.
mocked_file_validity_check = Mock()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('tail.open', mocked_open, create=True) as mocked_file_open:
# We also need to patch the file checking because we are not dealing
# with an actual file in the filesystem in this unit test
with patch('tail.tail.check_file_validity', mocked_file_validity_check):
res = FileBasedTail('Test_filename.txt').head(3)
mocked_file_validity_check.assert_called_once_with('Test_filename.txt')
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["A", "B", "C"]
|
"""
Tests for the tail implementation
"""
from tail import FileBasedTail
def test_tail_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch, Mock
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# The file check in the class returns no value upon a valid file
# the error states just raise exceptions.
mocked_file_validity_check = Mock()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('builtins.open', mocked_open, create=True) as mocked_file_open:
# We also need to patch the file checking because we are not dealing
# with an actual file in the filesystem in this unit test
with patch('tail.tail.check_file_validity', mocked_file_validity_check):
res = FileBasedTail('Test_filename.txt').tail(3)
mocked_file_validity_check.assert_called_once_with('Test_filename.txt')
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["D", "E", "F"]
def test_head_from_file():
"""Tests that tail works as advertised from a file"""
from unittest.mock import mock_open, patch, Mock
# The mock_data we are using for our test
mock_data = """A
B
C
D
E
F
"""
mocked_open = mock_open(read_data=mock_data)
# mock_open does not support iteration by lines by default so
# we must define the following:
mocked_open.return_value.__iter__.return_value = mock_data.splitlines()
# The file check in the class returns no value upon a valid file
# the error states just raise exceptions.
mocked_file_validity_check = Mock()
# We need to patch the open found in the namespace of the module
# where the function is defined
with patch('builtins.open', mocked_open, create=True) as mocked_file_open:
# We also need to patch the file checking because we are not dealing
# with an actual file in the filesystem in this unit test
with patch('tail.tail.check_file_validity', mocked_file_validity_check):
res = FileBasedTail('Test_filename.txt').head(3)
mocked_file_validity_check.assert_called_once_with('Test_filename.txt')
mocked_file_open.assert_called_once_with('Test_filename.txt', 'r')
assert len(res) == 3
assert res == ["A", "B", "C"]
|
Python
| 0.035546
|
17951915f22d12223373bec5e8003b4de666b843
|
__main__ compatible with python 3.5
|
pyqualtrics/__main__.py
|
pyqualtrics/__main__.py
|
# -*- coding: utf-8 -*-
#
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pyqualtrics import Qualtrics
try:
# Python 2.7
input = raw_input
except NameError:
# Python 3.5
pass
def main(argv):
kwargs = {}
iterator = iter(argv)
executable = next(iterator) # argv[0]
try:
command = next(iterator) # argv[1]
except StopIteration:
print("The name of the API call to be made is required")
return None
user = None
if "QUALTRICS_USER" not in os.environ:
user = input("Enter Qualtrics username: ")
token = None
if "QUALTRICS_TOKEN" not in os.environ:
token = input("Enter Qualtrics token: ")
qualtrics = Qualtrics(user, token)
method = getattr(qualtrics, command)
if not method:
print("%s API call is not implement" % method)
return None
for option in argv:
try:
arg, value = option.split("=")
kwargs[arg] = value
except ValueError:
# Ignore parameter in wrong format
pass
return method(**kwargs)
if __name__ == "__main__":
# main(["", "createPanel", "library_id=1", "name=b"])
result = main(sys.argv)
if result is None:
print("Error executing API Call")
else:
print("Success: %s" % result)
|
# -*- coding: utf-8 -*-
#
# This file is part of the pyqualtrics package.
# For copyright and licensing information about this package, see the
# NOTICE.txt and LICENSE.txt files in its top-level directory; they are
# available at https://github.com/Baguage/pyqualtrics
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import os
from pyqualtrics import Qualtrics
def main(argv):
kwargs = {}
iterator = iter(argv)
executable = next(iterator) # argv[0]
try:
command = next(iterator) # argv[1]
except StopIteration:
print("The name of the API call to be made is required")
return None
user = None
if "QUALTRICS_USER" not in os.environ:
user = raw_input("Enter Qualtrics username: ")
token = None
if "QUALTRICS_TOKEN" not in os.environ:
token = raw_input("Enter Qualtrics token: ")
qualtrics = Qualtrics(user, token)
method = getattr(qualtrics, command)
if not method:
print("%s API call is not implement" % method)
return None
for option in argv:
try:
arg, value = option.split("=")
kwargs[arg] = value
except ValueError:
# Ignore parameter in wrong format
pass
return method(**kwargs)
if __name__ == "__main__":
# main(["", "createPanel", "library_id=1", "name=b"])
result = main(sys.argv)
if result is None:
print("Error executing API Call")
else:
print("Success: %s" % result)
|
Python
| 0.999665
|
33efe92104ad139f9313d91ae7b2eea8a76da9d7
|
fix flake8
|
pyscalambda/__init__.py
|
pyscalambda/__init__.py
|
from pyscalambda.operands import Underscore
from pyscalambda.operators import UnaryOperator
from pyscalambda.quote import quote
from pyscalambda.scalambdable import scalambdable_const, scalambdable_func, scalambdable_iterator
from pyscalambda.utility import convert_operand
_ = Underscore(0)
_1 = Underscore(1)
_2 = Underscore(2)
_3 = Underscore(3)
_4 = Underscore(4)
_5 = Underscore(5)
_6 = Underscore(6)
_7 = Underscore(7)
_8 = Underscore(8)
_9 = Underscore(9)
SF = scalambdable_func
SC = scalambdable_const
SI = scalambdable_iterator
Q = quote
def not_(value):
return UnaryOperator("not ", convert_operand(value))
__all__ = ("_", "_1", "_2", "_3", "_4", "_5", "_6", "_7", "_8", "_9", "SF", "SC", "Q", "not_")
|
from pyscalambda.operands import Underscore
from pyscalambda.operators import UnaryOperator
from pyscalambda.quote import quote
from pyscalambda.scalambdable import scalambdable_const, scalambdable_func, scalambdable_iterator
from pyscalambda.utility import convert_operand
_ = Underscore(0)
_1 = Underscore(1)
_2 = Underscore(2)
_3 = Underscore(3)
_4 = Underscore(4)
_5 = Underscore(5)
_6 = Underscore(6)
_7 = Underscore(7)
_8 = Underscore(8)
_9 = Underscore(9)
SF = scalambdable_func
SC = scalambdable_const
SI = scalambdable_iterator
Q = quote
def not_(value):
return UnaryOperator("not ", convert_operand(value))
__all__ = ("_", "_1", "_2", "_3", "_4", "_5", "_6", "_7", "_8", "_9", "SF", "SC", "Q", "not_")
|
Python
| 0
|
4b6117fd4835cbde52e8d3fba79e46c2ec63a637
|
Add explanatory comments about the parent-child relationships
|
mapit/management/commands/find_parents.py
|
mapit/management/commands/find_parents.py
|
# This script is used after Boundary-Line has been imported to
# associate shapes with their parents. With the new coding
# system coming in, this could be done from a BIG lookup table; however,
# I reckon P-in-P tests might be quick enough...
from django.core.management.base import NoArgsCommand
from mapit.models import Area, Generation
class Command(NoArgsCommand):
help = 'Find parents for shapes'
def handle_noargs(self, **options):
new_generation = Generation.objects.new()
if not new_generation:
raise Exception, "No new generation to be used for import!"
parentmap = {
# A District council ward's parent is a District council:
'DIW': 'DIS',
# A County council ward's parent is a County council:
'CED': 'CTY',
# A London borough ward's parent is a London borough:
'LBW': 'LBO',
# A London Assembly constituency's parent is the Greater London Authority:
'LAC': 'GLA',
# A Metropolitan district ward's parent is a Metropolitan district:
'MTW': 'MTD',
# A Unitary Authority ward (UTE)'s parent is a Unitary Authority:
'UTE': 'UTA',
# A Unitary Authority ward (UTW)'s parent is a Unitary Authority:
'UTW': 'UTA',
# A Scottish Parliament constituency's parent is a Scottish Parliament region:
'SPC': 'SPE',
# A Welsh Assembly constituency's parent is a Welsh Assembly region:
'WAC': 'WAE',
# A Civil Parish's parent is one of:
# District council
# Unitary Authority
# Metropolitan district
# London borough
# Scilly Isles
'CPC': ('DIS', 'UTA', 'MTD', 'LBO', 'COI'),
}
for area in Area.objects.filter(
type__code__in=parentmap.keys(),
generation_low__lte=new_generation, generation_high__gte=new_generation,
):
polygon = area.polygons.all()[0]
try:
args = {
'polygons__polygon__contains': polygon.polygon.point_on_surface,
'generation_low__lte': new_generation,
'generation_high__gte': new_generation,
}
if isinstance(parentmap[area.type.code], str):
args['type__code'] = parentmap[area.type.code]
else:
args['type__code__in'] = parentmap[area.type.code]
parent = Area.objects.get(**args)
except Area.DoesNotExist:
raise Exception, "Area %s does not have a parent?" % (self.pp_area(area))
if area.parent_area != parent:
print "Parent for %s was %s, is now %s" % (self.pp_area(area), self.pp_area(area.parent_area), self.pp_area(parent))
area.parent_area = parent
area.save()
def pp_area(self, area):
if not area: return "None"
return "%s [%d] (%s)" % (area.name, area.id, area.type.code)
|
# This script is used after Boundary-Line has been imported to
# associate shapes with their parents. With the new coding
# system coming in, this could be done from a BIG lookup table; however,
# I reckon P-in-P tests might be quick enough...
from django.core.management.base import NoArgsCommand
from mapit.models import Area, Generation
class Command(NoArgsCommand):
help = 'Find parents for shapes'
def handle_noargs(self, **options):
new_generation = Generation.objects.new()
if not new_generation:
raise Exception, "No new generation to be used for import!"
parentmap = {
'DIW': 'DIS',
'CED': 'CTY',
'LBW': 'LBO',
'LAC': 'GLA',
'MTW': 'MTD',
'UTE': 'UTA',
'UTW': 'UTA',
'SPC': 'SPE',
'WAC': 'WAE',
'CPC': ('DIS', 'UTA', 'MTD', 'LBO', 'COI'),
}
for area in Area.objects.filter(
type__code__in=parentmap.keys(),
generation_low__lte=new_generation, generation_high__gte=new_generation,
):
polygon = area.polygons.all()[0]
try:
args = {
'polygons__polygon__contains': polygon.polygon.point_on_surface,
'generation_low__lte': new_generation,
'generation_high__gte': new_generation,
}
if isinstance(parentmap[area.type.code], str):
args['type__code'] = parentmap[area.type.code]
else:
args['type__code__in'] = parentmap[area.type.code]
parent = Area.objects.get(**args)
except Area.DoesNotExist:
raise Exception, "Area %s does not have a parent?" % (self.pp_area(area))
if area.parent_area != parent:
print "Parent for %s was %s, is now %s" % (self.pp_area(area), self.pp_area(area.parent_area), self.pp_area(parent))
area.parent_area = parent
area.save()
def pp_area(self, area):
if not area: return "None"
return "%s [%d] (%s)" % (area.name, area.id, area.type.code)
|
Python
| 0
|
5d94f90126260f147822ba8d3afe9c1c0a85e943
|
Discard FASTA headers by default.
|
pypeline/common/formats/msa.py
|
pypeline/common/formats/msa.py
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import gzip
import types
from collections import defaultdict
from pypeline.common.sequences import split
from pypeline.common.formats.fasta import parse_fasta, print_fasta
class MSAError(RuntimeError):
pass
def split_msa(msa, split_by = "123"):
validate_msa(msa)
if not split_by:
raise TypeError("No partitions to split by specified")
results = {}
for key in split_by:
results[key] = dict((name, {}) for name in msa)
for (name, sequence) in msa.iteritems():
for (key, partition) in split(sequence, split_by).iteritems():
results[key][name] = partition
return results
def join_msa(*msas):
validate_msa(*msas)
results = defaultdict(list)
for msa in msas:
for (name, sequence) in msa.iteritems():
results[name].append(sequence)
return dict((key, "".join(value)) for (key, value) in results.iteritems())
def parse_msa(lines, read_header = False):
msa, headers = {}, {}
for (header, sequence) in parse_fasta(lines):
name = header.split(None, 1)[0]
if name in msa:
raise MSAError("Duplicate names found, cannot be represented as MSA")
msa[name] = sequence
headers[name] = header
validate_msa(msa)
if read_header:
return msa, header
return msa
def read_msa(filename, read_header = False):
func = gzip.open if filename.endswith(".gz") else open
fasta_file = func(filename, "r")
try:
return parse_msa(iter(fasta_file), read_header = read_header)
finally:
fasta_file.close()
def print_msa(msa, file = sys.stdout):
validate_msa(msa)
for group in sorted(msa):
print_fasta(group, msa[group], file)
def write_msa(msa, filename):
validate_msa(msa)
with open(filename, "w") as fileobj:
print_msa(msa, fileobj)
def validate_msa(*msas):
if not msas:
raise TypeError("No MSAs given as arguments")
keywords = set(msas[0])
for msa in msas:
if not msa:
raise MSAError("MSA with no sequences found")
elif not all((name and isinstance(name, types.StringTypes)) for name in msa):
raise MSAError("Names in MSA must be non-empty strings")
elif len(set(len(seq) for seq in msa.itervalues())) != 1:
raise MSAError("MSA contains sequences of differing lengths")
elif set(msa) != keywords:
raise MSAError("MSAs contain mismatching sequences")
|
#!/usr/bin/python
#
# Copyright (c) 2012 Mikkel Schubert <MSchubert@snm.ku.dk>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import sys
import gzip
import types
from collections import defaultdict
from pypeline.common.sequences import split
from pypeline.common.formats.fasta import parse_fasta, print_fasta
class MSAError(RuntimeError):
pass
def split_msa(msa, split_by = "123"):
validate_msa(msa)
if not split_by:
raise TypeError("No partitions to split by specified")
results = {}
for key in split_by:
results[key] = dict((name, {}) for name in msa)
for (name, sequence) in msa.iteritems():
for (key, partition) in split(sequence, split_by).iteritems():
results[key][name] = partition
return results
def join_msa(*msas):
validate_msa(*msas)
results = defaultdict(list)
for msa in msas:
for (name, sequence) in msa.iteritems():
results[name].append(sequence)
return dict((key, "".join(value)) for (key, value) in results.iteritems())
def parse_msa(lines):
msa = {}
for (name, sequence) in parse_fasta(lines):
if name in msa:
raise MSAError("Duplicate names found, cannot be represented as MSA")
msa[name] = sequence
validate_msa(msa)
return msa
def read_msa(filename):
func = gzip.open if filename.endswith(".gz") else open
fasta_file = func(filename, "r")
try:
return parse_msa(iter(fasta_file))
finally:
fasta_file.close()
def print_msa(msa, file = sys.stdout):
validate_msa(msa)
for group in sorted(msa):
print_fasta(group, msa[group], file)
def write_msa(msa, filename):
validate_msa(msa)
with open(filename, "w") as fileobj:
print_msa(msa, fileobj)
def validate_msa(*msas):
if not msas:
raise TypeError("No MSAs given as arguments")
keywords = set(msas[0])
for msa in msas:
if not msa:
raise MSAError("MSA with no sequences found")
elif not all((name and isinstance(name, types.StringTypes)) for name in msa):
raise MSAError("Names in MSA must be non-empty strings")
elif len(set(len(seq) for seq in msa.itervalues())) != 1:
raise MSAError("MSA contains sequences of differing lengths")
elif set(msa) != keywords:
raise MSAError("MSAs contain mismatching sequences")
|
Python
| 0
|
c0824d3cb9cba811ba36c2f8937e91716f5a50df
|
Fix lint
|
ci/run_script.py
|
ci/run_script.py
|
"""
Run tests and linters on Travis CI.
"""
import os
import subprocess
import sys
from pathlib import Path
import pytest
def run_test(test_filename: str) -> None:
"""
Run pytest with a given filename.
"""
path = Path('tests') / 'mock_vws' / test_filename
result = pytest.main(
[
'-vvv',
'--exitfirst',
str(path),
'--cov=src',
'--cov=tests',
]
)
sys.exit(result)
if __name__ == '__main__':
TEST_FILENAME = os.environ.get('TEST_FILENAME')
if TEST_FILENAME:
run_test(test_filename=TEST_FILENAME)
else:
subprocess.check_call(['make', 'lint'])
|
"""
Run tests and linters on Travis CI.
"""
import os
import subprocess
import sys
from pathlib import Path
import pytest
def run_test(test_filename: str) -> None:
"""
Run pytest with a given filename.
"""
path = Path('tests') / 'mock_vws' / test_filename
result = pytest.main([
'-vvv',
'--exitfirst',
str(path),
'--cov=src',
'--cov=tests',
])
sys.exit(result)
if __name__ == '__main__':
TEST_FILENAME = os.environ.get('TEST_FILENAME')
if TEST_FILENAME:
run_test(test_filename=TEST_FILENAME)
else:
subprocess.check_call(['make', 'lint'])
|
Python
| 0.000032
|
2d60ef3a9ff53c1623747fd1a00df4d788dd3777
|
fix tobler init
|
pysal/model/tobler/__init__.py
|
pysal/model/tobler/__init__.py
|
from tobler import area_weighted
from tobler import dasymetric
from tobler import model
|
from tobler import area_weighted
from tobler import data
from tobler import dasymetric
|
Python
| 0.000124
|
d1c88387a129d64488a5ca2dee56d7fac36ffbf1
|
Disable GCC fallback, add time logging.
|
clang_wrapper.py
|
clang_wrapper.py
|
#!/usr/bin/env python
import optparse
import os
import subprocess
import sys
import time
WORLD_PATH = os.path.dirname(os.path.abspath(__file__))
COMPILER_PATH = {'gcc': 'gcc',
'clang': WORLD_PATH + '/third_party/llvm-build/Release+Asserts/bin/clang'
}
FILTER = {'gcc': ['-Qunused-arguments', '-no-integrated-as', '-mno-global-merge',
'-Wdate-time', '-Wno-unknown-warning-option', '-Wno-initializer-overrides', '-Wno-tautological-compare',
'-Wincompatible-pointer-types', '-Wno-gnu', '-Wno-format-invalid-specifier',
'-Werror=date-time', '-Werror=incompatible-pointer-types',
],'clang': []}
SOURCE = 'source'
WRAPPER_LOG = WORLD_PATH + '/wrapper.log'
LOG = sys.stderr
LOG_OPTIONS = {'time': True, 'argv': True}
def compiler(flags):
path = 'clang'
return path # no need to use GCC for now
if SOURCE in flags:
source = flags[SOURCE]
#print >>LOG, source
# kernel/* ok
# kernel/[st] broken
# kernel/[kmpstuw] broken
# kernel/[abckmpstuw] broken
# kernel/[abcdefgkmpstuw] ok
# kernel/[defgkmpstuw] ok
# kernel/[defgkm] ok
# kernel/[defg] ok
# kernel/[de] broken
# kernel/[fg] ok
# kernel/[f] broken
# kernel/[g] ok -- that's kernel/groups.h
if source.startswith('kernel/'):
pieces = source.split('/')
if pieces[1][0] in ['g']:
path = 'gcc'
#print >>LOG, path
return path
def filter_args(argv, cname):
new_argv = []
for arg in argv:
if arg not in FILTER[cname]:
new_argv.append(arg)
return new_argv
def compiler_argv(flags, argv):
cname = compiler(flags)
new_argv = [COMPILER_PATH[cname]] + filter_args(argv, cname)
return new_argv
def make_flags(argv):
flags = {}
argv = argv[1:]
for arg in argv:
if arg.endswith('.c'):
flags[SOURCE] = arg
return flags, argv
def main(argv):
global LOG
LOG = file(WRAPPER_LOG, 'a+')
if 'argv' in LOG_OPTIONS:
print >>LOG, ' '.join(argv)
flags, argv = make_flags(argv)
new_argv = compiler_argv(flags, argv)
#print >>LOG, ' '.join(new_argv)
start_time = time.time()
ret = subprocess.call(new_argv)
end_time = time.time()
if 'time' in LOG_OPTIONS:
print >> LOG, 'Time elapsed: {:.3f} seconds'.format(end_time - start_time)
LOG.close()
return ret
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
#!/usr/bin/env python
import optparse
import os
import subprocess
import sys
WORLD_PATH = os.path.dirname(os.path.abspath(__file__))
COMPILER_PATH = {'gcc': 'gcc',
'clang': WORLD_PATH + '/third_party/llvm-build/Release+Asserts/bin/clang'
}
FILTER = {'gcc': ['-Qunused-arguments', '-no-integrated-as', '-mno-global-merge',
'-Wdate-time', '-Wno-unknown-warning-option', '-Wno-initializer-overrides', '-Wno-tautological-compare',
'-Wincompatible-pointer-types', '-Wno-gnu', '-Wno-format-invalid-specifier',
'-Werror=date-time', '-Werror=incompatible-pointer-types',
],'clang': []}
SOURCE = 'source'
WRAPPER_LOG = WORLD_PATH + '/wrapper.log'
LOG = sys.stderr
def compiler(flags):
path = 'clang'
if SOURCE in flags:
source = flags[SOURCE]
print >>LOG, source
# kernel/* ok
# kernel/[st] broken
# kernel/[kmpstuw] broken
# kernel/[abckmpstuw] broken
# kernel/[abcdefgkmpstuw] ok
# kernel/[defgkmpstuw] ok
# kernel/[defgkm] ok
# kernel/[defg] ok
# kernel/[de] broken
# kernel/[fg] ok
# kernel/[f] broken
# kernel/[g] ok -- that's kernel/groups.h
if source.startswith('kernel/'):
pieces = source.split('/')
if pieces[1][0] in ['g']:
path = 'gcc'
print >>LOG, path
return path
def filter_args(argv, cname):
new_argv = []
for arg in argv:
if arg not in FILTER[cname]:
new_argv.append(arg)
return new_argv
def compiler_argv(flags, argv):
cname = compiler(flags)
new_argv = [COMPILER_PATH[cname]] + filter_args(argv, cname)
return new_argv
def make_flags(argv):
flags = {}
argv = argv[1:]
for arg in argv:
if arg.endswith('.c'):
flags[SOURCE] = arg
return flags, argv
def main(argv):
global LOG
LOG = file(WRAPPER_LOG, 'a+')
#print >>LOG, ' '.join(argv)
flags, argv = make_flags(argv)
new_argv = compiler_argv(flags, argv)
#print >>LOG, ' '.join(new_argv)
ret = subprocess.call(new_argv)
#print >>LOG, ret
LOG.close()
return ret
if __name__ == '__main__':
sys.exit(main(sys.argv))
|
Python
| 0
|
deebd351b09108d95b4759b179ad84b48b6c933e
|
Fix typo in random-seed's help
|
pytest_test_groups/__init__.py
|
pytest_test_groups/__init__.py
|
from random import Random
import math
def get_group_size(total_items, total_groups):
return int(math.ceil(float(total_items) / total_groups))
def get_group(items, group_size, group_id):
start = group_size * (group_id - 1)
end = start + group_size
if start >= len(items) or start < 0:
raise ValueError("Invalid test-group argument")
return items[start:end]
def pytest_addoption(parser):
group = parser.getgroup('split your tests into evenly sized groups and run them')
group.addoption('--test-group-count', dest='test-group-count', type=int,
help='The number of groups to split the tests into')
group.addoption('--test-group', dest='test-group', type=int,
help='The group of tests that should be executed')
group.addoption('--test-group-random-seed', dest='random-seed', type=int,
help='Integer to seed pseudo-random test ordering')
def pytest_collection_modifyitems(session, config, items):
group_count = config.getoption('test-group-count')
group_id = config.getoption('test-group')
seed = config.getoption('random-seed', False)
if not group_count or not group_id:
return
if seed:
seeded = Random(seed)
seeded.shuffle(items)
total_items = len(items)
group_size = get_group_size(total_items, group_count)
tests_in_group = get_group(items, group_size, group_id)
del items[:]
items.extend(tests_in_group)
print('Running test group #{0} ({1} tests)'.format(group_id, len(items)))
|
from random import Random
import math
def get_group_size(total_items, total_groups):
return int(math.ceil(float(total_items) / total_groups))
def get_group(items, group_size, group_id):
start = group_size * (group_id - 1)
end = start + group_size
if start >= len(items) or start < 0:
raise ValueError("Invalid test-group argument")
return items[start:end]
def pytest_addoption(parser):
group = parser.getgroup('split your tests into evenly sized groups and run them')
group.addoption('--test-group-count', dest='test-group-count', type=int,
help='The number of groups to split the tests into')
group.addoption('--test-group', dest='test-group', type=int,
help='The group of tests that should be executed')
group.addoption('--test-group-random-seed', dest='random-seed', type=int,
help='Integer to seed psuedo-random test ordering')
def pytest_collection_modifyitems(session, config, items):
group_count = config.getoption('test-group-count')
group_id = config.getoption('test-group')
seed = config.getoption('random-seed', False)
if not group_count or not group_id:
return
if seed:
seeded = Random(seed)
seeded.shuffle(items)
total_items = len(items)
group_size = get_group_size(total_items, group_count)
tests_in_group = get_group(items, group_size, group_id)
del items[:]
items.extend(tests_in_group)
print('Running test group #{0} ({1} tests)'.format(group_id, len(items)))
|
Python
| 0.002549
|
2eca98c216a590c6163c8236c392f19ddd8d85d9
|
update to 4.4.12
|
tensorgraph/__init__.py
|
tensorgraph/__init__.py
|
# import json
# from os.path import dirname
#
# with open(dirname(__file__) + '/pkg_info.json') as fp:
# _info = json.load(fp)
# __version__ = _info['version']
__version__ = "4.4.12"
from .stopper import EarlyStopper
from .sequential import Sequential
from .graph import Graph
from .node import StartNode, HiddenNode, EndNode
from .progbar import ProgressBar
from .data_iterator import SequentialIterator, StepIterator, SimpleBlocks, DataBlocks
from . import cost
from . import utils
from .dataset.preprocess import *
|
# import json
# from os.path import dirname
#
# with open(dirname(__file__) + '/pkg_info.json') as fp:
# _info = json.load(fp)
# __version__ = _info['version']
__version__ = "4.4.10"
from .stopper import EarlyStopper
from .sequential import Sequential
from .graph import Graph
from .node import StartNode, HiddenNode, EndNode
from .progbar import ProgressBar
from .data_iterator import SequentialIterator, StepIterator, SimpleBlocks, DataBlocks
from . import cost
from . import utils
from .dataset.preprocess import *
|
Python
| 0
|
c986507b9c020a2a81a290299f7ce74748641254
|
update linkedinviewer
|
linkedinviewer.py
|
linkedinviewer.py
|
from linkedin import linkedin
import oauthlib
class Linkedinviewer (object):
def __init__ (self, cred_file):
self.cred_file = cred_file
self.authentication = None
self.application = None
def authenticate(self):
# Authenticate with LinkedIn app credential
cred_list = None
with open(self.cred_file, 'r') as f:
cred_data = f.readlines()
for line in cred_data:
try:
cred_temp = line.split('=')[1]
except:
print "Bad credentials for LinkedIn api authentication"
if cred_list is None:
cred_list = []
cred_list.append(cred_temp.strip(' \t\n\r'))
try:
self.authentication = linkedin.LinkedInDeveloperAuthentication(cred_list[0], cred_list[1], cred_list[2],
cred_list[3], cred_list[4], linkedin.PERMISSIONS.enums.values())
self.application = application = linkedin.LinkedInApplication(self.authentication)
except:
print "Failed to authenticate with LinkedIn"
return None
def retrieve_profile(self):
# Get profile information
profile = self.application.get_profile()
print profile
return profile
def retrieve_company(self, company_ids=None, universal_names=None, selectors=None):
# Get company information
companies = None
count = 0
if company_ids is not None:
for company_id in company_ids:
try:
company_temp = self.application.get_companies(company_ids=[company_id], selectors=selectors)
if companies is None:
companies = {}
companies['values'] = []
companies['values'].append(company_temp['values'][0])
count = count + 1
except:
print "Unable to retrieve company id:", company_id
if universal_names is not None:
for universal_name in universal_names:
try:
company_temp = self.application.get_companies(universal_names=[universal_name], selectors=selectors)
if companies is None:
companies = {}
companies['values'] = []
companies['values'].append(company_temp['values'][0])
count = count + 1
except:
print "Unable to retrieve universal name:", universal_name
if count > 0:
companies['_total'] = count
for company in companies['values']:
print '========================\n'
print company
print '\n========================'
return companies
def retrieve_company_updates(self, companies=None, count=1):
# Get company updates
company_list = None
company_updates_dict = None
if companies is not None:
for i in range(companies['_total']):
if company_list is None:
company_list = []
company_list.append(companies['values'][i])
for company in company_list:
if company_updates_dict is None:
company_updates_dict = {}
company_updates_dict[company['name']] = self.application.get_company_updates(company['id'], params={'count': count})
for company_name, company_updates in company_updates_dict.iteritems():
print '\n************************', company_name, '************************\n'
for i in range(company_updates['_count']):
print '========================\n'
print company_updates['values'][i]
print '\n========================'
return company_updates_dict
if __name__ == "__main__":
lviewer = Linkedinviewer('linkedincred.conf')
lviewer.authenticate()
lviewer.retrieve_profile()
selectors = ['id', 'name', 'company-type', 'stock-exchange',
'ticker', 'industries', 'employee-count-range',
'locations', 'founded-year', 'num-followers'
]
companies = lviewer.retrieve_company(universal_names=['sciencelogic', 'splunk'], selectors=selectors)
company_updates_dict = lviewer.retrieve_company_updates(companies=companies, count=3)
|
from linkedin import linkedin
import oauthlib
class Linkedinviewer (object):
def __init__ (self, cred_file):
self.cred_file = cred_file
self.authentication = None
self.application = None
def authenticate(self):
# Authenticate with LinkedIn app credential
cred_list = None
with open(self.cred_file, 'r') as f:
cred_data = f.readlines()
for line in cred_data:
try:
cred_temp = line.split('=')[1]
except:
print "Bad credentials for LinkedIn api authentication"
if cred_list is None:
cred_list = []
cred_list.append(cred_temp.strip(' \t\n\r'))
try:
self.authentication = linkedin.LinkedInDeveloperAuthentication(cred_list[0], cred_list[1], cred_list[2],
cred_list[3], cred_list[4], linkedin.PERMISSIONS.enums.values())
self.application = application = linkedin.LinkedInApplication(self.authentication)
except:
print "Failed to authenticate with LinkedIn"
return None
def retrieve_profile(self):
# Get profile information
profile = self.application.get_profile()
print profile
return profile
def retrieve_company(self, company_ids=None, universal_names=None, selectors=None):
# Get company information
companies = None
count = 0
if company_ids is not None:
for company_id in company_ids:
try:
company_temp = self.application.get_companies(company_ids=[company_id], selectors=selectors)
if companies is None:
companies = {}
companies['values'] = []
companies['values'].append(company_temp['values'][0])
count = count + 1
except:
print "Unable to retrieve company id:", company_id
if universal_names is not None:
for universal_name in universal_names:
try:
company_temp = self.application.get_companies(universal_names=[universal_name], selectors=selectors)
if companies is None:
companies = {}
companies['values'] = []
companies['values'].append(company_temp['values'][0])
count = count + 1
except:
print "Unable to retrieve universal name:", universal_name
if count > 0:
companies['_total'] = count
for company in companies['values']:
print '========================\n'
print company
print '\n========================'
return companies
def retrieve_company_updates(self, companies=None, count=1):
# Get company updates
company_list = None
company_updates_dict = None
if companies is not None:
for i in range(companies['_total']):
if company_list is None:
company_list = []
company_list.append(companies['values'][i])
for company in company_list:
if company_updates_dict is None:
company_updates_dict = {}
company_updates_dict[company['name']] = self.application.get_company_updates(company['id'], params={'count': count})
for company_name, company_updates in company_updates_dict.iteritems():
print '\n************************', company_name, '************************\n'
for i in range(company_updates['_count']):
print '========================\n'
print company_updates['values'][i]
print '\n========================'
return company_updates_dict
if __name__ == "__main__":
lviewer = Linkedinviewer('linkedincred.conf')
lviewer.authenticate()
lviewer.retrieve_profile()
selectors = ['id', 'name', 'company-type', 'stock-exchange',
'ticker', 'industries', 'employee-count-range',
'locations', 'founded-year', 'num-followers'
]
companies = lviewer.retrieve_company(universal_names=['sciencelogic', 'splunk'], selectors=selectors)
company_updates_dict = lviewer.retrieve_company_updates(companies=companies, count=3)
|
Python
| 0
|
4f5d81b48a5bb48771b82f30e3853472550ee65c
|
add demo about using file iterator
|
python/src/file_iter.py
|
python/src/file_iter.py
|
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
import fileinput
def process(string):
print 'Processing: ', string
def file_iter_by_ch(filename):
f = open(filename)
while True:
char = f.read(1)
if not char:
break
process(char)
f.close()
def file_iter_by_line(filename):
f = open(filename)
while True:
line = f.readline()
if not line:
break
process(line)
f.close()
def file_iter_by_ch_all(filename):
f = open(filename)
for char in f.read():
process(char)
f.close()
def file_iter_by_line_all(filename):
f = open(filename)
for line in f.readlines():
process(line)
f.close()
def file_iter_by_lazy(filename):
for line in fileinput.input(filename):
process(line)
def file_iter(filename):
f = open(filename)
for line in f:
process(line)
f.close()
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'invalid arguments'
exit(1)
filename = 'file_iter.py'
if sys.argv[1] == 'c':
file_iter_by_ch(filename)
elif sys.argv[1] == 'l':
file_iter_by_line(filename)
elif sys.argv[1] == 'ca':
file_iter_by_ch_all(filename)
elif sys.argv[1] == 'la':
file_iter_by_line_all(filename)
elif sys.argv[1] == 'lazy':
file_iter_by_lazy(filename)
else:
file_iter(filename)
|
# Copyright (c) 2014 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
import sys
import fileinput
def process(string):
print 'Processing: ', string
def file_iter_by_ch(filename):
f = open(filename)
while True:
char = f.read(1)
if not char:
break
process(char)
f.close()
def file_iter_by_line(filename):
f = open(filename)
while True:
line = f.readline()
if not line:
break
process(line)
f.close()
def file_iter_by_ch_all(filename):
f = open(filename)
for char in f.read():
process(char)
f.close()
def file_iter_by_line_all(filename):
f = open(filename)
for line in f.readlines():
process(line)
f.close()
def file_iter_by_lazy(filename):
for line in fileinput.input(filename):
process(line)
if __name__ == '__main__':
if len(sys.argv) < 2:
print 'invalid arguments'
exit(1)
filename = 'file_iter.py'
if sys.argv[1] == 'c':
file_iter_by_ch(filename)
elif sys.argv[1] == 'l':
file_iter_by_line(filename)
elif sys.argv[1] == 'ca':
file_iter_by_ch_all(filename)
elif sys.argv[1] == 'la':
file_iter_by_line_all(filename)
elif sys.argv[1] == 'lazy':
file_iter_by_lazy(filename)
else:
print 'error'
|
Python
| 0
|
61c693005de95557172ff78c85de2d5dc4be66f1
|
use N for missing nucleotides
|
vcfkit/phylo.py
|
vcfkit/phylo.py
|
#! /usr/bin/env python
"""
usage:
vk phylo fasta <vcf> [<region>]
vk phylo tree (nj|upgma) [--plot] <vcf> [<region>]
options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from vcfkit import __version__
from utils.vcf import *
from subprocess import Popen, PIPE
from utils import check_program_exists
from clint.textui import colored, indent, puts_err
import os
from pkgutil import get_data
import sys
import numpy as np
def main(debug=None):
args = docopt(__doc__,
argv=debug,
options_first=False,
version=__version__)
def first(s):
return s[0].replace(".", "N")
firstv = np.vectorize(first)
v = vcf(args["<vcf>"])
if len(v.samples) <= 1:
exit(puts_err(colored.red("\n\tVCF must have at least two samples.\n")))
if args["<region>"]:
variant_set = v(args["<region>"])
else:
variant_set = v
if args["fasta"] or args["tree"]:
"""
Generate an aligned fasta from a VCF file.
"""
gt_set = np.chararray((0,len(v.samples)))
gt_set = []
for line in variant_set:
if line.is_snp:
gt_set.append(firstv(line.gt_bases))
if len(gt_set) == 0:
exit(puts_err("No genotypes"))
gt_set = np.vstack(gt_set)
seqs = zip(v.samples, np.transpose(gt_set))
if args["fasta"]:
for sample, seq in seqs:
print(">" + sample)
print(''.join(seq))
elif args["tree"]:
"""
Generate a phylogenetic tree using an aligned fasta with muscle.
"""
# Check for muscle dependency
check_program_exists("muscle")
fasta = ""
with indent(4):
puts_err(colored.blue("\nGenerating Fasta\n"))
for sample, seq in seqs:
fasta += ">" + sample + "\n" + ''.join(seq) + "\n"
tree_type = "upgma" # default is upgma
if args["nj"]:
tree_type = "neighborjoining"
with indent(4):
puts_err(colored.blue("\nGenerating " + tree_type + " Tree\n"))
comm = ["muscle", "-maketree", "-in", "-", "-cluster", tree_type]
tree, err = Popen(comm, stdin=PIPE, stdout=PIPE).communicate(input=fasta)
# output tree
print(tree)
if args["--plot"]:
from jinja2 import Template
import webbrowser
import tempfile
prefix = os.path.dirname(os.path.abspath(sys.modules['vcfkit'].__file__)) + "/static"
template = open(prefix + "/tree.html",'r').read()
tree_template = Template(template)
html_out = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
with html_out as f:
tree = tree.replace("\n", "")
sample_len = len(v.samples)
f.write(tree_template.render(**locals()))
webbrowser.open("file://" + html_out.name)
if __name__ == '__main__':
main()
|
#! /usr/bin/env python
"""
usage:
vk phylo fasta <vcf> [<region>]
vk phylo tree (nj|upgma) [--plot] <vcf> [<region>]
options:
-h --help Show this screen.
--version Show version.
"""
from docopt import docopt
from vcfkit import __version__
from utils.vcf import *
from subprocess import Popen, PIPE
from utils import check_program_exists
from clint.textui import colored, indent, puts_err
import os
from pkgutil import get_data
import sys
import numpy as np
def main(debug=None):
args = docopt(__doc__,
argv=debug,
options_first=False,
version=__version__)
def first(s):
return s[0].replace(".", "-")
firstv = np.vectorize(first)
v = vcf(args["<vcf>"])
if len(v.samples) <= 1:
exit(puts_err(colored.red("\n\tVCF must have at least two samples.\n")))
if args["<region>"]:
variant_set = v(args["<region>"])
else:
variant_set = v
if args["fasta"] or args["tree"]:
"""
Generate an aligned fasta from a VCF file.
"""
gt_set = np.chararray((0,len(v.samples)))
gt_set = []
for line in variant_set:
if line.is_snp:
gt_set.append(firstv(line.gt_bases))
gt_set = np.vstack(gt_set)
seqs = zip(v.samples, np.transpose(gt_set))
if args["fasta"]:
for sample, seq in seqs:
print(">" + sample)
print(''.join(seq))
elif args["tree"]:
"""
Generate a phylogenetic tree using an aligned fasta with muscle.
"""
# Check for muscle dependency
check_program_exists("muscle")
fasta = ""
with indent(4):
puts_err(colored.blue("\nGenerating Fasta\n"))
for sample, seq in seqs:
fasta += ">" + sample + "\n" + ''.join(seq) + "\n"
tree_type = "upgma" # default is upgma
if args["nj"]:
tree_type = "neighborjoining"
with indent(4):
puts_err(colored.blue("\nGenerating " + tree_type + " Tree\n"))
comm = ["muscle", "-maketree", "-in", "-", "-cluster", tree_type]
tree, err = Popen(comm, stdin=PIPE, stdout=PIPE).communicate(input=fasta)
# output tree
print(tree)
if args["--plot"]:
from jinja2 import Template
import webbrowser
import tempfile
prefix = os.path.dirname(os.path.abspath(sys.modules['vcfkit'].__file__)) + "/static"
template = open(prefix + "/tree.html",'r').read()
tree_template = Template(template)
html_out = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
with html_out as f:
tree = tree.replace("\n", "")
sample_len = len(v.samples)
f.write(tree_template.render(**locals()))
webbrowser.open("file://" + html_out.name)
if __name__ == '__main__':
main()
|
Python
| 0.004823
|
6aead3bfc4ef7a0140238855e118e4017af1ab73
|
Change order of tests
|
pywikibot/comms/http.py
|
pywikibot/comms/http.py
|
# -*- coding: utf-8 -*-
"""
Basic HTTP access interface.
This module handles communication between the bot and the HTTP threads.
This module is responsible for
- Setting up a connection pool
- Providing a (blocking) interface for HTTP requests
- Translate site objects with query strings into urls
- Urlencoding all data
- Basic HTTP error handling
"""
#
# (C) Pywikipedia bot team, 2007
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
__docformat__ = 'epytext'
import Queue
import urllib
import urlparse
import logging
import atexit
from pywikibot import config
from pywikibot.exceptions import Server504Error
import pywikibot
import cookielib
import threadedhttp
logger = logging.getLogger("comms.http")
# global variables
useragent = 'Pywikipediabot/2.0' # This should include some global version string
numthreads = 1
threads = []
connection_pool = threadedhttp.ConnectionPool()
http_queue = Queue.Queue()
cookie_jar = threadedhttp.LockableCookieJar(
config.datafilepath("pywikibot.lwp"))
try:
cookie_jar.load()
except (IOError, cookielib.LoadError):
logger.debug("Loading cookies failed.")
else:
logger.debug("Loaded cookies from file.")
# Build up HttpProcessors
pywikibot.output('Starting %(numthreads)i threads...' % locals(),
level=pywikibot.VERBOSE)
for i in range(numthreads):
proc = threadedhttp.HttpProcessor(http_queue, cookie_jar, connection_pool)
proc.setDaemon(True)
threads.append(proc)
proc.start()
# Prepare flush on quit
def _flush():
for i in threads:
http_queue.put(None)
pywikibot.output(u'Waiting for threads to finish... ',
level=pywikibot.VERBOSE)
for i in threads:
i.join()
logger.debug('All threads finished.')
atexit.register(_flush)
# export cookie_jar to global namespace
import pywikibot
pywikibot.cookie_jar = cookie_jar
def request(site, uri, *args, **kwargs):
"""Queue a request to be submitted to Site.
All parameters not listed below are the same as
L{httplib2.Http.request}, but the uri is relative
@param site: The Site to connect to
@return: The received data (a unicode string).
"""
baseuri = "%s://%s/" % (site.protocol(), site.hostname())
uri = urlparse.urljoin(baseuri, uri)
# set default user-agent string
kwargs.setdefault("headers", {})
kwargs["headers"].setdefault("user-agent", useragent)
request = threadedhttp.HttpRequest(uri, *args, **kwargs)
http_queue.put(request)
request.lock.acquire()
#TODO: do some error correcting stuff
#if all else fails
if isinstance(request.data, Exception):
raise request.data
if request.data[0].status == 504:
raise Server504Error("Server %s timed out" % site.hostname())
if request.data[0].status != 200:
pywikibot.output(u"Http response status %(status)s"
% {'status': request.data[0].status},
level=pywikibot.WARNING)
return request.data[1]
|
# -*- coding: utf-8 -*-
"""
Basic HTTP access interface.
This module handles communication between the bot and the HTTP threads.
This module is responsible for
- Setting up a connection pool
- Providing a (blocking) interface for HTTP requests
- Translate site objects with query strings into urls
- Urlencoding all data
- Basic HTTP error handling
"""
#
# (C) Pywikipedia bot team, 2007
#
# Distributed under the terms of the MIT license.
#
__version__ = '$Id$'
__docformat__ = 'epytext'
import Queue
import urllib
import urlparse
import logging
import atexit
from pywikibot import config
from pywikibot.exceptions import Server504Error
import pywikibot
import cookielib
import threadedhttp
logger = logging.getLogger("comms.http")
# global variables
useragent = 'Pywikipediabot/2.0' # This should include some global version string
numthreads = 1
threads = []
connection_pool = threadedhttp.ConnectionPool()
http_queue = Queue.Queue()
cookie_jar = threadedhttp.LockableCookieJar(
config.datafilepath("pywikibot.lwp"))
try:
cookie_jar.load()
except (IOError, cookielib.LoadError):
logger.debug("Loading cookies failed.")
else:
logger.debug("Loaded cookies from file.")
# Build up HttpProcessors
pywikibot.output('Starting %(numthreads)i threads...' % locals(),
level=pywikibot.VERBOSE)
for i in range(numthreads):
proc = threadedhttp.HttpProcessor(http_queue, cookie_jar, connection_pool)
proc.setDaemon(True)
threads.append(proc)
proc.start()
# Prepare flush on quit
def _flush():
for i in threads:
http_queue.put(None)
pywikibot.output(u'Waiting for threads to finish... ',
level=pywikibot.VERBOSE)
for i in threads:
i.join()
logger.debug('All threads finished.')
atexit.register(_flush)
# export cookie_jar to global namespace
import pywikibot
pywikibot.cookie_jar = cookie_jar
def request(site, uri, *args, **kwargs):
"""Queue a request to be submitted to Site.
All parameters not listed below are the same as
L{httplib2.Http.request}, but the uri is relative
@param site: The Site to connect to
@return: The received data (a unicode string).
"""
baseuri = "%s://%s/" % (site.protocol(), site.hostname())
uri = urlparse.urljoin(baseuri, uri)
# set default user-agent string
kwargs.setdefault("headers", {})
kwargs["headers"].setdefault("user-agent", useragent)
request = threadedhttp.HttpRequest(uri, *args, **kwargs)
http_queue.put(request)
request.lock.acquire()
#TODO: do some error correcting stuff
if request.data[0].status == 504:
raise Server504Error("Server %s timed out" % site.hostname())
#if all else fails
if isinstance(request.data, Exception):
raise request.data
if request.data[0].status != 200:
pywikibot.output(u"Http response status %(status)s"
% {'status': request.data[0].status},
level=pywikibot.WARNING)
return request.data[1]
|
Python
| 0.000008
|
68e58114919208b69a01880f52e8b8e2918a4edb
|
make failing ogr/shape comparison a todo
|
tests/python_tests/ogr_and_shape_geometries_test.py
|
tests/python_tests/ogr_and_shape_geometries_test.py
|
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path, Todo
import os, sys, glob, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
# TODO - fix truncation in shapefile...
polys = ["POLYGON ((30 10, 10 20, 20 40, 40 40, 30 10))",
"POLYGON ((35 10, 10 20, 15 40, 45 45, 35 10),(20 30, 35 35, 30 20, 20 30))",
"MULTIPOLYGON (((30 20, 10 40, 45 40, 30 20)),((15 5, 40 10, 10 20, 5 10, 15 5)))"
"MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),((20 35, 45 20, 30 5, 10 10, 10 30, 20 35),(30 20, 20 25, 20 15, 30 20)))"
]
plugins = mapnik.DatasourceCache.instance().plugin_names()
if 'shape' in plugins and 'ogr' in plugins:
def test_geometries_are_interpreted_equivalently():
shapefile = '../data/shp/wkt_poly.shp'
ds1 = mapnik.Ogr(file=shapefile,layer_by_index=0)
ds2 = mapnik.Shapefile(file=shapefile)
fs1 = ds1.featureset()
fs2 = ds2.featureset()
raise Todo("output will differ between ogr and shape, may not matter, needs a closer look")
count = 0;
while(True):
count += 1
feat1 = fs1.next()
feat2 = fs2.next()
if not feat1:
break
#import pdb;pdb.set_trace()
#print feat1
eq_(str(feat1),str(feat2))
eq_(feat1.geometries().to_wkt(),feat2.geometries().to_wkt())
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
|
#!/usr/bin/env python
from nose.tools import *
from utilities import execution_path
import os, sys, glob, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
# TODO - fix truncation in shapefile...
polys = ["POLYGON ((30 10, 10 20, 20 40, 40 40, 30 10))",
"POLYGON ((35 10, 10 20, 15 40, 45 45, 35 10),(20 30, 35 35, 30 20, 20 30))",
"MULTIPOLYGON (((30 20, 10 40, 45 40, 30 20)),((15 5, 40 10, 10 20, 5 10, 15 5)))"
"MULTIPOLYGON (((40 40, 20 45, 45 30, 40 40)),((20 35, 45 20, 30 5, 10 10, 10 30, 20 35),(30 20, 20 25, 20 15, 30 20)))"
]
plugins = mapnik.DatasourceCache.instance().plugin_names()
if 'shape' in plugins and 'ogr' in plugins:
def test_geometries_are_interpreted_equivalently():
shapefile = '../data/shp/wkt_poly.shp'
ds1 = mapnik.Ogr(file=shapefile,layer_by_index=0)
ds2 = mapnik.Shapefile(file=shapefile)
fs1 = ds1.featureset()
fs2 = ds2.featureset()
count = 0;
while(True):
count += 1
feat1 = fs1.next()
feat2 = fs2.next()
if not feat1:
break
#import pdb;pdb.set_trace()
#print feat1
eq_(str(feat1),str(feat2))
eq_(feat1.geometries().to_wkt(),feat2.geometries().to_wkt())
if __name__ == "__main__":
setup()
[eval(run)() for run in dir() if 'test_' in run]
|
Python
| 0.000004
|
8e57bac9ca41bfcccfabc8524ddc2a8730ac4609
|
Update quality_score_filter.py
|
python/quality_score_filter.py
|
python/quality_score_filter.py
|
from Bio import SeqIO
import math
from Tkinter import Tk
import sys
name = sys.argv[1]
qs = float(sys.argv[3])
output = sys.argv[2]
count = 0
for rec in SeqIO.parse(name, "fastq"):
count += 1
qual_sequences = []
cnt = 0
for rec in SeqIO.parse(name, "fastq"):
rec.letter_annotations["phred_quality"]
probs = []
for q in rec.letter_annotations["phred_quality"]:
e = float(math.pow(10.0,-1*(float(q)/10.0)))
# print q, e
probs.append(e)
av_prob = float(sum(probs))/float(len((rec.letter_annotations["phred_quality"])))
# print av_prob
av_q = float(-10.0*(math.log10(float(av_prob))))
# print av_prob, av_q
if av_q >= qs:
cnt += 1
qual_sequences.append(rec)
output_handle = open(output +'.fa', "w")
SeqIO.write(qual_sequences, output_handle, "fasta")
output_handle.close()
output_handle = open(output +'.fq', "w")
SeqIO.write(qual_sequences, output_handle, "fastq")
output_handle.close()
|
from Bio import SeqIO
import math
from Tkinter import Tk
import sys
name = sys.argv[1]
qs = float(sys.argv[3])
output = sys.argv[2]
count = 0
for rec in SeqIO.parse(name, "fastq"):
count += 1
print("%i reads in fastq file" % count)
qual_sequences = [] # Setup an empty list
cnt = 0
for rec in SeqIO.parse(name, "fastq"):
rec.letter_annotations["phred_quality"]
probs = []
for q in rec.letter_annotations["phred_quality"]:
e = float(math.pow(10.0,-1*(float(q)/10.0)))
# print q, e
probs.append(e)
av_prob = float(sum(probs))/float(len((rec.letter_annotations["phred_quality"])))
# print av_prob
av_q = float(-10.0*(math.log10(float(av_prob))))
# print av_prob, av_q
if av_q >= qs:
cnt += 1
qual_sequences.append(rec)
print cnt,'Quality reads saved'
output_handle = open(output +'.fa', "w")
SeqIO.write(qual_sequences, output_handle, "fasta")
output_handle.close()
output_handle = open(output +'.fq', "w")
SeqIO.write(qual_sequences, output_handle, "fastq")
output_handle.close()
|
Python
| 0.000002
|
fa2c69bf4399f3a96505fe33050433f275ff6e0b
|
Bump version to 0.0.3
|
streamer/__init__.py
|
streamer/__init__.py
|
__version__ = "0.0.3"
|
__version__ = "0.0.2"
|
Python
| 0.000001
|
2304dcf3ebf189d7c3b1a00211a288e359c4cbb5
|
Rename signals for consistency
|
volt/signals.py
|
volt/signals.py
|
"""Signals for hooks."""
# Copyright (c) 2012-2022 Wibowo Arindrarto <contact@arindrarto.dev>
# SPDX-License-Identifier: BSD-3-Clause
from typing import Any
import structlog
from blinker import signal, NamedSignal
from structlog.contextvars import bound_contextvars
log = structlog.get_logger(__name__)
post_site_load_engines = signal("post_site_load_engines")
post_site_collect_targets = signal("post_site_collect_targets")
pre_site_write = signal("pre_site_write")
def send(signal: NamedSignal, *args: Any, **kwargs: Any) -> None:
with bound_contextvars(signal=f"{signal.name}"):
log.debug("sending to signal")
rvs = signal.send(*args, **kwargs)
log.debug("sent to signal", num_receiver=len(rvs))
return None
def _clear() -> None:
for s in (
post_site_load_engines,
post_site_collect_targets,
pre_site_write,
):
log.debug("clearing receivers", signal=s.name)
s.receivers.clear()
return None
|
"""Signals for hooks."""
# Copyright (c) 2012-2022 Wibowo Arindrarto <contact@arindrarto.dev>
# SPDX-License-Identifier: BSD-3-Clause
from typing import Any
import structlog
from blinker import signal, NamedSignal
from structlog.contextvars import bound_contextvars
log = structlog.get_logger(__name__)
post_site_load_engines = signal("post-site-load-engines")
post_site_collect_targets = signal("post-site-collect-targets")
pre_site_write = signal("pre-site-write")
def send(signal: NamedSignal, *args: Any, **kwargs: Any) -> None:
with bound_contextvars(signal=f"{signal.name}"):
log.debug("sending to signal")
rvs = signal.send(*args, **kwargs)
log.debug("sent to signal", num_receiver=len(rvs))
return None
def _clear() -> None:
for s in (
post_site_load_engines,
post_site_collect_targets,
pre_site_write,
):
log.debug("clearing receivers", signal=s.name)
s.receivers.clear()
return None
|
Python
| 0.000011
|
b58dcf4ce81b234de6701468296f4185ed63a8e2
|
Add filters to the admin interface
|
voting/admin.py
|
voting/admin.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from voting.models import Position, SACYear, Nomination
def make_rejected(ModelAdmin, request, queryset):
queryset.update(is_rejected=True)
make_rejected.short_description = "رفض المرشحـ/ين المختار/ين"
class NominationAdmin(admin.ModelAdmin):
list_filter = ['position', 'is_rejected']
list_display = ['__unicode__', 'cv', 'plan', 'is_rejected']
search_fields = ['position__title', 'user__username',
'user__email', 'user__profile__ar_first_name',
'user__profile__ar_middle_name',
'user__profile__ar_last_name',
'user__profile__en_first_name',
'user__profile__en_middle_name',
'user__profile__en_last_name',
'user__profile__student_id',
'user__profile__mobile_number']
actions = [make_rejected]
class PositionAdmin(admin.ModelAdmin):
list_filter = ['entity', 'year']
admin.site.register(Nomination, NominationAdmin)
admin.site.register(Position, PositionAdmin)
admin.site.register(SACYear)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.contrib import admin
from voting.models import Position, SACYear, Nomination
def make_rejected(ModelAdmin, request, queryset):
queryset.update(is_rejected=True)
make_rejected.short_description = "رفض المرشحـ/ين المختار/ين"
class NominationAdmin(admin.ModelAdmin):
list_filter = ['position', 'is_rejected']
list_display = ['__unicode__', 'cv', 'plan', 'is_rejected']
search_fields = ['position__title', 'user__username',
'user__email', 'user__profile__ar_first_name',
'user__profile__ar_middle_name',
'user__profile__ar_last_name',
'user__profile__en_first_name',
'user__profile__en_middle_name',
'user__profile__en_last_name',
'user__profile__student_id',
'user__profile__mobile_number']
actions = [make_rejected]
admin.site.register(Nomination, NominationAdmin)
admin.site.register(SACYear)
admin.site.register(Position)
|
Python
| 0
|
4ea4f12fe589d44b2f27f6e8a645f463b15d146a
|
Use raw_id_fields for TeamMembership inline to avoid select field with *all* users.
|
studygroups/admin.py
|
studygroups/admin.py
|
from django.contrib import admin
# Register your models here.
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import Meeting
from studygroups.models import Application
from studygroups.models import Reminder
from studygroups.models import Profile
from studygroups.models import Team
from studygroups.models import TeamMembership
from studygroups.models import TeamInvitation
class ApplicationInline(admin.TabularInline):
model = Application
class StudyGroupAdmin(admin.ModelAdmin):
inlines = [ ApplicationInline ]
list_display = ['course', 'city', 'facilitator', 'start_date', 'day', 'signup_open']
class TeamMembershipInline(admin.TabularInline):
model = TeamMembership
raw_id_fields = ("user",)
class TeamAdmin(admin.ModelAdmin):
list_display = ('name', 'page_slug')
inlines = [ TeamMembershipInline ]
class ApplicationAdmin(admin.ModelAdmin):
list_display = ('name', 'study_group', 'email', 'mobile', 'created_at')
def reminder_course_title(obj):
return obj.study_group.course.title
class ReminderAdmin(admin.ModelAdmin):
list_display = (reminder_course_title, 'email_subject', 'sent_at')
class StudyGroupInline(admin.TabularInline):
model = StudyGroup
fields = ('venue_name', 'city', 'start_date', 'day')
readonly_fields = fields
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CourseAdmin(admin.ModelAdmin):
def get_queryset(self, request):
qs = super(CourseAdmin, self).get_queryset(request)
return qs.active()
def created_by(course):
def display_user(user):
return '{} {}'.format(user.first_name, user.last_name)
return display_user(course.created_by) if course.created_by else 'P2PU'
def email(course):
return course.created_by.email if course.created_by else '-'
def learning_circles(course):
return course.studygroup_set.active().count()
def listed(course):
return not course.unlisted
listed.boolean = True
list_display = ('id', 'title', 'provider', 'on_demand', 'topics', learning_circles, created_by, email, listed, 'license')
exclude = ('deleted_at',)
inlines = [StudyGroupInline]
search_fields = ['title', 'provider', 'topics', 'created_by__email', 'license']
class ProfileAdmin(admin.ModelAdmin):
def user(profile):
return " ".join([profile.user.first_name, profile.user.last_name])
list_display = [user, 'mailing_list_signup', 'communication_opt_in']
search_fields = ['user__email']
admin.site.register(Course, CourseAdmin)
admin.site.register(StudyGroup, StudyGroupAdmin)
admin.site.register(Meeting)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Reminder, ReminderAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(TeamInvitation)
admin.site.register(Profile, ProfileAdmin)
|
from django.contrib import admin
# Register your models here.
from studygroups.models import Course
from studygroups.models import StudyGroup
from studygroups.models import Meeting
from studygroups.models import Application
from studygroups.models import Reminder
from studygroups.models import Profile
from studygroups.models import Team
from studygroups.models import TeamMembership
from studygroups.models import TeamInvitation
class ApplicationInline(admin.TabularInline):
model = Application
class StudyGroupAdmin(admin.ModelAdmin):
inlines = [ ApplicationInline ]
list_display = ['course', 'city', 'facilitator', 'start_date', 'day', 'signup_open']
class TeamMembershipInline(admin.TabularInline):
model = TeamMembership
class TeamAdmin(admin.ModelAdmin):
list_display = ('name', 'page_slug')
inlines = [ TeamMembershipInline ]
class ApplicationAdmin(admin.ModelAdmin):
list_display = ('name', 'study_group', 'email', 'mobile', 'created_at')
def reminder_course_title(obj):
return obj.study_group.course.title
class ReminderAdmin(admin.ModelAdmin):
list_display = (reminder_course_title, 'email_subject', 'sent_at')
class StudyGroupInline(admin.TabularInline):
model = StudyGroup
fields = ('venue_name', 'city', 'start_date', 'day')
readonly_fields = fields
def has_add_permission(self, request, obj=None):
return False
def has_delete_permission(self, request, obj=None):
return False
class CourseAdmin(admin.ModelAdmin):
def get_queryset(self, request):
qs = super(CourseAdmin, self).get_queryset(request)
return qs.active()
def created_by(course):
def display_user(user):
return '{} {}'.format(user.first_name, user.last_name)
return display_user(course.created_by) if course.created_by else 'P2PU'
def email(course):
return course.created_by.email if course.created_by else '-'
def learning_circles(course):
return course.studygroup_set.active().count()
def listed(course):
return not course.unlisted
listed.boolean = True
list_display = ('id', 'title', 'provider', 'on_demand', 'topics', learning_circles, created_by, email, listed, 'license')
exclude = ('deleted_at',)
inlines = [StudyGroupInline]
search_fields = ['title', 'provider', 'topics', 'created_by__email', 'license']
class ProfileAdmin(admin.ModelAdmin):
def user(profile):
return " ".join([profile.user.first_name, profile.user.last_name])
list_display = [user, 'mailing_list_signup', 'communication_opt_in']
search_fields = ['user__email']
admin.site.register(Course, CourseAdmin)
admin.site.register(StudyGroup, StudyGroupAdmin)
admin.site.register(Meeting)
admin.site.register(Application, ApplicationAdmin)
admin.site.register(Reminder, ReminderAdmin)
admin.site.register(Team, TeamAdmin)
admin.site.register(TeamInvitation)
admin.site.register(Profile, ProfileAdmin)
|
Python
| 0
|
847fc43b572384f8afcd395ada275b053e24a193
|
Fix aiohttp test
|
tests/test_aiohttp.py
|
tests/test_aiohttp.py
|
try:
import aiohttp
import aiohttp.web
except ImportError:
skip_tests = True
else:
skip_tests = False
import asyncio
import unittest
from uvloop import _testbase as tb
class _TestAioHTTP:
def test_aiohttp_basic_1(self):
PAYLOAD = '<h1>It Works!</h1>' * 10000
async def on_request(request):
return aiohttp.web.Response(text=PAYLOAD)
asyncio.set_event_loop(self.loop)
app = aiohttp.web.Application(loop=self.loop)
app.router.add_get('/', on_request)
f = self.loop.create_server(
app.make_handler(),
'0.0.0.0', '0')
srv = self.loop.run_until_complete(f)
port = srv.sockets[0].getsockname()[1]
async def test():
for addr in (('localhost', port),
('127.0.0.1', port)):
async with aiohttp.ClientSession() as client:
async with client.get('http://{}:{}'.format(*addr)) as r:
self.assertEqual(r.status, 200)
result = await r.text()
self.assertEqual(result, PAYLOAD)
self.loop.run_until_complete(test())
self.loop.run_until_complete(app.shutdown())
self.loop.run_until_complete(app.cleanup())
@unittest.skipIf(skip_tests, "no aiohttp module")
class Test_UV_AioHTTP(_TestAioHTTP, tb.UVTestCase):
pass
@unittest.skipIf(skip_tests, "no aiohttp module")
class Test_AIO_AioHTTP(_TestAioHTTP, tb.AIOTestCase):
pass
|
try:
import aiohttp
import aiohttp.server
except ImportError:
skip_tests = True
else:
skip_tests = False
import asyncio
import unittest
from uvloop import _testbase as tb
class _TestAioHTTP:
def test_aiohttp_basic_1(self):
PAYLOAD = b'<h1>It Works!</h1>' * 10000
class HttpRequestHandler(aiohttp.server.ServerHttpProtocol):
async def handle_request(self, message, payload):
response = aiohttp.Response(
self.writer, 200, http_version=message.version
)
response.add_header('Content-Type', 'text/html')
response.add_header('Content-Length', str(len(PAYLOAD)))
response.send_headers()
response.write(PAYLOAD)
await response.write_eof()
asyncio.set_event_loop(self.loop)
f = self.loop.create_server(
lambda: HttpRequestHandler(keepalive_timeout=1),
'0.0.0.0', '0')
srv = self.loop.run_until_complete(f)
port = srv.sockets[0].getsockname()[1]
async def test():
for addr in (('localhost', port),
('127.0.0.1', port)):
async with aiohttp.ClientSession() as client:
async with client.get('http://{}:{}'.format(*addr)) as r:
self.assertEqual(r.status, 200)
self.assertEqual(len(await r.text()), len(PAYLOAD))
self.loop.run_until_complete(test())
srv.close()
self.loop.run_until_complete(srv.wait_closed())
@unittest.skipIf(skip_tests, "no aiohttp module")
class Test_UV_AioHTTP(_TestAioHTTP, tb.UVTestCase):
pass
@unittest.skipIf(skip_tests, "no aiohttp module")
class Test_AIO_AioHTTP(_TestAioHTTP, tb.AIOTestCase):
pass
|
Python
| 0.000009
|
61969ac21d7eda1162cdedd3f066aa8e396fb5ba
|
Fix test output
|
raven/scripts/runner.py
|
raven/scripts/runner.py
|
"""
raven.scripts.runner
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import time
from optparse import OptionParser
from raven import Client, get_version
from raven.utils.json import json
def store_json(option, opt_str, value, parser):
try:
value = json.loads(value)
except ValueError:
print("Invalid JSON was used for option %s. Received: %s" % (opt_str, value))
sys.exit(1)
setattr(parser.values, option.dest, value)
def get_loadavg():
if hasattr(os, 'getloadavg'):
return os.getloadavg()
return None
def get_uid():
try:
import pwd
except ImportError:
return None
return pwd.getpwuid(os.geteuid())[0]
def send_test_message(client, options):
sys.stdout.write("Client configuration:\n")
for k in ('base_url', 'project', 'public_key', 'secret_key'):
sys.stdout.write(' %-15s: %s\n' % (k, getattr(client.remote, k)))
sys.stdout.write('\n')
remote_config = client.remote
if not remote_config.is_active():
sys.stdout.write("Error: DSN configuration is not valid!\n")
sys.exit(1)
if not client.is_enabled():
sys.stdout.write('Error: Client reports as being disabled!\n')
sys.exit(1)
data = options.get('data', {
'culprit': 'raven.scripts.runner',
'logger': 'raven.test',
'request': {
'method': 'GET',
'url': 'http://example.com',
}
})
sys.stdout.write('Sending a test message... ')
sys.stdout.flush()
ident = client.get_ident(client.captureMessage(
message='This is a test message generated using ``raven test``',
data=data,
level=logging.INFO,
stack=True,
tags=options.get('tags', {}),
extra={
'user': get_uid(),
'loadavg': get_loadavg(),
},
))
sys.stdout.write('Event ID was %r\n' % (ident,))
def main():
root = logging.getLogger('sentry.errors')
root.setLevel(logging.DEBUG)
# if len(root.handlers) == 0:
# root.addHandler(logging.StreamHandler())
parser = OptionParser(version=get_version())
parser.add_option("--data", action="callback", callback=store_json,
type="string", nargs=1, dest="data")
parser.add_option("--tags", action="callback", callback=store_json,
type="string", nargs=1, dest="tags")
(opts, args) = parser.parse_args()
dsn = ' '.join(args[1:]) or os.environ.get('SENTRY_DSN')
if not dsn:
print("Error: No configuration detected!")
print("You must either pass a DSN to the command, or set the SENTRY_DSN environment variable.")
sys.exit(1)
print("Using DSN configuration:")
print(" ", dsn)
print()
client = Client(dsn, include_paths=['raven'])
send_test_message(client, opts.__dict__)
# TODO(dcramer): correctly support async models
time.sleep(3)
if client.state.did_fail():
sys.stdout.write('error!\n')
sys.exit(1)
sys.stdout.write('success!\n')
|
"""
raven.scripts.runner
~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2012 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from __future__ import print_function
import logging
import os
import sys
import time
from optparse import OptionParser
from raven import Client, get_version
from raven.utils.json import json
def store_json(option, opt_str, value, parser):
try:
value = json.loads(value)
except ValueError:
print("Invalid JSON was used for option %s. Received: %s" % (opt_str, value))
sys.exit(1)
setattr(parser.values, option.dest, value)
def get_loadavg():
if hasattr(os, 'getloadavg'):
return os.getloadavg()
return None
def get_uid():
try:
import pwd
except ImportError:
return None
return pwd.getpwuid(os.geteuid())[0]
def send_test_message(client, options):
sys.stdout.write("Client configuration:\n")
for k in ('servers', 'project', 'public_key', 'secret_key'):
sys.stdout.write(' %-15s: %s\n' % (k, getattr(client, k)))
sys.stdout.write('\n')
remote_config = client.remote
if not remote_config.is_active():
sys.stdout.write("Error: DSN configuration is not valid!\n")
sys.exit(1)
if not client.is_enabled():
sys.stdout.write('Error: Client reports as being disabled!\n')
sys.exit(1)
data = options.get('data', {
'culprit': 'raven.scripts.runner',
'logger': 'raven.test',
'request': {
'method': 'GET',
'url': 'http://example.com',
}
})
sys.stdout.write('Sending a test message... ')
sys.stdout.flush()
ident = client.get_ident(client.captureMessage(
message='This is a test message generated using ``raven test``',
data=data,
level=logging.INFO,
stack=True,
tags=options.get('tags', {}),
extra={
'user': get_uid(),
'loadavg': get_loadavg(),
},
))
sys.stdout.write('Event ID was %r\n' % (ident,))
def main():
root = logging.getLogger('sentry.errors')
root.setLevel(logging.DEBUG)
# if len(root.handlers) == 0:
# root.addHandler(logging.StreamHandler())
parser = OptionParser(version=get_version())
parser.add_option("--data", action="callback", callback=store_json,
type="string", nargs=1, dest="data")
parser.add_option("--tags", action="callback", callback=store_json,
type="string", nargs=1, dest="tags")
(opts, args) = parser.parse_args()
dsn = ' '.join(args[1:]) or os.environ.get('SENTRY_DSN')
if not dsn:
print("Error: No configuration detected!")
print("You must either pass a DSN to the command, or set the SENTRY_DSN environment variable.")
sys.exit(1)
print("Using DSN configuration:")
print(" ", dsn)
print()
client = Client(dsn, include_paths=['raven'])
send_test_message(client, opts.__dict__)
# TODO(dcramer): correctly support async models
time.sleep(3)
if client.state.did_fail():
sys.stdout.write('error!\n')
sys.exit(1)
sys.stdout.write('success!\n')
|
Python
| 0.988755
|
8b6cbdbae4dedfbbf025a7ecb20c7d7b3959ed11
|
support to overwrite position in border
|
rbgomoku/core/player.py
|
rbgomoku/core/player.py
|
from core import OverwritePositionException
from core.board import Piece
class AIPlayer:
""" Abstract AI players.
To construct an AI player:
Construct an instance (of its subclass) with the game Board
"""
def __init__(self, board, piece):
self._board = board
self.my_piece = piece
self.opponent = Piece.WHITE if piece == Piece.BLACK else Piece.BLACK
def play(self, row, col):
raise NotImplemented
class HumanPlayer(AIPlayer):
""" Human Player
"""
def __init__(self, board, piece, first=True):
super(HumanPlayer, self).__init__(board, piece)
self.first = not first
def play(self, row, col):
if self._board.get_piece(row, col) != Piece.NONE:
raise OverwritePositionException
return self._board.play_piece(self.my_piece, row, col)
def __repr__(self):
player_number = int(self.first) + 1
return 'Player {}'.format(player_number)
|
from core.board import Piece
class AIPlayer:
""" Abstract AI players.
To construct an AI player:
Construct an instance (of its subclass) with the game Board
"""
def __init__(self, board, piece):
self._board = board
self.my_piece = piece
self.opponent = Piece.WHITE if piece == Piece.BLACK else Piece.BLACK
# Abstract method to get next move. Return int[2] of {row, col} */
def play(self, row, col):
raise NotImplemented
class HumanPlayer(AIPlayer):
""" Human Player
"""
def __init__(self, board, piece):
super(HumanPlayer, self).__init__(board, piece)
def play(self, row, col):
self._board.play_piece(self.my_piece, row, col)
self._board.has_winner(self.my_piece, row, col)
return self._board.winner
|
Python
| 0
|
f0b7eea8a603e331be6db71beb2766d022dacb23
|
Refactor the method who check for changes in user agent
|
tests/test_browser.py
|
tests/test_browser.py
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import __builtin__
try:
import unittest2 as unittest
except ImportError:
import unittest
import warnings
from splinter.exceptions import DriverNotFoundError
from splinter.utils import deprecate_driver_class
from fake_webapp import EXAMPLE_APP
class BrowserTest(unittest.TestCase):
def patch_driver(self, pattern):
self.old_import = __builtin__.__import__
def custom_import(name, *args, **kwargs):
if pattern in name:
return None
return self.old_import(name, *args, **kwargs)
__builtin__.__import__ = custom_import
def unpatch_driver(self, module):
__builtin__.__import__ = self.old_import
reload(module)
def browser_can_receive_user_agent(self, webdriver):
from splinter.browser import Browser
browser = Browser(driver_name=webdriver, user_agent="iphone")
browser.visit(EXAMPLE_APP + "useragent")
result = browser.is_text_present('iphone')
browser.quit()
return result
def test_should_work_even_without_zope_testbrowser(self):
self.patch_driver('zope')
from splinter import browser
reload(browser)
assert 'zope.testbrowser' not in browser._DRIVERS, 'zope.testbrowser driver should not be registered when zope.testbrowser is not installed'
self.unpatch_driver(browser)
def test_should_raise_an_exception_when_browser_driver_is_not_found(self):
with self.assertRaises(DriverNotFoundError):
from splinter.browser import Browser
Browser('unknown-driver')
def test_firefox_should_be_able_to_receive_user_agent(self):
self.assertTrue(self.browser_can_receive_user_agent('firefox'))
def test_chrome_should_be_able_to_receive_user_agent(self):
self.assertTrue(self.browser_can_receive_user_agent('chrome'))
class BrowserDeprecationTest(unittest.TestCase):
class Foo(object):
pass
def test_should_deprecate_with_the_given_message(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
cls = deprecate_driver_class(self.Foo, message="Foo was deprecated")
cls()
warning = warnings_list[0]
assert type(warning.message) is DeprecationWarning
self.assertEquals("Foo was deprecated", warning.message.args[0])
def test_should_prepend_a_Deprecated_to_class(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('default')
cls = deprecate_driver_class(self.Foo, message="Foo was deprecated")
self.assertEquals("DeprecatedFoo", cls.__name__)
def test_webdriverfirefox_should_be_deprecated(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
from splinter.browser import Browser
browser = Browser('webdriver.firefox')
browser.quit()
warning_message = warnings_list[0].message.args[0]
self.assertEquals("'webdriver.firefox' is deprecated, use just 'firefox'", warning_message)
def test_webdriverchrome_should_be_deprecated(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
from splinter.browser import Browser
browser = Browser('webdriver.chrome')
browser.quit()
warning_message = warnings_list[0].message.args[0]
self.assertEquals("'webdriver.chrome' is deprecated, use just 'chrome'", warning_message)
|
# -*- coding: utf-8 -*-
from __future__ import with_statement
import __builtin__
try:
import unittest2 as unittest
except ImportError:
import unittest
import warnings
from splinter.exceptions import DriverNotFoundError
from splinter.utils import deprecate_driver_class
from fake_webapp import EXAMPLE_APP
class BrowserTest(unittest.TestCase):
def patch_driver(self, pattern):
self.old_import = __builtin__.__import__
def custom_import(name, *args, **kwargs):
if pattern in name:
return None
return self.old_import(name, *args, **kwargs)
__builtin__.__import__ = custom_import
def unpatch_driver(self, module):
__builtin__.__import__ = self.old_import
reload(module)
def test_should_work_even_without_zope_testbrowser(self):
self.patch_driver('zope')
from splinter import browser
reload(browser)
assert 'zope.testbrowser' not in browser._DRIVERS, 'zope.testbrowser driver should not be registered when zope.testbrowser is not installed'
self.unpatch_driver(browser)
def test_should_raise_an_exception_when_browser_driver_is_not_found(self):
with self.assertRaises(DriverNotFoundError):
from splinter.browser import Browser
Browser('unknown-driver')
def test_firefox_should_be_able_to_receive_user_agent(self):
from splinter.browser import Browser
browser = Browser(driver_name='firefox', user_agent="iphone")
browser.visit(EXAMPLE_APP + "useragent")
self.assertTrue(browser.is_text_present('iphone'))
browser.quit()
def test_chrome_should_be_able_to_receive_user_agent(self):
from splinter.browser import Browser
browser = Browser(driver_name='chrome', user_agent="iphone")
browser.visit(EXAMPLE_APP + "useragent")
self.assertTrue(browser.is_text_present('iphone'))
browser.quit()
class BrowserDeprecationTest(unittest.TestCase):
class Foo(object):
pass
def test_should_deprecate_with_the_given_message(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
cls = deprecate_driver_class(self.Foo, message="Foo was deprecated")
cls()
warning = warnings_list[0]
assert type(warning.message) is DeprecationWarning
self.assertEquals("Foo was deprecated", warning.message.args[0])
def test_should_prepend_a_Deprecated_to_class(self):
with warnings.catch_warnings(record=True):
warnings.simplefilter('default')
cls = deprecate_driver_class(self.Foo, message="Foo was deprecated")
self.assertEquals("DeprecatedFoo", cls.__name__)
def test_webdriverfirefox_should_be_deprecated(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
from splinter.browser import Browser
browser = Browser('webdriver.firefox')
browser.quit()
warning_message = warnings_list[0].message.args[0]
self.assertEquals("'webdriver.firefox' is deprecated, use just 'firefox'", warning_message)
def test_webdriverchrome_should_be_deprecated(self):
with warnings.catch_warnings(record=True) as warnings_list:
warnings.simplefilter('default')
from splinter.browser import Browser
browser = Browser('webdriver.chrome')
browser.quit()
warning_message = warnings_list[0].message.args[0]
self.assertEquals("'webdriver.chrome' is deprecated, use just 'chrome'", warning_message)
|
Python
| 0.00001
|
2b7d52369206f6a6b9f0ceb4afe28e73e652e806
|
Fix typo s/router/route
|
loafer/consumer.py
|
loafer/consumer.py
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import asyncio
import json
from functools import partial
import logging
import boto3
import botocore.exceptions
from .conf import settings
from .exceptions import ConsumerError
logger = logging.getLogger(__name__)
class SQSConsumer(object):
def __init__(self, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._semaphore = asyncio.Semaphore(settings.MAX_JOBS)
self._client = boto3.client('sqs')
async def process_message(self, route, message):
logger.info('Message received, handling to route={}'.format(route))
logger.debug('Processing Message={}', message)
# TODO: better heuristic
try:
body = json.loads(message['Body'])
except json.decoder.JSONDecodeError:
body = message['Body']
content = body
if isinstance(body, dict):
if 'Message' in body:
content = body['Message']
# Since we don't know what will happen on message handler, use semaphore
# to protect scheduling or executing too many coroutines/threads
with await self._semaphore:
# TODO: depending on content type, we should pass as *args or **kwargs
logger.info('Message content data type is {!r}'.format(type(content)))
await route.deliver(content)
await self.ack_message(route.queue_url, message['ReceiptHandle'])
async def ack_message(self, queue, receipt):
logger.info('Acking message')
logger.debug('receipt={}'.format(receipt))
fn = partial(self._client.delete_message, QueueUrl=queue, ReceiptHandle=receipt)
# XXX: Refactor this when boto support asyncio
return await self._loop.run_in_executor(None, fn)
async def consume(self, routes):
while True:
for route in routes:
try:
messages = await route.fetch_messages()
except botocore.exceptions.ClientError as exc:
logger.exception(exc)
raise ConsumerError('Error when fetching messages') from exc
for message in messages:
await self.process_message(route, message)
|
# -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
import asyncio
import json
from functools import partial
import logging
import boto3
import botocore.exceptions
from .conf import settings
from .exceptions import ConsumerError
logger = logging.getLogger(__name__)
class SQSConsumer(object):
def __init__(self, loop=None):
self._loop = loop or asyncio.get_event_loop()
self._semaphore = asyncio.Semaphore(settings.MAX_JOBS)
self._client = boto3.client('sqs')
async def process_message(self, route, message):
logger.info('Message received, handling to route={}'.format(route))
logger.debug('Processing Message={}', message)
# TODO: better heuristic
try:
body = json.loads(message['Body'])
except json.decoder.JSONDecodeError:
body = message['Body']
content = body
if isinstance(body, dict):
if 'Message' in body:
content = body['Message']
# Since we don't know what will happen on message handler, use semaphore
# to protect scheduling or executing too many coroutines/threads
with await self._semaphore:
# TODO: depending on content type, we should pass as *args or **kwargs
logger.info('Message content data type is {!r}'.format(type(content)))
await route.deliver(content)
await self.ack_message(route.queue_url, message['ReceiptHandle'])
async def ack_message(self, queue, receipt):
logger.info('Acking message')
logger.debug('receipt={}'.format(receipt))
fn = partial(self._client.delete_message, QueueUrl=queue, ReceiptHandle=receipt)
# XXX: Refactor this when boto support asyncio
return await self._loop.run_in_executor(None, fn)
async def consume(self, routes):
while True:
for router in routes:
try:
messages = await router.fetch_messages()
except botocore.exceptions.ClientError as exc:
logger.exception(exc)
raise ConsumerError('Error when fetching messages') from exc
for message in messages:
await self.process_message(router, message)
|
Python
| 0.999975
|
cc78aef74876049a4548398133bad64e405351de
|
Remove redundant parameters from wagtailuserbar tag; trigger a DeprecationWarning if people are still passing a css path
|
wagtail/wagtailadmin/templatetags/wagtailuserbar.py
|
wagtail/wagtailadmin/templatetags/wagtailuserbar.py
|
import warnings
from django import template
from wagtail.wagtailadmin.views import userbar
from wagtail.wagtailcore.models import Page
register = template.Library()
@register.simple_tag(takes_context=True)
def wagtailuserbar(context, css_path=None):
if css_path is not None:
warnings.warn(
"Passing a CSS path to the wagtailuserbar tag is no longer required; use {% wagtailuserbar %} instead",
DeprecationWarning
)
# Find request object
request = context['request']
# Don't render if user doesn't have permission to access the admin area
if not request.user.has_perm('wagtailadmin.access_admin'):
return ''
# Find page object
if 'self' in context and isinstance(context['self'], Page) and context['self'].id is not None:
pass
else:
return ''
# Render edit bird
return userbar.render_edit_frame(request, context) or ''
|
from django import template
from wagtail.wagtailadmin.views import userbar
from wagtail.wagtailcore.models import Page
register = template.Library()
@register.simple_tag(takes_context=True)
def wagtailuserbar(context, current_page=None, items=None):
# Find request object
request = context['request']
# Don't render if user doesn't have permission to access the admin area
if not request.user.has_perm('wagtailadmin.access_admin'):
return ''
# Find page object
if 'self' in context and isinstance(context['self'], Page) and context['self'].id is not None:
pass
else:
return ''
# Render edit bird
return userbar.render_edit_frame(request, context) or ''
|
Python
| 0
|
42162048981e26aecb942ca936de86dc1dd82041
|
Fix #23 actors.Worker identity sent on polling for activity task
|
swf/actors/worker.py
|
swf/actors/worker.py
|
#! -*- coding:utf-8 -*-
from swf.actors import Actor
from swf.models import ActivityTask
from swf.exceptions import PollTimeout
class ActivityWorker(Actor):
"""Activity task worker actor implementation
Once started, will start polling for activity task,
to process, and emitting heartbeat until it's stopped
or crashes for some reason.
:param domain: Domain the Actor should interact with
:type domain: swf.models.Domain
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:param identity: Identity of the worker making the request,
which is recorded in the ActivityTaskStarted
event in the workflow history. This enables
diagnostic tracing when problems arise.
The form of this identity is user defined.
:type identity: string
"""
def __init__(self, domain, task_list, identity=None):
super(ActivityWorker, self).__init__(
domain,
task_list
)
self.identity = identity
def cancel(self, task_token, details=None):
"""Responds to ``swf`` that the activity task was canceled
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
"""
return self.connection.respond_activity_task_canceled(task_token)
def complete(self, task_token, result=None):
"""Responds to ``swf` that the activity task is completed
:param task_token: completed activity task token
:type task_token: string
:param result: The result of the activity task.
:type result: string
"""
return self.connection.respond_activity_task_completed(
task_token,
result
)
def fail(self, task_token, details=None, reason=None):
"""Replies to ``swf`` that the activity task failed
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
:param reason: Description of the error that may assist in diagnostics
:type reason: string
"""
return self.connection.respond_activity_task_failed(
task_token,
details,
reason
)
def heartbeat(self, task_token, details=None):
"""Records activity task heartbeat
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
"""
return self.connection.record_activity_task_heartbeat(
task_token,
details
)
def poll(self, task_list=None, identity=None):
"""Polls for an activity task to process from current
actor's instance defined ``task_list``
if no activity task was polled, raises a PollTimeout
exception.
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:param identity: Identity of the worker making the request,
which is recorded in the ActivityTaskStarted
event in the workflow history. This enables
diagnostic tracing when problems arise.
The form of this identity is user defined.
:type identity: string
:raises: PollTimeout
:returns: polled activity task
:type: swf.models.ActivityTask
"""
task_list = task_list or self.task_list
identity = identity or self.identity
polled_activity_data = self.connection.poll_for_activity_task(
self.domain.name,
task_list,
identity=identity
)
if not 'taskToken' in polled_activity_data:
raise PollTimeout("Activity Worker poll timed out")
activity_task = ActivityTask.from_poll(
self.domain,
self.task_list,
polled_activity_data
)
task_token = activity_task.task_token
return task_token, activity_task
|
#! -*- coding:utf-8 -*-
from swf.actors import Actor
from swf.models import ActivityTask
from swf.exceptions import PollTimeout
class ActivityWorker(Actor):
"""Activity task worker actor implementation
Once started, will start polling for activity task,
to process, and emitting heartbeat until it's stopped
or crashes for some reason.
:param domain: Domain the Actor should interact with
:type domain: swf.models.Domain
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:param last_token: last seen task token
:type last_token: string
"""
def __init__(self, domain, task_list):
super(ActivityWorker, self).__init__(
domain,
task_list
)
def cancel(self, task_token, details=None):
"""Responds to ``swf`` that the activity task was canceled
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
"""
return self.connection.respond_activity_task_canceled(task_token)
def complete(self, task_token, result=None):
"""Responds to ``swf` that the activity task is completed
:param task_token: completed activity task token
:type task_token: string
:param result: The result of the activity task.
:type result: string
"""
return self.connection.respond_activity_task_completed(
task_token,
result
)
def fail(self, task_token, details=None, reason=None):
"""Replies to ``swf`` that the activity task failed
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
:param reason: Description of the error that may assist in diagnostics
:type reason: string
"""
return self.connection.respond_activity_task_failed(
task_token,
details,
reason
)
def heartbeat(self, task_token, details=None):
"""Records activity task heartbeat
:param task_token: canceled activity task token
:type task_token: string
:param details: provided details about cancel
:type details: string
"""
return self.connection.record_activity_task_heartbeat(
task_token,
details
)
def poll(self, task_list=None, **kwargs):
"""Polls for an activity task to process from current
actor's instance defined ``task_list``
if no activity task was polled, raises a PollTimeout
exception.
:param task_list: task list the Actor should watch for tasks on
:type task_list: string
:raises: PollTimeout
:returns: polled activity task
:type: swf.models.ActivityTask
"""
task_list = task_list or self.task_list
polled_activity_data = self.connection.poll_for_activity_task(
self.domain.name,
task_list,
)
if not 'taskToken' in polled_activity_data:
raise PollTimeout("Activity Worker poll timed out")
activity_task = ActivityTask.from_poll(self.domain, self.task_list, polled_activity_data)
task_token = activity_task.task_token
return task_token, activity_task
|
Python
| 0
|
153f7b28e5b4763dd41f95b4840dcf56d9895393
|
Update bot.py
|
code/bot1/bot.py
|
code/bot1/bot.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tweepy, time, sys # pip install tweepy
import sys
sys.path.append("..")
from course_config import *
argfile = str(sys.argv[1])
# go to https://dev.twitter.com/ and register application
# you need CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
filename=open(argfile,'r')
f=filename.readlines()
filename.close()
for line in f:
api.update_status(line)
time.sleep(30) #Tweet every 15 minutes
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import tweepy, time, sys # pip install tweepy
import sys
sys.path.append("..")
from course_config import *
argfile = str(sys.argv[1])
# need CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_KEY, ACCESS_SECRET)
api = tweepy.API(auth)
filename=open(argfile,'r')
f=filename.readlines()
filename.close()
for line in f:
api.update_status(line)
time.sleep(30) #Tweet every 15 minutes
|
Python
| 0.000001
|
0bc98e3cbab019af6f0543c6618387511e354f5f
|
Add unittests for WhisperFinder
|
tests/test_finders.py
|
tests/test_finders.py
|
import os
import random
import time
from . import TestCase, WHISPER_DIR
from graphite_api.app import app
from graphite_api.intervals import Interval, IntervalSet
from graphite_api.node import LeafNode, BranchNode
from graphite_api.storage import Store
from graphite_api._vendor import whisper
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store([DummyFinder()])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
def test_multi_finder(self):
store = Store([DummyFinder(), DummyFinder()])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
npoints = (end_time - start_time) // 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in range(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
class WhisperFinderTest(TestCase):
_listdir_counter = 0
_original_listdir = os.listdir
def test_whisper_finder(self):
for db in (
('whisper_finder', 'foo.wsp'),
('whisper_finder', 'foo', 'bar', 'baz.wsp'),
('whisper_finder', 'bar', 'baz', 'baz.wsp'),
):
db_path = os.path.join(WHISPER_DIR, *db)
if not os.path.exists(os.path.dirname(db_path)):
os.makedirs(os.path.dirname(db_path))
whisper.create(db_path, [(1, 60)])
def listdir_mock(d):
self._listdir_counter += 1
return self._original_listdir(d)
try:
os.listdir = listdir_mock
store = app.config['GRAPHITE']['store']
print("store = %s" % store)
self._listdir_counter = 0
nodes = store.find('whisper_finder.foo')
self.assertEqual(len(list(nodes)), 2)
self.assertEqual(self._listdir_counter, 2)
self._listdir_counter = 0
nodes = store.find('whisper_finder.foo.bar.baz')
self.assertEqual(len(list(nodes)), 1)
self.assertEqual(self._listdir_counter, 4)
self._listdir_counter = 0
nodes = store.find('whisper_finder.*.ba?.{baz,foo}')
self.assertEqual(len(list(nodes)), 2)
self.assertEqual(self._listdir_counter, 6)
finally:
os.listdir = self._original_listdir
|
import random
import time
from . import TestCase
from graphite_api.intervals import Interval, IntervalSet
from graphite_api.node import LeafNode, BranchNode
from graphite_api.storage import Store
class FinderTest(TestCase):
def test_custom_finder(self):
store = Store([DummyFinder()])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
def test_multi_finder(self):
store = Store([DummyFinder(), DummyFinder()])
nodes = list(store.find("foo"))
self.assertEqual(len(nodes), 1)
self.assertEqual(nodes[0].path, 'foo')
nodes = list(store.find('bar.*'))
self.assertEqual(len(nodes), 10)
node = nodes[0]
self.assertEqual(node.path.split('.')[0], 'bar')
time_info, series = node.fetch(100, 200)
self.assertEqual(time_info, (100, 200, 10))
self.assertEqual(len(series), 10)
class DummyReader(object):
__slots__ = ('path',)
def __init__(self, path):
self.path = path
def fetch(self, start_time, end_time):
npoints = (end_time - start_time) // 10
return (start_time, end_time, 10), [
random.choice([None, 1, 2, 3]) for i in range(npoints)
]
def get_intervals(self):
return IntervalSet([Interval(time.time() - 3600, time.time())])
class DummyFinder(object):
def find_nodes(self, query):
if query.pattern == 'foo':
yield BranchNode('foo')
elif query.pattern == 'bar.*':
for i in range(10):
path = 'bar.{0}'.format(i)
yield LeafNode(path, DummyReader(path))
|
Python
| 0
|
9e2bdfece7f5cd9e02b15e9fe11c432e10a12418
|
update api tests
|
test/test_naarad_api.py
|
test/test_naarad_api.py
|
# coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");?you may not use this file except in compliance with the License.?You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software?distributed under the License is distributed on an "AS IS" BASIS,?WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import os
import sys
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')))
from naarad import Naarad
import naarad.naarad_constants as CONSTANTS
naarad_obj = None
def setup_module():
global naarad_obj
naarad_obj = Naarad()
def test_naarad_start_stop():
"""
:return: None
"""
examples_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'examples')
global naarad_obj
test_id_1 = naarad_obj.signal_start(os.path.join(os.path.join(examples_directory, 'conf'),'config-gc'))
time.sleep(60)
naarad_obj.signal_stop(test_id_1)
test_id_2 = naarad_obj.signal_start(os.path.join(os.path.join(examples_directory, 'conf'),'config-gc'))
time.sleep(60)
naarad_obj.signal_stop(test_id_2)
if naarad_obj.analyze(os.path.join(examples_directory,'logs'), 'test_api_temp') != CONSTANTS.OK :
naarad_obj.get_failed_analyses()
naarad_obj.get_sla_data(test_id_1)
naarad_obj.get_stats_data(test_id_1)
naarad_obj.get_sla_data(test_id_2)
naarad_obj.get_stats_data(test_id_2)
if naarad_obj.diff(test_id_1, test_id_2, None) != CONSTANTS.OK:
print 'Error encountered during diff'
if naarad_obj.diff_reports_by_location('test_api_temp/0', 'test_api_temp/1', 'test_api_temp/diff_location', None):
print 'Error encountered during diff'
print 'Please inspect the generated reports manually'
setup_module()
test_naarad_start_stop()
|
# coding=utf-8
"""
© 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");?you may not use this file except in compliance with the License.?You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software?distributed under the License is distributed on an "AS IS" BASIS,?WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""
import os
import sys
import time
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'src')))
from naarad import Naarad
import naarad.naarad_constants as CONSTANTS
naarad_obj = None
def setup_module():
global naarad_obj
naarad_obj = Naarad()
def test_naarad_start_stop():
"""
:return: None
"""
examples_directory = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'examples')
global naarad_obj
test_id_1 = naarad_obj.signal_start(os.path.join(os.path.join(examples_directory, 'conf'),'config-gc'))
time.sleep(60)
naarad_obj.signal_stop(test_id_1)
test_id_2 = naarad_obj.signal_start(os.path.join(os.path.join(examples_directory, 'conf'),'config-gc'))
time.sleep(60)
naarad_obj.signal_stop(test_id_2)
if naarad_obj.analyze(os.path.join(examples_directory,'logs'), 'test_api_temp') != CONSTANTS.OK :
naarad_obj.get_failed_analyses()
naarad_obj.get_sla_data(test_id_1)
naarad_obj.get_stats_data(test_id_1)
naarad_obj.get_sla_data(test_id_2)
naarad_obj.get_stats_data(test_id_2)
if naarad_obj.diff(test_id_1, test_id_2, None) != CONSTANTS.OK:
print 'Error encountered during diff'
print 'Please inspect the generated reports manually'
setup_module()
test_naarad_start_stop()
|
Python
| 0.000001
|
628a1418e64ba45890daee2d85223277f3a11a54
|
insert asset_specific_data into deck_spawn test
|
test/test_peerassets.py
|
test/test_peerassets.py
|
import pytest
import pypeerassets as pa
@pytest.mark.parametrize("prov", [pa.Explorer, pa.Cryptoid])
def test_find_deck(prov):
provider = prov(network="tppc")
deck = pa.find_deck(provider, 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43', 1)
assert deck.__dict__ == {'asset_specific_data': b'',
'id': 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43',
'issue_mode': 4,
'issue_time': 1488840533,
'issuer': 'msYThv5bf7KjhHT1Cj5D7Y1tofyhq9vhWM',
'name': 'hopium_v2',
'network': 'peercoin-testnet',
'number_of_decimals': 2,
'production': True,
'testnet': True,
'version': 1
}
@pytest.mark.parametrize("prov", [pa.Explorer, pa.Cryptoid])
def test_find_cards(prov):
provider = prov(network="tppc")
deck = pa.find_deck(provider, 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43', 1)
cards = pa.find_card_transfers(provider, deck)
assert cards
assert isinstance(next(cards)[0], pa.CardTransfer)
def test_deck_spawn():
provider = pa.Explorer(network='tppc')
inputs = provider.select_inputs("mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L", 0.02)
change_address = "mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L"
deck = pa.Deck(name="just-testing.", number_of_decimals=1, issue_mode=1,
network='tppc', production=True, version=1,
asset_specific_data='https://talk.peercoin.net/')
deck_spawn = pa.deck_spawn(provider, deck, inputs, change_address)
assert isinstance(deck_spawn, pa.Transaction)
def test_card_transfer():
provider = pa.Explorer(network='tppc')
address = "mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L"
inputs = provider.select_inputs(address, 0.02)
change_address = address
deck = pa.find_deck(provider, '078f41c257642a89ade91e52fd484c141b11eda068435c0e34569a5dfcce7915', 1, True)
card = pa.CardTransfer(deck=deck,
receiver=['n12h8P5LrVXozfhEQEqg8SFUmVKtphBetj',
'n422r6tcJ5eofjsmRvF6TcBMigmGbY5P7E'],
amount=[1, 2]
)
card_transfer = pa.card_transfer(provider, card, inputs, change_address)
assert isinstance(card_transfer, pa.Transaction)
|
import pytest
import pypeerassets as pa
@pytest.mark.parametrize("prov", [pa.Explorer, pa.Cryptoid])
def test_find_deck(prov):
provider = prov(network="tppc")
deck = pa.find_deck(provider, 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43', 1)
assert deck.__dict__ == {'asset_specific_data': b'',
'id': 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43',
'issue_mode': 4,
'issue_time': 1488840533,
'issuer': 'msYThv5bf7KjhHT1Cj5D7Y1tofyhq9vhWM',
'name': 'hopium_v2',
'network': 'peercoin-testnet',
'number_of_decimals': 2,
'production': True,
'testnet': True,
'version': 1
}
@pytest.mark.parametrize("prov", [pa.Explorer, pa.Cryptoid])
def test_find_cards(prov):
provider = prov(network="tppc")
deck = pa.find_deck(provider, 'b6a95f94fef093ee9009b04a09ecb9cb5cba20ab6f13fe0926aeb27b8671df43', 1)
cards = pa.find_card_transfers(provider, deck)
assert cards
assert isinstance(next(cards)[0], pa.CardTransfer)
def test_deck_spawn():
provider = pa.Explorer(network='tppc')
inputs = provider.select_inputs("mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L", 0.02)
change_address = "mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L"
deck = pa.Deck(name="just-testing.", number_of_decimals=1, issue_mode=1,
network='tppc', production=True, version=1)
deck_spawn = pa.deck_spawn(provider, deck, inputs, change_address)
assert isinstance(deck_spawn, pa.Transaction)
def test_card_transfer():
provider = pa.Explorer(network='tppc')
address = "mthKQHpr7zUbMvLcj8GHs33mVcf91DtN6L"
inputs = provider.select_inputs(address, 0.02)
change_address = address
deck = pa.find_deck(provider, '078f41c257642a89ade91e52fd484c141b11eda068435c0e34569a5dfcce7915', 1, True)
card = pa.CardTransfer(deck=deck,
receiver=['n12h8P5LrVXozfhEQEqg8SFUmVKtphBetj',
'n422r6tcJ5eofjsmRvF6TcBMigmGbY5P7E'],
amount=[1, 2]
)
card_transfer = pa.card_transfer(provider, card, inputs, change_address)
assert isinstance(card_transfer, pa.Transaction)
|
Python
| 0.000001
|
678cfd5d1acb1d1c3ed031deb8afb181c661650e
|
Refactor github_hook(): split github_pull_request() and update_project() into separate functions
|
wrapweb/hook.py
|
wrapweb/hook.py
|
# Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
# GitHub secret key support
import hashlib
import hmac
from mesonwrap import wrapupdater
from wrapweb import jsonstatus
from wrapweb.app import APP
RESTRICTED_PROJECTS = [
'mesonbuild/meson',
'mesonbuild/wrapweb',
'mesonbuild/meson-ci',
]
def get_wrapupdater():
db = getattr(flask.g, '_wrapupdater', None)
if db is None:
dbdir = APP.config['DB_DIRECTORY']
db = flask.g._wrapupdater = wrapupdater.WrapUpdater(dbdir)
return db
def update_project(project, repo_url, branch):
if branch == 'master':
return jsonstatus.error(406, 'Will not update master branch')
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
get_wrapupdater().update_db(project, repo_url, branch)
return jsonstatus.ok()
except Exception as e:
return jsonstatus.error(500, 'Wrap generation failed. %s' % e)
def github_pull_request():
d = flask.request.get_json()
base = d['pull_request']['base']
full_repo_name = base['repo']['full_name']
if not full_repo_name.startswith('mesonbuild/'):
return jsonstatus.error(406, 'Not a mesonbuild project')
if full_repo_name in RESTRICTED_PROJECTS:
return jsonstatus.error(406, "We don't run hook for "
"restricted project names")
if d['action'] == 'closed' and d['pull_request']['merged']:
return update_project(project=base['repo']['name'],
repo_url=base['repo']['clone_url'],
branch=base['ref'])
else:
APP.logger.warning(flask.request.data)
return jsonstatus.error(
417, 'We got hook which is not merged pull request')
@APP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return jsonstatus.error(401, 'Not a GitHub hook')
signature = ('sha1=%s' %
hmac.new(APP.config['SECRET_KEY'].encode('utf-8'),
flask.request.data, hashlib.sha1).hexdigest())
if headers.get('X-Hub-Signature') != signature:
return jsonstatus.error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') == 'pull_request':
return github_pull_request()
else:
return jsonstatus.error(405, 'Not a Pull Request hook')
|
# Copyright 2015 The Meson development team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import flask
# GitHub secret key support
import hashlib
import hmac
from mesonwrap import wrapupdater
from wrapweb import jsonstatus
from wrapweb.app import APP
RESTRICTED_PROJECTS = [
'mesonbuild/meson',
'mesonbuild/wrapweb',
'mesonbuild/meson-ci',
]
def get_wrapupdater():
db = getattr(flask.g, '_wrapupdater', None)
if db is None:
dbdir = APP.config['DB_DIRECTORY']
db = flask.g._wrapupdater = wrapupdater.WrapUpdater(dbdir)
return db
@APP.route('/github-hook', methods=['POST'])
def github_hook():
headers = flask.request.headers
if not headers.get('User-Agent').startswith('GitHub-Hookshot/'):
return jsonstatus.error(401, 'Not a GitHub hook')
signature = ('sha1=%s' %
hmac.new(APP.config['SECRET_KEY'].encode('utf-8'),
flask.request.data, hashlib.sha1).hexdigest())
if headers.get('X-Hub-Signature') != signature:
return jsonstatus.error(401, 'Not a valid secret key')
if headers.get('X-Github-Event') != 'pull_request':
return jsonstatus.error(405, 'Not a Pull Request hook')
d = flask.request.get_json()
base = d['pull_request']['base']
if not base['repo']['full_name'].startswith('mesonbuild/'):
return jsonstatus.error(406, 'Not a mesonbuild project')
if base['repo']['full_name'] in RESTRICTED_PROJECTS:
return jsonstatus.error(406, "We don't run hook for "
"restricted project names")
if d['action'] == 'closed' and d['pull_request']['merged']:
project = base['repo']['name']
branch = base['ref']
repo_url = base['repo']['clone_url']
if branch == 'master':
return jsonstatus.error(406, 'No bananas for you')
db_updater = get_wrapupdater()
# FIXME, should launch in the background instead. This will now block
# until branching is finished.
try:
db_updater.update_db(project, repo_url, branch)
return jsonstatus.ok()
except Exception as e:
return jsonstatus.error(500, 'Wrap generation failed. %s' % e)
else:
APP.logger.warning(flask.request.data)
return jsonstatus.error(
417, 'We got hook which is not merged pull request')
|
Python
| 0.000147
|
0085e36491aa14f80c8979ee25c1ad0039bc3f00
|
Extend the 'test_parse_to_audio_requirement_bug' test case
|
tests/test_parsers.py
|
tests/test_parsers.py
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Contains test cases for the parsers module."""
from __future__ import unicode_literals
import sys
import os.path
import unittest
PATH = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(PATH)))
try:
from youtube_dl_gui.parsers import OptionsParser
except ImportError as error:
print error
sys.exit(1)
class TestParse(unittest.TestCase):
"""Test case for OptionsParser parse method."""
def test_parse_to_audio_requirement_bug(self):
"""Test case for the 'to_audio' requirement."""
options_dict = { # Extracted from youtube-dlG settings.json
'keep_video': False,
'opts_win_size': (640, 490),
'open_dl_dir': False,
'second_video_format': '0',
'native_hls': False,
'write_subs': False,
'workers_number': 3,
'max_downloads': 0,
'max_filesize': 0,
'youtube_dl_debug': False,
'shutdown': False,
'selected_format': 'mp3',
'write_all_subs': False,
'enable_log': True,
'embed_thumbnail': True,
'audio_quality': '9',
'subs_lang': 'en',
'audio_format': 'mp3',
'restrict_filenames': False,
'min_filesize_unit': '',
'selected_audio_formats': ['mp3', 'm4a', 'vorbis'],
'selected_video_formats': ['webm', 'mp4'],
'save_path': '/home/user/Workplace/test/youtube',
'output_template': '%(uploader)s/%(title)s.%(ext)s',
'show_completion_popup': True,
'locale_name': 'en_US',
'to_audio': False,
'confirm_deletion': True,
'min_filesize': 0,
'save_path_dirs': ['/home/user/Downloads', '/home/user/Desktop', '/home/user/Videos', '/home/user/Music', '/home/user/Workplace/test/youtube'],
'sudo_password': '',
'video_password': '',
'output_format': 1,
'embed_subs': False,
'write_auto_subs': False,
'video_format': '0',
'confirm_exit': False,
'referer': '',
'proxy': '',
'add_metadata': False,
'ignore_errors': False,
'log_time': True,
'password': '',
'playlist_end': 0,
'write_description': False,
'retries': 10,
'cmd_args': '',
'write_thumbnail': False,
'playlist_start': 1,
'nomtime': False,
'write_info': False,
'username': '',
'main_win_size': (930, 560),
'user_agent': '',
'max_filesize_unit': '',
'ignore_config': False,
'youtubedl_path': '/home/user/.config/youtube-dlg'
}
expected_cmd_list = ["--newline",
"-x",
"--audio-format",
"mp3",
"--embed-thumbnail",
"--audio-quality",
"9",
"-o",
"/home/user/Workplace/test/youtube/%(title)s.%(ext)s"]
options_parser = OptionsParser()
self.assertItemsEqual(options_parser.parse(options_dict), expected_cmd_list)
# Setting 'to_audio' to True should return the same results
# since the '-x' flag is already set on audio extraction
options_dict["to_audio"] = True
self.assertItemsEqual(options_parser.parse(options_dict), expected_cmd_list)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
"""Contains test cases for the parsers module."""
from __future__ import unicode_literals
import sys
import os.path
import unittest
PATH = os.path.realpath(os.path.abspath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(PATH)))
try:
from youtube_dl_gui.parsers import OptionsParser
except ImportError as error:
print error
sys.exit(1)
class TestParse(unittest.TestCase):
"""Test case for OptionsParser parse method."""
def test_parse_to_audio_requirement_bug(self):
"""Test case for the 'to_audio' requirement."""
options_dict = { # Extracted from youtube-dlG settings.json
'keep_video': False,
'opts_win_size': (640, 490),
'open_dl_dir': False,
'second_video_format': '0',
'native_hls': False,
'write_subs': False,
'workers_number': 3,
'max_downloads': 0,
'max_filesize': 0,
'youtube_dl_debug': False,
'shutdown': False,
'selected_format': 'mp3',
'write_all_subs': False,
'enable_log': True,
'embed_thumbnail': True,
'audio_quality': '9',
'subs_lang': 'en',
'audio_format': 'mp3',
'restrict_filenames': False,
'min_filesize_unit': '',
'selected_audio_formats': ['mp3', 'm4a', 'vorbis'],
'selected_video_formats': ['webm', 'mp4'],
'save_path': '/home/user/Workplace/test/youtube',
'output_template': '%(uploader)s/%(title)s.%(ext)s',
'show_completion_popup': True,
'locale_name': 'en_US',
'to_audio': False,
'confirm_deletion': True,
'min_filesize': 0,
'save_path_dirs': ['/home/user/Downloads', '/home/user/Desktop', '/home/user/Videos', '/home/user/Music', '/home/user/Workplace/test/youtube'],
'sudo_password': '',
'video_password': '',
'output_format': 1,
'embed_subs': False,
'write_auto_subs': False,
'video_format': '0',
'confirm_exit': False,
'referer': '',
'proxy': '',
'add_metadata': False,
'ignore_errors': False,
'log_time': True,
'password': '',
'playlist_end': 0,
'write_description': False,
'retries': 10,
'cmd_args': '',
'write_thumbnail': False,
'playlist_start': 1,
'nomtime': False,
'write_info': False,
'username': '',
'main_win_size': (930, 560),
'user_agent': '',
'max_filesize_unit': '',
'ignore_config': False,
'youtubedl_path': '/home/user/.config/youtube-dlg'
}
expected_cmd_list = ["--newline",
"-x",
"--audio-format",
"mp3",
"--embed-thumbnail",
"--audio-quality",
"9",
"-o",
"/home/user/Workplace/test/youtube/%(title)s.%(ext)s"]
options_parser = OptionsParser()
self.assertItemsEqual(options_parser.parse(options_dict), expected_cmd_list)
def main():
unittest.main()
if __name__ == '__main__':
main()
|
Python
| 0.999918
|
3c811b3f0a0fd974cdac2e53dfe0a6cb1ee44e55
|
update process tests, move to using example_resume.yml
|
tests/test_process.py
|
tests/test_process.py
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Christopher C. Strelioff <chris.strelioff@gmail.com>
#
# Distributed under terms of the MIT license.
"""test_process.py
Test (non-command line) methods in the process.py module.
"""
import unittest
import os
import tempfile
import shutil
import yaml
from resumepy import process_html
from resumepy import process_pdf
from resumepy import process_text
from resumepy import resumepy_path
class ResumepyProcessTest(unittest.TestCase):
"""Test the elements of process.py"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.cwd = os.getcwd()
os.chdir(self.tempdir)
self.yaml = os.path.join(resumepy_path, 'data', 'examples', 'example_resume.yml')
with open(self.yaml) as f:
self.resume = yaml.load(f)
def tearDown(self):
shutil.rmtree(self.tempdir)
os.chdir(self.cwd)
def test_process_html_created(self):
"""
process: test_process_html_created()
"""
process_html(self.resume,
os.path.join(resumepy_path, 'data', 'templates'))
self.assertTrue(os.path.exists('build/html/resume.html'))
def test_process_pdf_bad(self):
"""
process: test_process_pdf_bad()
"""
with self.assertRaises(Exception):
process_pdf(self.resume_bad,
os.path.join(resumepy_path, 'data', 'templates'),
'template.tex')
def test_process_pdf_created(self):
"""
process: test_process_pdf_created()
"""
process_pdf(self.resume,
os.path.join(resumepy_path, 'data', 'templates'),
'template.tex')
self.assertTrue(os.path.exists('build/pdf/resume.pdf'))
def test_process_text_created(self):
"""
process: test_process_text_created()
"""
process_text(self.resume,
os.path.join(resumepy_path, 'data', 'templates'),
'template.txt')
self.assertTrue(os.path.exists('build/text/resume.txt'))
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2014 Christopher C. Strelioff <chris.strelioff@gmail.com>
#
# Distributed under terms of the MIT license.
"""test_process.py
Test (non-command line) methods in the process.py module.
"""
import unittest
import os
import tempfile
import shutil
import yaml
from resumepy import process_html
from resumepy import process_pdf
from resumepy import process_text
from resumepy import resumepy_path
class ResumepyProcessTest(unittest.TestCase):
"""Test the elements of process.py"""
def setUp(self):
self.tempdir = tempfile.mkdtemp()
self.cwd = os.getcwd()
self.yaml = """contact:
name: Jane Doe
address: 555 Beverly Hills Blvd.
city: Beverly Hills
state: CA
zip: 90210
email: jane@example.com
phone: 555.555.5555
jobtitle: Astronaut
website:
label: mysite
link: 'http://mysite.com'
objective:
Reclaim Mars from invaders.
work:
- organization: European Space Agency
start: Fall 2056
stop: Spring 2093
position: Captain
location: Space
notes:
- Destroyed alien battleship
- Recovered from engine failure
- organization: McDonald's
start: July 2012
stop: January 2014
position: Assistant to the Regional Manager
location: Detriot
notes:
- Record for the fastest cheeseburger made
- Employee of the year
- Helped lead an amazing team
"""
self.resume = yaml.load(self.yaml)
# put bad character (for LaTeX) if yaml
# the `&` in the email field
self.yaml_bad = """contact:
name: Jane Doe
address: 555 Beverly Hills Blvd.
city: Beverly Hills
state: CA
zip: 90210
email: jane@example.com & jand@another.net
phone: 555.555.5555
jobtitle: Astronaut
objective:
Reclaim Mars from invaders.
work:
- organization: European Space Agency
start: Fall 2056
stop: Spring 2093
position: Captain
location: Space
notes:
- Destroyed alien battleship
- Recovered from engine failure
- organization: McDonald's
start: July 2012
stop: January 2014
position: Assistant to the Regional Manager
location: Detriot
notes:
- Record for the fastest cheeseburger made
- Employee of the year
- Helped lead an amazing team
"""
self.resume_bad = yaml.load(self.yaml_bad)
os.chdir(self.tempdir)
def tearDown(self):
shutil.rmtree(self.tempdir)
os.chdir(self.cwd)
def test_process_html_created(self):
"""* test_process_html_created -- build/html/resume.html created"""
process_html(self.resume,
os.path.join(resumepy_path, 'data', 'templates'))
self.assertTrue(os.path.exists('build/html/resume.html'))
def test_process_pdf_bad(self):
"""* test_process_pdf_bad -- bad LaTeX character"""
with self.assertRaises(Exception):
process_pdf(self.resume_bad,
os.path.join(resumepy_path, 'data', 'templates'),
'template.tex')
def test_process_pdf_created(self):
"""* test_process_pdf_created -- build/pdf/resume.pdf created"""
process_pdf(self.resume,
os.path.join(resumepy_path, 'data', 'templates'),
'template.tex')
self.assertTrue(os.path.exists('build/pdf/resume.pdf'))
def test_process_text_created(self):
"""* test_process_text_created -- build/pdf/resume.txt created"""
process_text(self.resume,
os.path.join(resumepy_path, 'data', 'templates'))
self.assertTrue(os.path.exists('build/text/resume.txt'))
|
Python
| 0
|
ee1532cc226987904666eeb0bda61445455d04e3
|
Increase test timeout
|
tests/test_run_app.py
|
tests/test_run_app.py
|
import ssl
from unittest import mock
from aiohttp import web
def test_run_app_http(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.05, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_https(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.05, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
ssl_context = ssl.create_default_context()
web.run_app(app, ssl_context=ssl_context, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8443,
ssl=ssl_context, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_nondefault_host_port(loop, unused_port, mocker):
port = unused_port()
host = 'localhost'
mocker.spy(loop, 'create_server')
loop.call_later(0.05, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, host=host, port=port, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, host, port,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_custom_backlog(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.05, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, backlog=10, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=10)
app.startup.assert_called_once_with()
|
import ssl
from unittest import mock
from aiohttp import web
def test_run_app_http(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.02, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_https(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.02, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
ssl_context = ssl.create_default_context()
web.run_app(app, ssl_context=ssl_context, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8443,
ssl=ssl_context, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_nondefault_host_port(loop, unused_port, mocker):
port = unused_port()
host = 'localhost'
mocker.spy(loop, 'create_server')
loop.call_later(0.02, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, host=host, port=port, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, host, port,
ssl=None, backlog=128)
app.startup.assert_called_once_with()
def test_run_app_custom_backlog(loop, mocker):
mocker.spy(loop, 'create_server')
loop.call_later(0.02, loop.stop)
app = web.Application(loop=loop)
mocker.spy(app, 'startup')
web.run_app(app, backlog=10, print=lambda *args: None)
assert loop.is_closed()
loop.create_server.assert_called_with(mock.ANY, '0.0.0.0', 8080,
ssl=None, backlog=10)
app.startup.assert_called_once_with()
|
Python
| 0.000001
|
e5b3de7ef4b068d1ce01e8fc9aec59b9182d8662
|
fix error in wizard tests
|
tests/test_wizards.py
|
tests/test_wizards.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import sys
from distutils.version import LooseVersion
import cms
from .base import BaseTest
try:
from unittest import skipIf
except ImportError:
from unittest2 import skipIf
class WizardTest(BaseTest):
def setUp(self):
try:
from cms.wizards.wizard_pool import wizard_pool
delete = [
'djangocms_blog',
'djangocms_blog.cms_wizards',
]
for module in delete:
if module in sys.modules:
del sys.modules[module]
wizard_pool._reset()
except ImportError:
# Not in django CMS 3.2+, no cleanup needed
pass
@skipIf(LooseVersion(cms.__version__) < LooseVersion('3.2'),
reason='Wizards not available for django CMS < 3.2')
def test_wizard(self):
"""
Test that Blog wizard is present and contains all items
"""
from cms.wizards.wizard_pool import wizard_pool
self.get_pages()
titles = [entry.title for entry in wizard_pool.get_entries()]
self.assertTrue('New Blog' in titles)
self.assertTrue('New Article' in titles)
@skipIf(LooseVersion(cms.__version__) < LooseVersion('3.2'),
reason='Wizards not available for django CMS < 3.2')
def test_wizard_init(self):
from cms.utils.permissions import current_user
from cms.wizards.wizard_pool import wizard_pool
from djangocms_blog.models import Post
self.get_pages()
with current_user(self.user_staff):
wizs = [entry for entry in wizard_pool.get_entries() if entry.model == Post]
for index, wiz in enumerate(wizs):
app_config = self.app_config_1.pk if wiz.title == 'New Blog' else self.app_config_2.pk
form = wiz.form()
self.assertTrue(form.initial.get('app_config', False), app_config)
self.assertTrue(form.fields['app_config'].widget.attrs['disabled'])
form = wiz.form(data={
'1-title': 'title{0}'.format(index),
'1-abstract': 'abstract{0}'.format(index),
'1-categories': [self.category_1.pk],
}, prefix=1)
self.assertEqual(form.default_appconfig, app_config)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['app_config'].pk, app_config)
instance = form.save()
self.assertEqual(instance.author, self.user_staff)
with self.settings(BLOG_AUTHOR_DEFAULT='normal'):
for index, wiz in enumerate(wizs):
app_config = self.app_config_1.pk if wiz.title == 'New Blog' else self.app_config_2.pk
form = wiz.form(data={
'1-title': 'title-2{0}'.format(index),
'1-abstract': 'abstract-2{0}'.format(index),
'1-categories': [self.category_1.pk],
}, prefix=1)
self.assertEqual(form.default_appconfig, app_config)
self.assertTrue(form.is_valid())
self.assertEqual(form.cleaned_data['app_config'].pk, app_config)
instance = form.save()
self.assertEqual(instance.author, self.user_normal)
def test_wizard_import(self):
# The following import should not fail in any django CMS version
from djangocms_blog import cms_wizards # NOQA
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
import sys
from distutils.version import LooseVersion
import cms
from .base import BaseTest
try:
from unittest import skipIf
except ImportError:
from unittest2 import skipIf
class WizardTest(BaseTest):
def setUp(self):
try:
from cms.wizards.wizard_pool import wizard_pool
delete = [
'djangocms_blog',
'djangocms_blog.cms_wizards',
]
for module in delete:
if module in sys.modules:
del sys.modules[module]
wizard_pool._reset()
except ImportError:
# Not in django CMS 3.2+, no cleanup needed
pass
@skipIf(LooseVersion(cms.__version__) < LooseVersion('3.2'),
reason='Wizards not available for django CMS < 3.2')
def test_wizard(self):
"""
Test that Blog wizard is present and contains all items
"""
from cms.wizards.wizard_pool import wizard_pool
self.get_pages()
titles = [entry.title for entry in wizard_pool.get_entries()]
self.assertTrue('New Blog' in titles)
self.assertTrue('New Article' in titles)
@skipIf(LooseVersion(cms.__version__) < LooseVersion('3.2'),
reason='Wizards not available for django CMS < 3.2')
def test_wizard_init(self):
from cms.utils.permissions import current_user
from cms.wizards.wizard_pool import wizard_pool
from djangocms_blog.models import Post
self.get_pages()
with current_user(self.user_staff):
wizs = [entry for entry in wizard_pool.get_entries() if entry.model == Post]
for index, wiz in enumerate(wizs):
app_config = self.app_config_1.pk if wiz.title == 'New Blog' else self.app_config_2.pk
form = wiz.form()
self.assertTrue(form.initial.get('app_config', False), app_config)
self.assertTrue(form.fields['app_config'].widget.attrs['disabled'])
form = wiz.form(data={
'1-title': 'title{0}'.format(index),
'1-abstract': 'abstract{0}'.format(index),
'1-categories': [self.category_1.pk],
}, prefix=1)
self.assertEqual(form.default_appconfig, app_config)
self.assertTrue(form.is_valid())
self.assertTrue(form.cleaned_data['app_config'], app_config)
instance = form.save()
self.assertEqual(instance.author, self.user_staff)
with self.settings(BLOG_AUTHOR_DEFAULT='normal'):
for index, wiz in enumerate(wizs):
app_config = self.app_config_1.pk if wiz.title == 'New Blog' else self.app_config_2.pk
form = wiz.form(data={
'1-title': 'title-2{0}'.format(index),
'1-abstract': 'abstract-2{0}'.format(index),
'1-categories': [self.category_1.pk],
}, prefix=1)
self.assertEqual(form.default_appconfig, app_config)
self.assertTrue(form.is_valid())
self.assertTrue(form.cleaned_data['app_config'], app_config)
instance = form.save()
self.assertEqual(instance.author, self.user_normal)
def test_wizard_import(self):
# The following import should not fail in any django CMS version
from djangocms_blog import cms_wizards # NOQA
|
Python
| 0.000001
|
d8b322439a5fdaf31ec52dc7c2a2ff9e18c12316
|
solve import error on install magpie
|
magpie/__init__.py
|
magpie/__init__.py
|
# -*- coding: utf-8 -*-
import logging
import sys
LOGGER = logging.getLogger(__name__)
def includeme(config):
# import needs to be here, otherwise ImportError happens during setup.py install (modules not yet installed)
from magpie import constants
LOGGER.info("Adding MAGPIE_MODULE_DIR='{}' to path.".format(constants.MAGPIE_MODULE_DIR))
sys.path.insert(0, constants.MAGPIE_MODULE_DIR)
# include magpie components (all the file which define includeme)
config.include('cornice')
config.include('cornice_swagger')
config.include('pyramid_chameleon')
config.include('pyramid_mako')
config.include('magpie.definitions')
config.include('magpie.api')
config.include('magpie.db')
config.include('magpie.ui')
|
# -*- coding: utf-8 -*-
from magpie import constants
import logging
import sys
LOGGER = logging.getLogger(__name__)
def includeme(config):
LOGGER.info("Adding MAGPIE_MODULE_DIR='{}' to path.".format(constants.MAGPIE_MODULE_DIR))
sys.path.insert(0, constants.MAGPIE_MODULE_DIR)
# include magpie components (all the file which define includeme)
config.include('cornice')
config.include('cornice_swagger')
config.include('pyramid_chameleon')
config.include('pyramid_mako')
config.include('magpie.definitions')
config.include('magpie.api')
config.include('magpie.db')
config.include('magpie.ui')
|
Python
| 0.000006
|
5dfcd4ea8633a6bc658cccd654fce2cc7c217269
|
Add helpful message to end of installer.
|
nbdiff/install.py
|
nbdiff/install.py
|
from __future__ import print_function
from . import __path__ as NBDIFF_PATH
import subprocess
import re
import os
import shutil
import sys
def install():
profile_name = 'nbdiff'
create_cmd = ['ipython', 'profile', 'create', profile_name]
message = subprocess.Popen(create_cmd, stderr=subprocess.PIPE)
message_str = message.stderr.read()
re_msgline = \
re.compile(r'^.ProfileCre.*u\'(?P<profilepath>.*)ipython_config\.py.$')
profile_paths = [
re_msgline.match(line).groups()[0]
for line in message_str.splitlines()
if re_msgline.match(line)
]
if len(profile_paths) == 0:
sys.stderr.write(
"It looks like creating the ipython profile "
"didn't work. Maybe you've already installed it?\n"
)
sys.exit(-1)
profile_path = profile_paths[0]
extension_copy_from = os.path.join(NBDIFF_PATH[0], 'extension/static')
extension_copy_dest = os.path.join(profile_path, 'static')
shutil.copytree(extension_copy_from, extension_copy_dest)
print("Finished installing NBDiff extension in profile `nbdiff`.")
|
from . import __path__ as NBDIFF_PATH
import subprocess
import re
import os
import shutil
import sys
def install():
profile_name = 'nbdiff'
create_cmd = ['ipython', 'profile', 'create', profile_name]
message = subprocess.Popen(create_cmd, stderr=subprocess.PIPE)
message_str = message.stderr.read()
re_msgline = \
re.compile(r'^.ProfileCre.*u\'(?P<profilepath>.*)ipython_config\.py.$')
profile_paths = [
re_msgline.match(line).groups()[0]
for line in message_str.splitlines()
if re_msgline.match(line)
]
if len(profile_paths) == 0:
sys.stderr.write("It looks like creating the ipython profile "
"didn't work. Maybe you've already installed it?\n")
sys.exit(-1)
profile_path = profile_paths[0]
extension_copy_from = os.path.join(NBDIFF_PATH[0], 'extension/static')
extension_copy_dest = os.path.join(profile_path, 'static')
print extension_copy_from
print extension_copy_dest
shutil.copytree(extension_copy_from, extension_copy_dest)
print profile_path
|
Python
| 0
|
390fa07c191d79290b1ef83c268f38431f68093a
|
Fix import in test client.
|
tests/clients/simple.py
|
tests/clients/simple.py
|
# -*- coding: utf-8 -*-
import os
import sys
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(base)
from base import jsonrpyc
class MyClass(object):
def one(self):
return 1
def twice(self, n):
return n * 2
def arglen(self, *args, **kwargs):
return len(args) + len(kwargs)
if __name__ == "__main__":
rpc = jsonrpyc.RPC(MyClass())
|
# -*- coding: utf-8 -*-
from base import jsonrpyc
class MyClass(object):
def one(self):
return 1
def twice(self, n):
return n * 2
def arglen(self, *args, **kwargs):
return len(args) + len(kwargs)
if __name__ == "__main__":
rpc = jsonrpyc.RPC(MyClass())
|
Python
| 0
|
3b706a6fb345d1b6c33c3ab8d438949fc35887d3
|
NotImplementedException should be called NotImplementedError
|
nbviewer/index.py
|
nbviewer/index.py
|
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
'''
Classes for Indexing Notebooks
'''
from tornado.log import app_log
import uuid
from elasticsearch import Elasticsearch
class Indexer():
def index_notebook(self, notebook_url, notebook_contents):
raise NotImplementedError("index_notebook not implemented")
class NoSearch(Indexer):
def __init__(self):
pass
def index_notebook(self, notebook_url, notebook_contents):
app_log.debug("Totally not indexing \"{}\"".format(notebook_url))
pass
class ElasticSearch(Indexer):
def __init__(self, host="127.0.0.1", port=9200):
self.elasticsearch = Elasticsearch([{'host':host, 'port':port}])
def index_notebook(self, notebook_url, notebook_contents, public=False):
notebook_url = notebook_url.encode('utf-8')
notebook_id = uuid.uuid5(uuid.NAMESPACE_URL, notebook_url)
# Notebooks API Model
# https://github.com/ipython/ipython/wiki/IPEP-16%3A-Notebook-multi-directory-dashboard-and-URL-mapping#notebooks-api
body = {
"content": notebook_contents,
"public": public
}
resp = self.elasticsearch.index(index='notebooks',
doc_type='ipynb',
body=body,
id=notebook_id.hex)
if(resp['created']):
app_log.info("Created new indexed notebook={}, public={}".format(notebook_url, public))
else:
app_log.info("Indexing old notebook={}, public={}".format(notebook_url, public, resp))
|
#-----------------------------------------------------------------------------
# Copyright (C) 2014 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
'''
Classes for Indexing Notebooks
'''
from tornado.log import app_log
import uuid
from elasticsearch import Elasticsearch
class Indexer():
def index_notebook(self, notebook_url, notebook_contents):
raise NotImplementedException("index_notebook not implemented")
class NoSearch(Indexer):
def __init__(self):
pass
def index_notebook(self, notebook_url, notebook_contents):
app_log.debug("Totally not indexing \"{}\"".format(notebook_url))
pass
class ElasticSearch(Indexer):
def __init__(self, host="127.0.0.1", port=9200):
self.elasticsearch = Elasticsearch([{'host':host, 'port':port}])
def index_notebook(self, notebook_url, notebook_contents, public=False):
notebook_url = notebook_url.encode('utf-8')
notebook_id = uuid.uuid5(uuid.NAMESPACE_URL, notebook_url)
# Notebooks API Model
# https://github.com/ipython/ipython/wiki/IPEP-16%3A-Notebook-multi-directory-dashboard-and-URL-mapping#notebooks-api
body = {
"content": notebook_contents,
"public": public
}
resp = self.elasticsearch.index(index='notebooks',
doc_type='ipynb',
body=body,
id=notebook_id.hex)
if(resp['created']):
app_log.info("Created new indexed notebook={}, public={}".format(notebook_url, public))
else:
app_log.info("Indexing old notebook={}, public={}".format(notebook_url, public, resp))
|
Python
| 0.998718
|
7c6754a439f8fa1c7ebe5c12b9c51651c02c35c4
|
修改post参数,添加全局editor配置
|
manage/new_post.py
|
manage/new_post.py
|
import datetime
import json
import os.path
import re
import shutil
from pypinyin import lazy_pinyin
from common import file
from manage import get_excerpt
def get_name(nameinput):
name_raw = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+", "", nameinput)
namelist = lazy_pinyin(name_raw)
name = ""
for item in namelist:
name = name + "-" + item
return name[1:len(name)]
def new_post(name, title, filename, editor):
if len(name) == 0:
name = get_name(title)
if os.path.isfile(filename):
shutil.copyfile(filename, "./document/{0}.md".format(name))
else:
if editor is not None:
os.system("{0} ./document/{1}.md".format(editor, name))
excerpt = get_excerpt.get_excerpt("./document/{0}.md".format(name))
post_info = {"name": name, "title": title, "excerpt": excerpt, "time": str(datetime.date.today())}
if os.path.isfile("./config/page.json"):
page_list = json.loads(file.read_file("./config/page.json"))
else:
page_list = list()
page_list.insert(0, post_info)
file.write_file("./config/page.json", json.dumps(page_list, ensure_ascii=False))
def new_post_init(config_file=None, editor="None"):
if config_file is not None and os.path.exists(config_file):
config = json.loads(file.read_file(config_file))
title = config["title"]
name = config["name"]
filename = config["file"]
else:
title = input("Please enter the title of the article:")
name = input("Please enter the URL (Leave a blank use pinyin):")
filename = input("Please enter the file path to copy (blank or Non-existent will be new):")
if editor=="None":
system_info = json.loads(file.read_file("./config/system.json"))
if "Editor" in system_info:
editor=system_info["Editor"]
else:
editor=None
new_post(name, title, filename, editor)
print("Success!")
|
import datetime
import json
import os.path
import re
import shutil
from pypinyin import lazy_pinyin
from common import file
from manage import get_excerpt
def get_name(nameinput):
name_raw = re.sub("[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*()]+", "", nameinput)
namelist = lazy_pinyin(name_raw)
name = ""
for item in namelist:
name = name + "-" + item
return name[1:len(name)]
def new_post(name, title, filename, editor):
if len(name) == 0:
name = get_name(title)
if os.path.isfile(filename):
shutil.copyfile(filename, "./document/{0}.md".format(name))
else:
if editor is not None:
os.system("{0} ./document/{1}.md".format(editor, name))
excerpt = get_excerpt.get_excerpt("./document/{0}.md".format(name))
post_info = {"name": name, "title": title, "excerpt": excerpt, "time": str(datetime.date.today())}
if os.path.isfile("./config/page.json"):
page_list = json.loads(file.read_file("./config/page.json"))
else:
page_list = list()
page_list.insert(0, post_info)
file.write_file("./config/page.json", json.dumps(page_list, ensure_ascii=False))
def new_post_init(config_file=None, editor="vim"):
if config_file is not None and os.path.exists(config_file):
config = json.loads(file.read_file(config_file))
title = config["title"]
name = config["name"]
filename = config["file"]
else:
title = input("Please enter the title of the article:")
name = input("Please enter the URL (Leave a blank use pinyin):")
filename = input("Please enter the file path to copy (blank or Non-existent will be new):")
new_post(name, title, filename, editor)
print("Success!")
|
Python
| 0
|
7bd2bfa8deb59c97f7630ed10fe70fd7e8bd8587
|
Update dependency bazelbuild/bazel to latest version
|
third_party/bazel.bzl
|
third_party/bazel.bzl
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "50ce3f973cbc96a0326560a31b736a4f0ca8dc62"
bazel_sha256 = "977e63bacdec2cc29192ed52ea251915d4eda12c0cc666b4e71aade947404442"
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "d9ebac9c2bcca821902b86cdb5f1297790bba2f9"
bazel_sha256 = "f648383e43e4172a6787dcde60365091ff4dbced0485bbf9c4b515d5f2c96139"
|
Python
| 0.000007
|
73d0be7a432340b4ecd140ad1cc8792d3f049779
|
Use SelfAttribute instead of explicit lambda
|
tests/factories/user.py
|
tests/factories/user.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import factory
from factory.faker import Faker
from pycroft.model.user import User
from .base import BaseFactory
from .facilities import RoomFactory
from .finance import AccountFactory
class UserFactory(BaseFactory):
class Meta:
model = User
login = Faker('user_name')
name = Faker('name')
registered_at = Faker('date_time')
password = Faker('password')
email = Faker('email')
account = factory.SubFactory(AccountFactory, type="USER_ASSET")
room = factory.SubFactory(RoomFactory)
address = factory.SelfAttribute('room.address')
class UserWithHostFactory(UserFactory):
host = factory.RelatedFactory('tests.factories.host.HostFactory', 'owner')
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 The Pycroft Authors. See the AUTHORS file.
# This file is part of the Pycroft project and licensed under the terms of
# the Apache License, Version 2.0. See the LICENSE file for details.
import factory
from factory.faker import Faker
from pycroft.model.user import User
from .base import BaseFactory
from .facilities import RoomFactory
from .finance import AccountFactory
class UserFactory(BaseFactory):
class Meta:
model = User
login = Faker('user_name')
name = Faker('name')
registered_at = Faker('date_time')
password = Faker('password')
email = Faker('email')
account = factory.SubFactory(AccountFactory, type="USER_ASSET")
room = factory.SubFactory(RoomFactory)
address = factory.LazyAttribute(lambda o: o.room.address)
class UserWithHostFactory(UserFactory):
host = factory.RelatedFactory('tests.factories.host.HostFactory', 'owner')
|
Python
| 0
|
5c1a404353a0cdcd49610a21d7d19b79898ac7e3
|
make mpi example a little more verbose
|
tests/helloworld_mpi.py
|
tests/helloworld_mpi.py
|
#!/usr/bin/env python
# This is an example MPI4Py program that is used
# by different examples and tests.
import sys
import time
import traceback
from mpi4py import MPI
try :
print "start"
SLEEP = 10
name = MPI.Get_processor_name()
comm = MPI.COMM_WORLD
print "mpi rank %d/%d/%s" % (comm.rank, comm.size, name)
time.sleep(SLEEP)
comm.Barrier() # wait for everybody to synchronize here
except Exception as e :
traceback.print_exc ()
print "error : %s" % s
sys.exit (1)
finally :
print "done"
sys.exit (0)
|
#!/usr/bin/env python
# This is an example MPI4Py program that is used
# by different examples and tests.
from mpi4py import MPI
import time
SLEEP = 10
name = MPI.Get_processor_name()
comm = MPI.COMM_WORLD
print "mpi rank %d/%d/%s" % (comm.rank, comm.size, name)
time.sleep(SLEEP)
comm.Barrier() # wait for everybody to synchronize here
|
Python
| 0.000069
|
3bb6017897f9b8c859c2d3879c2e9d51b899f57c
|
Increase number of iterations for xor neural net
|
neuralnets/xor.py
|
neuralnets/xor.py
|
import numpy as np
from net import NeuralNet
net = NeuralNet(2, 1, 3, 1, 342047)
output_dot = True
inputs = np.array([[1,1],
[0,0],
[1,0],
[0,1]])
outputs = np.array([[0],
[0],
[1],
[1]])
for i in xrange(80000):
if i % 100 == 0 and output_dot:
open("/tmp/xor{:05d}graph".format(i), mode="w").write(net.output_dot((inputs,outputs)))
net.learn(inputs, outputs, 0.05)
print("trained")
print("error: {}".format(net.error(inputs, outputs)))
for inpt in inputs:
print(net.forward(inpt))
|
import numpy as np
from net import NeuralNet
net = NeuralNet(2, 1, 3, 1, 342047)
output_dot = True
inputs = np.array([[1,1],
[0,0],
[1,0],
[0,1]])
outputs = np.array([[0],
[0],
[1],
[1]])
for i in xrange(50000):
if i % 100 == 0 and output_dot:
open("/tmp/xor{:05d}graph".format(i), mode="w").write(net.output_dot((inputs,outputs)))
net.learn(inputs, outputs, 0.05)
print("trained")
print("error: {}".format(net.error(inputs, outputs)))
for inpt in inputs:
print(net.forward(inpt))
|
Python
| 0.000002
|
2c37ed091baf12e53885bfa06fdb835bb8de1218
|
Add Bitbucket to skipif marker reason
|
tests/skipif_markers.py
|
tests/skipif_markers.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
skipif_markers
--------------
Contains pytest skipif markers to be used in the suite.
"""
import pytest
import os
try:
os.environ[u'TRAVIS']
except KeyError:
travis = False
else:
travis = True
try:
os.environ[u'DISABLE_NETWORK_TESTS']
except KeyError:
no_network = False
else:
no_network = True
# For some reason pytest incorrectly uses the first reason text regardless of
# which condition matches. Using a unified message for now
# travis_reason = 'Works locally with tox but fails on Travis.'
# no_network_reason = 'Needs a network connection to GitHub.'
reason = (
'Fails on Travis or else there is no network connection to '
'GitHub/Bitbucket.'
)
skipif_travis = pytest.mark.skipif(travis, reason=reason)
skipif_no_network = pytest.mark.skipif(no_network, reason=reason)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
skipif_markers
--------------
Contains pytest skipif markers to be used in the suite.
"""
import pytest
import os
try:
os.environ[u'TRAVIS']
except KeyError:
travis = False
else:
travis = True
try:
os.environ[u'DISABLE_NETWORK_TESTS']
except KeyError:
no_network = False
else:
no_network = True
# For some reason pytest incorrectly uses the first reason text regardless of
# which condition matches. Using a unified message for now
# travis_reason = 'Works locally with tox but fails on Travis.'
# no_network_reason = 'Needs a network connection to GitHub.'
reason = 'Fails on Travis or else there is no network connection to GitHub'
skipif_travis = pytest.mark.skipif(travis, reason=reason)
skipif_no_network = pytest.mark.skipif(no_network, reason=reason)
|
Python
| 0
|
44dac786339716ad8cc05f6790b73b5fc47be812
|
Remove extra comma to avoid flake8 test failure in CircleCI
|
config/jinja2.py
|
config/jinja2.py
|
from django.urls import reverse
from django.utils import translation
from django.template.backends.jinja2 import Jinja2
from jinja2 import Environment
class FoodsavingJinja2(Jinja2):
app_dirname = 'templates'
def environment(**options):
env = Environment(extensions=['jinja2.ext.i18n'], **options)
env.globals.update({
'url': reverse,
})
env.install_gettext_translations(translation)
env.install_null_translations()
return env
|
from django.urls import reverse
from django.utils import translation
from django.template.backends.jinja2 import Jinja2
from jinja2 import Environment
class FoodsavingJinja2(Jinja2):
app_dirname = 'templates'
def environment(**options):
env = Environment(extensions=['jinja2.ext.i18n',], **options)
env.globals.update({
'url': reverse,
})
env.install_gettext_translations(translation)
env.install_null_translations()
return env
|
Python
| 0.000001
|
b447fa44ca1dd2e9d21af4ce61ee6092fe3c94ec
|
Update test_cmatrices to new interface
|
tests/test_cmatrices.py
|
tests/test_cmatrices.py
|
# to run this test, from directory above:
# setenv PYTHONPATH /path/to/pyradiomics/radiomics
# nosetests --nocapture -v tests/test_features.py
import logging
from nose_parameterized import parameterized
import numpy
import six
from radiomics import cMatsEnabled, getFeatureClasses
from testUtils import custom_name_func, RadiomicsTestUtils
testUtils = RadiomicsTestUtils()
testCases = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
featureClasses = getFeatureClasses()
class TestFeatures:
def generate_scenarios():
global testCases, featureClasses
for testCase in testCases:
for className, featureClass in six.iteritems(featureClasses):
assert(featureClass is not None)
if "_calculateCMatrix" in dir(featureClass) or className == "shape":
logging.debug('generate_scenarios: featureClass = %s', className)
yield testCase, className
global testUtils
@parameterized.expand(generate_scenarios(), testcase_func_name=custom_name_func)
def test_scenario(self, test, featureClassName):
print("")
global testUtils, featureClasses
logging.debug('test_scenario: testCase = %s, featureClassName = %s', test, featureClassName)
assert cMatsEnabled()
testUtils.setFeatureClassAndTestCase(featureClassName, test)
testImage = testUtils.getImage('original')
testMask = testUtils.getMask('original')
featureClass = featureClasses[featureClassName](testImage, testMask, **testUtils.getSettings())
if featureClassName == 'shape':
cSA = getattr(featureClass, 'SurfaceArea') # pre-calculated value by C extension
assert (cSA is not None)
pySA = getattr(featureClass, '_calculateSurfaceArea')() # Function, call to calculate SA in full-python mode
assert (pySA is not None)
# Check if the calculated values match
assert (numpy.abs(pySA - cSA)) < 1e-3
else:
assert "_calculateMatrix" in dir(featureClass)
cMat = featureClass._calculateCMatrix()
assert cMat is not None
pyMat = featureClass._calculateMatrix()
assert pyMat is not None
# Check if the calculated arrays match
assert numpy.max(numpy.abs(pyMat - cMat)) < 1e-3
|
# to run this test, from directory above:
# setenv PYTHONPATH /path/to/pyradiomics/radiomics
# nosetests --nocapture -v tests/test_features.py
import logging
from nose_parameterized import parameterized
import numpy
import six
from radiomics import cMatsEnabled, getFeatureClasses
from testUtils import custom_name_func, RadiomicsTestUtils
testUtils = RadiomicsTestUtils()
testCases = ('brain1', 'brain2', 'breast1', 'lung1', 'lung2')
featureClasses = getFeatureClasses()
class TestFeatures:
def generate_scenarios():
global testCases, featureClasses
for testCase in testCases:
for className, featureClass in six.iteritems(featureClasses):
assert(featureClass is not None)
if "_calculateCMatrix" in dir(featureClass) or className == "shape":
logging.debug('generate_scenarios: featureClass = %s', className)
yield testCase, className
global testUtils
@parameterized.expand(generate_scenarios(), testcase_func_name=custom_name_func)
def test_scenario(self, testCase, featureClassName):
print("")
global testUtils, featureClasses
logging.debug('test_scenario: testCase = %s, featureClassName = %s', testCase, featureClassName)
assert cMatsEnabled()
testUtils.setFeatureClassAndTestCase(featureClassName, testCase)
testImage = testUtils.getImage()
testMask = testUtils.getMask()
featureClass = featureClasses[featureClassName](testImage, testMask, **testUtils.getSettings())
if featureClassName == 'shape':
cSA = getattr(featureClass, 'SurfaceArea') # pre-calculated value by C extension
assert (cSA is not None)
pySA = getattr(featureClass, '_calculateSurfaceArea')() # Function, call to calculate SA in full-python mode
assert (pySA is not None)
# Check if the calculated values match
assert (numpy.abs(pySA - cSA)) < 1e-3
else:
assert "_calculateMatrix" in dir(featureClass)
cMat = featureClass._calculateCMatrix()
assert cMat is not None
pyMat = featureClass._calculateMatrix()
assert pyMat is not None
# Check if the calculated arrays match
assert numpy.max(numpy.abs(pyMat - cMat)) < 1e-3
|
Python
| 0
|
3b408ed7702100b7f1755f819e05bb61b1740957
|
add medialab events search- left todo: json and date
|
media_lab_prado.py
|
media_lab_prado.py
|
# http://medialab-prado.es/events/2016-12-01
# -*- coding: utf-8 -*-
from bs4 import BeautifulSoup
import urllib.request
import datetime
date = "2017-01-02"
url = "http://medialab-prado.es/events/" + date
request = urllib.request.urlopen(url)
if request.getcode() == 200:
request = request.read()
soup = BeautifulSoup(request, "html.parser")
pageevents = soup.find("ul", { "class" : "lista"}).findChildren("a")
for event in pageevents:
if event.text == "Seguir leyendo…":
event_url = event['href']
request2 = urllib.request.urlopen(event_url)
if request2.getcode() == 200:
request2 = request2.read()
soup = BeautifulSoup(request2, "html.parser")
location = soup.find("div", { "class" : "lugar"})
if location == None:
location = "MediaLab"
else:
location = location.find("p")
print (location)
description = soup.find("div", { "class" : "entradilla"})
print(description.text)
|
# http://medialab-prado.es/events/2016-12-01
|
Python
| 0
|
769e6209db066b8b5908426850fd300fd29098e8
|
Fix codemirror mode and language name
|
tcl_kernel/kernel.py
|
tcl_kernel/kernel.py
|
from ipykernel.kernelbase import Kernel
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
__version__ = '0.0.1'
class TclKernel(Kernel):
implementation = 'tcl_kernel'
implementation_version = __version__
language_info = {'name': 'Tcl',
'codemirror_mode': 'Tcl',
'mimetype': 'text/x-script.tcl',
'file_extension': '.tcl'}
banner = "Tcl Kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self.tcl = Tkinter.Tcl()
self.execution_count = 0
putsredef = 'rename puts original_puts \nproc puts {args} {\n if {[llength $args] == 1} {\n return "=> [lindex $args 0]"\n } else {\n eval original_puts $args\n }\n}\n'
self.tcl.eval(putsredef)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
try:
output = self.tcl.eval(code.rstrip())
if not silent:
stream_content = {'name': 'stdout', 'text': output[3:]}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Tkinter.TclError as scripterr:
output = "Tcl Error: " + scripterr.args[0]
if not silent:
stream_content = {
'name': 'stderr', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
|
from ipykernel.kernelbase import Kernel
try:
import Tkinter
except ImportError:
import tkinter as Tkinter
__version__ = '0.0.1'
class TclKernel(Kernel):
implementation = 'tcl_kernel'
implementation_version = __version__
language_info = {'name': 'bash',
'codemirror_mode': 'shell',
'mimetype': 'text/x-script.tcl',
'file_extension': '.tcl'}
banner = "Tcl Kernel"
def __init__(self, **kwargs):
Kernel.__init__(self, **kwargs)
self.tcl = Tkinter.Tcl()
self.execution_count = 0
putsredef = 'rename puts original_puts \nproc puts {args} {\n if {[llength $args] == 1} {\n return "=> [lindex $args 0]"\n } else {\n eval original_puts $args\n }\n}\n'
self.tcl.eval(putsredef)
def do_execute(self, code, silent, store_history=True,
user_expressions=None, allow_stdin=False):
try:
output = self.tcl.eval(code.rstrip())
if not silent:
stream_content = {'name': 'stdout', 'text': output[3:]}
self.send_response(self.iopub_socket, 'stream', stream_content)
except Tkinter.TclError as scripterr:
output = "Tcl Error: " + scripterr.args[0]
if not silent:
stream_content = {
'name': 'stderr', 'text': output}
self.send_response(self.iopub_socket, 'stream', stream_content)
return {'status': 'ok', 'execution_count': self.execution_count,
'payload': [], 'user_expressions': {}}
|
Python
| 0.00003
|
c517eb40b73151a9b14f46f1991ab692d8b81702
|
Add docstring for simulation class methods
|
teemof/simulation.py
|
teemof/simulation.py
|
# Date: August 2017
# Author: Kutay B. Sezginel
"""
Simulation class for reading and initializing Lammps simulations
"""
import pprint
from teemof.read import read_run, read_trial, read_trial_set
from teemof.parameters import k_parameters, plot_parameters
from teemof.visualize import plot_thermal_conductivity, plot_distance_histogram
class Simulation:
"""
Reading and initializing Lammps simulations
"""
def __init__(self, read=None, setup=None, parameters=k_parameters.copy()):
"""
Create a Lammps simulation object.
"""
self.parameters = parameters
self.plot_parameters = plot_parameters.copy()
if read is not None and setup is not None:
self.read(read, setup)
self.setup = setup
def read(self, sim_dir, setup='run'):
"""
Read Lammps simulation results from given directory.
"""
self.setup = setup
if setup == 'run':
self.run = read_run(sim_dir, k_par=self.parameters)
elif setup == 'trial':
self.trial = read_trial(sim_dir, k_par=self.parameters)
elif setup == 'trial_set':
self.trial_set = read_trial_set(sim_dir, k_par=self.parameters)
else:
print('Select setup: "run" | "trial" | "trial_set"')
def initialize(self):
"""
Initialize input files for a Lammps simulation.
"""
pass
def plot(self, selection):
"""
Plot Lammps simulation results.
"""
if selection == 'k':
plot_data = {}
plot_data['x'] = self.trial['data']['Run1']['time']
plot_data['y'] = [self.trial['data'][run]['k']['iso'] for run in self.trial['runs']]
plot_data['legend'] = self.trial['runs']
plot_thermal_conductivity(plot_data, self.plot_parameters['k'])
elif selection == 'hist':
plot_data = {}
plot_distance_histogram(plot_data, self.plot_parameters['hist'])
else:
print('Select plot: "k" | "k_est" | "hist"')
def show_parameters(self):
"""
Show thermal conductivity parameters.
"""
pprint.pprint(self.parameters)
def show_plot_parameters(self):
"""
Show plot parameters.
"""
pprint.pprint(self.plot_parameters)
|
# Date: August 2017
# Author: Kutay B. Sezginel
"""
Simulation class for reading and initializing Lammps simulations
"""
import pprint
from teemof.read import read_run, read_trial, read_trial_set
from teemof.parameters import k_parameters, plot_parameters
from teemof.visualize import plot_thermal_conductivity, plot_distance_histogram
class Simulation:
"""
Reading and initializing Lammps simulations
"""
def __init__(self, read=None, setup=None, parameters=k_parameters.copy()):
self.parameters = parameters
self.plot_parameters = plot_parameters.copy()
if read is not None and setup is not None:
self.read(read, setup)
self.setup = setup
def read(self, sim_dir, setup='run'):
self.setup = setup
if setup == 'run':
self.run = read_run(sim_dir, k_par=self.parameters)
elif setup == 'trial':
self.trial = read_trial(sim_dir, k_par=self.parameters)
elif setup == 'trial_set':
self.trial_set = read_trial_set(sim_dir, k_par=self.parameters)
else:
print('Select setup: "run" | "trial" | "trial_set"')
def initialize(self):
pass
def plot(self, selection):
if selection == 'k':
plot_data = {}
plot_data['x'] = self.trial['data']['Run1']['time']
plot_data['y'] = [self.trial['data'][run]['k']['iso'] for run in self.trial['runs']]
plot_data['legend'] = self.trial['runs']
plot_thermal_conductivity(plot_data, self.plot_parameters['k'])
elif selection == 'hist':
plot_data = {}
plot_distance_histogram(plot_data, self.plot_parameters['hist'])
else:
print('Select plot: "k" | "k_est" | "hist"')
def show_parameters(self):
pprint.pprint(self.parameters)
def show_plot_parameters(self):
pprint.pprint(self.plot_parameters)
|
Python
| 0
|
b646e4f376db710101e2c1825bd384b2727e6a79
|
Disable on win32
|
tests/test_dateentry.py
|
tests/test_dateentry.py
|
import sys
import datetime
import unittest
from kiwi.ui.dateentry import DateEntry
class TestDateEntry(unittest.TestCase):
def setUp(self):
self.date = datetime.date.today()
def testGetSetDate(self):
if sys.platform == 'win32':
return
entry = DateEntry()
entry.set_date(self.date)
self.assertEqual(entry.get_date(), self.date)
if __name__ == '__main__':
unittest.main()
|
import datetime
import unittest
from kiwi.ui.dateentry import DateEntry
class TestDateEntry(unittest.TestCase):
def setUp(self):
self.date = datetime.date.today()
def testGetSetDate(self):
entry = DateEntry()
entry.set_date(self.date)
self.assertEqual(entry.get_date(), self.date)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000002
|
17ad68fe77b124fa760857c9e93cbd3d4f9d293e
|
Write XML of input file to tempdir as well
|
tests/test_hintfonts.py
|
tests/test_hintfonts.py
|
from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import pytest
from fontTools.misc.xmlWriter import XMLWriter
from fontTools.cffLib import CFFFontSet
from fontTools.ttLib import TTFont
from psautohint.autohint import ACOptions, hintFiles
from .differ import main as differ
from . import DATA_DIR
class Options(ACOptions):
def __init__(self, inpath, outpath):
super(Options, self).__init__()
self.inputPaths = [inpath]
self.outputPaths = [outpath]
self.hintAll = True
self.verbose = False
@pytest.mark.parametrize("ufo", glob.glob("%s/*/*/font.ufo" % DATA_DIR))
def test_ufo(ufo, tmpdir):
out = str(tmpdir / basename(ufo))
options = Options(ufo, out)
hintFiles(options)
assert differ([ufo, out])
@pytest.mark.parametrize("otf", glob.glob("%s/*/*/font.otf" % DATA_DIR))
def test_otf(otf, tmpdir):
out = str(tmpdir / basename(otf)) + ".out"
options = Options(otf, out)
hintFiles(options)
for path in (otf, out):
font = TTFont(path)
assert "CFF " in font
writer = XMLWriter(str(tmpdir / basename(path)) + ".xml")
font["CFF "].toXML(writer, font)
del writer
del font
assert differ([str(tmpdir / basename(otf)) + ".xml",
str(tmpdir / basename(out)) + ".xml"])
@pytest.mark.parametrize("cff", glob.glob("%s/*/*/font.cff" % DATA_DIR))
def test_cff(cff, tmpdir):
out = str(tmpdir / basename(cff)) + ".out"
options = Options(cff, out)
hintFiles(options)
for path in (cff, out):
font = CFFFontSet()
writer = XMLWriter(str(tmpdir / basename(path)) + ".xml")
with open(path, "rb") as fp:
font.decompile(fp, None)
font.toXML(writer)
del writer
del font
assert differ([str(tmpdir / basename(cff)) + ".xml",
str(tmpdir / basename(out)) + ".xml"])
|
from __future__ import print_function, division, absolute_import
import glob
from os.path import basename
import pytest
from fontTools.misc.xmlWriter import XMLWriter
from fontTools.cffLib import CFFFontSet
from fontTools.ttLib import TTFont
from psautohint.autohint import ACOptions, hintFiles
from .differ import main as differ
from . import DATA_DIR
class Options(ACOptions):
def __init__(self, inpath, outpath):
super(Options, self).__init__()
self.inputPaths = [inpath]
self.outputPaths = [outpath]
self.hintAll = True
self.verbose = False
@pytest.mark.parametrize("ufo", glob.glob("%s/*/*/font.ufo" % DATA_DIR))
def test_ufo(ufo, tmpdir):
out = str(tmpdir / basename(ufo))
options = Options(ufo, out)
hintFiles(options)
assert differ([ufo, out])
@pytest.mark.parametrize("otf", glob.glob("%s/*/*/font.otf" % DATA_DIR))
def test_otf(otf, tmpdir):
out = str(tmpdir / basename(otf))
options = Options(otf, out)
hintFiles(options)
for path in (otf, out):
font = TTFont(path)
assert "CFF " in font
writer = XMLWriter(path + ".xml")
font["CFF "].toXML(writer, font)
del writer
del font
assert differ([otf + ".xml", out + ".xml"])
@pytest.mark.parametrize("cff", glob.glob("%s/*/*/font.cff" % DATA_DIR))
def test_cff(cff, tmpdir):
out = str(tmpdir / basename(cff))
options = Options(cff, out)
hintFiles(options)
for path in (cff, out):
font = CFFFontSet()
writer = XMLWriter(path + ".xml")
with open(path, "rb") as fp:
font.decompile(fp, None)
font.toXML(writer)
del writer
del font
assert differ([cff + ".xml", out + ".xml"])
|
Python
| 0
|
6e67a9e8eedd959d9d0193e746a375099e9784ef
|
Use bytes instead of str where appropriate for Python 3
|
toodlepip/consoles.py
|
toodlepip/consoles.py
|
class Console(object):
def __init__(self, shell, stdout):
self._shell = shell
self._stdout = stdout
def run(self, description, command, **kwargs):
return self.run_all(description, [command], **kwargs)
def run_all(self, description, commands, quiet=False, cwd=None):
stdout = None if quiet else self._stdout
# TODO: Test printing description
# TODO: detect terminal
self._stdout.write(b'\033[1m')
self._stdout.write(description.encode("utf8"))
self._stdout.write(b"\n")
self._stdout.write(b'\033[0m')
self._stdout.flush()
for command in commands:
# TODO: print command
result = self._shell.run(
command,
stdout=stdout,
stderr=stdout,
cwd=cwd,
allow_error=True
)
if result.return_code != 0:
return Result(result.return_code)
return Result(0)
class Result(object):
def __init__(self, return_code):
self.return_code = return_code
|
class Console(object):
def __init__(self, shell, stdout):
self._shell = shell
self._stdout = stdout
def run(self, description, command, **kwargs):
return self.run_all(description, [command], **kwargs)
def run_all(self, description, commands, quiet=False, cwd=None):
stdout = None if quiet else self._stdout
# TODO: Test printing description
# TODO: detect terminal
self._stdout.write('\033[1m')
self._stdout.write(description)
self._stdout.write("\n")
self._stdout.write('\033[0m')
self._stdout.flush()
for command in commands:
# TODO: print command
result = self._shell.run(
command,
stdout=stdout,
stderr=stdout,
cwd=cwd,
allow_error=True
)
if result.return_code != 0:
return Result(result.return_code)
return Result(0)
class Result(object):
def __init__(self, return_code):
self.return_code = return_code
|
Python
| 0.000561
|
8034a521692d9857b0d36e2efced40bb69f5efda
|
Refactor test for and operator
|
tests/test_operators.py
|
tests/test_operators.py
|
from pytest import mark
from intervals import IntInterval
class TestComparisonOperators(object):
def test_eq_operator(self):
assert IntInterval([1, 3]) == IntInterval([1, 3])
assert not IntInterval([1, 3]) == IntInterval([1, 4])
def test_ne_operator(self):
assert not IntInterval([1, 3]) != IntInterval([1, 3])
assert IntInterval([1, 3]) != IntInterval([1, 4])
def test_gt_operator(self):
assert IntInterval([1, 3]) > IntInterval([0, 2])
assert not IntInterval([2, 3]) > IntInterval([2, 3])
@mark.parametrize(('comparison', 'result'), (
(IntInterval([1, 3]) >= IntInterval([0, 2]), True),
(IntInterval((1, 4)) >= 1, False),
(IntInterval((1, 6)) >= [1, 6], False),
(IntInterval((1, 6)) >= 0, True)
))
def test_ge_operator(self, comparison, result):
assert comparison == result
def test_lt_operator(self):
assert IntInterval([0, 2]) < IntInterval([1, 3])
assert not IntInterval([2, 3]) < IntInterval([2, 3])
def test_le_operator(self):
assert IntInterval([0, 2]) <= IntInterval([1, 3])
assert IntInterval([1, 3]) >= IntInterval([1, 3])
def test_integer_comparison(self):
assert IntInterval([2, 2]) <= 3
assert IntInterval([1, 3]) >= 0
assert IntInterval([2, 2]) == 2
assert IntInterval([2, 2]) != 3
@mark.parametrize('value', (
IntInterval([0, 2]),
1,
(-1, 1),
))
def test_contains_operator_for_inclusive_interval(self, value):
assert value in IntInterval([-1, 2])
@mark.parametrize('value', (
IntInterval([0, 2]),
2,
'[-1, 1]',
))
def test_contains_operator_for_non_inclusive_interval(self, value):
assert value not in IntInterval((-1, 2))
class TestDiscreteRangeComparison(object):
@mark.parametrize(('interval', 'interval2'), (
([1, 3], '[1, 4)'),
('(1, 5]', '[2, 5]'),
('(1, 6)', '[2, 5]'),
))
def test_eq_operator(self, interval, interval2):
assert IntInterval(interval) == IntInterval(interval2)
class TestBinaryOperators(object):
@mark.parametrize(('interval1', 'interval2', 'result'), (
((2, 3), (3, 4), (3, 3)),
((2, 3), [3, 4], '[3, 3)'),
((2, 5), (3, 10), (3, 5)),
('(2, 3]', '[3, 4)', [3, 3]),
('(2, 10]', '[3, 40]', [3, 10]),
((2, 10), (3, 8), (3, 8)),
))
def test_and_operator(self, interval1, interval2, result):
assert (
IntInterval(interval1) & IntInterval(interval2) ==
IntInterval(result)
)
@mark.parametrize(('interval1', 'interval2', 'empty'), (
((2, 3), (3, 4), True),
((2, 3), [3, 4], True),
([2, 3], (3, 4), True),
('(2, 3]', '[3, 4)', False),
))
def test_and_operator_for_empty_results(self, interval1, interval2, empty):
assert (IntInterval(interval1) & IntInterval(interval2)).empty == empty
|
from pytest import mark
from intervals import IntInterval
class TestComparisonOperators(object):
def test_eq_operator(self):
assert IntInterval([1, 3]) == IntInterval([1, 3])
assert not IntInterval([1, 3]) == IntInterval([1, 4])
def test_ne_operator(self):
assert not IntInterval([1, 3]) != IntInterval([1, 3])
assert IntInterval([1, 3]) != IntInterval([1, 4])
def test_gt_operator(self):
assert IntInterval([1, 3]) > IntInterval([0, 2])
assert not IntInterval([2, 3]) > IntInterval([2, 3])
@mark.parametrize(('comparison', 'result'), (
(IntInterval([1, 3]) >= IntInterval([0, 2]), True),
(IntInterval((1, 4)) >= 1, False),
(IntInterval((1, 6)) >= [1, 6], False),
(IntInterval((1, 6)) >= 0, True)
))
def test_ge_operator(self, comparison, result):
assert comparison == result
def test_lt_operator(self):
assert IntInterval([0, 2]) < IntInterval([1, 3])
assert not IntInterval([2, 3]) < IntInterval([2, 3])
def test_le_operator(self):
assert IntInterval([0, 2]) <= IntInterval([1, 3])
assert IntInterval([1, 3]) >= IntInterval([1, 3])
def test_integer_comparison(self):
assert IntInterval([2, 2]) <= 3
assert IntInterval([1, 3]) >= 0
assert IntInterval([2, 2]) == 2
assert IntInterval([2, 2]) != 3
@mark.parametrize('value', (
IntInterval([0, 2]),
1,
(-1, 1),
))
def test_contains_operator_for_inclusive_interval(self, value):
assert value in IntInterval([-1, 2])
@mark.parametrize('value', (
IntInterval([0, 2]),
2,
'[-1, 1]',
))
def test_contains_operator_for_non_inclusive_interval(self, value):
assert value not in IntInterval((-1, 2))
class TestDiscreteRangeComparison(object):
@mark.parametrize(('interval', 'interval2'), (
([1, 3], '[1, 4)'),
('(1, 5]', '[2, 5]'),
('(1, 6)', '[2, 5]'),
))
def test_eq_operator(self, interval, interval2):
assert IntInterval(interval) == IntInterval(interval2)
class TestBinaryOperators(object):
@mark.parametrize(('interval1', 'interval2', 'result', 'empty'), (
((2, 3), (3, 4), (3, 3), True),
((2, 3), [3, 4], '[3, 3)', True),
((2, 5), (3, 10), (3, 5), False),
('(2, 3]', '[3, 4)', [3, 3], False),
('(2, 10]', '[3, 40]', [3, 10], False),
((2, 10), (3, 8), (3, 8), False),
))
def test_and_operator(self, interval1, interval2, result, empty):
assert (
IntInterval(interval1) & IntInterval(interval2) ==
IntInterval(result)
)
assert IntInterval(result).empty == empty
|
Python
| 0
|
5a817413b91adece6f5191d7fe0bf5b4baa430af
|
Fix test
|
tests/test_retrieval.py
|
tests/test_retrieval.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from numpy.testing import assert_allclose
from theano import tensor
from dictlearn.vocab import Vocabulary
from dictlearn.retrieval import (
vec2str, Dictionary, Retrieval)
from dictlearn.ops import RetrievalOp
from tests.util import (
TEST_VOCAB, TEST_DICT_JSON, temporary_content_path)
def test_vec2str():
vector = map(ord, 'abc') + [0, 0]
assert vec2str(vector) == 'abc'
def test_retrieval():
with temporary_content_path(TEST_VOCAB) as path:
vocab = Vocabulary(path)
with temporary_content_path(TEST_DICT_JSON) as path:
dict_ = Dictionary(path)
# check a super simple case
batch = [['a']]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 4, 5, 9], [8, 6, 7, 9]]
assert def_map == [(0, 0, 0), (0, 0, 1)]
# check that vectors are handled correctly
batch = numpy.array([ord('d'), ord(' '), ord('c'), 0, 0])[None, None, :]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 3, 4, 9]]
assert def_map == [(0, 0, 0)]
# check a complex case
batch = [['a', 'b', 'b'], ['d c', 'a', 'b']]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 4, 5, 9],
[8, 6, 7, 9],
[8, 7, 6, 9],
[8, 3, 4, 9]]
assert def_map == [(0, 0, 0), (0, 0, 1),
(0, 1, 2),
(0, 2, 2),
(1, 0, 3),
(1, 1, 0), (1, 1, 1),
(1, 2, 2)]
# check a complex case with exclude top k
batch = [['a', 'b', 'c', 'd'], ['a', 'e', 'b']]
exclude_top_k = 7 # should exclude 'a', 'b', 'c', 'd' and only define 'e'
defs, def_map = Retrieval(vocab, dict_, exclude_top_k=exclude_top_k).retrieve(batch)
assert defs == [[8, 4, 5, 6, 9]]
assert def_map == [(1, 1, 0)]
# check the op
retrieval_op = RetrievalOp(Retrieval(vocab, dict_))
batch = tensor.as_tensor_variable(
[[[ord('d'), ord(' '), ord('c'), 0, 0],
[ord('e'), 0, 0, 0, 0]]])
defs_var, mask_var, def_map_var = retrieval_op(batch)
assert defs_var.eval().tolist() == [[8, 3, 4, 9, 0],
[8, 4, 5, 6, 9]]
assert_allclose(mask_var.eval(), [[1, 1, 1, 1, 0], [1, 1, 1, 1, 1]])
assert def_map_var.eval().tolist() == [[0, 0, 0], [0, 1, 1]]
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy
from numpy.testing import assert_allclose
from theano import tensor
from dictlearn.vocab import Vocabulary
from dictlearn.retrieval import (
vec2str, Dictionary, Retrieval)
from dictlearn.ops import RetrievalOp
from tests.util import (
TEST_VOCAB, TEST_DICT_JSON, temporary_content_path)
def test_vec2str():
vector = map(ord, 'abc') + [0, 0]
assert vec2str(vector) == 'abc'
def test_retrieval():
with temporary_content_path(TEST_VOCAB) as path:
vocab = Vocabulary(path)
with temporary_content_path(TEST_DICT_JSON) as path:
dict_ = Dictionary(path)
# check a super simple case
batch = [['a']]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 4, 5, 9], [8, 6, 7, 9]]
assert def_map == [(0, 0, 0), (0, 0, 1)]
# check that vectors are handled correctly
batch = numpy.array([ord('d'), ord(' '), ord('c'), 0, 0])[None, None, :]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 3, 4, 9]]
assert def_map == [(0, 0, 0)]
# check a complex case
batch = [['a', 'b', 'b'], ['d c', 'a', 'b']]
defs, def_map = Retrieval(vocab, dict_).retrieve(batch)
assert defs == [[8, 4, 5, 9],
[8, 6, 7, 9],
[8, 7, 6, 9],
[8, 3, 4, 9]]
assert def_map == [(0, 0, 0), (0, 0, 1),
(0, 1, 2),
(0, 2, 2),
(1, 0, 3),
(1, 1, 0), (1, 1, 1),
(1, 2, 2)]
# check a complex case with exclude top k
batch = [['a', 'b', 'c', 'd'], ['a', 'e', 'b']]
exclude_top_k = 4 # should exclude 'a', 'b', 'c', 'd' and only define 'e'
defs, def_map = Retrieval(vocab, dict_, exclude_top_k=exclude_top_k).retrieve(batch)
assert defs == [[8, 4, 5, 6, 9]]
assert def_map == [(1, 1, 0)]
# check the op
retrieval_op = RetrievalOp(Retrieval(vocab, dict_))
batch = tensor.as_tensor_variable(
[[[ord('d'), ord(' '), ord('c'), 0, 0],
[ord('e'), 0, 0, 0, 0]]])
defs_var, mask_var, def_map_var = retrieval_op(batch)
assert defs_var.eval().tolist() == [[8, 3, 4, 9, 0],
[8, 4, 5, 6, 9]]
assert_allclose(mask_var.eval(), [[1, 1, 1, 1, 0], [1, 1, 1, 1, 1]])
assert def_map_var.eval().tolist() == [[0, 0, 0], [0, 1, 1]]
|
Python
| 0.000004
|
006e933a44241e30e1e54c24966d0859aa7c853d
|
test hub via vanilla, to check imports
|
tests/unit/test_core.py
|
tests/unit/test_core.py
|
import time
import vanilla
import vanilla.core
def test_lazy():
class C(object):
@vanilla.core.lazy
def now(self):
return time.time()
c = C()
want = c.now
time.sleep(0.01)
assert c.now == want
def test_Scheduler():
s = vanilla.core.Scheduler()
s.add(4, 'f2')
s.add(9, 'f4')
s.add(3, 'f1')
item3 = s.add(7, 'f3')
assert 0.003 - s.timeout() < 0.001
assert len(s) == 4
s.remove(item3)
assert 0.003 - s.timeout() < 0.001
assert len(s) == 3
assert s.pop() == ('f1', ())
assert 0.004 - s.timeout() < 0.001
assert len(s) == 2
assert s.pop() == ('f2', ())
assert 0.009 - s.timeout() < 0.001
assert len(s) == 1
assert s.pop() == ('f4', ())
assert not s
class TestHub(object):
def test_spawn(self):
h = vanilla.Hub()
a = []
h.spawn_later(10, lambda: a.append(1))
h.spawn(lambda: a.append(2))
h.sleep(1)
assert a == [2]
h.sleep(10)
assert a == [2, 1]
def test_exception(self):
h = vanilla.Hub()
def raiser():
raise Exception()
h.spawn(raiser)
h.sleep(1)
a = []
h.spawn(lambda: a.append(2))
h.sleep(1)
assert a == [2]
def test_stop(self):
h = vanilla.Hub()
@h.spawn
def _():
h.sleep(20)
h.stop()
|
import time
import vanilla.core
def test_lazy():
class C(object):
@vanilla.core.lazy
def now(self):
return time.time()
c = C()
want = c.now
time.sleep(0.01)
assert c.now == want
def test_Scheduler():
s = vanilla.core.Scheduler()
s.add(4, 'f2')
s.add(9, 'f4')
s.add(3, 'f1')
item3 = s.add(7, 'f3')
assert 0.003 - s.timeout() < 0.001
assert len(s) == 4
s.remove(item3)
assert 0.003 - s.timeout() < 0.001
assert len(s) == 3
assert s.pop() == ('f1', ())
assert 0.004 - s.timeout() < 0.001
assert len(s) == 2
assert s.pop() == ('f2', ())
assert 0.009 - s.timeout() < 0.001
assert len(s) == 1
assert s.pop() == ('f4', ())
assert not s
class TestHub(object):
def test_spawn(self):
h = vanilla.core.Hub()
a = []
h.spawn_later(10, lambda: a.append(1))
h.spawn(lambda: a.append(2))
h.sleep(1)
assert a == [2]
h.sleep(10)
assert a == [2, 1]
def test_exception(self):
h = vanilla.core.Hub()
def raiser():
raise Exception()
h.spawn(raiser)
h.sleep(1)
a = []
h.spawn(lambda: a.append(2))
h.sleep(1)
assert a == [2]
def test_stop(self):
h = vanilla.core.Hub()
@h.spawn
def _():
h.sleep(20)
h.stop()
|
Python
| 0
|
f20e76034eef1ea8b7b7f98ace521a3a6346103c
|
remove default 0.0.0.0 for ip address to pave the way for a unique constraint on the ip address column. Of course this means that network_id needs to be nullable. All of this weakens this table in a way that is making me unhappy. This can and will be solved with more clever check constraints (i.e. network_id can't be null if ip address is not null) AND by transforming IP address into a many-to-many assignment (i.e. an interface does not HAVE an ip as an indemic characteristic to itself, rather it is assigned after the fact.)
|
1.2.1/src/lib/python2.5/aquilon/aqdb/hw/interface.py
|
1.2.1/src/lib/python2.5/aquilon/aqdb/hw/interface.py
|
#!/ms/dist/python/PROJ/core/2.5.0/bin/python
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# $Header$
# $Change$
# $DateTime$
# $Author$
# Copyright (C) 2008 Morgan Stanley
#
# This module is part of Aquilon
"""Classes and Tables relating to network interfaces"""
from datetime import datetime
import sys
import os
if __name__ == '__main__':
DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.realpath(os.path.join(DIR, '..', '..', '..')))
import aquilon.aqdb.depends
from sqlalchemy import (Column, Table, Integer, Sequence, String, Index,
Boolean, CheckConstraint, UniqueConstraint, DateTime,
ForeignKey, PrimaryKeyConstraint, insert, select)
from sqlalchemy.orm import mapper, relation, deferred
from aquilon.aqdb.column_types.aqstr import AqStr
from aquilon.aqdb.column_types.IPV4 import IPV4
from aquilon.aqdb.db_factory import Base
from aquilon.aqdb.net.network import Network
#TODO: column type for MAC
#reg = re.compile('^([a-f0-9]{2,2}:){5,5}[a-f0-9]{2,2}$')
#if (not reg.match(self.mac)):
# raise ArgumentError ('Invalid MAC address: '+self.mac)
class Interface(Base):
__tablename__ = 'interface'
id = Column(Integer,
Sequence('interface_id_seq'), primary_key=True)
interface_type = Column(AqStr(32), nullable = False) #TODO: index
mac = Column(AqStr(18), nullable = False)
ip = Column(IPV4, nullable = True)
network_id = Column(Integer, ForeignKey(Network.__table__.c.id,
name = 'iface_net_id_fk'),
nullable = True)
creation_date = deferred(Column('creation_date',
DateTime, default = datetime.now,
nullable = False))
comments = deferred(Column(
'comments',String(255))) #TODO FK to IP table)
network = relation(Network, backref = 'interfaces' )
__mapper_args__ = {'polymorphic_on' : interface_type}
interface = Interface.__table__
interface.primary_key.name = 'interface_pk'
interface.append_constraint(UniqueConstraint('mac', name = 'iface_mac_addr_uk'))
interface.append_constraint(UniqueConstraint('ip', name = 'iface_ip_addr_uk'))
Index('iface_net_id_idx', interface.c.network_id)
def populate(*args, **kw):
from aquilon.aqdb.db_factory import db_factory, Base
from sqlalchemy import insert
dbf = db_factory()
Base.metadata.bind = dbf.engine
if 'debug' in args:
Base.metadata.bind.echo = True
s = dbf.session()
interface.create(checkfirst=True)
if len(s.query(Interface).all()) < 1:
#print 'no interfaces yet'
pass
if Base.metadata.bind.echo == True:
Base.metadata.bind.echo == False
|
#!/ms/dist/python/PROJ/core/2.5.0/bin/python
# ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# $Header$
# $Change$
# $DateTime$
# $Author$
# Copyright (C) 2008 Morgan Stanley
#
# This module is part of Aquilon
"""Classes and Tables relating to network interfaces"""
from datetime import datetime
import sys
import os
if __name__ == '__main__':
DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.realpath(os.path.join(DIR, '..', '..', '..')))
import aquilon.aqdb.depends
from sqlalchemy import (Column, Table, Integer, Sequence, String, Index,
Boolean, CheckConstraint, UniqueConstraint, DateTime,
ForeignKey, PrimaryKeyConstraint, insert, select)
from sqlalchemy.orm import mapper, relation, deferred
from aquilon.aqdb.column_types.aqstr import AqStr
from aquilon.aqdb.column_types.IPV4 import IPV4
from aquilon.aqdb.db_factory import Base
from aquilon.aqdb.net.network import Network
#TODO: column type for MAC
#reg = re.compile('^([a-f0-9]{2,2}:){5,5}[a-f0-9]{2,2}$')
#if (not reg.match(self.mac)):
# raise ArgumentError ('Invalid MAC address: '+self.mac)
class Interface(Base):
__tablename__ = 'interface'
id = Column(Integer,
Sequence('interface_id_seq'), primary_key=True)
interface_type = Column(AqStr(32), nullable = False) #TODO: index
mac = Column(AqStr(18), nullable = False)
ip = Column(IPV4, default='0.0.0.0')
network_id = Column(Integer, ForeignKey(Network.__table__.c.id,
name = 'iface_net_id_fk'),
nullable = False)
creation_date = deferred(Column('creation_date',
DateTime, default = datetime.now,
nullable = False))
comments = deferred(Column(
'comments',String(255))) #TODO FK to IP table)
network = relation(Network, backref = 'interfaces' )
__mapper_args__ = {'polymorphic_on' : interface_type}
interface = Interface.__table__
interface.primary_key.name = 'interface_pk'
interface.append_constraint(UniqueConstraint('mac', name = 'mac_addr_uk'))
Index('iface_ip_idx', interface.c.ip)
Index('iface_net_id_idx', interface.c.network_id)
def populate(*args, **kw):
from aquilon.aqdb.db_factory import db_factory, Base
from sqlalchemy import insert
dbf = db_factory()
Base.metadata.bind = dbf.engine
if 'debug' in args:
Base.metadata.bind.echo = True
s = dbf.session()
interface.create(checkfirst=True)
if len(s.query(Interface).all()) < 1:
#print 'no interfaces yet'
pass
if Base.metadata.bind.echo == True:
Base.metadata.bind.echo == False
|
Python
| 0.000001
|
bae05fd5c15e9360d09dd9456b6d4f1122ddf213
|
Print the url of dependency JARs being downloaded in buck build
|
tools/download_jar.py
|
tools/download_jar.py
|
#!/usr/bin/python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from zipfile import ZipFile, BadZipfile, LargeZipFile
def hashfile(p):
d = sha1()
with open(p, 'rb') as f:
while True:
b = f.read(8192)
if not b:
break
d.update(b)
return d.hexdigest()
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
args, _ = opts.parse_args()
root_dir = args.o
while root_dir:
root_dir, n = path.split(root_dir)
if n == 'buck-out':
break
cache_ent = path.join(
root_dir,
'buck-cache',
'%s-%s' % (path.basename(args.o), sha1(args.u).hexdigest()))
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
print >>stderr, "Download %s" % args.u
check_call(['curl', '-sfo', cache_ent, args.u])
except (OSError, CalledProcessError) as err:
print >>stderr, "error using curl: %s" % str(err)
exit(1)
if args.v:
have = hashfile(cache_ent)
if args.v != have:
o = cache_ent[len(root_dir) + 1:]
print >>stderr, (
'%s:\n' +
'expected %s\n' +
'received %s\n' +
' %s\n') % (args.u, args.v, have, o)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
zf = ZipFile(cache_ent, 'r')
try:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
finally:
zf.close()
except (BadZipfile, LargeZipFile) as err:
print >>stderr, "error opening %s: %s" % (cache_ent, str(err))
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
shutil.copyfile(cache_ent, args.o)
check_call(['zip', '-d', args.o] + exclude)
else:
try:
link(cache_ent, args.o)
except OSError as err:
symlink(cache_ent, args.o)
|
#!/usr/bin/python
# Copyright (C) 2013 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from hashlib import sha1
from optparse import OptionParser
from os import link, makedirs, path
import shutil
from subprocess import check_call, CalledProcessError
from sys import stderr
from zipfile import ZipFile, BadZipfile, LargeZipFile
def hashfile(p):
d = sha1()
with open(p, 'rb') as f:
while True:
b = f.read(8192)
if not b:
break
d.update(b)
return d.hexdigest()
def safe_mkdirs(d):
if path.isdir(d):
return
try:
makedirs(d)
except OSError as err:
if not path.isdir(d):
raise err
opts = OptionParser()
opts.add_option('-o', help='local output file')
opts.add_option('-u', help='URL to download')
opts.add_option('-v', help='expected content SHA-1')
opts.add_option('-x', action='append', help='file to delete from ZIP')
opts.add_option('--exclude_java_sources', action='store_true')
args, _ = opts.parse_args()
root_dir = args.o
while root_dir:
root_dir, n = path.split(root_dir)
if n == 'buck-out':
break
cache_ent = path.join(
root_dir,
'buck-cache',
'%s-%s' % (path.basename(args.o), sha1(args.u).hexdigest()))
if not path.exists(cache_ent):
try:
safe_mkdirs(path.dirname(cache_ent))
check_call(['curl', '-sfo', cache_ent, args.u])
except (OSError, CalledProcessError) as err:
print >>stderr, "error using curl: %s" % str(err)
exit(1)
if args.v:
have = hashfile(cache_ent)
if args.v != have:
o = cache_ent[len(root_dir) + 1:]
print >>stderr, (
'%s:\n' +
'expected %s\n' +
'received %s\n' +
' %s\n') % (args.u, args.v, have, o)
exit(1)
exclude = []
if args.x:
exclude += args.x
if args.exclude_java_sources:
try:
zf = ZipFile(cache_ent, 'r')
try:
for n in zf.namelist():
if n.endswith('.java'):
exclude.append(n)
finally:
zf.close()
except (BadZipfile, LargeZipFile) as err:
print >>stderr, "error opening %s: %s" % (cache_ent, str(err))
exit(1)
safe_mkdirs(path.dirname(args.o))
if exclude:
shutil.copyfile(cache_ent, args.o)
check_call(['zip', '-d', args.o] + exclude)
else:
try:
link(cache_ent, args.o)
except OSError as err:
symlink(cache_ent, args.o)
|
Python
| 0.000603
|
4043468de4fc448b6fda670f33b7f935883793a7
|
add a test to ensure False is never passed to Git.execute
|
test/git/test_git.py
|
test/git/test_git.py
|
import os
from test.testlib import *
from git import Git, GitCommandError
class TestGit(object):
def setup(self):
base = os.path.join(os.path.dirname(__file__), "../..")
self.git = Git(base)
@patch(Git, 'execute')
def test_method_missing_calls_execute(self, git):
git.return_value = ''
self.git.version()
assert_true(git.called)
# assert_equal(git.call_args, ((("%s version " % self.git_bin_base),), {}))
def test_it_transforms_kwargs_into_git_command_arguments(self):
assert_equal(["-s"], self.git.transform_kwargs(**{'s': True}))
assert_equal(["-s5"], self.git.transform_kwargs(**{'s': 5}))
assert_equal(["--max-count"], self.git.transform_kwargs(**{'max_count': True}))
assert_equal(["--max-count=5"], self.git.transform_kwargs(**{'max_count': 5}))
assert_equal(["-s", "-t"], self.git.transform_kwargs(**{'s': True, 't': True}))
def test_it_executes_git_to_shell_and_returns_result(self):
assert_match('^git version [\d\.]{2}.*$', self.git.execute(["git","version"]))
def test_it_accepts_stdin(self):
filename = fixture_path("cat_file_blob")
fh = open(filename, 'r')
assert_equal("70c379b63ffa0795fdbfbc128e5a2818397b7ef8",
self.git.hash_object(istream=fh, stdin=True))
fh.close()
def test_it_returns_status_and_ignores_stderr(self):
assert_equal((1, ""), self.git.this_does_not_exist(with_status=True))
@raises(GitCommandError)
def test_it_raises_errors(self):
self.git.this_does_not_exist(with_exceptions=True)
def test_it_returns_stderr_in_output(self):
# Note: no trailiing newline
assert_match(r"^git: 'this-does-not-exist' is not a git-command",
self.git.this_does_not_exist(with_stderr=True))
def test_it_does_not_strip_output_when_using_with_raw_output(self):
# Note: trailing newline
assert_match(r"^git: 'this-does-not-exist' is not a git-command" \
r"(\. See 'git --help'\.)?" + os.linesep,
self.git.this_does_not_exist(with_stderr=True,
with_raw_output=True))
def test_it_handles_large_input(self):
output = self.git.execute(["cat", "/bin/bash"])
assert_true(len(output) > 4096) # at least 4k
@patch(Git, 'execute')
def test_it_ignores_false_kwargs(self, git):
# this_should_not_be_ignored=False implies it *should* be ignored
output = self.git.version( pass_this_kwarg=False )
assert_true( "pass_this_kwarg" not in git.call_args[1] )
|
import os
from test.testlib import *
from git import Git, GitCommandError
class TestGit(object):
def setup(self):
base = os.path.join(os.path.dirname(__file__), "../..")
self.git = Git(base)
@patch(Git, 'execute')
def test_method_missing_calls_execute(self, git):
git.return_value = ''
self.git.version()
assert_true(git.called)
# assert_equal(git.call_args, ((("%s version " % self.git_bin_base),), {}))
def test_it_transforms_kwargs_into_git_command_arguments(self):
assert_equal(["-s"], self.git.transform_kwargs(**{'s': True}))
assert_equal(["-s5"], self.git.transform_kwargs(**{'s': 5}))
assert_equal(["--max-count"], self.git.transform_kwargs(**{'max_count': True}))
assert_equal(["--max-count=5"], self.git.transform_kwargs(**{'max_count': 5}))
assert_equal(["-s", "-t"], self.git.transform_kwargs(**{'s': True, 't': True}))
def test_it_executes_git_to_shell_and_returns_result(self):
assert_match('^git version [\d\.]{2}.*$', self.git.execute(["git","version"]))
def test_it_accepts_stdin(self):
filename = fixture_path("cat_file_blob")
fh = open(filename, 'r')
assert_equal("70c379b63ffa0795fdbfbc128e5a2818397b7ef8",
self.git.hash_object(istream=fh, stdin=True))
fh.close()
def test_it_returns_status_and_ignores_stderr(self):
assert_equal((1, ""), self.git.this_does_not_exist(with_status=True))
@raises(GitCommandError)
def test_it_raises_errors(self):
self.git.this_does_not_exist(with_exceptions=True)
def test_it_returns_stderr_in_output(self):
# Note: no trailiing newline
assert_match(r"^git: 'this-does-not-exist' is not a git-command",
self.git.this_does_not_exist(with_stderr=True))
def test_it_does_not_strip_output_when_using_with_raw_output(self):
# Note: trailing newline
assert_match(r"^git: 'this-does-not-exist' is not a git-command" \
r"(\. See 'git --help'\.)?" + os.linesep,
self.git.this_does_not_exist(with_stderr=True,
with_raw_output=True))
def test_it_handles_large_input(self):
output = self.git.execute(["cat", "/bin/bash"])
assert_true(len(output) > 4096) # at least 4k
|
Python
| 0
|
374e10b908fbedf73f3ad40634bb680206da0652
|
Add setUp
|
test/test_quality.py
|
test/test_quality.py
|
# -*- coding: utf-8 -*-
import unittest
from pychord import QualityManager, Chord
class TestQuality(unittest.TestCase):
def setUp(self):
self.quality_manager = QualityManager()
def test_eq(self):
q1 = self.quality_manager.get_quality("m7-5")
q2 = self.quality_manager.get_quality("m7-5")
self.assertEqual(q1, q2)
def test_eq_alias_maj9(self):
q1 = self.quality_manager.get_quality("M9")
q2 = self.quality_manager.get_quality("maj9")
self.assertEqual(q1, q2)
def test_eq_alias_m7b5(self):
q1 = self.quality_manager.get_quality("m7-5")
q2 = self.quality_manager.get_quality("m7b5")
self.assertEqual(q1, q2)
def test_eq_alias_min(self):
q1 = self.quality_manager.get_quality("m")
q2 = self.quality_manager.get_quality("min")
q3 = self.quality_manager.get_quality("-")
self.assertEqual(q1, q2)
self.assertEqual(q1, q3)
def test_invalid_eq(self):
q = self.quality_manager.get_quality("m7")
with self.assertRaises(TypeError):
print(q == 0)
class TestQualityManager(unittest.TestCase):
def test_singleton(self):
quality_manager = QualityManager()
quality_manager2 = QualityManager()
self.assertIs(quality_manager, quality_manager2)
class TestOverwriteQuality(unittest.TestCase):
def setUp(self):
self.quality_manager = QualityManager()
def test_overwrite(self):
self.quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
chord = Chord("C11")
self.assertEqual(chord.components(), ['C', 'E', 'G', 'Bb', 'D', 'F'])
def test_keep_existing_chord(self):
chord = Chord("C11")
self.quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
self.assertEqual(chord.components(), ['C', 'G', 'Bb', 'D', 'F'])
if __name__ == '__main__':
unittest.main()
|
# -*- coding: utf-8 -*-
import unittest
from pychord import QualityManager, Chord
class TestQuality(unittest.TestCase):
def setUp(self):
self.quality_manager = QualityManager()
def test_eq(self):
q1 = self.quality_manager.get_quality("m7-5")
q2 = self.quality_manager.get_quality("m7-5")
self.assertEqual(q1, q2)
def test_eq_alias_maj9(self):
q1 = self.quality_manager.get_quality("M9")
q2 = self.quality_manager.get_quality("maj9")
self.assertEqual(q1, q2)
def test_eq_alias_m7b5(self):
q1 = self.quality_manager.get_quality("m7-5")
q2 = self.quality_manager.get_quality("m7b5")
self.assertEqual(q1, q2)
def test_eq_alias_min(self):
q1 = self.quality_manager.get_quality("m")
q2 = self.quality_manager.get_quality("min")
q3 = self.quality_manager.get_quality("-")
self.assertEqual(q1, q2)
self.assertEqual(q1, q3)
def test_invalid_eq(self):
q = self.quality_manager.get_quality("m7")
with self.assertRaises(TypeError):
print(q == 0)
class TestQualityManager(unittest.TestCase):
def test_singleton(self):
quality_manager = QualityManager()
quality_manager2 = QualityManager()
self.assertIs(quality_manager, quality_manager2)
class TestOverwriteQuality(unittest.TestCase):
def test_overwrite(self):
quality_manager = QualityManager()
quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
chord = Chord("C11")
self.assertEqual(chord.components(), ['C', 'E', 'G', 'Bb', 'D', 'F'])
def test_keep_existing_chord(self):
chord = Chord("C11")
quality_manager = QualityManager()
quality_manager.set_quality("11", (0, 4, 7, 10, 14, 17))
self.assertEqual(chord.components(), ['C', 'G', 'Bb', 'D', 'F'])
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000002
|
3707ed6b193a5eed9ec4505f6a283fdaff07ad5e
|
fix deprecated method
|
mifiel/api_auth.py
|
mifiel/api_auth.py
|
"""
[ApiAuth](https://github.com/mgomes/api_auth) for python
Based on https://github.com/pd/httpie-api-auth by Kyle Hargraves
Usage:
import requests
requests.get(url, auth=ApiAuth(app_id, secret_key))
"""
import hmac, base64, hashlib, datetime
from requests.auth import AuthBase
from urllib.parse import urlparse
class ApiAuth(AuthBase):
def __init__(self, access_id, secret_key):
self.access_id = access_id
self.secret_key = secret_key.encode('ascii')
def __call__(self, request):
method = request.method.upper()
content_type = request.headers.get('content-type')
if not content_type:
content_type = ''
content_md5 = request.headers.get('content-md5')
if not content_md5:
m = hashlib.md5()
body = request.body
if not body: body = ''
m.update(body.encode('ascii'))
content_md5 = base64.b64encode(m.digest()).decode()
request.headers['content-md5'] = content_md5
httpdate = request.headers.get('date')
if not httpdate:
now = datetime.datetime.utcnow()
httpdate = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers['Date'] = httpdate
url = urlparse(request.url)
path = url.path
if url.query:
path = path + '?' + url.query
canonical_string = '%s,%s,%s,%s,%s' % (method, content_type, content_md5, path, httpdate)
digest = hmac.new(
self.secret_key,
canonical_string.encode('ascii'),
hashlib.sha1
).digest()
signature = base64.encodebytes(digest).rstrip().decode()
request.headers['Authorization'] = 'APIAuth %s:%s' % (self.access_id, signature)
return request
|
"""
[ApiAuth](https://github.com/mgomes/api_auth) for python
Based on https://github.com/pd/httpie-api-auth by Kyle Hargraves
Usage:
import requests
requests.get(url, auth=ApiAuth(app_id, secret_key))
"""
import hmac, base64, hashlib, datetime
from requests.auth import AuthBase
from urllib.parse import urlparse
class ApiAuth(AuthBase):
def __init__(self, access_id, secret_key):
self.access_id = access_id
self.secret_key = secret_key.encode('ascii')
def __call__(self, request):
method = request.method.upper()
content_type = request.headers.get('content-type')
if not content_type:
content_type = ''
content_md5 = request.headers.get('content-md5')
if not content_md5:
m = hashlib.md5()
body = request.body
if not body: body = ''
m.update(body.encode('ascii'))
content_md5 = base64.b64encode(m.digest()).decode()
request.headers['content-md5'] = content_md5
httpdate = request.headers.get('date')
if not httpdate:
now = datetime.datetime.utcnow()
httpdate = now.strftime('%a, %d %b %Y %H:%M:%S GMT')
request.headers['Date'] = httpdate
url = urlparse(request.url)
path = url.path
if url.query:
path = path + '?' + url.query
canonical_string = '%s,%s,%s,%s,%s' % (method, content_type, content_md5, path, httpdate)
digest = hmac.new(
self.secret_key,
canonical_string.encode('ascii'),
hashlib.sha1
).digest()
signature = base64.encodestring(digest).rstrip().decode()
request.headers['Authorization'] = 'APIAuth %s:%s' % (self.access_id, signature)
return request
|
Python
| 0.000053
|
c0894d3c14b8273364454dfa13c94311578ff698
|
update for diverse usage
|
mk-1strecurring.py
|
mk-1strecurring.py
|
#!/usr/bin/env python3
# (C) Mikhail Kolodin, 2018, ver. 2018-05-31 1.1
# class ic test task: find 1st recurring character in a string
import random
import string
MINSIZE = 1 # min size of test string
MAXSIZE = 19 # its max size
TESTS = 10 # no of tests
alf = string.ascii_uppercase # test alphabet
arr = []
size = 0
def prepare():
"""organize tests"""
global arr, size
size = random.randint(MINSIZE, MAXSIZE)
arr = "".join([random.choice(alf) for i in range(size)])
def solve():
"""find char, reusable function"""
found = ""
for c in arr:
if c in found:
return c
else:
found += c
else:
return ""
def show():
"""find and show char, function to show result only"""
c = solve()
return c if c else "None"
def main():
"""run all"""
for test in range(TESTS):
prepare()
print ("test =", test, ", size = %2d" % (size), ", arr =", arr.ljust(MAXSIZE), ", found recurrent:", show())
if __name__ == "__main__":
main()
|
#!/usr/bin/env python3
# (C) Mikhail Kolodin, 2018, ver. 1.0
# class ic test task: find 1st recurring character in a string
import random
import string
MINSIZE = 1 # min size of test string
MAXSIZE = 9 # its max size
TESTS = 10 # no of tests
alf = string.ascii_uppercase # test alphabet
arr = []
size = 0
def prepare():
"""organize tests"""
global arr, size
size = random.randint(MINSIZE, MAXSIZE)
arr = "".join([random.choice(alf) for i in range(size)])
def solve():
"""find char"""
global arr
found = ""
for c in arr:
if c in found:
return c
else:
found += c
else:
return "None"
def main():
"""run all"""
global arr, szie
for test in range(TESTS):
prepare()
print ("test =", test, ", size =", size, ", arr =", arr.ljust(MAXSIZE), ", found recurrent:", solve())
main()
|
Python
| 0
|
e379f35a15956204f09aa593979fe0a0186cf56e
|
Update the upload tool
|
tools/upload_build.py
|
tools/upload_build.py
|
"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
from urllib import request
from redminelib import Redmine
from redminelib.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print("The file {} cannot be found.".format(path))
sys.exit(1)
# Then upload this file
print("Retrieving the Download wiki page on 'cocomud-client'...")
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print("Uploading {}...".format(path))
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print("Saving the page...", page.save())
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py", encoding="utf-8") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print("Updating the Download page for the {} project...".format(
identifier))
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print("Correctly saved the wiki page.")
else:
print("Error while saving the wiki page.")
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print("Updating the custom field")
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print("URL", url)
|
"""This script upload a newly-build version of CocoMUD for Windows.
The Download wiki page on Redmine are updated.
Requirements:
This script needs 'python-redmine', which you can obtain with
pip install python-redmine
"""
import argparse
from json import dumps
import os
import re
import sys
import urllib2
from redminelib import Redmine
from redminelib.exceptions import ResourceNotFoundError
# Create an argument parser
parser = argparse.ArgumentParser(
description="upload a new CocoMUD build")
parser.add_argument("key", help="the API key to upload to Redmine")
args = parser.parse_args()
# Configure the system
key = args.key
# Connects to the REST API
redmine = Redmine("https://cocomud.plan.io", key=key)
# Check that the file exists
path = os.path.abspath("../src/build/CocoMUD.zip")
if not os.path.exists(path):
print "The file {} cannot be found.".format(path)
sys.exit(1)
# Then upload this file
print "Retrieving the Download wiki page on 'cocomud-client'..."
page = redmine.wiki_page.get("Download", project_id="cocomud-client")
print "Uploading {}...".format(path)
text = page.text
page.uploads = [{"path": path, "filename": "CocoMUD.zip"}]
page.text = text
print "Saving the page...", page.save()
# Get the new resource URL
url = list(page.attachments)[-1].content_url
# Retrieve the version number
with open("../src/version.py") as file:
content = file.read()
version = content.partition("=")[2].strip()
# Now we get ALL wiki pages with the title 'Download' and replace the URL
for project in redmine.project.all():
identifier = project.identifier
# Try to get the 'Download' page
try:
page = redmine.wiki_page.get("Download", project_id=identifier)
except ResourceNotFoundError:
pass
else:
print "Updating the Download page for the {} project...".format(
identifier)
text = page.text
text = re.sub(r"https\://cocomud\.plan\.io/attachments/" \
r"download/\d+/CocoMUD\.zip", url, text)
text = re.sub(r"\+\*\d+\*\+", "+*" + version + "*+", text)
page.text = text
success = page.save()
if success:
print "Correctly saved the wiki page."
else:
print "Error while saving the wiki page."
# Update the build information in the custom field
build = dumps({version: {"windows": url}})
print "Updating the custom field"
redmine.project.update(resource_id=2,
custom_fields=[{"id": 3, "value": build}])
print "URL", url
|
Python
| 0
|
3d331ecdb9cb0e64050eb3e4ece27242e1714b3e
|
Update C_Temperature_Vertical_sections.py
|
Cas_1/Temperature/C_Temperature_Vertical_sections.py
|
Cas_1/Temperature/C_Temperature_Vertical_sections.py
|
import numpy as np
import matplotlib.pyplot as plt
from xmitgcm import open_mdsdataset
plt.ion()
dir1 = '/homedata/bderembl/runmit/test_southatlgyre'
ds1 = open_mdsdataset(dir1,iters='all',prefix=['T'])
Height = ds1.T.Z
print(Height)
nx = int(len(ds1.T.XC)/2)
print(nx)
ny = int(len(ds1.T.YC)/2)
print(ny)
nt = -1
# Vertical Section of Temperature
plt.figure(1)
ds1['T'].where(ds1.hFacC>0)[nt,:,ny,:].plot()
plt.title('Case 1 : Temperature (t=-1 ; YC = 30S)')
plt.savefig('T_Temperature_Vertical_section_xz_cas1'+'.png')
plt.clf()
plt.figure(2)
ds1['T'].where(ds1.hFacC>0)[nt,:,:,nx].plot()
plt.title('Case 1 : Temperature (t=-1 ; XC = 0E)')
plt.savefig('T_Temperature_Vertical_section_yz_cas1'+'.png')
plt.clf()
|
import numpy as np
import matplotlib.pyplot as plt
from xmitgcm import open_mdsdataset
plt.ion()
dir1 = '/homedata/bderembl/runmit/test_southatlgyre'
ds1 = open_mdsdataset(dir1,iters='all',prefix=['T'])
Height = ds1.T.Z
print(Height)
nx = int(len(ds1.T.XC)/2)
print(nx)
ny = int(len(ds1.T.YC)/2)
print(ny)
nt = -1
# Vertical Section of Temperature
plt.figure(1)
ds1['T'].where(ds1.hFacC>0)[nt,:,ny,:].plot()
plt.title('Case 1 : Temperature (t=-1 ; YC = 30S)')
plt.savefig('T_Temperature_Vertical_section_xz_cas4'+'.png')
plt.clf()
plt.figure(2)
ds1['T'].where(ds1.hFacC>0)[nt,:,:,nx].plot()
plt.title('Case 1 : Temperature (t=-1 ; XC = 0E)')
plt.savefig('T_Temperature_Vertical_section_yz_cas4'+'.png')
plt.clf()
|
Python
| 0.000001
|
b2542f8c3625150f9716eb0b1fcb44ee15520ae8
|
fix path to nvim files
|
mod/vim/install.py
|
mod/vim/install.py
|
import packages
import util
def run():
spell_dir = '~/.config/vim/spell/'
choices = [
'vim',
'gvim', # gvim supports for X11 clipboard, but has more dependencies
]
choice = None
while choice not in choices:
choice = input('Which package to install? (%s) ' % choices).lower()
packages.try_install(choice)
packages.try_install('fzf')
for name in ['undo', 'backup', 'swap', 'spell', 'autoload']:
util.create_dir('~/.config/vim/' + name)
for path in util.find('./../nvim/*.vim'):
util.create_symlink(path, '~/.config/vim/')
util.create_symlink('./../nvim/spell/pl.utf-8.add', spell_dir)
util.create_symlink('./../nvim/spell/en.utf-8.add', spell_dir)
util.download(
'ftp://ftp.vim.org/pub/vim/runtime/spell/en.utf-8.spl',
'~/.config/vim/spell/')
util.download(
'ftp://ftp.vim.org/pub/vim/runtime/spell/pl.utf-8.spl',
'~/.config/vim/spell/')
util.download(
'https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim',
'~/.config/vim/autoload/plug.vim')
util.create_file(
'~/.config/zsh/editor.sh', 'export EDITOR=vim', overwrite=True)
util.create_symlink('~/.config/vim/', '~/.vim')
util.create_symlink('~/.config/vim/init.vim', '~/.vimrc')
commands = ['PlugInstall']
for path in util.find(spell_dir):
if 'add' in path and 'spl' not in path:
commands.append('mkspell! ' + path)
util.run_verbose(['vim'] + sum([['-c', cmd] for cmd in commands], []))
|
import packages
import util
def run():
spell_dir = '~/.config/vim/spell/'
choices = [
'vim',
'gvim', # gvim supports for X11 clipboard, but has more dependencies
]
choice = None
while choice not in choices:
choice = input('Which package to install? (%s) ' % choices).lower()
packages.try_install(choice)
packages.try_install('fzf')
for name in ['undo', 'backup', 'swap', 'spell', 'autoload']:
util.create_dir('~/.config/vim/' + name)
for path in util.find('./../mod-nvim/*.vim'):
util.create_symlink(path, '~/.config/vim/')
util.create_symlink('./../mod-nvim/spell/pl.utf-8.add', spell_dir)
util.create_symlink('./../mod-nvim/spell/en.utf-8.add', spell_dir)
util.download(
'ftp://ftp.vim.org/pub/vim/runtime/spell/en.utf-8.spl',
'~/.config/vim/spell/')
util.download(
'ftp://ftp.vim.org/pub/vim/runtime/spell/pl.utf-8.spl',
'~/.config/vim/spell/')
util.download(
'https://raw.githubusercontent.com/junegunn/vim-plug/master/plug.vim',
'~/.config/vim/autoload/plug.vim')
util.create_file(
'~/.config/zsh/editor.sh', 'export EDITOR=vim', overwrite=True)
util.create_symlink('~/.config/vim/', '~/.vim')
util.create_symlink('~/.config/vim/init.vim', '~/.vimrc')
commands = ['PlugInstall']
for path in util.find(spell_dir):
if 'add' in path and 'spl' not in path:
commands.append('mkspell! ' + path)
util.run_verbose(['vim'] + sum([['-c', cmd] for cmd in commands], []))
|
Python
| 0
|
6d2e66ab5b9b452474701ffc5035e4a8106db637
|
Add test_Record unit tests
|
tests/test_Record.py
|
tests/test_Record.py
|
import unittest
import os, shutil
from GeometrA.src.Record import *
from GeometrA.src.File.WorkSpace import WorkSpace
RECORD_FILE = './tests/record.log'
class RecordTestSuite(unittest.TestCase):
@classmethod
def setUpClass(cls):
path = './tests/Project0'
if os.path.isdir(path):
shutil.rmtree(path, True)
def setUp(self):
self.recordFile = './tests/record.log'
self.path = os.getcwd()
shutil.copytree('./tests/File/Project0', './tests/Project0')
def tearDown(self):
if os.path.isfile(self.recordFile):
os.remove(self.recordFile)
def tearDown(self):
path = './tests/Project0'
if os.path.isdir(path):
shutil.rmtree('path', True)
def testExportLog(self):
p = ['Project0', {'Project0':{'Suite1': ['case1', 'case2'],
'Suite2': ['case2']}}]
path = self.path
ws = WorkSpace(self.path, p)
exportLog(workspace = ws)
self.assertTrue(os.path.isfile(self.recordFile))
def testLog(self):
p = ['Project0', {'Project0':{'Suite1': ['case1', 'case2'],
'Suite2': ['case2']}}]
path = self.path
ws1 = WorkSpace(self.path, p)
exportLog(workspace = ws1)
ws = WorkSpace()
loadLog(ws)
log = [os.getcwd() + '/tests/Project0/Project0.json']
self.assertEqual(log, ws.log())
|
# import unittest
#
# import os, shutil
#
# from GeometrA.src.Record import *
# from GeometrA.src.File.WorkSpace import WorkSpace
#
# RECORD_FILE = './tests/record.log'
#
# class RecordTestSuite(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# path = './tests/Project0'
# if os.path.isdir(path):
# shutil.rmtree(path, True)
#
# def setUp(self):
# self.recordFile = './tests/record.log'
# self.path = os.getcwd()
# shutil.copytree('./tests/File/Project0', './tests/Project0')
#
# def tearDown(self):
# if os.path.isfile(self.recordFile):
# os.remove(self.recordFile)
#
# def tearDown(self):
# path = './tests/Project0'
# if os.path.isdir(path):
# shutil.rmtree('path', True)
#
# def testExportLog(self):
# p = ['Project0', {'Project0':{'Suite1': ['case1', 'case2'],
# 'Suite2': ['case2']}}]
# path = self.path
# ws = WorkSpace(self.path, p)
#
# exportLog(workspace = ws)
# self.assertTrue(os.path.isfile(self.recordFile))
#
# def testLog(self):
# p = ['Project0', {'Project0':{'Suite1': ['case1', 'case2'],
# 'Suite2': ['case2']}}]
# path = self.path
# ws1 = WorkSpace(self.path, p)
#
# exportLog(workspace = ws1)
#
# ws = WorkSpace()
# loadLog(ws)
#
# log = [os.getcwd() + '/tests/Project0/Project0.json']
# self.assertEqual(log, ws.log())
|
Python
| 0.000001
|
36e066ae645eb9b874ff1ce814708bd024c519e0
|
add support to get git revision from toymaker.
|
toymakerlib/toymaker.py
|
toymakerlib/toymaker.py
|
#! /usr/bin/env python
import sys
import os
import getopt
import optparse
import traceback
import toydist
from toydist.core.utils import \
subst_vars, pprint
from toydist.core.platforms import \
get_scheme
from toydist.core.descr_parser import \
ParseError
from toydist.commands.core import \
Command, HelpCommand, get_usage
from toydist.commands.configure import \
ConfigureCommand
from toydist.commands.build import \
BuildCommand
from toydist.commands.install import \
InstallCommand
from toydist.commands.parse import \
ParseCommand
from toydist.commands.convert import \
ConvertCommand
from toydist.commands.sdist import \
SdistCommand
from toydist.commands.detect_type import \
DetectTypeCommand
from toydist.commands.build_pkg_info import \
BuildPkgInfoCommand
from toydist.commands.build_egg import \
BuildEggCommand
from toydist.commands.core import \
register_command, UsageException, \
MyOptionParser, get_command_names, get_command, \
get_public_command_names
if os.environ.get("TOYMAKER_DEBUG", None) is not None:
TOYMAKER_DEBUG = True
else:
TOYMAKER_DEBUG = False
SCRIPT_NAME = 'toymaker'
#================================
# Create the command line UI
#================================
register_command("help", HelpCommand)
register_command("configure", ConfigureCommand)
register_command("build", BuildCommand)
register_command("install", InstallCommand)
register_command("convert", ConvertCommand)
register_command("sdist", SdistCommand)
register_command("build_egg", BuildEggCommand)
register_command("build_pkg_info", BuildPkgInfoCommand, public=False)
register_command("parse", ParseCommand, public=False)
register_command("detect_type", DetectTypeCommand, public=False)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
show_usage = False
show_version = False
show_full_version = False
cmd_name = None
cmd_opts = None
try:
opts, pargs = getopt.getopt(argv, "hv", ["help", "version", "full-version"])
for opt, arg in opts:
if opt in ("--help", "-h"):
show_usage = True
if opt in ("--version", "-v"):
show_version = True
if opt in ("--full-version"):
show_full_version = True
if len(pargs) > 0:
cmd_name = pargs.pop(0)
cmd_opts = pargs
except getopt.GetoptError, e:
emsg = "%s: illegal global option -- %s" % (SCRIPT_NAME, e.opt)
print emsg
print get_usage()
return 1
if show_version:
print toydist.__version__
return 0
if show_full_version:
print toydist.__version__ + "git" + toydist.__git_revision__
return 0
if show_usage:
cmd = get_command('help')()
cmd.run([])
return 0
if not cmd_name:
print "Type '%s help' for usage." % SCRIPT_NAME
return 1
else:
if not cmd_name in get_command_names():
raise UsageException("%s: Error: unknown command %s" % (SCRIPT_NAME, cmd_name))
else:
cmd = get_command(cmd_name)()
cmd.run(cmd_opts)
def noexc_main(argv=None):
try:
ret = main(argv)
except UsageException, e:
pprint('RED', e)
sys.exit(1)
except ParseError, e:
pprint('RED', "".join(e.args))
sys.exit(2)
except Exception, e:
if TOYMAKER_DEBUG:
tb = sys.exc_info()[2]
traceback.print_tb(tb)
pprint('RED', "%s: Error: %s crashed (uncaught exception %s: %s)." % \
(SCRIPT_NAME, SCRIPT_NAME, e.__class__, str(e)))
sys.exit(1)
sys.exit(ret)
if __name__ == '__main__':
noexc_main()
|
#! /usr/bin/env python
import sys
import os
import getopt
import optparse
import traceback
import toydist
from toydist.core.utils import \
subst_vars, pprint
from toydist.core.platforms import \
get_scheme
from toydist.core.descr_parser import \
ParseError
from toydist.commands.core import \
Command, HelpCommand, get_usage
from toydist.commands.configure import \
ConfigureCommand
from toydist.commands.build import \
BuildCommand
from toydist.commands.install import \
InstallCommand
from toydist.commands.parse import \
ParseCommand
from toydist.commands.convert import \
ConvertCommand
from toydist.commands.sdist import \
SdistCommand
from toydist.commands.detect_type import \
DetectTypeCommand
from toydist.commands.build_pkg_info import \
BuildPkgInfoCommand
from toydist.commands.build_egg import \
BuildEggCommand
from toydist.commands.core import \
register_command, UsageException, \
MyOptionParser, get_command_names, get_command, \
get_public_command_names
if os.environ.get("TOYMAKER_DEBUG", None) is not None:
TOYMAKER_DEBUG = True
else:
TOYMAKER_DEBUG = False
SCRIPT_NAME = 'toymaker'
#================================
# Create the command line UI
#================================
register_command("help", HelpCommand)
register_command("configure", ConfigureCommand)
register_command("build", BuildCommand)
register_command("install", InstallCommand)
register_command("convert", ConvertCommand)
register_command("sdist", SdistCommand)
register_command("build_egg", BuildEggCommand)
register_command("build_pkg_info", BuildPkgInfoCommand, public=False)
register_command("parse", ParseCommand, public=False)
register_command("detect_type", DetectTypeCommand, public=False)
def main(argv=None):
if argv is None:
argv = sys.argv[1:]
show_usage = False
show_version = False
cmd_name = None
cmd_opts = None
try:
opts, pargs = getopt.getopt(argv, "hv", ["help", "version"])
for opt, arg in opts:
if opt in ("--help", "-h"):
show_usage = True
if opt in ("--version", "-v"):
show_version = True
if len(pargs) > 0:
cmd_name = pargs.pop(0)
cmd_opts = pargs
except getopt.GetoptError, e:
emsg = "%s: illegal global option -- %s" % (SCRIPT_NAME, e.opt)
print emsg
print get_usage()
return 1
if show_version:
print toydist.__version__
return 0
if show_usage:
cmd = get_command('help')()
cmd.run([])
return 0
if not cmd_name:
print "Type '%s help' for usage." % SCRIPT_NAME
return 1
else:
if not cmd_name in get_command_names():
raise UsageException("%s: Error: unknown command %s" % (SCRIPT_NAME, cmd_name))
else:
cmd = get_command(cmd_name)()
cmd.run(cmd_opts)
def noexc_main(argv=None):
try:
ret = main(argv)
except UsageException, e:
pprint('RED', e)
sys.exit(1)
except ParseError, e:
pprint('RED', "".join(e.args))
sys.exit(2)
except Exception, e:
if TOYMAKER_DEBUG:
tb = sys.exc_info()[2]
traceback.print_tb(tb)
pprint('RED', "%s: Error: %s crashed (uncaught exception %s: %s)." % \
(SCRIPT_NAME, SCRIPT_NAME, e.__class__, str(e)))
sys.exit(1)
sys.exit(ret)
if __name__ == '__main__':
noexc_main()
|
Python
| 0
|
d9407ebda411d49212da35e27f08718dade1cd02
|
Support Info is unable to read package version and git version
|
modules/support.py
|
modules/support.py
|
# -*- coding: utf-8 -*-
"""Support Information module.
The module provides functions to gain information to be included in issues.
It neither contains normal functionality nor is it used by GitGutter.
"""
import os
import subprocess
import textwrap
import sublime
import sublime_plugin
# get absolute path of the package
PACKAGE_PATH = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
if os.path.isfile(PACKAGE_PATH):
# Package is a PACKAGE.sublime-package so get its filename
PACKAGE, _ = os.path.splitext(os.path.basename(PACKAGE_PATH))
elif os.path.isdir(PACKAGE_PATH):
# Package is a directory, so get its basename
PACKAGE = os.path.basename(PACKAGE_PATH)
else:
raise ValueError('Package is no file and no directory!')
def git(*args):
"""Read version of git binary."""
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
proc = subprocess.Popen(
args=['git'] + [arg for arg in args], startupinfo=startupinfo,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
# run command in package directory if exists.
cwd=PACKAGE_PATH if os.path.isdir(PACKAGE_PATH) else None)
stdout, _ = proc.communicate()
return stdout.decode('utf-8').strip() if stdout else None
def git_version():
"""Read version of git binary."""
try:
return git('--version')
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
return 'git version could not be acquired!'
def gitgutter_version():
"""Read commit hash or version of GitGutter."""
try:
return git('rev-parse', 'HEAD')[:7]
except:
try:
return sublime.load_resource(
'Packages/%s/release_messages/dest/VERSION' % PACKAGE)
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
return 'Version could not be acquired!'
def module_version(module, attr):
"""Format the module version."""
try:
version = getattr(module, attr)
if callable(version):
version = version()
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
version = 'version could not be acquired!'
if not isinstance(version, str):
version = '.'.join((str(x) for x in version))
return version
def is_installed_by_package_control():
"""Check if installed by package control."""
settings = sublime.load_settings('Package Control.sublime-settings')
return str(PACKAGE in set(settings.get('installed_packages', [])))
class GitGutterSupportInfoCommand(sublime_plugin.ApplicationCommand):
"""Support Information Command."""
@staticmethod
def run():
"""Run command."""
info = {
'platform': sublime.platform(),
'st_version': sublime.version(),
'arch': sublime.arch(),
'package_version': gitgutter_version(),
'pc_install': is_installed_by_package_control(),
'git_version': git_version()
}
try:
import markdown
info['markdown'] = module_version(markdown, 'version')
except ImportError:
info['markdown'] = 'not installed!'
try:
import mdpopups
info['mdpopups'] = module_version(mdpopups, 'version')
except ImportError:
info['mdpopups'] = 'not installed!'
try:
import jinja2
info['jinja'] = module_version(jinja2, '__version__')
except ImportError:
info['jinja'] = 'not installed!'
try:
import pygments
info['pygments'] = module_version(pygments, '__version__')
except ImportError:
info['pygments'] = 'not installed!'
msg = textwrap.dedent(
"""\
- Sublime Text %(st_version)s
- Platform: %(platform)s
- Arch: %(arch)s
- GitGutter %(package_version)s
- Install via PC: %(pc_install)s
- %(git_version)s
- mdpopups %(mdpopups)s
- markdown %(markdown)s
- pygments %(pygments)s
- jinja2 %(jinja)s
""" % info
)
sublime.message_dialog(msg + '\nInfo has been copied to clipboard.')
sublime.set_clipboard(msg)
|
# -*- coding: utf-8 -*-
"""Support Information module.
The module provides functions to gain information to be included in issues.
It neither contains normal functionality nor is it used by GitGutter.
"""
import os
import subprocess
import textwrap
import sublime
import sublime_plugin
PACKAGE = os.path.basename(os.path.dirname(os.path.dirname(
os.path.abspath(__file__))))
def git(*args):
"""Read version of git binary."""
if os.name == 'nt':
startupinfo = subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
else:
startupinfo = None
proc = subprocess.Popen(
args=['git'] + [arg for arg in args], startupinfo=startupinfo,
stdout=subprocess.PIPE, stdin=subprocess.PIPE,
# run command in package directory if exists.
cwd='/'.join((sublime.packages_path(), PACKAGE)))
stdout, _ = proc.communicate()
return stdout.decode('utf-8').strip() if stdout else None
def git_version():
"""Read version of git binary."""
try:
return git('--version')
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
return 'git version could not be acquired!'
def gitgutter_version():
"""Read commit hash or version of GitGutter."""
try:
return git('rev-parse', 'HEAD')[:7]
except:
try:
return sublime.load_resource(
'Packages/%s/release_messages/dest/VERSION' % PACKAGE)
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
return 'GitGutter version could not be acquired!'
def module_version(module, attr):
"""Format the module version."""
try:
version = getattr(module, attr)
if callable(version):
version = version()
except Exception as exception:
print('%s: %s' % (PACKAGE, exception))
version = 'version could not be acquired!'
if not isinstance(version, str):
version = '.'.join((str(x) for x in version))
return version
def is_installed_by_package_control():
"""Check if installed by package control."""
settings = sublime.load_settings('Package Control.sublime-settings')
return str(PACKAGE in set(settings.get('installed_packages', [])))
class GitGutterSupportInfoCommand(sublime_plugin.ApplicationCommand):
"""Support Information Command."""
@staticmethod
def run():
"""Run command."""
info = {
'platform': sublime.platform(),
'st_version': sublime.version(),
'arch': sublime.arch(),
'package_version': gitgutter_version(),
'pc_install': is_installed_by_package_control(),
'git_version': git_version()
}
try:
import markdown
info['markdown'] = module_version(markdown, 'version')
except ImportError:
info['markdown'] = 'not installed!'
try:
import mdpopups
info['mdpopups'] = module_version(mdpopups, 'version')
except ImportError:
info['mdpopups'] = 'not installed!'
try:
import jinja2
info['jinja'] = module_version(jinja2, '__version__')
except ImportError:
info['jinja'] = 'not installed!'
try:
import pygments
info['pygments'] = module_version(pygments, '__version__')
except ImportError:
info['pygments'] = 'not installed!'
msg = textwrap.dedent(
"""\
- Sublime Text %(st_version)s
- Platform: %(platform)s
- Arch: %(arch)s
- GitGutter %(package_version)s
- Install via PC: %(pc_install)s
- %(git_version)s
- mdpopups %(mdpopups)s
- markdown %(markdown)s
- pygments %(pygments)s
- jinja2 %(jinja)s
""" % info
)
sublime.message_dialog(msg + '\nInfo has been copied to clipboard.')
sublime.set_clipboard(msg)
|
Python
| 0
|
3425c2c9d19c1d0a54dafde6cc70d571421c82a9
|
Fix string app import error for python 3.5
|
tests/test_config.py
|
tests/test_config.py
|
import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ImportError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
def test_ssl_config(certfile_and_keyfile):
certfile, keyfile = certfile_and_keyfile
config = Config(app=asgi_app, ssl_certfile=certfile.name, ssl_keyfile=keyfile.name)
config.load()
assert config.is_ssl is True
|
import logging
import socket
import pytest
from uvicorn import protocols
from uvicorn.config import Config
from uvicorn.middleware.debug import DebugMiddleware
from uvicorn.middleware.wsgi import WSGIMiddleware
async def asgi_app():
pass
def wsgi_app():
pass
def test_debug_app():
config = Config(app=asgi_app, debug=True)
config.load()
assert config.debug is True
assert isinstance(config.loaded_app, DebugMiddleware)
def test_wsgi_app():
config = Config(app=wsgi_app, interface="wsgi")
config.load()
assert isinstance(config.loaded_app, WSGIMiddleware)
assert config.interface == "wsgi"
def test_proxy_headers():
config = Config(app=asgi_app, proxy_headers=True)
config.load()
assert config.proxy_headers is True
def test_app_unimportable():
config = Config(app="no.such:app")
with pytest.raises(ModuleNotFoundError):
config.load()
def test_concrete_http_class():
config = Config(app=asgi_app, http=protocols.http.h11_impl.H11Protocol)
config.load()
assert config.http_protocol_class is protocols.http.h11_impl.H11Protocol
def test_logger():
logger = logging.getLogger("just-for-tests")
config = Config(app=asgi_app, logger=logger)
config.load()
assert config.logger is logger
def test_socket_bind():
config = Config(app=asgi_app)
config.load()
assert isinstance(config.bind_socket(), socket.socket)
def test_ssl_config(certfile_and_keyfile):
certfile, keyfile = certfile_and_keyfile
config = Config(app=asgi_app, ssl_certfile=certfile.name, ssl_keyfile=keyfile.name)
config.load()
assert config.is_ssl is True
|
Python
| 0.999224
|
1df8efb63333e89777820a96d78d5a59252b303d
|
Rename test specific to with gpg
|
tests/test_config.py
|
tests/test_config.py
|
import unittest
import figgypy.config
import sys
import os
class TestConfig(unittest.TestCase):
def test_config_load_with_gpg(self):
os.environ['FIGGY_GPG_HOME']='tests/resources/test-keys'
c = figgypy.config.Config('tests/resources/test-config.yaml')
self.assertEqual(c.db['host'], 'db.heck.ya')
self.assertEqual(c.db['pass'], 'test password')
if __name__ == '__main__':
unittest.main()
|
import unittest
import figgypy.config
import sys
import os
class TestConfig(unittest.TestCase):
def test_config_load(self):
os.environ['FIGGY_GPG_HOME']='tests/resources/test-keys'
c = figgypy.config.Config('tests/resources/test-config.yaml')
self.assertEqual(c.db['host'], 'db.heck.ya')
self.assertEqual(c.db['pass'], 'test password')
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
c5f44c9dda9905e9aa817c1945d49892e686f9cd
|
Fix failing test
|
tests/test_models.py
|
tests/test_models.py
|
# -*- coding: utf-8 -*-
import datetime as dt
import pytest
from doorman.models import Node, Pack, Query, Tag, FilePath
from .factories import NodeFactory, PackFactory, QueryFactory, TagFactory
@pytest.mark.usefixtures('db')
class TestNode:
def test_factory(self, db):
node = NodeFactory(host_identifier='foo')
db.session.commit()
assert node.node_key is not None
assert node.host_identifier == 'foo'
def test_tags(self):
tag = Tag.create(value='foo')
node = NodeFactory(host_identifier='foo')
node.tags.append(tag)
node.save()
assert tag in node.tags
def test_config(self):
node = NodeFactory(host_identifier='foo')
tag = Tag.create(value='foo')
node.tags.append(tag)
node.save()
query1 = Query.create(name='bar', sql='select * from osquery_info;')
query2 = Query.create(name='foobar', sql='select * from system_info;')
query2.tags.append(tag)
query2.save()
pack = Pack.create(name='baz')
pack.queries.append(query1)
pack.tags.append(tag)
pack.save()
file_path = FilePath.create(category='foobar', target_paths=[
'/home/foobar/%%',
])
file_path.tags.append(tag)
file_path.save()
assert tag in pack.tags
assert tag in query2.tags
assert tag in file_path.tags
assert tag not in query1.tags
assert query1 in pack.queries
assert query2 not in pack.queries
assert pack in node.packs
assert query2 in node.queries
assert query1 not in node.queries
config = node.get_config()
assert pack.name in config['packs']
assert query1.name in config['packs'][pack.name]['queries']
assert query1.sql == config['packs'][pack.name]['queries'][query1.name]['query']
assert query2.name not in config['packs']
assert query2.name in config['schedule']
assert query2.sql == config['schedule'][query2.name]['query']
assert file_path.category in config['file_paths']
@pytest.mark.usefixtures('db')
class TestQuery:
def test_factory(self, db):
query = QueryFactory(name='foobar', query='select * from foobar;')
db.session.commit()
assert query.name == 'foobar'
assert query.sql == 'select * from foobar;'
@pytest.mark.usefixtures('db')
class TestFilePath:
def test_create(self):
target_paths = [
'/root/.ssh/%%',
'/home/.ssh/%%',
]
file_path = FilePath.create(category='foobar', target_paths=target_paths)
assert file_path.to_dict() == {'foobar': target_paths}
target_paths.append('/etc/%%')
file_path.target_paths = '!!'.join(target_paths)
file_path.save()
assert file_path.to_dict() == {'foobar': target_paths}
|
# -*- coding: utf-8 -*-
import datetime as dt
import pytest
from doorman.models import Node, Pack, Query, Tag, FilePath
from .factories import NodeFactory, PackFactory, QueryFactory, TagFactory
@pytest.mark.usefixtures('db')
class TestNode:
def test_factory(self, db):
node = NodeFactory(host_identifier='foo')
db.session.commit()
assert node.node_key is not None
assert node.host_identifier == 'foo'
def test_tags(self):
tag = Tag.create(value='foo')
node = NodeFactory(host_identifier='foo')
node.tags.append(tag)
node.save()
assert tag in node.tags
def test_config(self):
node = NodeFactory(host_identifier='foo')
tag = Tag.create(value='foo')
node.tags.append(tag)
node.save()
query1 = Query.create(name='bar', sql='select * from osquery_info;')
query2 = Query.create(name='foobar', sql='select * from system_info;')
query2.tags.append(tag)
query2.save()
pack = Pack.create(name='baz')
pack.queries.append(query1)
pack.tags.append(tag)
pack.save()
file_path = FilePath.create(category='foobar', target_paths=[
'/home/foobar/%%',
])
file_path.tags.append(tag)
file_path.save()
assert tag in pack.tags
assert tag in query2.tags
assert tag in file_path.tags
assert tag not in query1.tags
assert query1 in pack.queries
assert query2 not in pack.queries
assert pack in node.packs
assert query2 in node.queries
assert query1 not in node.queries
config = node.get_config()
assert node.host_identifier == config['options']['host_identifier']
assert pack.name in config['packs']
assert query1.name in config['packs'][pack.name]['queries']
assert query1.sql == config['packs'][pack.name]['queries'][query1.name]['query']
assert query2.name not in config['packs']
assert query2.name in config['schedule']
assert query2.sql == config['schedule'][query2.name]['query']
assert file_path.category in config['file_paths']
@pytest.mark.usefixtures('db')
class TestQuery:
def test_factory(self, db):
query = QueryFactory(name='foobar', query='select * from foobar;')
db.session.commit()
assert query.name == 'foobar'
assert query.sql == 'select * from foobar;'
@pytest.mark.usefixtures('db')
class TestFilePath:
def test_create(self):
target_paths = [
'/root/.ssh/%%',
'/home/.ssh/%%',
]
file_path = FilePath.create(category='foobar', target_paths=target_paths)
assert file_path.to_dict() == {'foobar': target_paths}
target_paths.append('/etc/%%')
file_path.target_paths = '!!'.join(target_paths)
file_path.save()
assert file_path.to_dict() == {'foobar': target_paths}
|
Python
| 0.000209
|
66eddf04efd46fb3dbeae34c4d82f673a88be70f
|
Test the ability to add phone to the person
|
tests/test_person.py
|
tests/test_person.py
|
from copy import copy
from unittest import TestCase
from address_book import Person
class PersonTestCase(TestCase):
def test_get_groups(self):
pass
def test_add_address(self):
basic_address = ['Russian Federation, Kemerovo region, Kemerovo, Kirova street 23, apt. 42']
person = Person(
'John',
'Doe',
copy(basic_address),
['+79834772053'],
['john@gmail.com']
)
person.add_address('new address')
self.assertEqual(
person.addresses,
basic_address + ['new address']
)
def test_add_phone(self):
basic_phone = ['+79237778492']
person = Person(
'John',
'Doe',
copy(basic_phone),
['+79834772053'],
['john@gmail.com']
)
person.add_phone('+79234478810')
self.assertEqual(
person.addresses,
basic_phone + ['+79234478810']
)
def test_add_email(self):
pass
|
from copy import copy
from unittest import TestCase
from address_book import Person
class PersonTestCase(TestCase):
def test_get_groups(self):
pass
def test_add_address(self):
basic_address = ['Russian Federation, Kemerovo region, Kemerovo, Kirova street 23, apt. 42']
person = Person(
'John',
'Doe',
copy(basic_address),
['+79834772053'],
['john@gmail.com']
)
person.add_address('new address')
self.assertEqual(
person.addresses,
basic_address + ['new address']
)
def test_add_phone(self):
pass
def test_add_email(self):
pass
|
Python
| 0.000032
|
5017ee713fd03902aa502836654e1961fb7575f1
|
test form action url
|
tests/test_plugin.py
|
tests/test_plugin.py
|
from bs4 import BeautifulSoup
from cms.api import add_plugin
from cms.models import Placeholder
from django.core.urlresolvers import reverse
from django.test import TestCase
from cmsplugin_feedback.cms_plugins import FeedbackPlugin, \
DEFAULT_FORM_FIELDS_ID, DEFAULT_FORM_CLASS
from cmsplugin_feedback.forms import FeedbackMessageForm
class FeedbackPluginTests(TestCase):
def setUp(self):
self.placeholder = Placeholder.objects.create(slot='test')
def add_plugin(self, **kwargs):
model_instance = add_plugin(
self.placeholder,
FeedbackPlugin,
'en',
**kwargs)
return model_instance
def test_plugin_context(self):
model = self.add_plugin()
plugin = model.get_plugin_class_instance()
context = plugin.render({}, model, None)
self.assertIn('form', context)
self.assertIsInstance(context['form'], FeedbackMessageForm)
self.assertEqual(context['form'].auto_id, DEFAULT_FORM_FIELDS_ID)
self.assertIn('form_class', context)
self.assertEqual(context['form_class'], DEFAULT_FORM_CLASS)
def test_form_title(self):
title = 'Feedback Form'
plugin = self.add_plugin(title=title)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.h1.string, title)
def test_default_submit_button(self):
plugin = self.add_plugin()
self.assertTrue(plugin.submit)
default = plugin._meta.get_field_by_name('submit')[0].default
self.assertEqual(plugin.submit, default)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.find(type='submit').string, default)
def test_submit_button(self):
text = 'Send'
plugin = self.add_plugin(submit=text)
default = plugin._meta.get_field_by_name('submit')[0].default
self.assertNotEqual(text, default)
self.assertEqual(plugin.submit, text)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.find(type='submit').string, text)
def test_form_action_url(self):
plugin = self.add_plugin()
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(
soup.form['action'],
reverse('feedback-form', args=[plugin.id]))
|
from bs4 import BeautifulSoup
from cms.api import add_plugin
from cms.models import Placeholder
from django.test import TestCase
from cmsplugin_feedback.cms_plugins import FeedbackPlugin, \
DEFAULT_FORM_FIELDS_ID, DEFAULT_FORM_CLASS
from cmsplugin_feedback.forms import FeedbackMessageForm
class FeedbackPluginTests(TestCase):
def setUp(self):
self.placeholder = Placeholder.objects.create(slot='test')
def add_plugin(self, **kwargs):
model_instance = add_plugin(
self.placeholder,
FeedbackPlugin,
'en',
**kwargs)
return model_instance
def test_plugin_context(self):
model = self.add_plugin()
plugin = model.get_plugin_class_instance()
context = plugin.render({}, model, None)
self.assertIn('form', context)
self.assertIsInstance(context['form'], FeedbackMessageForm)
self.assertEqual(context['form'].auto_id, DEFAULT_FORM_FIELDS_ID)
self.assertIn('form_class', context)
self.assertEqual(context['form_class'], DEFAULT_FORM_CLASS)
def test_form_title(self):
title = 'Feedback Form'
plugin = self.add_plugin(title=title)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.h1.string, title)
def test_default_submit_button(self):
plugin = self.add_plugin()
self.assertTrue(plugin.submit)
default = plugin._meta.get_field_by_name('submit')[0].default
self.assertEqual(plugin.submit, default)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.find(type='submit').string, default)
def test_submit_button(self):
text = 'Send'
plugin = self.add_plugin(submit=text)
default = plugin._meta.get_field_by_name('submit')[0].default
self.assertNotEqual(text, default)
self.assertEqual(plugin.submit, text)
html = plugin.render_plugin({})
soup = BeautifulSoup(html)
self.assertEqual(soup.find(type='submit').string, text)
|
Python
| 0.000002
|
5e2b9410a7db019e4ad1056ec0a3d507374e5e4b
|
Make sure that get_user_config is called in replay.dump
|
tests/test_replay.py
|
tests/test_replay.py
|
# -*- coding: utf-8 -*-
"""
test_replay
-----------
"""
import json
import os
import pytest
from cookiecutter import replay, utils
from cookiecutter.config import get_user_config
@pytest.fixture
def replay_dir():
"""Fixture to return the expected replay directory."""
return os.path.expanduser('~/.cookiecutter_replay/')
def test_get_user_config(mocker, replay_dir):
"""Test that get_user_config holds the correct replay_dir."""
mocker.patch('os.path.exists', return_value=False)
config_dict = get_user_config()
assert 'replay_dir' in config_dict
assert config_dict['replay_dir'] == replay_dir
@pytest.fixture
def template_name():
"""Fixture to return a valid template_name."""
return 'cookiedozer'
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
def test_dump_value_error_if_no_template_name(context):
"""Test that replay.dump raises if the tempate_name is not a valid str."""
with pytest.raises(ValueError):
replay.dump(None, context)
def test_dump_type_error_if_not_dict_context(template_name):
"""Test that replay.dump raises if the context is not of type dict."""
with pytest.raises(TypeError):
replay.dump(template_name, 'not_a_dict')
@pytest.fixture
def cleanup_replay_dir(request, replay_dir):
"""Fixture to remove the replay_dir that is created by replay.dump."""
def remove_dir():
if os.path.isdir(replay_dir):
utils.rmtree(replay_dir)
request.addfinalizer(remove_dir)
@pytest.mark.usefixtures('cleanup_replay_dir')
def test_raise_if_replay_dir_creation_fails(
mocker, template_name, context, replay_dir):
"""Test that replay.dump raises when the replay_dir cannot be created."""
mock_ensure = mocker.patch(
'cookiecutter.replay.make_sure_path_exists',
return_value=False
)
with pytest.raises(IOError):
replay.dump(template_name, context)
mock_ensure.assert_called_once_with(replay_dir)
@pytest.mark.usefixtures('cleanup_replay_dir')
def test_run_json_dump(
mocker, template_name, context, replay_dir):
"""Test that replay.dump runs json.dump under the hood and that the context
is correctly written to the expected file in the replay_dir.
"""
spy_ensure = mocker.spy(
'cookiecutter.replay.make_sure_path_exists',
)
spy_json_dump = mocker.spy('json.dump')
mock_get_user_config = mocker.patch(
'cookiecutter.config.get_user_config',
return_value=replay_dir
)
replay.dump(template_name, context)
spy_ensure.assert_called_once_with(replay_dir)
assert spy_json_dump.called == 1
assert mock_get_user_config.called == 1
replay_dir = os.path.expanduser('~/.cookiecutter_replay/')
replay_file = os.path.join(replay_dir, template_name)
with open(replay_file, 'r') as f:
dumped_context = json.load(f)
assert dumped_context == context
|
# -*- coding: utf-8 -*-
"""
test_replay
-----------
"""
import json
import os
import pytest
from cookiecutter import replay, utils
from cookiecutter.config import get_user_config
@pytest.fixture
def replay_dir():
"""Fixture to return the expected replay directory."""
return os.path.expanduser('~/.cookiecutter_replay/')
def test_get_user_config(mocker, replay_dir):
"""Test that get_user_config holds the correct replay_dir."""
mocker.patch('os.path.exists', return_value=False)
config_dict = get_user_config()
assert 'replay_dir' in config_dict
assert config_dict['replay_dir'] == replay_dir
@pytest.fixture
def template_name():
"""Fixture to return a valid template_name."""
return 'cookiedozer'
@pytest.fixture
def context():
"""Fixture to return a valid context as known from a cookiecutter.json."""
return {
u'email': u'raphael@hackebrot.de',
u'full_name': u'Raphael Pierzina',
u'github_username': u'hackebrot',
u'version': u'0.1.0',
}
def test_dump_value_error_if_no_template_name(context):
"""Test that replay.dump raises if the tempate_name is not a valid str."""
with pytest.raises(ValueError):
replay.dump(None, context)
def test_dump_type_error_if_not_dict_context(template_name):
"""Test that replay.dump raises if the context is not of type dict."""
with pytest.raises(TypeError):
replay.dump(template_name, 'not_a_dict')
@pytest.fixture
def cleanup_replay_dir(request, replay_dir):
"""Fixture to remove the replay_dir that is created by replay.dump."""
def remove_dir():
if os.path.isdir(replay_dir):
utils.rmtree(replay_dir)
request.addfinalizer(remove_dir)
@pytest.mark.usefixtures('cleanup_replay_dir')
def test_raise_if_replay_dir_creation_fails(
mocker, template_name, context, replay_dir):
"""Test that replay.dump raises when the replay_dir cannot be created."""
mock_ensure = mocker.patch(
'cookiecutter.replay.make_sure_path_exists',
return_value=False
)
with pytest.raises(IOError):
replay.dump(template_name, context)
mock_ensure.assert_called_once_with(replay_dir)
@pytest.mark.usefixtures('cleanup_replay_dir')
def test_run_json_dump(
mocker, template_name, context, replay_dir):
"""Test that replay.dump runs json.dump under the hood and that the context
is correctly written to the expected file in the replay_dir.
"""
spy_ensure = mocker.spy(
'cookiecutter.replay.make_sure_path_exists',
)
spy_json_dump = mocker.spy('json.dump')
replay.dump(template_name, context)
spy_ensure.assert_called_once_with(replay_dir)
assert spy_json_dump.called == 1
replay_dir = os.path.expanduser('~/.cookiecutter_replay/')
replay_file = os.path.join(replay_dir, template_name)
with open(replay_file, 'r') as f:
dumped_context = json.load(f)
assert dumped_context == context
|
Python
| 0.000005
|
02bd3772fcf20d9dc54bd94c125c2efc6ae01537
|
Make sure structs are pickleable
|
tests/test_struct.py
|
tests/test_struct.py
|
#!/usr/bin/env python
"""
Contains various tests for the `Struct` class of the base module.
:file: StructTests.py
:date: 30/08/2015
:authors:
- Gilad Naaman <gilad@naaman.io>
"""
from .utils import *
#########################
# "Structs" for testing #
#########################
class SmallStruct(Struct):
only_element = u8
class SimpleStruct(Struct):
b_first_variable = u8(0xDE)
a_second_variable = u16(0xCAFE)
x_third_variable = u8(0xAD)
class ComplicatedStruct(Struct):
other_struct = SmallStruct
some_field = SimpleStruct[3]
numeric = u32
##############
# Test Cases #
##############
class StructTests(HydrasTestCase):
""" A testcase checking for a few of `Struct`'s features. """
def test_serialize_simple(self):
""" Test serialization of a simple struct. """
obj = SimpleStruct()
raw_data = bytes(obj)
self.assertEqual(raw_data, b'\xDE\xFE\xCA\xAD')
def test_one_does_not_complicatedly(self):
""" Test serialization and deserialization of a more complicated struct."""
s = ComplicatedStruct()
s.numeric = 0xAEAEAEAE
data = s.serialize()
# Test serialization.
self.assertEqual(data, b'\x00\xDE\xFE\xCA\xAD\xDE\xFE\xCA\xAD\xDE\xFE\xCA\xAD\xAE\xAE\xAE\xAE')
# Test deserialization.
d_s = ComplicatedStruct.deserialize(data)
self.assertEqual(d_s, s)
def test_dict_conversion(self):
d = dict(ComplicatedStruct())
expected_dict = {
'other_struct': {'only_element': 0},
'some_field': [
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
],
'numeric': 0
}
self.assertEqual(d, expected_dict)
def test_derived_struct(self):
class DerivedStruct(SimpleStruct):
derived = u8
class DerivedStructEmpty(SimpleStruct):
pass
simple = SimpleStruct()
derived = DerivedStruct()
empty = DerivedStructEmpty()
self.assertEqual(simple.serialize() + b'\x00', derived.serialize())
self.assertEqual(simple.serialize(), empty.serialize())
def test_invalid_multiple_derives(self):
class A(Struct):
a = u8
class B(Struct):
b = u8
with self.assertRaises(TypeError):
class C(A, B):
pass
def test_pickles(self):
import pickle
o = pickle.loads(pickle.dumps(SimpleStruct()))
self.assertEqual(o, SimpleStruct())
o = pickle.loads(pickle.dumps(ComplicatedStruct()))
self.assertEqual(o, ComplicatedStruct())
if __name__ == '__main__':
unittest.main()
|
#!/usr/bin/env python
"""
Contains various tests for the `Struct` class of the base module.
:file: StructTests.py
:date: 30/08/2015
:authors:
- Gilad Naaman <gilad@naaman.io>
"""
from .utils import *
#########################
# "Structs" for testing #
#########################
class SmallStruct(Struct):
only_element = u8
class SimpleStruct(Struct):
b_first_variable = u8(0xDE)
a_second_variable = u16(0xCAFE)
x_third_variable = u8(0xAD)
class ComplicatedStruct(Struct):
other_struct = SmallStruct
some_field = SimpleStruct[3]
numeric = u32
##############
# Test Cases #
##############
class StructTests(HydrasTestCase):
""" A testcase checking for a few of `Struct`'s features. """
def test_serialize_simple(self):
""" Test serialization of a simple struct. """
obj = SimpleStruct()
raw_data = bytes(obj)
self.assertEqual(raw_data, b'\xDE\xFE\xCA\xAD')
def test_one_does_not_complicatedly(self):
""" Test serialization and deserialization of a more complicated struct."""
s = ComplicatedStruct()
s.numeric = 0xAEAEAEAE
data = s.serialize()
# Test serialization.
self.assertEqual(data, b'\x00\xDE\xFE\xCA\xAD\xDE\xFE\xCA\xAD\xDE\xFE\xCA\xAD\xAE\xAE\xAE\xAE')
# Test deserialization.
d_s = ComplicatedStruct.deserialize(data)
self.assertEqual(d_s, s)
def test_dict_conversion(self):
d = dict(ComplicatedStruct())
expected_dict = {
'other_struct': {'only_element': 0},
'some_field': [
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
{'b_first_variable': 0xDE, 'a_second_variable': 0xCAFE, 'x_third_variable': 0xAD},
],
'numeric': 0
}
self.assertEqual(d, expected_dict)
def test_derived_struct(self):
class DerivedStruct(SimpleStruct):
derived = u8
class DerivedStructEmpty(SimpleStruct):
pass
simple = SimpleStruct()
derived = DerivedStruct()
empty = DerivedStructEmpty()
self.assertEqual(simple.serialize() + b'\x00', derived.serialize())
self.assertEqual(simple.serialize(), empty.serialize())
def test_invalid_multiple_derives(self):
class A(Struct):
a = u8
class B(Struct):
b = u8
with self.assertRaises(TypeError):
class C(A, B):
pass
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000225
|
28f6af7f84860535a1a82750df286f78320a6856
|
Fix monkeypatching
|
tests/test_things.py
|
tests/test_things.py
|
from __future__ import division
import stft
import numpy
import pytest
@pytest.fixture(params=[1, 2])
def channels(request):
return request.param
@pytest.fixture(params=[0, 1, 4])
def padding(request):
return request.param
@pytest.fixture(params=[2048])
def length(request):
return request.param
@pytest.fixture
def signal(channels, length):
return numpy.squeeze(numpy.random.random((length, channels)))
@pytest.fixture(params=[512])
def framelength(request):
return request.param
def test_shape(length, framelength):
a = numpy.squeeze(numpy.random.random((length, 1)))
x = stft.spectrogram(a, framelength=framelength, halved=True)
assert x.shape[0] == framelength / 2 + 1
x_2 = stft.spectrogram(a, framelength=framelength, halved=False)
assert x_2.shape[0] == framelength
def test_windowlength_errors():
"""
Test if way too short signals can be transformed
"""
siglen = 512
framelen = 2048
stft.spectrogram(numpy.random.random(siglen), framelength=framelen)
def test_precision(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.allclose(a, y)
def test_rms(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.sqrt(numpy.mean((a - y) ** 2)) < 1e-8
def test_maxdim():
a = numpy.random.random((512, 2, 2))
with pytest.raises(ValueError):
stft.spectrogram(a)
b = numpy.random.random((512, 2, 2, 3))
with pytest.raises(ValueError):
stft.ispectrogram(b)
def test_issue1():
a = numpy.random.random((512, 1))
b = stft.spectrogram(a)
assert b.ndim == 2
def raiser(*args):
raise AttributeError
def test_fallback(monkeypatch):
# Try monkeypatching signal.cosine away.
# Ignore AttributeErrors during monkeypatching, for older scipy versions
import scipy.signal
try:
monkeypatch.setattr("scipy.signal.cosine", raiser)
except Exception:
pass
return test_windowlength_errors()
|
from __future__ import division
import stft
import numpy
import pytest
@pytest.fixture(params=[1, 2])
def channels(request):
return request.param
@pytest.fixture(params=[0, 1, 4])
def padding(request):
return request.param
@pytest.fixture(params=[2048])
def length(request):
return request.param
@pytest.fixture
def signal(channels, length):
return numpy.squeeze(numpy.random.random((length, channels)))
@pytest.fixture(params=[512])
def framelength(request):
return request.param
def test_shape(length, framelength):
a = numpy.squeeze(numpy.random.random((length, 1)))
x = stft.spectrogram(a, framelength=framelength, halved=True)
assert x.shape[0] == framelength / 2 + 1
x_2 = stft.spectrogram(a, framelength=framelength, halved=False)
assert x_2.shape[0] == framelength
def test_windowlength_errors():
"""
Test if way too short signals can be transformed
"""
siglen = 512
framelen = 2048
stft.spectrogram(numpy.random.random(siglen), framelength=framelen)
def test_precision(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.allclose(a, y)
def test_rms(channels, padding, signal, framelength):
"""
Test if transform-inverse identity holds
"""
a = signal
x = stft.spectrogram(a, framelength=framelength, padding=padding)
y = stft.ispectrogram(x, framelength=framelength, padding=padding)
# Crop first and last frame
assert numpy.sqrt(numpy.mean((a - y) ** 2)) < 1e-8
def test_maxdim():
a = numpy.random.random((512, 2, 2))
with pytest.raises(ValueError):
stft.spectrogram(a)
b = numpy.random.random((512, 2, 2, 3))
with pytest.raises(ValueError):
stft.ispectrogram(b)
def test_issue1():
a = numpy.random.random((512, 1))
b = stft.spectrogram(a)
assert b.ndim == 2
def raiser(*args):
raise AttributeError
def test_fallback(monkeypatch):
# Try monkeypatching signal.cosine away.
# Ignore AttributeErrors during monkeypatching, for older scipy versions
try:
import scipy.signal
monkeypatch.setattr("scipy.signal.cosine", raiser)
except AttributeError:
pass
return test_windowlength_errors()
|
Python
| 0.000001
|
36200dea5889bdf4ad920adc1ab04ae3870f74ac
|
Edit varnet model (#5096)
|
tests/test_varnet.py
|
tests/test_varnet.py
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.apps.reconstruction.networks.nets.coil_sensitivity_model import CoilSensitivityModel
from monai.apps.reconstruction.networks.nets.complex_unet import ComplexUnet
from monai.apps.reconstruction.networks.nets.varnet import VariationalNetworkModel
from monai.networks import eval_mode
from tests.utils import test_script_save
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
coil_sens_model = CoilSensitivityModel(spatial_dims=2, features=[8, 16, 32, 64, 128, 8])
refinement_model = ComplexUnet(spatial_dims=2, features=[8, 16, 32, 64, 128, 8])
num_cascades = 2
TESTS = []
TESTS.append([coil_sens_model, refinement_model, num_cascades, (1, 3, 50, 50, 2), (1, 50, 50)]) # batch=1
TESTS.append([coil_sens_model, refinement_model, num_cascades, (2, 3, 50, 50, 2), (2, 50, 50)]) # batch=2
class TestVarNet(unittest.TestCase):
@parameterized.expand(TESTS)
def test_shape(self, coil_sens_model, refinement_model, num_cascades, input_shape, expected_shape):
net = VariationalNetworkModel(coil_sens_model, refinement_model, num_cascades).to(device)
mask_shape = [1 for _ in input_shape]
mask_shape[-2] = input_shape[-2]
mask = torch.zeros(mask_shape)
mask[..., mask_shape[-2] // 2 - 5 : mask_shape[-2] // 2 + 5, :] = 1
with eval_mode(net):
result = net(torch.randn(input_shape).to(device), mask.bool().to(device))
self.assertEqual(result.shape, expected_shape)
@parameterized.expand(TESTS)
def test_script(self, coil_sens_model, refinement_model, num_cascades, input_shape, expected_shape):
net = VariationalNetworkModel(coil_sens_model, refinement_model, num_cascades)
mask_shape = [1 for _ in input_shape]
mask_shape[-2] = input_shape[-2]
mask = torch.zeros(mask_shape)
mask[..., mask_shape[-2] // 2 - 5 : mask_shape[-2] // 2 + 5, :] = 1
test_data = torch.randn(input_shape)
test_script_save(net, test_data, mask.bool())
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import torch
from parameterized import parameterized
from monai.apps.reconstruction.networks.nets.coil_sensitivity_model import CoilSensitivityModel
from monai.apps.reconstruction.networks.nets.complex_unet import ComplexUnet
from monai.apps.reconstruction.networks.nets.varnet import VariationalNetworkModel
from monai.networks import eval_mode
from tests.utils import test_script_save
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
coil_sens_model = CoilSensitivityModel(spatial_dims=2, features=[8, 16, 32, 64, 128, 8])
refinement_model = ComplexUnet(spatial_dims=2, features=[8, 16, 32, 64, 128, 8])
num_cascades = 12
TESTS = []
TESTS.append([coil_sens_model, refinement_model, num_cascades, (1, 10, 300, 200, 2), (1, 300, 200)]) # batch=1
TESTS.append([coil_sens_model, refinement_model, num_cascades, (2, 10, 300, 200, 2), (2, 300, 200)]) # batch=2
class TestVarNet(unittest.TestCase):
@parameterized.expand(TESTS)
def test_shape(self, coil_sens_model, refinement_model, num_cascades, input_shape, expected_shape):
net = VariationalNetworkModel(coil_sens_model, refinement_model, num_cascades).to(device)
mask_shape = [1 for _ in input_shape]
mask_shape[-2] = input_shape[-2]
mask = torch.zeros(mask_shape)
mask[..., mask_shape[-2] // 2 - 5 : mask_shape[-2] // 2 + 5, :] = 1
with eval_mode(net):
result = net(torch.randn(input_shape).to(device), mask.byte().to(device))
self.assertEqual(result.shape, expected_shape)
@parameterized.expand(TESTS)
def test_script(self, coil_sens_model, refinement_model, num_cascades, input_shape, expected_shape):
net = VariationalNetworkModel(coil_sens_model, refinement_model, num_cascades)
mask_shape = [1 for _ in input_shape]
mask_shape[-2] = input_shape[-2]
mask = torch.zeros(mask_shape)
mask[..., mask_shape[-2] // 2 - 5 : mask_shape[-2] // 2 + 5, :] = 1
test_data = torch.randn(input_shape)
test_script_save(net, test_data, mask.byte())
if __name__ == "__main__":
unittest.main()
|
Python
| 0.000004
|
ddd3947514900d99bc644b8a791a92807bee4f2c
|
use mock
|
tests/test_worker.py
|
tests/test_worker.py
|
import functools
import unittest
from unittest import mock as um
from tornado import ioloop as ti, gen as tg
from acddl import worker
class TestWorker(unittest.TestCase):
def setUp(self):
self._worker = worker.Worker()
self._worker.start()
self.assertTrue(self._worker.is_alive())
def tearDown(self):
async_call(self._worker.stop)
self.assertFalse(self._worker.is_alive())
def testDo(self):
x = [1]
def fn():
x[0] = 2
async def fn_():
await self._worker.do(fn)
async_call(fn_)
self.assertEqual(x[0], 2)
class TestAsyncWorker(unittest.TestCase):
def setUp(self):
self._worker = worker.AsyncWorker()
self._worker.start()
self.assertTrue(self._worker.is_alive)
def tearDown(self):
async_call(self._worker.stop)
self.assertFalse(self._worker.is_alive)
def _createSyncMock(self):
return um.Mock(return_value=42)
def testDoWithSync(self):
fn = self._createSyncMock()
rv = async_call(self._worker.do, fn)
fn.assert_called_once_with()
self.assertEqual(rv, 42)
def testDoWithAsync(self):
x = [1]
async def fn():
await tg.moment
x[0] = 2
return 3
async def fn_():
return await self._worker.do(fn)
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoLaterWithSync(self):
x = [1]
def fn():
x[0] = 2
self._worker.do_later(fn)
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoLaterWithAsync(self):
x = [1]
async def fn():
await tg.moment
x[0] = 2
self._worker.do_later(fn)
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoWithSyncPartial(self):
x = [1]
def fn(rv):
x[0] = 2
return rv
async def fn_():
return await self._worker.do(functools.partial(fn, 3))
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoWithAsyncPartial(self):
x = [1]
async def fn(rv):
await tg.moment
x[0] = 2
return rv
async def fn_():
return await self._worker.do(worker.AsyncTask(fn, 3))
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoLaterWithSyncPartial(self):
x = [1]
def fn(v):
x[0] = v
self._worker.do_later(functools.partial(fn, 2))
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoLaterWithAsyncPartial(self):
x = [1]
async def fn(v):
await tg.moment
x[0] = v
self._worker.do_later(worker.AsyncTask(fn, 2))
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def async_call(fn, *args, **kwargs):
return ti.IOLoop.instance().run_sync(functools.partial(fn, *args, **kwargs))
|
import unittest
import functools
from tornado import ioloop as ti, gen as tg
from acddl import worker
class TestWorker(unittest.TestCase):
def setUp(self):
self._worker = worker.Worker()
self._worker.start()
self.assertTrue(self._worker.is_alive())
def tearDown(self):
async_call(self._worker.stop)
self.assertFalse(self._worker.is_alive())
def testDo(self):
x = [1]
def fn():
x[0] = 2
async def fn_():
await self._worker.do(fn)
async_call(fn_)
self.assertEqual(x[0], 2)
class TestAsyncWorker(unittest.TestCase):
def setUp(self):
self._worker = worker.AsyncWorker()
self._worker.start()
self.assertTrue(self._worker.is_alive)
def tearDown(self):
async_call(self._worker.stop)
self.assertFalse(self._worker.is_alive)
def testDoWithSync(self):
x = [1]
def fn():
x[0] = 2
return 3
async def fn_():
return await self._worker.do(fn)
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoWithAsync(self):
x = [1]
async def fn():
await tg.moment
x[0] = 2
return 3
async def fn_():
return await self._worker.do(fn)
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoLaterWithSync(self):
x = [1]
def fn():
x[0] = 2
self._worker.do_later(fn)
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoLaterWithAsync(self):
x = [1]
async def fn():
await tg.moment
x[0] = 2
self._worker.do_later(fn)
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoWithSyncPartial(self):
x = [1]
def fn(rv):
x[0] = 2
return rv
async def fn_():
return await self._worker.do(functools.partial(fn, 3))
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoWithAsyncPartial(self):
x = [1]
async def fn(rv):
await tg.moment
x[0] = 2
return rv
async def fn_():
return await self._worker.do(worker.AsyncTask(fn, 3))
rv = async_call(fn_)
self.assertEqual(x[0], 2)
self.assertEqual(rv, 3)
def testDoLaterWithSyncPartial(self):
x = [1]
def fn(v):
x[0] = v
self._worker.do_later(functools.partial(fn, 2))
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def testDoLaterWithAsyncPartial(self):
x = [1]
async def fn(v):
await tg.moment
x[0] = v
self._worker.do_later(worker.AsyncTask(fn, 2))
async_call(functools.partial(tg.sleep, 0.001))
self.assertEqual(x[0], 2)
def async_call(fn):
return ti.IOLoop.instance().run_sync(fn)
|
Python
| 0.000016
|
3e84dcb7b449db89ca6ce2b91b34a5e8f8428b39
|
Allow sub- and superscript tags
|
core/markdown.py
|
core/markdown.py
|
from markdown.extensions import nl2br, sane_lists, fenced_code
from pymdownx import magiclink
from mdx_unimoji import UnimojiExtension
import utils.markdown
markdown_extensions = [
magiclink.MagiclinkExtension(),
nl2br.Nl2BrExtension(),
utils.markdown.ExtendedLinkExtension(),
sane_lists.SaneListExtension(),
fenced_code.FencedCodeExtension(),
utils.markdown.CuddledListExtension(),
UnimojiExtension()
]
content_allowed_tags = (
# text
'p', 'em', 'strong', 'br', 'a', 'img', 'sub', 'sup',
# citation
'blockquote', 'cite',
# headings
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
# lists
'ol', 'ul', 'li',
# code
'pre', 'code'
)
content_allowed_attributes = {
'*': ['id', 'title'],
'a': ['href', 'title', 'data-component', 'data-grouplink-ref'],
'code': ['class'],
'img': ['src', 'alt']
}
|
from markdown.extensions import nl2br, sane_lists, fenced_code
from pymdownx import magiclink
from mdx_unimoji import UnimojiExtension
import utils.markdown
markdown_extensions = [
magiclink.MagiclinkExtension(),
nl2br.Nl2BrExtension(),
utils.markdown.ExtendedLinkExtension(),
sane_lists.SaneListExtension(),
fenced_code.FencedCodeExtension(),
utils.markdown.CuddledListExtension(),
UnimojiExtension()
]
content_allowed_tags = (
# text
'p', 'em', 'strong', 'br', 'a', 'img',
# citation
'blockquote', 'cite',
# headings
'h1', 'h2', 'h3', 'h4', 'h5', 'h6',
# lists
'ol', 'ul', 'li',
# code
'pre', 'code'
)
content_allowed_attributes = {
'*': ['id', 'title'],
'a': ['href', 'title', 'data-component', 'data-grouplink-ref'],
'code': ['class'],
'img': ['src', 'alt']
}
|
Python
| 0.000006
|
b3c55b059293d664d3e029b9c3d03203ff4af5a5
|
remove ws
|
resturo/tests/models.py
|
resturo/tests/models.py
|
from ..models import Organization as BaseOrganization
from ..models import Membership as BaseMembership
class Organization(BaseOrganization):
"""
"""
class Membership(BaseMembership):
""" Provide non-abstract implementation for Membership model,
define some roles
"""
ROLE_MEMBER = 1
|
from ..models import Organization as BaseOrganization
from ..models import Membership as BaseMembership
class Organization(BaseOrganization):
"""
"""
class Membership(BaseMembership):
""" Provide non-abstract implementation for Membership model,
define some roles
"""
ROLE_MEMBER = 1
|
Python
| 0.000054
|
32dd33126c9fa0076c8d7c9e8024a709674f8614
|
Bump Version 0.0.28 -> 0.0.29
|
threebot/__init__.py
|
threebot/__init__.py
|
# -*- encoding: utf-8 -*-
__version__ = '0.0.29'
|
# -*- encoding: utf-8 -*-
__version__ = '0.0.28'
|
Python
| 0
|
363583654998e404baba9b72860d2465bb3d339e
|
Remove convoluted meshgrid statement.
|
mplstyles/plots.py
|
mplstyles/plots.py
|
from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
import scipy.ndimage
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_smoothing=0,contour_opts={},label_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
if contour_smoothing != 0:
Z = scipy.ndimage.zoom(Z, contour_smoothing)
X, Y = np.meshgrid(x, y)
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, **label_opts)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
from matplotlib import cm
import matplotlib.pyplot as plt
from mplstyles import cmap as colormap
import numpy as np
import scipy.ndimage
def contour_image(x,y,Z,cmap=None,vmax=None,vmin=None,interpolation='nearest',contour_smoothing=0,contour_opts={},label_opts={},imshow_opts={},clegendlabels=[],label=False):
ax = plt.gca()
x_delta = float((x[-1]-x[0]))/(len(x)-1)/2.
y_delta = float((y[-1]-y[0]))/(len(y)-1)/2.
extent=(x[0],x[-1],y[0],y[-1])
extent_delta = (x[0]-x_delta,x[-1]+x_delta,y[0]-y_delta,y[-1]+y_delta)
ax.set_xlim(x[0],x[-1])
ax.set_ylim(y[0],y[-1])
if cmap is None:
cmap = colormap.reverse(cm.Blues)
Z = Z.transpose()
#plt.contourf(X,Y,self.pdata,interpolation=interpolation)
cs = ax.imshow(Z,interpolation=interpolation,origin='lower',aspect='auto',extent=extent_delta,cmap=cmap,vmax=vmax,vmin=vmin, **imshow_opts)
# Draw contours
if contour_smoothing != 0:
Z = scipy.ndimage.zoom(Z, contour_smoothing)
X, Y = np.meshgrid(np.linspace(x[0],x[-1],Z.shape[1]), np.linspace(y[0],y[-1],Z.shape[0]))
CS = ax.contour(X, Y, Z, extent=extent, origin='lower', **contour_opts )
# Label contours
if label:
ax.clabel(CS, **label_opts)
# Show contours in legend if desired
if len(clegendlabels) > 0:
for i in range(len(clegendlabels)):
CS.collections[i].set_label(clegendlabels[i])
#ax.legend()
return cs, CS
|
Python
| 0.000005
|
7177f7e0263d8a5f2adf458f9bfe33bff12137e0
|
fix syntax error
|
n_sided_polygon.py
|
n_sided_polygon.py
|
import turtle
import turtlehack
import random
# A function that draws an n-sided polygon
def n_sided_polygon(turtle, n, color="#FFFFFF", line_thickness=1):
'''
Draw an n-sided polygon
input: turtle, n, line_length
'''
# for n times:
# Draw a line, then turn 360/n degrees and draw another
# set initial parameters
turtle.degrees()
line_length=80
turtle.pensize(line_thickness)
turn_angle = (360/n)
i = 1
# Draw each line segment and turn
while (i <= n):
turtle.color(color)
turtle.pendown()
turtle.forward(line_length)
turtle.penup()
turtle.right(turn_angle)
i += 1
return 0
## MAIN ##
# set initial parameters
n=random.randint(3,12)
# create the Turle instance
graphic = turtle.Turtle()
# Call the polygon code
n_sided_polygon(graphic, n, turtlehack.random_color(), random.randint(4,8))
# Close and exit
ignore = raw_input("hit any key to continue:")
#graphic.done()
|
import turtle
import turtlehack
import random
# A function that draws an n-sided polygon
def n_sided_polygon(turtle, n, color="#FFFFFF", line_thickness=1):
'''
Draw an n-sided polygon
input: turtle, n, line_length
'''
# for n times:
# Draw a line, then turn 360/n degrees and draw another
# set initial parameters
turtle.degrees()
line_length=80
turtle.pensize(line_thickness)
turn_angle = (360/n)
i = 1
# Draw each line segment and turn
while (i <= n):
turtle.color(color)
turtle.pendown()
turtle.forward(line_length)
turtle.penup()
turtle.right(turn_angle)
i += 1
return 0
## MAIN ##
# set initial parameters
n=random.randint(3,12)
# create the Turle instance
graphic = turtle.Turtle()
turtlehack.n_sided_polygon(graphic, n, turtlehack.random_color(), random.randint(4,8))
ignore = input("hit any key to continue:")
graphic.done()
|
Python
| 0.000003
|
458d61ffb5161394f8080cea59716b2f9cb492f3
|
Add error message for not implemented error
|
nbgrader_config.py
|
nbgrader_config.py
|
c = get_config()
c.CourseDirectory.db_assignments = [dict(name="1", duedate="2019-12-09 17:00:00 UTC")]
c.CourseDirectory.db_students = [
dict(id="foo", first_name="foo", last_name="foo")
]
c.ClearSolutions.code_stub = {'python': '''##### Implement this part of the code #####
raise NotImplementedError("Code not implemented, follow the instructions.")'''}
|
c = get_config()
c.CourseDirectory.db_assignments = [dict(name="1", duedate="2019-12-09 17:00:00 UTC")]
c.CourseDirectory.db_students = [
dict(id="foo", first_name="foo", last_name="foo")
]
c.ClearSolutions.code_stub = {'python': '##### Implement this part of the code #####\nraise NotImplementedError()'}
|
Python
| 0.000001
|
1cae5cf5b2874eb2bafc9486d4873abfa1a58366
|
Add log_to_file method
|
toolsweb/__init__.py
|
toolsweb/__init__.py
|
# -*- coding: utf-8 -*-
import flask
import jinja2
import logging
import os.path
import oursql
def connect_to_database(database, host):
default_file = os.path.expanduser('~/replica.my.cnf')
if not os.path.isfile(default_file):
raise Exception('Database access not configured for this account!')
return oursql.connect(host=host, db=database,
read_default_file=default_file)
def connect_to_labsdb(project):
return connect_to_database(database=project + '_p',
host=project + '.labsdb')
def create_app(name, template_package=None, template_path=None,
log_file=None):
app = flask.Flask(name)
app_loader = app.jinja_loader
if template_package is not None:
app_loader = jinja2.PackageLoader(template_package)
elif template_path is not None:
app_loader = jinja2.FileSystemLoader(template_path)
app.jinja_loader = jinja2.ChoiceLoader([
app_loader,
jinja2.PackageLoader('toolsweb'),
])
return app
def log_to_file(app, log_file):
handler = logging.FileHandler(log_file)
app.logger.setLevel(logging.DEBUG)
app.logger.addHandler(handler)
|
# -*- coding: utf-8 -*-
import flask
import jinja2
import os.path
import oursql
def connect_to_database(database, host):
default_file = os.path.expanduser('~/replica.my.cnf')
if not os.path.isfile(default_file):
raise Exception('Database access not configured for this account!')
return oursql.connect(host=host, db=database,
read_default_file=default_file)
def connect_to_labsdb(project):
return connect_to_database(database=project + '_p',
host=project + '.labsdb')
def create_app(name, template_package=None, template_path=None):
app = flask.Flask(name)
app_loader = app.jinja_loader
if template_package is not None:
app_loader = jinja2.PackageLoader(template_package)
elif template_path is not None:
app_loader = jinja2.FileSystemLoader(template_path)
app.jinja_loader = jinja2.ChoiceLoader([
app_loader,
jinja2.PackageLoader('toolsweb'),
])
return app
|
Python
| 0.000008
|
cf8cc12b9a3bb4cfb550db1c75b1fa24db3c357d
|
{{{config.options}}} returns a list in some circumstances.
|
trac/tests/config.py
|
trac/tests/config.py
|
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
#
# Trac is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Trac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from trac.config import Configuration
import os
import tempfile
import time
import unittest
class ConfigurationTestCase(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(tempfile.gettempdir(), 'trac-test.ini')
configfile = open(self.filename, 'w')
configfile.close()
def tearDown(self):
os.remove(self.filename)
def test_default(self):
config = Configuration(self.filename)
self.assertEquals('', config.get('a', 'option'))
self.assertEquals('value', config.get('a', 'option', 'value'))
config.setdefault('a', 'option', 'value')
self.assertEquals('value', config.get('a', 'option'))
def test_read_and_get(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n', '\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals('x', config.get('a', 'option'))
self.assertEquals('x', config.get('a', 'option', 'y'))
def test_set_and_save(self):
configfile = open(self.filename, 'w')
configfile.close()
config = Configuration(self.filename)
config.set('a', 'option', 'x')
self.assertEquals('x', config.get('a', 'option'))
config.save()
configfile = open(self.filename, 'r')
self.assertEquals(['[a]\n', 'option = x\n', '\n'],
configfile.readlines())
configfile.close()
def test_sections(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n',
'[b]\n', 'option = y\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals(['a', 'b'], config.sections())
def test_options(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n',
'[b]\n', 'option = y\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals(('option', 'x'), iter(config.options('a')).next())
self.assertEquals(('option', 'y'), iter(config.options('b')).next())
def test_reparse(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n', '\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals('x', config.get('a', 'option'))
time.sleep(1) # needed because of low mtime granularity
configfile = open(self.filename, 'w')
configfile.write('[a]\noption = y')
configfile.close()
config.parse_if_needed()
self.assertEquals('y', config.get('a', 'option'))
def suite():
return unittest.makeSuite(ConfigurationTestCase, 'test')
if __name__ == '__main__':
unittest.main()
|
# -*- coding: iso8859-1 -*-
#
# Copyright (C) 2005 Edgewall Software
# Copyright (C) 2005 Christopher Lenz <cmlenz@gmx.de>
#
# Trac is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Trac is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Author: Christopher Lenz <cmlenz@gmx.de>
from trac.config import Configuration
import os
import tempfile
import time
import unittest
class ConfigurationTestCase(unittest.TestCase):
def setUp(self):
self.filename = os.path.join(tempfile.gettempdir(), 'trac-test.ini')
configfile = open(self.filename, 'w')
configfile.close()
def tearDown(self):
os.remove(self.filename)
def test_default(self):
config = Configuration(self.filename)
self.assertEquals('', config.get('a', 'option'))
self.assertEquals('value', config.get('a', 'option', 'value'))
config.setdefault('a', 'option', 'value')
self.assertEquals('value', config.get('a', 'option'))
def test_read_and_get(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n', '\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals('x', config.get('a', 'option'))
self.assertEquals('x', config.get('a', 'option', 'y'))
def test_set_and_save(self):
configfile = open(self.filename, 'w')
configfile.close()
config = Configuration(self.filename)
config.set('a', 'option', 'x')
self.assertEquals('x', config.get('a', 'option'))
config.save()
configfile = open(self.filename, 'r')
self.assertEquals(['[a]\n', 'option = x\n', '\n'],
configfile.readlines())
configfile.close()
def test_sections(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n',
'[b]\n', 'option = y\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals(['a', 'b'], config.sections())
def test_options(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n',
'[b]\n', 'option = y\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals(('option', 'x'), config.options('a').next())
self.assertEquals(('option', 'y'), config.options('b').next())
def test_reparse(self):
configfile = open(self.filename, 'w')
configfile.writelines(['[a]\n', 'option = x\n', '\n'])
configfile.close()
config = Configuration(self.filename)
self.assertEquals('x', config.get('a', 'option'))
time.sleep(1) # needed because of low mtime granularity
configfile = open(self.filename, 'w')
configfile.write('[a]\noption = y')
configfile.close()
config.parse_if_needed()
self.assertEquals('y', config.get('a', 'option'))
def suite():
return unittest.makeSuite(ConfigurationTestCase, 'test')
if __name__ == '__main__':
unittest.main()
|
Python
| 0.999999
|
9ff4fbcdf5b21d263e8b20abb0a3d0395ce28981
|
Document the reason for accepting only `POST` requests on `/wiki_render`, and allow `GET` requests from `TRAC_ADMIN` for testing purposes.
|
trac/wiki/web_api.py
|
trac/wiki/web_api.py
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
from trac.mimeview.api import Context
from trac.resource import Resource
from trac.web.api import IRequestHandler
from trac.wiki.formatter import format_to
class WikiRenderer(Component):
"""Wiki text renderer."""
implements(IRequestHandler)
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/wiki_render'
def process_request(self, req):
# Allow all POST requests (with a valid __FORM_TOKEN, ensuring that
# the client has at least some permission). Additionally, allow GET
# requests from TRAC_ADMIN for testing purposes.
if req.method != 'POST':
req.perm.require('TRAC_ADMIN')
realm = req.args.get('realm', 'wiki')
id = req.args.get('id')
version = req.args.get('version')
if version is not None:
try:
version = int(version)
except ValueError:
version = None
text = req.args.get('text', '')
flavor = req.args.get('flavor')
options = {}
if 'escape_newlines' in req.args:
options['escape_newlines'] = bool(req.args['escape_newlines'])
if 'shorten' in req.args:
options['shorten'] = bool(req.args['shorten'])
resource = Resource(realm, id=id, version=version)
context = Context.from_request(req, resource)
rendered = format_to(self.env, flavor, context, text, **options)
req.send(rendered.encode('utf-8'))
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2009 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
from trac.core import *
from trac.mimeview.api import Context
from trac.resource import Resource
from trac.web.api import IRequestHandler
from trac.wiki.formatter import format_to
class WikiRenderer(Component):
"""Wiki text renderer."""
implements(IRequestHandler)
# IRequestHandler methods
def match_request(self, req):
return req.path_info == '/wiki_render' and req.method == 'POST'
def process_request(self, req):
realm = req.args.get('realm', 'wiki')
id = req.args.get('id')
version = req.args.get('version')
if version is not None:
try:
version = int(version)
except ValueError:
version = None
text = req.args.get('text', '')
flavor = req.args.get('flavor')
options = {}
if 'escape_newlines' in req.args:
options['escape_newlines'] = bool(req.args['escape_newlines'])
if 'shorten' in req.args:
options['shorten'] = bool(req.args['shorten'])
resource = Resource(realm, id=id, version=version)
context = Context.from_request(req, resource)
rendered = format_to(self.env, flavor, context, text, **options)
req.send(rendered.encode('utf-8'))
|
Python
| 0
|
bb696f7c5b97563339f04206e649b54759fc9c6b
|
add transform for in__id to base get method
|
actions/lib/action.py
|
actions/lib/action.py
|
from st2actions.runners.pythonrunner import Action
import requests
__all__ = [
'NetboxBaseAction'
]
class NetboxBaseAction(Action):
"""Base Action for all Netbox API based actions
"""
def __init__(self, config):
super(NetboxBaseAction, self).__init__(config)
def get(self, endpoint_uri, **kwargs):
"""Make a get request to the API URI passed in
"""
self.logger.info("Calling base get with kwargs: {}".format(kwargs))
if self.config['use_https']:
url = 'https://'
else:
url = 'http://'
url = url + self.config['hostname'] + endpoint_uri
headers = {
'Authorization': 'Token ' + self.config['api_token'],
'Accept': 'application/json'
}
# transform `in__id` if present
if kwargs.get('in__id'):
kwargs['in__id'] = ','.join(kwargs['in__id'])
r = requests.get(url, verify=self.config['ssl_verify'], headers=headers, params=kwargs)
return {'raw': r.json()}
|
from st2actions.runners.pythonrunner import Action
import requests
__all__ = [
'NetboxBaseAction'
]
class NetboxBaseAction(Action):
"""Base Action for all Netbox API based actions
"""
def __init__(self, config):
super(NetboxBaseAction, self).__init__(config)
def get(self, endpoint_uri, **kwargs):
"""Make a get request to the API URI passed in
"""
self.logger.info("Calling base get with kwargs: {}".format(kwargs))
if self.config['use_https']:
url = 'https://'
else:
url = 'http://'
url = url + self.config['hostname'] + endpoint_uri
headers = {
'Authorization': 'Token ' + self.config['api_token'],
'Accept': 'application/json'
}
r = requests.get(url, verify=self.config['ssl_verify'], headers=headers, params=kwargs)
return {'raw': r.json()}
|
Python
| 0.000001
|
e1074fbc814b238a8d6d878810a8ac665a169f03
|
Fix template name in views
|
nomadblog/views.py
|
nomadblog/views.py
|
from django.views.generic import ListView, DetailView
from django.shortcuts import get_object_or_404
from django.conf import settings
from nomadblog.models import Blog, Category
from nomadblog import get_post_model
DEFAULT_STATUS = getattr(settings, 'PUBLIC_STATUS', 0)
POST_MODEL = get_post_model()
class NomadBlogMixin(object):
def dispatch(self, request, *args, **kwargs):
if self.kwargs.get('country_code'):
self.blog = get_object_or_404(Blog, countries__code__iexact=self.kwargs.get('country_code'), slug=self.kwargs.get('blog_slug'))
else:
self.blog = Blog.objects.get(slug=settings.DEFAULT_BLOG_SLUG)
return super(NomadBlogMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super(NomadBlogMixin, self).get_context_data(*args, **kwargs)
context['blog'] = self.blog
return context
class PostList(NomadBlogMixin, ListView):
model = POST_MODEL
template_name = 'nomadblog/post_list.html'
paginate_by = getattr(settings, 'POST_PAGINATE_BY', 25)
def get_queryset(self):
qs = super(PostList, self).get_queryset()
return qs.filter(bloguser__blog=self.blog).order_by('-pub_date')
class PostDetail(NomadBlogMixin, DetailView):
model = POST_MODEL
template_name = 'nomadblog/post_detail.html'
def get_object(self, queryset=None):
queryset = self.get_queryset().filter(bloguser__blog=self.blog)
return super(PostDetail, self).get_object(queryset)
class CategoriesList(NomadBlogMixin, ListView):
model = Category
paginate_by = getattr(settings, 'CATEGORY_PAGINATE_BY', 25)
class PostsByCategoryList(NomadBlogMixin, ListView):
model = POST_MODEL
template_name = 'nomadblog/post_list_by_category.html'
paginate_by = getattr(settings, 'POST_PAGINATE_BY', 25)
def get_queryset(self, *args, **kwargs):
qs = super(PostsByCategoryList, self).get_queryset()
self.category = get_object_or_404(Category, slug=self.kwargs.get('category_slug', ''))
return qs.filter(categories=self.category)
def get_context_data(self, *args, **kwargs):
context = super(PostsByCategoryList, self).get_context_data(*args, **kwargs)
context['category'] = self.category
return context
|
from django.views.generic import ListView, DetailView
from django.shortcuts import get_object_or_404
from django.conf import settings
from nomadblog.models import Blog, Category
from nomadblog import get_post_model
DEFAULT_STATUS = getattr(settings, 'PUBLIC_STATUS', 0)
POST_MODEL = get_post_model()
class NomadBlogMixin(object):
def dispatch(self, request, *args, **kwargs):
if self.kwargs.get('country_code'):
self.blog = get_object_or_404(Blog, countries__code__iexact=self.kwargs.get('country_code'), slug=self.kwargs.get('blog_slug'))
else:
self.blog = Blog.objects.get(slug=settings.DEFAULT_BLOG_SLUG)
return super(NomadBlogMixin, self).dispatch(request, *args, **kwargs)
def get_context_data(self, *args, **kwargs):
context = super(NomadBlogMixin, self).get_context_data(*args, **kwargs)
context['blog'] = self.blog
return context
class PostList(NomadBlogMixin, ListView):
model = POST_MODEL
paginate_by = getattr(settings, 'POST_PAGINATE_BY', 25)
def get_queryset(self):
qs = super(PostList, self).get_queryset()
return qs.filter(bloguser__blog=self.blog).order_by('-pub_date')
class PostDetail(NomadBlogMixin, DetailView):
model = POST_MODEL
def get_object(self, queryset=None):
queryset = self.get_queryset().filter(bloguser__blog=self.blog)
return super(PostDetail, self).get_object(queryset)
class CategoriesList(NomadBlogMixin, ListView):
model = Category
paginate_by = getattr(settings, 'CATEGORY_PAGINATE_BY', 25)
class PostsByCategoryList(NomadBlogMixin, ListView):
model = POST_MODEL
template_name = 'nomadblog/post_list_by_category.html'
paginate_by = getattr(settings, 'POST_PAGINATE_BY', 25)
def get_queryset(self, *args, **kwargs):
qs = super(PostsByCategoryList, self).get_queryset()
self.category = get_object_or_404(Category, slug=self.kwargs.get('category_slug', ''))
return qs.filter(categories=self.category)
def get_context_data(self, *args, **kwargs):
context = super(PostsByCategoryList, self).get_context_data(*args, **kwargs)
context['category'] = self.category
return context
|
Python
| 0
|
44161337282d14a48bde278b6e1669e8b3c94e4e
|
Bump version to 0.1.7
|
notify/__init__.py
|
notify/__init__.py
|
__version__ = "0.1.7"
|
__version__ = "0.1.6"
|
Python
| 0.000001
|
72a827b8cca6dc100e7f0d2d92e0c69aa67ec956
|
change name and docstring
|
apps/auth/iufOAuth.py
|
apps/auth/iufOAuth.py
|
from social.backends.oauth import BaseOAuth2
# see http://psa.matiasaguirre.net/docs/backends/implementation.html
class IUFOAuth2(BaseOAuth2):
"""IUF OAuth authentication backend"""
name = 'iuf'
AUTHORIZATION_URL = 'https://iufinc.org/login/oauth/authorize'
ACCESS_TOKEN_URL = 'https://iufinc.org/login/oauth/access_token'
SCOPE_SEPARATOR = ','
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Returns user details from IUF account"""
return {'username': response.get('user'),
'email': response.get('email') or '',
'first_name': response.get('first_name')}
|
from social.backends.oauth import BaseOAuth2
# see http://psa.matiasaguirre.net/docs/backends/implementation.html
class IUFOAuth2(BaseOAuth2):
"""Github OAuth authentication backend"""
name = 'github'
AUTHORIZATION_URL = 'https://iufinc.org/login/oauth/authorize'
ACCESS_TOKEN_URL = 'https://iufinc.org/login/oauth/access_token'
SCOPE_SEPARATOR = ','
EXTRA_DATA = [
('id', 'id'),
('expires', 'expires')
]
def get_user_details(self, response):
"""Returns user details from IUF account"""
return {'username': response.get('user'),
'email': response.get('email') or '',
'first_name': response.get('first_name')}
|
Python
| 0.000002
|
140f96ab4cddebd465ad2fdcca4560c683ca5770
|
add django-markdown url for tutorials app
|
oeplatform/urls.py
|
oeplatform/urls.py
|
"""oeplatform URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from oeplatform import settings
# This is used for Markdown forms in the tutorials app
from markdownx import urls as markdownx
handler500 = "base.views.handler500"
handler404 = "base.views.handler404"
urlpatterns = [
# This is used for Markdown forms in the tutorials app
url(r'^markdownx/', include(markdownx)),
url(r"^api/", include("api.urls")),
url(r"^", include("base.urls")),
url(r"^user/", include("login.urls")),
url(r"^factsheets/", include("modelview.urls")),
url(r"^dataedit/", include("dataedit.urls")),
url(r"^literature/", include("literature.urls")),
url(r"^ontology/", include("ontology.urls")),
url(r"^captcha/", include("captcha.urls")),
url(r"^tutorials/", include("tutorials.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
"""oeplatform URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from oeplatform import settings
handler500 = "base.views.handler500"
handler404 = "base.views.handler404"
urlpatterns = [
url(r"^api/", include("api.urls")),
url(r"^", include("base.urls")),
url(r"^user/", include("login.urls")),
url(r"^factsheets/", include("modelview.urls")),
url(r"^dataedit/", include("dataedit.urls")),
url(r"^literature/", include("literature.urls")),
url(r"^ontology/", include("ontology.urls")),
url(r"^captcha/", include("captcha.urls")),
url(r"^tutorials/", include("tutorials.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
Python
| 0
|
7c77a7b14432a85447ff74e7aa017ca56c86e662
|
Make api-tokens view exempt from CSRF checks
|
oidc_apis/views.py
|
oidc_apis/views.py
|
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from oidc_provider.lib.utils.oauth2 import protected_resource_view
from django.views.decorators.csrf import csrf_exempt
from .api_tokens import get_api_tokens_by_access_token
@csrf_exempt
@require_http_methods(['GET', 'POST'])
@protected_resource_view(['openid'])
def get_api_tokens_view(request, token, *args, **kwargs):
"""
Get the authorized API Tokens.
:type token: oidc_provider.models.Token
:rtype: JsonResponse
"""
api_tokens = get_api_tokens_by_access_token(token, request=request)
response = JsonResponse(api_tokens, status=200)
response['Access-Control-Allow-Origin'] = '*'
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
|
from django.http import JsonResponse
from django.views.decorators.http import require_http_methods
from oidc_provider.lib.utils.oauth2 import protected_resource_view
from .api_tokens import get_api_tokens_by_access_token
@require_http_methods(['GET', 'POST'])
@protected_resource_view(['openid'])
def get_api_tokens_view(request, token, *args, **kwargs):
"""
Get the authorized API Tokens.
:type token: oidc_provider.models.Token
:rtype: JsonResponse
"""
api_tokens = get_api_tokens_by_access_token(token, request=request)
response = JsonResponse(api_tokens, status=200)
response['Access-Control-Allow-Origin'] = '*'
response['Cache-Control'] = 'no-store'
response['Pragma'] = 'no-cache'
return response
|
Python
| 0.000001
|
0417707ab0dca78f0daa8aa3b9003913ba90bbac
|
Add length and highway attribute to the edges
|
osmABTS/network.py
|
osmABTS/network.py
|
"""
Road network formation
======================
The primary purpose of this model is to abstract a road connectivity network
from the complicated OSM raw GIS data. The network is going to be stored as a
NetworkX graph.
The nodes are going to be just the traffic junctions and the dead ends of the
road traffic system. And each node has the original id in the raw OSM data as
their identity, and the coordinate stored in the attribute ``coord``.
Each edge is going to be an undirected edge connecting the nodes. They all have
got the attribute of ``name`` for the name of the road, and the attribute of
``travel_time`` for the time needed to traverse the edge by a common traveller.
Also there is an attribute ``length`` for the length of the actual road and
attribute ``highway`` for the type of the road.
"""
import networkx as nx
from geopy.distance import vincenty
#
# Constants controlling the bahaviour of the code
# -----------------------------------------------
#
# if the ``highway`` key contains the follow value for a node in raw OSM, then
# it is considered a node in the network.
_NODES_TAGS = [
'traffic_signals',
'crossing',
'turning_circle',
'motorway_junction',
]
# The speed to travel on each kind of highways
# In miles per hour
_HIGHWAY_SPEEDS = {
'residential': 20.0,
'primary': 40.0,
'primary_link': 40.0,
'secondary': 35.0,
'tertiary': 30.0,
'footway': 35.0,
'service': 35.0,
'motorway': 70.0,
}
#
# Utility functions
# -----------------
#
def _test_if_node(node):
"""Tests if a node in the raw OSM data a node in the network"""
tags = node.tags
return 'highway' in tags and tags['highway'] in _NODES_TAGS
def _calc_distance(coord1, coord2):
"""Calculates the distance between two points
A shallow wrapper of the geopy Vicinty distance calculator, returns the
distance in miles.
"""
return vincenty(coord1, coord2).miles
#
# The driver function
# -------------------
#
def form_network_from_osm(raw_osm):
"""Forms a road network from the raw OSM data
:param raw_osm: A :py:class:`readosm.RawOSM` instance for the raw data
:returns: A networkX graph for the road connectivity
"""
net = nx.Graph()
# nodes formation
nodes = raw_osm.nodes
for node_id, node in nodes.iteritems():
if _test_if_node(node):
net.add_node(node_id)
net.node[node_id]['coord'] = node.coord
continue
# edge formation
for way in raw_osm.ways.itervalues():
# test if it is actually a road
tags = way.tags
if 'highway' in tags:
highway = tags['highway']
else:
continue # building or something like that
# connect the nodes in the network
prev_node_id = None # The previous node in the network
# The coordinate of the previous raw node in the OSM data
prev_coord = way.nodes[0].coord
distance = 0.0
for node_id in way.nodes:
node = nodes[node_id]
# Update the distance
curr_coord = node.coord
distance += _calc_distance(curr_coord, prev_coord)
prev_coord = curr_coord
if _test_if_node(node):
# add edge if there is a previous node
if prev_node_id is not None:
# Add the new edge
try:
travel_time = distance / _HIGHWAY_SPEEDS[highway]
except IndexError:
raise IndexError(
'Unknown highway type %s' % highway
)
net.add_edge(
node_id, prev_node_id,
travel_time=travel_time, length=distance,
highway=highway, name=tags.get('name', '')
)
# Update previous node no matter there is a previous one
prev_node_id = node_id
distance = 0.0
return net
|
"""
Road network formation
======================
The primary purpose of this model is to abstract a road connectivity network
from the complicated OSM raw GIS data. The network is going to be stored as a
NetworkX graph.
The nodes are going to be just the traffic junctions and the dead ends of the
road traffic system. And each node has the original id in the raw OSM data as
their identity, and the coordinate stored in the attribute ``coord``.
Each edge is going to be an undirected edge connecting the nodes. They all have
got the attribute of ``name`` for the name of the road, and the attribute of
``travel_time`` for the time needed to traverse the edge by a common traveller.
"""
import networkx as nx
from geopy.distance import vincenty
#
# Constants controlling the bahaviour of the code
# -----------------------------------------------
#
# if the ``highway`` key contains the follow value for a node in raw OSM, then
# it is considered a node in the network.
_NODES_TAGS = [
'traffic_signals',
'crossing',
'turning_circle',
'motorway_junction',
]
# The speed to travel on each kind of highways
# In miles per hour
_HIGHWAY_SPEEDS = {
'residential': 20.0,
'primary': 40.0,
'primary_link': 40.0,
'secondary': 35.0,
'tertiary': 30.0,
'footway': 35.0,
'service': 35.0,
'motorway': 70.0,
}
#
# Utility functions
# -----------------
#
def _test_if_node(node):
"""Tests if a node in the raw OSM data a node in the network"""
tags = node.tags
return 'highway' in tags and tags['highway'] in _NODES_TAGS
def _calc_distance(coord1, coord2):
"""Calculates the distance between two points
A shallow wrapper of the geopy Vicinty distance calculator, returns the
distance in miles.
"""
return vincenty(coord1, coord2).miles
#
# The driver function
# -------------------
#
def form_network_from_osm(raw_osm):
"""Forms a road network from the raw OSM data
:param raw_osm: A :py:class:`readosm.RawOSM` instance for the raw data
:returns: A networkX graph for the road connectivity
"""
net = nx.Graph()
# nodes formation
nodes = raw_osm.nodes
for node_id, node in nodes.iteritems():
if _test_if_node(node):
net.add_node(node_id)
net.node[node_id]['coord'] = node.coord
continue
# edge formation
for way in raw_osm.ways.itervalues():
# test if it is actually a road
tags = way.tags
if 'highway' in tags:
highway = tags['highway']
else:
continue # building or something like that
# connect the nodes in the network
prev_node_id = None # The previous node in the network
# The coordinate of the previous raw node in the OSM data
prev_coord = way.nodes[0].coord
distance = 0.0
for node_id in way.nodes:
node = nodes[node_id]
# Update the distance
curr_coord = node.coord
distance += _calc_distance(curr_coord, prev_coord)
prev_coord = curr_coord
if _test_if_node(node):
# add edge if there is a previous node
if prev_node_id is not None:
# Add the new edge
try:
travel_time = distance / _HIGHWAY_SPEEDS[highway]
except IndexError:
raise IndexError(
'Unknown highway type %s' % highway
)
net.add_edge(
node_id, prev_node_id,
travel_time=travel_time,
name=tags.get('name', '')
)
# Update previous node no matter there is a previous one
prev_node_id = node_id
distance = 0.0
return net
|
Python
| 0
|
7e9dd7469f88d676959141534809b0bc10fc9a66
|
Print newline on de-initialization.
|
picotui/context.py
|
picotui/context.py
|
from .screen import Screen
class Context:
def __init__(self, cls=True, mouse=True):
self.cls = cls
self.mouse = mouse
def __enter__(self):
Screen.init_tty()
if self.mouse:
Screen.enable_mouse()
if self.cls:
Screen.cls()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.mouse:
Screen.disable_mouse()
Screen.goto(0, 50)
Screen.cursor(True)
Screen.deinit_tty()
# This makes sure that entire screenful is scrolled up, and
# any further output happens on a normal terminal line.
print()
|
from .screen import Screen
class Context:
def __init__(self, cls=True, mouse=True):
self.cls = cls
self.mouse = mouse
def __enter__(self):
Screen.init_tty()
if self.mouse:
Screen.enable_mouse()
if self.cls:
Screen.cls()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.mouse:
Screen.disable_mouse()
Screen.goto(0, 50)
Screen.cursor(True)
Screen.deinit_tty()
|
Python
| 0
|
1a8ab29c9f7a02730cababc077f196f9b21e26d4
|
Use own repo slug by default for Bitbucket.deploy_key.all() .
|
bitbucket/deploy_key.py
|
bitbucket/deploy_key.py
|
# -*- coding: utf-8 -*-
URLS = {
# deploy keys
'GET_DEPLOY_KEYS': 'repositories/%(username)s/%(repo_slug)s/deploy-keys',
'SET_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-keys',
'GET_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-key/%(key_id)s',
'DELETE_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-key/%(key_id)s',
}
class DeployKey(object):
""" This class provide services-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self, repo_slug=None):
""" Get all ssh keys associated with a repo
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_DEPLOY_KEYS',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, repo_slug=None, key_id=None):
""" Get one of the ssh keys associated with this repo
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, repo_slug=None, key=None, label=None):
""" Associate an ssh key with your repo and return it.
"""
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('POST',
url,
auth=self.bitbucket.auth,
key=key,
label=label)
def delete(self, repo_slug=None, key_id=None):
""" Delete one of the ssh keys associated with your repo.
Please use with caution as there is NO confimation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
# -*- coding: utf-8 -*-
URLS = {
# deploy keys
'GET_DEPLOY_KEYS': 'repositories/%(username)s/%(repo_slug)s/deploy-keys',
'SET_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-keys',
'GET_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-key/%(key_id)s',
'DELETE_DEPLOY_KEY': 'repositories/%(username)s/%(repo_slug)s/deploy-key/%(key_id)s',
}
class DeployKey(object):
""" This class provide services-related methods to Bitbucket objects."""
def __init__(self, bitbucket):
self.bitbucket = bitbucket
self.bitbucket.URLS.update(URLS)
def all(self, repo_slug=None):
""" Get all ssh keys associated with a repo
"""
url = self.bitbucket.url('GET_DEPLOY_KEYS',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def get(self, repo_slug=None, key_id=None):
""" Get one of the ssh keys associated with this repo
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('GET_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('GET', url, auth=self.bitbucket.auth)
def create(self, repo_slug=None, key=None, label=None):
""" Associate an ssh key with your repo and return it.
"""
key = '%s' % key
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('SET_DEPLOY_KEY',
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('POST',
url,
auth=self.bitbucket.auth,
key=key,
label=label)
def delete(self, repo_slug=None, key_id=None):
""" Delete one of the ssh keys associated with your repo.
Please use with caution as there is NO confimation and NO undo.
"""
repo_slug = repo_slug or self.bitbucket.repo_slug or ''
url = self.bitbucket.url('DELETE_DEPLOY_KEY',
key_id=key_id,
username=self.bitbucket.username,
repo_slug=repo_slug)
return self.bitbucket.dispatch('DELETE', url, auth=self.bitbucket.auth)
|
Python
| 0
|
6650e5898ca058d1dc8494dbc3d0ba2e2d8c1e4c
|
Compute the distance between two points on the globe and determine if air travel is possible between them in the time between when localities were recorded
|
alerts/geomodel/alert.py
|
alerts/geomodel/alert.py
|
from datetime import datetime
import math
from operator import attrgetter
from typing import List, NamedTuple, Optional
import netaddr
from alerts.geomodel.config import Whitelist
from alerts.geomodel.locality import State, Locality
_AIR_TRAVEL_SPEED = 1000.0 # km/h
_EARTH_RADIUS = 6373.0 # km # approximate
_DEFAULT_SUMMARY = 'Authenticated action taken by a user outside of any of '\
'their known localities.'
# TODO: Switch to dataclasses when we move to Python3.7+
class Origin(NamedTuple):
'''A description of a location.
'''
city: str
country: str
latitude: float
longitude: float
geopoint: str
class Alert(NamedTuple):
'''A container for the data the alerts output by GeoModel contain.
'''
source: str
category: str
type_: str
username: str
sourceipaddress: str
timestamp: datetime
origin: Origin
tags: List[str]
summary: str
def new(
username: str,
sourceip: str,
origin: Origin,
summary: str = _DEFAULT_SUMMARY
) -> 'Alert':
'''Produce a new `Alert` with default values filled.
'''
return Alert(
source='geomodel',
category='geomodel',
type_='geomodel',
username=username,
sourceipaddress=sourceip,
timestamp=datetime.now(),
origin=origin,
tags=['geomodel'],
summary=summary)
def _travel_possible(loc1: Locality, loc2: Locality) -> bool:
'''Given two localities, determine whether it would be possible for a user
to have travelled from the former to the latter in the time between when the
actions took place.
'''
lat1 = math.radians(loc1.latitude)
lat2 = math.radians(loc2.latitude)
lon1 = math.radians(loc1.longitude)
lon2 = math.radians(loc2.longitude)
dlat = lat2 - lat1
dlon = lon2 - lon1
a = math.sin(dlat / 2.0) ** 2 +\
math.cos(lat1) * math.cos(lat2) * math.sin(dlon / 2.0) ** 2
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
distance = c * _EARTH_RADIUS
seconds_between = (loc2.lastaction - loc1.lastaction).total_seconds()
hours_between = math.ceil(seconds_between / 60.0 / 60.0)
# We pad the time with an hour to account for things like planes being
# slowed, network delays, etc.
return (distance / _AIR_TRAVEL_SPEED) <= (hours_between - 1)
def alert(user_state: State, whitelist: Whitelist) -> Optional[Alert]:
'''Determine whether an alert should fired given a particular user's
locality state. If an alert should fire, an `Alert` is returned, otherwise
this function returns `None`.
'''
ignore_cidrs = [netaddr.IPSet([cidr]) for cidr in whitelist.cidrs]
if user_state.username in whitelist.users:
return None
locs_to_consider = []
for loc in sorted(user_state.localities, key=attrgetter('lastaction')):
ip = netaddr.IPAddress(loc.sourceipaddress)
if all([ip not in cidr for cidr in ignore_cidrs]):
locs_to_consider.append(loc)
if len(locs_to_consider) < 2:
return None
locations = locs_to_consider[-2:]
if _travel_possible(*locations):
return None
(ip, city, country, lat, lon) = (
locations[1].sourceipaddress,
locations[1].city,
locations[1].country,
locations[1].latitude,
locations[1].longitude
)
return Alert.new(
user_state.username,
ip,
Origin(city, country, lat, lon, ''))
|
from datetime import datetime
from operator import attrgetter
from typing import List, NamedTuple, Optional
import netaddr
from alerts.geomodel.config import Whitelist
from alerts.geomodel.locality import State, Locality
_DEFAULT_SUMMARY = 'Authenticated action taken by a user outside of any of '\
'their known localities.'
# TODO: Switch to dataclasses when we move to Python3.7+
class Origin(NamedTuple):
'''A description of a location.
'''
city: str
country: str
latitude: float
longitude: float
geopoint: str
class Alert(NamedTuple):
'''A container for the data the alerts output by GeoModel contain.
'''
source: str
category: str
type_: str
username: str
sourceipaddress: str
timestamp: datetime
origin: Origin
tags: List[str]
summary: str
def new(
username: str,
sourceip: str,
origin: Origin,
summary: str = _DEFAULT_SUMMARY
) -> 'Alert':
'''Produce a new `Alert` with default values filled.
'''
return Alert(
source='geomodel',
category='geomodel',
type_='geomodel',
username=username,
sourceipaddress=sourceip,
timestamp=datetime.now(),
origin=origin,
tags=['geomodel'],
summary=summary)
def _travel_possible(loc1: Locality, loc2: Locality) -> bool:
'''Given two localities, determine whether it would be possible for a user
to have travelled from the former to the latter in the time between when the
actions took place.
'''
return False
def alert(user_state: State, whitelist: Whitelist) -> Optional[Alert]:
'''Determine whether an alert should fired given a particular user's
locality state. If an alert should fire, an `Alert` is returned, otherwise
this function returns `None`.
'''
ignore_cidrs = [netaddr.IPSet([cidr]) for cidr in whitelist.cidrs]
if user_state.username in whitelist.users:
return None
locs_to_consider = []
for loc in sorted(user_state.localities, key=attrgetter('lastaction')):
ip = netaddr.IPAddress(loc.sourceipaddress)
if all([ip not in cidr for cidr in ignore_cidrs]):
locs_to_consider.append(loc)
if len(locs_to_consider) < 2:
return None
locations = locs_to_consider[-2:]
if _travel_possible(*locations):
return None
(ip, city, country, lat, lon) = (
locations[1].sourceipaddress,
locations[1].city,
locations[1].country,
locations[1].latitude,
locations[1].longitude
)
return Alert.new(
user_state.username,
ip,
Origin(city, country, lat, lon, ''))
|
Python
| 0.998967
|
451c821118eff98d7e92b3a3f46b1a76048abbb5
|
add wiki canned response
|
androiddev_bot/config.py
|
androiddev_bot/config.py
|
import praw
# Put your vars here
suspect_title_strings = ['?', 'help', 'stuck', 'why', 'my', 'feedback']
subreddit = 'androiddev'
# Canned responses
cans = {
'questions_thread': "Removed because, per sub rules, this doesn't merit its own post. We have a questions thread every day, please use it for questions like this.",
'rules': 'Removed because posts like this are against the sub rules.',
'wiki': "Removed because relevant information can be found in the /r/androiddev [wiki](https://www.reddit.com/r/androiddev/wiki/index)"
}
# Specify the keyword and what days they should be removed
weekly_threads = {
'anything': {
'day': 'Saturday',
'name': 'Weekly \"anything goes\"'
},
'hiring': {
'day': 'Monday',
'name': 'Weekly \"who\'s hiring?\"'
}
}
flair_mapping = {
'Library': 'library',
'Discussion': 'discussion',
'News': 'news',
'Tech Talk': 'talk',
}
def post_is_suspicious(post_to_check: praw.objects.Submission) -> bool:
"""
A function that can be passed a submission to check against and return whether or not it's "suspicious" or otherwise
deserving of closer attention.
:type post_to_check: praw.objects.Submission
:rtype : bool
:param post_to_check: The Submission instance to check
:return: True if suspicious, False if now
"""
return \
any(word in post_to_check.title.lower() for word in suspect_title_strings) \
or post_to_check.domain == 'stackoverflow.com' \
or (post_to_check.selftext and 'stackoverflow' in post_to_check.selftext.lower()) \
or (post_to_check.selftext_html and any(block in post_to_check.selftext_html for block in ['<code', '%3Ccode']))
|
import praw
# Put your vars here
suspect_title_strings = ['?', 'help', 'stuck', 'why', 'my', 'feedback']
subreddit = 'androiddev'
# Canned responses
cans = {
'questions_thread': "Removed because, per sub rules, this doesn't merit its own post. We have a questions thread every day, please use it for questions like this.",
'rules': 'Removed because posts like this are against the sub rules.'
}
# Specify the keyword and what days they should be removed
weekly_threads = {
'anything': {
'day': 'Saturday',
'name': 'Weekly \"anything goes\"'
},
'hiring': {
'day': 'Monday',
'name': 'Weekly \"who\'s hiring?\"'
}
}
flair_mapping = {
'Library': 'library',
'Discussion': 'discussion',
'News': 'news',
'Tech Talk': 'talk',
}
def post_is_suspicious(post_to_check: praw.objects.Submission) -> bool:
"""
A function that can be passed a submission to check against and return whether or not it's "suspicious" or otherwise
deserving of closer attention.
:type post_to_check: praw.objects.Submission
:rtype : bool
:param post_to_check: The Submission instance to check
:return: True if suspicious, False if now
"""
return \
any(word in post_to_check.title.lower() for word in suspect_title_strings) \
or post_to_check.domain == 'stackoverflow.com' \
or (post_to_check.selftext and 'stackoverflow' in post_to_check.selftext.lower()) \
or (post_to_check.selftext_html and any(block in post_to_check.selftext_html for block in ['<code', '%3Ccode']))
|
Python
| 0
|
e5dcea13a27b90f89469518386a1748f3e141b5b
|
Improve docs of doQuery.py file.
|
app/lib/query/doQuery.py
|
app/lib/query/doQuery.py
|
# -*- coding: utf-8 -*-
"""
Receive SQL query in stdin, send to configured database file, then return
the query result rows.
Note that db queries don't have to done through python like this,
but can be done in SQL directly. For example:
$ sqlite3 path/to/db -csv -header < path/to/query > path/to/report
Usage:
## methods of input:
# Pipe text to the script.
$ echo "SELECT * FROM Trend LIMIT 10" | python -m lib.query.doQuery
# Redirect text from .sql file to the script.
$ python -m lib.query.doQuery --csv < lib/query/sql/abc.sql \
> var/reporting/abc.csv
# Enter an ad hoc query in lines of stdin.
$ python -m lib.query.doQuery <enter>
SELECT *
FROM Trend LIMIT 10;
<ctrl+D>
## methods to ouput:
# Print to console
$ python -m lib.query.doQuery < abc.sql
# Write to CSV
$ python -m lib.query.doQuery --csv < abc.sql > abc.csv
TODO
Test printing with u'\xed' character
"""
import sys
from lib import database as db
def CSVFormat(cell):
"""
Remove double-quotes from a string and if there is a comma then returns
value enclosed in double-quotes (ideal for outputting to CSV).
Null values are returned as an empty string.
TODO: If the data required in more than just a trending topic
(e.g. user tweets) then it may be better to use the CSV module instead.
@param cell: any python object representing a cell value from a table row.
@return: stringified version of the input cell value, with CSV
formatting applied.
"""
if cell is None:
return ''
else:
phrase = str(cell)
# Remove double-quotes.
phrase = phrase.replace('"', "'")
# Add quotes if there is a comma.
phrase = '"{}"'.format(phrase) if ',' in phrase else phrase
return phrase
def main(args, query=None):
"""
Receive a SQL query as a string and execute then print results to stdout.
"""
if set(args) & set(('-h', '--help')):
print 'Usage: python -m lib.query.sql.doQuery [-c|--csv]'\
' [-s|--summary] [-h|--help]'
print ' A query is required in stdin.'
print 'Options and arguments:'
print '--help : show help.'
print '--csv : default behaviour is print rows as tuples. The CSV'
print ' flags makes results return in a format ideal for'
print ' writing out to a CSV file. i.e. comma separate'
print ' values without tuple brackets and quoting any'
print ' strings containing a comma. Headers are still'
print ' excluded.'
print '--summary : print only count of rows returned.'
else:
if not query:
query = sys.stdin.read()
if not query:
raise ValueError('Database query is required as stdin.')
results = db.conn.queryAll(query)
if set(args) & set(('-s', '--summary')):
print len(results)
elif set(args) & set(('-c', '--csv')):
for row in results:
# Any unicode characters will be lost (replaced with
# question marks) by converting to str.
rowStr = (CSVFormat(c) for c in row)
print ','.join(rowStr)
else:
for row in results:
print row
if __name__ == '__main__':
main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
"""
Receive SQL query in stdin, send to configured database file, then return
the query result rows.
Usage:
## methods of input:
# Pipe text to the script.
$ echo "SELECT * FROM Trend LIMIT 10" | python -m lib.query.doQuery
# Redirect text from .sql file to the script.
$ python -m lib.query.doQuery --csv < lib/query/sql/abc.sql \
> var/reporting/abc.csv
# Enter an ad hoc query in lines of stdin.
$ python -m lib.query.doQuery <enter>
SELECT *
FROM Trend LIMIT 10;
<ctrl+D>
## methods to ouput:
# Print to console
$ $ python -m lib.query.doQuery < abc.sql
# Write to CSV
$ python -m lib.query.doQuery --csv < abc.sql > abc.csv
TODO
Test printing with u'\xed' character
"""
import sys
from lib import database as db
def CSVFormat(cell):
"""
Remove double-quotes from a string and if there is a comma then returns
value enclosed in double-quotes (ideal for outputting to CSV).
Null values are returned as an empty string.
TODO: If the data required in more than just a trending topic
(e.g. user tweets) then it may be better to use the CSV module instead.
@param cell: any python object representing a cell value from a table row.
@return: stringified version of the input cell value, with CSV
formatting applied.
"""
if cell is None:
return ''
else:
phrase = str(cell)
# Remove double-quotes.
phrase = phrase.replace('"', "'")
# Add quotes if there is a comma.
phrase = '"{}"'.format(phrase) if ',' in phrase else phrase
return phrase
def main(args, query=None):
"""
Receive a SQL query as a string and execute then print results to stdout.
"""
if set(args) & set(('-h', '--help')):
print 'Usage: python -m lib.query.sql.doQuery [-c|--csv]'\
' [-s|--summary] [-h|--help]'
print ' A query is required in stdin.'
print 'Options and arguments:'
print '--help : show help.'
print '--csv : default behaviour is print rows as tuples. The CSV'
print ' flags makes results return in a format ideal for'
print ' writing out to a CSV file. i.e. comma separate'
print ' values without tuple brackets and quoting any'
print ' strings containing a comma. Headers are still'
print ' excluded.'
print '--summary : print only count of rows returned.'
else:
if not query:
query = sys.stdin.read()
if not query:
raise ValueError('Database query is required as stdin.')
results = db.conn.queryAll(query)
if set(args) & set(('-s', '--summary')):
print len(results)
elif set(args) & set(('-c', '--csv')):
for row in results:
# Any unicode characters will be lost (replaced with
# question marks) by converting to str.
rowStr = (CSVFormat(c) for c in row)
print ','.join(rowStr)
else:
for row in results:
print row
if __name__ == '__main__':
main(sys.argv[1:])
|
Python
| 0
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.