commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
3661ca3947763656165f8fc68ea42358ad37285a | Add stub for qiprofile update test. | test/unit/helpers/test_qiprofile.py | test/unit/helpers/test_qiprofile.py | import os
import glob
import shutil
from nose.tools import (assert_equal, assert_is_not_none)
import qixnat
from ... import (project, ROOT)
from ...helpers.logging import logger
from qipipe.helpers import qiprofile
COLLECTION = 'Sarcoma'
"""The test collection."""
SUBJECT = 'Sarcoma001'
"""The test subjects."""
SESSION = 'Session01'
"""The test session."""
class TestQIProfile(object):
"""qiprofile update tests."""
def setUp(self):
self._clean()
self._seed()
def tearDown(self):
self._clean()
def test_sync_session(self):
logger(__name__).debug("Testing qiprofile sync on %s %s..." %
(SUBJECT, SESSION))
def _clean(self):
"""Deletes the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
xnat.delete_subjects(project, subject)
def _seed(self):
"""Populates the test XNAT session."""
with qixnat.connect() as xnat:
# Delete the test subject, if it exists.
xnat.delete_subjects(project(), subject)
if __name__ == "__main__":
import nose
nose.main(defaultTest=__name__)
| Python | 0 | |
1c7b9c1ed1f4d6a8ee201ba109db95449181fee1 | Make operation tests timezone-aware | pylxd/tests/test_operation.py | pylxd/tests/test_operation.py | # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from ddt import ddt
import mock
import unittest
from pylxd import api
from pylxd import connection
from pylxd.tests import annotated_data
from pylxd.tests import fake_api
@ddt
class LXDUnitTestOperation(unittest.TestCase):
def setUp(self):
super(LXDUnitTestOperation, self).setUp()
self.lxd = api.API()
def test_list_operations(self):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation_list())
self.assertEqual(
['1234'],
self.lxd.list_operations())
ms.assert_called_with('GET',
'/1.0/operations')
def test_operation_info(self):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation())
self.assertEqual({
'operation_create_time':
datetime.datetime.utcfromtimestamp(1433876844)
.strftime('%Y-%m-%d %H:%M:%S'),
'operation_update_time':
datetime.datetime.utcfromtimestamp(1433876843)
.strftime('%Y-%m-%d %H:%M:%S'),
'operation_status_code':
'Running'
}, self.lxd.operation_info('1234'))
ms.assert_called_with('GET',
'/1.0/operations/1234')
@annotated_data(
('create_time',
datetime.datetime.utcfromtimestamp(1433876844)
.strftime('%Y-%m-%d %H:%M:%S')),
('update_time',
datetime.datetime.utcfromtimestamp(1433876843)
.strftime('%Y-%m-%d %H:%M:%S')),
('status', 'Running'),
)
def test_operation_show(self, method, expected):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation())
self.assertEqual(
expected, getattr(self.lxd,
'operation_show_' + method)('1234'))
ms.assert_called_with('GET',
'/1.0/operations/1234')
@annotated_data(
('operation_delete', 'DELETE', ''),
('wait_container_operation', 'GET',
'/wait?status_code=200&timeout=30', ('200', '30')),
)
def test_operation_actions(self, method, http, path, args=()):
with mock.patch.object(connection.LXDConnection, 'get_status') as ms:
ms.return_value = True
self.assertTrue(
getattr(self.lxd, method)('1234', *args))
ms.assert_called_with(http,
'/1.0/operations/1234' + path)
| # Copyright (c) 2015 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from ddt import ddt
import mock
import unittest
from pylxd import api
from pylxd import connection
from pylxd.tests import annotated_data
from pylxd.tests import fake_api
@ddt
class LXDUnitTestOperation(unittest.TestCase):
def setUp(self):
super(LXDUnitTestOperation, self).setUp()
self.lxd = api.API()
def test_list_operations(self):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation_list())
self.assertEqual(
['1234'],
self.lxd.list_operations())
ms.assert_called_with('GET',
'/1.0/operations')
def test_operation_info(self):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation())
self.assertEqual({
'operation_create_time':
datetime.datetime.fromtimestamp(1433869644)
.strftime('%Y-%m-%d %H:%M:%S'),
'operation_update_time':
datetime.datetime.fromtimestamp(1433869643)
.strftime('%Y-%m-%d %H:%M:%S'),
'operation_status_code':
'Running'
}, self.lxd.operation_info('1234'))
ms.assert_called_with('GET',
'/1.0/operations/1234')
@annotated_data(
('create_time',
datetime.datetime.fromtimestamp(1433869644)
.strftime('%Y-%m-%d %H:%M:%S')),
('update_time',
datetime.datetime.fromtimestamp(1433869643)
.strftime('%Y-%m-%d %H:%M:%S')),
('status', 'Running'),
)
def test_operation_show(self, method, expected):
with mock.patch.object(connection.LXDConnection, 'get_object') as ms:
ms.return_value = ('200', fake_api.fake_operation())
self.assertEqual(
expected, getattr(self.lxd,
'operation_show_' + method)('1234'))
ms.assert_called_with('GET',
'/1.0/operations/1234')
@annotated_data(
('operation_delete', 'DELETE', ''),
('wait_container_operation', 'GET',
'/wait?status_code=200&timeout=30', ('200', '30')),
)
def test_operation_actions(self, method, http, path, args=()):
with mock.patch.object(connection.LXDConnection, 'get_status') as ms:
ms.return_value = True
self.assertTrue(
getattr(self.lxd, method)('1234', *args))
ms.assert_called_with(http,
'/1.0/operations/1234' + path)
| Python | 0.000106 |
36af45d88f01723204d9b65d4081e74a80f0776b | Add test for layers module. | test/layers_test.py | test/layers_test.py | import theanets
import numpy as np
class TestLayer:
def test_build(self):
layer = theanets.layers.build('feedforward', nin=2, nout=4)
assert isinstance(layer, theanets.layers.Layer)
class TestFeedforward:
def test_create(self):
l = theanets.layers.Feedforward(nin=2, nout=4)
assert l.reset() == 12
class TestTied:
def test_create(self):
l0 = theanets.layers.Feedforward(nin=2, nout=4)
l = theanets.layers.Tied(partner=l0)
assert l.reset() == 2
class TestClassifier:
def test_create(self):
l = theanets.layers.Classifier(nin=2, nout=4)
assert l.reset() == 12
class TestRecurrent:
def test_create(self):
l = theanets.layers.Recurrent(nin=2, nout=4)
assert l.reset() == 28
class TestMRNN:
def test_create(self):
l = theanets.layers.MRNN(nin=2, nout=4, factors=3)
assert l.reset() == 42
class TestLSTM:
def test_create(self):
l = theanets.layers.LSTM(nin=2, nout=4)
assert l.reset() == 124
| Python | 0 | |
3dcf251276060b43ac888e0239f26a0cf2531832 | Add tests for proxy drop executable | tests/test_proxy_drop_executable.py | tests/test_proxy_drop_executable.py | # This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
from positive_alert_test_case import PositiveAlertTestCase
from negative_alert_test_case import NegativeAlertTestCase
from alert_test_suite import AlertTestSuite
class TestAlertSSHPasswordAuthViolation(AlertTestSuite):
alert_filename = "proxy_drop_non_standard_port"
# This event is the default positive event that will cause the
# alert to trigger
default_event = {
"_type": "event",
"_source": {
"category": "squid",
"tags": ["squid"],
"details": {
"details.sourceipaddress": "1.2.3.4",
"details.destination": "http://evil.com/evil.exe",
"details.proxyaction": "TCP_DENIED/-",
}
}
}
# This alert is the expected result from running this task
default_alert = {
"category": "squid",
"tags": ['squid', 'proxy'],
"severity": "WARNING",
"summary": 'Multiple Proxy DROP events detected from 1.2.3.4 to the following executable file destinations: http://evil.com/evil.exe',
}
test_cases = []
test_cases.append(
PositiveAlertTestCase(
description="Positive test with default events and default alert expected",
events=AlertTestSuite.create_events(default_event, 1),
expected_alert=default_alert
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['category'] = 'bad'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect category",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['tags'] = 'bad tag example'
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with events with incorrect tags",
events=events,
)
)
events = AlertTestSuite.create_events(default_event, 10)
for event in events:
event['_source']['utctimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
event['_source']['receivedtimestamp'] = AlertTestSuite.subtract_from_timestamp_lambda({
'minutes': 241})
test_cases.append(
NegativeAlertTestCase(
description="Negative test case with old timestamp",
events=events,
)
)
| Python | 0 | |
30d4301a04081f3d7a4fdba835a56aa0adac1375 | fix latent slaves started serially with monkey patch instead | monkeypatch.py | monkeypatch.py | from twisted.python import log
from twisted.internet import reactor
def botmaster_maybeStartBuildsForSlave(self, slave_name):
"""
We delay this for 10 seconds, so that if multiple slaves start at the same
time, builds will be distributed between them.
"""
def do_start():
log.msg(format="Really starting builds on %(slave_name)s",
slave_name=slave_name)
builders = self.getBuildersForSlave(slave_name)
self.brd.maybeStartBuildsOn([b.name for b in builders])
log.msg(format="Waiting to start builds on %(slave_name)s",
slave_name=slave_name)
reactor.callLater(10, do_start)
from buildbot.process.slavebuilder import AbstractSlaveBuilder
def slavebuilder_buildStarted(self):
AbstractSlaveBuilder.buildStarted(self)
if self.slave and hasattr(self.slave, 'buildStarted'):
self.slave.buildStarted(self)
from buildbot.process.buildrequestdistributor import BasicBuildChooser
class NoFallBackBuildChooser(BasicBuildChooser):
"""
BuildChooser that doesn't fall back to rejected slaves.
In particular, builds with locks won't be assigned before a lock is ready.
"""
def __init__(self, bldr, master):
BasicBuildChooser.__init__(self, bldr, master)
self.rejectedSlaves = None
def apply_patches():
log.msg("Apply flocker_bb.monkeypatch.")
from buildbot.process.botmaster import BotMaster
BotMaster.maybeStartBuildsForSlave = botmaster_maybeStartBuildsForSlave
from buildbot.process.slavebuilder import SlaveBuilder
SlaveBuilder.buildStarted = slavebuilder_buildStarted
from buildbot.steps.master import MasterShellCommand
MasterShellCommand.renderables += ['path']
from buildbot.process.buildrequestdistributor import (
BuildRequestDistributor)
BuildRequestDistributor.BuildChooser = NoFallBackBuildChooser
| Python | 0 | |
379aef7e3aebc05352cacd274b43b156e32de18b | Add script to run tests | runtests.py | runtests.py | #!/usr/bin/env python
import argparse
import sys
import django
from django.conf import settings
from django.test.utils import get_runner
def runtests(test_labels):
settings.configure(INSTALLED_APPS=['tests'])
django.setup()
TestRunner = get_runner(settings)
test_runner = TestRunner()
failures = test_runner.run_tests(test_labels)
sys.exit(failures)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('test_labels', nargs='*', default=['tests'])
args = parser.parse_args()
runtests(test_labels=args.test_labels)
| Python | 0.000001 | |
abf39931331f54aff5f10345939420041bd2039d | Add test for APS2 instruction merging. | tests/test_APS2Pattern.py | tests/test_APS2Pattern.py | import h5py
import unittest
import numpy as np
from copy import copy
from QGL import *
from instruments.drivers import APS2Pattern
class APSPatternUtils(unittest.TestCase):
def setUp(self):
self.q1gate = Channels.LogicalMarkerChannel(label='q1-gate')
self.q1 = Qubit(label='q1', gateChan=self.q1gate)
self.q1 = Qubit(label='q1')
self.q1.pulseParams['length'] = 30e-9
Compiler.channelLib = {'q1': self.q1, 'q1-gate': self.q1gate}
def test_synchronize_control_flow(self):
q1 = self.q1
pulse = Compiler.Waveform()
pulse.length = 24
pulse.key = 12345
delay = Compiler.Waveform()
delay.length = 100
delay.isTimeAmp = True
blank = Compiler.Waveform( BLANK(q1, pulse.length) )
seq_1 = [qwait(), delay, copy(pulse), qwait(), copy(pulse)]
seq_2 = [qwait(), copy(blank), qwait(), copy(blank)]
offsets = { APS2Pattern.wf_sig(pulse) : 0 }
instructions = APS2Pattern.create_seq_instructions([seq_1, seq_2, [], [], []], offsets)
instr_types = [
APS2Pattern.SYNC,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER,
APS2Pattern.WFM,
APS2Pattern.WAIT,
APS2Pattern.WFM,
APS2Pattern.MARKER
]
for actual, expected in zip(instructions, instr_types):
instrOpCode = (actual.header >> 4) & 0xf
assert(instrOpCode == expected)
if __name__ == "__main__":
unittest.main()
| Python | 0 | |
df05088b5a6233cb262017b8489723c23000eb17 | Add variable | src/robotide/ui/images.py | src/robotide/ui/images.py | # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
from robot.parsing.model import Variable
_SIZE = (16, 16)
_BASE = os.path.dirname(__file__)
class TreeImageList(wx.ImageList):
def __init__(self):
wx.ImageList.__init__(self, *_SIZE)
self._images = {
'TestDataDirectory': _TreeImage(self, wx.ART_FOLDER, wx.ART_FOLDER_OPEN),
'TestCaseFile': _TreeImage(self, wx.ART_NORMAL_FILE),
'TestCase': _TreeImage(self, 'robot.png'),
'UserKeyword': _TreeImage(self, 'process.png'),
'ResourceFile': _TreeImage(self, wx.ART_NORMAL_FILE),
'Variable': _TreeImage(self, 'process.png')
}
def __getitem__(self, key):
return self._images[key]
class _TreeImage(object):
def __init__(self, image_list, normal, expanded=None):
self.normal = self._get_image(image_list, normal)
self.expanded = self._get_image(image_list, expanded) if expanded else self.normal
def _get_image(self, image_list, source):
if source.startswith('wx'):
img = wx.ArtProvider_GetBitmap(source, wx.ART_OTHER, _SIZE)
else:
path = os.path.join(_BASE, source)
img = wx.Image(path, wx.BITMAP_TYPE_PNG).ConvertToBitmap()
return image_list.Add(img)
| # Copyright 2008-2009 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import wx
_SIZE = (16, 16)
_BASE = os.path.dirname(__file__)
class TreeImageList(wx.ImageList):
def __init__(self):
wx.ImageList.__init__(self, *_SIZE)
self._images = {
'TestDataDirectory': _TreeImage(self, wx.ART_FOLDER, wx.ART_FOLDER_OPEN),
'TestCaseFile': _TreeImage(self, wx.ART_NORMAL_FILE),
'TestCase': _TreeImage(self, 'robot.png'),
'UserKeyword': _TreeImage(self, 'process.png'),
'ResourceFile': _TreeImage(self, wx.ART_NORMAL_FILE)
}
def __getitem__(self, key):
return self._images[key]
class _TreeImage(object):
def __init__(self, image_list, normal, expanded=None):
self.normal = self._get_image(image_list, normal)
self.expanded = self._get_image(image_list, expanded) if expanded else self.normal
def _get_image(self, image_list, source):
if source.startswith('wx'):
img = wx.ArtProvider_GetBitmap(source, wx.ART_OTHER, _SIZE)
else:
path = os.path.join(_BASE, source)
img = wx.Image(path, wx.BITMAP_TYPE_PNG).ConvertToBitmap()
return image_list.Add(img)
| Python | 0.000005 |
aeaf2e1a1207f2094ea4298b1ecff015f5996b5a | Add test cases for gabor filter | skimage/filter/tests/test_gabor.py | skimage/filter/tests/test_gabor.py | import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from skimage.filter import gabor_kernel, gabor_filter
def test_gabor_kernel_sum():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
kernel = gabor_kernel(sigmax, sigmay, frequency+0.1, 0)
# make sure gaussian distribution is covered nearly 100%
assert_almost_equal(np.abs(kernel).sum(), 1, 2)
def test_gabor_kernel_theta():
for sigmax in range(1, 10, 2):
for sigmay in range(1, 10, 2):
for frequency in range(0, 10, 2):
for theta in range(0, 10, 2):
kernel0 = gabor_kernel(sigmax, sigmay, frequency+0.1, theta)
kernel180 = gabor_kernel(sigmax, sigmay, frequency,
theta+np.pi)
assert_array_almost_equal(np.abs(kernel0),
np.abs(kernel180))
def test_gabor_filter():
real, imag = gabor_filter(np.random.random((100, 100)), 1, 1, 1, 1)
if __name__ == "__main__":
from numpy import testing
testing.run_module_suite()
| Python | 0 | |
a70f46aac52be5b38b869cfbe18c0421a0032aee | Add script to count parameters of PyTorch model | count_params.py | count_params.py | import sys
import numpy as np
import torch
model = torch.load(sys.argv[1])
params = 0
for key in model:
params += np.multiply.reduce(model[key].shape)
print('Total number of parameters: ' + str(params))
| Python | 0 | |
fd4398b1e811aaa2b876c120f99ca7fff08618ca | install on windows via gohlke wheels | scripts/install_on_windows.py | scripts/install_on_windows.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script for installing on Microsoft Windows
Wheels from [GOHLKE WINDOWS REPOSITORY](https://www.lfd.uci.edu/~gohlke/pythonlibs/)
"""
try:
from gohlkegrabber import GohlkeGrabber
except ImportError:
print("gohlkegrabber not installed -> 'pip install gohlkegrabber")
pass
import subprocess
import tempfile
import shutil
PACKAGES = ('numpy')
def install_packages(packages, remove_tmpdir=True):
"""main script"""
_py = '3.7'
_platform = 'win_amd64'
_tmpdir = tempfile.mkdtemp(prefix='py37w')
print(f"Temporary directory is: {_tmpdir}")
gg = GohlkeGrabber()
for pkg in packages:
print(f"retreiving {pkg}...")
pkwhl = gg.retrieve(_tmpdir, pkg, python=_py, platform=_platform)
subprocess.call(f"pip install {pkwhl[0]}")
if remove_tmpdir:
shutil.rmtree(_tmpdir)
print("temporary directory removed")
if __name__ == "__main__":
pass
| Python | 0 | |
495c937d39da1902948065a38502f9d582fa2b3b | Add darkobject tests. | tests/darkobject.py | tests/darkobject.py | """
.. moduleauthor:: Adam Gagorik <adam.gagorik@gmail.com>
"""
import unittest
import pydarkstar.logutils
import pydarkstar.darkobject
pydarkstar.logutils.setDebug()
class TestDarkObject(unittest.TestCase):
def test_init(self):
pydarkstar.darkobject.DarkObject()
if __name__ == '__main__':
unittest.main() | Python | 0 | |
6f8699288f79ff856ed58595169cb08956cd210d | Create toeplitz-matrix.py | Python/toeplitz-matrix.py | Python/toeplitz-matrix.py | # Time: O(m * n)
# Space: O(1)
class Solution(object):
def isToeplitzMatrix(self, matrix):
"""
:type matrix: List[List[int]]
:rtype: bool
"""
return all(i == 0 or j == 0 or matrix[i-1][j-1] == val
for i, row in enumerate(matrix)
for j, val in enumerate(row))
| Python | 0.000017 | |
d2a78f252bf9a584569e372ca9474863e4496c7a | Add one test | tests/test_utils.py | tests/test_utils.py | import os
from contextlib import suppress
import numpy as np
import pandas as pd
import pytest
import legendary_potato.kernel
import legendary_potato.utils
TEST_PATH = os.path.join(os.path.abspath(os.path.curdir))
SAMPLE_PATH = os.path.join(TEST_PATH, 'sample')
GRAMMATRIX_PATH = os.path.join(TEST_PATH, 'gram_matrix')
def kernel_sample_iterator():
"""Return an iterator over (kernels, samples).
"""
for kern in kernel_iterator():
kern_name = kern.__name__
yield kern, kernel_samples(kern_name)
def kernel_iterator():
"""Return an iterator over kernels to be tested.
"""
for kern_name in os.listdir(SAMPLE_PATH):
yield legendary_potato.kernel.__dict__.get(kern_name)
def kernel_samples(kernel_name):
"""Return iterator over samples for a kernel from specific file(s).
The iterator generate (sample_name, sample)
A kernel_path is generated. If it is a file, each line is considered
to be a sample, and its name is the line number. If it is a direcctory,
each file is considered to be a string sample and its name is the
file name.
"""
kernel_sample_path = os.path.join(SAMPLE_PATH, kernel_name)
sep = ','
if os.path.isfile(kernel_sample_path):
# One sample per line
with open(kernel_sample_path, 'r') as sample_file:
line = sample_file.readline()
try:
if len(np.fromstring(line, sep=sep)) > 0:
# line composed of numbers
is_string = False
else:
# line composed of stings
is_string = True
except ValueError:
# line composed of mix of strings and numbers. should be
# treated as strings
is_string = True
sample_file.seek(0)
for nu, line in enumerate(sample_file):
if is_string:
yield (nu, [row.strip for row in line.split(sep)])
else:
yield (nu, np.fromstring(line, sep=sep))
else:
# kernel_sample_path is a directory
for sample_file in os.listdir(kernel_sample_path):
file_path = os.path.join(kernel_sample_path, sample_file)
with open(file_path, 'r') as pot:
yield sample_file, pot.read()
@pytest.mark.parametrize(('kernel', 'sample'), kernel_sample_iterator())
def test_matrix(kernel, sample, tol=1e-15):
"""Regression test on gram matrix.
Construct the Gram matrix for the kernel and the samples and compare it
to the previously calculated one.
kernel -- the potato kernel to test
sample -- the sample to construct the Gram matrix
tol -- tolerance for pointing float errors
"""
kernel_name = kernel.__name__ # TODO: find a more feng shui way
matrix_path = os.path.join(GRAMMATRIX_PATH, kernel_name + '.csv')
potato_util = legendary_potato.utils.PotatoUtils(kernel)
cur_matrix = potato_util.matrix(tr_s for _, tr_s in sample)
if os.path.exists(matrix_path):
test_matrix = pd.DataFrame().from_csv(matrix_path,
header=None,
index_col=False)
results = np.array(test_matrix, dtype=cur_matrix.dtype) - cur_matrix
assert (np.abs(results) < tol).all()
else:
with suppress(FileExistsError):
os.makedirs(GRAMMATRIX_PATH)
pd.DataFrame(cur_matrix).to_csv(matrix_path, header=None, index=None)
| Python | 0.00105 | |
76c040e9da5d94dfcb68d3e9a8003b894c1cf1dc | test file for vimba.py | tests/test_vimba.py | tests/test_vimba.py | import pytest
from pymba import Vimba, VimbaException
def test_version():
version = Vimba().version.split('.')
assert int(version[0]) >= 1
assert int(version[1]) >= 7
assert int(version[2]) >= 0
def test_startup_shutdown():
with pytest.raises(VimbaException) as e:
Vimba().system().feature_names()
assert e.value.error_code == VimbaException.ERR_STARTUP_NOT_CALLED
# manual
Vimba().startup()
Vimba().system().feature_names()
Vimba().shutdown()
# context manager
with Vimba() as vmb:
vmb.system().feature_names()
@pytest.fixture
def vmb() -> Vimba:
with Vimba() as v:
yield v
# works best with camera(s) attached
def test_interface_camera_ids(vmb: Vimba):
# for ethernet camera discovery
if vmb.system().GeVTLIsPresent:
vmb.system().run_feature_command("GeVDiscoveryAllOnce")
for func in (vmb.interface_ids, vmb.camera_ids):
ids = func()
assert isinstance(ids, list)
for x in ids:
assert isinstance(x, str)
| Python | 0 | |
295b83d466b90ea812e8c0bda56b4d38a31c956a | Create reversedArrayNum.py | CodeWars/8kyu/reversedArrayNum.py | CodeWars/8kyu/reversedArrayNum.py | def digitize(n):
return [int(i) for i in str(n)][::-1]
| Python | 0.000106 | |
7b279117da06af5cf21b61ad810a9c3177de8e3e | Update fabfile.py | fabfile.py | fabfile.py | from fabric.api import local,run
import os
from os import path
#Add settings module so fab file can see it
os.environ['DJANGO_SETTINGS_MODULE'] = "adl_lrs.settings"
from django.conf import settings
adldir = settings.MEDIA_ROOT
actor_profile = 'actor_profile'
activity_profile = 'activity_profile'
activity_state = 'activity_state'
INSTALL_STEPS = ['yes | sudo apt-get install python-setuptools libmysqlclient-dev python-dev python-mysqldb python-libxml2 python-libxslt1 libxml2-dev libxslt1-dev',
'sudo easy_install pip',
'sudo pip install virtualenv',
'virtualenv env;. env/bin/activate;pip install -r requirements.txt;deactivate']
def deps_local():
for step in INSTALL_STEPS:
local(step)
#Create media directories and give them open permissions
if not os.path.exists(path.join(adldir,activity_profile)):
os.makedirs(path.join(adldir,activity_profile))
os.chmod(path.join(adldir,activity_profile), 0777)
if not os.path.exists(path.join(adldir,activity_state)):
os.makedirs(path.join(adldir,activity_state))
os.chmod(path.join(adldir,activity_state), 0777)
if not os.path.exists(path.join(adldir,actor_profile)):
os.makedirs(path.join(adldir,actor_profile))
os.chmod(path.join(adldir,actor_profile), 0777)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| from fabric.api import local,run
import os
from os import path
#Add settings module so fab file can see it
os.environ['DJANGO_SETTINGS_MODULE'] = "adl_lrs.settings"
from django.conf import settings
adldir = settings.MEDIA_ROOT
actor_profile = 'actor_profile'
activity_profile = 'activity_profile'
activity_state = 'activity_state'
INSTALL_STEPS = ['yes | sudo apt-get install python-setuptools libmysqlclient-dev python-dev python-mysqldb python-libxml2 python-libxslt1 libxml2-dev libxslt1-dev',
'sudo easy_install pip',
'pip install -r requirements.txt']
def deps_local():
for step in INSTALL_STEPS:
local(step)
def create_dirs():
#Create media directories and give them open permissions
if not os.path.exists(path.join(adldir,activity_profile)):
os.makedirs(path.join(adldir,activity_profile))
os.chmod(path.join(adldir,activity_profile), 0777)
if not os.path.exists(path.join(adldir,activity_state)):
os.makedirs(path.join(adldir,activity_state))
os.chmod(path.join(adldir,activity_state), 0777)
if not os.path.exists(path.join(adldir,actor_profile)):
os.makedirs(path.join(adldir,actor_profile))
os.chmod(path.join(adldir,actor_profile), 0777)
def deps_remote():
for step in INSTALL_STEPS:
run(step)
| Python | 0 |
86418c4f3ea786c6eb1aad6579dadfb286dec0a3 | Create InMoov2.minimal.py | toSort/InMoov2.minimal.py | toSort/InMoov2.minimal.py | # a very minimal script for InMoov
# although this script is very short you can still
# do voice control of a right hand or finger box
# for any command which you say - you will be required to say a confirmation
# e.g. you say -> open hand, InMoov will ask -> "Did you say open hand?", you will need to
# respond with a confirmation ("yes","correct","yeah","ya")
rightPort = "COM8"
i01 = Runtime.createAndStart("i01", "InMoov")
# starting parts
i01.startEar()
i01.startMouth()
#to tweak the default voice
i01.mouth.setGoogleURI("http://thehackettfamily.org/Voice_api/api2.php?voice=Ryan&txt=")
##############
i01.startRightHand(rightPort)
# tweaking defaults settings of right hand
#i01.rightHand.thumb.setMinMax(55,135)
#i01.rightHand.index.setMinMax(0,160)
#i01.rightHand.majeure.setMinMax(0,140)
#i01.rightHand.ringFinger.setMinMax(48,145)
#i01.rightHand.pinky.setMinMax(45,146)
#i01.rightHand.thumb.map(0,180,55,135)
#i01.rightHand.index.map(0,180,0,160)
#i01.rightHand.majeure.map(0,180,0,140)
#i01.rightHand.ringFinger.map(0,180,48,145)
#i01.rightHand.pinky.map(0,180,45,146)
#################
# verbal commands
ear = i01.ear
ear.addCommand("attach right hand", "i01.rightHand", "attach")
ear.addCommand("disconnect right hand", "i01.rightHand", "detach")
ear.addCommand("rest", i01.getName(), "rest")
ear.addCommand("open hand", "python", "handopen")
ear.addCommand("close hand", "python", "handclose")
ear.addCommand("capture gesture", ear.getName(), "captureGesture")
ear.addCommand("manual", ear.getName(), "lockOutAllGrammarExcept", "voice control")
ear.addCommand("voice control", ear.getName(), "clearLock")
ear.addComfirmations("yes","correct","yeah","ya")
ear.addNegations("no","wrong","nope","nah")
ear.startListening()
def handopen():
i01.moveHand("left",0,0,0,0,0)
i01.moveHand("right",0,0,0,0,0)
def handclose():
i01.moveHand("left",180,180,180,180,180)
i01.moveHand("right",180,180,180,180,180)
| Python | 0.000001 | |
35e76ec99a3710a20b17a5afddaa14389af65098 | Add some simple MediaWiki importer. | tools/import_mediawiki.py | tools/import_mediawiki.py | import os
import os.path
import argparse
from sqlalchemy import create_engine
def main():
parser = argparse.ArgumentParser()
parser.add_argument('url')
parser.add_argument('-o', '--out', default='wikked_import')
parser.add_argument('--prefix', default='wiki')
parser.add_argument('-v', '--verbose', action='store_true')
parser.add_argument('--ext', default='.md')
args = parser.parse_args()
prefix = args.prefix
out_dir = args.out
ext = '.' + args.ext.lstrip('.')
if not out_dir:
parser.print_help()
return 1
if os.path.isdir(out_dir):
print("The output directory already exists!")
return 1
engine = create_engine(args.url, echo=args.verbose)
conn = engine.connect()
query = (
'SELECT '
'p.page_id,p.page_title,p.page_latest,'
'r.rev_id,r.rev_text_id,t.old_id,t.old_text '
'from %(prefix)s_page p '
'INNER JOIN %(prefix)s_revision r ON p.page_latest = r.rev_id '
'INNER JOIN %(prefix)s_text t ON r.rev_text_id = t.old_id;' %
{'prefix': prefix})
q = conn.execute(query)
for p in q:
title = p['page_title'].decode('utf8')
text = p['old_text'].decode('utf8')
path_noext = os.path.join(out_dir, title)
path = path_noext + ext
dirname = os.path.dirname(path)
if not os.path.isdir(dirname):
os.makedirs(dirname)
if os.path.exists(path):
suffnum = 2
while True:
new_path = '%s_%d%s' % (path_noext, suffnum, ext)
if not os.path.exists(new_path):
break
suffnum += 1
if suffnum > 100:
raise Exception("Can't find available path for: " %
path)
print("WARNING: %s exists" % path)
print("WARNING: creating %s instead" % new_path)
path = new_path
print(p['page_id'], title)
with open(path, 'w', encoding='utf8') as fp:
fp.write(text)
conn.close()
if __name__ == '__main__':
main()
| Python | 0 | |
237041aff9d99ac840572742467772edf1f4d5ef | Add image download example | examples/image/download.py | examples/image/download.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
"""
Download an image with the Image service.
For a full guide see
http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html
"""
def download_image_stream(conn):
    """Stream an image to disk while verifying its MD5 checksum.

    Because the download happens chunk by chunk in our own code, we are
    responsible for integrity checking: an MD5 digest is accumulated over
    every chunk and compared against the server-supplied ``Content-MD5``
    header once the stream is exhausted.
    """
    print("Download Image via streaming:")

    # Locate the image to download.
    target = conn.image.find_image("myimage")

    # Digest accumulated incrementally as chunks arrive.
    checksum = hashlib.md5()

    with open("myimage.qcow2", "wb") as destination:
        response = conn.image.download_image(target, stream=True)

        # Consume the body 1024 bytes at a time so the whole image never
        # has to fit in memory.
        for block in response.iter_content(chunk_size=1024):
            checksum.update(block)
            destination.write(block)

        # All data consumed: the digest we computed must match the one the
        # server advertised.
        if response.headers["Content-MD5"] != checksum.hexdigest():
            raise Exception("Checksum mismatch in downloaded content")
def download_image(conn):
    """Download an image in one shot and write it to a local file."""
    print("Download Image:")

    # Locate the image to download.
    target = conn.image.find_image("myimage")

    with open("myimage.qcow2", "w") as destination:
        # The response holds the entire image contents in memory.
        destination.write(conn.image.download_image(target))
| Python | 0.000003 | |
5ea296703596306ea9895e37db9412f80731543a | Add a protein-plotting example, to show how to visualize nicely a graph | examples/mayavi/protein.py | examples/mayavi/protein.py | """
Visualize a protein graph structure downloaded from the protein database in
standard pdb format.
We parse the pdb file, but extract only a very small amount of
information: the type of atoms, their positions, and the links between them.
We assign a scalar value for the atoms to differenciate the different
types of atoms, but it does not correspond to the atomic mass. The size
and the color of the atom on the visualization is therefore not
chemicaly-significant.
The atoms are plotted using mlab.points3d, and connections between atoms
are added to the dataset, and visualized using a surface module.
To visualize the local atomic density, we use a gaussian splatter filter
that builds a kernel density estimation of the continuous density field:
each point is convoluted by a Gaussian kernel, and the sum of these
Gaussians form the resulting density field. We visualize this field using
volume rendering.
Reference for the pdb file standard:
http://mmcif.pdb.org/dictionaries/pdb-correspondence/pdb2mmcif.html
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Copyright (c) 2008, Enthought, Inc.
# License: BSD Style.
# The pdb code for the protein.
# The pdb code for the protein.
protein_code = '2q09'

# Retrieve the file from the protein database ##################################
import os
if not os.path.exists('pdb%s.ent.gz' % protein_code):
    # Download the data (Python 2 urllib; blocks until the transfer finishes).
    import urllib
    print 'Downloading protein data, please wait'
    opener = urllib.urlopen(
        'ftp://ftp.wwpdb.org/pub/pdb/data/structures/divided/pdb/q0/pdb%s.ent.gz'
        % protein_code)
    open('pdb%s.ent.gz' % protein_code, 'w').write(opener.read())

# Parse the pdb file ###########################################################
import gzip
infile = gzip.GzipFile('pdb%s.ent.gz' % protein_code, 'rb')

# nodes: atom serial number -> (atom type, x, y, z) as raw strings.
# edges: pairs of atom serial numbers that should be drawn as bonds.
# atoms: the set of distinct atom type names seen in the file.
nodes = dict()
edges = list()
atoms = set()

# Consecutive ATOM records in the same chain are linked into the backbone.
last_atom_label = None
last_chain_label = None
for line in infile:
    line = line.split()
    if line[0] in ('ATOM', 'HETATM'):
        # Whitespace-split columns: [1]=serial, [2]=atom name, [5]=chain,
        # [6:9]=x,y,z.  Works for this file but is fragile for general PDB
        # data, where columns are fixed-width — TODO confirm if reused.
        nodes[line[1]] = (line[2], line[6], line[7], line[8])
        atoms.add(line[2])
        chain_label = line[5]
        if chain_label == last_chain_label:
            edges.append((line[1], last_atom_label))
        last_atom_label = line[1]
        last_chain_label = chain_label
    elif line[0] == 'CONECT':
        # NOTE(review): this chains the listed atoms sequentially
        # (a1-a2, a2-a3, ...).  The PDB CONECT record actually bonds the
        # first atom to each of the rest (a1-a2, a1-a3, ...) — confirm
        # whether the sequential pairing is intentional here.
        for start, stop in zip(line[1:-1], line[2:]):
            edges.append((start, stop))

# Map each atom type name to a small integer used as the scalar value.
# This differentiates atom types visually but is NOT the atomic mass.
atoms = list(atoms)
atoms.sort()
atoms = dict(zip(atoms, range(len(atoms))))

# Flatten the node dict into parallel coordinate/scalar lists, remembering
# each serial number's index so edges can be translated below.
labels = dict()
x = list()
y = list()
z = list()
scalars = list()
for index, label in enumerate(nodes):
    labels[label] = index
    this_scalar, this_x, this_y, this_z= nodes[label]
    scalars.append(atoms[this_scalar])
    x.append(float(this_x))
    y.append(float(this_y))
    z.append(float(this_z))
connections = list()
for start, stop in edges:
    connections.append((labels[start], labels[stop]))

import numpy as np
x = np.array(x)
y = np.array(y)
z = np.array(z)
scalars = np.array(scalars)

# Visualize the data ###########################################################
from enthought.mayavi import mlab

mlab.figure(1, bgcolor=(0, 0, 0))
mlab.clf()

# Glyph size is inverted (1.5*max - scalars) so "heavier" scalar values get
# smaller glyphs; the mapping is purely cosmetic.
pts = mlab.points3d(x, y, z, 1.5*scalars.max() - scalars,
                    scale_factor=0.015, resolution=10)
# Attach the bond list to the glyph dataset so a surface module can draw it.
pts.mlab_source.dataset.lines = np.array(connections)

# Turn off clamping: the size of the glyph becomes absolute
pts.glyph.glyph.clamping = False

# Use a tube filter to plot tubes on the links, varying the radius with the
# scalar value
tube = mlab.pipeline.tube(pts, tube_radius=0.15)
tube.filter.radius_factor = 1.
tube.filter.vary_radius = 'vary_radius_by_scalar'
mlab.pipeline.surface(tube, color=(0.8, 0.8, 0))

# Visualize the local atomic density via a Gaussian-splatter KDE rendered
# as a volume.
mlab.pipeline.volume(mlab.pipeline.gaussian_splatter(pts))

mlab.view(49, 31.5, 52.8, (4.2, 37.3, 20.6))

mlab.show()
fed1cee9ea50e19b6cc0c3e95f4455d2550f5176 | add initial disk I/O benchmark script | examples/run_benchmarks.py | examples/run_benchmarks.py | import sys
import os
import resource
import shutil
import shlex
import time
import subprocess
import random
# this is a disk I/O benchmark script. It runs menchmarks
# over different filesystems, different cache sizes and
# different number of peers (can be used to find a reasonable
# range for unchoke slots).
# it also measures performance improvements of re-ordering
# read requests based on physical location and OS hints
# like posix_fadvice(FADV_WILLNEED). It can also be used
# for the AIO branch to measure improvements over the
# classic thread based disk I/O
# to set up the test, build the example directoryin release
# with statistics=on and copy fragmentation_test, client_test
# and connection_tester to the current directory.
# make sure gnuplot is installed.
# the following lists define the space tests will be run in
# variables to test. All these are run on the first
# entry in the filesystem list.
# Cache sizes (in 16 kiB blocks) and peer counts swept by the variable tests.
# All of these runs use the first entry in the filesystem list.
cache_sizes = [0, 256, 512, 1024, 2048, 4096, 8192]
peers = [10, 100, 500, 1000, 2000]

# the drives are assumed to be mounted under ./<name>
# or have symbolic links to them.
filesystem = ['ext4', 'ext3', 'reiser', 'xfs']

# the number of peers for the filesystem test. The
# idea is to stress test the filesystem by using a lot
# of peers, since each peer essentially is a separate
# read location on the platter
filesystem_peers = 1000

# the amount of cache for the filesystem test
filesystem_cache = 8192

# the number of seconds to run each test. It's important that
# this is shorter than what it takes to finish downloading
# the test torrent, since then the average rate will not
# be representative of the peak anymore
test_duration = 400

# make sure the environment is properly set up:
# each simulated peer needs its own file descriptors.
if resource.getrlimit(resource.RLIMIT_NOFILE)[0] < 4000:
    print 'please set ulimit -n to at least 4000'
    sys.exit(1)

# make sure we have all the binaries available in the working directory;
# they must be built from the libtorrent examples beforehand.
binaries = ['client_test', 'connection_tester', 'fragmentation_test']
for i in binaries:
    if not os.path.exists(i):
        print 'make sure "%s" is available in current working directory' % i
        sys.exit(1)

# every filesystem under test must be mounted (or symlinked) locally.
for i in filesystem:
    if not os.path.exists(i):
        print ('the path "%s" does not exist. This is directory/mountpoint is ' +
            'used as the download directory and is the filesystem that will be benchmarked ' +
            'and need to exist.') % i
        sys.exit(1)

# make sure we have a test torrent; generate one on first run.
if not os.path.exists('test.torrent'):
    print 'generating test torrent'
    os.system('./connection_tester gen-torrent test.torrent')

# use a new port for each test to make sure they keep working
# this port is incremented for each test run
port = 10000 + random.randint(0, 5000)
def build_commandline(config, port):
    """Assemble the ./client_test command line for one benchmark run.

    The same peer count is used for the -S (upload slots), -T (torrent
    connection limit) and -c (global connection limit) options, and the
    module-level test_duration caps the run time via -q.
    """
    num_peers = config['num-peers']
    reorder_flag = '-O' if config['allow-disk-reorder'] == False else ''
    readahead_flag = '-j' if config['read-ahead'] == False else ''
    return ('./client_test -k -z -N -h -H -M -S %d -T %d -c %d -C %d -s "%s" %s %s '
            '-q %d -p %d -l session_stats/alerts_log.txt test.torrent'
            % (num_peers, num_peers, num_peers, config['cache-size'],
               config['save-path'], reorder_flag, readahead_flag,
               test_duration, port))
def delete_files(files):
    """Best-effort removal of a mix of files and directory trees.

    Each entry is first removed as a plain file; if that fails it is
    removed as a directory tree.  Missing paths are silently ignored.
    """
    for path in files:
        try:
            os.remove(path)
            continue
        except Exception:
            pass
        try:
            shutil.rmtree(path)
        except Exception:
            pass
def build_test_config(fs, num_peers, cache_size, readahead=True, reorder=True):
    """Return the configuration dict describing a single benchmark run."""
    return {
        'test': 'dual',
        'save-path': os.path.join('./', fs),
        'num-peers': num_peers,
        'allow-disk-reorder': reorder,
        'cache-size': cache_size,
        'read-ahead': readahead,
    }
def build_target_folder(config):
    """Derive the results-directory name that encodes all test parameters."""
    reorder_part = 'no-reorder' if config['allow-disk-reorder'] == False else 'reorder'
    readahead_part = 'no-readahead' if config['read-ahead'] == False else 'readahead'
    return 'results_%d_%d_%s_%s_%s_%s' % (
        config['num-peers'],
        config['cache-size'],
        os.path.split(config['save-path'])[1],
        config['test'],
        reorder_part,
        readahead_part)
def run_test(config):
    # Run one benchmark: launch client_test, drive it with connection_tester,
    # then collect fragmentation data and session statistics into a results
    # directory named after the configuration.  Skips runs whose results
    # directory already exists, so the sweep is resumable.
    if os.path.exists(build_target_folder(config)):
        print 'results already exists, skipping test'
        return

    # make sure any previous test file and stale state is removed
    delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', '.resume', '.dht_state', 'session_stats'])

    try: os.mkdir('session_stats')
    except: pass

    # save off the command line and config for reference
    global port
    cmdline = build_commandline(config, port)
    f = open('session_stats/cmdline.txt', 'w+')
    f.write(cmdline)
    f.close()

    f = open('session_stats/config.txt', 'w+')
    print >>f, config
    f.close()

    # launch the client under test, redirecting its stdout to a log file.
    # NOTE(review): `client` is never waited on; the Popen handle is only
    # kept so the process outlives this scope while connection_tester runs.
    f = open('session_stats/client.output', 'w+')
    print 'launching: %s' % cmdline
    client = subprocess.Popen(shlex.split(cmdline), stdout=f)
    # give the client a moment to start listening before hammering it
    time.sleep(1)
    print '\n\n*********************************'
    print '*        RUNNING TEST           *'
    print '*********************************\n\n'

    # connection_tester blocks until the test completes
    print 'launching connection tester'
    os.system('./connection_tester %s %d 127.0.0.1 %d test.torrent >session_stats/tester.output' % (config['test'], config['num-peers'], port))
    f.close()

    # run fragmentation test on the downloaded payload
    print 'analyzing fragmentation'
    os.system('./fragmentation_test test.torrent %s' % config['save-path'])
    shutil.copy('fragmentation.log', 'session_stats/')
    shutil.copy('fragmentation.png', 'session_stats/')
    shutil.copy('fragmentation.gnuplot', 'session_stats/')

    os.chdir('session_stats')

    # parse session stats into plots (script lives two levels up)
    print 'parsing session log'
    os.system('python ../../parse_session_stats.py *.0000.log')

    os.chdir('..')

    # move the results into their final place
    print 'saving results'
    os.rename('session_stats', build_target_folder(config))

    # clean up the download directory and session state
    print 'cleaning up'
    delete_files([os.path.join(config['save-path'], 'stress_test_file'), '.ses_state', '.resume', '.dht_state'])

    # use a fresh port for the next run (see module-level comment)
    port += 1
port += 1
# First sweep: stress every filesystem with the fixed peer count and cache
# size dedicated to the filesystem comparison.
for fs in filesystem:
    config = build_test_config(fs, filesystem_peers, filesystem_cache)
    run_test(config)

# Second sweep: vary cache size, peer count, read-ahead and reordering on
# the first filesystem only (see the header comment).  The original code
# passed filesystem_peers/filesystem_cache here, so the loop variables `c`
# and `p` were never used and every run was identical.
for c in cache_sizes:
    for p in peers:
        for rdahead in [True, False]:
            for reorder in [True, False]:
                config = build_test_config(filesystem[0], p, c, rdahead, reorder)
                run_test(config)
| Python | 0.000005 | |
ab60bd4f31a185884e0c05fa1a5f70c39a9d903a | add 52 | python/p052.py | python/p052.py | def same(a, b):
return sorted(str(a)) == sorted(str(b))
for i in xrange(1, 1000000):
if same(i, 2 * i) and same(3 * i, 4 * i) and same(5 * i, 6 * i) and same(i, 6 * i):
print i
break | Python | 0.998596 | |
c67e4e0e9d2a771df4674cb9cd9f178c1fe6c9bc | Add git-restash functional tests | tests/functional/test_restash.py | tests/functional/test_restash.py | import os
import shutil
import subprocess
import tempfile
import unittest
import git
class TestGitRestash(unittest.TestCase):
    """Functional tests for the `git restash` command.

    Every test runs inside a throwaway repository created in setUp with a
    single commit and a dirty README.md.  The repeated stash/apply, status
    and stderr-capture boilerplate from the original tests is factored into
    the private helpers below.
    """

    def setUp(self):
        # Fresh repo in a temp dir; the process cwd is moved there so the
        # plain `git ...` subprocess calls operate on it.
        self.dirpath = tempfile.mkdtemp()
        os.chdir(self.dirpath)
        self.repo = git.Repo.init(self.dirpath)

        # add files
        open('README.md', 'w').close()
        open('CHANGELOG.md', 'w').close()
        self.repo.index.add(['README.md', 'CHANGELOG.md'])
        self.repo.index.commit('Initial commit')

        # edit README so there is something to stash
        with open('README.md', 'w') as readme_file:
            readme_file.write('a')

    def tearDown(self):
        shutil.rmtree(self.dirpath)

    def _stash_and_apply(self, *save_args):
        # Create a stash (optionally via `stash save <args>`) and re-apply
        # it, leaving the working tree identical to the stash contents.
        if save_args:
            self.repo.git.stash('save', *save_args)
        else:
            self.repo.git.stash()
        self.repo.git.stash('apply')

    def _modify_changelog(self):
        # Dirty CHANGELOG.md so part of the working tree differs from the
        # stash being restashed.
        with open('CHANGELOG.md', 'w') as changelog_file:
            changelog_file.write('a')

    def _short_status(self):
        # Raw `git status --short` output.
        return subprocess.check_output('git status --short'.split())

    def _assert_restashed(self, restash_output, stash_ref):
        # The command reports the stash reference and its resolved SHA.
        stash_sha = self.repo.git.rev_parse(stash_ref)
        self.assertEqual(restash_output,
                         'Restashed {} ({})'.format(stash_ref, stash_sha))

    def _restash_stderr(self, command):
        # Run a raw restash command line and return its trimmed stderr.
        process = subprocess.Popen(
            command.split(),
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        )
        return process.communicate()[1].strip()

    def test_restash_full(self):
        # given
        self._stash_and_apply()

        # when
        restash_output = self.repo.git.restash()

        # then
        self._assert_restashed(restash_output, 'stash@{0}')
        self.assertFalse(self._short_status().strip())

    def test_restash_partial(self):
        # given
        self._stash_and_apply()
        self._modify_changelog()

        # when
        restash_output = self.repo.git.restash()

        # then: only the changes matching the stash are removed
        self._assert_restashed(restash_output, 'stash@{0}')
        self.assertEqual(self._short_status().rstrip(), ' M CHANGELOG.md')

    def test_restash_specifyStash(self):
        # given: two stashes; the older one only covers README.md
        self._stash_and_apply()
        self._modify_changelog()
        self._stash_and_apply()

        # when
        restash_output = self.repo.git.restash('stash@{1}')

        # then
        self._assert_restashed(restash_output, 'stash@{1}')
        self.assertEqual(self._short_status().rstrip(), ' M CHANGELOG.md')

    def test_restash_withNewFiles(self):
        # given
        open('CONTRIBUTING.md', 'w').close()
        self._stash_and_apply('--include-untracked')

        # when
        restash_output = self.repo.git.restash()

        # then
        self._assert_restashed(restash_output, 'stash@{0}')
        self.assertFalse(self._short_status().strip())

    def test_restash_withDeletedFiles(self):
        # given
        os.remove('CHANGELOG.md')
        self._stash_and_apply('--include-untracked')

        # when
        restash_output = self.repo.git.restash()

        # then
        self._assert_restashed(restash_output, 'stash@{0}')
        self.assertFalse(self._short_status().strip())

    def test_restash_quiet_shortOption(self):
        # given
        self._stash_and_apply()

        # when / then: -q suppresses all output
        self.assertFalse(self.repo.git.restash('-q'))

    def test_restash_quiet_longOption(self):
        # given
        self._stash_and_apply()

        # when / then: --quiet suppresses all output
        self.assertFalse(self.repo.git.restash('--quiet'))

    def test_restash_noStashesExist(self):
        # when / then
        error_message = self._restash_stderr('git restash blarg')
        self.assertEqual(error_message, 'error: no stashes exist')

    def test_restash_invalidStash(self):
        # given
        self.repo.git.stash()

        # when / then
        error_message = self._restash_stderr('git restash blarg')
        self.assertEqual(error_message, 'error: blarg is not a valid stash reference')
| Python | 0 | |
26bc79d7ed478872f615e80fa177f0c4582c3631 | reverse string ii | src/main/python/pyleetcode/reverse_string_ii.py | src/main/python/pyleetcode/reverse_string_ii.py | """
Given a string and an integer k, you need to reverse the first k characters for every 2k characters counting from the
start of the string. If there are less than k characters left, reverse all of them. If there are less than 2k but
greater than or equal to k characters, then reverse the first k characters and left the other as original.
Example:
Input: s = "abcdefg", k = 2
Output: "bacdfeg"
Restrictions:
* The string consists of lower English letters only.
* Length of the given string and k will in the range [1, 10000]
"""
def reverse_str(s, k):
    """Reverse the first k characters of every 2k-character window of s.

    If fewer than k characters remain, all of them are reversed; if between
    k and 2k remain, only the first k are reversed (LeetCode 541).

    :type s: str
    :type k: int
    :rtype: str
    """
    # Guard against k <= 0 (the problem guarantees k >= 1, but a
    # non-positive step would make range() misbehave).
    if k <= 0:
        return s
    pieces = []
    for idx in range(0, len(s), k):
        chunk = s[idx:idx + k]
        # Even-numbered k-chunks (0-based) are the first half of a 2k
        # window and get reversed; odd-numbered chunks stay as-is.
        # Integer division keeps this correct on Python 3 as well.
        if (idx // k) % 2 == 0:
            pieces.append(chunk[::-1])
        else:
            pieces.append(chunk)
    # join() avoids the quadratic repeated string concatenation of the
    # original, which also left a debug print statement in the loop.
    return ''.join(pieces)
def test_reverse_str():
    # Example from the problem statement.
    assert reverse_str('abcdefg', 2) == "bacdfeg"
    # Fewer than k characters left: reverse all of them.
    assert reverse_str('abcdefg', 8) == 'gfedcba'
    # Between k and 2k characters: reverse the first k, keep the rest.
    assert reverse_str('abcdefg', 3) == 'cbadefg'
    # Single-character edge case.
    assert reverse_str('a', 1) == 'a'
fe6ece236e684d76441280ba700565f7fbce40cc | Create masked version based on pbcov cutogg | 14B-088/HI/analysis/pbcov_masking.py | 14B-088/HI/analysis/pbcov_masking.py |
'''
Cut out noisy regions by imposing a mask of the primary beam coverage.
'''
from astropy.io import fits
from spectral_cube import SpectralCube
from spectral_cube.cube_utils import beams_to_bintable
from astropy.utils.console import ProgressBar
import os
from analysis.paths import fourteenB_HI_data_path
# execfile(os.path.expanduser("~/Dropbox/code_development/ewky_scripts/write_huge_fits.py"))
# Primary-beam coverage map; [0] picks the primary HDU.
pbcov = fits.open(fourteenB_HI_data_path("M33_14B-088_pbcov.fits"))[0]

cube = SpectralCube.read(fourteenB_HI_data_path("M33_14B-088_HI.clean.image.fits"))

# Apply the mask, using a cut-off of 0.3. This retains all of the regions with
# emission while discarding the noisy low-coverage edges.
pblim = 0.3

masked_cube = cube.with_mask(pbcov.data > pblim)

# Crop to the smallest bounding box that still contains unmasked data.
masked_cube = masked_cube.minimal_subcube()

new_fitsname = fourteenB_HI_data_path("M33_14B-088_HI.clean.image.pbcov_gt_0.3_masked.fits",
                                      no_check=True)

# NOTE(review): writes the whole masked cube in one call; the commented-out
# code below is the per-channel streaming alternative for cubes that do not
# fit in memory, kept here for reference.
masked_cube.write(new_fitsname)

# create_huge_fits(new_fitsname, cube.header)

# save_hdu = fits.open(new_fitsname, mode='update')

# Save per channel
# for chan in ProgressBar(cube.shape[0]):
#     save_hdu[0].data[chan] = cube[chan].value

#     if chan % 50 == 0:
#         save_hdu.flush()

# Save the beam table!
# save_hdu.append(beams_to_bintable(cube.beams))

# save_hdu.flush()
# save_hdu.close()
| Python | 0 | |
d8ddd6a843000c8b4125f166645a41443b6c06ba | Add kms_decrypt module | kms_decrypt.py | kms_decrypt.py | #!/usr/bin/python
import base64
DOCUMENTATION = '''
short_description: Decrypt a secret that was generated by KMS
description:
- This module decrypts the given secret using AWS KMS, and returns it as the Plaintext property
version_added: null
author: Ben Bridts
notes:
- Make sure you read http://docs.aws.amazon.com/kms/latest/developerguide/control-access.html to learn how to restrict
access to your keys
requirements:
- the boto3 python package
options:
aws_secret_key:
description:
- AWS secret key. If not set then the value of the AWS_SECRET_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_secret_key', 'secret_key' ]
version_added: "1.5"
aws_access_key:
description:
- AWS access key. If not set then the value of the AWS_ACCESS_KEY environment variable is used.
required: false
default: null
aliases: [ 'ec2_access_key', 'access_key' ]
version_added: "1.5"
region:
description:
- The AWS region to use. If not specified then the value of the EC2_REGION environment variable, if any, is used.
required: false
aliases: ['aws_region', 'ec2_region']
version_added: "1.5"
secret:
description:
- The encrypted string you want to decode
required: false
default: CAT
'''
EXAMPLES = '''
- name: Decrypt secret
kms_decrypt:
secret: "{{ secret }}"
register: result
delegate_to: 127.0.0.1
- name: Show plaintext
debug: var=result.plaintext
delegate_to: 127.0.0.1
'''
import sys

# Fail in the Ansible-module convention (a "failed=True msg=..." line on
# stdout) when boto3 is missing, since AnsibleModule is not available yet
# at this point in the file.
try:
    import boto3
except ImportError:
    print "failed=True msg='boto3 required for this module'"
    sys.exit(1)
def main():
    """Decrypt a base64-encoded, KMS-encrypted secret and return its plaintext.

    Exits via module.exit_json with `plaintext` and `key_id`, or
    module.fail_json when KMS returns a non-200 status.
    """
    argument_spec = ec2_argument_spec()
    argument_spec.update(dict(
        # no_log keeps the ciphertext out of Ansible's logs/output.
        secret=dict(required=True, no_log=True),
    ))
    module = AnsibleModule(argument_spec=argument_spec, supports_check_mode=False)

    secret = module.params.get('secret')
    # The secret arrives base64-encoded; decode to the raw ciphertext blob
    # KMS expects.  b64decode replaces the deprecated base64.decodestring.
    secret = base64.b64decode(secret)

    client = boto3.client('kms')
    response = client.decrypt(
        CiphertextBlob=secret
    )

    status_code = response['ResponseMetadata']['HTTPStatusCode']
    if status_code != 200:
        module.fail_json(msg='Failed with http status code %s' % status_code)

    # Decryption is read-only: nothing on AWS is modified, so report
    # changed=False (the original's changed=True broke change reporting).
    module.exit_json(changed=False, plaintext=response['Plaintext'], key_id=response['KeyId'])
# Ansible injects its helper code via these star imports; per the (legacy)
# module convention they live at the bottom of the file.
from ansible.module_utils.basic import *
from ansible.module_utils.ec2 import *

# Guard the entry point so importing the module (e.g. for doc extraction or
# tests) does not execute it.
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
4af087e4920124eddb0342d0f22978872f9ba5dc | add landuse_sql.py which convert the .csv files from ArcMap to a SQL database | landuse_sql.py | landuse_sql.py | import sqlite3
import glob
import pandas
#Name of SQL database
sql_schema = 'LandUse_Approx.db'
files = [f for f in glob.glob("*.csv") if "LandUseApprox_" in f]
#Create table names for the SQL database.
#Table names will have 'landuse_' as prefix and the year and length as the ending in the format 'YYYY_Length'
#Store table names in a dictonary (table_names) with the .csv file name as key and SQL table name as value
table_names = {}
for f in files:
table_names[f] = 'landuse_' + f[-13:-4]
conn = sqlite3.connect(sql_schema)
c = conn.cursor()
#Convert each .csv file into a SQL database
#Iterate through all .csv file, convert each file into a Pandas DataFrame and then insert into SQL schema
for f in [files[0]]:
print f
raw_dataset = pandas.read_csv(f, index_col = 0)
print raw_dataset
raw_dataset.to_sql(table_names[f],conn)
#TODO: Apply Primary Key constraint on OBJECTID | Python | 0.000006 | |
110179832ff8ccdda81599f7d6b0675ba8feac24 | Fix document of gaussian | chainer/functions/gaussian.py | chainer/functions/gaussian.py | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Gaussian(function.Function):
"""Gaussian sampling function.
In forward calculation, this funciton takes mean and logarithm of variance
as inputs, and draw a sample from a gaussian distribution.
"""
def __init__(self):
self.eps = None
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
m_type, v_type = in_types
type_check.expect(
m_type.dtype == numpy.float32,
v_type.dtype == numpy.float32,
m_type.shape == v_type.shape,
)
def check_type_backward(self, in_types, out_types):
type_check.expect(out_types.size() == 1)
m_type, v_type = in_types
g_type, = out_types
type_check.expect(
g_type.dtype == numpy.float32,
g_type.shape == m_type.shape,
)
def forward_cpu(self, inputs):
mean, ln_var = inputs
if self.eps is None:
self.eps = numpy.random.normal(0, 1, ln_var.shape) \
.astype(numpy.float32)
self.noise = numpy.exp(ln_var * 0.5) * self.eps
return mean + self.noise,
def forward_gpu(self, inputs):
mean, ln_var = inputs
if self.eps is None:
self.eps = cuda.empty(ln_var.shape, numpy.float32)
cuda.get_generator().fill_normal(self.eps)
noise = cuda.empty_like(ln_var)
cuda.elementwise(
'float* noise, const float* v, const float* e',
'noise[i] = __expf(v[i] * 0.5f) * e[i];',
'gaussian_forward'
)(noise, ln_var, self.eps)
self.noise = noise
return mean + self.noise,
def backward(self, inputs, grad_output):
g, = grad_output
return g, g * self.noise * 0.5,
def gaussian(mean, ln_var):
    """Gaussian sampling function.

    It takes mean :math:`\\mu` and logarithm of variance
    :math:`\\log(\\sigma^2)` as input and output a sample drawn from a
    gaussian distribution :math:`N(\\mu, \\sigma)` via the
    reparameterization trick, so the result is differentiable with
    respect to both inputs.

    Args:
        mean (~chainer.Variable): Input variable representing mean
            :math:`\\mu`.
        ln_var (~chainer.Variable): Input variable representing logarithm of
            variance :math:`\\log(\\sigma^2)`.

    Returns:
        ~chainer.Variable: Output variable.

    """
    return Gaussian()(mean, ln_var)
| import numpy
from chainer import cuda
from chainer import function
from chainer.utils import type_check
class Gaussian(function.Function):

    """Gaussian sampling function.

    In forward calculation, this funciton takes mean and logarithm of variance
    as inputs, and draw a sample from a gaussian distribution.
    """

    def __init__(self):
        # The noise sample `eps` is drawn lazily on the first forward pass
        # and then reused, so repeated forwards of the same Function
        # instance produce the same sample (needed for a consistent
        # backward pass).
        self.eps = None

    def check_type_forward(self, in_types):
        # Inputs: mean and log-variance, both float32 with matching shapes.
        type_check.expect(in_types.size() == 2)
        m_type, v_type = in_types
        type_check.expect(
            m_type.dtype == numpy.float32,
            v_type.dtype == numpy.float32,
            m_type.shape == v_type.shape,
        )

    def check_type_backward(self, in_types, out_types):
        # The single output gradient must match the input shape/dtype.
        type_check.expect(out_types.size() == 1)
        m_type, v_type = in_types
        g_type, = out_types
        type_check.expect(
            g_type.dtype == numpy.float32,
            g_type.shape == m_type.shape,
        )

    def forward_cpu(self, inputs):
        # Reparameterization trick: sample = mean + exp(ln_var / 2) * eps,
        # with eps ~ N(0, 1) drawn once and cached.
        mean, ln_var = inputs
        if self.eps is None:
            self.eps = numpy.random.normal(0, 1, ln_var.shape) \
                            .astype(numpy.float32)

        self.noise = numpy.exp(ln_var * 0.5) * self.eps
        return mean + self.noise,

    def forward_gpu(self, inputs):
        # Same computation as forward_cpu, with eps filled by the CUDA RNG
        # and the scale applied by an elementwise kernel.
        mean, ln_var = inputs
        if self.eps is None:
            self.eps = cuda.empty(ln_var.shape, numpy.float32)
            cuda.get_generator().fill_normal(self.eps)

        noise = cuda.empty_like(ln_var)
        cuda.elementwise(
            'float* noise, const float* v, const float* e',
            'noise[i] = __expf(v[i] * 0.5f) * e[i];',
            'gaussian_forward'
        )(noise, ln_var, self.eps)
        self.noise = noise
        return mean + self.noise,

    def backward(self, inputs, grad_output):
        # out = mean + exp(ln_var/2) * eps, so
        # d(out)/d(mean) = 1 and d(out)/d(ln_var) = 0.5 * noise.
        g, = grad_output
        return g, g * self.noise * 0.5,
def gaussian(mean, ln_var):
    """Gaussian sampling function.

    It takes mean :math:`\\mu` and logarithm of variance
    :math:`\\log(\\sigma^2)` as input and outputs a sample drawn from a
    gaussian distribution :math:`N(\\mu, \\sigma)`.

    Args:
        mean (~chainer.Variable): Input variable representing mean
            :math:`\\mu`.
        ln_var (~chainer.Variable): Input variable representing logarithm of
            variance :math:`\\log(\\sigma^2)`.

    Returns:
        ~chainer.Variable: Output variable.

    """
    return Gaussian()(mean, ln_var)
| Python | 0.000005 |
e23ccb850a6aef017ae91e35f672e6c6b5184e23 | Add image preprocessing functions | skan/pre.py | skan/pre.py | import numpy as np
from scipy import spatial, ndimage as ndi
from skimage import filters, img_as_ubyte
def hyperball(ndim, radius):
    """Return a binary morphological filter containing pixels within `radius`.

    Parameters
    ----------
    ndim : int
        The number of dimensions of the filter.
    radius : int
        The radius of the filter.

    Returns
    -------
    ball : array of bool, shape [2 * radius + 1,] * ndim
        The required structural element
    """
    size = 2 * radius + 1
    # One coordinate grid per axis, each of shape (size,) * ndim.
    axes = np.mgrid[(slice(None, size),) * ndim]
    # Squared Euclidean distance of every pixel from the center; comparing
    # squared integers avoids any floating-point rounding at the boundary.
    squared_distance = sum((axis - radius) ** 2 for axis in axes)
    ball = squared_distance <= radius ** 2
    return ball
def threshold(image, *, sigma=0., radius=0, offset=0.):
    """Use scikit-image filters to "intelligently" threshold an image.

    Parameters
    ----------
    image : array, shape (M, N, ...[, 3])
        Input image, conformant with scikit-image data type
        specification [1]_.
    sigma : float, optional
        If positive, use Gaussian filtering to smooth the image before
        thresholding.
    radius : int, optional
        If given, use local median thresholding instead of global.
    offset : float, optional
        If given, reduce the threshold by this amount. Higher values
        result in more pixels above the threshold.

    Returns
    -------
    thresholded : image of bool, same shape as `image`
        The thresholded image.

    References
    ----------
    .. [1] http://scikit-image.org/docs/dev/user_guide/data_types.html
    """
    if sigma > 0:
        # filters.gaussian returns a float image; the ubyte conversion
        # below rescales it back to the 0-255 range.
        image = filters.gaussian(image, sigma=sigma)
    image = img_as_ubyte(image)
    if radius > 0:
        # Local thresholding: each pixel is compared against the median of
        # its hyperball neighborhood (minus the offset).
        footprint = hyperball(image.ndim, radius=radius)
        t = ndi.median_filter(image, footprint=footprint) - offset
    else:
        # Global thresholding via Otsu's method.
        t = filters.threshold_otsu(image) - offset
    thresholded = image > t
    return thresholded
| Python | 0.000004 | |
f8823429d1bc548e4a91fe8ea64086d35dd66676 | Add race migration. | tvdordrecht/race/migrations/0003_auto_20150730_2250.py | tvdordrecht/race/migrations/0003_auto_20150730_2250.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated by Django's `makemigrations` for the `race` app:
    # reorders/renames model meta options, replaces the Distance `default`
    # flag with publication/ownership bookkeeping fields (owner,
    # last_modified(_by), pub_date).  Do not hand-edit operations; create a
    # new migration instead.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('race', '0002_auto_20150729_1906'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='distance',
            options={'ordering': ['order'], 'verbose_name': 'Afstand', 'verbose_name_plural': 'Afstanden'},
        ),
        migrations.AlterModelOptions(
            name='result',
            options={'ordering': ['date', 'event', 'distance', 'time'], 'verbose_name': 'Wie wat waar / Uitslag', 'verbose_name_plural': 'Wie wat waars / Uitslagen'},
        ),
        migrations.RemoveField(
            model_name='distance',
            name='default',
        ),
        migrations.AddField(
            model_name='distance',
            name='last_modified',
            field=models.DateTimeField(auto_now=True, verbose_name=b'laatst bewerkt', null=True),
        ),
        migrations.AddField(
            model_name='distance',
            name='last_modified_by',
            field=models.ForeignKey(related_name='distance_last_modified_by', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name=b'Laatst bewerkt door'),
        ),
        migrations.AddField(
            model_name='distance',
            name='owner',
            field=models.ForeignKey(related_name='distance_owner', blank=True, editable=False, to=settings.AUTH_USER_MODEL, null=True, verbose_name=b'Eigenaar'),
        ),
        migrations.AddField(
            model_name='distance',
            name='pub_date',
            field=models.DateTimeField(null=True, verbose_name=b'publicatie datum', blank=True),
        ),
    ]
| Python | 0 | |
564851a1a7f1378c9ef0e936640b690300a112fb | Add synthtool scripts (#3765) | java-containeranalysis/google-cloud-containeranalysis/synth.py | java-containeranalysis/google-cloud-containeranalysis/synth.py | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp

gapic = gcp.GAPICGenerator()
# NOTE(review): common_templates is never used below — confirm whether the
# common-template copy step was intentionally omitted.
common_templates = gcp.CommonTemplates()

# Generate the v1beta1 Container Analysis client from its artman config.
library = gapic.java_library(
    service='container',
    version='v1beta1',
    config_path='/google/devtools/containeranalysis/artman_containeranalysis_v1beta1.yaml',
    artman_output_name='')

# Copy the generated GAPIC sources into this module, and the gRPC/proto
# sources into the sibling google-api-grpc modules.
s.copy(library / 'gapic-google-cloud-containeranalysis-v1beta1/src', 'src')
s.copy(library / 'grpc-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/grpc-google-cloud-containeranalysis-v1beta1/src')
s.copy(library / 'proto-google-cloud-containeranalysis-v1beta1/src', '../../google-api-grpc/proto-google-cloud-containeranalysis-v1beta1/src')
| Python | 0.000001 | |
e2124aef9cb91dac3a597d353cd217ed328221e5 | Add gyp file to build cpu_features static library. | ndk/sources/android/cpufeatures/cpu_features.gyp | ndk/sources/android/cpufeatures/cpu_features.gyp | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
  'targets': [
    {
      # Static library wrapping the NDK cpu-features helper
      # (cpu-features.c), used to query CPU capabilities at runtime.
      'target_name': 'cpu_features',
      'type': 'static_library',
      'direct_dependent_settings': {
        # Dependents include "cpu-features.h" from this directory.
        'include_dirs': [
          '.',
        ],
      },
      'sources': [
        'cpu-features.c',
      ],
    },
  ],
}
| Python | 0 | |
f4aad329c445415f1306882d386abe43969ba6a9 | Add test for API ticket basics. | Allura/allura/tests/functional/test_rest_api_tickets.py | Allura/allura/tests/functional/test_rest_api_tickets.py | from pprint import pprint
from datetime import datetime, timedelta
import json
from pylons import c
from ming.orm import session
from allura import model as M
from allura.lib import helpers as h
from alluratest.controller import TestController, TestRestApiBase
class TestApiTicket(TestRestApiBase):
    """Basic REST API authentication tests using time-limited API tickets."""

    def set_api_ticket(self, expire=None):
        # Create an API ticket for the test user (valid for `expire`,
        # default one day) and install it as the token that api_post() signs
        # requests with.
        if not expire:
            expire = timedelta(days=1)
        api_ticket = M.ApiTicket(user_id=self.user._id, capabilities={'import': 'test'},
                                 expires=datetime.utcnow() + expire)
        session(api_ticket).flush()
        self.set_api_token(api_ticket)

    def test_bad_signature(self):
        # A request signed with the wrong signature is rejected (403).
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/home/', api_signature='foo')
        assert r.status_int == 403

    def test_bad_token(self):
        # A request carrying an unknown API key is rejected (403).
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/home/', api_key='foo')
        assert r.status_int == 403

    def test_bad_timestamp(self):
        # A request timestamped in the future fails signature validation.
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/home/', api_timestamp=(datetime.utcnow() + timedelta(days=1)).isoformat())
        assert r.status_int == 403

    def test_bad_path(self):
        # Nonexistent neighborhood, project or tool mount point -> 404.
        self.set_api_ticket()
        r = self.api_post('/rest/1/test/home/')
        assert r.status_int == 404
        r = self.api_post('/rest/p/1223/home/')
        assert r.status_int == 404
        r = self.api_post('/rest/p/test/12home/')
        assert r.status_int == 404

    def test_no_api(self):
        # Tools without a REST API (admin) are not exposed -> 404.
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/admin/')
        assert r.status_int == 404

    def test_project_ping(self):
        # A valid ticket grants access and returns project metadata.
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/home/')
        assert r.status_int == 200
        assert r.json['shortname'] == 'test'

    def test_project_ping_expired_ticket(self):
        # A ticket that expired (negative lifetime) is rejected (403).
        self.set_api_ticket(timedelta(seconds=-1))
        r = self.api_post('/rest/p/test/home/')
        assert r.status_int == 403

    def test_subproject_ping(self):
        # Tickets also work against subproject URLs.
        self.set_api_ticket()
        r = self.api_post('/rest/p/test/sub1/home/')
        assert r.status_int == 200
        assert r.json['shortname'] == 'test/sub1'
| Python | 0 | |
38a5a1d5bd5bcccf52a66a84377429bdecdfa4a2 | Replace g.next() with next(g) | troveclient/tests/test_common.py | troveclient/tests/test_common.py | from testtools import TestCase
from mock import Mock
from troveclient import common
class CommonTest(TestCase):
    """Tests for the helper functions in troveclient.common."""

    def test_check_for_exceptions(self):
        # Client-error and server-error status codes must all raise.
        status = [400, 422, 500]
        for s in status:
            resp = Mock()
            resp.status_code = s
            self.assertRaises(Exception,
                              common.check_for_exceptions, resp, "body")

    def test_limit_url(self):
        # Without pagination parameters the URL is returned untouched.
        url = "test-url"
        limit = None
        marker = None
        self.assertEqual(url, common.limit_url(url))
        # With both parameters they are appended as a query string.
        limit = "test-limit"
        marker = "test-marker"
        expected = "test-url?marker=test-marker&limit=test-limit"
        self.assertEqual(expected,
                         common.limit_url(url, limit=limit, marker=marker))
class PaginatedTest(TestCase):
    """Tests for the list-like wrapper troveclient.common.Paginated."""

    def setUp(self):
        super(PaginatedTest, self).setUp()
        self.items_ = ["item1", "item2"]
        self.next_marker_ = "next-marker"
        self.links_ = ["link1", "link2"]
        self.pgn = common.Paginated(self.items_, self.next_marker_,
                                    self.links_)

    def tearDown(self):
        super(PaginatedTest, self).tearDown()

    def test___init__(self):
        self.assertEqual(self.items_, self.pgn.items)
        self.assertEqual(self.next_marker_, self.pgn.next)
        self.assertEqual(self.links_, self.pgn.links)

    def test___len__(self):
        self.assertEqual(len(self.items_), self.pgn.__len__())

    def test___iter__(self):
        # Iteration must mirror the wrapped list exactly, then exhaust.
        itr_expected = self.items_.__iter__()
        itr = self.pgn.__iter__()
        self.assertEqual(next(itr_expected), next(itr))
        self.assertEqual(next(itr_expected), next(itr))
        self.assertRaises(StopIteration, next, itr_expected)
        self.assertRaises(StopIteration, next, itr)

    def test___getitem__(self):
        self.assertEqual(self.items_[0], self.pgn.__getitem__(0))

    def test___setitem__(self):
        self.pgn.__setitem__(0, "new-item")
        self.assertEqual("new-item", self.pgn.items[0])

    def test___delitem(self):
        del self.pgn[0]
        self.assertEqual(1, self.pgn.__len__())

    def test___reversed__(self):
        # Reversal yields items back-to-front, then exhausts.
        # (Removed the unused `expected` list from the original.)
        itr = self.pgn.__reversed__()
        self.assertEqual("item2", next(itr))
        self.assertEqual("item1", next(itr))
        self.assertRaises(StopIteration, next, itr)

    def test___contains__(self):
        self.assertTrue(self.pgn.__contains__("item1"))
        self.assertTrue(self.pgn.__contains__("item2"))
        self.assertFalse(self.pgn.__contains__("item3"))
| from testtools import TestCase
from mock import Mock
from troveclient import common
class CommonTest(TestCase):
    """Tests for the helper functions in troveclient.common."""

    def test_check_for_exceptions(self):
        # Client-error and server-error status codes must all raise.
        status = [400, 422, 500]
        for s in status:
            resp = Mock()
            resp.status_code = s
            self.assertRaises(Exception,
                              common.check_for_exceptions, resp, "body")

    def test_limit_url(self):
        # Without pagination parameters the URL is returned untouched.
        url = "test-url"
        limit = None
        marker = None
        self.assertEqual(url, common.limit_url(url))
        # With both parameters they are appended as a query string.
        limit = "test-limit"
        marker = "test-marker"
        expected = "test-url?marker=test-marker&limit=test-limit"
        self.assertEqual(expected,
                         common.limit_url(url, limit=limit, marker=marker))
class PaginatedTest(TestCase):
    """Tests for the list-like wrapper troveclient.common.Paginated."""

    def setUp(self):
        super(PaginatedTest, self).setUp()
        self.items_ = ["item1", "item2"]
        self.next_marker_ = "next-marker"
        self.links_ = ["link1", "link2"]
        self.pgn = common.Paginated(self.items_, self.next_marker_,
                                    self.links_)

    def tearDown(self):
        super(PaginatedTest, self).tearDown()

    def test___init__(self):
        self.assertEqual(self.items_, self.pgn.items)
        self.assertEqual(self.next_marker_, self.pgn.next)
        self.assertEqual(self.links_, self.pgn.links)

    def test___len__(self):
        self.assertEqual(len(self.items_), self.pgn.__len__())

    def test___iter__(self):
        # Use the builtin next() instead of the Python-2-only it.next()
        # so these tests also run on Python 3.
        itr_expected = self.items_.__iter__()
        itr = self.pgn.__iter__()
        self.assertEqual(next(itr_expected), next(itr))
        self.assertEqual(next(itr_expected), next(itr))
        self.assertRaises(StopIteration, next, itr_expected)
        self.assertRaises(StopIteration, next, itr)

    def test___getitem__(self):
        self.assertEqual(self.items_[0], self.pgn.__getitem__(0))

    def test___setitem__(self):
        self.pgn.__setitem__(0, "new-item")
        self.assertEqual("new-item", self.pgn.items[0])

    def test___delitem(self):
        del self.pgn[0]
        self.assertEqual(1, self.pgn.__len__())

    def test___reversed__(self):
        # Reversal yields items back-to-front, then exhausts.
        # (Removed the unused `expected` list and the it.next() calls.)
        itr = self.pgn.__reversed__()
        self.assertEqual("item2", next(itr))
        self.assertEqual("item1", next(itr))
        self.assertRaises(StopIteration, next, itr)

    def test___contains__(self):
        self.assertTrue(self.pgn.__contains__("item1"))
        self.assertTrue(self.pgn.__contains__("item2"))
        self.assertFalse(self.pgn.__contains__("item3"))
| Python | 0.000939 |
3eb8e73faf56bf3e3e3eb7cc8209c780d0f71b62 | create nanoparticle class | nanoparticle.py | nanoparticle.py | from scipy.constants import pi
import numpy as np
from math import cos, sin
class NanoParticle(object):
    """Spherical nanoparticle doped with acceptor dyes.

    Acceptor coordinates live in ``acceptors_positions`` (shape
    ``(n_acceptors, 3)``, Cartesian).  A photon position is drawn with
    :meth:`photon` and per-acceptor distance terms with :meth:`distance`.
    """

    def __init__(self, r, n_acceptors, tau_D, R_Forster):
        """
        Create a nanoparticle object

        Parameters
        ----------
        r : float
            Radius of the nanoparticle
        n_acceptors : int
            Number of acceptors in the nanoparticle
        tau_D : float
            Lifetime of the donor
        R_Forster : float
            Forster radius
        """
        self.R = r
        self.n_acceptors = n_acceptors
        self.acceptors_positions = np.zeros((n_acceptors, 3))
        self.tau_D = tau_D
        self.R_Forster = R_Forster
        # Set by photon().  Stored under its own name: the original assigned
        # ``self.photon``, which shadowed the photon() method and made any
        # second call raise TypeError.
        self.photon_position = None

    def deposit_superficial_acceptors(self):
        """
        Place the n_acceptors at random points on the nanoparticle surface.

        NOTE(review): sampling phi ~ U(0, pi) is kept from the original but is
        NOT uniform on the sphere (it clusters points near the poles); uniform
        sampling requires cos(phi) ~ U(-1, 1) -- confirm whether that matters
        for the simulation.
        """
        for i in range(self.n_acceptors):
            # Generate in spherical coordinates...
            theta = np.random.uniform(low=0, high=2*pi)
            phi = np.random.uniform(low=0, high=pi)
            # ...and transform to Cartesian.
            self.acceptors_positions[i][0] = sin(phi)*cos(theta)*self.R
            self.acceptors_positions[i][1] = sin(phi)*sin(theta)*self.R
            self.acceptors_positions[i][2] = cos(phi)*self.R

    def deposit_volumetrically_acceptors(self):
        """
        Place the n_acceptors at random points in the bounding cube.

        NOTE(review): kept from the original -- points are uniform in the
        [-R, R]^3 cube, so cube corners lie OUTSIDE the sphere.  Use rejection
        sampling if strict "inside the nanoparticle" uniformity is required.
        Reference: http://mathworld.wolfram.com/SpherePointPicking.html
        """
        for i in range(self.n_acceptors):
            self.acceptors_positions[i][0] = np.random.uniform(low=-self.R, high=self.R)
            self.acceptors_positions[i][1] = np.random.uniform(low=-self.R, high=self.R)
            self.acceptors_positions[i][2] = np.random.uniform(low=-self.R, high=self.R)

    def photon(self):
        """
        Draw a random photon position in the particle's bounding cube.

        The position is stored in ``self.photon_position`` and returned, so
        the method remains callable any number of times.
        """
        x = np.random.uniform(low=-self.R, high=self.R)
        y = np.random.uniform(low=-self.R, high=self.R)
        z = np.random.uniform(low=-self.R, high=self.R)
        self.photon_position = np.array([x, y, z])
        return self.photon_position

    def walk(self):
        # TODO: photon random-walk step is not implemented yet.
        pass

    def distance(self):
        """
        Compute, for every acceptor, r**6 where r is the photon-acceptor
        distance; results are stored in ``self.dist``.

        NOTE(review): the original docstring claimed 1/(r**6) but the code
        computes r**6; behaviour is kept and the docs now match the code.
        Requires photon() to have been called first.
        """
        self.dist = np.zeros(self.n_acceptors)
        for i in range(self.n_acceptors):
            self.dist[i] = (sum((self.photon_position - self.acceptors_positions[i])**2))**3
| Python | 0.000002 | |
b6356e4b7a88e1b2034f37aa135794b08e79c70b | Test POC script | tests/chk.py | tests/chk.py | import paramiko,sys,re,time,subprocess,getpass,os
def main(argv):
    """Interactive REPL: prompt for credentials once, then dispatch commands.

    NOTE(review): this is a Python 2 script (raw_input, print statements in
    the helper functions); it will not run unmodified on Python 3.
    """
    # The SSH helpers (sshto/sshtoEnter) read the module-level ``user`` and
    # ``passwd``; the original assigned plain locals here, so the helpers only
    # ever saw the placeholder ' ' values and every login failed.
    global user, passwd
    os.system('cls')  # on windows
    mins = 0
    print ("\n[info] "+time.strftime("%d/%m/%Y %H:%M:%S") +"\n")
    print ("""
                              _       _
  ______  _____   ___  ___ _ __(_)_ __ | |_
 |_  /\ \/ / __| / __|/ __| '__| | '_ \| __|
  / /  >  < (__  \__ \ (__| |  | | |_) | |_
 /___|/_/\_\___| |___/\___|_|  |_| .__/ \__|
                                 |_|
    """)
    user = raw_input('Username : ')
    passwd = getpass.getpass(prompt='Password : ')
    while mins != -1:
        q = raw_input('script #>')
        # quit/exit only terminate; the original's unchained ifs made "quit"
        # also fall through to case("quit") and print an error.
        if "quit" in q or "exit" in q:
            mins = -1
        else:
            case(q)
    os.system('cls')  # on windows
def sshto(host,command):
    """Run *command* on *host* over SSH and return its stdout as one string.

    Uses the module-level ``user``/``passwd`` credentials (set by main()).
    NOTE(review): unknown host keys are auto-accepted -- fine for a lab tool,
    insecure in general.  Python 2 print statement below.
    """
    output = ''
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, password=passwd)
    print "\n[info] executing : \""+ command + "\" on " + nslookup(host)
    stdin, stdout, stderr = ssh.exec_command(command)
    stdin.flush()
    # Accumulate remote stdout line by line.
    for line in stdout:
        # print line.strip('\n')
        output+=str(line)
    ssh.close()
    return output
def sshtoEnter(host,command):
    """Run *command* in an interactive SSH shell on *host*, then send Enter.

    Intended for commands that ask for confirmation (e.g. `clear counter`).
    Always returns '' -- the shell output is never collected.
    NOTE(review): fixed 5-second wait before confirming; Python 2 prints.
    """
    output = ''
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(host, username=user, password=passwd)
    print "\n[info] executing : \""+ command + "\" on " + nslookup(host)
    chan = ssh.invoke_shell()
    chan.send(command + '\n')
    # Give the device time to show its confirmation prompt, then confirm.
    time.sleep(5)
    chan.send('\r')
    ssh.close()
    print "[info] completed : \""+ command + "\" on " + nslookup(host)
    return output
def checkBadlink(interface,output):
    """Report the up/down state of *interface* from `show`-style *output*.

    NOTE(review): 'down' is tested first, so "administratively down" lines
    also report as plain down.  Python 2 print statements.
    """
    for line in output.splitlines():
        if interface in line:
            if 'down' in line:
                print "[Response] interface is down"
            elif 'up' in line:
                print "[Response] interface is up"
    print('\n')
def nslookup(ip):
    """Reverse-resolve *ip* via the system `nslookup` binary.

    Returns the resolved host name, or the ip itself when resolution reports
    'Non-existent'.  NOTE(review): assumes Windows-format nslookup output and
    returns '' when neither marker appears -- confirm callers tolerate that.
    """
    output = ""
    ns = subprocess.Popen(["nslookup",ip], stdout=subprocess.PIPE)
    outns, err = ns.communicate()
    for line in outns.splitlines():
        if 'Non-existent' in line:
            output = ip
            break
        elif 'Name' in line:
            # Take the token after the 'Name:' label.
            for ls in line.split():
                if 'Name' not in ls:
                    output = ls
    return output
def checkNotRespon(ip):
    """Ping *ip* and summarise reachability from the command output.

    NOTE(review): assumes the Windows `ping` default of 4 echoes and its
    English output strings; the local name 'ping' shadows the ping() command
    handler defined below.  Python 2 print statements.
    """
    ping = subprocess.Popen(["ping",ip], stdout=subprocess.PIPE)
    print "\n[info] Pinging... "+nslookup(ip)
    outping, err = ping.communicate()
    # Count hard failures among the 4 echo requests.
    fail = outping.count('Request timed out')+outping.count('Destination net unreachable')
    if fail==4:
        print "[Response] Device is not reachable"
    elif fail>=1:
        print "[Response] Possibly link is intermittent"
    else:
        print "[Response] Device is reachable"
    # Echo the RTT summary line, if present.
    for line in outping.splitlines():
        if "Minimum" in line:
            print "[Response] "+line
    print('\n')
def checkBGP(peer,output):
    """Report the BGP session state for *peer* from `show bgp`-style *output*.

    Fixes the original condition ``if 'Active' or 'Idle' in line`` -- the
    string literal 'Active' is always truthy, so every matching line reported
    "BGP is down" and the Admin/up branches were unreachable.
    """
    for line in output.splitlines():
        if peer in line:
            if 'Active' in line or 'Idle' in line:
                # Active/Idle are the down states of the BGP FSM output here.
                print("[Response] BGP is down")
            elif 'Admin' in line:
                print("[Response] BGP is administratively down")
            else:
                print("[Response] BGP is up")
            print('\n')
def checkErr(output):
    """Summarise `show interface` counter *output*: clear time, error lines.

    NOTE(review): Python 2 print statements; the trailing ``pass`` is dead.
    """
    for line in output.splitlines():
        if "counters" in line:
            if "never" in line:
                print "[Response] Link has never been cleared"
            else:
                # Last token of the 'counters cleared' line is the age.
                print "[Response] It has been " + line.split()[-1] + " since last counter cleared"
        if "input" in line:
            print "[Response] Link is seeing :-\n[Response] " + line
        if "output" in line:
            print "[Response] " + line
    pass
################################################################################
# options and selections.
################################################################################
def ping(expres):
    """Handle a ``ping <ip>`` command: reachability-check the last token."""
    # ping 128.58.XXX.XXX
    checkNotRespon(expres.split()[-1])
def stats(expres):
    """Handle ``stats <interface> <ip>``: show interface/error counters."""
    #stats se0/0/0 128.58.XXX.XXX
    checkErr(sshto(expres.split()[-1],'sh int '+expres.split()[1]+' | i inte|err'))
def clearcounter(expres):
    """Handle ``clear counter <interface> <ip>`` via an interactive shell."""
    #clear counter se0/0 128.58.XXX.XXX
    sshtoEnter(expres.split()[-1],'clear counter ' + expres.split()[-2])
def case(semantic):
    """Dispatch a REPL command string to its handler.

    NOTE(review): substring matching -- any input containing e.g. 'ping'
    triggers ping(); unknown commands fall through to the message below.
    Python 2 print statement.
    """
    if "stats" in semantic:
        stats(semantic)
    elif "clear counter" in semantic:
        clearcounter(semantic)
    elif "ping" in semantic:
        ping(semantic)
    elif "\r" in semantic:
        # Bare Enter: do nothing.
        pass
    else:
        print "[info] sorry but feature "+semantic+" has not been implemented"
# Module-level credentials: placeholders until main() prompts for real ones
# and rebinds them (the SSH helpers read these globals).
user = ' '
passwd = ' '
# Rough IPv4 matcher (currently unused by the dispatch functions).
ippat = re.compile('[0-9]+(?:\.[0-9]+){3}')
if __name__ == "__main__":
    main(sys.argv)
# case("stats se0/0/0 128.58.246.214")
# case("clear counter se0/0/0 128.58.246.214")
# case("stats se0/0/0 128.58.246.214")
# print sshto('128.58.246.214','sh log')
# case ("ping 128.58.246.214")
#time.sleep(60) #60 Second Sleep
b3e6855489eba5d59507ef6fb4c92f8284526ec1 | Check consecutive elements in an array | Arrays/check_consecutive_elements.py | Arrays/check_consecutive_elements.py | import unittest
"""
Given an unsorted array of numbers, return true if the array only contains consecutive elements.
Input: 5 2 3 1 4
Ouput: True (consecutive elements from 1 through 5)
Input: 83 78 80 81 79 82
Output: True (consecutive elements from 78 through 83)
Input: 34 23 52 12 3
Output: False
"""
"""
Approach:
1. First check that there are (max - min + 1) elements in the array.
2. Second, check that all elements are unique.
3. If all elements are consecutive, we can use arr[i]-min as an index into the array.
4. If element is positive, make it negative, else if its negative, there is repetition.
NOTE: This only works if all numbers are positive, otherwise use a hashmap to check for dupes.
O(n) time complexity and O(1) space complexity.
"""
def check_consecutive_only(list_of_numbers):
    """Return True iff the list is a permutation of a consecutive range.

    A list of n numbers is "consecutive" when max - min + 1 == n and every
    value is distinct.  This replaces the original in-place sign-flip trick,
    which mutated the caller's list and only worked for strictly positive
    numbers; this version also handles zero and negatives, at the cost of
    O(n) extra space for the uniqueness set.

    An empty list is not considered consecutive (the original raised
    ValueError from min([])).
    """
    if not list_of_numbers:
        return False
    lo = min(list_of_numbers)
    hi = max(list_of_numbers)
    expected_length = hi - lo + 1
    # Right length AND all-unique together imply exactly the range lo..hi.
    return (len(list_of_numbers) == expected_length
            and len(set(list_of_numbers)) == expected_length)
class TestConsecutiveElements(unittest.TestCase):
    """Unit tests for check_consecutive_only()."""

    def test_consecutive_true(self):
        """A shuffled-but-consecutive run is accepted."""
        self.assertTrue(check_consecutive_only([83, 78, 80, 81, 79, 82]))

    def test_consecutive_false(self):
        """Duplicates or gaps make the list non-consecutive."""
        self.assertFalse(check_consecutive_only([7, 6, 5, 5, 3, 4]))
        self.assertFalse(check_consecutive_only([34, 23, 52, 12, 3]))
| Python | 0.000024 | |
f325937df3f1f1f972c7a0780d702f7fea5d03f5 | Test `__eq__`, `__ne__`, and `__hash__` | test/test_eq.py | test/test_eq.py | import pytest
from permutation import Permutation
# Each inner list is one equivalence class: every expression inside it must
# construct the same permutation, and permutations from different classes
# must compare unequal.
EQUIV_CLASSES = [
    [
        # The identity permutation, spelled every available way.
        Permutation(),
        Permutation(1),
        Permutation(1,2),
        Permutation(1,2,3,4,5),
        Permutation.transposition(2,2),
        Permutation.cycle(),
        Permutation.from_cycles(),
        Permutation.from_cycles(()),
    ],
    [
        # The transposition (1 2).
        Permutation(2,1),
        Permutation(2,1,3,4,5),
        Permutation.transposition(1,2),
        Permutation.transposition(2,1),
        Permutation.cycle(1,2),
        Permutation.cycle(2,1),
        Permutation.from_cycles((1,2)),
    ],
    [
        # The 3-cycle (1 2 3), under all rotations of the cycle notation.
        Permutation(2,3,1),
        Permutation(2,3,1,4,5),
        Permutation.cycle(1,2,3),
        Permutation.cycle(2,3,1),
        Permutation.cycle(3,1,2),
        Permutation.from_cycles((1,2,3)),
        Permutation.from_cycles((2,3,1)),
        Permutation.from_cycles((3,1,2)),
    ],
    [
        # The inverse 3-cycle (1 3 2).
        Permutation(3,1,2),
        Permutation(3,1,2,4,5),
        Permutation.cycle(1,3,2),
        Permutation.cycle(2,1,3),
        Permutation.cycle(3,2,1),
        Permutation.from_cycles((1,3,2)),
        Permutation.from_cycles((2,1,3)),
        Permutation.from_cycles((3,2,1)),
    ],
    [
        # The transposition (1 3).
        Permutation(3,2,1),
        Permutation(3,2,1,4,5),
        Permutation.transposition(1,3),
        Permutation.transposition(3,1),
        Permutation.cycle(1,3),
        Permutation.cycle(3,1),
        Permutation.from_cycles((1,3)),
        Permutation.from_cycles((3,1)),
    ],
    [
        # A product of disjoint cycles: (1 2 3)(4 5), in either order.
        Permutation(2,3,1,5,4),
        Permutation.from_cycles((1,2,3), (4,5)),
        Permutation.from_cycles((1,2,3), (5,4)),
        Permutation.from_cycles((3,1,2), (4,5)),
        Permutation.from_cycles((4,5), (3,1,2)),
        Permutation.from_cycles((4,5), (1,2,3)),
        Permutation.from_cycles((5,4), (1,2,3)),
    ],
]
@pytest.mark.parametrize('p,q',
    [(p,q) for eqcls in EQUIV_CLASSES for p in eqcls for q in eqcls]
)
def test_eq(p,q):
    """Members of the same equivalence class compare equal and hash alike."""
    assert p == q
    assert not (p != q)
    assert hash(p) == hash(q)
@pytest.mark.parametrize('p,q', [
    (p,q) for i, ps in enumerate(EQUIV_CLASSES)
          for qs in EQUIV_CLASSES[:i] + EQUIV_CLASSES[i+1:]
          for p in ps
          for q in qs
])
def test_neq(p,q):
    """Members of different equivalence classes always compare unequal."""
    assert p != q
    assert not (p == q)
@pytest.mark.parametrize('p', [p for eqcls in EQUIV_CLASSES for p in eqcls])
@pytest.mark.parametrize('x', [None, 0, 1, True, False, '(1 2)', (1,2), [1,2]])
def test_neq_other_types(p,x):
    """Permutations never compare equal to non-Permutation objects."""
    assert p != x
    assert not (p == x)
| Python | 0.000011 | |
55b33bff9856cc91943f0a5ae492db1fdc7d8d5a | Add missing python 3 only file. | numba/tests/jitclass_usecases.py | numba/tests/jitclass_usecases.py | """
Usecases with Python 3 syntax in the signatures. This is a separate module
in order to avoid syntax errors with Python 2.
"""
class TestClass1(object):
    """Exercises a keyword-only default (``a``) after a positional default."""

    def __init__(self, x, y, z=1, *, a=5):
        # Store every constructor argument verbatim on the instance.
        self.x, self.y, self.z, self.a = x, y, z, a
class TestClass2(object):
    """Exercises ``*args`` followed by a keyword-only default (``a``)."""

    def __init__(self, x, y, z=1, *args, a=5):
        # Store every constructor argument verbatim on the instance.
        self.x, self.y, self.z = x, y, z
        self.args = args
        self.a = a
| Python | 0 | |
e251aff9a232a66b2d24324f394da2ad9345ce79 | Add migration script for changing users with None as email_verifications to {} | scripts/migration/migrate_none_as_email_verification.py | scripts/migration/migrate_none_as_email_verification.py | """ Ensure that users with User.email_verifications == None now have {} instead
"""
import logging
import sys
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
from nose.tools import *
from website import models
from website.app import init_app
from scripts import utils as scripts_utils
# Module-level logger; main() attaches a file handler for non-dry runs.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def main():
    """Reset User.email_verifications from None to {} for affected users.

    Pass ``dry`` on the command line for a dry run: users are iterated and
    logged but never saved, and no file logger is attached.
    """
    init_app(routes=False)
    dry_run = 'dry' in sys.argv
    count = 0
    if not dry_run:
        scripts_utils.add_file_logger(logger, __file__)
    logger.info("Iterating users with None as their email_verification")
    for user in get_users_with_none_in_email_verifications():
        user.email_verifications = {}
        count += 1
        logger.info(repr(user))
        if not dry_run:
            user.save()
    print('{} users migrated'.format(count))
def get_users_with_none_in_email_verifications():
    """Return the queryset of Users whose email_verifications field is None."""
    return models.User.find( Q('email_verifications', 'eq', None))
class TestMigrateDates(OsfTestCase):
    """The migration must fix None fields and leave {} fields alone."""

    def setUp(self):
        super(TestMigrateDates, self).setUp()
        # Fixed typo: the factory keyword was misspelled 'email_verfications'
        # in the original, so neither user actually had the field set and the
        # test did not exercise the migration's selection logic.
        self.user1 = UserFactory(email_verifications=None)
        self.user2 = UserFactory(email_verifications={})

    def test_migrate_none_as_email(self):
        main()
        assert_equal(self.user1.email_verifications, {})
        assert_not_equal(self.user2.email_verifications, None)
# Run the migration directly: ``python migrate_none_as_email_verification.py [dry]``.
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
97bf6ba36b27822a9bd73cb9a27d9878e48945e2 | add a decorator to ignore signals from fixture loading | project/apps/utils/signal_decorators.py | project/apps/utils/signal_decorators.py |
from functools import wraps
def disable_for_loaddata(signal_handler):
    """
    Decorator that skips a Django signal handler while fixture data is being
    loaded (loaddata passes ``raw=True`` to signal receivers).

    based on http://stackoverflow.com/a/15625121
    """
    @wraps(signal_handler)
    def skip_when_raw(*args, **kwargs):
        raw_load = kwargs.get('raw')
        if not raw_load:
            signal_handler(*args, **kwargs)
    return skip_when_raw
| Python | 0 | |
f8b5e413b46350f25bd7d231a8102c706fbf34f8 | Add new package: py-devlib (#16982) | var/spack/repos/builtin/packages/py-devlib/package.py | var/spack/repos/builtin/packages/py-devlib/package.py | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDevlib(PythonPackage):
    """Library for interaction with and instrumentation of remote devices."""

    homepage = "https://github.com/ARM-software/devlib"
    url = "https://github.com/ARM-software/devlib/archive/v1.2.tar.gz"

    # Release tarballs with their sha256 checksums, newest first.
    version('1.2', sha256='4cdb6767a9430b49eecffe34e2b9fcbcfc7e65328122d909aa71c3d11a86503d')
    version('1.1.2', sha256='c900420cb97239b4642f5e333e43884fb09507b530edb55466e7b82103b4deaa')
    version('1.1.1', sha256='eceb7a2721197a6023bbc2bbf346663fc117e4f54e1eb8334a3085dead9c8036')
    version('1.1.0', sha256='317e9be2303ebb6aebac9a2ec398c622ea16d6e46079dc9e37253b37d739ca9d')
    version('1.0.0', sha256='2f78278bdc9731a4fa13c41c74f08e0b8c5143de5fa1e1bdb2302673aec45862')
    version('0.0.4', sha256='0f55e684d43fab759d0e74bd8f0d0260d9546a8b8d853d286acfe5e00c86da05')
    version('0.0.3', sha256='29ec5f1de481783ab0b9efc111dfeb67c890187d56fca8592b25ee756ff32902')
    version('0.0.2', sha256='972f33be16a06572a19b67d909ee0ed6cb6f21f9a9da3c43fd0ff5851421051d')

    # Runtime dependencies; the last three are backports only needed on
    # older Python versions.
    depends_on('py-setuptools', type='build')
    depends_on('py-python-dateutil', type=('build', 'run'))
    depends_on('py-pexpect@3.3:', type=('build', 'run'))
    depends_on('py-pyserial', type=('build', 'run'))
    depends_on('py-wrapt', type=('build', 'run'))
    depends_on('py-future', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
    depends_on('py-enum34', type=('build', 'run'), when='^python@:3.3')
    depends_on('py-contextlib2', type=('build', 'run'), when='^python@:2.999')
    depends_on('py-numpy@:1.16.4', type=('build', 'run'), when='^python@:2.999')
    depends_on('py-numpy', type=('build', 'run'), when='^python@:3.0')
| Python | 0 | |
de39aa257d845ecb6e1c2e7c4c4911497d00cdcf | add sample, non working, test_wsgi | os_loganalyze/tests/test_wsgi.py | os_loganalyze/tests/test_wsgi.py | #!/usr/bin/python
#
# Copyright (c) 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Test the ability to convert files into wsgi generators
"""
from os_loganalyze.tests import base
import os_loganalyze.wsgi as log_wsgi
def _start_response(*args):
return
class TestWsgi(base.TestCase):
    """Sample (known non-working) smoke test for the htmlify WSGI app."""

    def test_nofile(self):
        gen = log_wsgi.application(None, _start_response)
        # NOTE(review): deliberate failure marker left in this sample test --
        # remove once the expectations below are confirmed.
        self.assertTrue(False)
        self.assertEqual(gen, ['Invalid file url'])
        environ = {
            'path': '/htmlify/foo.txt'
        }
        gen = log_wsgi.application(environ, _start_response)
        # NOTE(review): 'Invalid file url1' looks like a typo for
        # 'Invalid file url' -- confirm the intended expected value.
        self.assertEqual(gen, ['Invalid file url1'])
| Python | 0 | |
4c8ea40eeec6df07cf8721c256ad8cc3d35fb23e | Add intial unit test file | src/test_main.py | src/test_main.py | import pytest
from main import *
# Sample files from the linguist examples tree, paired index-for-index with
# the expected comment counts in number_of_comments below.
test_files = [ "examples/C/filenames/script", "examples/Clojure/index.cljs.hl",
               "examples/Chapel/lulesh.chpl", "examples/Forth/core.fth",
               "examples/GAP/Magic.gd", "examples/JavaScript/steelseries-min.js",
               "examples/Matlab/FTLE_reg.m", "examples/Perl6/for.t",
               "examples/VimL/solarized.vim", "examples/C/cpu.c",
               "examples/CSS/bootstrap.css", "examples/D/mpq.d",
               "examples/Go/api.pb.go", "examples/HTML+ERB/index.html.erb"]
# Expected number of comments per file above (same order).
number_of_comments = [
    423,# examples/C/filenames/script
    13, # examples/Clojure/index.cljs.hl
    609,# examples/Chapel/lulesh.chpl
    0,  # examples/Forth/core.fth
    3,  # examples/GAP/Magic.gd
    2,  # examples/JavaScript/steelseries-min.js
    6,  # examples/Matlab/FTLE_reg.m
    586,# examples/Perl6/for.t
    20, # examples/VimL/solarized.vim
    39, # examples/C/cpu.c
    680,# examples/CSS/bootstrap.css
    167,# examples/D/mpq.d
    0,  # examples/Go/api.pb.go
    10  # examples/HTML+ERB/index.html.erb
    ]
def test_get_comment_tokens():
    """A single C line comment is extracted verbatim (with its newline)."""
    from pygments.lexers.c_cpp import CLexer
    file_text_test = "int main(int argc, char[] argv){\n//This is a comment\n}\n"
    c_lexer = CLexer()
    results = []
    for comment in get_comment_tokens(file_text_test, c_lexer):
        results.append(comment)
    assert len(results) == 1
    assert results[0] == "//This is a comment\n"
def test_get_tokens_from_file():
    """Each example file must yield the expected number of comments.

    NOTE(review): assumes the linguist examples tree lives one directory up
    from the working directory -- confirm when wiring this into CI.
    """
    for index,file in enumerate(test_files, 0):
        result = get_tokens_from_file("../" + file)
        #print(index)
        print(file)
        assert number_of_comments[index] == len(result.keys())
33a3e4a8adc6b3284de18fe02c67eafa3a391226 | Create tinycrypt.py | tinycrypt.py | tinycrypt.py | Python | 0.000091 | ||
5204633c8b3d578d3860b964239191510ae14665 | Add imperfect NIH data script | nih_data.py | nih_data.py | #!/user/bin/env python3
'''Download NIH RePORTER database and find project data for funded researchers.'''
import shutil
import re
import zipfile
import csv
import codecs
import locale
import os.path
import urllib.request
from collections import namedtuple
from pprint import pprint
# Human-readable project summary page, keyed by application id.
_BASE_SUMMARY_URL = "https://projectreporter.nih.gov/project_info_description.cfm?aid={app_id}"
_BASE_DATA_FILENAME = "RePORTER_PRJ_C_FY{year}.{extension}"
# Choose between CSV and XML
_BASE_DATA_URL = "http://exporter.nih.gov/CSVs/final/RePORTER_PRJ_C_FY{year}.zip"
# _BASE_DATA_URL = "http://exporter.nih.gov/XMLData/final/RePORTER_PRJ_X_FY{year}.zip"
# Column indexes into a RePORTER project CSV row (rows have 45+ columns).
_APP_ID_IDX = 0
_PI_IDX = 29
_PROJ_TITLE_IDX = 34
_TOTAL_COST_IDX = 43
_SUB_COST_IDX = 44
# One funded project: who, what, where, and how much.
Project = namedtuple('Project', 'researcher title url funding_amount')
class Name:
    '''Researcher name.

    ``pretty_name`` is "Lastname, Firstname[ Middle]".  ``query_regex`` is
    that name upper-cased, anchored on word boundaries, for matching inside
    a principal-investigator CSV field.
    '''

    def __init__(self, last_name, first_name, middle=''):
        self.last_name = last_name
        self.first_name = first_name
        self.middle = middle
        # Join the given-name parts with single spaces.  The original always
        # appended " {middle}" which left a trailing space (and a double space
        # before any middle name) in pretty_name -- and a trailing space in
        # query_regex, so a name at the end of a field never matched.
        if middle == '':
            given_names = first_name
        else:
            given_names = '{} {}'.format(first_name, middle)
        self.pretty_name = '{}, {}'.format(last_name, given_names)
        self.query_regex = r"\b" + self.pretty_name.upper() + r"\b"
def _decode_file(filename):
    '''Remove carriage returns from passed filename.

    Creates a temporary file, reads it as a bytes stream, then
    writes back as a text file.

    NOTE(review): blank lines and lines that are not valid UTF-8 are silently
    dropped, and the scratch file 'tmp' is created in the current directory.
    '''
    tmp_filename = 'tmp'
    shutil.copyfile(filename, tmp_filename)
    with open(tmp_filename, 'rb') as unfixed:
        with open(filename, 'w') as fixed:
            # print("Failed lines:", "-" * 10, "\n\n")
            for line in unfixed:
                # rstrip() drops CR/LF (and any other trailing whitespace).
                fixed_line = line.rstrip()
                if len(fixed_line) == 0:
                    continue
                try:
                    fixed_line = fixed_line.decode('utf-8')
                    fixed.write(fixed_line)
                    fixed.write('\n')
                except UnicodeDecodeError:
                    # print('\n', fixed_line)
                    pass
            # print("\n\n/Failed lines", "-" * 10)
    os.remove(tmp_filename)
def _require_csv_file(fiscal_year, force_redownload=False, force_reunzip=True):
    '''Download the CSV file for the passed fiscal year if necessary.

    Return True if successful, False otherwise.

    Downloads the RePORTER zip (unless present), extracts it into the current
    directory, and normalises the CSV via _decode_file().
    '''
    csv_filename = _BASE_DATA_FILENAME.format(year=fiscal_year, extension='csv')
    zip_filename = _BASE_DATA_FILENAME.format(year=fiscal_year, extension='zip')
    csv_file_exists = os.path.isfile(csv_filename)
    zip_file_exists = os.path.isfile(zip_filename)
    if csv_file_exists and not force_redownload and not force_reunzip:
        print("File already exists:", csv_filename)
        return True
    # Download zip file
    if not zip_file_exists or force_redownload:
        url = _BASE_DATA_URL.format(year=fiscal_year)
        print("Downloading from {}...".format(url))
        request = urllib.request.urlopen(url)
        with open(zip_filename, 'wb') as zip_file:
            shutil.copyfileobj(request, zip_file)
    # Unzip data file
    if not csv_file_exists or force_reunzip:
        print("Unzipping {}...".format(zip_filename))
        zip_ref = zipfile.ZipFile(zip_filename, 'r')
        zip_ref.extractall()
        _decode_file(csv_filename)
    return os.path.isfile(csv_filename)
def _total_cost(csv_entry):
    '''Get the total cost for a project.

    Takes the max of the 'total cost' and 'sub cost' fields--sometimes
    'total cost' is left blank and its value is placed in the
    'sub cost' field.  Blank fields count as zero.
    '''
    # `or '0'` maps the empty string to zero before conversion, replacing
    # the original's four-statement blank-field dance.
    total_cost = int(csv_entry[_TOTAL_COST_IDX] or '0')
    sub_cost = int(csv_entry[_SUB_COST_IDX] or '0')
    return max(total_cost, sub_cost)
def _projects_data(researcher, filename):
    '''Get project data for projects associated with passed
    researcher in passed filename.

    Returns a list of Project tuples for every CSV row whose
    principal-investigator field matches researcher.query_regex.
    '''
    researcher_projects = []
    with open(filename, 'r') as csv_file:
        data_reader = csv.reader(csv_file, quotechar='"')
        # NOTE(review): 'count' is assigned but never used.
        count = 0
        for entry in data_reader:
            # Skip malformed/short rows that lack the cost columns.
            if len(entry) < 45:
                # print("###")
                # pprint(entry)
                # print("###")
                continue
            principal_investigators = entry[_PI_IDX]
            pi_participated_in_entry = re.search(
                researcher.query_regex, principal_investigators) is not None
            if not pi_participated_in_entry:
                continue
            # print(principal_investigators)
            # print("-" * 10)
            # pprint(entry)
            title = entry[_PROJ_TITLE_IDX]
            total_cost = _total_cost(entry)
            app_id = entry[_APP_ID_IDX]
            url = _BASE_SUMMARY_URL.format(app_id=app_id)
            researcher_projects.append(
                Project(researcher, title, url, total_cost))
    return researcher_projects
def main():
    """Fetch the FY2015 RePORTER CSV and print projects for the target PIs."""
    locale.setlocale(locale.LC_ALL, '')
    funded_researchers = [Name('Tanzi', 'Rudolph')]
    year = '2015'
    _require_csv_file(year, True, True)
    csv_filename = _BASE_DATA_FILENAME.format(year=year, extension='csv')
    researcher_data = []
    for researcher in funded_researchers:
        researcher_projects = _projects_data(researcher, csv_filename)
        researcher_data.extend(researcher_projects)
    for entry in researcher_data:
        print('-' * 10)
        print("Researcher:", entry.researcher.pretty_name)
        print("Project:", entry.title)
        print("URL:", entry.url)
        # [:-3] strips the cents from the locale-formatted currency string.
        funding_str = locale.currency(entry.funding_amount, grouping=True)[:-3]
        print("Amount:", funding_str)
# Run the full download-and-report pipeline when executed as a script.
if __name__ == '__main__':
    main()
| Python | 0.998359 | |
6cda3951d27e819cb452233f514c953c923d9a53 | Add Python script to check links (#872) | check_links.py | check_links.py | import os
from fnmatch import fnmatch
import bs4
from requests import get
from tqdm import tqdm
import webbrowser
import pyinputplus as pyip
from fake_headers import Headers
from random import shuffle
import validators
# Audit the outbound links of the generated lopp.net site: collect links from
# every HTML file, probe them with increasing patience, let a human confirm
# the stragglers, and write the confirmed-broken ones to broken_links.txt.

# Create a list of all the HTML files in lopp.net
website_directory = pyip.inputFilepath(
    prompt="Enter the path to the website directory: "
)
os.chdir(website_directory)
all_html_files = []  # (was initialised three times in the original)
for root, dirs, files in os.walk(os.getcwd()):
    for file in files:
        if fnmatch(file, "*.html"):
            all_html_files.append(os.path.join(root, file))

# Parse each HTML and create a list of links associated with each HTML file
all_links = []
for html_file in all_html_files:
    with open(html_file, "r") as f:
        soup = bs4.BeautifulSoup(f, "html.parser")
        for link in soup.find_all("a"):
            all_links.append(link.get("href"))

# Remove all duplicate links and those pointing to other pages in lopp.net
print(f"Total number of links: {len(all_links)}")
all_links = list(set(all_links))  # Removes duplicate links
shuffle(
    all_links
)  # We don't want to visit the same page twice in a row, so shuffle the list
# Rebuild the list instead of calling all_links.remove() while iterating it:
# removing during iteration skips the element after every removal, so some
# invalid links survived the original filter.
external_links = []
for link in all_links:
    if not link or validators.url(link) == False:
        # Anchors without an href and anything that is not a valid URL.
        continue
    if link.find("lopp.net") != -1:
        # Ignores the link if it points to one of the other pages in lopp.net or blog.lopp.net
        continue
    if link[0] == "#" or link[0] == "/":
        # Ignores the link if it is a link to a specific section of the page
        continue
    external_links.append(link)
all_links = external_links
print(f"Total number of links: {len(all_links)}")

# Iterate over each link and download the page with requests
failed_links = []
headers = Headers(headers=True).generate()
# For this first iteration, the timeout is set to 3 seconds
for link in tqdm(all_links):
    try:
        r = get(link, timeout=3, headers=headers)
        if r.status_code != 200:
            failed_links.append(link)
    except Exception:
        failed_links.append(link)
print("Finished checking links with a timeout of 3 seconds")
print(f"Number of failed links: {len(failed_links)}")
print("Retrying the failed links with a timeout of 10 seconds")
# Retries the failed links with a longer timeout; again rebuild the list
# rather than removing entries while tqdm is iterating it.
still_failed = []
for link in tqdm(failed_links):
    try:
        r = get(link, timeout=10, headers=headers)
        if r.status_code != 200:
            still_failed.append(link)
    except Exception:
        still_failed.append(link)
failed_links = still_failed
print("Finished checking links with a timeout of 10 seconds")
print(f"Number of failed links: {len(failed_links)}")
print(failed_links)

# Manual confirmation: open each suspect in the browser and ask the operator.
really_failed_links = []
for link in failed_links:
    webbrowser.open_new_tab(link)
    if pyip.inputYesNo("Is this link working? ") == "no":
        really_failed_links.append(link)

# Search all the HTML files for the failed links and print them out
files_with_failed_links = []
for html_file in all_html_files:
    with open(html_file, "r") as f:
        soup = bs4.BeautifulSoup(f, "html.parser")
        for link in soup.find_all("a"):
            if link.get("href") in really_failed_links:
                files_with_failed_links.append(f"{html_file} - {link.get('href')}")
                break

# Finally, output a list of the really broken links and their associated HTML files to a text file
os.chdir("..")
# "w" creates the file when missing and truncates it otherwise, which is what
# the original try-"x"-except-"w" dance amounted to; the context manager also
# guarantees the file is closed.
with open("broken_links.txt", "w") as f:
    for link in files_with_failed_links:
        f.write(link + "\n")
| Python | 0 | |
4a179825234b711a729fce5bc9ffc8de029c0999 | Test for invalid data when loading | utest/controller/test_loading.py | utest/controller/test_loading.py | import unittest
from robot.utils.asserts import assert_true, assert_raises, assert_raises_with_msg
from robotide.controller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH, FakeLoadObserver
from robot.errors import DataError
class TestDataLoading(unittest.TestCase):
    """Tests for ChiefController loading of suites, resources and bad paths."""

    def setUp(self):
        # Fresh controller and observer for every test.
        self.ctrl = ChiefController(Namespace())
        self.load_observer = FakeLoadObserver()

    def test_loading_suite(self):
        self._load(MINIMAL_SUITE_PATH)
        assert_true(self.ctrl._controller is not None)

    def test_loading_resource(self):
        self._load(RESOURCE_PATH)
        assert_true(self.ctrl.resources != [])

    def test_loading_invalid_data(self):
        assert_raises(DataError, self._load, 'invalid')

    def _load(self, path):
        # Helper: load `path` and verify the observer was told loading finished.
        self.ctrl.load_data(self.load_observer, path)
        assert_true(self.load_observer.finished)

    def test_loading_invalid_datafile(self):
        assert_raises_with_msg(DataError, 'Invalid data file: invalid.',
                               self.ctrl.load_datafile, FakeLoadObserver(),
                               'invalid')

    def test_loading_invalid_resource(self):
        assert_raises_with_msg(DataError, 'Invalid resource file: invalid.',
                               self.ctrl.load_resource, 'invalid')
if __name__ == "__main__":
unittest.main()
| import unittest
from robot.utils.asserts import assert_true, assert_raises
from robotide.application.chiefcontroller import ChiefController
from robotide.namespace import Namespace
from resources import MINIMAL_SUITE_PATH, RESOURCE_PATH
from robot.errors import DataError
class _FakeObserver(object):
    """Minimal stand-in for a load observer.

    NOTE(review): calling finished() rebinds the attribute ``finished`` from
    the bound method to True, shadowing the method. A bound method is truthy,
    so ``assert_true(obs.finished)`` passes even before finished() is called —
    presumably a deliberate quick stub, but worth confirming.
    """
    def notify(self):
        pass
    def finished(self):
        self.finished = True
class TestDataLoading(unittest.TestCase):
    """Tests for ChiefController loading of suites, resources and bad paths."""

    def setUp(self):
        # Fresh controller and observer for every test.
        self.ctrl = ChiefController(Namespace())
        self.load_observer = _FakeObserver()

    def test_loading_suite(self):
        self._load(MINIMAL_SUITE_PATH)
        assert_true(self.ctrl._controller is not None)

    def test_loading_resource(self):
        self._load(RESOURCE_PATH)
        assert_true(self.ctrl.resources != [])

    def test_loading_invalid_data(self):
        assert_raises(DataError, self._load, 'invalid')

    def _load(self, path):
        # Helper: load `path` and check the observer's `finished` flag/method.
        self.ctrl.load_data(self.load_observer, path)
        assert_true(self.load_observer.finished)
if __name__ == "__main__":
unittest.main()
| Python | 0 |
77fed107b4e224437995075bb8c11e1c0d2161d5 | add a utility script that checks property names and types against currently installed mapnik python bindings | util/validate-mapnik-instance.py | util/validate-mapnik-instance.py | #!/usr/bin/env python
import os
import sys
import json
import mapnik
# Bail out early: the reference data consumed below only matches Mapnik >= 2.1.
if not mapnik.mapnik_version() > 200100:
    print 'Error: this script is only designed to work with Mapnik 2.1 and above (you have %s)' % mapnik.mapnik_version_string()
    sys.exit(1)

# Pick the reference.json matching the installed version, falling back to latest.
mapnik_version = mapnik.mapnik_version_string().replace('-pre','')
reference_file = './%s/reference.json' % mapnik_version
if not os.path.exists(reference_file):
    print '\n*** WARNING *** reference.json not found for your mapnik version (%s), defaulting to latest\n' % mapnik_version
    reference_file = './latest/reference.json'
reference = json.load(open(reference_file, 'r'))

# Map reference.json property types to the Python type names exposed by the
# mapnik bindings (evaluated by name later in the script).
type_mapping = {
    'integer':'int',
    'float':'float',
    'unsigned':'int',
    'boolean':'bool',
    'uri':'str',
    'string':'str',
    'color':'mapnik.Color',
    'expression':'mapnik.Expression',
    'functions':'todo'
}
style = mapnik.Style()
for prop in reference['style'].items():
key = prop[0].replace('-','_')
assert hasattr(style,key), "'%s' not a valid property of Style" % key
layer = mapnik.Layer('foo')
for prop in reference['layer'].items():
key = prop[0].replace('-','_')
assert hasattr(layer,key), "'%s' not a valid property of Layer" % key
map_instance = mapnik.Map(256,256)
for prop in reference['symbolizers']['map'].items():
key = prop[0].replace('-','_')
# https://github.com/mapnik/mapnik/issues/1419
if not key in ['minimum_version','paths_from_xml','font_directory']:
assert hasattr(map_instance,key), "'%s' not a valid property of Map" % key
# https://github.com/mapnik/mapnik/issues/1427
text_fixups = {
'size':'text_size',
'opacity':'text_opacity',
'spacing':'label_spacing',
'max_char_angle_delta':'maximum_angle_char_delta',
'placement':'label_placement'
}
total_fails = 0
before = 0
for sym in reference['symbolizers'].items():
if sym[0] not in ['map','*']:
#if sym[0] in ['raster']:
sym_name = ''.join([s.title() for s in sym[0].split('-')])
sym_object = getattr(mapnik,sym_name+'Symbolizer')
instance_var = None
if sym_name in ['PolygonPattern','LinePattern']:
instance_var = sym_object(mapnik.PathExpression(''))
elif sym_name == 'Shield':
instance_var = sym_object(mapnik.Expression('True'),'DejaVu Sans Book', 10, mapnik.Color('black'), mapnik.PathExpression(''))
else:
instance_var = sym_object()
fails = []
for prop in sym[1]:
key = prop.replace('-','_')
if key == 'file':
key = 'filename'
if sym_name == 'Line' and 'stroke' in key:
stroke_instance = instance_var.stroke
if key == 'stroke':
key = 'color'
else:
key = key.replace('stroke_','')
if not hasattr(stroke_instance,key):
fails.append("'%s' not a valid property of %s" % (key,'Stroke'))
elif sym_name == 'Markers' and 'stroke' in key:
stroke_instance = instance_var.stroke
if not stroke_instance: # marker.stroke is boost::optional
stroke_instance = mapnik.Stroke()
if key == 'stroke':
key = 'color'
else:
key = key.replace('stroke_','')
if not hasattr(stroke_instance,key):
fails.append("'%s' not a valid property of %s" % (key,'Stroke'))
else:
# temporary hotfix until: https://github.com/mapnik/mapnik/issues/1427
if sym_name in ['Text','Shield']:
if key in text_fixups:
key = text_fixups[key]
if not hasattr(instance_var,key):
fails.append("'%s' not a valid property of %s" % (key,sym_name))
else:
attr_instance = getattr(instance_var,key)
prop_type = sym[1][prop]['type']
if not isinstance(prop_type,list):
mapnik_py_type = type_mapping[prop_type]
# TODO - make mapnik.Expression ctor a proper class
if attr_instance is None:
continue
if mapnik_py_type == 'mapnik.Expression':
#expected_expr = "<type 'Boost.Python.function'>"
expected_expr = "<class 'mapnik._mapnik.Expression'>"
if not str(type(attr_instance)) == expected_expr:
print 'type error: %s for %s/%s is not %s (ex' % (type(attr_instance),sym_name,key)
elif prop_type == 'functions':
pass
else:
if not isinstance(attr_instance,eval(mapnik_py_type)):
#print sym[1][prop]
print 'type error: %s (actual) for %s/%s is not %s (expected)' % (type(attr_instance),sym_name,key,eval(mapnik_py_type))
if len(fails):
print '\n\n%s -->\n' % (sym_name)
for f in fails:
print f
#print '(' + '|'.join([i for i in dir(instance_var) if not '__' in i]) + ')'
total_fails += len(fails);
print '\n\nTotal issues: %s' % total_fails
| Python | 0 | |
f8d49af459fb3b751f44ecf625521c62fa68df0a | Check in script to delete existing autochecked tasks | bin/ext_service/historical/fix_autocheck_tasks.py | bin/ext_service/historical/fix_autocheck_tasks.py | import logging
import argparse
import uuid
import emission.core.wrapper.user as ecwu
import emission.core.get_database as edb
import emission.net.ext_service.habitica.proxy as proxy
def fix_autocheck_for_user(uuid):
    """Replace a user's existing auto-created Habitica tasks with fresh ones."""
    # NOTE(review): the parameter shadows the imported `uuid` module; harmless
    # inside this function, but a rename (e.g. user_id) would avoid confusion.
    auto_tasks = find_existing_auto_tasks(uuid)
    delete_tasks(uuid, auto_tasks)
    create_new_tasks(uuid)
# I wanted to reuse existing code, but it is unclear how to do so.
# in particular, I will have either the format of the old tests or of
# the new tests. Most PRs will not keep the old and the new around side
# to side. Since this is a historical, as opposed to ongoing script, I
# think this is fine.
def find_existing_auto_tasks(uuid):
method_uri = "/api/v3/tasks/user"
get_habits_uri = method_uri + "?type=habits"
#First, get all habits and check if the habit requested already exists
result = proxy.habiticaProxy(uuid, 'GET', get_habits_uri, None)
habits = result.json()
auto_tasks = []
for habit in habits['data']:
print habit['text'], habit["notes"], habit["id"]
if "automatically" in habit['notes']:
logging.debug("Found auto task %s, %s, %s" %
(habit['text'], habit['notes'], habit['id']))
auto_tasks.append(habit)
else:
if len(habit["challenge"]) > 0:
logging.info("Found challenge task %s, %s, %s, unsure what to do" %
(habit['text'], habit['notes'], habit['id']))
else:
logging.debug("Found manual task %s, %s, %s" %
(habit['text'], habit['notes'], habit['id']))
return auto_tasks
def delete_tasks(uuid, task_list):
    """Delete each Habitica task in `task_list` for the given user."""
    method_uri = "/api/v3/tasks/"
    for task in task_list:
        # DELETE /api/v3/tasks/<task id> via the authenticated proxy.
        curr_task_del_uri = method_uri + str(task["id"])
        result = proxy.habiticaProxy(uuid, 'DELETE', curr_task_del_uri, {})
        logging.debug("Result of deleting %s = %s" % (task["id"], result.json()))
def create_new_tasks(uuid):
    """Recreate the standard auto-check tasks for the user.

    TODO: not implemented yet — currently a no-op placeholder.
    """
    pass
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-e", "--user_email")
group.add_argument("-u", "--user_uuid")
group.add_argument("-a", "--all", action="store_true")
args = parser.parse_args()
if args.all:
for uuid in edb.get_habitica_db().distinct("user_id"):
logging.debug("About to check user %s" % uuid)
fix_autocheck_for_user(uuid)
else:
if args.user_uuid:
del_uuid = uuid.UUID(args.user_uuid)
else:
del_uuid = ecwu.User.fromEmail(args.user_email).uuid
fix_autocheck_for_user(del_uuid)
| Python | 0 | |
89ef576ba4e707eef653c670b32fa40d862e79ec | Add package for the Python regex library (#4771) | var/spack/repos/builtin/packages/py-regex/package.py | var/spack/repos/builtin/packages/py-regex/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyRegex(PythonPackage):
    """Alternative regular expression module, to replace re."""

    homepage = "https://pypi.python.org/pypi/regex/"
    url = "https://pypi.io/packages/source/r/regex/regex-2017.07.11.tar.gz"

    # 32-hex archive checksum (md5-style digest used by Spack at this time).
    version('2017.07.11', '95f81ebb5273c7ad9a0c4d1ac5a94eb4')

    # Build-only dependency: setuptools drives the sdist install.
    depends_on('py-setuptools', type='build')
| Python | 0 | |
0e6a7a805ff08f191c88bda67992cb874f538c2f | Add migration for unitconnection section types | services/migrations/0097_alter_unitconnection_section_type.py | services/migrations/0097_alter_unitconnection_section_type.py | # Generated by Django 4.0.5 on 2022-06-22 05:40
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django: alters UnitConnection.section_type to the
    # current set of section-type choices listed below.

    dependencies = [
        ("services", "0096_create_syllables_fi_columns"),
    ]

    operations = [
        migrations.AlterField(
            model_name="unitconnection",
            name="section_type",
            field=models.PositiveSmallIntegerField(
                choices=[
                    (1, "PHONE_OR_EMAIL"),
                    (2, "LINK"),
                    (3, "TOPICAL"),
                    (4, "OTHER_INFO"),
                    (5, "OPENING_HOURS"),
                    (6, "SOCIAL_MEDIA_LINK"),
                    (7, "OTHER_ADDRESS"),
                    (8, "HIGHLIGHT"),
                    (9, "ESERVICE_LINK"),
                    (10, "PRICE"),
                    (11, "SUBGROUP"),
                ],
                # null allowed: a connection without a section type is valid
                null=True,
            ),
        ),
    ]
| Python | 0 | |
9009315381edd69adac3319b973b3bcdb16f23e4 | Add missing module wirecloud.live.utils | src/wirecloud/live/utils.py | src/wirecloud/live/utils.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
from base64 import b64encode
def build_group_name(name):
    """Encode *name* into a channel-safe byte string of the form ``b"wc-<b64>"``.

    URL-safe base64 (altchars ``-_``) is used and any ``=`` padding is
    swapped for ``.``, so the result avoids characters that are awkward in
    group identifiers.
    """
    encoded = b64encode(name.encode('utf-8'), b'-_')
    return b"wc-" + encoded.replace(b'=', b'.')
WIRECLOUD_BROADCAST_GROUP = build_group_name('live-*')
| Python | 0.000024 | |
5e49eb4fb6bce9cdeae515590530b78e4dde89d9 | Add alternate example for `match_template`. | doc/examples/plot_match_face_template.py | doc/examples/plot_match_face_template.py | """
=================
Template Matching
=================
In this example, we use template matching to identify the occurrence of an
image patch (in this case, a sub-image centered on the camera man's head).
Since there's only a single match, the maximum value in the `match_template`
result` corresponds to the head location. If you expect multiple matches, you
should use a proper peak-finding function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.feature import match_template
image = data.camera()
head = image[70:170, 180:280]

# Cross-correlate the template against the image.  The response map is
# SMALLER than the image: image.shape - head.shape + 1 per axis.
result = match_template(image, head)

fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(8, 4))

ax1.imshow(head)
ax1.set_axis_off()
ax1.set_title('template')

ax2.imshow(image)
ax2.set_axis_off()
ax2.set_title('image')

# highlight matched region: the peak of the response map is the top-left
# corner of the best match.  Bug fix: the flat argmax index must be unraveled
# with result.shape, not image.shape -- using the image's shape produces
# wrong coordinates because result is smaller than the image.
xy = np.unravel_index(np.argmax(result), result.shape)[::-1]  # -1 flips ij to xy
hface, wface = head.shape  # ndarray shape is (rows, cols) == (height, width)
rect = plt.Rectangle(xy, wface, hface, edgecolor='r', facecolor='none')
ax2.add_patch(rect)

plt.show()
| Python | 0 | |
854fd7de75a14ee030b8f2e8a686dd96f40273de | Add mvn-push.py | mvn-push.py | mvn-push.py | #!/usr/bin/python
import os
import sys
import getopt
from os.path import realpath
from os.path import join
from os.path import basename
import subprocess
help_message = 'mvn-push.py --group package --id package --version version --file file [--javadoc file|path] [--sources file]'
mvn_repo=os.getcwd()
cleanup = ''
def subprocess_cmd(command):
    """Run `command` through the shell and echo its captured stdout, if any."""
    # shell=True because callers pass compound shell strings (e.g. "cd x; jar ...").
    process = subprocess.Popen(command, stdout = subprocess.PIPE, shell = True)
    proc_stdout = process.communicate()[0].strip()
    if proc_stdout:
        print proc_stdout
def check_required(arg_value_name_pairs):
for pair in arg_value_name_pairs:
if not pair[0]:
print pair[1], 'is empty or invalid'
sys.exit(1)
def detect_packaging(file_path):
    """Return the Maven packaging type ('aar' or 'jar') from the file
    extension, or exit the script with status 1 for anything else."""
    file_extension = file_path[-4:]  # both supported extensions are 4 chars
    if file_extension == '.aar':
        return 'aar'
    elif file_extension == '.jar':
        return 'jar'
    else:
        print 'wrong file extension'
        sys.exit(1)
def pack_javadoc(file_path, javadoc):
if not javadoc:
return javadoc
else:
global cleanup
temp_jar = basename('%s-javadoc.jar' % file_path[:-4])
subprocess_cmd('cd {0}; jar cf {1} *'.format(javadoc, temp_jar))
cleanup = cleanup + ' ' + join(javadoc, temp_jar)
return join(javadoc, temp_jar)
def deploy(group_id, artifact_id, version, file_path, javadoc, sources, packaging):
mvn_deploy = 'mvn deploy:deploy-file -Durl=file://{0} -DgroupId={1} -DartifactId={2} -Dversion={3} -Dpackaging={4} -Dfile={5}'.format(
mvn_repo, group_id, artifact_id, version, packaging, file_path)
if sources:
mvn_deploy += ' -Dsources=%s' % sources
if javadoc:
mvn_deploy += ' -Djavadoc=%s' % javadoc
subprocess_cmd(mvn_deploy)
def main(argv):
group_id=''
artifact_id=''
version=''
file_path=''
javadoc=''
sources=''
try:
opts, args = getopt.getopt(argv, 'h:', ['group=', 'id=', 'version=', 'file=', 'javadoc=', 'sources='])
except getopt.GetoptError:
print help_message
sys.exit(1)
if len(opts) == 0:
print help_message
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
print help_message
sys.exit()
elif opt == '--group':
group_id = arg
elif opt == '--id':
artifact_id = arg
elif opt == '--version':
version = arg
elif opt == '--file':
file_path = realpath(arg)
elif opt == '--javadoc':
javadoc = realpath(arg)
elif opt == '--sources':
sources = realpath(arg)
check_required(((group_id, 'group'), (artifact_id, 'id'), (version, 'version'), (file_path, 'file')))
packaging = detect_packaging(file_path)
javadoc = pack_javadoc(file_path, javadoc)
deploy(group_id, artifact_id, version, file_path, javadoc, sources, packaging)
subprocess_cmd('rm %s' % cleanup)
if __name__ == '__main__':
main(sys.argv[1:]) | Python | 0.000004 | |
f284bb85a0b28142850f980a33f38a3cf25d9da8 | Solve Knowit 2017/08 | knowit2017/08.py | knowit2017/08.py | memoized = {}
def christmas_number(n):
    """Return True if ``n`` is a christmas number: repeatedly replacing a
    number with the sum of the squares of its decimal digits reaches 1
    (the classic "happy number" property).

    Verdicts are cached in the module-level ``memoized`` dict and every
    number visited along the way inherits the final verdict, which keeps
    scanning a large range fast.

    Bug fix vs. the original: the visited set was seeded with 1, so the
    failure branches wrote ``memoized[1] = False`` after any unhappy input,
    making later ``christmas_number(1)`` calls return False.  The seed is
    removed; 1 is only cached when actually visited (i.e. on a True path).
    """
    visited = set()  # every value seen in this chain; all share the verdict
    while True:
        if n > 10000000:
            # Safety bound for runaway chains: classify everything seen as False.
            for k in visited:
                memoized[k] = False
            return False
        visited.add(n)
        if n in memoized:
            return memoized[n]
        # Sum of squared digits; a generator avoids building a throwaway list.
        n = sum(int(d) ** 2 for d in str(n))
        if n == 1:
            for k in visited:
                memoized[k] = True
            return True
        if n in visited:
            # Entered a cycle that never reaches 1: not a christmas number.
            for k in visited:
                memoized[k] = False
            return False
def test_christmas_number():
    """Sanity checks: known happy (christmas) and unhappy numbers."""
    assert christmas_number(13) is True   # 13 -> 1 + 9 = 10 -> 1
    assert christmas_number(7) is True    # 7 -> 49 -> 97 -> 130 -> 10 -> 1
    assert christmas_number(1) is True
    assert christmas_number(4) is False   # enters the 4 -> 16 -> ... -> 4 cycle
    assert christmas_number(2) is False
if __name__ == "__main__":
    # Sum every christmas number in [1, 10_000_000], logging progress
    # every 100k numbers.
    s = 0
    for n in range(1, 10000001):
        if n % 100000 == 0:
            print(n)
        if christmas_number(n):
            s += n
print(s) | Python | 0.000118 | |
2dd0efce803c4dfcc4c5d61cf6fec1d5ee64e1b3 | test for btcSpecialTx.py | test/test_btcSpecialTx.py | test/test_btcSpecialTx.py | from pyethereum import tester
from datetime import datetime, date
import math
import pytest
slow = pytest.mark.slow
class TestBtcSpecialTx(object):
    """End-to-end test of the btcSpecialTx contract via the pyethereum tester."""

    CONTRACT = 'btcSpecialTx.py'   # contract source deployed for the tests
    CONTRACT_GAS = 55000
    ETHER = 10 ** 18               # wei per ether

    def setup_class(cls):
        # Deploy the contract once per class; snapshot so each test can revert.
        tester.gas_limit = 2 * 10**6
        cls.s = tester.state()
        cls.c = cls.s.abi_contract(cls.CONTRACT, endowment=2000*cls.ETHER)
        cls.snapshot = cls.s.snapshot()
        cls.seed = tester.seed

    def setup_method(self, method):
        # Restore pristine chain state and RNG seed before every test.
        self.s.revert(self.snapshot)
        tester.seed = self.seed

    def test_testnetTx(self):
        # testnet tx a51a71f8094f9b4e266fcccd55068e809277ec79bfa44b7bdb8f1355e9bb8460
        # tx[9] of block 350559
        txStr = '010000000158115acce0e68bc58ecb89e6452380bd68da56dc0a163d9806c04b24dfefe269000000008a47304402207a0bf036d5c78d6910d608c47c9e59cbf5708df51fd22362051b8f1ecd9691d1022055ee6ace9f12f02720ce91f62916570dbd93b2aa1e91be7da8e5230f62606db7014104858527cb6bf730cbd1bcf636bc7e77bbaf0784b9428ec5cca2d8378a0adc75f5ca893d14d9db2034cbb7e637aacf28088a68db311ff6f1ebe6d00a62fed9951effffffff0210980200000000001976a914a0dc485fc3ade71be5e1b68397abded386c0adb788ac10270000000000001976a914d3193ccb3564d5425e4875fe763e26e2fce1fd3b88ac00000000'
        res = self.c.getFirst2Outputs(txStr)
        # First output value, in satoshis.
        assert res[0] == 170000
        # Offsets appear to be byte indices into the tx, hence *2 to get hex
        # characters; +6 presumably skips the script's leading opcodes before
        # the 20-byte hash160 -- TODO confirm against btcSpecialTx.py.
        out1stScriptIndex = res[1]
        btcAddrIndex = out1stScriptIndex*2 + 6
        assert txStr[btcAddrIndex:btcAddrIndex+40] == 'a0dc485fc3ade71be5e1b68397abded386c0adb7'
        out2ndScriptIndex = res[2]
        ethAddrIndex = out2ndScriptIndex*2 + 6
        assert txStr[ethAddrIndex:ethAddrIndex+40] == 'd3193ccb3564d5425e4875fe763e26e2fce1fd3b'
| Python | 0.000001 | |
edc5116472c49370e5bf3ff7f9f7872732b0285e | Add a solution to the phone number problem: can a phone number be represented as words in a dictionary? | phone_numbers.py | phone_numbers.py | #!/usr/bin/env python
import unittest
words = set(["dog", "clog", "cat", "mouse", "rat", "can",
"fig", "dig", "mud", "a", "an", "duh", "sin",
"get", "shit", "done", "all", "glory", "comes",
"from", "daring", "to", "begin", ])
# Keypad mapping: each letter to the digit that carries it on a phone dial.
dialmap = {
    'a':2, 'b':2, 'c':2,
    'd':3, 'e':3, 'f':3,
    'g':4, 'h':4, 'i':4,
    'j':5, 'k':5, 'l':5,
    'm':6, 'n':6, 'o':6,
    'p':7, 'q':7, 'r':7, 's':7,
    't':8, 'u':8, 'v':8,
    'w':9, 'x':9, 'y':9, 'z':9,
}

def tonumbers(word):
    """Convert the string 'word' into the equivalent string of phone-dialing digits."""
    # ''.join over a generator replaces the original quadratic `+=` string build.
    return ''.join(str(dialmap[c]) for c in word.lower())
wordsnum = set()
for w in words:
wordsnum.add(tonumbers(w))
def isword(number):
    """Return True if the string of decimal digits 'number' can be represented
    as the concatenation of words in the 'words' set, otherwise False."""
    # Exact dictionary hit: no splitting required.
    if number in wordsnum:
        return True
    # Previously computed verdicts (values are always True/False).
    cached = isword.memoized.get(number)
    if cached is not None:
        return cached
    # Try every split point; the number is a word chain iff both halves are.
    for split_at in range(1, len(number)):
        tail = number[split_at:]
        head = number[:split_at]
        if isword(tail) and isword(head):
            isword.memoized[number] = True
            return True
    isword.memoized[number] = False
    return False
isword.memoized = {}
class TestIsWord(unittest.TestCase):
    """Spot checks for isword() using dictionary words and real phone numbers."""

    def testGetShitDone(self):
        # A concatenation of dictionary words must be accepted.
        self.assertTrue(isword(tonumbers('getshitdone')))

    def testHas1(self):
        # No letter maps to 0 or 1, so numbers containing them can never match.
        self.assertFalse(isword('1092340345'))

    def testDogDog(self):
        self.assertTrue(isword(tonumbers('dogdog')))

    def testMyNumber1(self):
        self.assertFalse(isword('7342393309'))

    def testMyNumber2(self):
        self.assertFalse(isword('4082434090'))
if __name__ == "__main__":
    unittest.main()  # run the tests above when executed directly
| Python | 0.999746 | |
904ac79bd278634c97f6f43f4d85bc0c2316117b | add configuration example | scripts/exchange-bots/config-example.py | scripts/exchange-bots/config-example.py | from bot.strategies.maker import MakerRamp, MakerSellBuyWalls
wallet_host = "localhost"
wallet_port = 8092
wallet_user = ""
wallet_password = ""
witness_url = "ws://testnet.bitshares.eu/ws"
witness_user = ""
witness_password = ""
watch_markets = ["PEG.PARITY : TEST", "PEG.RANDOM : TEST"]
market_separator = " : "
bots = {}
#############################
# Ramps
#############################
bots["MakerRexp"] = {"bot" : MakerRamp,
"markets" : ["PEG.PARITY : TEST"],
"target_price" : "feed",
"spread_percentage" : 0.2,
"volume_percentage" : 30,
"ramp_price_percentage" : 2,
"ramp_step_percentage" : 0.1,
"ramp_mode" : "linear"
}
bots["MakerRamp"] = {"bot" : MakerRamp,
"markets" : ["PEG.PARITY : TEST"],
"target_price" : "feed",
"spread_percentage" : 4,
"volume_percentage" : 30,
"ramp_price_percentage" : 4,
"ramp_step_percentage" : 0.5,
"ramp_mode" : "exponential"
}
#############################
# Walls
#############################
bots["MakerWall"] = {"bot" : MakerSellBuyWalls,
"markets" : ["PEG.PARITY : TEST"],
"target_price" : "feed",
"spread_percentage" : 5,
"volume_percentage" : 10,
"symmetric_sides" : True,
}
bots["MakerBridge"] = {"bot" : MakerSellBuyWalls,
"markets" : ["PEG.PARITY : TEST"],
"target_price" : 1.0,
"spread_percentage" : 90,
"volume_percentage" : 10,
"symmetric_sides" : True,
}
account = "xeroc"
safe_mode = False
| Python | 0.000001 | |
94ca753de7d6d7f82bc71fb2216128c13b2c2499 | Add py | picasawebsync.py | picasawebsync.py | #!/usr/bin/python
from gdata.photos.service import *
import gdata.media
import gdata.geo
import os
import re
import pprint
import sys
import argparse
# Class to store details of an album
class Albums:
def __init__(self, rootDir):
self.albums = Albums.scanFileSystem(rootDir)
# walk the directory tree populating the list of files we have locally
@staticmethod
def scanFileSystem(rootDir):
fileAlbums = {}
for dirName,subdirList,fileList in os.walk( rootDir ) :
albumName = convertDirToAlbum("{0}~{1} ({0})", rootDir, dirName)
# have we already seen this album? If so append our path to it's list
if albumName in fileAlbums:
album = fileAlbums[album.getAlbumName()]
album.paths.append(dirName)
else:
# create a new album
album = AlbumEntry(dirName, albumName)
fileAlbums[album.getAlbumName()] = album
# now iterate it's files to add them to our list
for fname in fileList :
fullFilename = os.path.join(dirName, fname)
# figure out the filename relative to the root dir of the album (to ensure uniqeness)
relFileName = re.sub("^/","", fullFilename[len(album.rootPath):])
fileEntry = FileEntry(relFileName, fullFilename, False, True)
album.entries[relFileName] = fileEntry
print "Found "+str(len(fileAlbums))+" albums on the filesystem"
return fileAlbums;
def scanWebAlbums(self):
# walk the web album finding albums there
webAlbums = gd_client.GetUserFeed()
for webAlbum in webAlbums.entry:
webAlbumTitle = Albums.flatten(webAlbum.title.text)
if webAlbumTitle in self.albums:
foundAlbum = self.albums[webAlbumTitle]
photos = gd_client.GetFeed(webAlbum.GetPhotosUri())
foundAlbum.webAlbum.append(WebAlbum(webAlbum, int(photos.total_results.text)))
for photo in photos.entry:
if photo.title.text in foundAlbum.entries:
foundAlbum.entries[photo.title.text].isWeb = True
else:
print "skipping web only photo "+photo.title.text
else:
print "skipping web only album "+webAlbum.title.text
print 'Checked: %s (containing %s files)' % (webAlbum.title.text, webAlbum.numphotos.text)
def uploadMissingAlbumsAndFiles(self):
for album in self.albums.itervalues():
subAlbumCount = 0;
for file in album.entries.itervalues():
if not(file.isWeb) :
while (subAlbumCount<len(album.webAlbum) and album.webAlbum[subAlbumCount].numberFiles >= 999):
subAlbumCount = subAlbumCount + 1
if subAlbumCount>=len(album.webAlbum):
subAlbum = WebAlbum(gd_client.InsertAlbum(title=Albums.createAlbumName(album.getAlbumName(), subAlbumCount), access='private', summary='synced from '+album.rootPath), 0)
album.webAlbum.append(subAlbum)
print 'Created album %s to sync %s' % (subAlbum.album.title.text, album.rootPath)
else:
subAlbum = album.webAlbum[subAlbumCount]
try:
photo = gd_client.InsertPhotoSimple(subAlbum.album, file.name, 'synced from '+file.path, file.path, content_type='image/jpeg')
print "uploaded "+file.path
subAlbum.numberFiles = subAlbum.numberFiles + 1
except GooglePhotosException:
print "Skipping upload of %s due to exception" % file.path
@staticmethod
def createAlbumName(name, index):
if index == 0:
return name
else:
return "%s #%s" % (name, index)
@staticmethod
def flatten(name):
return re.sub("#[0-9]*$","",name)
class AlbumEntry:
    """Details of a local album: the directories it spans and its files."""

    def __init__(self, fileName, albumName):
        self.paths = [fileName]    # all directories merged into this album
        self.rootPath = fileName   # first directory; relative names hang off it
        self.albumName = albumName
        self.entries = {}          # relative file name -> FileEntry
        self.webAlbum = []         # WebAlbum instances backing this album online

    def __str__(self):
        # Bug fix: the original referenced the bare name `rootPath`, which
        # raised NameError -- it must be the instance attribute.
        return (self.albumName + " starting at " + self.rootPath + " total " +
                str(len(self.entries)) + " entries " +
                ["exists", "doesn't exist"][not self.webAlbum] + " online")

    def getAlbumName(self):
        """Return the album title, defaulting to "Home" for the root album."""
        if len(self.albumName) > 0:
            return self.albumName
        else:
            return "Home"

    def getPathsAsString(self):
        """Comma-joined list of every directory contributing to this album."""
        return ",".join(self.paths)
# Class to store web album details
class WebAlbum:
    """Pairs a remote (web) album handle with its current photo count."""

    def __init__(self, album, numberFiles):
        self.numberFiles = numberFiles  # photos currently in the album
        self.album = album              # remote album entry object
# Class to store details of an individual file
class FileEntry:
    """A single photo: album-relative name, full path, and whether it is
    known to exist on the web and/or on the local disk."""

    def __init__(self, name, path, isWeb, isLocal):
        self.isLocal = isLocal
        self.isWeb = isWeb
        self.path = path
        self.name = name
# Method to translate directory name to an album name
def convertDirToAlbum(form, root, name):
    """Translate directory *name* (under *root*) into an album title.

    *form* is a '~'-separated list of format strings; the one whose index
    matches the path depth (capped at the last entry) is filled with the
    path components of *name* relative to *root*.
    """
    templates = form.split("~")
    relative = re.sub("^/", "", name[len(root):])
    components = relative.split("/")
    depth = min(len(templates), len(components))
    return templates[depth - 1].format(*components)
# start of the program
parser = argparse.ArgumentParser()
parser.add_argument("username", help="Your picassaweb username")
parser.add_argument("password", help="Your picassaweb password")
parser.add_argument("directory", help="The local directory to copy from")
args = parser.parse_args()
gd_client = gdata.photos.service.PhotosService()
gd_client.email = args.username # Set your Picasaweb e-mail address...
gd_client.password = args.password
gd_client.source = 'api-sample-google-com'
gd_client.ProgrammaticLogin()
rootDir = args.directory # set the directory you want to start from
albums = Albums(rootDir)
albums.scanWebAlbums()
albums.uploadMissingAlbumsAndFiles()
exit(1)
sys.exit(0)
# photos = gd_client.GetFeed('/data/feed/api/user/default/albumid/%s?kind=photo' % (album.gphoto_id.text))
# for photo in photos.entry:
# print ' Photo:', photo.title.text
# tags = gd_client.GetFeed('/data/feed/api/user/default/albumid/%s/photoid/%s?kind=tag' % (album.gphoto_id.text, photo.gphoto_id.text))
# for tag in tags.entry:
# print ' Tag:', tag.title.text
# comments = gd_client.GetFeed('/data/feed/api/user/default/albumid/%s/photoid/%s?kind=comment' % (album.gphoto_id.text, photo.gphoto_id.text))
# for comment in comments.entry:
# print ' Comment:', comment.content.text
| Python | 0.000288 | |
d5cfa59c586053d911f8725dfd321d8ad0eecce6 | Fix context comprobation. It must be exist at begining | account_voucher_payment_method/account_voucher.py | account_voucher_payment_method/account_voucher.py | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields, orm
import time
from openerp.tools.translate import _
from lxml import etree
class accountVoucherinherit(orm.Model):
    """Extend account.voucher: restrict journals to supplier payment methods
    in supplier views and expose the commercial exchange rate as fields."""
    _inherit = 'account.voucher'

    def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
        """Post-process the voucher view architecture for supplier payments."""
        res = super(accountVoucherinherit, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
        doc = etree.XML(res['arch'])
        #In this section is when some differences between supplier and customer are established
        # context may be None here, hence the explicit truthiness check first.
        if context and context.get('type', 'sale') in ('purchase', 'payment'):
            #Separate the journal types
            nodes = doc.xpath("//field[@name='journal_id']")
            for node in nodes:
                #Add a domain when the view is from supplier.
                node.set('domain', "[('payment_method_supplier','=', True)]")
                #Remove selection widget (if the widget is added, values from
                #customer are showed in supplier wizard. The wizard doesn't
                #refresh values
                node.set('widget', '')
        res['arch'] = etree.tostring(doc)
        return res

    def _compute_exchange_rate(self, cr, uid, ids, field_names, args, context=None):
        """Function-field compute: exchange rate between each voucher's
        currency and the logged user's company currency at today's date."""
        res_user_obj = self.pool.get('res.users')
        currency_obj = self.pool.get('res.currency')
        exchange_rate = 0.0
        res = {}
        #Company currency for logged user
        res_user = res_user_obj.browse(cr, uid, uid, context=context)
        company_currency = res_user.company_id.currency_id
        #Today's date
        now = time.strftime('%Y-%m-%d')
        for voucher in self.browse(cr, uid, ids, context=context):
            #Depends of sequence, set initial and final currency
            if company_currency.sequence < voucher.currency_id.sequence:
                initial_currency = company_currency
                final_currency = voucher.currency_id
            else:
                initial_currency = voucher.currency_id
                final_currency = company_currency
            #Get exchange, depends of order sets before
            exchange_rate = currency_obj.get_exchange_rate(cr, uid, initial_currency, final_currency, now, context=context)
            res[voucher.id] = exchange_rate
        return res

    _columns = {
        # Computed commercial exchange rate (see _compute_exchange_rate).
        'voucher_payment_rate' : fields.function(_compute_exchange_rate, string='Exchange Rate Commercial', type='float',),
        # Company currency exposed via a related field for use in views.
        'voucher_payment_rate_currency_id' : fields.related('company_id', 'currency_id', string='Company Currency', type='many2one', relation='res.currency',),
    }
| # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import osv, fields, orm
import time
from openerp.tools.translate import _
from lxml import etree
class accountVoucherinherit(orm.Model):
_inherit = 'account.voucher'
def fields_view_get(self, cr, uid, view_id=None, view_type=False, context=None, toolbar=False, submenu=False):
res = super(accountVoucherinherit, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
doc = etree.XML(res['arch'])
#In this section is when some differences between supplier and customer are established
if context.get('type', 'sale') in ('purchase', 'payment'):
#Separate the journal types
nodes = doc.xpath("//field[@name='journal_id']")
for node in nodes:
#Add a domain when the view is from supplier.
node.set('domain', "[('payment_method_supplier','=', True)]")
#Remove selection widget (if the widget is added, values from
#customer are showed in supplier wizard. The wizard doesn't
#refresh values
node.set('widget', '')
res['arch'] = etree.tostring(doc)
return res
def _compute_exchange_rate(self, cr, uid, ids, field_names, args, context=None):
res_user_obj = self.pool.get('res.users')
currency_obj = self.pool.get('res.currency')
exchange_rate = 0.0
res = {}
#Company currency for logged user
res_user = res_user_obj.browse(cr, uid, uid, context=context)
company_currency = res_user.company_id.currency_id
#Today's date
now = time.strftime('%Y-%m-%d')
for voucher in self.browse(cr, uid, ids, context=context):
#Depends of sequence, set initial and final currency
if company_currency.sequence < voucher.currency_id.sequence:
initial_currency = company_currency
final_currency = voucher.currency_id
else:
initial_currency = voucher.currency_id
final_currency = company_currency
#Get exchange, depends of order sets before
exchange_rate = currency_obj.get_exchange_rate(cr, uid, initial_currency, final_currency, now, context=context)
res[voucher.id] = exchange_rate
return res
_columns = {
'voucher_payment_rate' : fields.function(_compute_exchange_rate, string='Exchange Rate Commercial', type='float',),
'voucher_payment_rate_currency_id' : fields.related('company_id', 'currency_id', string='Company Currency', type='many2one', relation='res.currency',),
}
| Python | 0.994592 |
60efa5bbab4463714df8dd93c1c7c606bee4dbaf | add giphy plugin | plugins/giphy.py | plugins/giphy.py | from util import http, hook
@hook.api_key('giphy')
@hook.command('giphy', autohelp=False)
@hook.command('gif', autohelp=False)
@hook.command(autohelp=False)
def giphy(inp, api_key=None):
".giphy [term] -- gets random gif for a term"
data = http.get_json("http://api.giphy.com/v1/gifs/random", { "api_key": api_key, "tag": inp })
return data['data']['image_url']
| Python | 0 | |
227a8e0f654c9797a7dedf863f7568d55a6c2f8e | add download sample from go-sciter port | examples/download.py | examples/download.py | """Go sciter example port."""
import sciter
class MyEventHandler(sciter.EventHandler):
def document_complete(self):
print("content loaded.")
pass
def on_data_arrived(self, nm):
print("data arrived, uri:", nm.uri, nm.dataSize)
pass
class Frame(sciter.Window):
def __init__(self):
super().__init__(ismain=True, uni_theme=False, debug=True)
pass
def on_data_loaded(self, nm):
print("data loaded, uri:", nm.uri, nm.dataSize)
pass
def load(self, url):
self.set_title("Download Element Content")
self.load_html(b'''<html><body><span id='url'>Url To Load</span><frame id='content'></frame></body></html>''', "/")
# get root element
root = self.get_root()
# get span#url and frame#content:
span = root.find_first('#url')
content = root.find_first('#content')
# replace span text with url provided
text = span.get_text()
span.set_text(url)
print("span:", text)
# install event handler to content frame to print data_arrived events
self.handler = MyEventHandler(element=content)
print("load content")
content.request_html(url)
pass
pass
if __name__ == '__main__':
import sys
print("Sciter version:", ".".join(map(str, sciter.version())))
if len(sys.argv) < 2:
sys.exit("at least one Sciter compatible page url is needed")
print(sys.argv[1])
frame = Frame()
frame.load(sys.argv[1])
frame.expand()
frame.run_app(False)
| Python | 0 | |
dff9d7a05e2a522b3dbbd7ea18866c5ba1fc0476 | add a !stock plugin for stock images | plugins/stock.py | plugins/stock.py | """!stock <search term> return a stock photo for <search term>"""
from random import shuffle
import re
import requests
from bs4 import BeautifulSoup
def stock(searchterm):
url = "http://www.shutterstock.com/cat.mhtml?searchterm={}&search_group=&lang=en&language=en&search_source=search_form&version=llv1".format(searchterm)
r = requests.get(url)
soup = BeautifulSoup(r.text)
images = [x["src"] for x in soup.select(".gc_clip img")]
shuffle(images)
return images[0] if images else ""
def on_message(msg, server):
text = msg.get("text", "")
match = re.findall(r"!stock (.*)", text)
if not match: return
searchterm = match[0]
return stock(searchterm)
| Python | 0 | |
d26a78d3e0695e0bf492910c530beb54b30cdbbc | bump version number for development | stdeb/__init__.py | stdeb/__init__.py | # setuptools is required for distutils.commands plugin we use
import logging
import setuptools
__version__ = '0.4.2.git'
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
| # setuptools is required for distutils.commands plugin we use
import logging
import setuptools
__version__ = '0.4.2'
log = logging.getLogger('stdeb')
log.setLevel(logging.INFO)
handler = logging.StreamHandler()
handler.setLevel(logging.INFO)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
| Python | 0 |
1cab72ac3c5f3cea8326ebc97ccae1a8068eb839 | Add http responses collection module. | superview/http.py | superview/http.py | # -*- coding: utf-8 -*-
"""
The various HTTP responses for use in returning proper HTTP codes.
"""
from django.http import HttpResponse, StreamingHttpResponse
class HttpCreated(HttpResponse):
status_code = 201
def __init__(self, *args, **kwargs):
location = kwargs.pop('location', '')
super(HttpCreated, self).__init__(*args, **kwargs)
self['Location'] = location
class HttpAccepted(HttpResponse):
status_code = 202
class HttpNoContent(HttpResponse):
status_code = 204
class HttpMultipleChoices(HttpResponse):
status_code = 300
class HttpSeeOther(HttpResponse):
status_code = 303
class HttpNotModified(HttpResponse):
status_code = 304
class HttpBadRequest(HttpResponse):
status_code = 400
class HttpUnauthorized(HttpResponse):
status_code = 401
class HttpForbidden(HttpResponse):
status_code = 403
class HttpNotFound(HttpResponse):
status_code = 404
class HttpMethodNotAllowed(HttpResponse):
status_code = 405
class HttpConflict(HttpResponse):
status_code = 409
class HttpGone(HttpResponse):
status_code = 410
class HttpTooManyRequests(HttpResponse):
status_code = 429
class HttpApplicationError(HttpResponse):
status_code = 500
class HttpNotImplemented(HttpResponse):
status_code = 501
| Python | 0 | |
86baa4f437cf3892c15a56e8331c19b6d2e63b1d | Add a script for generating unicode name table | lib/gen-names.py | lib/gen-names.py | #!/usr/bin/python3
# Input: https://www.unicode.org/Public/UNIDATA/UnicodeData.txt
import io
import re
class Builder(object):
def __init__(self):
pass
def read(self, infile):
names = []
for line in infile:
if line.startswith('#'):
continue
line = line.strip()
if len(line) == 0:
continue
(codepoint, name, _other) = line.split(';', 2)
# Names starting with < are signifying controls and special blocks,
# they aren't useful for us
if name[0] == '<':
continue
names.append((codepoint, name))
return names
def write(self, data):
print('''\
struct CharacterName
{
gunichar uc;
const char *name;
};''')
print('static const struct CharacterName character_names[] =\n {')
s = ''
offset = 0
for codepoint, name in data:
print(' {{ 0x{0}, "{1}" }},'.format(codepoint, name))
print(' };')
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='build')
parser.add_argument('infile', type=argparse.FileType('r'),
help='input file')
args = parser.parse_args()
builder = Builder()
# FIXME: argparse.FileType(encoding=...) is available since Python 3.4
data = builder.read(io.open(args.infile.name, encoding='utf_8_sig'))
builder.write(data)
| Python | 0 | |
a50190fe04e434ce70f6b02027e281a896dbb81b | Create Python password hasher | passwordhash.py | passwordhash.py | #!/usr/bin/env python
# Password Hashing Module for Linux
# Author: Dave Russell Jr (drussell393)
from getpass import getpass
import crypt
# If you like Python 2, please to be importing.
import os
import binascii
password = getpass('Enter your desired password, Harry: ')
passwordConfirm = getpass('Confirm your password: ')
if (password == passwordConfirm):
# Python 2 alternative, os.urandom()
passwordHash = crypt.crypt(password, '$6$' + binascii.hexlify(os.urandom(4)))
# Python 3 likes my crypt (mksalt doesn't work in Python 2)
#passwordHash = crypt.crypt(password, crypt.mksalt(crypt.METHOD_SHA512))
print('You\'re a wizard, Harry: ' + passwordHash)
else:
print('Dobby has heard of your greatness, sir. But of your goodness, Dobby never knew.')
print('Your confirmation password didn\'t match, Oh Great One.')
| Python | 0.000107 | |
961e9b031b94a0533c53c29787660ab954b6db37 | Add patch-weight.py, patch weight into phrase segs. | patch-weight.py | patch-weight.py | #!/usr/bin/env python
# -*- coding: utf-8
from codecs import open
from argparse import ArgumentParser
DEBUG_FLAG = False
def load_weight_dict(weight_file):
weight_dict = {}
with open(weight_file, 'r') as fd:
for line in fd:
splited_line = line.strip().split()
if len(splited_line) != 2:
continue
word, weight = splited_line
if word not in weight_dict:
weight_dict[word] = float(weight)
return weight_dict
def main():
parser = ArgumentParser()
parser.add_argument("weight_file", help = "word-weight in tsv format")
parser.add_argument("phrase_file", help = "phrase segment file (original phrase and segmented phrase) one phrase per line in tsv format")
args = parser.parse_args()
phrase_file = args.phrase_file
weight_file = args.weight_file
weight_dict = load_weight_dict(weight_file)
word_set = set(weight_dict)
with open(phrase_file, 'r') as fd:
for line in fd:
splited_line = line.strip().split("\t")
if len(splited_line) != 2:
continue
phrase_str, phrase_seg = splited_line
phrase_seg_list = phrase_seg.split()
phrase_seg_set = set(phrase_seg_list)
outside_word_set = phrase_seg_set - word_set
if len(outside_word_set) > 0:
if DEBUG_FLAG:
print "###outsidewords###", " ".join(list(outside_word_set))
for word in outside_word_set:
weight_dict[word] = 0.0
weight_sum = sum([weight_dict[word] for word in phrase_seg_list])
if DEBUG_FLAG:
if weight_sum == 0.0:
res_list = ["%s/%s" % (word, weight_dict[word]) for word in phrase_seg_list]
else:
res_list = ["%s/%s" % (word, weight_dict[word] / weight_sum) for word in phrase_seg_list]
print "%s\t%s" % (phrase_str, " ".join(res_list))
else:
if weight_sum == 0.0:
res_list = ["%s%s" % (word, weight_dict[word]) for word in phrase_seg_list]
else:
res_list = ["%s%s" % (word, weight_dict[word] / weight_sum) for word in phrase_seg_list]
print "%s\t%s" % (phrase_str, "".join(res_list))
if __name__ == "__main__":
main()
| Python | 0.000001 | |
9e51fc305f21a4031b6ec94ccfa39ef1e611da9e | add script to compare DFAs. | src/trusted/validator_ragel/unreviewed/compare_dfa.py | src/trusted/validator_ragel/unreviewed/compare_dfa.py | #!/usr/bin/python
# Copyright (c) 2013 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import dfa_parser
visited_pairs = set()
def Traverse(state1, state2, path):
if (state1, state2) in visited_pairs:
return
if state1.is_accepting != state2.is_accepting:
print map(hex, path)
print state1.is_accepting
print state2.is_accepting
sys.exit(1)
visited_pairs.add((state1, state2))
for byte in range(256):
new_path = path + [byte]
t1 = state1.forward_transitions.get(byte)
t2 = state2.forward_transitions.get(byte)
if (t1 is None) != (t2 is None):
print map(hex, new_path)
print t1 is not None
print t2 is not None
sys.exit(1)
if t1 is None:
continue
Traverse(t1.to_state, t2.to_state, new_path)
def main():
filename1, filename2 = sys.argv[1:]
_, start_state1 = dfa_parser.ParseXml(filename1)
_, start_state2 = dfa_parser.ParseXml(filename2)
Traverse(start_state1, start_state2, [])
print 'automata are equivalent'
if __name__ == '__main__':
main()
| Python | 0.000011 | |
3cdee1d40d3370686c9bff435a4575e985c121e9 | Create __init__.py | pfc/__init__.py | pfc/__init__.py | """pfc"""
| Python | 0.000429 | |
438471a4a3b41637c5c1eb3c2e07d9d8ca81ee09 | Add a stats ./manage.py command | www/management/commands/stats.py | www/management/commands/stats.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Copyright 2014, Cercle Informatique ASBL. All rights reserved.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or (at
# your option) any later version.
#
# This software was made by hast, C4, ititou at UrLab, ULB's hackerspace
from django.core.management.base import BaseCommand
from users.models import User
from telepathy.models import Thread, Message
from documents.models import Document
class Command(BaseCommand):
help = 'Numbers on b402'
def handle(self, *args, **options):
Print = self.stdout.write
Print("User summary :\n")
Print("{} users\n".format(User.objects.count()))
Print("\n")
Print("Document summary :\n")
Print("{} documents\n".format(Document.objects.count()))
Print(" - {} IN_QUEUE\n".format(Document.objects.filter(state="IN_QUEUE").count()))
Print(" - {} PROCESSING\n".format(Document.objects.filter(state="PROCESSING").count()))
Print(" - {} PREPARING\n".format(Document.objects.filter(state="PREPARING").count()))
Print(" - {} READY_TO_QUEUE\n".format(Document.objects.filter(state="READY_TO_QUEUE").count()))
Print(" - {} ERROR\n".format(Document.objects.filter(state="ERROR").count()))
Print(" - {} DONE\n".format(Document.objects.filter(state="DONE").count()))
Print("\n")
Print("Thread summary :\n")
Print("{} threads\n".format(Thread.objects.count()))
Print("{} messages\n".format(Message.objects.count()))
Print("\n")
| Python | 0.000048 | |
633e540a1718a5cc515725b13d3f1740bb950bb6 | Use GitHub URL for ImageMagick | var/spack/repos/builtin/packages/ImageMagick/package.py | var/spack/repos/builtin/packages/ImageMagick/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Imagemagick(Package):
"""ImageMagick is a software suite to create, edit, compose,
or convert bitmap images."""
homepage = "http://www.imagemagick.org"
url = "https://github.com/ImageMagick/ImageMagick/archive/7.0.2-7.tar.gz"
version('7.0.2-7', 'c59cdc8df50e481b2bd1afe09ac24c08')
version('7.0.2-6', 'aa5689129c39a5146a3212bf5f26d478')
depends_on('jpeg')
depends_on('libtool', type='build')
depends_on('libpng')
depends_on('freetype')
depends_on('fontconfig')
depends_on('libtiff')
depends_on('ghostscript')
def url_for_version(self, version):
return "https://github.com/ImageMagick/ImageMagick/archive/{0}.tar.gz".format(version)
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make()
make('check')
make('install')
| ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Imagemagick(Package):
"""ImageMagick is a software suite to create, edit, compose,
or convert bitmap images."""
homepage = "http://www.imagemagick.org"
url = "http://www.imagemagick.org/download/ImageMagick-7.0.2-6.tar.gz"
version('7.0.2-6', 'c29c98d2496fbc66adb05a28d8fad21a')
depends_on('jpeg')
depends_on('libtool', type='build')
depends_on('libpng')
depends_on('freetype')
depends_on('fontconfig')
depends_on('libtiff')
depends_on('ghostscript')
def install(self, spec, prefix):
configure('--prefix={0}'.format(prefix))
make()
make('check')
make('install')
| Python | 0 |
3ba109622c24bd52f32e605c523249e1c26b0207 | Add regression test with non ' ' space character as token | spacy/tests/regression/test_issue834.py | spacy/tests/regression/test_issue834.py | # coding: utf-8
from io import StringIO
word2vec_str = """, -0.046107 -0.035951 -0.560418
de -0.648927 -0.400976 -0.527124
. 0.113685 0.439990 -0.634510
-1.499184 -0.184280 -0.598371"""
def test_issue834(en_vocab):
f = StringIO(word2vec_str)
vector_length = en_vocab.load_vectors(f)
assert vector_length == 3
| Python | 0.99974 | |
d11707e651d4b44ef706f62677ba6a617102f239 | Add test-code | test/post_test.py | test/post_test.py | import json
import urllib2
data = {
"cells":["ECT","VISC", "AAA"]
}
req = urllib2.Request('http://localhost:5000/api')
req.add_header('Content-Type', 'application/json')
response = urllib2.urlopen(req, json.dumps(data))
| Python | 0.000083 | |
480852bb1dd6796b7fb12e40edc924b9a4dbee60 | Add tests to cover no framework, no problem | test/test_misc.py | test/test_misc.py | import unittest
from .helpers import run_module
class MiscTests(unittest.TestCase):
def setUp(self):
self.name = "benchmarker"
def test_no_framework(self):
with self.assertRaises(Exception):
run_module(self.name)
def test_no_problem(self):
with self.assertRaises(Exception):
run_module(self.name, "--framework=pytorch")
| Python | 0 | |
ae372375a7160978eb56ef9b710027887a844d6f | add tests, now i am cool. | test_app_input.py | test_app_input.py | """
Test sending data to process_message.
"""
import pytest
from plugins.werewolf import app
from plugins.werewolf.user_map import get_user_map, set_user_map, reset_user_map
def get_empty_game_state():
# hi there
# make mock game state.
# we'll have several fixtures
# and a basic one we can set up in each test.
return {'players':{},
'votes':{},
'STATUS': 'INACTIVE',
'ROUND': None
}
def get_fake_game_state():
return {
'players': {
'ab': {
'name': 'nick',
'DM': 'dm channel',
'role': 'v',
'side': 'v',
'status': 'alive'
},
'cd': {
'name': 'not_nick',
'dm': 'dm channel',
'role': 'w',
'side': 'w',
'status': 'alive'
}
},
'votes': {},
'STATUS': 'RUNNING',
'ROUND': 'night'
}
def setup_users(g):
# for users in g
# setup an appropriate user map.
for player in g['players'].keys():
set_user_map(g, player, g['players'][player]['name'])
def tear_down():
reset_user_map()
def test_setup_users():
night_g = get_fake_game_state()
setup_users(night_g)
test_user_map = get_user_map(night_g)
players = night_g['players'].keys()
p1_id = players[0]
p2_id = players[1]
assert test_user_map.id_dict[p1_id] == 'nick'
assert test_user_map.id_dict[p2_id] == 'not_nick'
assert test_user_map.name_dict['nick'] == p1_id
assert test_user_map.name_dict['not_nick'] == p2_id
tear_down()
def test_basic_input():
fake_message = {'text': 'sup noob', 'user':'ab'}
night_g = get_fake_game_state()
result = app.process_message(fake_message, night_g)
assert result == None
tear_down()
def test_no_vote_target_input():
fake_message = {'text': '!vote', 'user': 'ab'}
night_g = get_fake_game_state()
setup_users(night_g)
result = app.process_message(fake_message, night_g)
assert result == 'Not a valid command.'
tear_down()
def test_vote_user_not_in_game_input():
fake_message = {'text': '!vote cd', 'user': 'cat'}
night_g = get_fake_game_state()
setup_users(night_g)
message = app.process_message(fake_message, night_g)
assert message == 'User not in the game.'
tear_down()
def test_night_vote_input():
fake_message = {'text': '!vote not_nick', 'user': 'ab'}
night_g = get_fake_game_state()
setup_users(night_g)
message = app.process_message(fake_message, night_g)
assert message == 'It is not day.'
tear_down()
def test_day_voting_input():
fake_message = {'text': '!vote not_nick', 'user': 'ab'}
user_name = 'nick'
target_name = 'not_nick'
day_g = get_fake_game_state()
day_g['ROUND'] = 'day'
setup_users(day_g)
assert day_g['votes'] == {}
message = app.process_message(fake_message, day_g)
assert message == user_name + ' voted for ' + target_name
tear_down()
| Python | 0 | |
431760d7a840543901fc1ebc0069ecd384302101 | Add tests/conftest.py for py.test | tests/conftest.py | tests/conftest.py | import decimal
import os
try:
# Python 2
from ConfigParser import ConfigParser
except ImportError:
# Python 3
from configparser import ConfigParser
import tests.helpers as th
from .helpers import cfgpath, clear_db, get_app_lock, release_app_lock
_parser = ConfigParser({
'server': 'localhost',
'username': 'sa',
'password': '',
'database': 'tempdb',
'port': '1433',
'ipaddress': '127.0.0.1',
'instance': '',
})
def pytest_addoption(parser):
parser.addoption(
"--pymssql-section",
type="string",
default=os.environ.get('PYMSSQL_TEST_CONFIG', 'DEFAULT'),
help="The name of the section to use from tests.cfg"
)
def pytest_configure(config):
_parser.read(cfgpath)
section = config.getoption('--pymssql-section')
if not _parser.has_section(section) and section != 'DEFAULT':
raise ValueError('the tests.cfg file does not have section: %s' % section)
th.config.server = os.getenv('PYMSSQL_TEST_SERVER') or _parser.get(section, 'server')
th.config.user = os.getenv('PYMSSQL_TEST_USERNAME') or _parser.get(section, 'username')
th.config.password = os.getenv('PYMSSQL_TEST_PASSWORD') or _parser.get(section, 'password')
th.config.database = os.getenv('PYMSSQL_TEST_DATABASE') or _parser.get(section, 'database')
th.config.port = os.getenv('PYMSSQL_TEST_PORT') or _parser.get(section, 'port')
th.config.ipaddress = os.getenv('PYMSSQL_TEST_IPADDRESS') or _parser.get(section, 'ipaddress')
th.config.instance = os.getenv('PYMSSQL_TEST_INSTANCE') or _parser.get(section, 'instance')
th.config.orig_decimal_prec = decimal.getcontext().prec
get_app_lock()
clear_db()
def pytest_unconfigure(config):
release_app_lock()
| Python | 0 | |
bceee12d94924931ff73b45d2ed3de8b3d71522c | Add case fixture to top-level conftest.py in tests | tests/conftest.py | tests/conftest.py | import pytest
from gaphor.conftest import Case
@pytest.fixture
def case():
case = Case()
yield case
case.shutdown()
| Python | 0.000001 | |
c8946c27aa334b3c0caa4be1e108715ed8969045 | Add tests for app endpoint. | tests/test_app.py | tests/test_app.py | from __future__ import print_function
import sys
import os
if sys.version_info > (2, 7, 0):
import unittest
else:
import unittest2 as unittest
from mock import Mock
sys.path.append(os.path.join(os.path.dirname(__file__), '../bin'))
import qds
from qds_sdk.connection import Connection
from test_base import print_command
from test_base import QdsCliTestCase
class TestAppList(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'list']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "apps", params=None)
class TestAppShow(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'show', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("GET", "apps/123", params=None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'show']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'show', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppCreate(QdsCliTestCase):
def test_fail_with_no_argument(self):
sys.argv = ['qds.py', 'app', 'create']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("POST", "apps",
{'name': 'appname',
'kind': 'spark',
'config': {}})
def test_with_kind(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname',
'--kind', 'spark']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("POST", "apps",
{'name': 'appname',
'kind': 'spark',
'config': {}})
def test_fail_with_wrong_kind(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname',
'--kind', 'tez'] # tez apps are not supported yet.
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_with_config(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'zeppelin.spark.concurrentSQL=true']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"POST", "apps", {'name': 'appname', 'kind': 'spark',
'config': {
'zeppelin.spark.concurrentSQL': 'true'}})
def test_with_configs(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'spark.executor.memory=2g',
'zeppelin.spark.concurrentSQL=true']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with(
"POST", "apps", {'name': 'appname', 'kind': 'spark',
'config': {
'spark.executor.memory': '2g',
'zeppelin.spark.concurrentSQL': 'true'}})
def test_fail_with_bad_config_1(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'no-equal-sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_config_2(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'multiple=equal=sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_good_and_bad_config(self):
sys.argv = ['qds.py', 'app', 'create', '--name', 'appname', '--config',
'this=good', 'no-equal-sign']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppStop(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'stop', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("PUT", "apps/123/stop", None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'stop']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'stop', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
class TestAppDelete(QdsCliTestCase):
def test_minimal(self):
sys.argv = ['qds.py', 'app', 'delete', '123']
print_command()
Connection._api_call = Mock(return_value={})
qds.main()
Connection._api_call.assert_called_with("DELETE", "apps/123", None)
def test_fail_with_no_id(self):
sys.argv = ['qds.py', 'app', 'delete']
print_command()
with self.assertRaises(SystemExit):
qds.main()
def test_fail_with_bad_id(self):
sys.argv = ['qds.py', 'app', 'delete', 'notanumber']
print_command()
with self.assertRaises(SystemExit):
qds.main()
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
0146058fe8a5c91ce33102bb55f5f087428a03a3 | Add tests for get_keeper_token | tests/test_cli.py | tests/test_cli.py | """Test the ltd-mason CLI features."""
from base64 import b64encode
import responses
import pytest
from ltdmason.cli import get_keeper_token
@responses.activate
def test_get_keeper_token():
"""Test getting a token from LTD Keeper."""
expected_json = {'token': 'shake-it-off-shake-it-off'}
responses.add(
responses.GET,
'http://localhost:5000/token',
json=expected_json,
status=200)
_auth_header = 'Basic ' + b64encode(('user:pass')
.encode('utf-8')).decode('utf-8')
token = get_keeper_token('http://localhost:5000', 'user', 'pass')
assert responses.calls[0].request.url == 'http://localhost:5000/token'
assert responses.calls[0].request.headers['Authorization'] \
== _auth_header
assert token == 'shake-it-off-shake-it-off'
@responses.activate
def test_get_keeper_token_error():
"""Test with server error."""
expected_json = {'token': 'shake-it-off-shake-it-off'}
responses.add(
responses.GET,
'http://localhost:5000/token',
json=expected_json,
status=401)
with pytest.raises(RuntimeError):
get_keeper_token('http://localhost:5000', 'user', 'pass')
| Python | 0.000001 | |
e1e8bef8c2c916505e9bdc0ea37c81a7626db6af | Add int tests | tests/test_int.py | tests/test_int.py | import pytest
import parsenvy
def test_int_positive(monkeypatch):
"""'13'"""
monkeypatch.setenv("foo", "13")
assert parsenvy.int("foo") == 13
def test_int_negative(monkeypatch):
"""'-42'"""
monkeypatch.setenv("foo", "-42")
assert parsenvy.int("foo") == -42
def test_int_zero(monkeypatch):
"""'0'"""
monkeypatch.setenv("foo", "0")
assert parsenvy.int("foo") == 0
def test_int_negative_zero(monkeypatch):
"""'-0'"""
monkeypatch.setenv("foo", "-0")
assert parsenvy.int("foo") == 0
def test_int_invalid(monkeypatch):
"""'bar'"""
monkeypatch.setenv("foo", "bar")
with pytest.raises(TypeError):
parsenvy.int("foo")
def test_int_empty(monkeypatch):
"""''"""
monkeypatch.setenv("foo", "")
with pytest.raises(TypeError):
parsenvy.int("foo")
| Python | 0.000061 | |
3b66fbc844b023003420db7a9986811110f55489 | Add tests for the run() function | tests/test_run.py | tests/test_run.py | import sys
import tempfile
import unittest
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import icon_font_to_png
class TestRun(unittest.TestCase):
    """End-to-end tests for icon_font_to_png's command-line run() entry point."""
    def create_css_file(self, contents):
        # Helper: write *contents* to a NamedTemporaryFile and return the
        # handle.  The caller must keep the handle alive for the test's
        # duration -- the file is deleted when the handle is closed.
        css_file = tempfile.NamedTemporaryFile()
        css_file.write(contents.encode('utf-8'))
        css_file.flush()
        return css_file
    def test_usage(self):
        # With no arguments, run() should exit and print a usage line to
        # stderr (which we capture by swapping sys.stderr).
        orig_stderr = sys.stderr
        sys.stderr = StringIO()
        self.assertRaises(SystemExit, icon_font_to_png.run,
            ['icon_font_to_png.py'])
        err = sys.stderr.getvalue().strip()
        # NOTE(review): assertRegexpMatches/assertRaisesRegexp are the
        # Python 2 spellings (removed in Python 3.12); switch to
        # assertRegex/assertRaisesRegex once Python 2 support is dropped.
        self.assertRegexpMatches(err, '^usage: .*')
        sys.stderr = orig_stderr
    def test_list(self):
        # --list prints one icon name per line in sorted order; the common
        # prefix is stripped unless --keep-prefix is passed.
        css_file = self.create_css_file(
            ".foo-xyzzy:before { content: '\\f003' }\n"
            ".foo-baz:before { content: '\\f002' }\n"
            ".foo-bar:before { content: '\\f001' }\n"
        )
        orig_stdout = sys.stdout
        sys.stdout = StringIO()
        # run() exits with status 0 on success, hence the '^0' regexp.
        self.assertRaisesRegexp(SystemExit, '^0',
            icon_font_to_png.run, ['foo.ttf', css_file.name, 'bar', '--list'])
        out = sys.stdout.getvalue()
        self.assertEqual(out,
            "bar\n"
            "baz\n"
            "xyzzy\n"
        )
        sys.stdout = StringIO()
        self.assertRaisesRegexp(SystemExit, '^0',
            icon_font_to_png.run, ['foo.ttf', css_file.name, 'bar', '--list',
                '--keep-prefix'])
        out = sys.stdout.getvalue()
        self.assertEqual(out,
            "foo-bar\n"
            "foo-baz\n"
            "foo-xyzzy\n"
        )
        sys.stdout = orig_stdout
if __name__ == '__main__':
    # BUG FIX: ``unittest.main`` was previously referenced without being
    # called, so running this file directly executed no tests at all.
    unittest.main()
| Python | 0.000011 | |
578de6c57f9698c7e273af06d1e815f71269bb18 | Add a sample python file interesting to debug | tests/to_debug.py | tests/to_debug.py | import sys
import os
import time
import threading
import ikpdb
# Feature toggles selecting which debugger scenarios the script exercises.
TEST_MULTI_THREADING = False
TEST_EXCEPTION_PROPAGATION = False
TEST_POSTMORTEM = True
TEST_SYS_EXIT = 0  # non-zero makes the script exit immediately with that code
TEST_STEPPING = False
# Note that ikpdb.set_trace() will reset/mess breakpoints set using GUI
TEST_SET_TRACE = False
TCB = TEST_CONDITIONAL_BREAKPOINT = True
class Worker(object):
    """Cooperatively-stoppable countdown loop run on a background thread."""
    def __init__(self):
        self._running = True  # cleared by terminate() to stop run()
    def terminate(self):
        self._running = False
    def run(self, n):
        # Count down from n, sleeping between iterations so a debugger has
        # time to attach and set breakpoints.  (Python 2 print statements.)
        work_count = n
        while self._running and n > 0:
            print "Worker: Doing iteration: %s" % (work_count - n)
            if n == 3:
                pass # ikpdb.set_trace()
            n -= 1
            time.sleep(2)
# Assorted module-level values to inspect from the debugger's variable view.
ga = 5
gb ="coucou"
g_dict = {"Genesis": 1, "Don't Look Back": 2, 'array': [1,3,{'coucou': 3.14}]}
a_tuple = (1,'e', 3.14, ['a', 'b'])
class BigBear:
    """Simple class with a class-level attribute, for attribute inspection."""
    color = "white"  # shared by all instances
    def __init__(self, name='unknown'):
        self._name = name
    def grumble(self):
        print "Roaaarrrrrrr"
def sub_function():
    """Tiny leaf call: a convenient step-into target for the debugger."""
    result = True
    return result
def the_function(p_nb_seconds):
    """Main workload: loop roughly p_nb_seconds seconds with locals to inspect."""
    a_var = 18.3
    the_function_local_list = [1,2,3,'cyril']
    a_beast = BigBear()
    print "ga=%s" % ga
    print "Hello World"
    print "Ceci est la ligne avec le point d'arret"
    for loop_idx in range(p_nb_seconds):
        print "hello @ %s seconds" % loop_idx
        time.sleep(1)
        if loop_idx == 12:
            if TEST_SET_TRACE:
                ikpdb.set_trace() # will break on next line
            pass # Need this for set_trace()
    # Reassigned after the loop so a watch on a_var shows the change.
    a_var = 98.3
    sub_function()
def sub_raiser():
    """Always fails; lets the debugger observe an exception being born."""
    message = "Prends ca dans ta bouille"
    raise Exception(message)
def raiser():
    """Catch sub_raiser()'s exception and raise it again one frame up."""
    try:
        sub_raiser()
    except Exception as e:
        # NOTE(review): ``raise e`` re-raises from this frame; under
        # Python 2 that replaces the original traceback (a bare ``raise``
        # would preserve it).  Presumably intentional for the test -- confirm.
        raise e
if __name__=='__main__':
    # Entry point: drive the scenarios selected by the TEST_* flags above.
    b = 0
    main_bear = BigBear("Cyril")
    print "Type of main_bear=%s" % type(main_bear)
    print "sys.argv=%s" % sys.argv
    if TEST_SYS_EXIT:
        sys.exit(TEST_SYS_EXIT)
    if TEST_EXCEPTION_PROPAGATION:
        raiser()
    if TEST_MULTI_THREADING:
        w = Worker()
        t = threading.Thread(target=w.run, args=(5,))
        t.start()
    # Keep stepping sessions short; otherwise run long enough to attach.
    duration = 2 if TEST_STEPPING else 15
    the_function(duration)
    if TEST_MULTI_THREADING:
        w.terminate()
        t.join()
    print "finished"
    if TEST_POSTMORTEM:
        # Deliberate ZeroDivisionError (b == 0) to trigger post-mortem mode.
        print 5 / b
| Python | 0 | |
329270ddef5f4da4528750ebc463ffc910325ec8 | add migration | temba/channels/migrations/0066_auto_20170306_1713.py | temba/channels/migrations/0066_auto_20170306_1713.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-06 17:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: AlterField on ChannelSession.status with the
    # full single-character choice set (P/Q/R/I/D/B/F/N/C/X/T/A).
    dependencies = [
        ('channels', '0065_auto_20170228_0837'),
    ]
    operations = [
        migrations.AlterField(
            model_name='channelsession',
            name='status',
            field=models.CharField(choices=[('P', 'Pending'), ('Q', 'Queued'), ('R', 'Ringing'), ('I', 'In Progress'), ('D', 'Complete'), ('B', 'Busy'), ('F', 'Failed'), ('N', 'No Answer'), ('C', 'Canceled'), ('X', 'Interrupted'), ('T', 'Triggered'), ('A', 'Initiated')], default='P', help_text='The status of this session', max_length=1),
        ),
    ]
| Python | 0.000001 | |
a8caef202ba0fd6909359241ff385eca762aca1f | Add echo effect | quack/effects.py | quack/effects.py | # Author: Martin McBride
# Created: 2018-09-25
# Copyright (C) 2018, Martin McBride
# License: MIT
import math
import numpy as np
from quack.buffer import create_buffer
def echo(params, source, delay, strength):
    '''
    Create an echo: each output sample is the source sample plus the source
    from ``delay[i]`` samples earlier, scaled by ``strength[i]``.

    :param params: sequence parameters; ``params.length`` gives the sample count
    :param source: input signal (anything accepted by ``create_buffer``)
    :param delay: per-sample echo delay, in samples
    :param strength: per-sample echo gain
    :return: buffer of ``params.length`` samples with the echo applied
    '''
    source = create_buffer(params, source)
    delay = create_buffer(params, delay)
    strength = create_buffer(params, strength)
    # NOTE(review): if create_buffer returns a numpy array, ``source[:]`` is
    # a *view*, not a copy -- the loop below would then mutate `source` too
    # and mix already-echoed samples back in (feedback echo).  Confirm
    # whether feedback is intended; otherwise use an explicit copy.
    output = source[:]
    for i in range(params.length):
        d = int(i - delay[i])
        # Only mix in the delayed sample when it falls inside the buffer.
        if 0 <= d < params.length:
            output[i] += source[d]*strength[i]
    return output
| Python | 0.000002 | |
bdd7016fe8f41abdc8562d114efc41622916a675 | Create startBackEnd.py | startBackEnd.py | startBackEnd.py | #!/usr/bin/python
import boto.ec2
conn = boto.ec2.connect_to_region("eu-central-1", aws_access_key_id='AKIAI111111111111111', aws_secret_access_key='keyyyyy')
instance = conn.get_all_instances(instance_ids=['i-40eb8111'])
print instance[0].instances[0].start()
| Python | 0.000002 | |
f6f2c6fc2a51bb3243d9b99ab1093809a2d1a5bb | Add script that tests AI players | test_players.py | test_players.py | from AI import *
import random
def RandomPlayer(game):
    """Baseline player: score 0 and a uniformly random legal move."""
    moves = game.get_available_moves()
    return 0, random.choice(moves)
def ABPlayer(game):
    """Alpha-beta player: depth-8 search with the base evaluation function."""
    search_depth = 8
    return alpha_beta_search(game, search_depth, -np.inf, np.inf, True,
                             evaluate_base)
def ABChainPlayer1(game):
    """Alpha-beta player: depth-7 search scoring by chain length."""
    search_depth = 7
    return alpha_beta_search(game, search_depth, -np.inf, np.inf, True,
                             evaluate_chain_len)
def ABChainPlayer2(game):
    """Alpha-beta player: depth-7 search scoring by chain count."""
    search_depth = 7
    return alpha_beta_search(game, search_depth, -np.inf, np.inf, True,
                             evaluate_chain_count)
# Head-to-head match driver (Python 2 print statements): players[1] moves
# when game.turn == 1, players[0] otherwise (boolean used as list index).
players = [ABChainPlayer2, ABPlayer]
player_names = tuple((map(lambda x: x.__name__, players)))
print "%s v. %s" % player_names
game = DotsAndBoxes(-1, 4, 4)
while not game.is_over():
    play_fn = players[game.turn == 1]
    print "\tTurn: %s" % (player_names[game.turn == 1])
    score, move = play_fn(game)
    game.play(move)
    print "\tPlayed: %d %d" % (move)
    print "\tEvaluated score: %d\n" % (score)
# Winner is whichever side holds the higher entry in game.score.
print "Winner: %s" % (player_names[np.argmax(game.score)])
print game.score
0cf85c1ab68ddc50787e6a09f3604320d18118b4 | Add UniqueForFieldsMixin | django_more/mixins.py | django_more/mixins.py | from django.db.models.options import normalize_together
from django.utils.functional import cached_property
# Used by OrderByField to allow for unique_together constraints to be field declared
class UniqueForFieldsMixin:
    """ Mixin first to a Field to add a unique_for_fields field option """
    # Tuple of sibling field names this field must be unique together with.
    unique_for_fields = None
    def __init__(self, unique_for_fields=None, *args, **kwargs):
        if unique_for_fields:
            self.unique_for_fields = tuple(unique_for_fields)
            # If unique_for_fields then any unique option is irrelevant
            kwargs.pop('unique', None)
        super().__init__(*args, **kwargs)
    def deconstruct(self):
        # Preserve the custom option so migrations can rebuild the field.
        name, path, args, kwargs = super().deconstruct()
        if self.unique_for_fields:
            kwargs['unique_for_fields'] = self.unique_for_fields
        return name, path, args, kwargs
    def contribute_to_class(self, cls, *args, **kwargs):
        super().contribute_to_class(cls, *args, **kwargs)
        # Add any necessary unique_together index to the model
        if self.unique_for_fields:
            # Alter only original_attrs to fake being a declared unique_together.
            # Cannot modify cls._meta.unique_together as it breaks state consistency for migrations
            ut = set((self.unique_together, )).union(normalize_together(cls._meta.original_attrs.get('unique_together')))
            cls._meta.original_attrs['unique_together'] = ut
    @cached_property
    def unique_together(self):
        # The declared sibling fields plus this field's own attname.
        return self.unique_for_fields + (self.attname, )
    @cached_property
    def unique_for_attnames(self):
        # Resolve declared field names to their concrete column attnames.
        return [self.model._meta.get_field(field_name).get_attname() for field_name in self.unique_for_fields]
| Python | 0 | |
419f86f5c50f812f19dd731e9c33f66e57f51a48 | Test matrix - work in progress | tests/matrix.py | tests/matrix.py | import os.path, urllib, subprocess, shutil
# Interpreter and libcurl versions forming the build-and-test matrix.
python_versions = ['2.4.6', '2.5.6', '2.6.8', '2.7.5']
libcurl_versions = ['7.19.0', '7.32.0']
class in_dir:
    """Context manager: chdir into a directory on entry, restore on exit."""
    def __init__(self, dir):
        self.target = dir
    def __enter__(self):
        self.previous = os.getcwd()
        os.chdir(self.target)
    def __exit__(self, exc_type, exc_value, tb):
        os.chdir(self.previous)
def fetch(url, archive):
    # Download *url* to *archive* unless it already exists.  The data goes
    # to a dotted temp name first and is renamed only on success, so a
    # partial transfer never masquerades as a complete archive.
    # (Python 2: urllib.urlopen and print statement.)
    if not os.path.exists(archive):
        print "Fetching %s" % url
        io = urllib.urlopen(url)
        with open('.tmp.%s' % archive, 'w') as f:
            while True:
                chunk = io.read(65536)
                if len(chunk) == 0:
                    break
                f.write(chunk)
        os.rename('.tmp.%s' % archive, archive)
def build(archive, dir, prefix):
    # Unpack *archive* and run ./configure && make && make install into
    # *prefix*; skipped entirely when *dir* already exists (cached build).
    if not os.path.exists(dir):
        print "Building %s" % archive
        subprocess.check_call(['tar', 'xf', archive])
        with in_dir(dir):
            subprocess.check_call(['./configure', '--prefix=%s' % prefix])
            subprocess.check_call(['make'])
            subprocess.check_call(['make', 'install'])
# Build every CPython version of the matrix into i/Python-<ver>.
for python_version in python_versions:
    url = 'http://www.python.org/ftp/python/%s/Python-%s.tgz' % (python_version, python_version)
    archive = os.path.basename(url)
    fetch(url, archive)
    dir = archive.replace('.tgz', '')
    prefix = os.path.abspath('i/%s' % dir)
    build(archive, dir, prefix)
# Build every libcurl version of the matrix into i/curl-<ver>.
for libcurl_version in libcurl_versions:
    url = 'http://curl.haxx.se/download/curl-%s.tar.gz' % libcurl_version
    archive = os.path.basename(url)
    fetch(url, archive)
    dir = archive.replace('.tar.gz', '')
    prefix = os.path.abspath('i/%s' % dir)
    build(archive, dir, prefix)
fetch('https://raw.github.com/pypa/virtualenv/1.7/virtualenv.py', 'virtualenv-1.7.py')
if not os.path.exists('venv'):
    os.mkdir('venv')
# One fresh virtualenv per (python, libcurl) pair, then run pycurl's tests.
for python_version in python_versions:
    for libcurl_version in libcurl_versions:
        python_prefix = os.path.abspath('i/Python-%s' % python_version)
        # NOTE(review): libcurl_prefix is computed but never used below --
        # either wire it into the pycurl build or drop it.
        libcurl_prefix = os.path.abspath('i/curl-%s' % libcurl_version)
        venv = os.path.abspath('venv/Python-%s-curl-%s' % (python_version, libcurl_version))
        if os.path.exists(venv):
            shutil.rmtree(venv)
        subprocess.check_call(['python', 'virtualenv-1.7.py', venv, '-p', '%s/bin/python' % python_prefix])
        with in_dir('pycurl'):
            subprocess.check_call('make clean && . %s/bin/activate && make test' % venv, shell=True)
| Python | 0 | |
c24647a921c64cfc8a1385f7e735622514e199c3 | make it clear that we don't depend on gabble version for the test | tests/test-caps-update.py | tests/test-caps-update.py | """
Test that CapabilitiesChanged signal is emitted only once after
all the caps in the presence have been analyzed.
"""
import dbus
from twisted.words.xish import domish
from servicetest import match, unwrap, lazy
from gabbletest import go, make_result_iq
def make_presence(from_jid, type, status):
    """Build a <presence> stanza; omit any attribute/child passed as None."""
    stanza = domish.Element((None, 'presence'))
    for attr, value in (('from', from_jid), ('type', type)):
        if value is not None:
            stanza[attr] = value
    if status is not None:
        stanza.addElement('status', content=status)
    return stanza
def caps_iface(proxy):
    """Wrap a D-Bus proxy in the Telepathy Capabilities interface."""
    iface_name = 'org.freedesktop.Telepathy.Connection.Interface.Capabilities'
    return dbus.Interface(proxy, iface_name)
@match('dbus-signal', signal='StatusChanged', args=[0, 1])
def expect_connected(event, data):
    # Once connected, push a presence for bob carrying an entity-caps node
    # (http://jabber.org/protocol/caps) with audio/video extensions; this
    # is the stimulus that should produce exactly one CapabilitiesChanged.
    presence = make_presence('bob@foo.com/Foo', None, 'hello')
    presence.addElement('priority', None, '0')
    c = presence.addElement(('http://jabber.org/protocol/caps', 'c'))
    c['node'] = 'http://telepathy.freedesktop.org/caps'
    c['ver'] = '0.4.test-version'
    c['ext'] = 'jingle-audio jingle-video'
    data['stream'].send(presence)
    return True
@lazy
@match('dbus-signal', signal='CapabilitiesChanged',
    args=[[(2, u'org.freedesktop.Telepathy.Channel.Type.StreamedMedia', 0,
    3, 0, 3)]])
def expect_CapabilitiesChanged(event, data):
    # The StreamedMedia capability arrived in a single signal; disconnect.
    data['conn_iface'].Disconnect()
    return True
@match('dbus-signal')
def expect_disconnected(event, data):
    # Any further CapabilitiesChanged before the disconnect completes would
    # mean the signal fired more than once -- the regression under test.
    assert event.signal != 'CapabilitiesChanged'
    if event.signal == 'StatusChanged' and event.args == [2, 1]:
        return True
    return False
if __name__ == '__main__':
    # Run the servicetest event loop over the expect_* handlers above.
    go()
|
"""
Test that CapabilitiesChanged signal is emitted only once after
all the caps in the presence have been analyzed.
"""
import dbus
from twisted.words.xish import domish
from servicetest import match, unwrap, lazy
from gabbletest import go, make_result_iq
# --- Older revision of the same test kept in this record: identical flow,
# but the presence advertises caps version '0.5.14' with the voice-v1 ext.
def make_presence(from_jid, type, status):
    # Build a <presence> stanza, setting only the pieces that were given.
    presence = domish.Element((None, 'presence'))
    if from_jid is not None:
        presence['from'] = from_jid
    if type is not None:
        presence['type'] = type
    if status is not None:
        presence.addElement('status', content=status)
    return presence
def caps_iface(proxy):
    # Wrap a D-Bus proxy in the Telepathy Capabilities interface.
    return dbus.Interface(proxy,
        'org.freedesktop.Telepathy.Connection.Interface.Capabilities')
@match('dbus-signal', signal='StatusChanged', args=[0, 1])
def expect_connected(event, data):
    # Send bob's presence with an entity-caps node and three extensions.
    presence = make_presence('bob@foo.com/Foo', None, 'hello')
    presence.addElement('priority', None, '0')
    c = presence.addElement(('http://jabber.org/protocol/caps', 'c'))
    c['node'] = 'http://telepathy.freedesktop.org/caps'
    c['ver'] = '0.5.14'
    c['ext'] = 'voice-v1 jingle-audio jingle-video'
    data['stream'].send(presence)
    return True
@lazy
@match('dbus-signal', signal='CapabilitiesChanged',
    args=[[(2, u'org.freedesktop.Telepathy.Channel.Type.StreamedMedia', 0,
    3, 0, 3)]])
def expect_CapabilitiesChanged(event, data):
    # One signal carrying StreamedMedia caps is the success condition.
    data['conn_iface'].Disconnect()
    return True
@match('dbus-signal')
def expect_disconnected(event, data):
    # CapabilitiesChanged must not fire again while disconnecting.
    assert event.signal != 'CapabilitiesChanged'
    if event.signal == 'StatusChanged' and event.args == [2, 1]:
        return True
    return False
if __name__ == '__main__':
    go()
| Python | 0.000001 |
1ef3c14af249f211df4cdad89cdd49d7f2845eb1 | Add share count using Flask. | flask_share_count.py | flask_share_count.py | from flask import Flask, jsonify, request
import grequests, re, json
app = Flask(__name__)
# Per-network share/like count endpoints; '%s' is the target page URL.
FACEBOOK = 'https://api.facebook.com/method/links.getStats?urls=%s&format=json'
TWITTER = 'http://urls.api.twitter.com/1/urls/count.json?url=%s&callback=count'
REDDIT = 'http://buttons.reddit.com/button_info.json?url=%s'
STUMBLEUPON = 'http://www.stumbleupon.com/services/1.01/badge.getinfo?url=%s'
PINTEREST = 'http://widgets.pinterest.com/v1/urls/count.json?source=6&url=%s'
# NOTE(review): a Google API key is embedded in this URL -- move it out of
# source control (environment variable or config) before publishing.
GOOGLE_PLUS = 'https://clients6.google.com/rpc?key=AIzaSyCKSbrvQasunBoV16zDH9R33D88CeLr9gQ'
@app.route('/')
def hello_world():
    """Liveness check for the service root."""
    greeting = 'Hello World!'
    return greeting
@app.route('/count')
def total_count():
    """Sum the share counts for ?url=... across six social networks."""
    target_url = request.args.get('url')
    # Build the JSON-RPC payload Google+ expects for pos.plusones.get.
    params = []
    param = {}
    param['method'] = 'pos.plusones.get'
    param['id'] = 'p'
    param['params'] = {}
    param['params']['nolog'] = True
    param['params']['id'] = target_url
    param['params']['source'] = 'widget'
    param['params']['userId'] = '@viewer'
    param['params']['groupId'] = '@self'
    param['jsonrpc'] = '2.0'
    param['key'] = 'p'
    param['apiVersion'] = 'v1'
    params.append(param)
    json_param = json.dumps(params)
    try:
        # Fire all six requests concurrently with grequests.
        requests = (
            grequests.get(FACEBOOK % (target_url)),
            grequests.get(TWITTER % (target_url)),
            grequests.get(REDDIT % (target_url)),
            grequests.get(STUMBLEUPON % (target_url)),
            grequests.get(PINTEREST % (target_url)),
            grequests.post(GOOGLE_PLUS, data=json_param)
        )
    # NOTE(review): bare except hides real errors (even KeyboardInterrupt);
    # catch the specific request-construction failures instead.
    except:
        return jsonify(result='error', total=-1)
    responses = grequests.map(requests)
    # Debug print (Python 2 statement) -- remove before production.
    print dir(responses[0])
    # Parse each response with its network-specific parser, in order.
    counts = (
        parse_facebook(responses[0]),
        parse_twitter(responses[1]),
        parse_reddit(responses[2]),
        parse_stumbleupon(responses[3]),
        parse_pinterest(responses[4]),
        parse_googleplus(responses[5])
    )
    print counts
    total_count = 0
    for count in counts:
        total_count += count
    return jsonify(result='success', total= total_count)
def parse_facebook(res):
    """Facebook engagement: shares plus likes from links.getStats' first entry."""
    stats = res.json()[0]
    return stats['share_count'] + stats['like_count']
def parse_twitter(res):
    """Tweet count: strip the JSONP wrapper and read the 'count' field."""
    payload = re.search(r'{.+}', res.content).group(0)
    return json.loads(payload)['count']
def parse_reddit(res):
    # Reddit button info: score of the first matching submission, or 0 when
    # the URL has never been submitted.  (Python 2 debug print statements.)
    print 'reddit:'
    print res.json()
    if 'children' in res.json()['data'] and res.json()['data']['children']:
        return res.json()['data']['children'][0]['data']['score']
    return 0
def parse_stumbleupon(res):
    """StumbleUpon view count, or 0 when the badge reports no views."""
    result = res.json()['result']
    return int(result['views']) if 'views' in result else 0
def parse_pinterest(res):
    """Pinterest pin count, extracted from the JSONP widget response."""
    body = re.search(r'{.+}', res.content)
    return json.loads(body.group(0))['count']
def parse_googleplus(res):
    """Google+ global +1 count, coerced to int."""
    metadata = res.json()[0]['result']['metadata']
    return int(metadata['globalCounts']['count'])
if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug debugger (which allows
    # arbitrary code execution); never leave it on when reachable remotely.
    app.run(port=8000, debug=True)
| Python | 0 | |
7311f8f2a8a7ab285669dc02d26d7e2248583ff5 | Add tests for 'rle_compress' | test_rle.py | test_rle.py | import pypolycomp
import numpy as np
def test_compression():
    """rle_compress emits (count, value) pairs for every supported int dtype."""
    int_dtypes = (np.int8, np.int16, np.int32, np.int64,
                  np.uint8, np.uint16, np.uint32, np.uint64)
    for dtype in int_dtypes:
        data = np.array([1, 1, 1, 2, 3], dtype=dtype)
        expected = np.array([3, 1, 1, 2, 1, 3], dtype=dtype)
        assert np.all(pypolycomp.rle_compress(data) == expected)
| Python | 0.000013 | |
da2b773bf6e669b3ec50bbd6af73e1d80bb0b5a5 | Add tsstats/event.py for easy event-initialization | tsstats/events.py | tsstats/events.py | from collections import namedtuple
Event = namedtuple(
    'Event', ['timestamp', 'identifier', 'action', 'arg', 'arg_is_client']
)


def nick(timestamp, identifier, nick):
    """Event: the client set a new nickname (arg holds the nick)."""
    return Event(timestamp=timestamp, identifier=identifier,
                 action='set_nick', arg=nick, arg_is_client=False)


def connect(timestamp, identifier):
    """Event: the client connected (arg repeats the timestamp)."""
    return Event(timestamp=timestamp, identifier=identifier,
                 action='connect', arg=timestamp, arg_is_client=False)


def disconnect(timestamp, identifier):
    """Event: the client disconnected (arg repeats the timestamp)."""
    return Event(timestamp=timestamp, identifier=identifier,
                 action='disconnect', arg=timestamp, arg_is_client=False)


def kick(timestamp, identifier, target_identifier):
    """Event: one client kicked another (arg names the kicked client)."""
    return Event(timestamp=timestamp, identifier=identifier,
                 action='kick', arg=target_identifier, arg_is_client=True)


def ban(timestamp, identifier, target_identifier):
    """Event: one client banned another (arg names the banned client)."""
    return Event(timestamp=timestamp, identifier=identifier,
                 action='ban', arg=target_identifier, arg_is_client=True)
| Python | 0 | |
99f5c2a9cd44ac8ed301a781460816e8f0dffdb8 | add killall.py example script | examples/killall.py | examples/killall.py | #!/usr/bin/env python
"""
Kill a process by name.
"""
import os
import sys
import psutil
def main():
    """Kill every process whose name matches argv[1], except ourselves."""
    if len(sys.argv) != 2:
        sys.exit('usage: %s name' % __file__)
    else:
        NAME = sys.argv[1]
    killed = []
    for proc in psutil.process_iter():
        # NOTE(review): ``proc.name`` as a plain attribute matches old
        # psutil; recent releases expose it as a method (``proc.name()``).
        # Confirm the psutil version this example targets.
        if proc.name == NAME and proc.pid != os.getpid():
            proc.kill()
            killed.append(proc.pid)
    if not killed:
        # Mirror killall(1): non-zero exit with a message when no match.
        sys.exit('%s: no process found' % NAME)
    else:
        sys.exit(0)
if __name__ == '__main__':
    # BUG FIX: this call previously sat unreachably inside main() -- both
    # branches of the preceding if/else call sys.exit first -- so the script
    # never actually ran.  Hoist it to a guarded top-level entry point.
    sys.exit(main())
| Python | 0.000001 | |
20c08b96ce7a5377576e45953266c51079b5bdeb | Create testfile.py | testfile.py | testfile.py | print("Tess is cool")
| Python | 0.000005 | |
da4bdccbc7ff3b949659e048c9a0b3643dfbca42 | Add Docker pipeline operator - wip | airflow_pipeline/operators/docker_pipeline_operator.py | airflow_pipeline/operators/docker_pipeline_operator.py |
"""
.. module:: operators.docker_pipeline_operator
:synopsis: A DockerOperator that moves XCOM data used by the pipeline
.. moduleauthor:: Ludovic Claude <ludovic.claude@chuv.ch>
"""
import logging
from datetime import datetime

try:
    from airflow.operators import DockerOperator
except ImportError:
    from airflow.operators.docker_operator import DockerOperator
from airflow import settings
from airflow.models import DagRun
from airflow.utils import apply_defaults

from airflow_pipeline.pipelines import TransferPipelineXComs
class DockerPipelineOperator(DockerOperator, TransferPipelineXComs):
    """
    A DockerOperator that moves XCOM data used by the pipeline.
    A temporary directory is created on the host and mounted into a container to allow storing files
    that together exceed the default disk size of 10GB in a container. The path to the mounted
    directory can be accessed via the environment variable ``AIRFLOW_TMP_DIR``.
    :param image: Docker image from which to create the container.
    :type image: str
    :param api_version: Remote API version.
    :type api_version: str
    :param command: Command to be run in the container.
    :type command: str or list
    :param cpus: Number of CPUs to assign to the container.
        This value gets multiplied with 1024. See
        https://docs.docker.com/engine/reference/run/#cpu-share-constraint
    :type cpus: float
    :param docker_url: URL of the host running the docker daemon.
    :type docker_url: str
    :param environment: Environment variables to set in the container.
    :type environment: dict
    :param force_pull: Pull the docker image on every run.
    :type force_pull: bool
    :param mem_limit: Maximum amount of memory the container can use. Either a float value, which
        represents the limit in bytes, or a string like ``128m`` or ``1g``.
    :type mem_limit: float or str
    :param network_mode: Network mode for the container.
    :type network_mode: str
    :param tls_ca_cert: Path to a PEM-encoded certificate authority to secure the docker connection.
    :type tls_ca_cert: str
    :param tls_client_cert: Path to the PEM-encoded certificate used to authenticate docker client.
    :type tls_client_cert: str
    :param tls_client_key: Path to the PEM-encoded key used to authenticate docker client.
    :type tls_client_key: str
    :param tls_hostname: Hostname to match against the docker server certificate or False to
        disable the check.
    :type tls_hostname: str or bool
    :param tls_ssl_version: Version of SSL to use when communicating with docker daemon.
    :type tls_ssl_version: str
    :param tmp_dir: Mount point inside the container to a temporary directory created on the host by
        the operator. The path is also made available via the environment variable
        ``AIRFLOW_TMP_DIR`` inside the container.
    :type tmp_dir: str
    :param user: Default user inside the docker container.
    :type user: int or str
    :param volumes: List of volumes to mount into the container, e.g.
        ``['/host/path:/container/path', '/host/path2:/container/path2:ro']``.
    :param xcom_push: Does the stdout will be pushed to the next step using XCom.
        The default is True.
    :type xcom_push: bool
    :param xcom_all: Push all the stdout or just the last line. The default is True (all lines).
    :type xcom_all: bool
    :param parent_task: name of the parent task to use to locate XCom parameters
    :type parent_task: str
    """

    template_fields = ('templates_dict', 'incoming_parameters',)
    template_ext = tuple()
    ui_color = '#e9ffdb'  # nyanza

    @apply_defaults
    def __init__(
            self,
            image,
            api_version=None,
            command=None,
            cpus=1.0,
            docker_url='unix://var/run/docker.sock',
            environment=None,
            force_pull=False,
            mem_limit=None,
            network_mode=None,
            tls_ca_cert=None,
            tls_client_cert=None,
            tls_client_key=None,
            tls_hostname=None,
            tls_ssl_version=None,
            tmp_dir='/tmp/airflow',
            user=None,
            volumes=None,
            xcom_push=True,
            xcom_all=True,
            parent_task=None,
            *args, **kwargs):
        DockerOperator.__init__(self,
                                image=image,
                                api_version=api_version,
                                command=command,
                                cpus=cpus,
                                docker_url=docker_url,
                                environment=environment,
                                force_pull=force_pull,
                                mem_limit=mem_limit,
                                network_mode=network_mode,
                                tls_ca_cert=tls_ca_cert,
                                tls_client_cert=tls_client_cert,
                                tls_client_key=tls_client_key,
                                tls_hostname=tls_hostname,
                                tls_ssl_version=tls_ssl_version,
                                tmp_dir=tmp_dir,
                                user=user,
                                volumes=volumes,
                                xcom_push=xcom_push,
                                xcom_all=xcom_all,
                                *args, **kwargs)
        TransferPipelineXComs.__init__(self, parent_task)
        self.logs = None
        # BUG FIX: pre_execute/execute below rely on op_args/op_kwargs but
        # nothing initialised them, so pre_execute raised AttributeError on
        # the very first run.  Start them empty.
        self.op_args = []
        self.op_kwargs = {}

    def pre_execute(self, context):
        # Pull the pipeline XComs published by the parent task and merge
        # them (plus our own task id) into the keyword arguments.
        self.read_pipeline_xcoms(context, expected=[
            'folder', 'session_id', 'participant_id', 'scan_date',
            'dataset'])
        self.pipeline_xcoms['task_id'] = self.task_id
        self.op_kwargs.update(self.pipeline_xcoms)

    def execute(self, context):
        # Run the container, keeping its output for later DAG triggering.
        # BUG FIX: this previously read ``super(SpmPipelineOperator, self)``,
        # a class that does not exist in this module (copy-paste leftover).
        self.logs = super(DockerPipelineOperator, self).execute(context)
        self.op_kwargs = self.op_kwargs or {}
        self.pipeline_xcoms = self.pipeline_xcoms or {}
        # TODO(review): ``self.provide_context`` and ``self.docker_callable``
        # are defined neither here nor by DockerOperator -- this section
        # still looks copied from a PythonOperator and needs finishing
        # before the operator is usable (commit is flagged as WIP).
        if self.provide_context:
            context.update(self.op_kwargs)
            context.update(self.pipeline_xcoms)
            self.op_kwargs = context
        return_value = self.docker_callable(*self.op_args, **self.op_kwargs)
        logging.info("Done. Returned value was: " + str(return_value))
        # A dict result is folded back into the pipeline XComs for the
        # downstream tasks.
        if isinstance(return_value, dict):
            self.pipeline_xcoms.update(return_value)
        self.write_pipeline_xcoms(context)
        return return_value

    def trigger_dag(self, context, dag_id):
        """Externally trigger *dag_id*, passing the container logs and the
        pipeline XComs along in the DagRun conf payload."""
        if dag_id:
            run_id = 'trig__' + datetime.now().isoformat()
            payload = {
                'output': self.logs,
                'error': ''
            }
            payload.update(self.pipeline_xcoms)
            session = settings.Session()
            dr = DagRun(
                dag_id=dag_id,
                run_id=run_id,
                conf=payload,
                external_trigger=True)
            session.add(dr)
            session.commit()
            session.close()
| Python | 0 | |
d75eebbcb6b1922d37a97550bc4cbead6e50cfdb | add localdb.py | united/localdb.py | united/localdb.py | # -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import sqlite3
import os
import logging
from pkg_resources import resource_filename
"""
資料庫存取 類別
"""
class SQLite3Db:
    """Thin wrapper around the package-local SQLite3 database file.

    The database lives inside the ``cameo_res`` package as ``local.db``
    and is created, with an initial schema, on first use.
    """

    # Constructor: open the database, initializing it on first use.
    def __init__(self):
        logging.basicConfig(level=logging.INFO)
        dbPath = resource_filename("cameo_res", "local.db")
        if os.path.exists(dbPath):  # open existing database
            logging.info("connect to sqlite3 db.")
            self.conn = sqlite3.connect(dbPath)
        else:  # first run: connect (sqlite3 creates the file) and build schema
            logging.info("connect to sqlite3 db with initialization.")
            self.conn = sqlite3.connect(dbPath)
            c = self.conn.cursor()
            # BUG FIX: TABLE is a reserved word in SQLite, so the previous
            # unquoted ``CREATE TABLE table`` always raised
            # sqlite3.OperationalError; the identifier must be quoted.
            c.execute("""CREATE TABLE "table"
                          (id INTEGER PRIMARY KEY)""")
            self.conn.commit()

    # Destructor: close the database connection.
    def __del__(self):
        logging.info("close sqlite3 db connection.")
        # Guard against a partially-constructed instance (__init__ failed
        # before self.conn was assigned), which used to crash during GC.
        conn = getattr(self, "conn", None)
        if conn is not None:
            conn.close()

    # Execute SQL and commit (for INSERT / UPDATE / DELETE).
    def commitSQL(self, strSQL=None):
        # NOTE(review): callers pass raw SQL strings; never interpolate
        # untrusted input here -- prefer parameterized queries.
        c = self.conn.cursor()
        c.execute(strSQL)
        self.conn.commit()

    # Execute SQL and fetch all rows (for SELECT).
    def fetchallSQL(self, strSQL=None):
        c = self.conn.cursor()
        c.execute(strSQL)
        return c.fetchall()
b6d1b9365c356a14f0f9ef478247d498845a2b2c | add script to process normal vectors | coastline/data/vectors.py | coastline/data/vectors.py | import matplotlib.pyplot as plt
import glob
import math
def extract_data(file_name):
    """Parse (x, y) coordinate pairs from *file_name*, skipping 3 header lines."""
    points = []
    with open(file_name, 'r') as handle:
        for line_no, line in enumerate(handle):
            if line_no <= 2:
                continue  # header lines carry no coordinates
            fields = line.split()
            points.append((float(fields[0]), float(fields[1])))
    return points
def normalize(vector, s):
    """Scale a 2-D *vector* to Euclidean length *s*."""
    length = math.sqrt(vector[0] ** 2.0 + vector[1] ** 2.0)
    return (s * vector[0] / length, s * vector[1] / length)
def get_normal_vectors(points, scale=5000.0):
    """Normal vectors for the vertices of a closed polyline.

    Each point's normal is perpendicular to the chord joining its two
    neighbours (indices wrap around at the ends) and is scaled to a fixed
    length.

    :param points: list of (x, y) vertices
    :param scale: length given to every normal (generalized from the
        previously hard-coded 5000.0; default keeps old behaviour)
    :return: list of (dx, dy) normals, one per input point
    """
    num_points = len(points)
    vectors = []
    for i in range(num_points):
        i_before = i - 1
        i_after = (i + 1) % num_points
        # Rotate the neighbour-to-neighbour chord by 90 degrees.
        vector = (points[i_after][1] - points[i_before][1],
                  -(points[i_after][0] - points[i_before][0]))
        vectors.append(normalize(vector, scale))
    return vectors
def add_plot(file_name, style):
    """Plot one island outline plus its normal vectors on the current axes."""
    points = extract_data(file_name)
    if len(points) > 3: # for the moment cannot handle linear islands
        ax = plt.axes()
        vectors = get_normal_vectors(points)
        for i in range(len(points)):
            ax.arrow(points[i][0], points[i][1], vectors[i][0], vectors[i][1], head_width=0.1, head_length=0.1, fc='k', ec='k')
        # Close the figure with the outline itself in the requested style.
        (xs, ys) = zip(*points)
        plt.plot(xs, ys, style)
# Plot every coastline data file found in the working directory.
for f in glob.glob('*.txt'):
    add_plot(f, 'r-')
# Optional fixed viewport, kept for quick re-enabling while debugging:
#axes = plt.gca()
#axes.set_xlim([-20.0, 0.0])
#axes.set_ylim([40.0, 60.0])
plt.show()
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.