commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
b1a5764956e0f569b4955dbf43e5656873c903f6 | Create new package. (#7649) | var/spack/repos/builtin/packages/soapdenovo-trans/package.py | var/spack/repos/builtin/packages/soapdenovo-trans/package.py | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class SoapdenovoTrans(MakefilePackage):
"""SOAPdenovo-Trans is a de novo transcriptome assembler basing on the
SOAPdenovo framework, adapt to alternative splicing and different
expression level among transcripts."""
homepage = "http://soap.genomics.org.cn/SOAPdenovo-Trans.html"
url = "https://github.com/aquaskyline/SOAPdenovo-Trans/archive/1.0.4.tar.gz"
version('1.0.4', 'a3b00b0f743b96141c4d5f1b49f2918c')
build_directory = 'src'
def edit(self, spec, prefix):
with working_dir(self.build_directory):
makefile = FileFilter('Makefile')
makefile.filter('CFLAGS= -O3 -fomit-frame-pointer -static',
'CFLAGS= -O3 -fomit-frame-pointer')
def build(self, spec, prefix):
with working_dir(self.build_directory):
make()
make('127mer=1', parallel=False)
def install(self, spec, prefix):
install_tree('.', prefix.bin)
| Python | 0 | |
402004b1a0612e5b4eeb703f3787dd1b7f3def30 | make auto migration | yandex_kassa/migrations/0004_auto_20151209_0940.py | yandex_kassa/migrations/0004_auto_20151209_0940.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('yandex_kassa', '0003_auto_20151116_1530'),
]
operations = [
migrations.AlterModelOptions(
name='payment',
options={'ordering': ('-created',), 'verbose_name': '\u043f\u043b\u0430\u0442\u0435\u0436', 'verbose_name_plural': '\u041f\u043b\u0430\u0442\u0435\u0436\u0438'},
),
migrations.AlterField(
model_name='payment',
name='scid',
field=models.PositiveIntegerField(default=528277, verbose_name=b'\xd0\x9d\xd0\xbe\xd0\xbc\xd0\xb5\xd1\x80 \xd0\xb2\xd0\xb8\xd1\x82\xd1\x80\xd0\xb8\xd0\xbd\xd1\x8b'),
),
migrations.AlterField(
model_name='payment',
name='shop_id',
field=models.PositiveIntegerField(default=104674, verbose_name=b'ID \xd0\xbc\xd0\xb0\xd0\xb3\xd0\xb0\xd0\xb7\xd0\xb8\xd0\xbd\xd0\xb0'),
),
]
| Python | 0.000001 | |
7ec4133b11ba91541e9ec9895e39a2c402c63087 | define the AVB loss separately | avb/models/avb_loss.py | avb/models/avb_loss.py | import keras.backend as ker
from keras.layers import Layer
from keras.losses import categorical_crossentropy
class AVBLossLayer(Layer):
def __init__(self, **kwargs):
self.is_placeholder = True
super(AVBLossLayer, self).__init__(**kwargs)
@staticmethod
def avb_loss(discrim_output_posterior, discrim_output_posterior_prior, data_log_probs):
# 1/m * sum_{i=1}^m log p(x_i|z), where z = encoder(x_i, epsilon_i)
reconstruction_log_likelihood = ker.mean(ker.sum(data_log_probs, axis=1))
# The decoder tries to maximise the reconstruction data log-likelihood
decoder_loss = -reconstruction_log_likelihood
# The encoder tries to minimize the discriminator output
encoder_loss = ker.mean(discrim_output_posterior)
# The dicriminator loss is the GAN loss with input from the prior and posterior distributions
discriminator_loss = ker.mean(categorical_crossentropy(y_true=ker.ones_like(discrim_output_posterior),
y_pred=discrim_output_posterior)
+ categorical_crossentropy(y_true=ker.zeros_like(discrim_output_posterior_prior),
y_pred=discrim_output_posterior_prior))
return ker.mean(encoder_loss + decoder_loss + discriminator_loss)
def call(self, inputs, **kwargs):
discrim_output_posterior, discrim_output_prior, decoder_output_log_probs = inputs
loss = self.avb_loss(discrim_output_posterior, discrim_output_prior, decoder_output_log_probs)
self.add_loss(loss, inputs=inputs)
# unused output
return inputs[0]
| Python | 0 | |
0c3b3ff095af2ccf6c3891a99170c982b1639f4e | test pickle retention adapted. | test/test_module_pickle_retention.py | test/test_module_pickle_retention.py | #!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
import os
from shinken_test import *
sys.path.append("../shinken/modules")
import pickle_retention_file_scheduler
from pickle_retention_file_scheduler import *
from module import Module
modconf = Module()
modconf.module_name = "PickleRetention"
modconf.module_type = pickle_retention_file_scheduler.properties['type']
modconf.properties = pickle_retention_file_scheduler.properties.copy()
class TestConfig(ShinkenTest):
#setUp is in shinken_test
#Change ME :)
def test_pickle_retention(self):
print self.conf.modules
#get our modules
mod = None
#mod = Module({'type' : 'pickle_retention_file', 'module_name' : 'PickleRetention', 'path' : 'tmp/retention-test.dat'})
mod = pickle_retention_file_scheduler.Pickle_retention_scheduler(modconf, 'tmp/retention-test.dat')
try :
os.unlink(mod.path)
except :
pass
sl = get_instance(mod)
print "Instance", sl
#Hack here :(
sl.properties = {}
sl.properties['to_queue'] = None
sl.init()
l = logger
#updte the hosts and service in the scheduler in the retentino-file
sl.update_retention_objects(self.sched, l)
#Now we change thing
svc = self.sched.hosts.find_by_name("test_host_0")
self.assert_(svc.state == 'PENDING')
print "State", svc.state
svc.state = 'UP' #was PENDING in the save time
r = sl.load_retention_objects(self.sched, l)
self.assert_(r == True)
#search if the host is not changed by the loading thing
svc2 = self.sched.hosts.find_by_name("test_host_0")
self.assert_(svc == svc2)
self.assert_(svc.state == 'PENDING')
#Ok, we can delete the retention file
os.unlink(mod.path)
# Now make real loops with notifications
self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
#updte the hosts and service in the scheduler in the retentino-file
sl.update_retention_objects(self.sched, l)
r = sl.load_retention_objects(self.sched, l)
self.assert_(r == True)
if __name__ == '__main__':
unittest.main()
| #!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
# Gabes Jean, naparuba@gmail.com
# Gerhard Lausser, Gerhard.Lausser@consol.de
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.
#
# This file is used to test reading and processing of config files
#
#It's ugly I know....
import os
from shinken_test import *
sys.path.append("../shinken/modules")
from pickle_retention_file_scheduler import *
class TestConfig(ShinkenTest):
#setUp is in shinken_test
#Change ME :)
def test_pickle_retention(self):
print self.conf.modules
#get our modules
mod = None
mod = Module({'type' : 'pickle_retention_file', 'module_name' : 'PickleRetention', 'path' : 'tmp/retention-test.dat'})
try :
os.unlink(mod.path)
except :
pass
sl = get_instance(mod)
print "Instance", sl
#Hack here :(
sl.properties = {}
sl.properties['to_queue'] = None
sl.init()
l = logger
#updte the hosts and service in the scheduler in the retentino-file
sl.update_retention_objects(self.sched, l)
#Now we change thing
svc = self.sched.hosts.find_by_name("test_host_0")
self.assert_(svc.state == 'PENDING')
print "State", svc.state
svc.state = 'UP' #was PENDING in the save time
r = sl.load_retention_objects(self.sched, l)
self.assert_(r == True)
#search if the host is not changed by the loading thing
svc2 = self.sched.hosts.find_by_name("test_host_0")
self.assert_(svc == svc2)
self.assert_(svc.state == 'PENDING')
#Ok, we can delete the retention file
os.unlink(mod.path)
# Now make real loops with notifications
self.scheduler_loop(10, [[svc, 2, 'CRITICAL | bibi=99%']])
#updte the hosts and service in the scheduler in the retentino-file
sl.update_retention_objects(self.sched, l)
r = sl.load_retention_objects(self.sched, l)
self.assert_(r == True)
if __name__ == '__main__':
unittest.main()
| Python | 0 |
fd54c28be8d9ffd7e5711035bf5b5e1b7fe332cc | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/ab2e190c2bfe60b3b738c125ca9db1a2785cdcaa. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "ab2e190c2bfe60b3b738c125ca9db1a2785cdcaa"
TFRT_SHA256 = "b097063dd10c010e827e58cc8e5a0e4008d99bcba1dcb20259c8ef890620b9b5"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "285e48bc47db23a479637fd1e2767b9a35dc2c9b"
TFRT_SHA256 = "6f0067d0cb7bb407caeef060603b6e33f1231cddf1ce4ce2ebce027dc418764f"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| Python | 0.000003 |
c9c00a6a5ab267ab56dd147e6542cae6566061d8 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/dc109b725d8f36f8c7db7847f0c95a819c43f9e9. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "dc109b725d8f36f8c7db7847f0c95a819c43f9e9"
TFRT_SHA256 = "e6a6359ecd731f7208f32402fac9bf874b26855497c0252fcddc44e5133320df"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "4bcf968d66a6bb2899b9d99917b916f6ec04c327"
TFRT_SHA256 = "9bd2cc2e7003f73f767e138ae4776b43d15ca286f0f85ad374ec5f8aaeab1aa4"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0.000001 |
d81a2b0328c86165b09c2d41aa2a4684c75388cd | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/78537f15f4873bbed59258bed4442225303f462a. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "78537f15f4873bbed59258bed4442225303f462a"
TFRT_SHA256 = "87526ed2a287d7809b2cadf82f9db94994b0019635d431f2fc9c3db2bd4a31cc"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "a2f5e07760d2a888370d0686546b757ee9628494"
TFRT_SHA256 = "70653b94faa603befef83457482c8a1151fa529b3215124e18a0f97592d5ad05"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0 |
8c1b20941c1216bb56fa55fe881962d2ea883366 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/c68238f982305e3618a2b5347e1e0a5663898c90. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "c68238f982305e3618a2b5347e1e0a5663898c90"
TFRT_SHA256 = "b28ed95058c101a9d3203ddbaa271044de984f6b49c5609124e1cb4ae0b3e165"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "377c20166e8e1b5124493c1433b1df34ca62cf3f"
TFRT_SHA256 = "f0c3c03e7d9ca2e10c3256f28bf9c0aa0aa26d9aa4da539c00532ee5217ba7ba"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0.000002 |
5a8fde172f0fc7aff841e8059927ff126712b321 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/feffe7beb261f6dfe9af083e8f46dfea293ded54. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "feffe7beb261f6dfe9af083e8f46dfea293ded54"
TFRT_SHA256 = "830492c8a9884e5ca84b15a4da953491f74b2ffbd45656352d58b624e881b9b7"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "509cf2f10beb666002ece6a7b968fe2c7c0c1e4b"
TFRT_SHA256 = "14b22d39d3eebcf255e4dd8ee8630b4da3ecc786f5053adf9c94a2e42362ee0c"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
| Python | 0.000005 |
e42862ce7bde45e90bec0980f3c35c5cef5c65b6 | Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/47a1de40f17e70f901238edfe99dc510a5db797a. | third_party/tf_runtime/workspace.bzl | third_party/tf_runtime/workspace.bzl | """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "47a1de40f17e70f901238edfe99dc510a5db797a"
TFRT_SHA256 = "87631491c3fdd34b4d00b6999274468b89a98f23113aeafa15b53c3a7517fc36"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| """Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "033f079420053002701271e4173bdcaf21bd1b73"
TFRT_SHA256 = "15c1c5a3617b91322d4ef96ce884676d27164cf94211f83bc1fcec50ab96aad4"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = [
"http://mirror.tensorflow.org/github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
"https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT),
],
)
| Python | 0.000002 |
5ab54cf353cece6a8754a1869d8f342ba0a8b351 | Add a script to find and compare F* execution times | tools/scripts/collect-fstar-times.py | tools/scripts/collect-fstar-times.py | #!/usr/bin/python
import argparse
import os
import glob
import re
import time
import sys
import fnmatch
import pickle
from prettytable import PrettyTable # Install via: easy_install PrettyTable
def find_fstar_output_files(directory):
matches = []
extensions = ["vfsti", "vfst"]
# Based on: https://stackoverflow.com/a/2186565
for root, dirnames, filenames in os.walk(directory):
for ext in extensions:
for filename in fnmatch.filter(filenames, '*.' + ext):
matches.append(os.path.join(root, filename))
return matches
def parse_fstar_output(filename):
time = 0
found = False
with open(filename, "r") as f:
for line in f.readlines():
result = re.search("Verified.*\((\d+) milliseconds\)", line)
if result:
time += int(result.group(1))
found = True
if found:
return time
else:
return None
def collect_times_dir(d):
files = find_fstar_output_files(d)
times = {}
for f in files:
times[f] = parse_fstar_output(f)
return times
def collect_times(directories):
times = {}
for d in directories:
times.update(collect_times_dir(d))
return times
def display_times(times):
tab = PrettyTable(["Filename", "Time", "Full Path"])
tab.align["Filename"] = "l"
tab.align["Time"] = "r"
tab.align["FullPath"] = "l"
total_time = 0
for f in sorted(times.keys()):
filename = os.path.basename(f)
tab.add_row([filename, times[f], f])
if not times[f] is None:
total_time += times[f]
tab.add_row(["", "", ""])
tab.add_row(["Total", total_time, ""])
print(tab)
def store_times(times, label):
pickle_file = "times." + label + ".pickle"
if not os.path.isfile(pickle_file):
with open(pickle_file, "wb") as pickler:
pickle.dump(times, pickler)
else:
print "WARNING: Found existing pickled file %s. No data written. Consider moving or deleting it." % pickle_file
def load_times(filename):
with open(filename, "rb") as pickler:
return pickle.load(pickler)
def compute_diff(times1, times2):
diffs = {}
for f,t in times1.items():
if f in times2 and not t is None:
diffs[f] = t - times2[f]
return diffs
def display_diffs(times, diffs):
tab = PrettyTable(["Filename", "t1 time", "delta", "delta \%","Full Path"])
tab.align["Filename"] = "l"
tab.align["t1 time"] = "r"
tab.align["delta"] = "r"
tab.align["delta \%"] = "r"
tab.align["FullPath"] = "l"
tab.sortby = "delta"
total_time = 0
total_delta = 0
for f in sorted(times.keys()):
filename = os.path.basename(f)
delta = "n/a"
delta_percent = "n/a"
if f in diffs:
delta = diffs[f]
delta_percent = "%0.1f" % (delta / float(times[f]))
tab.add_row([filename, times[f], delta, delta_percent, f])
if not times[f] is None:
total_time += times[f]
total_delta += delta
tab.add_row(["", "", "", "", ""])
#tab.add_row(["Total", total_time, total_delta, total_delta / float(total_time), ""])
print(tab)
def main():
parser = argparse.ArgumentParser(description= 'Collect and summarize F* verification times')
parser.add_argument('--dir', action='append', required=False, help='Collect all results in this folder and its subfolders')
parser.add_argument('--label', action='store', required=False, help='Label for file containing the results')
parser.add_argument('--t1', action='store', required=False, help='File of times to compare to t2')
parser.add_argument('--t2', action='store', required=False, help='File of times to compare to t1')
args = parser.parse_args()
if (not args.dir is None) and (not args.label is None):
times = collect_times(args.dir)
display_times(times)
store_times(times, args.label)
sys.exit(0)
if (not args.t1 is None) and (not args.t2 is None):
times1 = load_times(args.t1)
times2 = load_times(args.t2)
diffs = compute_diff(times1, times2)
display_diffs(times1, diffs)
sys.exit(0)
print("Invalid or insufficient arguments supplied. Try running with -h")
if (__name__=="__main__"):
main()
| Python | 0 | |
a6cc742a7272d1138031e26c61fd10617e6b0ac1 | Initialize transpositionTest | books/CrackingCodesWithPython/Chapter09/transpositionTest.py | books/CrackingCodesWithPython/Chapter09/transpositionTest.py | # Transposition Cipher Test
# https://www.nostarch.com/crackingcodes/ (BSD Licensed)
import random, sys, transpositionEncrypt, transpositionDecrypt
def main():
random.seed(42) # Set the random "seed" to a static value.
for i in range(20): # Run 20 tests.
# Generate random messages to test.
# The message will have a random length:
message = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' * random.randint(4, 40)
# Convert the message string to a list to shuffle it:
message = list(message)
random.shuffle(message)
message = ''.join(message) # Convert the list back to a string.
print('Test #%s: %s..."' % (i + 1, message[:50]))
# Check all possible keys for each message:
for key in range(1, int(len(message)/2)):
encrypted = transpositionEncrypt.encryptMessage(key, message)
decrypted = transpositionDecrypt.decryptMessage(key, encrypted)
# If the decryption doesn't match the original message, display
# an error message and quit:
if message != decrypted:
print('Mismatch with key %s and message %s.' % (key, message))
print('Decrypted as: ' + decrypted)
sys.exit()
print('Transposition cipher test passed.')
# If transpositionTest.py is run (instead of imported as a module) call
# the main() function:
if __name__ == '__main__':
main()
| Python | 0.000001 | |
1c7daf0bd9801885d7740620b3e81faa03ce49d4 | add sign/verify json tests | test/crypto/olm_device_test.py | test/crypto/olm_device_test.py | from copy import deepcopy
from matrix_client.client import MatrixClient
from matrix_client.crypto.olm_device import OlmDevice
HOSTNAME = 'http://example.com'
class TestOlmDevice:
cli = MatrixClient(HOSTNAME)
user_id = '@user:matrix.org'
device_id = 'QBUAZIFURK'
device = OlmDevice(cli.api, user_id, device_id)
signing_key = device.olm_account.identity_keys['ed25519']
def test_sign_json(self):
example_payload = {
"name": "example.org",
"unsigned": {
"age_ts": 922834800000
}
}
saved_payload = deepcopy(example_payload)
signed_payload = self.device.sign_json(example_payload)
signature = signed_payload.pop('signatures')
# We should not have modified the payload besides the signatures key
assert example_payload == saved_payload
key_id = 'ed25519:' + self.device_id
assert signature[self.user_id][key_id]
def test_verify_json(self):
example_payload = {
"test": "test",
"unsigned": {
"age_ts": 922834800000
},
"signatures": {
"@user:matrix.org": {
"ed25519:QBUAZIFURK": ("WI7TgwqTp4YVn1dFWmDu7xrJvEikEzAbmoqyM5JY5t0P"
"6fVaiMFAirmwb13GzIyYDLR+nQfoksNBcrp7xSaMCA")
}
}
}
saved_payload = deepcopy(example_payload)
signing_key = "WQF5z9b4DV1DANI5HUMJfhTIDvJs1jkoGTLY6AQdjF0"
assert self.device.verify_json(example_payload, signing_key, self.user_id,
self.device_id)
# We should not have modified the payload
assert example_payload == saved_payload
# Try to verify an object that has been tampered with
example_payload['test'] = 'test1'
assert not self.device.verify_json(example_payload, signing_key, self.user_id,
self.device_id)
# Try to verify invalid payloads
example_payload['signatures'].pop(self.user_id)
assert not self.device.verify_json(example_payload, signing_key, self.user_id,
self.device_id)
example_payload.pop('signatures')
assert not self.device.verify_json(example_payload, signing_key, self.user_id,
self.device_id)
def test_sign_verify(self):
example_payload = {
"name": "example.org",
}
signed_payload = self.device.sign_json(example_payload)
assert self.device.verify_json(signed_payload, self.signing_key, self.user_id,
self.device_id)
| Python | 0.000001 | |
c4ffd77a56e09f3b418e6d13e8339fe693fffbdb | add fasd_cleanup script | misc/fasd_clean.py | misc/fasd_clean.py | #/usr/bin/env python
# Copyright (C) 2015 Ratheesh S<ratheeshreddy@gmail.com>
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
import os
db_file = "/home/ratheesh/.fasd"
purged_items = 0
try:
f = open(db_file, "r+")
except IOError:
print 'ERROR: No File found: %s' % db_file
exit(1)
d = f.readlines()
f.close()
try:
f = open(db_file, "w+")
except IOError:
print 'ERROR: No File found: %s' % db_file
exit(1)
print "Cleaning fasd database ..."
for i in d:
path, sep, misc = i.partition('|')
if os.path.exists(path):
f.write(i)
else:
print 'Removing %s' % path
purged_items += 1 # increment purged items
f.close()
if purged_items == 0:
print "fasd database is clean!"
else:
print "---------------------------------------"
print "No. of Purged Items: %d" % purged_items
# End of File
| Python | 0 | |
2a45679c02e74ce7a63e259b1475d4190086084e | Add errors to zombase | zombase/errors.py | zombase/errors.py | # -*- coding: utf-8 -*-
class ZombaseRuntimeError(Exception):
pass
| Python | 0.000001 | |
629c9e330e6114680f22af125252d95fb6989201 | update migrations for link manager | webquills/linkmgr/migrations/0002_alter_linkcategory_site.py | webquills/linkmgr/migrations/0002_alter_linkcategory_site.py | # Generated by Django 3.2 on 2021-06-07 11:11
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('wqsites', '0001_initial'),
('linkmgr', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='linkcategory',
name='site',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='link_lists', to='wqsites.site', verbose_name='site'),
),
]
| Python | 0 | |
24e2ddfd49aa2c05879460baeb67ed6cc75ffa87 | fix benchmark script | benchmark/benchmark.py | benchmark/benchmark.py | import pyqg
import time
import cProfile
import pstats
import numpy as np
tmax = 8000*1000
dtfac = 64 * 8000.
mynx = [32, 64, 128, 256, 512, 1024, 2048]
mynth = [1,2,4,8,16,32]
res = np.zeros((len(mynx), 5))
print 'nx, threads, timesteps, time'
for j, nx in enumerate(mynx):
dt = dtfac / nx
#for i, (use_fftw, nth) in enumerate([(False, 1), (True, 1),
# (True, 2), (True, 4), (True, 8)]):
for i, nth in enumerate(mynth):
m = pyqg.QGModel(nx=nx, tmax=tmax, dt=dt, ntd=nth,
# no output
twrite=np.inf,
# no time average
taveint=np.inf,)
tic = time.time()
m.run()
toc = time.time()
tottime = toc-tic
#res[j,i] = tottime
#print 'nx=%3d, fftw=%g, threads=%g: %g' % (nx, use_fftw, nth, tottime)
print '%3d, %3d, %8d, %10.4f' % (nx, nth, m.tc, tottime)
# # profiling
# prof = cProfile.Profile()
# prof.run('m.run()')
# p = pstats.Stats(prof)
# p.sort_stats('cum').print_stats(0.3)
| from pyqg import qg_model, model
import time
import cProfile
import pstats
import numpy as np
# Earlier revision of the benchmark (the commit message says it was fixed).
tmax = 104000000
dtfac = (64 * 8000.)
mynx = [32, 64, 128, 256]
res = np.zeros((len(mynx), 5))
for j, nx in enumerate(mynx):
    dt = dtfac / nx
    for i, (use_fftw, nth) in enumerate([(False, 1), (True, 1),
                            (True, 2), (True, 4), (True, 8)]):
        # NOTE(review): nx=64 is hard-coded, so the loop variable nx only
        # affects dt -- this looks like the bug the newer revision fixes.
        m = qg_model.QGModel(nx=64, tmax=tmax, dt=dt,
                use_fftw=use_fftw, ntd=nth)
        tic = time.time()
        m.run()
        toc = time.time()
        tottime = toc-tic
        res[j,i] = tottime
        print 'nx=%3d, fftw=%g, threads=%g: %g' % (nx, use_fftw, nth, tottime)
# # profiling
# prof = cProfile.Profile()
# prof.run('m.run()')
# p = pstats.Stats(prof)
# p.sort_stats('cum').print_stats(0.3)
| Python | 0.000007 |
078727dcaba9f7861f84ab7ef61e653f28253226 | add script | mdm_vendor_sign.py | mdm_vendor_sign.py | # This is based loosely on Softthinker's java code found here
# http://www.softhinker.com/in-the-news/iosmdmvendorcsrsigning
# fuck java
import argparse
from plistlib import writePlistToString
import os
import subprocess
from base64 import b64encode
import sys
import urllib2
def p(s):
    """Write *s* to stdout and flush immediately (unbuffered progress output)."""
    out = sys.stdout
    out.write(s)
    out.flush()
def mdm_vendor_sign():
    """
    This utility will create a properly encoded certifiate signing request
    that you can upload to identity.apple.com/pushcert
    """
    # NOTE: Python 2 script (print statement, urllib2, plistlib string API).
    # The docstring above doubles as the argparse description, so it is
    # runtime-visible text.
    parser = argparse.ArgumentParser(description=mdm_vendor_sign.__doc__)
    parser.add_argument('--key', help='Private key', required=True)
    parser.add_argument('--csr', help='Certificate signing request', required=True)
    parser.add_argument('--mdm', help='MDM vendor certificate', required=True)
    parser.add_argument('--out', help='Output filename', required=False)
    cli_args = vars(parser.parse_args())
    # Verify CSR
    # openssl req -text -noout -verify -in CSR.csr
    p('Verifying %s ... ' % cli_args['csr'])
    csr_file = open(cli_args['csr']).read()
    args = ['openssl', 'req', '-noout', '-verify' ]
    # Each openssl step below is driven via stdin/stdout pipes; stderr is
    # merged into stdout so success is detected from the combined text.
    command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, error = command.communicate(input = csr_file)
    # openssl prints 'verify OK' as the first line when the CSR is well-formed
    if output.rstrip().split('\n')[0] == 'verify OK':
        p('OK\n')
    else:
        p('FAILED\n')
        return
    # Verify private key
    # openssl rsa -in privateKey.key -check
    p('Verifying %s ... ' % cli_args['key'])
    key_file = open(cli_args['key']).read()
    args = ['openssl', 'rsa', '-check', '-noout' ]
    command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, error = command.communicate(input = key_file)
    if output.rstrip().split('\n')[0] == 'RSA key ok':
        p('OK\n')
    else:
        p('FAILED\n\n')
        print """If you don't have the plain private key already, you need
to extract it from the pkcs12 file...
First convert to PEM
    openssl pkcs12 -in filename.p12 -nocerts -out key.pem
Then export the certificate file from the pfx file
    openssl pkcs12 -in filename.pfx -clcerts -nokeys -out cert.pem
Lastly Remove the passphrase from the private key
    openssl rsa -in key.pem -out the_private_key.key
"""
        return
    # Verify MDM vendor certificate
    # openssl x509 -noout -in mdm.cer -inform DER
    p('Verifying %s ... ' % cli_args['mdm'])
    mdm_cert_file = open(cli_args['mdm']).read()
    args = ['openssl', 'x509', '-noout', '-inform', 'DER' ]
    command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, error = command.communicate(input = mdm_cert_file)
    # -noout on a valid DER cert produces no output; any text means failure
    if len(output) == 0:
        p('OK\n')
    else:
        p('FAILED\n')
        return
    # Convert CSR to DER format
    # openssl req -inform pem -outform der -in customer.csr -out customer.der
    p('Converting %s to DER format... ' % cli_args['csr'])
    args = ['openssl', 'req', '-inform', 'pem', '-outform', 'der' ]
    command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, error = command.communicate(input = csr_file)
    if error:
        p('FAILED\n')
        return
    p('OK\n')
    csr_der = output
    csr_b64 = b64encode(csr_der)
    # Sign the CSR with the private key
    # openssl sha1 -sign private_key.key -out signed_output.rsa data_to_sign.txt
    p('Signing CSR with private key... ')
    args = ['openssl', 'sha1', '-sign', cli_args['key'] ]
    command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
    output, error = command.communicate(input = csr_der)
    if error:
        p('FAILED\n')
        return
    p('OK\n')
    signature_bytes = output
    signature = b64encode(signature_bytes)
    def cer_to_pem(cer_data):
        # openssl x509 -inform der -in mdm.cer -out mdm.pem
        # -in and -out flags are handled by STDIN and STDOUT
        args = ['openssl', 'x509', '-inform', 'der' ]
        command = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
        output, error = command.communicate(input = cer_data)
        if error:
            p('Error converting from cer to pem: %s' % error)
        return output
    # TODO : Probably should verify these too
    p('Downloading WWDR intermediate certificate...')
    intermediate_cer = urllib2.urlopen('https://developer.apple.com/certificationauthority/AppleWWDRCA.cer').read()
    p(' converting to pem...')
    intermediate_pem = cer_to_pem(intermediate_cer)
    p('OK\n')
    p('Downloading Apple Root Certificate...')
    root_cer = urllib2.urlopen('http://www.apple.com/appleca/AppleIncRootCertificate.cer').read()
    p(' converting to pem...')
    root_pem = cer_to_pem(root_cer)
    p('OK\n')
    mdm_pem = cer_to_pem(mdm_cert_file)
    p('Finishing...')
    # Assemble the plist Apple's push-cert portal expects: the signed CSR
    # plus the full certificate chain (vendor -> WWDR -> Apple root).
    plist_dict = dict(
        PushCertRequestCSR = csr_b64,
        PushCertCertificateChain = mdm_pem + intermediate_pem + root_pem,
        PushCertSignature = signature
    )
    plist_xml = writePlistToString(plist_dict)
    plist_b64 = b64encode(plist_xml)
    output_filename = cli_args['out'] if cli_args['out'] else 'plist_encoded'
    write_path = os.path.join(os.getcwd(), output_filename)
    output = open(write_path, 'wb')
    output.write(plist_b64)
    output.close()
    p('DONE\n\nGo upload file \'%s\' to identity.apple.com/pushcert !\n' % output_filename)
# Script entry point: parse CLI args and build the upload file.
if __name__=="__main__":
    mdm_vendor_sign()
| Python | 0.000001 | |
2347ee253f04fa87b28206b0ec00fd2a3fffb49f | Create hello_market_maker.py | hello_market_maker.py | hello_market_maker.py | class hello_market_maker():
    def __init__(self, anchor_price, tick_increment, max_pos):
        """Quote a two-sided market around anchor_price.

        tick_increment is the price step used when re-quoting; max_pos caps
        the absolute inventory before quoting switches to exit mode.
        """
        self.anchor_price = anchor_price
        self.tick_increment = tick_increment
        # signed inventory: +1 per bid fill, -1 per ask fill
        self.position = 0
        # hard price band; quotes are dragged along when a trade leaves it
        self.upper_bound = anchor_price + ((max_pos + 1) * tick_increment)
        self.lower_bound = anchor_price - ((max_pos + 1) * tick_increment)
        self.max_pos = max_pos
        # inside_market is defined elsewhere -- holds the current bid/ask quotes
        self.mkt = inside_market(anchor_price - tick_increment, anchor_price + tick_increment)
    def on_bid_fill(self):
        """Handle a fill of our bid: take on inventory and re-quote lower."""
        # modify current bid and ask down 1 tick_increment
        #self.mkt.shift(-self.tick_increment)
        self.position += 1
        price = self.mkt.bid.price
        if self.position < self.max_pos:
            self.mkt.shift(-self.tick_increment)
        else:
            # at max long inventory: quote to exit instead of adding more
            # (BID is expected to be a module-level constant defined elsewhere)
            self.mkt.exit(BID, self.tick_increment)
        # NOTE(review): the comma makes this a ("BID_FILL @ ", price) tuple --
        # confirm whether string formatting was intended instead.
        return "BID_FILL @ ", price
    def on_ask_fill(self):
        """Handle a fill of our ask: shed inventory and re-quote higher."""
        # modify current bid and ask up 1 tick_increment
        #self.mkt.shift(-self.tick_increment)
        self.position -= 1
        price = self.mkt.ask.price
        if self.position > -self.max_pos:
            self.mkt.shift(self.tick_increment)
        else:
            # at max short inventory: quote to exit instead of adding more
            # (ASK is expected to be a module-level constant defined elsewhere)
            self.mkt.exit(ASK, self.tick_increment)
        # NOTE(review): returns a ("ASK_FILL @ ", price) tuple, same as
        # on_bid_fill -- confirm intent.
        return "ASK_FILL @ ", price
    def evaluate(self, trade_price):
        """Feed one trade print through the quotes; return (fill, price).

        fill/price come from the quote book's own evaluate(); the fill
        handlers re-quote as a side effect and their return values are
        discarded here.
        """
        fill, price = self.mkt.evaluate(trade_price)
        self.adjust_bounds(trade_price)
        if fill == BID:
            self.on_bid_fill()
        elif fill == ASK:
            self.on_ask_fill()
        else:
            filler = 0  # no fill: placeholder statement, nothing to do
        return fill, price
    def adjust_bounds(self, trade_price):
        """Drag the quotes and the price band one tick when a trade escapes it."""
        # NOTE: Python 2 print statements below.
        if trade_price > self.upper_bound:
            self.mkt.shift(self.tick_increment)
            self.upper_bound += self.tick_increment
            self.lower_bound += self.tick_increment
            print "ADJUSTING UP"
        elif trade_price < self.lower_bound:
            self.mkt.shift(-self.tick_increment)
            self.upper_bound -= self.tick_increment
            self.lower_bound -= self.tick_increment
            print "ADJUSTING DOWN"
| Python | 0.999986 | |
819a47ce69164aa48f3b68e9ab997f6ee90e2292 | Add a index stats tool | index-stats.py | index-stats.py | """
This script prints some basic collection stats about the size of the
collections and their indexes.
"""
from prettytable import PrettyTable
import psutil
from pymongo import Connection
from pymongo import ReadPreference
connection = Connection(read_preference=ReadPreference.SECONDARY)
def compute_signature(index):
    """Build a signature string for an index record: ns plus key_direction pairs."""
    parts = [index["ns"]]
    for field, direction in index["key"].items():
        parts.append("%s_%s" % (field, direction))
    return "".join(parts)
def get_collection_stats(database, collection):
    """Return the raw MongoDB `collstats` command output for one collection."""
    # NOTE: Python 2 print statement.
    print "Checking DB: %s" % collection.full_name
    return database.command("collstats", collection.name)
# From http://www.5dollarwhitebox.org/drupal/node/84
def convert_bytes(bytes):
    """Format a byte count as a human-readable string (b, K, M, G or T)."""
    amount = float(bytes)
    units = ((1099511627776, 'T'), (1073741824, 'G'),
             (1048576, 'M'), (1024, 'K'))
    for threshold, suffix in units:
        if amount >= threshold:
            return '%.2f%s' % (amount / threshold, suffix)
    return '%.2fb' % amount
# Aggregate totals across every non-local database (Python 2 script; uses
# pymongo, prettytable and psutil imported above).
summary_stats = {
    "count" : 0,
    "size" : 0,
    "indexSize" : 0
}
all_stats = []
all_db_stats = {}
for db in connection.database_names():
    # FIXME: Add an option to include oplog stats.
    if db == "local":
        continue
    database = connection[db]
    all_db_stats[database.name] = []
    for collection_name in database.collection_names():
        stats = get_collection_stats(database, database[collection_name])
        all_stats.append(stats)
        all_db_stats[database.name].append(stats)
        summary_stats["count"] += stats["count"]
        summary_stats["size"] += stats["size"]
        summary_stats["indexSize"] += stats.get("totalIndexSize", 0)
x = PrettyTable(["Collection", "Index","% Size", "Index Size"])
x.set_field_align("Collection", "l")
x.set_field_align("Index", "l")
x.set_field_align("% Size", "r")
x.set_field_align("Index Size", "r")
x.set_padding_width(1)
print
index_size_mapping = {}
for db in all_db_stats:
    db_stats = all_db_stats[db]
    count = 0  # NOTE(review): accumulated but never used
    for stat in db_stats:
        count += stat["count"]
        for index in stat["indexSizes"]:
            index_size = stat["indexSizes"].get(index, 0)
            row = [stat["ns"], index,
                   "%0.1f%%" % ((index_size / float(stat["totalIndexSize"])) * 100),
                   convert_bytes(index_size)]
            # NOTE(review): keyed by size, so two indexes with identical sizes
            # overwrite each other in the "top 5" table below.
            index_size_mapping[index_size] = row
            x.add_row(row)
print "Index Overview"
x.printt(sortby="Collection")
print
print "Top 5 Largest Indexes"
x = PrettyTable(["Collection", "Index","% Size", "Index Size"])
x.set_field_align("Collection", "l")
x.set_field_align("Index", "l")
x.set_field_align("% Size", "r")
x.set_field_align("Index Size", "r")
x.set_padding_width(1)
top_five_indexes = sorted(index_size_mapping.keys(), reverse=True)[0:5]
for size in top_five_indexes:
    x.add_row(index_size_mapping.get(size))
x.printt()
print
print "Total Documents:", summary_stats["count"]
print "Total Data Size:", convert_bytes(summary_stats["size"])
print "Total Index Size:", convert_bytes(summary_stats["indexSize"])
ram_headroom = psutil.phymem_usage()[0] - summary_stats["indexSize"]
print "RAM Headroom:", convert_bytes(ram_headroom)
print "RAM Used: %s (%s%%)" % (convert_bytes(psutil.phymem_usage()[1]), psutil.phymem_usage()[3])
print "Available RAM Headroom:", convert_bytes((100 - psutil.phymem_usage()[3]) / 100 * ram_headroom)
| Python | 0 | |
372f4a988411e48a0c50cdc74fb2a7f4e5abf052 | Add a server identity test | tests/server-identity.py | tests/server-identity.py | import nose
import requests
import fixture
@nose.with_setup(fixture.start_tangelo, fixture.stop_tangelo)
def test_server_identity():
    """The root endpoint must report 'Tangelo' in its Server header."""
    headers = requests.get(fixture.url("/")).headers
    assert headers["server"] == "Tangelo"
| Python | 0.000001 | |
19db4647257617992e9b195828baf39907cc5db1 | Add tests for exit codes | tests/test_exit_codes.py | tests/test_exit_codes.py | """Check that the CLI returns the appropriate exit code."""
import subprocess
def test_exit_code_demo():
    """Ensure that linting the demo returns an exit code of 1."""
    try:
        subprocess.check_output("proselint --demo", shell=True)
    except subprocess.CalledProcessError as exc:
        assert exc.returncode == 1
    else:
        # The demo contains lint errors, so a zero exit status (no exception
        # raised) means the CLI failed to report them. The original test
        # passed vacuously in that case.
        assert False, "proselint --demo exited 0; expected exit code 1"
def test_exit_code_version():
    """Ensure that getting the version returns an exit code of 0."""
    try:
        subprocess.check_output("proselint --version", shell=True)
    except subprocess.CalledProcessError as exc:
        # Bind the exception so a failure reports the actual exit code
        # (the original bare assert(False) gave no diagnostic).
        assert False, "proselint --version exited %d; expected 0" % exc.returncode
| Python | 0 | |
704a979b3be5d7949dd17c640aa429c34f6163b5 | add gp_algebra test | tests/test_gp_algebra.py | tests/test_gp_algebra.py | import time
import pytest
import numpy as np
from flare import gp, gp_algebra
from flare.env import AtomicEnvironment
from flare.struc import Structure
from flare.mc_simple import two_plus_three_body_mc, \
two_plus_three_body_mc_grad
from flare.gp_algebra import get_like_grad_from_mats
def get_random_training_set(nenv):
    """Build a reproducible random training set of `nenv` atomic environments.

    Returns (hyps, training_data, training_labels, kernel, cutoffs) ready to
    feed the gp_algebra routines; kernel is a (kernel, kernel_grad) pair.
    """
    np.random.seed(0)  # fixed seed so every test sees identical data
    cutoffs = np.array([0.8, 0.8])
    hyps = np.ones(5, dtype=float)
    kernel = (two_plus_three_body_mc, two_plus_three_body_mc_grad)
    # create test data
    cell = np.eye(3)
    unique_species = [2, 1]
    noa = 5  # atoms per random structure
    training_data = []
    training_labels = []
    for idenv in range(nenv):
        positions = np.random.uniform(-1, 1, [noa,3])
        species = np.random.randint(0, len(unique_species), noa)
        struc = Structure(cell, species, positions)
        training_data += [AtomicEnvironment(struc, 1, cutoffs)]
        training_labels += [np.random.uniform(-1, 1, 3)]
    training_labels = np.hstack(training_labels)
    return hyps, training_data, training_labels, kernel, cutoffs
def test_ky_mat():
    """
    test function get_ky_mat in gp_algebra
    using gp_algebra_origin as reference

    TO DO: store the reference... and call it explicitely
    """
    hyps, training_data, training_labels, kernel, cutoffs = \
            get_random_training_set(10)
    func = [gp_algebra.get_ky_mat,
            gp_algebra.get_ky_mat_par]
    # get the reference (serial implementation)
    # timer0 = time.time()
    ky_mat0 = func[0](hyps, training_data,
                      kernel[0], cutoffs)
    # print("linear", time.time()-timer0)
    # parallel version must reproduce the serial matrix exactly
    ky_mat = func[1](hyps, training_data,
                     kernel[0], cutoffs,
                     ncpus=2)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"
def test_ky_and_hyp():
    """Parallel get_ky_and_hyp must reproduce the serial ky matrix exactly."""
    hyps, training_data, training_labels, kernel, cutoffs = \
            get_random_training_set(10)
    func = [gp_algebra.get_ky_and_hyp,
            gp_algebra.get_ky_and_hyp_par]
    hypmat_0, ky_mat0 = func[0](hyps, training_data,
                      kernel[1], cutoffs)
    # parallel version
    hypmat, ky_mat = func[1](hyps, training_data,
                     kernel[1], cutoffs, ncpus=2)
    diff = (np.max(np.abs(ky_mat-ky_mat0)))
    assert (diff==0), "parallel implementation is wrong"
def test_grad():
    """Check that the parallel likelihood/gradient matches the serial one.

    The original version called the serial get_ky_and_hyp for *both*
    computations, comparing the serial path against itself; the second
    computation now uses get_ky_and_hyp_par, mirroring test_ky_and_hyp.
    """
    hyps, training_data, training_labels, kernel, cutoffs = \
            get_random_training_set(10)

    # obtain reference with the serial implementation
    hyp_mat, ky_mat = gp_algebra.get_ky_and_hyp(hyps, training_data,
                                                kernel[1], cutoffs)
    like0, like_grad0 = \
        get_like_grad_from_mats(ky_mat, hyp_mat, training_labels)

    # parallel implementation
    hyp_mat, ky_mat = gp_algebra.get_ky_and_hyp_par(hyps, training_data,
                                                    kernel[1], cutoffs,
                                                    ncpus=2)
    like, like_grad = \
        get_like_grad_from_mats(ky_mat, hyp_mat, training_labels)

    assert (like == like0), "wrong likelihood"
    assert np.max(np.abs(like_grad - like_grad0)) == 0, \
        "wrong likelihood gradient"
def test_ky_hyp_grad():
    """Central finite-difference check of the analytic likelihood gradient."""
    hyps, training_data, training_labels, kernel, cutoffs = \
            get_random_training_set(10)
    func = gp_algebra.get_ky_and_hyp
    hyp_mat, ky_mat = func(hyps, training_data,
                           kernel[1], cutoffs)
    print(hyp_mat.shape, ky_mat.shape, len(training_labels), training_labels[0])
    like, like_grad = \
        get_like_grad_from_mats(ky_mat, hyp_mat, training_labels)
    delta = 0.001  # finite-difference step in hyperparameter space
    for i in range(len(hyps)):
        # perturb hyperparameter i by +delta and -delta, then compare the
        # central difference of the likelihood against the analytic gradient
        newhyps = np.copy(hyps)
        newhyps[i] += delta
        hyp_mat_p, ky_mat_p = func(newhyps, training_data,
                               kernel[1], cutoffs)
        like_p, like_grad_p = \
            get_like_grad_from_mats(ky_mat_p, hyp_mat_p, training_labels)
        newhyps[i] -= 2*delta
        hyp_mat_m, ky_mat_m = func(newhyps, training_data,
                               kernel[1], cutoffs)
        like_m, like_grad_m = \
            get_like_grad_from_mats(ky_mat_m, hyp_mat_m, training_labels)
        diff = np.abs(like_grad[i]-(like_p-like_m)/2./delta)
        assert (diff < 1e-3), "wrong calculation of hyp_mat"
| Python | 0.00218 | |
787298889fd85dffb597dee6571dead42227c7d6 | add test to validate generated stub constants.pyi | tests/test_type_stubs.py | tests/test_type_stubs.py | """Test type stubs for correctness where possible."""
import os
import sys
import pytest
import xmlsec
black = pytest.importorskip('black')
if sys.version_info >= (3, 4):
from pathlib import Path
else:
from _pytest.pathlib import Path
constants_stub_header = """
import sys
from typing import NamedTuple
if sys.version_info >= (3, 8):
from typing import Final, Literal
else:
from typing_extensions import Final, Literal
class __KeyData(NamedTuple): # __KeyData type
href: str
name: str
class __Transform(NamedTuple): # __Transform type
href: str
name: str
usage: int
"""
def gen_constants_stub():
    """
    Generate contents of the file:`xmlsec/constants.pyi`.

    Loads every public constant from :mod:`xmlsec.constants` at runtime and
    emits a ``name: Final = <repr>`` line for each, appended to the static
    stub header.
    """
    public_names = sorted(n for n in dir(xmlsec.constants) if not n.startswith('__'))
    stub_lines = [
        '{name}: Final = {obj!r}'.format(name=n, obj=getattr(xmlsec.constants, n))
        for n in public_names
    ]
    return constants_stub_header + os.linesep.join(stub_lines)
def test_xmlsec_constants_stub(request):
    """
    Generate the stub file for :mod:`xmlsec.constants` from existing code.

    Compare it against the existing stub :file:`xmlsec/constants.pyi`.
    """
    rootdir = Path(str(request.config.rootdir))
    stub = rootdir / 'src' / 'xmlsec' / 'constants.pyi'
    # Run black with the same settings used for the checked-in stub so the
    # generated text is byte-comparable.
    mode = black.FileMode(target_versions=[black.TargetVersion.PY38], line_length=130, is_pyi=True, string_normalization=False)
    formatted = black.format_file_contents(gen_constants_stub(), fast=False, mode=mode)
    assert formatted == stub.read_text()
| Python | 0 | |
f1e35886822a7ff7e7f19ef4f1db90c870e8d45d | Add file for remove nonterminal tests | tests/NonterminalRemoveTest.py | tests/NonterminalRemoveTest.py | #!/usr/bin/env python
"""
:Author Patrik Valkovic
:Created 23.06.2017 16:39
:Licence GNUv3
Part of grammpy
"""
from unittest import TestCase, main
from grammpy import Grammar
from grammpy import Nonterminal
class TempClass(Nonterminal):
    """Dummy nonterminal used as a term symbol in the tests below."""
    pass
class Second(Nonterminal):
    """Helper nonterminal; defined but not used in this file."""
    pass
class Third(Nonterminal):
    """Helper nonterminal; defined but not used in this file."""
    pass
class NonterminalRemoveTest(TestCase):
    """Exercises Grammar term removal: single value, class, multiple, all.

    NOTE(review): despite the file/class name mentioning nonterminals, every
    assertion uses the terminal API (add_term/remove_term/have_term) --
    confirm whether nonterminal coverage was intended.
    """
    def test_removeOne(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term(0)
        self.assertEqual(gr.terms_count(), 2)
        self.assertTrue(gr.have_term('asdf'))
        self.assertTrue(gr.have_term(TempClass))
        self.assertFalse(gr.have_term(0))
    def test_removeClass(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term(TempClass)
        self.assertEqual(gr.terms_count(), 2)
        self.assertTrue(gr.have_term('asdf'))
        self.assertTrue(gr.have_term(0))
        self.assertFalse(gr.have_term(TempClass))
    def test_removeTwo(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term(0)
        gr.remove_term('asdf')
        self.assertEqual(gr.terms_count(), 1)
        self.assertTrue(gr.have_term(TempClass))
        self.assertFalse(gr.have_term('asdf'))
        self.assertFalse(gr.have_term(0))
    def test_removeTwoInArray(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term([0, 'asdf'])
        self.assertEqual(gr.terms_count(), 1)
        self.assertTrue(gr.have_term(TempClass))
        self.assertFalse(gr.have_term('asdf'))
        self.assertFalse(gr.have_term(0))
    def test_removeTwoInTuple(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term((0, 'asdf'))
        self.assertEqual(gr.terms_count(), 1)
        self.assertTrue(gr.have_term(TempClass))
        self.assertFalse(gr.have_term('asdf'))
        self.assertFalse(gr.have_term(0))
    def test_removeAllWithoutParam(self):
        gr = Grammar()
        gr.add_term([0, 'asdf', TempClass])
        self.assertEqual(gr.terms_count(), 3)
        gr.remove_term()
        self.assertEqual(gr.terms_count(), 0)
        self.assertFalse(gr.have_term(TempClass))
        self.assertFalse(gr.have_term('asdf'))
        self.assertFalse(gr.have_term(0))
    def test_removeEmptyGrammar(self):
        gr = Grammar()
        self.assertEqual(gr.terms_count(), 0)
        gr.remove_term()
        self.assertEqual(gr.terms_count(), 0)
# Allow running this test module directly with the unittest runner.
if __name__ == '__main__':
    main()
| Python | 0 | |
f3c9284bf7b5d9ae4acc413fd7feb824fdb7aca0 | create field to exclude recomputation of old invoices | l10n_it_fatturapa_in/migrations/12.0.1.18.3/pre-migration.py | l10n_it_fatturapa_in/migrations/12.0.1.18.3/pre-migration.py | from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(env, version):
if not version:
return
openupgrade.logged_query(
env.cr,
"""
ALTER TABLE fatturapa_attachment_in
ADD COLUMN IF NOT EXISTS invoices_date character varying
""",
)
| Python | 0 | |
60068d4deeba541b9518579d6d8473c4300e189d | Test killing onitu during a transfer | tests/functional/test_crash.py | tests/functional/test_crash.py | import os.path
from os import unlink
from utils.launcher import Launcher
from utils.entries import Entries
from utils.loop import CounterLoop, BooleanLoop
from utils.files import generate, checksum
from utils.tempdirs import TempDirs
launcher = None
dirs = TempDirs()
rep1, rep2 = dirs.create(), dirs.create()
json_file = 'test_crash.json'
def setup_module(module):
    """Write the two-driver entries file and create the (unstarted) launcher."""
    global launcher
    entries = Entries()
    entries.add('local_storage', 'rep1', {'root': rep1})
    entries.add('local_storage', 'rep2', {'root': rep2})
    entries.save(json_file)
    launcher = Launcher(json_file)
def teardown_module(module):
    """Stop onitu and clean up the entries file and temp directories."""
    launcher.kill()
    unlink(json_file)
    dirs.delete()
def launcher_startup():
    """Start onitu and block until the referee and both drivers report up."""
    loop = CounterLoop(3)
    launcher.on_referee_started(loop.check)
    launcher.on_driver_started(loop.check, driver='rep1')
    launcher.on_driver_started(loop.check, driver='rep2')
    launcher()
    loop.run(timeout=5)
def test_crach():
    """Kill onitu mid-transfer, restart it, and check the transfer completes.

    NOTE(review): the function name has a typo ('crach'); renaming is safe
    only if nothing selects tests by name.
    """
    filename = 'crash'
    loop = BooleanLoop()
    launcher.on_transfer_started(
        loop.stop, d_from='rep1', d_to='rep2', filename=filename
    )
    launcher_startup()
    generate(os.path.join(rep1, filename), 1000)
    # wait until the transfer has begun, then kill onitu mid-flight
    loop.run(timeout=5)
    launcher.kill()
    launcher.unset_all_events()
    loop = BooleanLoop()
    launcher.on_transfer_ended(
        loop.stop, d_from='rep1', d_to='rep2', filename=filename
    )
    # restart: the transfer must resume and finish with identical content
    launcher_startup()
    loop.run(timeout=5)
    assert(checksum(os.path.join(rep1, filename)) ==
           checksum(os.path.join(rep2, filename)))
| Python | 0 | |
e541d2c6c9c71647201ad39eb8a774eabe243139 | Add gaussian smoothing example (#485) | examples/01-filter/gaussian-smoothing.py | examples/01-filter/gaussian-smoothing.py | """
Gaussian smoothing
~~~~~~~~~~~~~~~~~~
Perform a gaussian convolution.
"""
import pyvista as pv
from pyvista import examples
# Load dataset
data = examples.download_gourds()
# Define a good point of view
# (position, focal point, view-up) -- the pyvista camera_position convention
cp = [
    (319.5, 239.5, 1053.7372980874645),
    (319.5, 239.5, 0.0),
    (0.0, 1.0, 0.0)
]
###############################################################################
# Let's apply the gaussian smoothing with different values of standard
# deviation.
p = pv.Plotter(shape=(2, 2))
p.subplot(0, 0)
p.add_text("Original Image", font_size=24)
p.add_mesh(data, rgb=True)
p.camera_position = cp
p.subplot(0, 1)
p.add_text("Gaussian smoothing, std=2", font_size=24)
p.add_mesh(data.gaussian_smooth(std_dev=2.), rgb=True)
p.camera_position = cp
p.subplot(1, 0)
p.add_text("Gaussian smoothing, std=4", font_size=24)
p.add_mesh(data.gaussian_smooth(std_dev=4.), rgb=True)
p.camera_position = cp
p.subplot(1, 1)
p.add_text("Gaussian smoothing, std=8", font_size=24)
p.add_mesh(data.gaussian_smooth(std_dev=8.), rgb=True)
p.camera_position = cp
p.show()
###############################################################################
# Now let's see an example on a 3D dataset with volume rendering:
data = examples.download_brain()
smoothed_data = data.gaussian_smooth(std_dev=3.)
# shared rendering args: color limits from the smoothed volume, with a ramped
# opacity transfer function
dargs = dict(clim=smoothed_data.get_data_range(),
             opacity=[0, 0, 0, 0.1, 0.3, 0.6, 1])
n = [100, 150, 200, 245, 255]
p = pv.Plotter(shape=(1, 2), notebook=0)
p.subplot(0, 0)
p.add_text("Original Image", font_size=24)
# p.add_mesh(data.contour(n), **dargs)
p.add_volume(data, **dargs)
p.subplot(0, 1)
p.add_text("Gaussian smoothing", font_size=24)
# p.add_mesh(smoothed_data.contour(n), **dargs)
p.add_volume(smoothed_data, **dargs)
p.link_views()
p.camera_position = [(-162.0, 704.8, 65.02),
                     (90.0, 108.0, 90.0),
                     (0.0068, 0.0447, 0.999)]
p.show()
| Python | 0.000001 | |
cdc457e7486c9d47f0db017864175f37a2054091 | Add a copy of prjtrellis SDF parser | timing/util/parse_sdf.py | timing/util/parse_sdf.py | """
Utilities for SDF file parsing to determine cell timings
"""
import sys
class SDFData:
    """Parsed SDF file: maps cell instance name -> CellData."""
    def __init__(self):
        self.cells = {}
class Delay:
    """An SDF delay triple (minimum : typical : maximum)."""
    def __init__(self, minv, typv, maxv):
        self.minv = minv
        self.typv = typv
        self.maxv = maxv
class IOPath:
    """Propagation delay from an input pin to an output pin of a cell."""
    def __init__(self, from_pin, to_pin, rising, falling):
        self.from_pin = from_pin
        self.to_pin = to_pin
        # rising/falling are Delay triples for each output transition
        self.rising = rising
        self.falling = falling
class SetupHoldCheck:
    """Setup/hold timing check of a data pin against a clock pin."""
    def __init__(self, pin, clock, setup, hold):
        self.pin = pin
        self.clock = clock
        self.setup = setup
        self.hold = hold
class WidthCheck:
    """Minimum pulse width check on a clock signal."""
    def __init__(self, clock, width):
        self.clock = clock
        self.width = width
class Interconnect:
    """Routing (net-to-net) delay between two ports."""
    def __init__(self, from_net, to_net, rising, falling):
        self.from_net = from_net
        self.to_net = to_net
        self.rising = rising
        self.falling = falling
class CellData:
    """All timing data parsed for one cell instance."""
    def __init__(self, celltype, inst):
        self.type = celltype
        self.inst = inst
        # IOPath / SetupHoldCheck / WidthCheck records, in file order
        self.entries = []
        # (from_net, to_net) -> Interconnect
        self.interconnect = {}
def parse_sexpr(stream):
    """Parse one S-expression body from *stream*, which must be positioned
    just after the opening '('.

    Returns a nested list of strings; double-quoted strings keep embedded
    whitespace. Raises AssertionError on premature end of file.

    Fix: the pending token buffer is now flushed before recursing into a
    nested '(' -- previously an atom written directly against a paren, as in
    ``(a(b c))``, was appended *after* the nested list, reordering the result.
    Input with whitespace before every '(' parses identically to before.
    """
    content = []
    buffer = ""
    instr = False
    while True:
        c = stream.read(1)
        assert c != "", "unexpected end of file"
        if instr:
            # inside a quoted string: everything except the closing quote
            # is literal (no escape handling in SDF)
            if c == '"':
                instr = False
            else:
                buffer += c
        else:
            if c == '(':
                if buffer != "":
                    content.append(buffer)
                    buffer = ""
                content.append(parse_sexpr(stream))
            elif c == ')':
                if buffer != "":
                    content.append(buffer)
                return content
            elif c.isspace():
                if buffer != "":
                    content.append(buffer)
                buffer = ""
            elif c == '"':
                instr = True
            else:
                buffer += c
def parse_sexpr_file(filename):
    """Parse an S-expression file; the file must begin with '('."""
    with open(filename, 'r') as stream:
        opening = stream.read(1)
        assert opening == '('
        return parse_sexpr(stream)
def parse_delay(delay):
    """Parse an SDF "min:typ:max" triple into a Delay.

    SDF permits real-number delay values, so fields containing '.' or an
    exponent are parsed as floats; plain integer fields stay ints, keeping
    existing integer-only files byte-for-byte compatible.
    """
    fields = delay.split(":")
    assert len(fields) == 3
    values = [float(f) if ('.' in f or 'e' in f.lower()) else int(f)
              for f in fields]
    return Delay(values[0], values[1], values[2])
def parse_sdf_file(filename):
    """Parse an SDF file into an SDFData keyed by cell instance name.

    Only (CELL ...) entries are inspected; within a cell, absolute DELAY
    records (IOPATH / INTERCONNECT) and TIMINGCHECK records (SETUPHOLD /
    WIDTH) are collected. Other record kinds are silently skipped.
    """
    sdata = parse_sexpr_file(filename)
    assert sdata[0] == "DELAYFILE"
    sdf = SDFData()
    for entry in sdata[1:]:
        if entry[0] != "CELL":
            continue
        assert entry[1][0] == "CELLTYPE"
        celltype = entry[1][1]
        assert entry[2][0] == "INSTANCE"
        # an (INSTANCE) with no name denotes the top-level design
        if len(entry[2]) > 1:
            inst = entry[2][1]
        else:
            inst = "top"
        cell = CellData(celltype, inst)
        for subentry in entry[3:]:
            if subentry[0] == "DELAY":
                assert subentry[1][0] == "ABSOLUTE"
                for delay in subentry[1][1:]:
                    # delay[3]/delay[4] are (rise) and (fall) value lists;
                    # only the first triple of each is used
                    if delay[0] == "IOPATH":
                        cell.entries.append(
                            IOPath(delay[1], delay[2], parse_delay(delay[3][0]), parse_delay(delay[4][0])))
                    elif delay[0] == "INTERCONNECT":
                        cell.interconnect[(delay[1], delay[2])] = Interconnect(delay[1], delay[2],
                                                                              parse_delay(delay[3][0]),
                                                                              parse_delay(delay[4][0]))
            elif subentry[0] == "TIMINGCHECK":
                for check in subentry[1:]:
                    if check[0] == "SETUPHOLD":
                        cell.entries.append(
                            SetupHoldCheck(check[1], check[2], parse_delay(check[3][0]), parse_delay(check[4][0])))
                    elif check[0] == "WIDTH":
                        cell.entries.append(WidthCheck(check[1], parse_delay(check[2][0])))
        sdf.cells[inst] = cell
    return sdf
| Python | 0 | |
59d435ab1d0e5347180f60633d316aa7f2a3abdb | add send_TWH_text module to package | timutils/send_TWH_txt.py | timutils/send_TWH_txt.py | """
short module to send a text message to Tim Hilton's phone using
Verizon's email-to-sms support and gmail's smtp mail server. I was
unable to get UC Merced's outlook.com server to accept the outgoing
message.
Timothy W. Hilton, UC Merced, 25 Feb 2014
"""
import smtplib
import getpass
def get_outgoing_mail_password():
    """Prompt interactively for the Gmail password; return None if empty."""
    entered = getpass.getpass(prompt='Gmail password: ')
    return entered if len(entered) != 0 else None
def send_vtext_gmail(gmail_passwd,
                     gmail_uname='timothy.w.hilton@gmail.com',
                     dest_phone_num='4153147478',
                     msg_txt='testing 123'):
    """Send *msg_txt* as an SMS via Verizon's email-to-SMS gateway.

    Routes the message through Gmail's SMTP server (STARTTLS on port 587)
    to <dest_phone_num>@vtext.com.
    """
    vtext_addr = "{}@vtext.com".format(dest_phone_num)
    # minimal RFC-822 style message; headers are part of the string body
    msg = """From: %s
To: %s
Subject: text-message\n
%s""" % (gmail_uname, vtext_addr, msg_txt)
    server = smtplib.SMTP('smtp.gmail.com',587)
    server.starttls()
    server.login(gmail_uname,gmail_passwd)
    server.sendmail(gmail_uname, vtext_addr, msg)
    server.quit()
def send_vtext_outlook(ucmerced_uname,
                       smtp_password,
                       dest_phone_num,
                       msg_txt):
    """
    25 Feb 2014: couldn't get sending mail through UC Merced's
    outlook.com SMTP server to work.  Probably something related to
    the formatting of the outlook.com username? -TWH
    """
    # NOTE: the actual SMTP send is commented out (see docstring); this
    # function currently only builds the message and prints debug output.
    # Python 2 print statements below.
    vtext_addr = "{}@vtext.com".format(dest_phone_num)
    smtp_uname = "{}@ucmerced.edu".format(ucmerced_uname)
    msg = """From: %s
To: %s
Subject: text-message
%s""" % (smtp_uname, vtext_addr, msg_txt)
    print smtp_uname
    result = 0
    # server = smtplib.SMTP('pod51011.outlook.com',587)
    # server.starttls()
    # server.login(smtp_uname,smtp_password)
    # result = server.sendmail(smtp_uname, vtext_addr, msg)
    # server.quit()
    print result
# Prompt for the Gmail password and send the default test message.
if __name__ == "__main__":
    passwd = get_outgoing_mail_password()
    if passwd is not None:
        send_vtext_gmail(passwd,
                         msg_txt='here is the message')
    else:
        print('no password provided')
| Python | 0 | |
4ef17b96531a511b7ad620a0753594a2892af65c | Add monte_carlo_multigpu.py | examples/finance/monte_carlo_multigpu.py | examples/finance/monte_carlo_multigpu.py | import argparse
import contextlib
import sys
import time
import cupy
import numpy
from black_scholes import black_scholes_kernel
from monte_carlo import monte_carlo_kernel
# CuPy also implements a feature to call kernels in different GPUs.
# Through this sample, we will explain how to allocate arrays
# in different devices, and call kernels in parallel.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpus', type=int, nargs='*',
                        default=[0], help='GPU IDs')
    parser.add_argument('--n-options', default=1000, type=int)
    parser.add_argument('--n-samples-per-thread', default=1000, type=int)
    parser.add_argument('--n-threads-per-option', default=10000, type=int)
    args = parser.parse_args()
    if len(args.gpus) == 0:
        print('At least one GPU is required.')
        sys.exit(1)
    def rand_range(m, M):
        # one uniform sample in [m, M) per option, as float64
        samples = numpy.random.rand(args.n_options)
        return (m + (M - m) * samples).astype(numpy.float64)
    print('initializing...')
    stock_price_cpu = rand_range(5, 30)
    option_strike_cpu = rand_range(1, 100)
    option_years_cpu = rand_range(0.25, 10)
    risk_free = 0.02
    volatility = 0.3
    stock_price_gpus = []
    option_strike_gpus = []
    option_years_gpus = []
    call_prices_gpus = []
    print('start computation')
    print('    # of gpus: {}'.format(len(args.gpus)))
    print('    # of options: {}'.format(args.n_options))
    print('    # of samples per option: {}'.format(
        len(args.gpus) * args.n_samples_per_thread * args.n_threads_per_option)
    )
    # Allocate arrays in different devices
    for gpu_id in args.gpus:
        with cupy.cuda.Device(gpu_id):
            stock_price_gpus.append(cupy.array(stock_price_cpu))
            option_strike_gpus.append(cupy.array(option_strike_cpu))
            option_years_gpus.append(cupy.array(option_years_cpu))
            call_prices_gpus.append(cupy.empty(
                (args.n_options, args.n_threads_per_option),
                dtype=numpy.float64))
    @contextlib.contextmanager
    def timer(message):
        # synchronize before/after so the wall-clock time covers all queued
        # GPU work, not just kernel launches
        cupy.cuda.Stream.null.synchronize()
        start = time.time()
        yield
        cupy.cuda.Stream.null.synchronize()
        end = time.time()
        print('%s:\t%f sec' % (message, end - start))
    with timer('GPU (CuPy, Monte Carlo method)'):
        for i, gpu_id in enumerate(args.gpus):
            # Performs Monte-Carlo simulations in parallel
            # (the gpu index i is passed so each device draws different
            # samples -- presumably a seed offset; confirm in monte_carlo.py)
            with cupy.cuda.Device(gpu_id):
                monte_carlo_kernel(
                    stock_price_gpus[i][:, None],
                    option_strike_gpus[i][:, None],
                    option_years_gpus[i][:, None],
                    risk_free, volatility, args.n_samples_per_thread, i,
                    call_prices_gpus[i])
        # Transfer the result from the GPUs
        call_prices = [c.get() for c in call_prices_gpus]
    # average across GPUs (axis 0) and per-option threads (axis 2)
    call_mc = numpy.concatenate(call_prices).reshape(
        len(args.gpus), args.n_options, args.n_threads_per_option)
    call_mc = call_mc.mean(axis=(0, 2))
    # Compute the error between the value of the exact solution
    # and that of the Monte-Carlo simulation
    with cupy.cuda.Device(args.gpus[0]):
        call_bs = black_scholes_kernel(
            stock_price_gpus[0], option_strike_gpus[0], option_years_gpus[0],
            risk_free, volatility)[0].get()
    error = cupy.std(call_mc - call_bs)
    print('Error: %f' % error)
| Python | 0.998411 | |
2b4c065b986ca1e05d0755b2b64502861b17364d | add import script for Oldham | polling_stations/apps/data_collection/management/commands/import_oldham.py | polling_stations/apps/data_collection/management/commands/import_oldham.py | from data_collection.management.commands import BaseXpressCsvImporter
class Command(BaseXpressCsvImporter):
council_id = 'E08000004'
addresses_name = 'OldhamPropertyPostCodePollingStationWebLookup-2017-02-16.TSV'
stations_name = 'OldhamPropertyPostCodePollingStationWebLookup-2017-02-16.TSV'
elections = ['mayor.greater-manchester.2017-05-04']
csv_delimiter = '\t'
| Python | 0 | |
1064b7bc9e343f3ab9308172f6a3129745e7a548 | add test.py | test.py | test.py | #!/usr/bin/python
import smc
from pprint import pprint
import time
import logging
logger = logging.getLogger(__name__)
# NOTE(review): credentials and SMC address are hardcoded — move to
# environment variables or a config file before sharing/committing.
smc.login('http://172.18.1.150:8082', 'EiGpKD4QxlLJ25dbBEp20001')
#Example of using a search filter
#Response is a json record with a reference link to the object
#smc.get_element_by_href(href) gets the record directly
#Search for group named (Skype Servers)
mygroup = smc.filter_by_type('group', 'Skype Servers')
if mygroup:
    pprint(smc.get_element_by_href(mygroup['href']))
#Search for single_fw instance named vmware-fw
myfw = smc.filter_by_type('single_fw', 'vmware-fw')
if myfw:
    pprint(smc.get_element_by_href(myfw['href']))
#Search for host named ami
myhost = smc.filter_by_type('host', 'ami')
if myhost:
    pprint(smc.get_element_by_href(myhost['href']))
#Search by top level element if element type is not known
myobject = smc.filter_by_element('myelement')
# The triple-quoted blocks below are disabled usage examples.
'''
#Creating/removing a host record. Validation is done based on IP address.
smc.create_host('ami', '1.1.1.2')
smc.remove_host('ami')
smc.create_host('a', 'a.b.c.d') #Should fail, not valid IP
smc.remove_host('ami2') #should fail if host doesn't exist
'''
'''
#Create group and add members
smc.create_group('group_with_no_members')
smc.create_host('ami', '1.1.1.1')
smc.create_host('ami2', '2.2.2.2')
smc.create_group('anewgroup', ['ami','ami2'])
'''
'''
#Example of creating a group record. If members is included, each member href
#needs to be validated or warning will be issued that members can't be added
smc.create_group('mygroup')
smc.create_group('mygroup', ['member1','member2','member3'])
'''
'''
#Example of creating a single_fw instance. method signature is:
#smc.create_single_fw(name, IP (mgmt), network (mgmt), dns=None, fw_license=None)
#If DNS and fw_license are provided, DNS is added to fw and an attempt is made to attach an available license if available
smc.create_single_fw('lepage', '172.18.1.5', '172.18.1.0/24', dns='5.5.5.5', fw_license=True)
time.sleep(5)
smc.remove_single_fw('lepage')
'''
'''
#Get available dynamic licenses
print "License: %s" % smc.get_dynamic_license()
'''
smc.logout()
| Python | 0.000012 | |
fa2fd9cdab29a5736ae6b69c5f754f92a33c7f74 | add wsgi.py | wsgi.py | wsgi.py | from server import app
# Run the app's built-in development server when executed directly;
# production WSGI servers import `app` from this module instead.
if __name__ == "__main__":
    app.run()
f4e12493c000b6bb3051e9c201347d420c8dd687 | add basis for netcomp class | camoco/NetComp.py | camoco/NetComp.py | from .COB import COB
class NetComp(Camoco):
    """Holds a collection of co-expression networks (COB objects) so they
    can be compared against each other."""

    def __init__(self, name, networks):
        """Create a NetComp from an iterable of networks.

        Each element may be a COB instance or a network name (resolved
        through COB()).
        """
        self.networks = set()
        for network in networks:
            self.add_network(network)

    def add_network(self, net):
        """Register a single network with this NetComp.

        Accepts a COB instance or a network name; anything else raises
        ValueError.
        """
        resolved = COB(net) if isinstance(net, str) else net
        if not isinstance(resolved, COB):
            raise ValueError(f'a valid network must be provided')
        self.networks.add(resolved)
| Python | 0 | |
b5d2b975e0566b90e6f52b9b3a4bf1b2e1fef8da | constrain tabled_committee_report.committee_id NOT NULL | migrations/versions/8cbc3d8dd55_add_soundcloudtrack_model.py | migrations/versions/8cbc3d8dd55_add_soundcloudtrack_model.py | """Add SoundcloudTrack model
Revision ID: 8cbc3d8dd55
Revises: 17570e7e200b
Create Date: 2016-08-31 10:19:49.128041
"""
# revision identifiers, used by Alembic.
revision = '8cbc3d8dd55'
down_revision = '17570e7e200b'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Make tabled_committee_report.committee_id NOT NULL and indexed,
    recreate its FK without ON DELETE SET NULL, and drop the unused
    summary column."""
    ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('tabled_committee_report', 'committee_id',
               existing_type=sa.INTEGER(),
               nullable=False)
    op.create_index(op.f('ix_tabled_committee_report_committee_id'), 'tabled_committee_report', ['committee_id'], unique=False)
    # Recreate the FK under the naming-convention name and without the
    # SET NULL delete rule (committee_id may no longer become NULL).
    op.drop_constraint(u'tabled_committee_report_committee_id_fkey', 'tabled_committee_report', type_='foreignkey')
    op.create_foreign_key(op.f('fk_tabled_committee_report_committee_id_committee'), 'tabled_committee_report', 'committee', ['committee_id'], ['id'])
    op.drop_column('tabled_committee_report', 'summary')
    ### end Alembic commands ###
def downgrade():
    """Reverse upgrade(): restore the summary column, the original FK with
    ON DELETE SET NULL, and make committee_id nullable again."""
    ### commands auto generated by Alembic - please adjust! ###
    # Note: dropped column data is not recoverable; summary comes back empty.
    op.add_column('tabled_committee_report', sa.Column('summary', sa.TEXT(), autoincrement=False, nullable=True))
    op.drop_constraint(op.f('fk_tabled_committee_report_committee_id_committee'), 'tabled_committee_report', type_='foreignkey')
    op.create_foreign_key(u'tabled_committee_report_committee_id_fkey', 'tabled_committee_report', 'committee', ['committee_id'], ['id'], ondelete=u'SET NULL')
    op.drop_index(op.f('ix_tabled_committee_report_committee_id'), table_name='tabled_committee_report')
    op.alter_column('tabled_committee_report', 'committee_id',
               existing_type=sa.INTEGER(),
               nullable=True)
    ### end Alembic commands ###
| Python | 0.999995 | |
2a3b89f42cde7088b304a3f224eaf52894f544ec | Add an python example for stream testing | misc/utils/LSL_Tests/RecieveAppStatistics.py | misc/utils/LSL_Tests/RecieveAppStatistics.py | """Example program to show how to read a multi-channel time series from LSL."""
from pylsl import StreamInlet, resolve_stream
import sys
# first resolve an EEG stream on the lab network
# NOTE(review): the message says Unity3D.AppStatistics but the stream type
# actually resolved is 'Unity3D.FPS.FT' — confirm which one is intended.
print("looking for an Unity3D.AppStatistics stream...")
streams = resolve_stream('type', 'Unity3D.FPS.FT')
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
while True:
    # get a new sample (you can also omit the timestamp part if you're not
    # interested in it)
    sample, timestamp = inlet.pull_sample()
    # Python 2 print statement: trailing comma + '\r' keeps rewriting one line.
    print '\r' + str(round(timestamp)) + '\t' + str(sample),
    sys.stdout.flush()
| Python | 0.000005 | |
52c7d6ba8f6dcb6c6f1bd02790ab9bb7fae8ebcd | add script | scripts/grabBAMrecs.py | scripts/grabBAMrecs.py | #!/usr/bin/env python
import sys
import pysam
import os
import re
from collections import defaultdict as dd
import logging
logger = logging.getLogger(__name__)
FORMAT = '%(asctime)s %(message)s'
logging.basicConfig(format=FORMAT)
logger.setLevel(logging.INFO)
def find_mate(read, bam):
    ''' AlignmentFile.mate() can return a non-primary alignment, so use this function instead '''
    # Fetch alignments overlapping the mate's mapped position (1bp window).
    chrom = read.next_reference_name
    for rec in bam.fetch(chrom, read.next_reference_start, read.next_reference_start+1):
        if rec.query_name == read.query_name and rec.reference_start == read.next_reference_start:
            # 2048 is the SAM supplementary-alignment FLAG bit; together with
            # is_secondary this keeps only the primary alignment.
            if not rec.is_secondary and bin(rec.flag & 2048) != bin(2048):
                # Opposite read-of-pair => this is the mate, not the read itself.
                if rec.is_read1 != read.is_read1:
                    return rec
    # No primary mate alignment found at the expected coordinates.
    return None
# Usage: grabBAMrecs.py <BAM> <chrom:start-end>
# Writes a new BAM containing all primary paired reads in the region, plus
# the mates of reads whose partner lies outside the region.
if len(sys.argv) == 3:
    inbam = pysam.AlignmentFile(sys.argv[1], 'rb')
    # Output name: input basename + region (':' replaced to keep it filesystem-safe).
    outfn = '.'.join(os.path.basename(sys.argv[1]).split('.')[:-1]) + '.' + re.sub(':', '_', sys.argv[2]) + '.bam'
    outbam = pysam.AlignmentFile(outfn, 'wb', template=inbam)
    # qname -> list of is_read1 flags seen inside the region.
    seen = dd(list)
    for read in inbam.fetch(region=sys.argv[2]):
        if not read.is_supplementary and not read.is_secondary and not read.mate_is_unmapped:
            outbam.write(read)
            seen[read.qname].append(read.is_read1)
    seen_pairs = 0
    seen_alone = 0
    for qname, pair in seen.iteritems():
        assert len(set(pair)) <= 2
        # Both read1 and read2 seen => the whole pair is inside the region.
        if len(set(pair)) == 2:
            seen_pairs += 1
        if len(set(pair)) == 1:
            seen_alone += 1
    logger.info('%d pairs inside and %d mates outside region %s' % (seen_pairs, seen_alone, sys.argv[2]))
    # Second pass with a separate handle so fetch() positions don't clash.
    matebam = pysam.AlignmentFile(sys.argv[1], 'rb')
    for read in inbam.fetch(region=sys.argv[2]):
        if not read.is_supplementary and not read.is_secondary and not read.mate_is_unmapped:
            assert read.qname in seen
            if len(set(seen[read.qname])) == 1:
                # Mate lies outside the region: locate and include it.
                mate = find_mate(read, matebam)
                if mate is not None:
                    outbam.write(mate)
else:
    sys.exit('usage: %s <BAM> <region chrom:start-end>' % sys.argv[0])
| Python | 0.000003 | |
6ad72a0c624abdda0df8d5c49366bfc597a12340 | Add tests for utils experiment module | cptm/tests/test_utils_experiment.py | cptm/tests/test_utils_experiment.py | from nose.tools import assert_equal, assert_false
from os import remove
from os.path import join
from json import dump
from cptm.utils.experiment import load_config, add_parameter, thetaFileName, \
topicFileName, opinionFileName, tarFileName, experimentName
def setup():
    """Write an empty config.json and load it, so tests see pure defaults."""
    global jsonFile
    global config
    global nTopics
    jsonFile = 'config.json'
    # Create an empty config file; load_config() then fills in defaults.
    params = {}
    with open(jsonFile, 'wb') as f:
        dump(params, f, sort_keys=True, indent=4)
    config = load_config(jsonFile)
    nTopics = 100
def teardown():
    """Remove the temporary config file created by setup()."""
    remove(jsonFile)
def test_load_config_default_values():
    """Check that load_config() supplies the documented defaults when the
    config file is empty.

    Yields one (assert_equal, actual, expected) check per parameter (nose
    generator test).
    """
    # Expected default value for every configuration parameter.
    params = {}
    params['inputData'] = None
    params['outDir'] = '/{}'
    params['testSplit'] = 20
    params['minFreq'] = None
    params['removeTopTF'] = None
    params['removeTopDF'] = None
    params['nIter'] = 200
    params['beta'] = 0.02
    params['beta_o'] = 0.02
    params['expNumTopics'] = range(20, 201, 20)
    params['nTopics'] = None
    params['nProcesses'] = None
    params['topicLines'] = [0]
    params['opinionLines'] = [1]
    params['sampleEstimateStart'] = None
    params['sampleEstimateEnd'] = None
    for p, v in params.iteritems():
        # Compare the loaded config against the expected default. The
        # original compared params[p] to itself, which always passed.
        yield assert_equal, config[p], v
def test_add_parameter():
    """add_parameter() should persist a new key that load_config() returns."""
    pName = 'nTopics'
    yield assert_false, hasattr(config, pName)
    add_parameter(pName, nTopics, jsonFile)
    config2 = load_config(jsonFile)
    yield assert_equal, config2[pName], nTopics
def test_thetaFileName():
    """thetaFileName() embeds nTopics in the output path."""
    config['nTopics'] = nTopics
    fName = thetaFileName(config)
    assert_equal(fName, '/theta_{}.csv'.format(nTopics))
def test_topicFileName():
    """topicFileName() embeds nTopics in the output path."""
    config['nTopics'] = nTopics
    fName = topicFileName(config)
    assert_equal(fName, '/topics_{}.csv'.format(nTopics))
def test_opinionFileName():
    # NOTE(review): this test is incomplete/broken — it returns a path built
    # from undefined names (join/params/name) instead of calling
    # opinionFileName() and asserting on the result. It never fails because
    # nose ignores return values, but it also never raises only by accident.
    config['nTopics'] = nTopics
    return join(params.get('outDir').format(''),
                'opinions_{}_{}.csv'.format(name, nTopics))
#def experimentName(params):
# fName = params.get('outDir')
# fName = fName.replace('/{}', '')
# _p, name = os.path.split(fName)
# return name
#def tarFileName(params):
# nTopics = params.get('nTopics')
# name = experimentName(params)
# return os.path.join(params.get('outDir').format(''),
# '{}_{}.tgz'.format(name, nTopics))
| Python | 0 | |
f73800f8e4ccd76d858c08d8cc8a72a6f2274fb6 | Validate settings a tad later | mopidy/__main__.py | mopidy/__main__.py | import logging
import multiprocessing
import optparse
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
from mopidy import get_version, settings, SettingsError
from mopidy.core import CoreProcess
from mopidy.utils import get_class
from mopidy.utils.log import setup_logging
from mopidy.utils.path import get_or_create_folder
from mopidy.utils.settings import list_settings_optparse_callback
logger = logging.getLogger('mopidy.main')
def main():
    """Parse CLI options, set up logging, validate settings, then wire the
    frontend and backend together through the core queue and start the core
    process."""
    options = parse_options()
    setup_logging(options.verbosity_level, options.dump)
    logger.info('-- Starting Mopidy --')
    get_or_create_folder('~/.mopidy/')
    # Validate after logging is configured so errors are reported properly.
    settings.validate()
    # Queue shared between the frontend server and the core process.
    core_queue = multiprocessing.Queue()
    output_class = get_class(settings.OUTPUT)
    backend_class = get_class(settings.BACKENDS[0])
    # Only the first configured frontend/backend is used.
    frontend = get_class(settings.FRONTENDS[0])()
    frontend.start_server(core_queue)
    core = CoreProcess(core_queue, output_class, backend_class, frontend)
    core.start()
    logger.debug('Main done')
def parse_options():
    """Build the optparse parser and return the parsed options object
    (positional arguments are discarded)."""
    parser = optparse.OptionParser(version='Mopidy %s' % get_version())
    parser.add_option('-q', '--quiet',
        action='store_const', const=0, dest='verbosity_level',
        help='less output (warning level)')
    parser.add_option('-v', '--verbose',
        action='store_const', const=2, dest='verbosity_level',
        help='more output (debug level)')
    parser.add_option('--dump',
        action='store_true', dest='dump',
        help='dump debug log to file')
    # --list-settings prints the settings and exits inside the callback.
    parser.add_option('--list-settings',
        action='callback', callback=list_settings_optparse_callback,
        help='list current settings')
    return parser.parse_args()[0]
# Entry point: run main() and translate the expected terminations
# (Ctrl-C, bad settings, explicit exits) into clean exit codes.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        logger.info(u'Interrupted by user')
        sys.exit(0)
    except SettingsError, e:
        logger.error(e)
        sys.exit(1)
    except SystemExit, e:
        logger.error(e)
        sys.exit(1)
| import logging
import multiprocessing
import optparse
import os
import sys
sys.path.insert(0,
os.path.abspath(os.path.join(os.path.dirname(__file__), '../')))
from mopidy import get_version, settings, SettingsError
from mopidy.core import CoreProcess
from mopidy.utils import get_class
from mopidy.utils.log import setup_logging
from mopidy.utils.path import get_or_create_folder
from mopidy.utils.settings import list_settings_optparse_callback
logger = logging.getLogger('mopidy.main')
def main():
    """Parse CLI options, configure logging, validate settings, then start
    the frontend server and the core process (pre-change version: settings
    are validated before the startup banner)."""
    options = _parse_options()
    setup_logging(options.verbosity_level, options.dump)
    settings.validate()
    logger.info('-- Starting Mopidy --')
    get_or_create_folder('~/.mopidy/')
    # Queue shared between the frontend server and the core process.
    core_queue = multiprocessing.Queue()
    output_class = get_class(settings.OUTPUT)
    backend_class = get_class(settings.BACKENDS[0])
    # Only the first configured frontend/backend is used.
    frontend = get_class(settings.FRONTENDS[0])()
    frontend.start_server(core_queue)
    core = CoreProcess(core_queue, output_class, backend_class, frontend)
    core.start()
    logger.debug('Main done')
def _parse_options():
    """Build the optparse parser and return the parsed options object
    (positional arguments are discarded)."""
    parser = optparse.OptionParser(version='Mopidy %s' % get_version())
    parser.add_option('-q', '--quiet',
        action='store_const', const=0, dest='verbosity_level',
        help='less output (warning level)')
    parser.add_option('-v', '--verbose',
        action='store_const', const=2, dest='verbosity_level',
        help='more output (debug level)')
    parser.add_option('--dump',
        action='store_true', dest='dump',
        help='dump debug log to file')
    # --list-settings prints the settings and exits inside the callback.
    parser.add_option('--list-settings',
        action='callback', callback=list_settings_optparse_callback,
        help='list current settings')
    return parser.parse_args()[0]
# Entry point: run main() and translate the expected terminations
# (Ctrl-C, bad settings, explicit exits) into clean exit codes.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        logger.info(u'Interrupted by user')
        sys.exit(0)
    except SettingsError, e:
        logger.error(e)
        sys.exit(1)
    except SystemExit, e:
        logger.error(e)
        sys.exit(1)
| Python | 0 |
69b715ab99522967a6b1bb8f4abfc4f2b1e60912 | check most of the analyzer code by importing the analyzer itself | tests/windows/test_analyzer.py | tests/windows/test_analyzer.py | # Copyright (C) 2017 Cuckoo Foundation.
# This file is part of Cuckoo Sandbox - http://www.cuckoosandbox.org
# See the file 'docs/LICENSE' for copying permission.
def test_analyzer():
    """Simply imports the analyzer module to at least load most of the code."""
    import analyzer
    analyzer  # Fake usage: keeps linters from flagging an unused import.
| Python | 0 | |
4853257696373d248884efd1532af8a81c34ee93 | Add LiveComposite creation helper script | tools/create_live_composite.py | tools/create_live_composite.py |
"""
Helper script for cropping images and creating a RenPy LiveComposite for them.
Quite specific and mostly useful for processing images exported from a
rendering program like Blender or from Photoshop layers.
Requires Pillow Python image processing library to be installed.
Command line example (current working directory at the base of this project):
python tools/create_live_composite.py ShaderDemo/game/images/doll
This assumes all images in the source directory have the same size. The script
crops them and creates an efficient LiveComposite that can be used for rigging
or just normally. The resulting LiveComposite is written into a .rpy-file
in the target directory.
"""
import sys
import os
from PIL import Image
IMAGES = ["png", "jpg"]  # accepted source image extensions (compared lower-cased)
POSTFIX = "crop"  # inserted before the extension of cropped output files
PAD = 5  # pixels of padding preserved around the detected content box
# Source directory is the single command line argument; collect and sort
# every image file in it.
sourceDir = sys.argv[1]
sourceImages = [os.path.join(sourceDir, name) for name in os.listdir(sourceDir) if name.lower().split(".")[-1] in IMAGES]
sourceImages.sort()
def findValidImages(images):
    """Open every path in *images* and keep those usable for compositing.

    Returns a list of (path, PIL.Image) tuples. Images whose name already
    contains POSTFIX (previously cropped output) are skipped, as are images
    whose size differs from the first valid image — a LiveComposite needs
    all layers to share one canvas size.
    """
    valid = []
    size = None
    # Bug fix: iterate the parameter instead of the module-level
    # sourceImages (the old code ignored its argument entirely).
    for path in images:
        image = Image.open(path)
        if POSTFIX and POSTFIX in path.lower():
            print("Skipping already cropped: %s" % path)
        elif size is None or image.size == size:
            # First valid image defines the expected size for the rest.
            size = image.size
            valid.append((path, image))
        else:
            print("Image %s has size %s, should be %s? Skipped." % (path, str(image.size), str(size)))
    return valid
def getCropRect(image):
    """Return a (left, upper, right, lower) crop box for *image*.

    Uses PIL's getbbox() to find the non-empty content, expands it by PAD
    pixels on each side and clamps to the image bounds. Falls back to the
    whole image when getbbox() returns None (fully empty image).
    """
    x = 0
    y = 0
    x2 = image.size[0]
    y2 = image.size[1]
    box = image.getbbox()
    if box:
        return max(box[0] - PAD, 0), max(box[1] - PAD, 0), min(box[2] + PAD, image.size[0]), min(box[3] + PAD, image.size[1])
    return x, y, x2, y2
def createName(path):
    """Return *path* with the crop POSTFIX inserted before the extension."""
    stem, extension = path.rsplit(".", 1)
    return "%s%s.%s" % (stem, POSTFIX, extension)
# Crop every valid image, save it next to the original, and remember
# (cropped name, original image, crop rect) for the LiveComposite below.
results = []
for path, image in findValidImages(sourceImages):
    rect = getCropRect(image)
    cropped = image.crop(rect)
    name = createName(path)
    cropped.save(name)
    print("Saved: %s. Cropped: %s" % (name, str(rect)))
    results.append((name, image, rect))
# Emit a RenPy .rpy file declaring a LiveComposite: full canvas size of the
# first (uncropped) image, then one (x, y) offset + cropped file per layer.
name = os.path.normcase(sourceDir).split(os.sep)[-1]
with open(os.path.join(sourceDir, name + ".rpy"), "w") as f:
    base = results[0]
    f.write("#Automatically generated file\n\n")
    f.write("image %s = LiveComposite(\n" % name)
    f.write(" (%i, %i),\n" % base[1].size)
    for result in results:
        name, image, crop = result
        # Paths in the .rpy are relative to the RenPy 'images' folder,
        # with forward slashes.
        name = name[name.find("images"):].replace("\\", "/")
        f.write(" (%i, %i), \"%s\",\n" % (crop[0], crop[1], name))
    f.write(")\n")
| Python | 0 | |
e74571c6505bdf99a94fc27dd1ea60e23f55db0a | Add strace_inputs.py to strace a test executable and detect its dependencies. | tools/isolate/strace_inputs.py | tools/isolate/strace_inputs.py | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Runs strace on a test and processes the logs to extract the dependencies from
the source tree.
Automatically extracts directories where all the files are used to make the
dependencies list more compact.
"""
import os
import re
import subprocess
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Two levels up from tools/isolate: the source tree root.
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
# Absolute path prefixes outside the source tree that are never interesting
# as test dependencies.
IGNORED = (
  '/dev',
  '/etc',
  '/home',
  '/lib',
  '/proc',
  '/sys',
  '/tmp',
  '/usr',
  '/var',
)
def gen_trace(cmd, cwd, logname, silent):
  """Runs strace on an executable.

  -f follows forked children and only open() calls are traced to keep the
  log small. Returns the traced process's exit code; on failure, the tail
  of its output is printed.
  """
  strace = ['strace', '-f', '-e', 'trace=open', '-o', logname]
  stdout = stderr = None
  if silent:
    stdout = subprocess.PIPE
    stderr = subprocess.PIPE
  # Normalize every command element relative to |cwd|.
  cmd = [os.path.normpath(os.path.join(cwd, c)) for c in cmd]
  p = subprocess.Popen(
      strace + cmd, cwd=cwd, stdout=stdout, stderr=stderr)
  out, err = p.communicate()
  if p.returncode != 0:
    print 'Failure: %d' % p.returncode
    # pylint: disable=E1103
    print ''.join(out.splitlines(True)[-100:])
    print ''.join(err.splitlines(True)[-100:])
  return p.returncode
def parse_log(filename, blacklist):
  """Processes a strace log and returns the files opened and the files that do
  not exist.

  Most of the time, files that do not exist are temporary test files that should
  be put in /tmp instead. See http://crbug.com/116251

  Returns (files, non_existent): sets of opened paths that do and do not
  exist on disk. Paths for which blacklist(path) is true, failed opens and
  directory opens are skipped.

  TODO(maruel): Process chdir() calls so relative paths can be processed.
  """
  # Compile once instead of on every line.
  # Groups: 1=pid, 2=filepath, 3=open() flags, 4=syscall result.
  open_re = re.compile(r'^(\d+)\s+open\("([^"]+)", ([^\)]+)\)\s+= (.+)$')
  files = set()
  non_existent = set()
  # Use a context manager so the log file handle is not leaked.
  with open(filename) as logfile:
    for line in logfile:
      m = open_re.match(line)
      if not m:
        continue
      if m.group(4).startswith('-1') or 'O_DIRECTORY' in m.group(3):
        # Not present or a directory.
        continue
      filepath = m.group(2)
      if blacklist(filepath):
        continue
      if not os.path.isfile(filepath):
        non_existent.add(filepath)
      else:
        files.add(filepath)
  return files, non_existent
def relevant_files(files, root):
  """Partition *files* into paths under |root| and paths outside it.

  Paths under |root| are returned relative to it. Both lists are sorted
  and de-duplicated.
  """
  inside = {f[len(root):] for f in files if f.startswith(root)}
  outside = {f for f in files if not f.startswith(root)}
  return sorted(inside), sorted(outside)
def extract_directories(files, root):
  """Detects if all the files in a directory were loaded and if so, replace the
  individual files by the directory entry.

  *files* are paths relative to |root|; a collapsed directory is represented
  as 'dir/' in the sorted result.
  """
  directories = set(os.path.dirname(f) for f in files)
  files = set(files)
  # Deepest directories first so collapsed subtrees can collapse upward.
  for directory in sorted(directories, reverse=True):
    # Every non-ignored entry actually present on disk in this directory.
    actual = set(
      os.path.join(directory, f) for f in
      os.listdir(os.path.join(root, directory))
      if not f.endswith(('.svn', '.pyc'))
    )
    if not (actual - files):
      # All entries were used: replace them by the directory itself.
      files -= actual
      files.add(directory + '/')
  return sorted(files)
def strace_inputs(unittest, cmd):
  """Tries to load the logs if available. If not, strace the test.

  Prints the dependency summary (missing files, files outside the tree,
  and the compacted in-tree dependency list). Returns 0 on success or the
  traced process's exit code on failure.
  """
  # Cache the strace log next to this script, keyed by test basename.
  logname = os.path.join(BASE_DIR, os.path.basename(unittest))
  if not os.path.isfile(logname):
    returncode = gen_trace(cmd, ROOT_DIR, logname, True)
    if returncode:
      return returncode
  def blacklist(f):
    """Strips ignored paths."""
    return f.startswith(IGNORED) or f.endswith('.pyc')
  files, non_existent = parse_log(logname, blacklist)
  print('Total: %d' % len(files))
  print('Non existent: %d' % len(non_existent))
  for f in non_existent:
    print(' %s' % f)
  expected, unexpected = relevant_files(files, ROOT_DIR + '/')
  if unexpected:
    print('Unexpected: %d' % len(unexpected))
    for f in unexpected:
      print(' %s' % f)
  # Collapse fully-used directories into single entries.
  simplified = extract_directories(expected, ROOT_DIR)
  print('Interesting: %d reduced to %d' % (len(expected), len(simplified)))
  for f in simplified:
    print(' %s' % f)
  return 0
def main():
  """CLI wrapper: expects a test name followed by the command to trace."""
  if len(sys.argv) < 3:
    print >> sys.stderr, (
        'Usage: strace_inputs.py [testname] [cmd line...]\n'
        '\n'
        'Example:\n'
        '  ./strace_inputs.py base_unittests testing/xvfb.py out/Release '
        'out/Release/base_unittests')
    return 1
  return strace_inputs(sys.argv[1], sys.argv[2:])
if __name__ == '__main__':
  sys.exit(main())
| Python | 0.000008 | |
e94192a4c549e46ae0a155dbfa634ebde992903a | Create netntlm2hashcat.py | netntlm2hashcat.py | netntlm2hashcat.py | #!/usr/bin/env python
import sys
import re
import argparse
# Arg Input (Like a pirate)
p = argparse.ArgumentParser(description='Convert NetNTLM John Hashes to Hashcat Format')
p.add_argument('-i','--hash',action='store_true',help='Enter one-time hash input mode',required=False)
p.add_argument('-f','--file',dest='file',help='Path to file containing multiple hashes',required=False,default="")
p.add_argument('-o','--output',dest='output',help='File path to save the converted hashes',required=False)
a = p.parse_args()
# RegEx to re-arrange the hash
# Captures user:$TYPE$challenge$response and reorders it for hashcat
# (user::::response:challenge) via the \1::::\4:\3 substitution below.
reg = re.compile('(.*?):(\$.*?)\$(.*?)\$(.*)')
if a.hash:
    try:
        hash = raw_input("Enter your hash:\n")
        if hash:
            print reg.sub(r'\1::::\4:\3',hash)
    except KeyboardInterrupt:
        sys.exit("\n")
    except:
        sys.exit("Error: Something is broken\n")
elif a.file:
    try:
        with open(a.file) as temp:
            for line in temp:
                outhash = reg.sub(r'\1::::\4:\3',line)
                outhash = outhash.rstrip('\n\n')
                if a.output is None:
                    print outhash
                else:
                    # NOTE(review): the output file is reopened with mode 'w'
                    # for every input line, so only the last converted hash
                    # survives — open once before the loop (mode 'a' or a
                    # single handle) to keep them all.
                    with open(a.output,'w') as f:
                        f.write(outhash)
                        f.close()
    except KeyboardInterrupt:
        sys.exit("\n")
    except:
        # NOTE(review): bare except also hides non-IO errors as
        # "Input file doesn't exist".
        sys.exit("Error: Input file doesn't exist.\n")
else:
    p.print_help()
| Python | 0 | |
e3b025ae738b6aff8fb873bb41d1cc13e0845131 | Create geddit-backend.py | geddit-backend.py | geddit-backend.py | #!/usr/bin/python
import requests
import json
# Import modules for CGI handling
import cgi, cgitb
# Create instance of FieldStorage
form = cgi.FieldStorage()
# Get data from fields
user_title = form.getvalue('search_title')
print "Content-type: text/html\n\n";
# Setting attributes to send to Wikipedia API
baseurl = 'http://en.wikipedia.org/w/api.php'
search_atts = {}
search_atts['action'] = 'query'
search_atts['list'] = 'search'
search_atts['srwhat'] = 'text'
search_atts['format'] = 'json'
search_atts['srsearch'] = user_title
search_resp = requests.get(baseurl, params = search_atts)
search_data = search_resp.json()
# Best match is the first search hit.
title = search_data["query"]["search"][0]["title"]
# Make the title with no space which will be needed for making a url link to send for summary
title_w_no_space = ""
for i in title:
    if i==" ":
        title_w_no_space = title_w_no_space + "_"
    else:
        title_w_no_space = title_w_no_space + i
# Getting related topics using the result given by Wikipedia API
# (all hits except the first, which is the main article itself).
topics = []
for key in search_data["query"]["search"]:
    topics.append (key["title"])
topics = topics [1:len(topics)]
# Summarizing the content:
# setting attributes for to send to Smmry API
link_for_smmry = 'https://en.wikipedia.org/wiki/' + title_w_no_space
smmry_base_url = 'http://api.smmry.com/'
#smmry_atts = {}
#smmry_atts ['SM_URL'] = 'https://en.wikipedia.org/wiki/Guyana'
#smmry_atts ['SM_API_KEY'] = '6F297A53E3' # represents your registered API key.
# Optional, X represents the webpage to summarize.
#smmry_atts ['SM_LENGTH'] = N # Optional, N represents the number of sentences returned, default is 7
#smmry_atts ['SM_KEYWORD_COUNT'] = N # Optional, N represents how many of the top keywords to return
#smmry_atts ['SM_QUOTE_AVOID'] # Optional, summary will not include quotations
#smmry_atts ['SM_WITH_BREAK'] # Optional, summary will contain string [BREAK] between each sentence
# NOTE(review): API key is hardcoded in the URL below — move to config.
api_key_link = '&SM_API_KEY=6F297A53E3&SM_URL='
api_lenght = 'SM_LENGTH=7&SM_WITH_BREAK'
#print api_key_link
api_link = smmry_base_url + api_lenght + api_key_link + link_for_smmry
#smmry_resp = requests.get('http://api.smmry.com/&SM_API_KEY=6F297A53E3&SM_URL=https://en.wikipedia.org/wiki/Guyana')
smmry_resp = requests.get(api_link)
smmry_data = smmry_resp.json()
# Fallback HTML shown when the summary API response has no content.
content= '<p>Try adding another key word.</p><a style="color:white;" id="backbtn" href="#" onclick="myFunction()" >Go back.</a>'
try:
    content = smmry_data['sm_api_content']
except:
    pass
# Replace non-ASCII characters with '?' so the py2 print below cannot fail.
content_with_non_ascii = ""
for word in content:
    if ord(word) < 128:
        content_with_non_ascii+=word
    else:
        content_with_non_ascii+= "?"
if len(content_with_non_ascii) >0:
    content = content_with_non_ascii
# replacing "[BREAK]"s with a new line
while "[BREAK]" in content:
    length = len (content)
    break_position = content.find("[BREAK]")
    content = content [0:break_position] + "<br><br>" + content [break_position+7: length]
# Emit the result page. NOTE(review): content is interpolated into HTML
# without escaping — consider cgi.escape() for untrusted text.
print '<div id="all-cont-alt"><div class="select-nav"><div id="nav-top-main"><a id="backbtn" href="#" onclick="myFunction()" ><i style=" position: relative;margin-left: 25px;background-color: #00cfb9;padding: 13px;top: 74px;border-radius: 16px;color: #ffffff;text-align: left;" class= "fa fa-chevron-left fa-2x"></i></a><h1>Geddit</h1></div></div>'
print '<div id="loaddddd"></div><div id="contentss">'
print '<h1 id="user-title">'
print user_title
print "</h1>"
print content
print '</div></div>'
print '<h3 class="related">Related Topics</h3>'
print '<div id="rel-holder">'
for key in topics:
    if all(ord(c) < 128 for c in key):
        print '<h5 class="related-topics" onclick="relatedFunction();">'
        print key
        print '</h5>'
    else:
        pass
print '</div>'
| Python | 0 | |
e6e5fbb671c2539f4f82c6eaca51fbf400133482 | Write a silly Python script to compute some hard coded info from the generated ARM match table, which is substantially more efficient than dealing with tblgen. | utils/Target/ARM/analyze-match-table.py | utils/Target/ARM/analyze-match-table.py | #!/usr/bin/env python
def analyze_match_table(path):
    """Parse the tblgen-generated ARM MatchTable in *path* and print, for
    every mnemonic, the C++ conditions for carry-setting (CCOut) and
    non-condition-code mnemonics."""
    # Extract the instruction table.
    data = open(path).read()
    start = data.index("static const MatchEntry MatchTable")
    end = data.index("\n};\n", start)
    lines = data[start:end].split("\n")[1:]
    # Parse the instructions.
    # Each entry looks like: { code, "mnemonic", converter, { items }, { _, features } }
    insns = []
    for ln in lines:
        # Strip the outer braces, then split off the two inner brace groups.
        ln = ln.split("{", 1)[1]
        ln = ln.rsplit("}", 1)[0]
        a,bc = ln.split("{", 1)
        b,c = bc.split("}", 1)
        code, string, converter, _ = [s.strip()
                                      for s in a.split(",")]
        items = [s.strip() for s in b.split(",")]
        _,features = [s.strip() for s in c.split(",")]
        # Drop the surrounding quotes from the mnemonic string.
        assert string[0] == string[-1] == '"'
        string = string[1:-1]
        insns.append((code,string,converter,items,features))
    # For every mnemonic, compute whether or not it can have a carry setting
    # operand and whether or not it can have a predication code.
    mnemonic_flags = {}
    for insn in insns:
        mnemonic = insn[1]
        items = insn[3]
        flags = mnemonic_flags[mnemonic] = mnemonic_flags.get(mnemonic, set())
        flags.update(items)
    mnemonics = set(mnemonic_flags)
    ccout_mnemonics = set(m for m in mnemonics
                          if 'MCK_CCOut' in mnemonic_flags[m])
    condcode_mnemonics = set(m for m in mnemonics
                             if 'MCK_CondCode' in mnemonic_flags[m])
    noncondcode_mnemonics = mnemonics - condcode_mnemonics
    # Emit '||'-joined C++ comparisons ready to paste into the parser.
    print ' || '.join('Mnemonic == "%s"' % m
                      for m in ccout_mnemonics)
    print ' || '.join('Mnemonic == "%s"' % m
                      for m in noncondcode_mnemonics)
def main():
    """Locate ARMGenAsmMatcher.inc (via llvm-config when no argument is
    given, else from argv[1]) and analyze it."""
    import sys
    if len(sys.argv) == 1:
        import os
        from lit.Util import capture
        # Ask llvm-config where the build tree is.
        llvm_obj_root = capture(["llvm-config", "--obj-root"])
        file = os.path.join(llvm_obj_root,
                            "lib/Target/ARM/ARMGenAsmMatcher.inc")
    elif len(sys.argv) == 2:
        file = sys.argv[1]
    else:
        raise NotImplementedError
    analyze_match_table(file)
if __name__ == '__main__':
    main()
| Python | 0.000138 | |
5b2c1650059f9e4b69b6bab1d8ce88177f449e02 | Add basic test for import | foyer/tests/test_external_forcefields.py | foyer/tests/test_external_forcefields.py | import pytest
def test_basic_import():
    """Smoke test: the external_forcefields plugin is exposed by foyer and
    its module imports cleanly."""
    import foyer
    assert 'external_forcefields' in dir(foyer)
    import foyer.external_forcefields
| Python | 0.000001 | |
e5d58cc795541b5e4e8f791a441a4369df17ee19 | Add first exercise | cuadradoDentroDeRangoDado.py | cuadradoDentroDeRangoDado.py | #!/usr/bin/env python
def main():
    """Prompt for two numbers and print the square of every integer in the
    inclusive range between them (Python 2 script)."""
    def cuadr(num):
        # Square of num.
        return num * num
    def nom_cuad(num):
        # Formatted "n -> n^2" line.
        return ("%d -> %d") % (num, cuadr(num))
    def promptCuadr():
        # NOTE(review): py2 input() evaluates the typed expression —
        # int(raw_input(...)) would be safer.
        myNum1 = input("Enter num1: ")
        myNum2 = input("Enter num2: ")
        minimum = min(myNum1, myNum2)
        maximum = max(myNum1, myNum2)
        # range() excludes the upper bound, so append it explicitly.
        arr = [nom_cuad(x) for x in range(minimum, maximum) + [maximum]]
        multiline = "\n".join(arr)
        print multiline
    print "==== Mostrar el cuadrado de los numeros dentro del rango introducido ===="
    promptCuadr()
    print "Operacion finalizada"
main()
| Python | 0.000142 | |
aab833a4a267ed46e83a5968e87d357ae3a5a12b | Add new DemoStream example corresponding to the LSL4Unity Project | utils/LSL_Tests/RecieveDemoStream.py | utils/LSL_Tests/RecieveDemoStream.py | """Example program to show how to read a marker time series from LSL."""
import sys
sys.path.append('./pylsl') # help python find pylsl relative to this example program
from pylsl import StreamInlet, resolve_stream
# first resolve an EEG stream on the lab network
targetStreamType = 'Unity.Quaternion'
print 'looking for an stream of type ' + targetStreamType
streams = resolve_stream('type', targetStreamType)
streamsFound = len(streams)
if (streamsFound > 0):
    print 'found ' + str(streamsFound)
else:
    print 'found none',
# create a new inlet to read from the stream
inlet = StreamInlet(streams[0])
# Read forever, overwriting a single console line per sample
# (\033[K clears the line; the trailing \r returns the cursor).
while True:
    sample, timestamp = inlet.pull_sample()
    if(sample):
        print "\033[K", str(timestamp) + ' Quaternion: ' + ' '.join(str(sample[x]) for x in range(0,len(sample))), "\r",
        sys.stdout.flush()
897371dac52c38b96b6a1a92cd8ce36e9b2d1003 | Add django admin page for HQOauthApplication | corehq/apps/hqwebapp/admin.py | corehq/apps/hqwebapp/admin.py | from django.contrib import admin
from corehq.apps.hqwebapp.models import HQOauthApplication
@admin.register(HQOauthApplication)
class HQOauthApplicationAdmin(admin.ModelAdmin):
    """Django admin for HQOauthApplication.

    Each application_* method below unwraps a field from the related
    `application` object so it can appear in list_display.
    """
    list_display = (
        "id", "application_id", "application_name", "application_user", "application_client_type",
        "application_authorization_grant_type"
    )
    def application_id(self, obj):
        return obj.application.id
    def application_name(self, obj):
        return obj.application.name
    def application_user(self, obj):
        # Shows the related user's id (not username).
        return obj.application.user.id
    def application_client_type(self, obj):
        return obj.application.client_type
    def application_authorization_grant_type(self, obj):
        return obj.application.authorization_grant_type
| Python | 0 | |
09a8f4efcfc99f7add4d055465de621a47f06ee8 | Add management command to sanitize 2fa sessions | corehq/apps/hqadmin/management/commands/clean_2fa_sessions.py | corehq/apps/hqadmin/management/commands/clean_2fa_sessions.py | from getpass import getpass
from importlib import import_module
from packaging import version
from pkg_resources import DistributionNotFound, get_distribution
from django.conf import settings
from django.core.cache import caches
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = (
        "Remove outdated/sensitive information from active Django sessions. "
        "See https://github.com/Bouke/django-two-factor-auth/security/advisories/GHSA-vhr6-pvjm-9qwf"
    )
    def add_arguments(self, parser):
        parser.add_argument(
            '--one-session',
            action='store_true',
            default=False,
            help='Lookup one session only (will prompt for a session key).',
        )
        parser.add_argument(
            '--dry-run',
            action='store_true',
            default=False,
            help='Count the number of sessions that would be affected, '
                 'but do not modify them.',
        )
    def handle(self, one_session=False, dry_run=False, **options):
        """Scan sessions and sanitize those containing 2FA wizard data.

        With --dry-run only counts affected sessions; with --one-session
        operates on a single, prompted-for session key.
        """
        if dry_run:
            print("DRY RUN sessions will not be modified")
        # Warn when the vulnerable library version is still installed:
        # sanitizing now would not prevent new sensitive data being written.
        tf_ver = get_two_factor_version()
        if tf_ver and version.parse(tf_ver) < version.parse("1.12"):
            print(f"WARNING old/insecure django-two-factor-auth version detected: {tf_ver}")
            print("Please run this tool again after upgrading.")
        else:
            print(f"found django-two-factor-auth version {tf_ver}")
        print("scanning sessions...")
        # i is pre-initialized so the summary works when no sessions exist.
        count = i = 0
        for i, session in enumerate(iter_sessions(one_session), start=1):
            if i % 10000 == 0:
                print(f"processed {i} sessions")
            if has_sensitive_info(session):
                count += 1
                if not dry_run:
                    sanitize(session)
        if dry_run:
            print(f"DRY RUN {count} of {i} sessions need to be sanitized")
        else:
            print(f"Sanitized {count} of {i} sessions")
def sanitize(session):
    """Strip the sensitive wizard step data from *session* and persist it.

    ``has_sensitive_info`` reports True when *any* wizard login view carries
    the sensitive keys, so individual views may lack one or both of them.
    Use ``dict.pop`` with a default instead of ``del`` so those views do not
    raise KeyError (the original ``del`` would).
    """
    for data in iter_wizard_login_views(session):
        data.pop("step_data", None)
        data.pop("validated_step_data", None)
    session.save()
    # Post-condition: nothing sensitive remains in this session.
    assert not has_sensitive_info(session)
def iter_sessions(one_session):
    """Iterate over one or all existing django sessions

    Assumes that redis is the default cache in which all sessions are stored.

    :param one_session: when True, prompt for a single session key and yield
        only that session's store.
    """
    assert settings.SESSION_ENGINE == "django.contrib.sessions.backends.cache", \
        f"unsupported session engine: {settings.SESSION_ENGINE}"
    engine = import_module(settings.SESSION_ENGINE)
    if one_session:
        # getpass keeps the session key off the terminal/shell history.
        session_key = getpass(prompt="Session key: ")
        yield engine.SessionStore(session_key)
        return
    cache = caches[settings.SESSION_CACHE_ALIAS]
    # Cache keys look like "<cache_key_prefix><session_key>"; strip the
    # prefix to recover the raw session key for SessionStore.
    prefix_length = len(engine.SessionStore.cache_key_prefix)
    for key in cache.iter_keys(engine.SessionStore.cache_key_prefix + "*"):
        session_key = key[prefix_length:]
        yield engine.SessionStore(session_key)
def has_sensitive_info(session):
    """Return True when any wizard login view in *session* still stores
    password data under either of the known sensitive key paths."""

    def contains_path(node, path):
        # Walk nested dicts along *path*; a missing key or a non-dict
        # value at any level means the path is absent.
        for segment in path:
            if not isinstance(node, dict) or segment not in node:
                return False
            node = node[segment]
        return True

    for view_data in iter_wizard_login_views(session):
        if contains_path(view_data, STEP_DATA_PATH):
            return True
        if contains_path(view_data, VALIDATED_STEP_DATA_PATH):
            return True
    return False
def iter_wizard_login_views(session):
    """Yield the per-wizard state dicts for login views stored in *session*.

    Matching keys look like ``wizard_<name>_login_view``.
    """
    matches = (
        value
        for key, value in session.items()
        if key.startswith("wizard_") and key.endswith("_login_view")
    )
    yield from matches
# Nested key paths (outer -> inner) under a wizard login view's state dict
# where sensitive password step data may be stored; used by
# has_sensitive_info/sanitize above.
STEP_DATA_PATH = ["step_data", "auth", "auth-password"]
VALIDATED_STEP_DATA_PATH = ["validated_step_data", "auth", "password"]
def get_two_factor_version():
    """Return the installed django-two-factor-auth version string, or None
    when the distribution is not installed."""
    try:
        return get_distribution("django-two-factor-auth").version
    except DistributionNotFound:
        return None
| Python | 0.000001 | |
fb884d3453b42b68aa7ecc7b0523bf1460b6b9e0 | Add missing EFS patch | scripts/patches/efs.py | scripts/patches/efs.py | patches = [
{
"op": "replace",
"path": "/ResourceTypes/AWS::EFS::AccessPoint/Properties/AccessPointTags/ItemType",
"value": "Tag",
},
{
"op": "replace",
"path": "/ResourceTypes/AWS::EFS::FileSystem/Properties/FileSystemTags/ItemType",
"value": "Tag",
},
]
| Python | 0.000001 | |
1ae811c79b1cbc28b2f71e8f2bb01b44cc3aa2b9 | Improve import malware hashes cron | cron/import_malware_hashes.py | cron/import_malware_hashes.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
# Copyright (c) 2017 Mozilla Corporation
#
# Contributors:
# Brandon Myers bmyers@mozilla.com
import os
import sys
from configlib import getConfig, OptionParser
from datetime import datetime
from datetime import timedelta
from pytx.access_token import access_token
from pytx import Malware
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../lib'))
from utilities.logger import logger, initLogger
from utilities.toUTC import toUTC
from elasticsearch_client import ElasticsearchClient
from state import State
def pull_malware_hashes(since_date, until_date):
    """Query ThreatExchange for malware records between the two dates and
    wrap each record in an Elasticsearch-ready document.

    Returns a list of dicts shaped {'created_on': <iso ts>, 'details': <record>}.
    """
    params = {
        'since': str(since_date),
        'until': str(until_date),
        'full_response': True,
    }
    logger.info('Querying threat exchange with params {}'.format(params))
    response = Malware.objects(**params)
    # Timestamp each document at build time.
    return [
        {
            'created_on': toUTC(datetime.now()).isoformat(),
            'details': record,
        }
        for record in response['data']
    ]
def main():
    """Sync recent ThreatExchange malware hashes into Elasticsearch.

    Pulls records from the last recorded run (default: 2 days back when no
    state file exists) up to now, indexes them, then persists the new
    watermark in the state file.
    """
    logger.info('Connecting to Elasticsearch')
    client = ElasticsearchClient(options.esservers)
    logger.info('Connecting to threat exchange')
    access_token(options.appid, options.appsecret)

    state = State(options.state_file_name)
    now_iso = toUTC(datetime.now()).isoformat()
    # Fallback window when there is no previous run recorded.
    default_since = (toUTC(datetime.now()) - timedelta(days=2)).isoformat()
    since_date = state.data.get('lastrun', default_since)

    for doc in pull_malware_hashes(since_date=since_date, until_date=now_iso):
        client.save_object(index='threat-exchange', doc_type='malware_hashes', body=doc)

    # Record the watermark only after a successful sync.
    state.data['lastrun'] = now_iso
    state.save()
def initConfig():
    """Populate the module-global `options` from the config file, with defaults."""
    # output/logging options
    options.output = getConfig('output', 'stdout', options.configfile)
    options.sysloghostname = getConfig('sysloghostname', 'localhost', options.configfile)
    options.syslogport = getConfig('syslogport', 514, options.configfile)
    # state file defaults to "<script name>.state" next to the script
    options.state_file_name = getConfig('state_file_name', '{0}.state'.format(sys.argv[0]), options.configfile)
    # threat exchange options
    options.appid = getConfig('appid', '', options.configfile)
    options.appsecret = getConfig('appsecret', '', options.configfile)
    # elastic search server settings (comma-separated list)
    options.esservers = list(getConfig('esservers', 'http://localhost:9200', options.configfile).split(','))
if __name__ == '__main__':
parser = OptionParser()
parser.add_option("-c", dest='configfile', default=sys.argv[0].replace('.py', '.conf'), help="configuration file to use")
(options, args) = parser.parse_args()
initConfig()
initLogger(options)
main()
| Python | 0.000004 | |
58852970847bab30fee18e6ab824b24bc75d389f | Add the package-cleaning script | clean-packages.py | clean-packages.py | # WARNING: HERE BE DRAGONS
import yaml
import os.path
import urllib.parse
from copy import deepcopy
urllib.parse.uses_relative.append('github')
urllib.parse.uses_netloc.append('github')
with open('packages.yaml') as f:
package_db = yaml.load(f)
def strip_prefix(prefix, url):
    """Return the shortest suffix of *url* that urljoin(prefix, suffix)
    reproduces *url*; return *url* unchanged when no suffix works.

    Suffixes are tried shortest-first (the loop index walks backwards from
    the end of the string), so the most-relative form wins.
    """
    for start in range(len(url) - 1, 0, -1):
        candidate = url[start:]
        if urllib.parse.urljoin(prefix, candidate) == url:
            return candidate
    return url
def clean_package(value):
    """Normalize one package entry in place: resolve file URLs against any
    existing 'base', then re-extract a shared 'base' prefix when it is long
    enough to be worth factoring out.

    Returns the (possibly restored) entry dict.
    """
    # Keep an untouched copy so we can back out if no usable prefix exists.
    backup = deepcopy(value)
    if 'base' in value:
        old_base = value['base']
        del value['base']
        # Expand every file URL to absolute form before re-prefixing.
        value['files'] = {fn: urllib.parse.urljoin(old_base, val) for fn, val in value['files'].items()}
    prefix = os.path.commonprefix(value['files'].values())
    # Without a '/' the URLs share no path boundary; restore the original.
    if '/' not in prefix:
        return backup
    # Trim the common prefix back to the last path separator.
    prefix = prefix[0:prefix.rindex('/')+1]
    # Heuristic: a base shorter than ~12 characters is not worth the
    # indirection, so leave the files fully expanded in that case.
    if len(prefix) > 12:
        value['base'] = prefix
        value['files'] = {fn: strip_prefix(prefix, url) for fn, url in value['files'].items()}
    return value
package_db = {key: clean_package(value) for key, value in package_db.items()}
with open('packages.yaml', 'w') as f:
yaml.dump(package_db, f, default_flow_style = False)
| Python | 0 | |
2e0fbcb3ec1c2f0311d7ee4bbfeac33662f66089 | Monitor process using subprocess module | monitor_process.py | monitor_process.py | import subprocess
""" If the program is running "ps -ef | grep program" will return 2 or more rows
(one with the program itself and the second one with "grep program").
Otherwise, it will only return one row ("grep program")
You can trigger the alert on this if required.
"""
def monitor_process(name):
    """Run ``ps -ef | grep -c <name>`` and return grep's raw stdout.

    The count includes the grep process itself when it appears in the ps
    snapshot, so a value of 1 usually means the program is not running.
    """
    ps_cmd = ['ps', '-ef']
    grep_cmd = ['grep', '-c', '%s' % name]
    ps_proc = subprocess.Popen(ps_cmd, stdout=subprocess.PIPE, shell=False)
    grep_proc = subprocess.Popen(grep_cmd, stdin=ps_proc.stdout, stdout=subprocess.PIPE, shell=False)
    # Close our handle so ps receives a SIGPIPE if grep exits early.
    ps_proc.stdout.close()
    return grep_proc.communicate()[0]
if __name__== "__main__":
print monitor_process('firefox')
| Python | 0.000001 | |
0d956a8137f5bd2cc30f5163c717858e4a1172ee | delete a module never used | nova/scheduler/filters/image_props_filter.py | nova/scheduler/filters/image_props_filter.py | # Copyright (c) 2011-2012 OpenStack, LLC
# Copyright (c) 2012 Canonical Ltd
# Copyright (c) 2012 SUSE LINUX Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from nova.scheduler import filters
LOG = logging.getLogger(__name__)
class ImagePropertiesFilter(filters.BaseHostFilter):
    """Filter compute nodes that satisfy instance image properties.

    The ImagePropertiesFilter filters compute nodes that satisfy
    any architecture, hypervisor type, or virtual machine mode properties
    specified on the instance's image properties.  Image properties are
    contained in the image dictionary in the request_spec.
    """

    def _instance_supported(self, capabilities, image_props):
        """Return True when the host capabilities can run an instance whose
        image requests the given architecture / hypervisor type / vm mode."""
        img_arch = image_props.get('architecture', None)
        img_h_type = image_props.get('hypervisor_type', None)
        img_vm_mode = image_props.get('vm_mode', None)
        checked_img_props = (img_arch, img_h_type, img_vm_mode)

        # Supported if no compute-related instance properties are specified
        if not any(checked_img_props):
            return True

        supp_instances = capabilities.get('supported_instances', None)
        # Not supported if an instance property is requested but nothing
        # advertised by the host.
        # NOTE: the _() log strings interpolate local variable names via
        # locals(), so renaming locals in this method would break logging.
        if not supp_instances:
            LOG.debug(_("Instance contains properties %(image_props)s, "
                        "but no corresponding capabilities are advertised "
                        "by the compute node"), locals())
            return False

        def _compare_props(props, other_props):
            # Every requested (non-None) property must appear in the
            # advertised capability entry.
            for i in props:
                if i and i not in other_props:
                    return False
            return True

        for supp_inst in supp_instances:
            if _compare_props(checked_img_props, supp_inst):
                LOG.debug(_("Instance properties %(image_props)s "
                            "are satisfied by compute host capabilities "
                            "%(capabilities)s"), locals())
                return True
        LOG.debug(_("Instance contains properties %(image_props)s "
                    "that are not provided by the compute node "
                    "capabilities %(capabilities)s"), locals())
        return False

    def host_passes(self, host_state, filter_properties):
        """Check if host passes specified image properties.

        Returns True for compute nodes that satisfy image properties
        contained in the request_spec.
        """
        spec = filter_properties.get('request_spec', {})
        image_props = spec.get('image', {}).get('properties', {})

        capabilities = host_state.capabilities
        if not self._instance_supported(capabilities, image_props):
            LOG.debug(_("%(host_state)s does not support requested "
                        "instance_properties"), locals())
            return False
        return True
| # Copyright (c) 2011-2012 OpenStack, LLC
# Copyright (c) 2012 Canonical Ltd
# Copyright (c) 2012 SUSE LINUX Products GmbH
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.openstack.common import log as logging
from nova.scheduler import filters
from nova import utils
LOG = logging.getLogger(__name__)
class ImagePropertiesFilter(filters.BaseHostFilter):
"""Filter compute nodes that satisfy instance image properties.
The ImagePropertiesFilter filters compute nodes that satisfy
any architecture, hpervisor type, or virtual machine mode properties
specified on the instance's image properties. Image properties are
contained in the image dictionary in the request_spec.
"""
def _instance_supported(self, capabilities, image_props):
img_arch = image_props.get('architecture', None)
img_h_type = image_props.get('hypervisor_type', None)
img_vm_mode = image_props.get('vm_mode', None)
checked_img_props = (img_arch, img_h_type, img_vm_mode)
# Supported if no compute-related instance properties are specified
if not any(checked_img_props):
return True
supp_instances = capabilities.get('supported_instances', None)
# Not supported if an instance property is requested but nothing
# advertised by the host.
if not supp_instances:
LOG.debug(_("Instance contains properties %(image_props)s, "
"but no corresponding capabilities are advertised "
"by the compute node"), locals())
return False
def _compare_props(props, other_props):
for i in props:
if i and i not in other_props:
return False
return True
for supp_inst in supp_instances:
if _compare_props(checked_img_props, supp_inst):
LOG.debug(_("Instance properties %(image_props)s "
"are satisfied by compute host capabilities "
"%(capabilities)s"), locals())
return True
LOG.debug(_("Instance contains properties %(image_props)s "
"that are not provided by the compute node "
"capabilities %(capabilities)s"), locals())
return False
def host_passes(self, host_state, filter_properties):
"""Check if host passes specified image properties.
Returns True for compute nodes that satisfy image properties
contained in the request_spec.
"""
spec = filter_properties.get('request_spec', {})
image_props = spec.get('image', {}).get('properties', {})
capabilities = host_state.capabilities
if not self._instance_supported(capabilities, image_props):
LOG.debug(_("%(host_state)s does not support requested "
"instance_properties"), locals())
return False
return True
| Python | 0.000007 |
4a7680d783d38a4ee32b558f47532dc5b706cc05 | Add scheduler utils unit tests | nova/tests/scheduler/test_scheduler_utils.py | nova/tests/scheduler/test_scheduler_utils.py | # Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Scheduler Utils
"""
import mox
from nova.compute import utils as compute_utils
from nova.conductor import api as conductor_api
from nova import db
from nova import notifications
from nova.openstack.common.notifier import api as notifier
from nova.scheduler import utils as scheduler_utils
from nova import test
class SchedulerUtilsTestCase(test.NoDBTestCase):
    """Test case for scheduler utils methods."""

    def setUp(self):
        super(SchedulerUtilsTestCase, self).setUp()
        self.context = 'fake-context'

    def _test_set_vm_state_and_notify(self, request_spec,
                                      expected_uuids):
        """Record the expected per-uuid update/notify call sequence, then
        invoke set_vm_state_and_notify and let mox verify it.

        NOTE: mox expectations are order-sensitive; the statements below
        mirror the exact call order of the code under test.
        """
        updates = dict(vm_state='fake-vm-state')
        service = 'fake-service'
        method = 'fake-method'
        exc_info = 'exc_info'
        publisher_id = 'fake-publisher-id'

        self.mox.StubOutWithMock(compute_utils,
                                 'add_instance_fault_from_exc')
        self.mox.StubOutWithMock(notifications, 'send_update')
        self.mox.StubOutWithMock(db, 'instance_update_and_get_original')
        self.mox.StubOutWithMock(notifier, 'publisher_id')

        old_ref = 'old_ref'
        new_ref = 'new_ref'

        # One update + fault + notification per affected instance uuid.
        for uuid in expected_uuids:
            db.instance_update_and_get_original(
                self.context, uuid, updates).AndReturn((old_ref, new_ref))
            notifications.send_update(self.context, old_ref, new_ref,
                                      service=service)
            compute_utils.add_instance_fault_from_exc(
                self.context,
                mox.IsA(conductor_api.LocalAPI),
                new_ref, exc_info, mox.IsA(tuple))

            payload = dict(request_spec=request_spec,
                           instance_properties=request_spec.get(
                               'instance_properties'),
                           instance_id=uuid,
                           state='fake-vm-state',
                           method=method,
                           reason=exc_info)
            event_type = '%s.%s' % (service, method)
            notifier.publisher_id(service).AndReturn(publisher_id)
            notifier.notify(self.context, publisher_id,
                            event_type, notifier.ERROR, payload)

        self.mox.ReplayAll()

        scheduler_utils.set_vm_state_and_notify(self.context,
                                                service,
                                                method,
                                                updates,
                                                exc_info,
                                                request_spec,
                                                db)

    def test_set_vm_state_and_notify_rs_uuids(self):
        # uuids taken from request_spec['instance_uuids']
        expected_uuids = ['1', '2', '3']
        request_spec = dict(instance_uuids=expected_uuids)
        self._test_set_vm_state_and_notify(request_spec, expected_uuids)

    def test_set_vm_state_and_notify_uuid_from_instance_props(self):
        # fallback: uuid pulled from request_spec['instance_properties']
        expected_uuids = ['fake-uuid']
        request_spec = dict(instance_properties=dict(uuid='fake-uuid'))
        self._test_set_vm_state_and_notify(request_spec, expected_uuids)

    def _test_populate_filter_props(self, host_state_obj=True,
                                    with_retry=True):
        """Exercise populate_filter_properties with the host state expressed
        either as an object (attributes) or a plain dict."""
        if with_retry:
            filter_properties = dict(retry=dict(hosts=[]))
        else:
            filter_properties = dict()

        if host_state_obj:
            class host_state(object):
                host = 'fake-host'
                nodename = 'fake-node'
                limits = 'fake-limits'
        else:
            host_state = dict(host='fake-host',
                              nodename='fake-node',
                              limits='fake-limits')
        scheduler_utils.populate_filter_properties(filter_properties,
                                                   host_state)
        if with_retry:
            # So we can check for 2 hosts
            scheduler_utils.populate_filter_properties(filter_properties,
                                                       host_state)
        self.assertEqual('fake-limits', filter_properties['limits'])
        if with_retry:
            self.assertEqual([['fake-host', 'fake-node'],
                              ['fake-host', 'fake-node']],
                             filter_properties['retry']['hosts'])
        else:
            self.assertNotIn('retry', filter_properties)

    def test_populate_filter_props(self):
        self._test_populate_filter_props()

    def test_populate_filter_props_host_dict(self):
        self._test_populate_filter_props(host_state_obj=False)

    def test_populate_filter_props_no_retry(self):
        self._test_populate_filter_props(with_retry=False)
| Python | 0.000004 | |
0ae60d170c3a8fd33fac3b1283e646a7018027df | Add expertise removal migration | qipr_approver/approver/migrations/0007_auto_20170227_1533.py | qipr_approver/approver/migrations/0007_auto_20170227_1533.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-27 15:33
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: removes the standalone Expertise
    # model and repoints Person.expertise at Descriptor instead.

    dependencies = [
        ('approver', '0006_auto_20170222_1424'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='expertise',
            name='created_by',
        ),
        migrations.RemoveField(
            model_name='expertise',
            name='last_modified_by',
        ),
        migrations.AlterField(
            model_name='person',
            name='expertise',
            field=models.ManyToManyField(to='approver.Descriptor'),
        ),
        migrations.DeleteModel(
            name='Expertise',
        ),
    ]
| Python | 0 | |
b629a8e6346359683e637fd8e2f34f1d704ad1bc | Add missing test | test/test_full.py | test/test_full.py | import numpy as np
from util.full import matrix
def assert_(this, ref):
    # Print both operands for easier debugging when the assertion fails
    # (Python 2 print statements).
    print this
    print ref
    # Element-wise approximate equality with numpy's default tolerances.
    assert np.allclose(this, ref)
def test_diag():
    # matrix.diag([1,1,1]) should produce the 3x3 identity matrix.
    ref = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    this = matrix.diag([1,1,1])
    assert_(this, ref)
| Python | 0.000383 | |
5e9b804ef20d71aa84cb4d3cdd8b3bad9863cf11 | add validator | sections/validators.py | sections/validators.py | import re
from django.core.validators import RegexValidator
# Validates section names: one leading ASCII letter followed by 1-19
# letters/digits (so 2-20 characters total).
# NOTE(review): the user-facing message omits the leading-letter and
# minimum-length-2 requirements that the regex enforces -- consider
# aligning the message with the pattern.
section_name_validator = RegexValidator(
    r'^[a-zA-Z][a-zA-Z0-9]{1,19}$',
    'This field can contain only characters a-zA-Z0-9 and be max 20 characters long',
    code='invalid'
)
| Python | 0.000005 | |
e7db8f3dc4d945185a99b5b62ae0b528959651ac | add python version | versioncheck/python_version.py | versioncheck/python_version.py | from invoke import task
from subprocess import call
import invoke
def check_invoke_version(ctx):
    """Verify the installed python-invoke meets the minimal version.

    Prints an upgrade hint and asks the user whether to continue when the
    installed version is too old.

    :param ctx: invoke context (unused; kept for task-call compatibility)
    :returns: True to continue (version is new enough, or the user chose to
              keep going with an old version); False when the user declined.
    """
    minimal_version = "0.15.0"

    # BUG FIX: compare versions numerically, not lexicographically -- as
    # plain strings "0.9.0" > "0.15.0", which wrongly passed old versions.
    def version_tuple(version_string):
        parts = []
        for part in version_string.split('.'):
            digits = ''.join(ch for ch in part if ch.isdigit())
            parts.append(int(digits) if digits else 0)
        return tuple(parts)

    if version_tuple(invoke.__version__) >= version_tuple(minimal_version):
        return True

    print("Your python-invoke version is too old (currently " + invoke.__version__ + "). Please update to version " + minimal_version + " or higher.")
    print("call: pip install invoke --upgrade")

    # BUG FIX: raw_input only exists on Python 2; fall back to input() on
    # Python 3 (the original also referenced undefined `yes`/`no`/`sys`).
    try:
        read_line = raw_input
    except NameError:
        read_line = input

    yes_answers = ('yes', 'y')
    no_answers = ('no', 'n')
    print("\nDo you want to resume with a old version? [YES/NO]?")
    while True:
        choice = read_line().lower()
        if choice in yes_answers:
            return True
        if choice in no_answers:
            return False
        print("Please respond with 'yes' or 'no'")
| Python | 0.000002 | |
39cbea7183a77495173b10aef4d9f6ac10ad15f6 | Add initial layout of resources | objectset/resources.py | objectset/resources.py | from django.conf.urls import patterns, url
from django.http import HttpResponse
from restlib2.resources import Resource
from restlib2.http import codes
from restlib2.params import Parametizer, param_cleaners
from preserialize.serialize import serialize
# Dunder method names implementing each binary set operation, keyed by the
# operation token used in the URL patterns below (and/or/xor/sub).
SET_OPERATIONS = {
    'and': '__and__',
    'or': '__or__',
    'xor': '__xor__',
    'sub': '__sub__',
}

# In-place counterparts of SET_OPERATIONS.
INPLACE_SET_OPERATIONS = {
    'and': '__iand__',
    'or': '__ior__',
    'xor': '__ixor__',
    'sub': '__isub__',
}
class SetParametizer(Parametizer):
    """Request-parameter schema: adds a boolean `embed` flag (default False)."""
    embed = False

    def clean_embed(self, value):
        # Coerce the raw GET value to a bool.
        return param_cleaners.clean_bool(value)
class BaseSetResource(Resource):
    """Common plumbing for the set resources: parameter parsing, serialize
    template construction, and queryset/object lookup.

    Subclasses supply `model` (the set model), the preserialize `template`
    and `object_template`, and the `form_class` used for create/update.
    """
    parametizer = SetParametizer

    model = None
    template = None
    object_template = None
    form_class = None

    def get_params(self, request):
        "Parse and clean the GET parameters for this request."
        return self.parametizer().clean(request.GET)

    def get_serialize_template(self, request, **kwargs):
        "Prepare and return the serialize template."
        # BUG FIX: copy the class-level template so repeated requests never
        # mutate shared state via setdefault below.
        template = dict(self.template or {})
        relation = self.model._set_object_rel

        if kwargs.get('embed', False):
            template.setdefault('exclude', [relation])
        elif self.object_template:
            template.setdefault('related', {})

            if relation not in template['related']:
                template['related'][relation] = self.object_template
        # BUG FIX: the template was built but never returned, so every
        # caller received None and serialize(**template) raised TypeError.
        return template

    def get_queryset(self, request):
        "Return the base queryset for this resource's model."
        return self.model.objects.all()

    def get_object(self, request, **kwargs):
        "Return the matching object, or None when it does not exist."
        try:
            return self.get_queryset(request).get(**kwargs)
        except self.model.DoesNotExist:
            pass
class SetsResource(BaseSetResource):
    "Collection endpoint: list all sets (GET) or create a new one (POST)."

    def get(self, request):
        params = self.get_params(request)
        template = self.get_serialize_template(request, **params)
        return serialize(self.get_queryset(request), **template)

    def post(self, request):
        form = self.form_class(request.data)

        if form.is_valid():
            instance = form.save()
            params = self.get_params(request)
            template = self.get_serialize_template(request, **params)
            return serialize(instance, **template)
        # NOTE(review): `codes.unprocessable_enity` looks like a typo of
        # `unprocessable_entity` -- confirm the attribute name in restlib2.
        return HttpResponse(dict(form.errors),
                            status=codes.unprocessable_enity)
class SetResource(BaseSetResource):
    """Single-set endpoint: fetch, update or delete one set by primary key.

    `is_not_found` runs before the verb handlers and stashes the looked-up
    instance on the request for them to use.
    """

    def is_not_found(self, request, response, pk):
        # BUG FIX: get_object takes the request as its first argument; it
        # was previously called as get_object(pk=pk), raising TypeError.
        instance = self.get_object(request, pk=pk)

        if instance is None:
            return True

        request.instance = instance

    def get(self, request, pk):
        # Guard against a None class-level template (the declared default).
        return serialize(request.instance, **(self.template or {}))

    def put(self, request, pk):
        form = self.form_class(request.data, instance=request.instance)

        if form.is_valid():
            form.save()
            return HttpResponse(status=codes.no_content)

        # NOTE(review): `codes.unprocessable_enity` may be a typo of
        # `unprocessable_entity` -- confirm against restlib2.
        return HttpResponse(dict(form.errors),
                            status=codes.unprocessable_enity)

    def delete(self, request, pk):
        request.instance.delete()
        return HttpResponse(status=codes.no_content)
class SetObjectsResource(BaseSetResource):
    # Placeholder for the endpoint listing the objects within a set
    # (routed at /<pk>/objects/ below); not yet implemented.
    pass
class SetOperationsResource(BaseSetResource):
    """Endpoint for applying chained set operations (and/or/xor/sub)
    between sets; routed at /<pk>/<op>/<pk>/... below.

    Not yet implemented -- the handler currently does nothing.
    """

    def post(self, request, pk, *args):
        # BUG FIX: `self` was missing from the method signature, so the
        # resource instance would have been bound as `request` on dispatch.
        pass
def get_url_patterns(resources):
    """Returns urlpatterns for the defined resources.

    `resources` is a dict corresponding to each resource:

        - `sets` => SetsResource
        - `set` => SetResource
        - `operations` => SetOperationsResource
        - `objects` => SetObjectsResource
    """
    return patterns(
        '',
        url(r'^$', resources['sets'](), name='sets'),
        url(r'^(?P<pk>\d+)/$', resources['set'](), name='set'),
        url(r'^(?P<pk>\d+)/objects/$', resources['objects'](), name='objects'),
        # NOTE(review): the repeated group already ends with '/', so the
        # extra trailing '/' makes this pattern require a double slash
        # before the end anchor -- verify the intended URL shape.
        url(r'^(?P<pk>\d+)/(?:(and|or|xor|sub)/(\d+)/)+/$',
            resources['operations'](), name='operations'),
    )
| Python | 0 | |
0dfe81fdbb89f65940c37437901d073021ecd899 | Add files via upload | chain_maker.py | chain_maker.py | import maya.cmds as cmds
import maya.mel as mel
import maya.OpenMaya as OpenMaya
import math
import re
from string import Template, zfill
from functools import partial
class Find_Out():
    '''
    multipurpose class for finding length of anything. Well, work in progress
    '''
    def edge_length(self, vertex_list):
        # World-space straight-line distance between the two selected
        # vertices; treated by the callers as the length of one chain link.
        # (Pure-python math.sqrt version; numpy is NOT required.)
        vtx_p=cmds.xform(vertex_list,q=True,t=True,ws=True)
        '''
        this is numpy version. reuse for machines with numpy for quicker calculations:
        vtx_p_array_a = np.array([[vtx_p[0]], [vtx_p[1]], [vtx_p[2]]])
        vtx_p_array_b = np.array([[vtx_p[3]], [vtx_p[4]], [vtx_p[5]]])
        dist = np.linalg.norm(vtx_p_array_a-vtx_p_array_b)
        '''
        dist = math.sqrt((vtx_p[3] - vtx_p[0])**2 + (vtx_p[4] - vtx_p[1])**2 + (vtx_p[5] - vtx_p[2])**2)
        return dist

    def curve_length(self, curve_sel):
        # Arc length of the given curve, via Maya's arclen command.
        find_curve_length = cmds.arclen(curve_sel)
        return find_curve_length
class Chain_Constrain():
    """Duplicates the chain-link geometry along a curve.

    Attaches the link to a motion path, keys a linearly increasing twist,
    then bakes one copy per frame with `snapshot` and groups the copies.
    """
    def __init__(self, curve_sel, vertex_list, chain_geo):
        self.curve_sel = curve_sel
        self.verts = vertex_list
        self.chain_geo = chain_geo
        self.find_length = Find_Out()

        self.link_length = self.find_length.edge_length(self.verts)
        self.chain_length = self.find_length.curve_length(self.curve_sel)
        # Number of whole links that fit along the curve.
        self.link_total = int(self.chain_length/self.link_length)
        self.motion_path_name = str(self.chain_geo) + '_Path'

        cmds.pathAnimation(self.chain_geo, name = self.motion_path_name, fractionMode = True, follow= True, followAxis = 'x',
                           upAxis = 'y', worldUpType = 'object', startTimeU = 1, endTimeU = self.link_total, c = self.curve_sel)
        # frontTwist ramps linearly from 0 to 60*link_total, i.e. ~60
        # degrees per frame/link (presumably so successive links rotate
        # against each other -- confirm visually).
        cmds.setKeyframe(self.motion_path_name + '.frontTwist', v = 0.0, t = 1 )
        cmds.setKeyframe(self.motion_path_name + '.frontTwist', v = 60.0*self.link_total, t = self.link_total )
        # Linear tangents give even spacing and twist between links.
        cmds.keyTangent(self.motion_path_name + '.uValue', itt = 'linear', ott = 'linear' )
        cmds.keyTangent(self.motion_path_name + '.frontTwist', itt = 'linear', ott = 'linear')
        # Bake one copy of the link geometry per frame along the path.
        cmds.snapshot( self.chain_geo, constructionHistory=True, startTime=1, endTime = self.link_total, increment=1, update = 'animCurve',
                      name = str(self.chain_geo) + '_snapShot' )
        self.chain_group = cmds.group( em=True, name=str(self.chain_geo) + '_geo_grp' )
        self.chain_list = cmds.listRelatives(str(self.chain_geo + '_snapShotGroup'))

        # Strip construction history and reparent the baked copies into
        # the chain group.
        for dummy_geo in self.chain_list:
            cmds.delete(icn = True, ch = True)
            cmds.parent(dummy_geo, self.chain_group)
class Spline_Rig_Chain():
    """Builds a joint chain along the curve and drives it with a spline IK."""
    def __init__(self, curve_sel, vertex_list):
        self.curve_sel = curve_sel
        self.verts = vertex_list
        self.find_length = Find_Out()

        self.link_length = self.find_length.edge_length(self.verts)
        self.chain_length = self.find_length.curve_length(self.curve_sel)
        self.link_total = int(self.chain_length/self.link_length)

        # Work on a rebuilt duplicate so the original curve keeps its CVs;
        # one span per two links.
        cmds.duplicate(self.curve_sel, n = 'buildCurve')
        cmds.rebuildCurve('buildCurve', ch = 1, rpo = 1, rt = 0, end = 1, kr = 2, kep = 1, kt = 0, kcp = 0, s = self.link_total/2, d = 3, tol = 0.01 )
        # CV count of a NURBS curve = degree + spans.
        self.num_cv = int(cmds.getAttr ('buildCurve.degree'))+ (cmds.getAttr ('buildCurve.spans'))

        # Drop a joint at every CV position; the first and last joints are
        # named Root/End so the ikHandle call below can find them.
        for dummy_cv in range(self.num_cv):
            dummy_cv_pos = (cmds.getAttr ('buildCurve.cv['+ str(dummy_cv) +']'))
            if dummy_cv == 0:
                cmds.joint(n=self.curve_sel+'_jointRoot',p = dummy_cv_pos[0])
            elif dummy_cv == self.num_cv - 1:
                cmds.joint(n=self.curve_sel+'_jointEnd', p = dummy_cv_pos[0])
            else:
                cmds.joint(n=self.curve_sel+'_joint_'+(str(dummy_cv)),p = dummy_cv_pos[0])

        # The rebuilt helper curve is no longer needed.
        cmds.delete('buildCurve')
        cmds.ikHandle( sj = (self.curve_sel+'_jointRoot'), ee = (self.curve_sel+'_jointEnd'), c = self.curve_sel,
                      sol = 'ikSplineSolver', scv = 0, pcv = 0, ccv = 0, ns = 4)
class Chain_Maker_UI():
    """Minimal window driving the chain build: the user types the curve
    name and selects two vertices on the link geometry, then hits Run."""
    def __init__(self):
        window = cmds.window( title="Chain Maker", iconName='ChnMk', widthHeight=(300, 100) )
        cmds.columnLayout( adjustableColumn=True )
        cmds.separator( style='single' )
        self.curve_sel_name = cmds.textFieldGrp( label = 'Curve Selection' )
        cmds.separator( style='single' )
        cmds.button( label='Run', command=partial(self.run_command, 1) )
        cmds.separator( style='single' )
        cmds.button( label='Exit', command=('cmds.deleteUI(\"' + window + '\", window=True)') )
        cmds.setParent( '..' )
        cmds.showWindow( window )

    def curve_name(self, *args):
        # Read the curve name the user typed into the text field.
        self.curve_sel = cmds.textFieldGrp(self.curve_sel_name, query=True, text=True)
        return self.curve_sel

    def run_command(self, *args):
        """Build the chain along the curve, then smooth-bind links + joints."""
        curve_sel = self.curve_name()
        vert_sel_list = cmds.ls(sl=True, fl = True)
        # Selection entries look like "geo.vtx[n]"; the part before the
        # dot is the name of the link geometry.
        if '.' in vert_sel_list[0]:
            dummy_item_index = vert_sel_list[0].index('.')
            self.geo_name = vert_sel_list[0][0:dummy_item_index]
        Chain_Constrain(curve_sel, vert_sel_list, self.geo_name)
        Spline_Rig_Chain(curve_sel, vert_sel_list)
        self.chain_list = cmds.listRelatives(str(self.geo_name + '_geo_grp'))
        joint_list = cmds.ls(type = 'joint')
        # Select all baked link copies...
        for dummy_geo in self.chain_list:
            cmds.select(dummy_geo, add = True)
        # ...plus the joints belonging to this curve (skip ik handles).
        for dummy_joint in joint_list:
            if 'ikHandle' in dummy_joint:
                pass
            elif curve_sel in dummy_joint:
                cmds.select(dummy_joint, add=True)
        cmds.select('ikHandle*', d = True)
        # Smooth-bind the selection (MEL: bind skin by closest point).
        mel.eval('newBindSkin " -byClosestPoint -toSkeleton";')
Chain_Maker_UI()
| Python | 0 | |
ca9ed2756a12a2587f5b4d021597d2229196da50 | Add migration to add china region | api/common/migrations/0007_add_china_region.py | api/common/migrations/0007_add_china_region.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-24 21:52
from __future__ import unicode_literals
from django.db import migrations
def forwards(apps, schema_editor):
    """Create the 'China' Region if it does not already exist (idempotent)."""
    Region = apps.get_model('common.Region')

    region_to_add = 'China'
    try:
        Region.objects.get(name=region_to_add)
    except Region.DoesNotExist:
        Region.objects.create(name=region_to_add)
class Migration(migrations.Migration):
    # Data-only migration; the reverse operation is a no-op, leaving the
    # region in place on rollback.

    dependencies = [
        ('common', '0006_emailrecord'),
    ]

    operations = [
        migrations.RunPython(forwards, migrations.RunPython.noop)
    ]
| Python | 0 | |
7e113b5ec884c4a1e6da421a116e09b22939ae75 | Add old wiki backup script | python/graveyard/misc/mywiki-backup.py | python/graveyard/misc/mywiki-backup.py | #!/usr/bin/env python
import os
import os.path
import sys
import time
import urllib2
import urlparse
import lxml.etree
import lxml.html
debug = False
def main():
    """Mirror the Google Sites wiki locally: download the index page, read
    its table of contents, and fetch every linked page as a .html file."""
    site_url = 'http://sites.google.com/site/bmaupinwiki'
    #dest_path = '%s/Documents/misc/bmaupinwiki' % (home)
    dest_path = '{0}/Desktop/bmaupinwiki'.format(os.getenv('HOME'))
    ensure_folder_exists(dest_path)

    link_paths = []
    parsed_url = urlparse.urlsplit(site_url)

    # The index page itself is saved as home.html.
    outfile_name = 'home'
    write_url_to_file(
        site_url,
        '{0}.html'.format(
            os.path.join(dest_path, outfile_name)
        ),
        site_url,
        dest_path,
        check_timestamp=True,
        insert_encoding=True
    )

    # attempt to alleviate encoding issues
    parser = lxml.html.HTMLParser(encoding='utf-8')
    try:
        page = lxml.html.parse(site_url, parser).getroot()
    # in case there's a bug in lxml (http://stackoverflow.com/q/3116269/399105)
    except IOError:
        page = lxml.html.parse(urllib2.urlopen(site_url), parser).getroot()

    # iterate through all of the div elements in the main index page
    for element in page.iter('div'):
        # get the table of contents
        if element.get('class') == 'nav-toc-content':
            toc = element.find('ul')
            break

    # iterate through all of the links ("a" elements) in the table of contents
    for element in toc.iter('a'):
        link = element.get('href')
        # if the path of the URL is in the link
        if link.startswith(parsed_url.path):
            # remove it
            link = link.replace(parsed_url.path, '')
            # remove a starting slash
            if link.startswith('/'):
                link = link[1:]
            link_paths.append(link)

    if debug:
        link_paths.sort()
        print link_paths

    for link_path in link_paths:
        # drop everything after the final /, and that's the path
        path = link_path.rsplit('/', 1)[0]
        full_path = os.path.join(dest_path, path)
        ensure_folder_exists(full_path)

        url = '%s/%s' % (site_url, link_path)
        if debug:
            print url
            print '%s/%s.html' % (dest_path, link_path)
        write_url_to_file(
            url,
            '{0}.html'.format(
                os.path.join(dest_path, link_path)
            ),
            site_url,
            dest_path,
            check_timestamp=True,
            insert_encoding=True
        )
def ensure_folder_exists(path):
    """Create *path* as a directory if needed; abort when it exists as a file."""
    # make sure the path isn't an existing file
    if os.path.isfile(path):
        sys.exit('ERROR: folder %s is an existing file' % (path))
    # create the path if necessary
    elif not os.path.isdir(path):
        try:
            os.mkdir(path)
        except OSError, error:
            sys.exit('OSError: %s' % (error))
def write_url_to_file(url, outfile_path, site_url, dest_path,
                      check_timestamp=False, insert_encoding=False):
    """Download *url*, rewrite wiki-internal links to point at local
    ``.html`` files under *dest_path*, and save the page to *outfile_path*.

    If *check_timestamp* is true and the local copy is newer than the
    remote page, the download is skipped.  If *insert_encoding* is true a
    ``<meta charset="utf-8">`` tag is added to the page head.
    NOTE(review): Python 2 code (urllib2, ``except X, err`` syntax).
    """
    try:
        infile = urllib2.urlopen(url)
    except urllib2.HTTPError, error:
        sys.exit('HTTPError: %s' % (error))
    except urllib2.URLError, error:
        sys.exit('URLError: %s' % (error))
    # only check the timestamp if the destination file already exists
    if check_timestamp == True and os.path.isfile(outfile_path):
        # if local file modification time is greater than URL mod time
        if (os.path.getmtime(outfile_path) >
                time.mktime(infile.info().getdate('last-modified'))):
            infile.close()
            # exit the function and don't overwrite the local file
            return
    # force utf-8 decoding of the remote page
    parser = lxml.html.HTMLParser(encoding='utf-8')
    page = lxml.html.parse(infile, parser)
    if insert_encoding == True:
        head = page.getroot().find('head')
        meta = lxml.etree.SubElement(head, 'meta')
        meta.set('charset', 'utf-8')
    ''' TODO: make the path relative
    from this page:
    /home/bryan/Desktop/pile/bmaupinwiki/home/operating-systems/gnu-linux/rhel.html
    this link:
    /site/bmaupinwiki/home/operating-systems/gnu-linux/rhel/rhel-init-script-template
    converts to (absolute):
    /home/bryan/Desktop/pile/bmaupinwiki/home/operating-systems/gnu-linux/rhel/rhel-init-script-template.html
    relative:
    rhel/rhel-init-script-template.html
    '''
    # prefix that wiki-internal links share, e.g. "/site/bmaupinwiki/"
    old_link_prefix = '{0}/'.format(urlparse.urlparse(site_url).path)
    '''
    The links normally look like this:
    /site/bmaupinwiki/home/operating-systems/gnu-linux/rhel/rhel-init-script-template
    so update them
    '''
    for element in page.iter('a'):
        # skip anchors that have no href attribute at all
        if 'href' not in element.attrib:
            continue
        link = element.get('href')
        if link.startswith(old_link_prefix):
            # rewrite to an absolute local path with a .html extension
            element.set(
                'href',
                '{0}.html'.format(
                    os.path.join(
                        dest_path,
                        link.replace(old_link_prefix, '')
                    )
                )
            )
    outfile = open(outfile_path, 'w')
    outfile.write(
        lxml.etree.tostring(
            page.getroot(),
            pretty_print=True,
            method='html',
            doctype=page.docinfo.doctype
        )
    )
    outfile.close()
    infile.close()
if __name__ == '__main__':
main()
'''
TODO:
- Make links relative so we can move the wiki
- Update write_url_to_file and make it more modular
- Add way to delete old pages
- Download page css and images so they work too
'''
| Python | 0 | |
680b2cb1488f83aef5b45476e23bd93a90069872 | Create Content Loader app to Herd/DM standards - Configure Pyinstaller | herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py | herd-code/herd-tools/herd-content-loader/herdcl/hook-otags.py | hiddenimports = [
'numpy',
'pandas._libs.tslibs.timedeltas',
'pandas._libs.tslibs.nattype',
'pandas._libs.tslibs.np_datetime',
'pandas._libs.skiplist'
]
| Python | 0 | |
56d14e7b0386588afd39f2413fafe0b9ba41806d | Access checking unit tests for SlotsTransferAdminPage. | tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py | tests/app/soc/modules/gsoc/views/test_slot_transfer_admin.py | # Copyright 2013 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for slot transfer admin view."""
from tests import profile_utils
from tests import test_utils
class SlotsTransferAdminPageTest(test_utils.GSoCDjangoTestCase):
  """Unit tests for SlotsTransferAdminPage class.

  Only program hosts may view the slot transfer admin page; every other
  role must be denied access.
  """

  def setUp(self):
    self.init()
    self.url = '/gsoc/admin/slots/transfer/%s' % self.gsoc.key().name()

  def _assertAccessForbidden(self):
    """GET self.url and check that access is denied with an error page."""
    response = self.get(self.url)
    self.assertResponseForbidden(response)
    self.assertErrorTemplatesUsed(response)

  def testLoneUserAccessForbidden(self):
    self._assertAccessForbidden()

  def testStudentAccessForbidden(self):
    self.data.createStudent()
    self._assertAccessForbidden()

  def testMentorAccessForbidden(self):
    self.data.createMentor(self.org)
    self._assertAccessForbidden()

  def testOrgAdminAccessForbidden(self):
    self.data.createOrgAdmin(self.org)
    self._assertAccessForbidden()

  def testHostAccessGranted(self):
    self.data.createHost()
    response = self.get(self.url)
    self.assertResponseOK(response)
| Python | 0 | |
db446bf6dc4255f556f20235d2bdc28fa056ad46 | Add list_owners.py to list shared folder owners | list_owners.py | list_owners.py | """List all shared folders and their owners."""
import logging
import os
from typing import Iterator
import dropbox
from backup import File, setup_logging, get_members, get_files
def get_folder_members(team: dropbox.DropboxTeam,
                       folder: File) \
        -> Iterator[dropbox.sharing.UserMembershipInfo]:
    """Yield one UserMembershipInfo per user of a shared folder.

    Follows the paginated Dropbox API via the cursor until exhausted.
    Each item carries the user's access level (owner / editor / viewer).
    """
    user = team.as_user(folder.member.profile.team_member_id)
    page = user.sharing_list_folder_members(folder.file.shared_folder_id)
    yield from page.users
    while page.cursor:
        page = user.sharing_list_folder_members_continue(page.cursor)
        yield from page.users
def main():
    """Walk every team member's files, collect top-level shared folders,
    then report each folder's owner (or warn when none is found)."""
    setup_logging()
    logger = logging.getLogger('main')
    logger.info('Please wait up to tens of minutes...')
    shared_folders = set()
    team = dropbox.DropboxTeam(os.environ['DROPBOX_TEAM_TOKEN'])
    for member in get_members(team):
        logger.debug(f'Checking {member.profile.name.display_name}')
        for f in get_files(member, team):
            path = f.file.path_display
            logger.debug(f'Checking {path}')
            # Find out if it is a shared folder: non-shared entries lack
            # sharing_info and raise AttributeError below
            try:
                if not f.file.sharing_info.parent_shared_folder_id:
                    shared_folders.add(f)
            except AttributeError:
                logger.debug(f'{path} is not a shared folder')
    for sf in shared_folders:
        path = sf.file.path_display
        for member in get_folder_members(team, sf):
            name = member.user.display_name
            logger.debug(f'{path} : {name} : {member.access_type}')
            if member.access_type.is_owner():
                logger.info(f'{path} is owned by {name}')
                break
        else:
            # No owner found for the shared folder (loop finished
            # without hitting the break above)
            logger.warning(f'No owner found for {path}')
if __name__ == "__main__":
main()
| Python | 0 | |
1c094fe58df0fa57884752be7f64ee9755e433f1 | Create __init__.py | tests/__init__.py | tests/__init__.py | Python | 0.000429 | ||
6edc4700f755380b8b9099ae78619cbd225a2790 | add API tests | tests/api_test.py | tests/api_test.py | import overview, unittest, mock, json
from overview.services import Services
class ApiV1Test(unittest.TestCase):
    """Tests for the /api/v1 HTTP endpoints; Services is mocked out so
    only the JSON serialization layer is exercised."""

    def send_patch_json(self, url, json_data):
        # Helper: PATCH *json_data* (serialized) to *url* with a JSON
        # content type.
        return self.app.patch(url,
            data = json.dumps(json_data),
            headers = [('Content-Type', 'application/json')])

    def setUp(self):
        self.app = overview.app.test_client()

    @mock.patch.object(Services, 'docker_state')
    def test_get_docker_state(self, mock_docker_state):
        # GET /api/v1/docker must return whatever Services.docker_state
        # reports, serialized as JSON.
        mock_docker_state.return_value = {'message':'docker_state_by_services'}
        rv = self.app.get('/api/v1/docker')
        self.assertEqual(rv.data, '{\n  "message": "docker_state_by_services"\n}')

    @mock.patch.object(Services, 'states')
    def test_get_services_state(self, mock_services_state):
        # GET /api/v1/services mirrors Services.states as JSON.
        mock_services_state.return_value = {'message':'services_state'}
        rv = self.app.get('/api/v1/services')
        self.assertEqual(rv.data, '{\n  "message": "services_state"\n}')

    @mock.patch.object(Services, 'change')
    def test_patch_service_state(self, mock_services_change):
        # When the change is valid (from services.change perspective)
        # Services.change returns None on success, an error string on failure.
        mock_services_change.return_value = None
        rv = self.send_patch_json('/api/v1/services/serviceId',
            { 'state': Services.STATE_RUNNING })
        self.assertEqual(rv.data,
            '{\n  "message": "Correctly applied. Change in progress."\n}')
        # Verify that the change has been given
        mock_services_change.assert_called_with('serviceId', Services.STATE_RUNNING)

        # When the change is invalid (from services.change perspective)
        mock_services_change.return_value = 'error description'
        rv = self.send_patch_json('/api/v1/services/serviceId',
            { 'state': Services.STATE_RUNNING })
        self.assertEqual(rv.data,
            '{\n  "error": "error description", \n'
            '  "message": "This change cannot be made"\n}')
0347d82e55382b9618158c4c5809c360e729c245 | Create neworld_client.py | _src/om2py4w/4wex0/neworld_client.py | _src/om2py4w/4wex0/neworld_client.py | #/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
# sys.setdefaultencoding() does not exist, here!
reload(sys) # Reload does the trick!
sys.setdefaultencoding('UTF8')
from lxml import html
import requests
def read_note():
    """Fetch the note page from the local server and return the text of
    every <div class="note_content"> element (a list of strings)."""
    page = requests.get('http://localhost:8080/neworld')
    tree = html.fromstring(page.content)
    note_content = tree.xpath('//div[@class="note_content"]/text()')
    return note_content
def write_note(mynote):
    """POST *mynote* to the local server as the 'notes' form field."""
    wpage = requests.post('http://localhost:8080/neworld', data = {'notes': mynote})
def main():
    """Simple REPL: 'q' quits, 'r' prints the stored notes, anything
    else is sent to the server as a new note.
    NOTE(review): Python 2 only (raw_input, print statement)."""
    while True:
        mynote = raw_input('>>> ')
        if mynote == "q":
            print ("Thanks for writing.")
            break
        elif mynote =="r":
            print read_note()
        else:
            write_note(mynote)
if __name__ == "__main__":
main()
| Python | 0.000001 | |
faacc6dcef31cb22a87cd3184824b9785b21fdef | Jiffy application to test indexing of reflections from other experiments in an n^2 manner, to see if a matrix from one experiment has good predictive power for another as: | command_line/griddex.py | command_line/griddex.py | from __future__ import absolute_import, division, print_function
import libtbx.phil
import libtbx.load_env
help_message = '''
Cross reference indexing solutions.
Examples::
%s expts0.json refl0.json
''' % libtbx.env.dispatcher_name
phil_scope = libtbx.phil.parse("""
d_min = None
.type = float(value_min=0.0)
""")
def test_index(experiment, reflections):
    """Return the RMS distance (in h,k,l space) between the fractional
    Miller indices predicted by *experiment*'s crystal model and their
    nearest integers -- a measure of the model's predictive power for
    this reflection set.  Lower is better; 0 would be perfect indexing.
    """
    from dials.algorithms.indexing import indexer

    # map reflections to reciprocal space from image space
    refl = indexer.indexer_base.map_spots_pixel_to_mm_rad(
        reflections, experiment.detector, experiment.scan)
    indexer.indexer_base.map_centroids_to_reciprocal_space(
        refl, experiment.detector, experiment.beam, experiment.goniometer)

    # now compute fractional indices - in Python rather than trying to push
    # everything to C++ for the moment
    from scitbx import matrix
    ub = matrix.sqr(experiment.crystal.get_A())
    rub = ub.inverse()

    from dials.array_family import flex
    hkl_real = flex.vec3_double(len(reflections))

    # NOTE(review): this reads reflections['rlp'] although the mapping
    # above wrote to `refl` -- presumably the same underlying table is
    # mutated in place; confirm against the dials indexer API.
    for j, rlp in enumerate(reflections['rlp']):
        hkl_real[j] = rub * rlp

    hkl = hkl_real.iround()

    # sum of squared deviations of fractional indices from integers
    ms = 0.0
    for (_h, _k, _l), (_hr, _kr, _lr) in zip(hkl, hkl_real):
        ms += (_hr - _h) ** 2 + (_kr - _k) ** 2 + (_lr - _l) ** 2

    import math
    return math.sqrt(ms / len(reflections))
def run(args):
    """Parse experiments/reflections from *args*, cross-index every
    reflection set with every crystal model, and print the n x n matrix
    of RMS fractional-index deviations (column j = experiment, row k =
    reflection set)."""
    from dials.util.options import OptionParser
    from dials.util.options import flatten_experiments
    from dials.util.options import flatten_reflections
    import libtbx.load_env

    usage = "%s [options] datablock.json reflections.pickle" % (
        libtbx.env.dispatcher_name)

    parser = OptionParser(
        usage=usage,
        phil=phil_scope,
        read_experiments=True,
        read_reflections=True,
        check_format=False,
        epilog=help_message)

    params, options = parser.parse_args(show_diff_phil=True)
    experiments = flatten_experiments(params.input.experiments)
    reflections = flatten_reflections(params.input.reflections)

    # one reflection table per experiment is required for the n^2 grid
    assert len(experiments) == len(reflections)
    nn = len(experiments)

    # FIXME check that all the crystals are in the primitive setting...

    # now compute grid of reciprocal RMSD's
    result = { }

    for j, expt in enumerate(experiments):
        for k, refl in enumerate(reflections):
            result[j, k] = test_index(expt, refl)

    # print matrix of results (header row = experiment indices)
    print('        ' + ''.join(['%7d' % j for j in range(nn)]))
    for k in range(nn):
        record = ''.join([' %6.3f' % result[j, k] for j in range(nn)])
        print('%8d' % k + record)
if __name__ == '__main__':
import sys
run(sys.argv[1:])
| Python | 0.999997 | |
3a4c922d353df5f5b3f3cabe24b04090b0a3fd08 | test the serve command | tests/test_cli.py | tests/test_cli.py | # Copyright 2013 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
from __future__ import unicode_literals
import pretend
import werkzeug.serving
from warehouse.cli import ServeCommand
def test_serve(monkeypatch):
    """ServeCommand must delegate to werkzeug.serving.run_simple,
    forwarding host/port/app and mapping its reloader/debugger options
    to the use_reloader/use_debugger keyword arguments."""
    # record every call made to the patched run_simple
    run_simple = pretend.call_recorder(
        lambda host, port, app, use_reloader, use_debugger: None,
    )
    monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple)

    # opaque sentinels -- only identity matters for the assertion below
    host, port, app, use_reloader, use_debugger = (
        pretend.stub() for x in range(5)
    )

    ServeCommand()(
        app, host, port,
        reloader=use_reloader,
        debugger=use_debugger,
    )

    assert run_simple.calls == [
        pretend.call(
            host, port, app,
            use_reloader=use_reloader,
            use_debugger=use_debugger,
        ),
    ]
| Python | 0.0001 | |
2299343d8b10658cc6682b23dbf9be9d5fd290f6 | Add unit test for data integrity. | tests/testdata.py | tests/testdata.py | import ConfigParser
import csv
import unittest
class DataTest(unittest.TestCase):
    """Integrity checks for the CSV data file referenced by app.config.
    NOTE(review): Python 2 code (ConfigParser module name, reader.next())."""

    def setUp(self):
        # Locate the data file via the [data] filename option of app.config.
        config = ConfigParser.RawConfigParser()
        config.read('../app.config')

        # Load the data from the csv into an array
        self.data = []
        with open('../data/%s' % config.get('data', 'filename'), 'rb') as csvfile:
            reader = csv.reader(csvfile)
            # Skip header and parse data
            reader.next()
            for row in reader:
                self.data.append([s.strip() for s in row])

    def test_complete(self):
        '''Ensure there are no day/country pairs missing data'''
        # Count rows per (date, country); then every pair in the cartesian
        # product of observed dates x observed countries must have >= 1 row.
        date_country = dict()
        dates = set()
        countries = set()
        for date, country, video_id in self.data:
            dates.add(date)
            countries.add(country)
            date_country[date] = date_country.get(date, {})
            date_country[date][country] = date_country[date].get(country, 0) + 1

        for date in dates:
            for country in countries:
                count = date_country.get(date,{}).get(country,0)
                # tuple comparison so the failure message names the pair
                self.assertNotEqual((date, country, count), (date, country, 0))
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
78aebc48763b15dedc3aee65a18a2a39f46e5c30 | add run module | flagon/run.py | flagon/run.py |
def run_simple(hostname, port, application, use_reloader=False,
               use_debugger=False):
    """Serve the WSGI callable *application* on hostname:port until
    interrupted.

    The original body was a stub (unused imports + ``pass``); this
    implements it with the stdlib wsgiref server.  *use_reloader* and
    *use_debugger* are accepted for API compatibility but are not
    supported by the wsgiref backend and are ignored.
    """
    from wsgiref.simple_server import make_server
    server = make_server(hostname, port, application)
    try:
        server.serve_forever()
    finally:
        # always release the listening socket
        server.server_close()
| Python | 0.000001 | |
ad6b9ebc0b9d9ec0ea5e24da240c09a5645dff2c | Add renamed file | flush_gui.pyw | flush_gui.pyw | # GUI component of the flush tool
import configparser
import logging
import sys

from PyQt4 import QtCore, QtGui, uic

import flush_tool
# set ui file
form_class = uic.loadUiType("flushtool_interface.ui")[0] # load ui file
class ErrorDialog(QtGui.QDialog):
    """Minimal dialog with a single OK button.

    Fixes two defects in the original: QVBoxLayout/QDialogButtonBox were
    referenced without the QtGui prefix (NameError at runtime), and the
    layout was assigned to ``self.layout``, shadowing QWidget.layout().
    """

    def __init__(self, parent=None):
        super(ErrorDialog, self).__init__(parent)
        # keep the layout in a local -- assigning to self.layout would
        # shadow the QWidget.layout() method
        layout = QtGui.QVBoxLayout(self)
        self.buttons = QtGui.QDialogButtonBox(
            QtGui.QDialogButtonBox.Ok, self)
        layout.addWidget(self.buttons)

    def showDialog(self):
        """Display the dialog (non-modal)."""
        self.show()
class AppWindow(QtGui.QMainWindow, form_class):
    """Main window: the user picks a destination equipment and two
    product codes; 'Generate' computes and displays the flush
    requirement via flush_tool."""

    def __init__(self, parent = None): # parent specification may not be necessary
        QtGui.QMainWindow.__init__(self, parent)
        self.setupUi(self)
        self.initUI()

    def initUI(self):
        """Show the window, wire signal handlers and fill the combos."""
        self.show()
        # bind buttons to event handlers
        self.button_generate.clicked.connect(self.button_generate_clicked)
        self.combo_destination.activated.connect(self.grab_destination_selection)
        # populate combo boxes with equipment names
        self.populate_combo_box(self.combo_destination)

    def populate_combo_box(self, combobox):
        """Add one entry per known piece of equipment to *combobox*."""
        for e in flush_tool.equipment:
            # bound call replaces the original unbound
            # QtGui.QComboBox.addItem(combobox, ...) form
            combobox.addItem(e.name)

    def grab_destination_selection(self):
        """Enable/disable the source combo and volume field depending on
        the department of the selected destination equipment."""
        selection = self.combo_destination.currentText()
        # initialise so an unmatched selection cannot raise NameError below;
        # None falls through to the default (disabled) branch
        department = None
        for e in flush_tool.equipment:
            if e.name == selection:
                department = e.area
                break
        if department == 'Blending':
            self.combo_source.setEnabled(True)
            self.linetext_volume.setEnabled(True)
            self.label_volume.setText('Blend Size (L)')
        elif department == 'Bulk Receiving':
            self.combo_source.setEnabled(True)
            self.linetext_volume.setEnabled(True)
            self.label_volume.setText('Receipt Size (L)')
        else:
            self.combo_source.setEnabled(False)
            self.linetext_volume.setEnabled(False)
            self.label_volume.setText('Volume (L)')

    def button_generate_clicked(self):
        """Controls the behaviour of the 'Generate' button: validates the
        user input, runs the flush calculation and updates the result
        labels (or shows an error dialog)."""
        # ensure integrity of user values
        prev_product_code = self.linetext_prev.text().strip()
        next_product_code = self.linetext_next.text().strip()
        equipment_destination_name = self.combo_destination.currentText()
        destination = None
        volume = 0
        for e in flush_tool.equipment:
            if e.name == equipment_destination_name:
                destination = e
                break

        # validate equipment selection
        if destination is None:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Could not find in loaded equipment for destination selection.")
            return
        # NEVER FORGET ABOUT BUFFER OVERFLOW
        if len(prev_product_code) > 15 or len(next_product_code) > 15:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Material code is too large.")
            return
        # check prev value isn't empty
        if prev_product_code is None or prev_product_code == "":
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Please fill in the 'Previous Material Code' field.")
            return
        # check next value isn't empty
        if next_product_code is None or next_product_code == "":
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Please fill in the 'Next Material Code' field.")
            return
        # convert codes grabbed from text fields into ints
        try:
            prev_product_code = int(prev_product_code)
            next_product_code = int(next_product_code)
        except ValueError:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Material codes must be integers.")
            return
        # we don't want to run calculations on identical codes
        if prev_product_code == next_product_code:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "Product codes are identical.")
            return
        # check to make sure provided codes correspond to a product in the database
        prev_product = flush_tool.find_match(prev_product_code)
        if prev_product is None:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "No match found for Previous Material Code")
            return
        next_product = flush_tool.find_match(next_product_code)
        if next_product is None:
            QtGui.QMessageBox.critical(self,
                "Invalid Input",
                "No match found for Next Material Code")
            return

        # if everything is kosher so far, launch the calculating script;
        # Packaging needs no volume, everything else requires a positive one
        if destination.area == 'Packaging':
            num_flush_cycles = flush_tool.generate_flush_factor(prev_product, next_product, destination)
        else:
            try:
                volume = float(self.linetext_volume.text())
                if volume <= 0:
                    QtGui.QMessageBox.critical(self,
                        "Invalid Input",
                        "Volume must be greater than zero.")
                    return
            except ValueError:
                QtGui.QMessageBox.critical(self,
                    "Invalid Input",
                    "Volume must be a number.")
                return
            num_flush_cycles = flush_tool.generate_flush_factor(prev_product, next_product, destination, volume)

        # logging requires the module-level `import logging` (the original
        # file never imported it, so these calls raised NameError)
        if num_flush_cycles is None:
            logging.critical("Fatal error: unable to calculate flush factor.")
        elif num_flush_cycles < 0:
            logging.critical("Flush factor is less than 0.")
        elif num_flush_cycles == 0:
            self.label_num_cycles.setValue(int(num_flush_cycles))
            self.label_cycle_volume.setText("0")
            self.label_material.setText("--")
            QtGui.QMessageBox.critical(self,
                "Similar Products",
                "The flush result is equal to zero. No flush necessary!")
        else:
            self.label_num_cycles.setValue(int(num_flush_cycles))
            self.label_cycle_volume.setText(str(destination.cycle_size))
            self.label_material.setText(str(destination.flush_material))
def main():
    """Initialise the backend, build the main window and run the Qt
    event loop until the application exits."""
    qt_app = QtGui.QApplication(sys.argv)
    # backend data must be loaded before the window queries it
    flush_tool.init_data()
    # keep a reference so the window is not garbage collected
    main_window = AppWindow(None)
    sys.exit(qt_app.exec_())
if __name__ == '__main__':
main()
| Python | 0.000002 | |
b9399dbdfff80fec21cfed926779b67589835047 | Create LettCombPhoneNum_002.py | leetcode/017-Letter-Combinations-of-a-Phone-Number/LettCombPhoneNum_002.py | leetcode/017-Letter-Combinations-of-a-Phone-Number/LettCombPhoneNum_002.py | class Solution(object):
def letterCombinations(self, digits):
"""
:type digits: str
:rtype: List[str]
"""
def comb(digits, d2l):
if not digits:
return [""]
res = []
for c in d2l[int(digits[0])]:
for suffix in comb(digits[1:], d2l):
res.append(c + suffix)
return res
if not digits:
return []
d2l = { 2: 'abc', 3: 'def', 4: 'ghi', 5: 'jkl',
6: 'mno', 7: 'pqrs', 8: 'tuv', 9: 'wxyz' }
return comb(digits, d2l)
| Python | 0.000001 | |
d92eff7e89e09167b126f99243986eae5792f705 | Add py-debtcollector (#25212) | var/spack/repos/builtin/packages/py-debtcollector/package.py | var/spack/repos/builtin/packages/py-debtcollector/package.py | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyDebtcollector(PythonPackage):
    """
    A collection of Python deprecation patterns and strategies that help you
    collect your technical debt in a non-destructive manner.
    """
    homepage = "https://docs.openstack.org/debtcollector/latest"
    pypi = "debtcollector/debtcollector-2.2.0.tar.gz"
    maintainers = ['haampie']
    version('2.2.0', sha256='787981f4d235841bf6eb0467e23057fb1ac7ee24047c32028a8498b9128b6829')
    depends_on('python@3.6:', type=('build', 'run'))
    depends_on('py-setuptools', type='build')
    # split range skips pbr 2.1.0 -- presumably mirrors the upstream
    # requirement pin; confirm against debtcollector's requirements.txt
    depends_on('py-pbr@2.0.0:2.0.999,2.1.1:', type='build')
    depends_on('py-six@1.10.0:', type=('build', 'run'))
    depends_on('py-wrapt@1.7.0:', type=('build', 'run'))
| Python | 0 | |
6ed82eb673c2a95f5c349ab45dd8d17280db91c9 | Conversion of string to number | BankOCR.py | BankOCR.py | numbers = '''
_ _ _ _ _ _ _ _
| | | _| _||_||_ |_ ||_||_|
|_| ||_ _| | _||_| ||_| _|
'''
class Converter:
    """Decode seven-segment-style ASCII-art digits (3 columns x 3 rows
    per digit) into integers.

    At construction time the module-level ``numbers`` reference art
    (digits 0-9) is split into per-digit cells; each of the three rows
    gets a small lookup table mapping its 3-character slice to an index,
    and the three row indices are packed into one ID per digit.
    """

    def __init__(self):
        reference = self.splitDigits(numbers)
        self.digitLineDicts = [{}, {}, {}]
        self.digitIdDict = {}
        # assign each distinct row pattern the next free index for its row
        for art in reference:
            for row, text in enumerate(art.split('\n')):
                table = self.digitLineDicts[row]
                if text not in table:
                    table[text] = len(table)
        # map each packed ID back to the digit value it represents
        for value, art in enumerate(reference):
            self.digitIdDict[self.generateID(art)] = value

    def generateID(self, digit):
        """Pack the three per-row indices of *digit* into one integer."""
        code = 0
        for row, text in enumerate(digit.split('\n')):
            code = code * 10 + self.digitLineDicts[row][text]
        return code

    def convertDigit(self, digit):
        """Return the numeric value of one 3x3 ASCII-art digit."""
        return self.digitIdDict[self.generateID(digit)]

    def splitDigits(self, code):
        """Split multi-digit art into a list of per-digit strings, each
        formatted as three newline-joined 3-character rows."""
        rows = [r for r in code.split('\n') if r]
        width = max(len(r) for r in rows)
        # pad every row to the full width so column slicing is safe
        rows = [r.ljust(width) for r in rows]
        count = width // 3
        return ['\n'.join(row[3 * i:3 * i + 3] for row in rows[:3])
                for i in range(count)]

    def convert(self, digits):
        """Yield the numeric value of each digit in the art string."""
        for art in self.splitDigits(digits):
            yield self.convertDigit(art)
# Demo: decode the reference art and print [0, 1, ..., 9].
c = Converter()
print(list(c.convert(numbers)))
| Python | 0.999999 | |
6bd4b7e4c2dac2817250f184114eea8c05fbefb7 | Add compat.py to get get_user_model working | cuser/compat.py | cuser/compat.py | from __future__ import unicode_literals
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import django
from django.utils.functional import lazy
__all__ = ['User', 'AUTH_USER_MODEL']
# Django 1.5+ compatibility
if django.VERSION >= (1, 5):
    # Custom user models exist from 1.5 on: resolve the configured model
    # lazily so this module can be imported before settings/apps are ready.
    AUTH_USER_MODEL = settings.AUTH_USER_MODEL
    try:
        from django.contrib.auth import get_user_model
        User = lazy(get_user_model, AUTH_USER_MODEL)
    except ImproperlyConfigured:
        # settings not configured yet; User is simply not exported
        pass
else:
    # pre-1.5: only the built-in auth.User model exists
    from django.contrib.auth.models import User
    AUTH_USER_MODEL = 'auth.User'
| Python | 0 | |
422390ff7eb4d97eaf0c5c1a1b250010ee766ec7 | Add tool for clean pyc files | tools/cleanPYC.py | tools/cleanPYC.py |
import re
import os
import sys
# Recursively delete compiled bytecode files under the directory given
# as the first command-line argument.
print("%s path\n" % sys.argv[0])
path = sys.argv[1]
for root, dirs, files in os.walk(path):
    for file_ in files:
        # str.endswith replaces the buggy regex ".*.pyc$": the unescaped
        # '.' made it also match names ending in "<any char>pyc"
        # (e.g. "apyc"), and endswith needs no regex at all.
        if file_.endswith(".pyc"):
            abs_file = os.path.join(root, file_)
            print("Clean %s" % abs_file)
            os.remove(abs_file)
| Python | 0 | |
3aacdb44210ca5af86bc9258eaecc1bbbda4ea7f | Implement colorization in it's own file | tools/colorize.py | tools/colorize.py | """
mbed SDK
Copyright (c) 2016 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
""" This python file is responsible for generating colorized notifiers.
"""
import sys
import re
from colorama import init
init()
colors = {
'none' : "",
'default' : "\033[.0m",
'bold' : "\033[.1m",
'underline' : "\033[.4m",
'blink' : "\033[.5m",
'reverse' : "\033[.7m",
'concealed' : "\033[.8m",
'black' : "\033[.30m",
'red' : "\033[.31m",
'green' : "\033[.32m",
'yellow' : "\033[.33m",
'blue' : "\033[.34m",
'magenta' : "\033[.35m",
'cyan' : "\033[.36m",
'white' : "\033[.37m",
'on_black' : "\033[.40m",
'on_red' : "\033[.41m",
'on_green' : "\033[.42m",
'on_yellow' : "\033[.43m",
'on_blue' : "\033[.44m",
'on_magenta' : "\033[.45m",
'on_cyan' : "\033[46m",
'on_white' : "\033[47m",
}
# Convert a color string from a string into an ascii escape code that will print
# that color on the terminal.
color_matcher = re.compile(r"(\w+)(\W+on\W+\w+)?")
def colorstring_to_escapecode(color_string):
match = re.match(color_matcher, color_string)
if match:
return colors[match.group(1)] + (colors[match.group(2).strip().replace(" ","_")] if match.group(2) else "")
else:
return corols['default']
# Wrap a toolchain notifier in a colorizer. This colorizer will wrap notifications
# in a color if the severity matches a color in the *color_map*.
def print_in_color_notifier(color_map, print_fn):
    """Decorate *print_fn* so that events whose severity has an entry in
    *color_map* are printed in that color, followed by a reset to the
    default color.  Colorization only happens when stdout is a tty."""
    def wrap(event, silent=False):
        stream = sys.stdout
        wants_color = (stream.isatty()
                       and 'severity' in event
                       and event['severity'] in color_map)
        if wants_color:
            stream.write(colorstring_to_escapecode(color_map[event['severity']]))
            print_fn(event, silent)
            stream.write(colorstring_to_escapecode('default'))
        else:
            print_fn(event, silent)
    return wrap
| Python | 0.000003 | |
0d0115ef5e088ed54a176e24cc94713b706f3d55 | include migration | awx/main/migrations/0015_v300_label_changes.py | awx/main/migrations/0015_v300_label_changes.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Label.organization: point it at main.Organization with
    related_name 'labels' and add help text."""
    dependencies = [
        ('main', '0014_v300_invsource_cred'),
    ]
    operations = [
        migrations.AlterField(
            model_name='label',
            name='organization',
            field=models.ForeignKey(related_name='labels', to='main.Organization', help_text='Organization this label belongs to.'),
        ),
    ]
| Python | 0.000111 | |
5db256e6ac4ee84e008afa8f94d767330e392709 | Increase coverage | test/test_vmcp.py | test/test_vmcp.py | # -*- coding: utf8 -*-
# This file is part of PyBossa.
#
# Copyright (C) 2013 SF Isle of Man Limited
#
# PyBossa is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyBossa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with PyBossa. If not, see <http://www.gnu.org/licenses/>.
import json
from mock import patch
from base import web, model, Fixtures, db, redis_flushall
import pybossa.vmcp as vmcp
from nose.tools import assert_equal, assert_raises
class TestAPI:

    def test_myquote(self):
        """myquote must pass URL-safe characters through unchanged and
        percent-encode everything else."""
        assert vmcp.myquote('a') == 'a', "Valid chars should not be quoted"
        assert vmcp.myquote('%') == '%25', "Non-Valid chars should be quoted"
| Python | 0 | |
0a3e00b27606eda26917c3c69b0344dc301502f0 | Revert "will this fix tests?" | tests/__init__.py | tests/__init__.py | # log_support setups the default Logger class
# and so we need to ensure that it is also
# setup for the tests
from lbrynet.core import log_support
| Python | 0 | |
3dbef22cee9ea83c7e80756037209334da237d4c | Remove unused compat types from compat.py | twython/compat.py | twython/compat.py | import sys
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    # prefer simplejson when installed (C speedups on older Pythons)
    import simplejson as json
except ImportError:
    import json

if is_py2:
    from urllib import urlencode, quote_plus
    try:
        from urlparse import parse_qsl
    except ImportError:
        # very old Pythons kept parse_qsl in the cgi module
        from cgi import parse_qsl

    # re-export so callers can use this module's basestring uniformly
    basestring = basestring
elif is_py3:
    from urllib.parse import urlencode, quote_plus, parse_qsl

    # py3 has no basestring; a tuple works with isinstance()
    basestring = (str, bytes)
| import sys
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

try:
    # prefer simplejson when installed (C speedups on older Pythons)
    import simplejson as json
except ImportError:
    import json

if is_py2:
    from urllib import urlencode, quote_plus
    try:
        from urlparse import parse_qsl
    except ImportError:
        # very old Pythons kept parse_qsl in the cgi module
        from cgi import parse_qsl

    # normalise the py2 string/number types under py3-style names
    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)
elif is_py3:
    from urllib.parse import urlencode, quote_plus, parse_qsl

    # identity aliases so both branches export the same names
    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
| Python | 0 |
f10049ae831570b54581c2a089218359febe5c50 | add command for exporting to csv | fecfilings/management/commands/fecfilings_to_csv.py | fecfilings/management/commands/fecfilings_to_csv.py | from django.core.management.base import NoArgsCommand
from fecfilings.models import Contributor
class Command(NoArgsCommand):
    """Management command: dump every Contributor as one CSV row to
    stdout.  NOTE(review): Python 2 only (print statement)."""

    def handle(self, **options):
        for c in Contributor.objects.all():
            print c.to_csv()
| Python | 0.000001 | |
23165cbd1ac8ba1528649c04b56d598664e1da8b | Enhance mysensors binary sensor device classes (#13367) | homeassistant/components/binary_sensor/mysensors.py | homeassistant/components/binary_sensor/mysensors.py | """
Support for MySensors binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.mysensors/
"""
from homeassistant.components import mysensors
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES, DOMAIN, BinarySensorDevice)
from homeassistant.const import STATE_ON
# Map MySensors presentation type names to Home Assistant binary_sensor
# device classes; values must appear in DEVICE_CLASSES to be reported.
SENSORS = {
    'S_DOOR': 'door',
    'S_MOTION': 'motion',
    'S_SMOKE': 'smoke',
    'S_SPRINKLER': 'safety',
    'S_WATER_LEAK': 'safety',
    'S_SOUND': 'sound',
    'S_VIBRATION': 'vibration',
    'S_MOISTURE': 'moisture',
}
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the MySensors platform for binary sensors."""
    # delegate device creation to the shared MySensors platform helper
    mysensors.setup_mysensors_platform(
        hass, DOMAIN, discovery_info, MySensorsBinarySensor,
        add_devices=add_devices)
class MySensorsBinarySensor(mysensors.MySensorsEntity, BinarySensorDevice):
    """Representation of a MySensors Binary Sensor child node."""

    @property
    def is_on(self):
        """Return True if the binary sensor is on."""
        return self._values.get(self.value_type) == STATE_ON

    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        pres = self.gateway.const.Presentation
        device_class = SENSORS.get(pres(self.child_type).name)
        # only report classes Home Assistant actually recognises
        return device_class if device_class in DEVICE_CLASSES else None
| """
Support for MySensors binary sensors.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/binary_sensor.mysensors/
"""
from homeassistant.components import mysensors
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES, DOMAIN, BinarySensorDevice)
from homeassistant.const import STATE_ON
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the MySensors platform for binary sensors."""
    # Delegate entity creation to the shared MySensors helper; each
    # discovered binary-sensor child is wrapped in MySensorsBinarySensor.
    mysensors.setup_mysensors_platform(
        hass, DOMAIN, discovery_info, MySensorsBinarySensor,
        add_devices=add_devices)
class MySensorsBinarySensor(mysensors.MySensorsEntity, BinarySensorDevice):
    """Representation of a MySensors Binary Sensor child node."""

    @property
    def is_on(self):
        """Return True if the binary sensor is on."""
        current = self._values.get(self.value_type)
        return current == STATE_ON

    @property
    def device_class(self):
        """Return the class of this sensor, from DEVICE_CLASSES."""
        pres = self.gateway.const.Presentation
        mapping = {
            pres.S_DOOR: 'opening',
            pres.S_MOTION: 'motion',
            pres.S_SMOKE: 'smoke',
        }
        # The remaining presentation types only exist from protocol 1.5 on.
        if float(self.gateway.protocol_version) >= 1.5:
            mapping[pres.S_SPRINKLER] = 'sprinkler'
            mapping[pres.S_WATER_LEAK] = 'leak'
            mapping[pres.S_SOUND] = 'sound'
            mapping[pres.S_VIBRATION] = 'vibration'
            mapping[pres.S_MOISTURE] = 'moisture'
        device_class = mapping.get(self.child_type)
        if device_class in DEVICE_CLASSES:
            return device_class
        return None
| Python | 0 |
60ffb1d13f00851377960eb76c90a7ef4592d03c | Create kivy_android_carousel.py | 009---Nine-Nine/kivy_android_carousel.py | 009---Nine-Nine/kivy_android_carousel.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import kivy
from kivy.app import App
from kivy.uix.button import Button
from kivy.uix.image import Image
from kivy.uix.label import Label
from kivy.uix.scatter import Scatter
from kivy.uix.screenmanager import Screen, ScreenManager, FadeTransition
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.carousel import Carousel
from os import listdir
from os.path import isfile, join
#kivy.require('1.9.1')
# Directory scanned for carousel content; every regular file directly
# inside it is treated as an image (no extension filtering is done).
images_path = "images/"
carousel_images = [f for f in listdir(images_path) if isfile(join(images_path, f))]
#print carousel_images
class ImagesCarousel(Carousel):
    """
    #STEP 4
    A carousel pre-populated with every file found in `images_path`.
    Each image is wrapped in a Scatter so it can be zoomed and dragged;
    to simplify, drop the Scatter and add the Image widget directly.
    """
    def __init__(self, *args, **kwargs):
        super(ImagesCarousel, self).__init__(**kwargs)
        for filename in carousel_images:
            wrapper = Scatter(pos=(100,200), scale=4, do_scale=True)
            wrapper.add_widget(Image(source=images_path + filename))
            self.add_widget(wrapper)
class ScreenOne(Screen):
    """
    STEP 2
    First screen: a greeting label plus a button that switches to screen 2.
    """
    def __init__(self, **kwargs):
        super(ScreenOne, self).__init__(**kwargs)
        layout = FloatLayout(orientation='vertical')
        next_button = Button(text="To the next screen", color=[1,1,1,1],
                             size_hint_y=0.1, size_hint_x=1,
                             pos_hint={'x':0, 'y': 0.9})
        next_button.bind(on_press=self.screen_changer1)
        greeting = Label(text='Hello PyThess', font_size='40sp',
                         pos_hint={'x':0, 'y': 0.3})
        layout.add_widget(next_button)
        layout.add_widget(greeting)
        self.add_widget(layout)

    def screen_changer1(self, *args):
        # Ask the ScreenManager to show the second screen.
        self.manager.current = 'screen2'
class ScreenTwo(Screen):
    """
    #STEP 3
    Second screen: a back button on top, the image carousel underneath.
    """
    def __init__(self, **kwargs):
        super(ScreenTwo, self).__init__(**kwargs)
        outer = FloatLayout(orientation='vertical')
        carousel_area = FloatLayout(orientation='vertical', size_hint_y=0.9,
                                    size_hint_x=1, pos_hint={'x':0, 'y': 0})
        back_button = Button(text="To the previous screen", color=[0,0,0,1],
                             size_hint_y=0.1, size_hint_x=1,
                             pos_hint={'x':0, 'y': 0.9})
        back_button.bind(on_press=self.screen_changer1)
        outer.add_widget(back_button)
        # Create the carousel here and nest it below the button row.
        carousel_area.add_widget(ImagesCarousel(direction='right'))
        outer.add_widget(carousel_area)
        self.add_widget(outer)

    def screen_changer1(self, *args):
        # Ask the ScreenManager to show the first screen again.
        self.manager.current = 'screen1'
class PyThess(App):
    """
    #STEP 1
    Application entry point: builds a fading ScreenManager with both screens.
    """
    def build(self):
        self.my_screenmanager = ScreenManager(transition=FadeTransition())
        for screen in (ScreenOne(name='screen1'), ScreenTwo(name='screen2')):
            self.my_screenmanager.add_widget(screen)
        return self.my_screenmanager
if __name__ == '__main__':
    # Hand control to Kivy's event loop.
    PyThess().run()
| Python | 0.000647 | |
c9f70c7a4a24be0cdd9dcf044a06051b0978efff | add exceptions | jsonrpc/exceptions.py | jsonrpc/exceptions.py | class JSONRPCError(object):
""" Error for JSON-RPC communication.
When a rpc call encounters an error, the Response Object MUST contain the
error member with a value that is a Object with the following members:
code: A Number that indicates the error type that occurred.
This MUST be an integer.
message: A String providing a short description of the error.
The message SHOULD be limited to a concise single sentence.
data: A Primitive or Structured value that contains additional information
about the error.
This may be omitted.
The value of this member is defined by the Server (e.g. detailed error
information, nested errors etc.).
The error codes from and including -32768 to -32000 are reserved for
pre-defined errors. Any code within this range, but not defined explicitly
below is reserved for future use. The error codes are nearly the same as
those suggested for XML-RPC at the following
url: http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php
"""
def __init__(self, code=None, message=None, data=None):
self.code = code or self.code
self.message = message or self.message
self.data = data
@property
def _dict(self):
""" Return object dict representation.
:return dict:
"""
data = dict(code=self.code, message=self.message)
if self.data:
data["data"] = self.data
return data
class JSONRPCParseError(JSONRPCError):
    """Invalid JSON was received by the server: an error occurred on the
    server while parsing the JSON text."""

    message = "Parse error"
    code = -32700
class JSONRPCInvalidRequest(JSONRPCError):
    """The JSON sent is not a valid Request object."""

    message = "Invalid Request"
    code = -32600
class JSONRPCMethodNotFound(JSONRPCError):
    """The requested method does not exist / is not available."""

    message = "Method not found"
    code = -32601
class JSONRPCInvalidParams(JSONRPCError):
    """Invalid method parameter(s) were supplied."""

    message = "Invalid params"
    code = -32602
class JSONRPCInternalError(JSONRPCError):
    """Internal JSON-RPC error."""

    message = "Internal error"
    code = -32603
class JSONRPCServerError(JSONRPCError):
    """Reserved for implementation-defined server-errors."""

    message = "Server error"
    code = -32000
| Python | 0.000013 | |
2947a2c9b6348d248e3ae740722d6a7aa04327c0 | add reg d included definitions | regconfig/reg_d.py | regconfig/reg_d.py | from regparser.default_settings import *
#### Regulation D
# Terms whose definitions the parser should pick up in part 1004
# (Regulation D). Each tuple is (term, text of the term as it appears
# in the regulation).
INCLUDE_DEFINITIONS_IN_PART_1004 = [
    ('Alternative mortgage transaction', 'Alternative mortgage transaction'),
    ('Creditor', 'Creditor'),
    ('State', 'State'),
    ('State law', 'State law'),
]
# Register the part-1004 list in the parser-wide mapping that the star
# import above brings in from regparser.default_settings.
INCLUDE_DEFINITIONS_IN['1004'] = INCLUDE_DEFINITIONS_IN_PART_1004
| Python | 0 | |
1e32a27b35e25e780e8af6cc76d1eb424328171b | add leetcode Populating Next Right Pointers in Each Node | leetcode/PopulatingNextRightPointersinEachNode/solution.py | leetcode/PopulatingNextRightPointersinEachNode/solution.py | # -*- coding:utf-8 -*-
# Definition for a binary tree node
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
# self.next = None
class Solution:
    # @param root, a tree node
    # @return nothing
    def connect(self, root):
        """Link each node's `next` pointer to its right neighbor,
        descending recursively (assumes a perfect binary tree)."""
        if root is None:
            return root
        left, right = root.left, root.right
        if left is not None:
            left.next = right
        if right is not None and root.next is not None:
            # Cross the gap to the parent's right sibling's children.
            right.next = root.next.left or root.next.right
        self.connect(left)
        self.connect(right)
| Python | 0 | |
b220410ad51413d52076bec84a3bf1a660f9883b | Add a program that says Hello Shikha | helloShikha.py | helloShikha.py | #This is my hello world program
# print() call form runs on both Python 2 and 3; the original
# `print 'Hello Shikha!'` statement form is a Python 3 syntax error.
print('Hello Shikha!')
| Python | 0.999999 | |
5a120774bae2d9775493c6806841a97e790b266e | Create citreo_code_v2.py | Logistic-Regression/citreo_code_v2.py | Logistic-Regression/citreo_code_v2.py | from datetime import datetime
from csv import DictReader
from math import exp, log, sqrt
# parameters #################################################################
train = 'train.csv' # path to training file (CSV with Id and Label columns)
test = 'test.csv' # path to testing file (CSV with Id column, no Label)
D = 2 ** 20 # size of the hashed feature space == length of the weight vector
alpha = .1 # base learning rate for the adaptive sgd update
# function definitions #######################################################
# A. Bounded logloss
# INPUT:
# p: our prediction
# y: real answer
# OUTPUT
# logarithmic loss of p given y
def logloss(p, y):
    """Bounded logarithmic loss of prediction p against label y (0. or 1.).

    The prediction is clamped away from 0 and 1 so the log never blows up.
    """
    bounded = min(max(p, 10e-12), 1. - 10e-12)
    if y == 1.:
        return -log(bounded)
    return -log(1. - bounded)
# B. Apply hash trick of the original csv row
# for simplicity, we treat both integer and categorical features as categorical
# INPUT:
#     csv_row: a csv dictionary, ex: {'Lable': '1', 'I1': '357', 'I2': '', ...}
#     D: the max index that we can hash to
# OUTPUT:
#     x: a list of indices that its value is 1
def get_x(csv_row, D):
    """Hash each (feature, value) pair of a csv row into [0, D).

    This function was commented out, yet the training/testing loops below
    call get_x() — running the script raised NameError. Restored as-is.
    NOTE(review): int(value + key[1:], 16) assumes hex-parsable values;
    a non-hex value would raise ValueError — confirm against the data.
    """
    x = [0]  # 0 is the index of the bias term
    for key, value in csv_row.items():
        index = int(value + key[1:], 16) % D  # weakest hash ever ;)
        x.append(index)
    return x  # x contains indices of features that have a value of 1
# C. Get probability estimation on x
# INPUT:
# x: features
# w: weights
# OUTPUT:
# probability of p(y = 1 | x; w)
def get_p(x, w):
    """Probability p(y = 1 | x; w) through a sigmoid bounded to [-20, 20]."""
    wTx = sum(w[i] for i in x)  # x[i] is implicitly 1 for every listed index
    wTx = max(min(wTx, 20.), -20.)  # bound the logit to avoid exp() overflow
    return 1. / (1. + exp(-wTx))
# D. Update given model
# INPUT:
# w: weights
# n: a counter that counts the number of times we encounter a feature
# this is used for adaptive learning rate
# x: feature
# p: prediction of our model
# y: answer
# OUTPUT:
# w: updated model
# n: updated count
def update_w(w, n, x, p, y):
    """One adaptive-learning-rate SGD step; mutates and returns (w, n).

    For every active index the gradient is simply (p - y) because the
    corresponding feature value is always 1.
    """
    gradient = p - y
    for i in x:
        # alpha / (sqrt(n[i]) + 1) shrinks the step for frequent features.
        w[i] -= gradient * alpha / (sqrt(n[i]) + 1.)
        n[i] += 1.
    return w, n
# training and testing #######################################################
# initialize our model
w = [0.] * D # weights
n = [0.] * D # number of times we've encountered a feature
# start training a logistic regression model with one-pass sgd
loss = 0.
# NOTE(review): the file handles opened inline here are never closed
# explicitly; fine for a one-shot script, but a `with` block is cleaner.
for t, row in enumerate(DictReader(open(train))):
    y = 1. if row['Label'] == '1' else 0.
    del row['Label'] # can't let the model peek the answer
    del row['Id'] # we don't need the Id
    # main training procedure
    # step 1, get the hashed features
    x = get_x(row, D)
    # step 2, get prediction
    p = get_p(x, w)
    # for progress validation, useless for learning our model
    loss += logloss(p, y)
    if t % 1000000 == 0 and t > 1:
        # progress report: cumulative average logloss every million rows
        print('%s\tencountered: %d\tcurrent logloss: %f' % (
            datetime.now(), t, loss/t))
    # step 3, update model with answer
    w, n = update_w(w, n, x, p, y)
# testing (build kaggle's submission file)
with open('submission1234.csv', 'w') as submission:
    submission.write('Id,Predicted\n')
    for t, row in enumerate(DictReader(open(test))):
        Id = row['Id']
        del row['Id']
        x = get_x(row, D)
        p = get_p(x, w)
        submission.write('%s,%f\n' % (Id, p))
| Python | 0.000015 | |
557652d4b4297dd80d844915c3d57fc3e46ac83a | add graham's solution: | solutions/4_21_grsr.py | solutions/4_21_grsr.py | import sys
# Read CSV rows (population, sample_id, sample_name, sex) from stdin and
# report the row whose sample_id equals the first command-line argument.
for line in sys.stdin:
    line = line.rstrip()
    pop, sample_id, sample_name, sex = line.split(",")
    if sample_id == sys.argv[1]:
        # Single pre-joined string keeps output identical on Python 2
        # and 3; the original `print "Found", sample_id` statement form
        # is a Python 3 syntax error.
        print("Found " + sample_id)
| Python | 0.000033 | |
8db04e5d648c9e923f7977f456242d9ea9b80050 | Create pig_latin.py | solutions/pig_latin.py | solutions/pig_latin.py | def pig_latin(input_string):
new_string = []
for i in input_string.split():
if i[0] not in "aeiou":
i = i[1:]+i[0]
i +="ay"
new_string.append(i)
return ' '.join(new_string)
def main():
    """Prompt for a phrase and print its pig-latin translation."""
    # raw_input() already returns a str, so the original extra str()
    # wrapper was redundant.
    user_input = raw_input("Please give me a phrase: ")
    # print() with a single argument behaves identically on Python 2 and 3.
    print(pig_latin(user_input))
if __name__ == '__main__':
    main()
| Python | 0.001803 | |
f5460adbaeb87421a7f193a700d25e5a3c6e4351 | Create crypt.py | crypt.py | crypt.py | from itertools import cycle
def crypt(source, key):
    """XOR-cipher: combine each character of source with the repeating key.

    The operation is symmetric — applying it twice with the same key
    recovers the original text.
    """
    stream = cycle(key)
    return "".join(chr(ord(ch) ^ ord(next(stream))) for ch in source)
if __name__=="__main__":
    # Prompts are in Chinese: "enter the string to encrypt/decrypt",
    # "enter the key"; the final line prints the transformed text.
    source=input("输入想要加密/解密的字串:")
    key=input("输入密钥:")
    print("加密/解密成功!密码为:"+crypt(source,key))
| Python | 0.00091 | |
5ed7db70874f3ebfe9c946d38ccf12228dacac3a | Test if we tried to commit with an empty message, it should raise a ValueError | tests/test_git.py | tests/test_git.py | from unittest import TestCase
from mock import MagicMock, patch
from nose.tools import raises
from pyolite.git import Git
class TestGit(TestCase):
    """Tests for pyolite.git.Git commit behaviour."""

    @raises(ValueError)
    def test_commit_with_no_message(self):
        """An empty commit message must be rejected with ValueError."""
        fake_repo = MagicMock()
        fake_repo.index = MagicMock()
        fake_repo.remotes.origin = MagicMock()
        # Swap the real Repo class for the mock inside pyolite.git.
        with patch.multiple('pyolite.git', Repo=fake_repo):
            git = Git('~/path/to/repo')
            git.commit(['simple_object', 'more_complex_one'], '')
| Python | 0.000001 | |
1a3d9b3da91a5c87316e44498a876f70a49df8ad | add 70 | python/p070.py | python/p070.py | import utils
def is_perm(a, b):
    """Return True when a and b contain exactly the same decimal digits."""
    return "".join(sorted(str(a))) == "".join(sorted(str(b)))
# Search for n = p*q (two primes near sqrt(10^7)) minimising n/phi(n)
# subject to phi(n) being a digit permutation of n (Project Euler 70).
best = (10000, 1)  # (ratio, n); tuple comparison keeps the smallest ratio
primes = [p for p in utils.primes(4000) if p > 2000]
for p in primes:
    for q in primes:
        n = p * q
        if n > 10**7:
            break  # products only grow for larger q
        phi = (p - 1) * (q - 1)
        candidate = ((n * 1.0) / phi, n)
        if is_perm(n, phi) and candidate < best:
            best = candidate
print(best[1])
| Python | 0.999266 | |
c4764ef1aa1a1aaa0ae8dd909c3578705c7a2060 | add 77 | python/p077.py | python/p077.py | import utils
primes = utils.primes(100)
def count(target):
    """Number of ways to write `target` as a sum of primes (coin-change DP)."""
    ways = [1] + [0] * target  # one way to make 0: the empty sum
    for p in primes:
        for total in xrange(p, target + 1):
            ways[total] += ways[total - p]
    return ways[target]
# Find the first value expressible as a sum of primes in more than five
# thousand ways (Project Euler 77).
for target in xrange(2, 100):
    if count(target) > 5000:
        print target
        break
| Python | 0.998768 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.