commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
bd0bdc543ba1e44ddc9d149fbaadd12ab051614d | Add migrations | accession/migrations/0003_auto_20191101_1625.py | accession/migrations/0003_auto_20191101_1625.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.25 on 2019-11-01 16:25
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen Object.object_era into a fixed list of decade-range choices."""

    dependencies = [
        ('accession', '0002_auto_20191031_2139'),
    ]

    operations = [
        migrations.AlterField(
            model_name='object',
            name='object_era',
            # Decade buckets from "Pre-1770" through "2020-2029"; blank allowed,
            # stored as a plain CharField (longest value is 10 chars, e.g. "1990-1999").
            field=models.CharField(blank=True, choices=[('Pre-1770', 'Pre-1770'), ('1770-1779', '1770-1779'), ('1780-1789', '1780-1789'), ('1790-1799', '1790-1799'), ('1800-1809', '1800-1809'), ('1810-1819', '1810-1819'), ('1820-1829', '1820-1829'), ('1830-1839', '1830-1839'), ('1840-1849', '1840-1849'), ('1850-1859', '1850-1859'), ('1860-1869', '1860-1869'), ('1870-1879', '1870-1879'), ('1880-1889', '1880-1889'), ('1890-1899', '1890-1899'), ('1900-1909', '1900-1909'), ('1910-1919', '1910-1919'), ('1920-1929', '1920-1929'), ('1930-1939', '1930-1939'), ('1940-1949', '1940-1949'), ('1950-1959', '1950-1959'), ('1960-1969', '1960-1969'), ('1970-1979', '1970-1979'), ('1980-1989', '1980-1989'), ('1990-1999', '1990-1999'), ('2000-2009', '2000-2009'), ('2010-2019', '2010-2019'), ('2020-2029', '2020-2029')], max_length=10),
        ),
    ]
| Python | 0.000001 | |
7b0ebe74cbaad610bb65f24cc2555d82e7d7a750 | read attachments path from settings, catch jpeg/png | apps/photos/views.py | apps/photos/views.py | from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from rapidsms.webui.utils import render_to_response
from photos.models import Photo
import os
import settings
# default page - show all thumbnails by date
@login_required()
def recent(request, template_name="photos/list.html"):
    """Render thumbnails for every Photo (login required)."""
    photos = Photo.objects.all()
    return render_to_response(request, template_name, {'photos' : photos})
# show a single photo + comments
@login_required()
def show(request, photo_id, template_name="photos/single.html"):
    """Render the page for a single photo, looked up by primary key."""
    p = Photo.objects.get(id=photo_id)
    return render_to_response(request, template_name, {'photo' : p})
@login_required()
def import_photos(request):
    """Scan the receiver's attachments directory and register new images.

    Any .jpg/.jpeg/.png file that is not already recorded gets a Photo row
    pointing at its path; then redirect the user back to /photos.
    """
    path = settings.RAPIDSMS_APPS['receiver']['attachments_path'] # -> data/attachments
    def is_img(filename):
        # Accept the image extensions the receiver produces.
        return (filename.endswith('.jpg') or filename.endswith('.jpeg') or filename.endswith('.png'))
    def not_in_db_already(filename):
        # Note that there's a query for each file here - another way would be to load all existing files to a list in one operation and work with that
        # but, that might generate huge list when there are a lot of photos in the DB, and might cause data freshness issues in some edge cases
        # so, we just do n queries each time (where n is probably not too big) instead
        return (Photo.objects.filter(original_image="%s/%s" % (path, filename)).count() == 0)
    files = os.listdir(path)
    img_files = filter(is_img, files)
    new_img_files = filter(not_in_db_already, img_files)
    for f in new_img_files:
        p = Photo(name=f, original_image="%s/%s" % (path, f))
        p.save()
    return HttpResponseRedirect("/photos")
@login_required()
def populate(request):
    """Dev helper: seed the DB with the three bundled test images."""
    for i in (1,2,3):
        p = Photo(name="test image #%s" % i, original_image="apps/photos/tests/test%s.jpg" % i)
        p.save()
    return HttpResponseRedirect("/photos")
| from django.contrib.auth.decorators import login_required
from django.http import HttpResponse
from django.http import HttpResponseRedirect
from rapidsms.webui.utils import render_to_response
from photos.models import Photo
import os
import settings
# default page - show all thumbnails by date
@login_required()
def recent(request, template_name="photos/list.html"):
photos = Photo.objects.all()
return render_to_response(request, template_name, {'photos' : photos})
# show a single photo + comments
@login_required()
def show(request, photo_id, template_name="photos/single.html"):
p = Photo.objects.get(id=photo_id)
return render_to_response(request, template_name, {'photo' : p})
@login_required()
def import_photos(request):
path = 'data/attachments' #settings.RAPIDSMS_APPS['receiver']['attachments_path']
def is_img(filename):
return filename.endswith('.jpg')
def not_in_db_already(filename):
# Note that there's a query for each file here - another way would be to load all existing files to a list in one operation and work with that
# but, that might generate huge list when there are a lot of photos in the DB, and might cause data freshness issues in some edge cases
# so, we just do n queries each time (where n is probably not too big) instead
return (Photo.objects.filter(original_image="%s/%s" % (path, filename)).count() == 0)
files = os.listdir(path)
img_files = filter(is_img, files)
new_img_files = filter(not_in_db_already, img_files)
out = ''
for f in new_img_files:
out += "%s/%s <br/> " % (path, f)
p = Photo(name=f, original_image="%s/%s" % (path, f))
p.save()
return HttpResponseRedirect("/photos")
# return HttpResponse(out)
@login_required()
def populate(request):
for i in (1,2,3):
p = Photo(name="test image #%s" % i, original_image="apps/photos/tests/test%s.jpg" % i)
p.save()
return HttpResponseRedirect("/photos")
| Python | 0 |
fda4f436bbaea9215efa03648d2df8e413fb47dd | add class loader tests | test/test_loader.py | test/test_loader.py | # Copyright (c) 2018 Workonline Communications (Pty) Ltd. All rights reserved.
#
# The contents of this file are licensed under the Apache License version 2.0
# (the "License"); you may not use this file except in compliance with the
# License.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""rptk class loader test cases."""
from __future__ import print_function
from __future__ import unicode_literals
from helpers import default_format_classes, default_query_classes
import pytest
class_sets = (
default_query_classes().items(),
default_format_classes().items(),
pytest.param([("foo", "rptk.foo.FooClass")], marks=pytest.mark.xfail),
pytest.param(0, marks=pytest.mark.xfail)
)
class TestClassLoader(object):
    """Test cases for rptk class loader classes."""

    @pytest.mark.parametrize("class_set", class_sets)
    def test_class_loader(self, class_set):
        """Test rptk class loader."""
        from rptk.load import ClassLoader
        loader = ClassLoader(items=class_set)
        # Every configured (name, dotted-path) pair must be resolvable.
        assert isinstance(loader.class_names, list)
        for name, path in class_set:
            assert name in loader.class_names
            assert name in loader.class_info
            assert loader.class_info[name]
            # get_class resolves the dotted path down to the class object.
            assert loader.get_class(name=name).__name__ in path
        assert isinstance(loader.classes, list)
        for cls in loader.classes:
            assert isinstance(cls, type)
dd75e1c5afb05c5d46adae465947fb3f893cdf6b | Create 7kyu_complete_the_pattern4.py | Solutions/7kyu/7kyu_complete_the_pattern4.py | Solutions/7kyu/7kyu_complete_the_pattern4.py | def pattern(n):
    # Rows of ascending digits: row i (0-based) is i+1..n joined together,
    # e.g. pattern(3) -> "123\n23\n3"; n <= 0 yields "".
    l=list(range(1,n+1))
    return '\n'.join(''.join(map(str,l[i:])) for i in range(n))
| Python | 0.001969 | |
a5f8248b1b4a237e66a8dbf443ea822b5e01a2f9 | Add backport-pr script | tools/backport_pr.py | tools/backport_pr.py | #!/usr/bin/env python
"""
Backport pull requests to a particular branch.
Usage: backport_pr.py branch [PR] [PR2]
e.g.:
python tools/backport_pr.py 0.13.1 123 155
to backport PR #123 onto branch 0.13.1
or
python tools/backport_pr.py 2.1
to see what PRs are marked for backport with milestone=2.1 that have yet to be applied
to branch 2.x.
Forked from the backport_pr.py script in the ipython/ipython repository.
"""
from __future__ import print_function
import os
import re
import sys
from subprocess import Popen, PIPE, check_call, check_output
try:
from urllib.request import urlopen
except:
from urllib import urlopen
from gh_api import (
get_issues_list,
get_pull_request,
get_pull_request_files,
is_pull_request,
get_milestone_id,
)
def find_rejects(root='.'):
    """Yield the path of every leftover .rej file under *root* (recursive)."""
    for dirpath, _dirs, filenames in os.walk(root):
        for leftover in filenames:
            if leftover.endswith('.rej'):
                yield os.path.join(dirpath, leftover)
def get_current_branch():
    """Return the name of the currently checked-out git branch."""
    branches = check_output(['git', 'branch'])
    for branch in branches.splitlines():
        # `git branch` marks the current branch with a leading '*'.
        if branch.startswith(b'*'):
            return branch[1:].strip().decode('utf-8')
def backport_pr(branch, num, project='jupyter/nbgrader'):
    """Apply PR *num*'s patch onto *branch* and commit it.

    Returns 0 on success.  If the patch does not apply cleanly it is saved
    to PR<num>.patch for manual editing and 1 is returned; a pre-existing
    PR<num>.patch (presumably hand-fixed) takes priority over re-download.
    """
    current_branch = get_current_branch()
    if branch != current_branch:
        check_call(['git', 'checkout', branch])
    check_call(['git', 'pull'])
    pr = get_pull_request(project, num, auth=True)
    files = get_pull_request_files(project, num, auth=True)
    patch_url = pr['patch_url']
    title = pr['title']
    description = pr['body']
    fname = "PR%i.patch" % num
    if os.path.exists(fname):
        print("using patch from {fname}".format(**locals()))
        with open(fname, 'rb') as f:
            patch = f.read()
    else:
        req = urlopen(patch_url)
        patch = req.read()
    # Keep the commit message short: at most 5 lines of the PR description.
    lines = description.splitlines()
    if len(lines) > 5:
        lines = lines[:5] + ['...']
    description = '\n'.join(lines)
    msg = "Backport PR #%i: %s" % (num, title) + '\n\n' + description
    # Dry-run first so a conflicting patch never half-applies.
    check = Popen(['git', 'apply', '--check', '--verbose'], stdin=PIPE)
    a,b = check.communicate(patch)
    if check.returncode:
        print("patch did not apply, saving to {fname}".format(**locals()))
        print("edit {fname} until `cat {fname} | git apply --check` succeeds".format(**locals()))
        print("then run tools/backport_pr.py {num} again".format(**locals()))
        if not os.path.exists(fname):
            with open(fname, 'wb') as f:
                f.write(patch)
        return 1
    p = Popen(['git', 'apply'], stdin=PIPE)
    a,b = p.communicate(patch)
    filenames = [ f['filename'] for f in files ]
    check_call(['git', 'add'] + filenames)
    check_call(['git', 'commit', '-m', msg])
    print("PR #%i applied, with msg:" % num)
    print()
    print(msg)
    print()
    # Restore whatever branch the user started on.
    if branch != current_branch:
        check_call(['git', 'checkout', current_branch])
    return 0
backport_re = re.compile(r"(?:[Bb]ackport|[Mm]erge).*#(\d+)")
def already_backported(branch, since_tag=None):
    """return set of PRs that have been backported already"""
    if since_tag is None:
        # Default to the most recent tag reachable from the branch.
        since_tag = check_output(['git','describe', branch, '--abbrev=0']).decode('utf8').strip()
    cmd = ['git', 'log', '%s..%s' % (since_tag, branch), '--oneline']
    lines = check_output(cmd).decode('utf8')
    # Backport commits carry "Backport ... #NNN" / "Merge ... #NNN" subjects.
    return set(int(num) for num in backport_re.findall(lines))
def should_backport(labels=None, milestone=None):
    """return set of PRs marked for backport"""
    # Exactly one selector (labels or milestone) must be provided.
    if labels is None and milestone is None:
        raise ValueError("Specify one of labels or milestone.")
    elif labels is not None and milestone is not None:
        raise ValueError("Specify only one of labels or milestone.")
    if labels is not None:
        issues = get_issues_list("jupyter/nbgrader",
                                 labels=labels,
                                 state='closed',
                                 auth=True,
                                 )
    else:
        milestone_id = get_milestone_id("jupyter/nbgrader", milestone,
                                        auth=True)
        issues = get_issues_list("jupyter/nbgrader",
                                 milestone=milestone_id,
                                 state='closed',
                                 auth=True,
                                 )
    should_backport = set()
    for issue in issues:
        # Plain issues can carry the milestone too; only PRs are candidates.
        if not is_pull_request(issue):
            continue
        pr = get_pull_request("jupyter/nbgrader", issue['number'],
                              auth=True)
        if not pr['merged']:
            print ("Marked PR closed without merge: %i" % pr['number'])
            continue
        # Only PRs merged into master get backported.
        if pr['base']['ref'] != 'master':
            continue
        should_backport.add(pr['number'])
    return should_backport
if __name__ == '__main__':
    if len(sys.argv) < 2:
        print(__doc__)
        sys.exit(1)
    if len(sys.argv) < 3:
        # Milestone-only mode: report PRs still awaiting backport to the
        # matching "<major>.x" maintenance branch.
        milestone = sys.argv[1]
        branch = milestone.split('.')[0] + '.x'
        already = already_backported(branch)
        should = should_backport(milestone=milestone)
        print ("The following PRs should be backported:")
        for pr in sorted(should.difference(already)):
            print (pr)
        sys.exit(0)
    # Branch + PR-number mode: apply each PR in turn, stopping on failure.
    for prno in map(int, sys.argv[2:]):
        print("Backporting PR #%i" % prno)
        rc = backport_pr(sys.argv[1], prno)
        if rc:
            print("Backporting PR #%i failed" % prno)
            sys.exit(rc)
| Python | 0 | |
422b5573b72cc2014893aa15758b9d0bc61baf05 | refactor from core.py | Synopsis/Formatters/HTML/DeclarationStyle.py | Synopsis/Formatters/HTML/DeclarationStyle.py | # $Id: DeclarationStyle.py,v 1.1 2003/11/15 19:55:06 stefan Exp $
#
# Copyright (C) 2000 Stephen Davies
# Copyright (C) 2000 Stefan Seefeld
# All rights reserved.
# Licensed to the public under the terms of the GNU LGPL (>= 2),
# see the file COPYING for details.
#
class Style:
    """This class just maintains a mapping from declaration to display style.
    The style is an enumeration, possible values being: SUMMARY (only display
    a summary for this declaration), DETAIL (summary and detailed info),
    INLINE (summary and detailed info, where detailed info is an inline
    version of the declaration even if it's a class, etc.)"""

    SUMMARY = 0
    DETAIL = 1
    INLINE = 2

    def __init__(self):
        # Cache: id(decl) -> computed style, so each decl is classified once.
        self.__dict = {}

    def style_of(self, decl):
        """Returns the style of the given decl"""
        # NOTE(review): this method references AST.* but no AST import is
        # visible in this file -- presumably `from Synopsis import AST` is
        # expected; confirm before relying on the isinstance branches.
        SUMMARY = self.SUMMARY
        DETAIL = self.DETAIL
        key = id(decl)
        if self.__dict.has_key(key): return self.__dict[key]
        if len(decl.comments()) == 0:
            # Set to summary, as this will mean no detailed section
            style = SUMMARY
        else:
            comment = decl.comments()[0]
            # Calculate the style. The default is detail
            if not comment.text():
                # No comment, don't show detail
                style = SUMMARY
            elif comment.summary() != comment.text():
                # There is more to the comment than the summary, show detail
                style = DETAIL
            else:
                # Summary == Comment, don't show detail
                style = SUMMARY
            # Always show tags
            if comment.tags():
                style = DETAIL
        # Always show enums
        if isinstance(decl, AST.Enum):
            style = DETAIL
        # Show functions if they have exceptions
        if isinstance(decl, AST.Function) and len(decl.exceptions()):
            style = DETAIL
        # Don't show detail for scopes (they have their own pages)
        if isinstance(decl, AST.Scope):
            style = SUMMARY
        self.__dict[key] = style
        return style

    # Allow dictionary-style access: style[decl].
    __getitem__ = style_of
| Python | 0.000202 | |
20cbfa3646bc38429ee202c0e77c32a9c5c614d9 | blotto.py | blotto.py | blotto.py | from ea import adult_selection
from ea import parent_selection
from ea import reproduction
from ea import main
from ea import binary_gtype
def fitness_test(population):
    '''Naive onemax fitness: score each individual by its number of ones.'''
    scored = []
    for ind in population:
        scored.append((ind[0], ind[1], sum(ind[1]), ind[2]))
    return scored
def develop(population):
    '''Development for onemax: the phenotype is a fresh copy of the genotype.'''
    developed = []
    for ind in population:
        gtype = ind[0]
        developed.append((gtype, list(gtype), ind[1]))
    return developed
def visualize(generation_list):
    '''Generate visualizations using matplotlib'''
    # Placeholder: plotting is not implemented yet.
    return None
if __name__=='__main__':
size = int(raw_input("Input problem size:\n"))
popsize = int(raw_input("Input population size:\n"))
adult_selection, litter_size = adult_selection.gen_adult_selection(popsize)
parent_selection = parent_selection.gen_parent_selection(litter_size)
mutate = binary_gtype.gen_mutate()
crossover = binary_gtype.gen_crossover()
reproduction = reproduction.gen_reproduction(mutate, crossover)
generations = int(input("Input max number of generations:\n"))
fitness_goal = float(input("Input fitness goal, 0 for none:\n"))
initial = [(binary_gtype.generate(size), 0) for i in xrange(popsize)]
generation_list = main.evolutionary_algorithm(initial, develop, fitness_test, adult_selection, parent_selection, reproduction, generations, fitness_goal)
print "Program ran for " + str(len(generation_list)) + " generations"
| Python | 0.999968 | |
b8cc84245ae7f3ceda0e0cd92b6b2eecb0426ee3 | add start of peg generator | src/mugen/parser/peg.py | src/mugen/parser/peg.py | #!/usr/bin/env python
next_var = 0

def nextVar():
    """Return a fresh unique integer id (1, 2, 3, ...)."""
    global next_var
    next_var += 1
    return next_var
class Pattern:
    """Base class for PEG pattern nodes that emit matcher source code."""
    def __init__(self):
        pass

    def generate(self, result):
        # Subclasses emit code that stores their match outcome in `result`.
        pass
class PatternNot(Pattern):
    """Negative lookahead: succeeds exactly when the inner pattern fails."""
    def __init__(self, next):
        Pattern.__init__(self)
        self.next = next

    def generate(self, result):
        # Evaluate the inner pattern into a fresh temp, then invert it.
        my_result = "result_%d" % nextVar()
        data = """
        Result %s = 0;
        %s
        %s = ! %s;
        """ % (my_result, self.next.generate(my_result), result, my_result)
        return data
class PatternVerbatim(Pattern):
    """Literal pattern: matches an exact string of letters."""
    def __init__(self, letters):
        Pattern.__init__(self)
        self.letters = letters

    def generate(self, result):
        # NOTE(review): this only assigns the literal into the result slot;
        # actual input consumption is presumably still to come (early commit).
        data = """
        %s = "%s";
        """ % (result, self.letters)
        return data
class Rule:
    """A named grammar rule, compiled to a function rule_<name>()."""
    def __init__(self, name, patterns):
        self.name = name
        self.patterns = patterns

    def generate(self):
        # NOTE(review): the emitted function ends with `return Result;`,
        # returning the *type name* rather than the result variable created
        # below -- looks like it should return the generated result_%d; confirm.
        result = "result_%d" % nextVar()
        data = """
        static Result rule_%s(){
            Result %s = 0;
            %s
            return Result;
        }
        """ % (self.name, result, '\n'.join([pattern.generate(result) for pattern in self.patterns]))
        return data
class Peg:
    """A whole grammar: a start-rule name plus the list of all rules."""
    def __init__(self, start, rules):
        self.start = start
        self.rules = rules

    def generate(self):
        # Wrap every generated rule function in one namespace with a main()
        # entry point that invokes the start rule.
        namespace = "Peg"
        data = """
        namespace %s{
            %s
            Result main(){
                return rule_%s();
            }
        }
        """ % (namespace, '\n'.join([rule.generate() for rule in self.rules]), self.start)
        return data
def generate(peg):
    # Python 2 script: dump the generated parser source to stdout.
    print peg.generate()
def test():
    # Smoke test: a grammar with the single rule  s <- !"hello".
    rules = [
        Rule("s", [PatternNot(PatternVerbatim("hello"))]),
    ]
    peg = Peg("s", rules)
    generate(peg)

test()
| Python | 0 | |
82f15b2dae1b23b75a019362e5925c4a3591fa92 | Create InputNeuronGroup_multiple_inputs_1.py | examples/InputNeuronGroup_multiple_inputs_1.py | examples/InputNeuronGroup_multiple_inputs_1.py | '''
Example of a spike generator (only outputs spikes)
In this example spikes are generated and sent through UDP packages. At the end of the simulation a raster plot of the
spikes is created.
'''
from brian import *
import numpy
from brian_multiprocess_udp import BrianConnectUDP
number_of_neurons_total = 40
number_of_neurons_spiking = 30
def main_NeuronGroup(input_Neuron_Group, simulation_clock):
    """Build the sender network: returns ([neuron groups], [synapses], [monitors])."""
    print "main_NeuronGroup!" #DEBUG!
    simclock = simulation_clock
    delta_t=5
    # Pick a random subset of neuron indices that will all fire at t = delta_t ms.
    random_list=numpy.random.randint(number_of_neurons_total,size=number_of_neurons_spiking)
    random_list.sort()
    spiketimes = [(i, delta_t*ms) for i in random_list]
    SpikesOut = SpikeGeneratorGroup(number_of_neurons_total, spiketimes, period=300*ms, clock=simclock) # the maximum clock of the input spikes is limited here (period)
    MSpkOut=SpikeMonitor(SpikesOut) # Spikes sent by UDP
    return ([SpikesOut],[],[MSpkOut])
def post_simulation_function(input_NG, simulation_NG, simulation_SYN, simulation_MN):
    """
    input_NG: the neuron group that receives the input spikes
    simulation_NG: the neuron groups list passed to the system by the user function (main_NeuronGroup)
    simulation_SYN: the synapses list passed to the system by the user function (main_NeuronGroup)
    simulation_MN: the monitors list passed to the system by the user function (main_NeuronGroup)
    This way it is possible to plot, save or do whatever you want with these objects after the end of the simulation!
    """
    # Raster plot of everything the UDP sender emitted during the run.
    figure()
    raster_plot(simulation_MN[0])
    title("Spikes Sent by UDP")
    show(block=True)
if __name__=="__main__":
my_simulation = BrianConnectUDP(main_NeuronGroup, NumOfNeuronsOutput=number_of_neurons_total, post_simulation_function=post_simulation_function,
output_addresses=[("127.0.0.1", 14141)], simclock_dt=5, TotalSimulationTime=10000, brian_address=0)
| Python | 0 | |
765897a05a7aae6a89bfd62d8493fb14aa16048a | Create db_migrate.py | db_migrate.py | db_migrate.py | #!venv/bin/python
import imp
from migrate.versioning import api
from app import db
from config import SQLALCHEMY_DATABASE_URI
from config import SQLALCHEMY_MIGRATE_REPO

# Generate and apply the next numbered sqlalchemy-migrate script by diffing
# the repository's recorded schema against the live models in app.db.
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
migration = SQLALCHEMY_MIGRATE_REPO + ('/versions/%03d_migration.py' % (v+1))
# Load the repository's current notion of the schema into a scratch module...
tmp_module = imp.new_module('old_model')
old_model = api.create_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
exec(old_model, tmp_module.__dict__)
# ...and emit an upgrade script transforming it into the current metadata.
script = api.make_update_script_for_model(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO, tmp_module.meta, db.metadata)
open(migration, "wt").write(script)
api.upgrade(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
v = api.db_version(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)
print('New migration saved as ' + migration)
print('Current database version: ' + str(v))
| Python | 0.000003 | |
c085f9b5af73a50a86d592b3d8b02b1e8e444cde | Create optsimulate.py | docs/assets/optsimulate.py | docs/assets/optsimulate.py |
# OpenPTrack Sender Simulator
# Sept 13, 2015
# jburke@ucla.edu
import socket, time, json, time, random
UDP_IP = "127.0.0.1"
UDP_PORT = 21234
PERIOD = .100 # how often to publish in time
# For the random walk
MAXSTEP_X = 10
MAXSTEP_Y = 10
WOBBLE_Z = 1
Z_NOMINAL = 40
# Increasing packet seq number
_SEQ = 0
# Current message format
# https://github.com/OpenPTrack/open_ptrack/wiki/Using%20The%20Data
#
#MESSAGE = '{"header":{"seq":336988,"stamp":{"sec":1441244414,"nsec":266356327},"frame_id":"world"},"tracks":[{"id":170,"x":0.740519,"y":-3.21577,"height":1.01898,"age":79.4518,"confidence":0.491777},{"id":172,"x":0.843167,"y":-3.29433,"height":1.10497,"age":29.471,"confidence":0.500193}]}'
def track(id, x, y, height, age, confidence):
    """Build a single track record in the OpenPTrack JSON schema."""
    return {
        "id": id,
        "x": x,
        "y": y,
        "height": height,
        "age": age,
        "confidence": confidence,
    }
def packet( tracks ) :
    """Wrap *tracks* in a message with an increasing seq number and timestamp."""
    global _SEQ
    _SEQ+=1
    now = float(time.time())
    sec = int(now)
    # Split the float timestamp into whole seconds + nanoseconds (ROS-header style).
    nsec = int((now-sec) * 1e9)
    header = { "seq":_SEQ, "stamp": {"sec":sec, "nsec":nsec}, "frame_id":"world" }
    return { "header":header, "tracks":tracks }
# Provide two random walkers
# More is exercise for reader ...
def walk(W):
    """Advance every [x, y, z] walker in W by one random step, in place."""
    for w in W:
        # Uniform step in [-MAXSTEP, +MAXSTEP] on x/y; z wobbles around Z_NOMINAL.
        w[0] += MAXSTEP_X * 2*(random.random() - 0.5)
        w[1] += MAXSTEP_Y * 2*(random.random() - 0.5)
        w[2] = Z_NOMINAL + WOBBLE_Z*2*(random.random()-0.5)
# Two walkers starting at random positions in a 200x200 area.
walkers = [ [random.randrange(200)-100, random.randrange(200)-100, Z_NOMINAL],
            [random.randrange(200)-100, random.randrange(200)-100, Z_NOMINAL] ]

print("^C to stop")
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # UDP
try:
    # Send one simulated tracking packet every PERIOD seconds until ^C.
    while True:
        walk(walkers)
        MESSAGE = json.dumps( packet( [ track(42, walkers[0][0], walkers[0][1], walkers[0][2], _SEQ+100+random.random(), random.random()),
                                        track(43, walkers[1][0], walkers[1][1], walkers[1][2], _SEQ+100+random.random(), random.random())] ) )
        # We throw some zeroes at the end to simulate OpenPTrack's current zero padding,
        # so parsers make sure to handle it. This padding should be removed soon.
        # https://github.com/OpenPTrack/open_ptrack/issues/52
        payload = bytes(MESSAGE.encode('utf-8')) + bytes(bytearray(100))
        sock.sendto(payload, (UDP_IP, UDP_PORT))
        print(payload)
        time.sleep(PERIOD)
except KeyboardInterrupt:
    pass # do cleanup here
| Python | 0 | |
7e30de04cad1070eb84c1de0c370e950b5e2c783 | Annotate zerver.views.webhooks.pingdom. | zerver/views/webhooks/pingdom.py | zerver/views/webhooks/pingdom.py | # Webhooks for external integrations.
from __future__ import absolute_import
from typing import Any
from django.utils.translation import ugettext as _
from django.http import HttpRequest, HttpResponse
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
from zerver.models import Client, UserProfile
import ujson
import six
PINGDOM_SUBJECT_TEMPLATE = '{name} status.'
PINGDOM_MESSAGE_TEMPLATE = 'Service {service_url} changed its {type} status from {previous_state} to {current_state}.'
PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE = 'Description: {description}.'
SUPPORTED_CHECK_TYPES = (
'HTTP',
'HTTP_CUSTOM'
'HTTPS',
'SMTP',
'POP3',
'IMAP',
'PING',
'DNS',
'UDP',
'PORT_TCP',
)
@api_key_only_webhook_view('Pingdom')
@has_request_variables
def api_pingdom_webhook(request, user_profile, client, payload=REQ(argument_type='body'), stream=REQ(default='pingdom')):
    # type: (HttpRequest, UserProfile, Client, Dict[str, Any], six.text_type) -> HttpResponse
    """Handle a Pingdom webhook: post the status change to *stream*."""
    check_type = get_check_type(payload)
    if check_type in SUPPORTED_CHECK_TYPES:
        subject = get_subject_for_http_request(payload)
        body = get_body_for_http_request(payload)
    else:
        return json_error(_('Unsupported check_type: {check_type}').format(check_type=check_type))
    check_send_message(user_profile, client, 'stream', [stream], subject, body)
    return json_success()
def get_subject_for_http_request(payload):
    # type: (Dict[str, Any]) -> six.text_type
    """Message subject, e.g. 'My check status.' for check_name 'My check'."""
    return PINGDOM_SUBJECT_TEMPLATE.format(name=payload['check_name'])
def get_body_for_http_request(payload):
    # type: (Dict[str, Any]) -> six.text_type
    """Message body describing the state transition; outages get a description."""
    current_state = payload['current_state']
    previous_state = payload['previous_state']
    data = {
        'service_url': payload['check_params']['hostname'],
        'previous_state': previous_state,
        'current_state': current_state,
        'type': get_check_type(payload)
    }
    body = PINGDOM_MESSAGE_TEMPLATE.format(**data)
    if current_state == 'DOWN' and previous_state == 'UP':
        # Only UP -> DOWN transitions carry the long human-readable description.
        description = PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE.format(description=payload['long_description'])
        body += '\n{description}'.format(description=description)
    return body
def get_check_type(payload):
    # type: (Dict[str, Any]) -> six.text_type
    """Return the Pingdom check type from the payload, e.g. 'HTTP' or 'PING'."""
    return payload['check_type']
| # Webhooks for external integrations.
from __future__ import absolute_import
from django.utils.translation import ugettext as _
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success, json_error
from zerver.decorator import REQ, has_request_variables, api_key_only_webhook_view
import ujson
PINGDOM_SUBJECT_TEMPLATE = '{name} status.'
PINGDOM_MESSAGE_TEMPLATE = 'Service {service_url} changed its {type} status from {previous_state} to {current_state}.'
PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE = 'Description: {description}.'
SUPPORTED_CHECK_TYPES = (
'HTTP',
'HTTP_CUSTOM'
'HTTPS',
'SMTP',
'POP3',
'IMAP',
'PING',
'DNS',
'UDP',
'PORT_TCP',
)
@api_key_only_webhook_view('Pingdom')
@has_request_variables
def api_pingdom_webhook(request, user_profile, client, payload=REQ(argument_type='body'), stream=REQ(default='pingdom')):
check_type = get_check_type(payload)
if check_type in SUPPORTED_CHECK_TYPES:
subject = get_subject_for_http_request(payload)
body = get_body_for_http_request(payload)
else:
return json_error(_('Unsupported check_type: {check_type}').format(check_type=check_type))
check_send_message(user_profile, client, 'stream', [stream], subject, body)
return json_success()
def get_subject_for_http_request(payload):
return PINGDOM_SUBJECT_TEMPLATE.format(name=payload['check_name'])
def get_body_for_http_request(payload):
current_state = payload['current_state']
previous_state = payload['previous_state']
data = {
'service_url': payload['check_params']['hostname'],
'previous_state': previous_state,
'current_state': current_state,
'type': get_check_type(payload)
}
body = PINGDOM_MESSAGE_TEMPLATE.format(**data)
if current_state == 'DOWN' and previous_state == 'UP':
description = PINGDOM_MESSAGE_DESCRIPTION_TEMPLATE.format(description=payload['long_description'])
body += '\n{description}'.format(description=description)
return body
def get_check_type(payload):
return payload['check_type']
| Python | 0 |
d8c359b27d371f5bd66825202860a0a376a2466c | add script to convert old plans to new ones | jsonQueries/old_to_new_plan.py | jsonQueries/old_to_new_plan.py | #!/usr/bin/env python
import json
import sys
def read_json(filename):
    """Load and return the JSON document stored in *filename*."""
    with open(filename, 'r') as handle:
        return json.load(handle)
def uniquify_fragments(query_plan):
    """Deduplicate identical fragments across workers.

    Returns a list of (workers, fragment) pairs where each distinct
    fragment appears once, tagged with every worker that runs it.
    Workers are scanned in sorted order, so the output is deterministic.
    """
    merged = []
    for worker in sorted(query_plan.keys()):
        for fragment in query_plan[worker]:
            for index, (workers, existing) in enumerate(merged):
                if existing == fragment:
                    merged[index] = (workers + [worker], existing)
                    break
            else:
                merged.append(([worker], fragment))
    return merged
def json_pretty(obj):
    """Serialize *obj* as stable (sorted-key), human-readable JSON."""
    return json.dumps(obj, sort_keys=True, indent=4, separators=(',', ': '))
if __name__ == "__main__":
if len(sys.argv) != 2:
print >> sys.stderr, "Usage: %s <old json file>" % sys.argv[0]
sys.exit(1)
myria_json_plan = read_json(sys.argv[1])
fragments = []
frags = uniquify_fragments(myria_json_plan['query_plan'])
for (ws,ops) in frags:
fragments.append({
'workers' : ws,
'operators' : ops
})
output = {
'raw_datalog' : myria_json_plan['raw_datalog'],
'logical_ra' : myria_json_plan['logical_ra'],
'fragments' : fragments
}
print json_pretty(output)
| Python | 0 | |
f71ce70330f7dea86820f1d9cdc390ea972aaeca | add 2s-complement | algorithms/bit-manipulation/2s-complement.py | algorithms/bit-manipulation/2s-complement.py | import sys
def ones(x):
    """Population count of nonnegative *x* (valid for 32-bit values).

    Classic parallel bit-count using octal masks; written with 0o literals
    and // so it runs identically on Python 2.6+ and Python 3.
    """
    uCount = x - ((x >> 1) & 0o33333333333) - ((x >> 2) & 0o11111111111)
    return ((uCount + (uCount >> 3)) & 0o30707070707) % 63

def count(x):
    """Total 1 bits in the 32-bit two's-complement representations of
    every integer from 0 to x inclusive (x >= 0), or from x to -1
    inclusive (x < 0).
    """
    if x >= 0:
        if x == 0:
            return 0
        if x % 2 == 0:
            # Even x: peel it off and recurse on the remaining range.
            return count(x - 1) + ones(x)
        # Odd x = 2k+1: (x+1)//2 ones in the low bit over 0..x, and each
        # higher-bit pattern of 0..k appears exactly twice.
        return (x + 1) // 2 + 2 * count(x // 2)
    else:
        # popcount(-j) == 32 - popcount(j-1) in 32-bit two's complement,
        # so summing j = 1..m gives 32*m - count(m-1); reduce to that.
        x += 1
        return 32 * (1 - x) - count(-x)

def solve(A, B):
    """Number of 1 bits in the 32-bit two's-complement representations of
    all integers in the inclusive range [A, B]."""
    if A >= 0:
        if A == 0:
            return count(B)
        return count(B) - count(A - 1)
    else:
        if B >= 0:
            return count(A) + count(B)
        return count(A) - count(B + 1)
if __name__ == '__main__':
    # HackerRank-style driver (Python 2): T test cases, each a pair "A B".
    T = int(sys.stdin.readline())
    for i in range(T):
        [A, B] = map(int, sys.stdin.readline().split());
        #print count(A), count(B)
        print solve(A, B)
| Python | 0.999978 | |
27a6b27a74f7c3a52c08b7dc3fbcc810a680b89d | Add a generic namespaced lock queue. | lib/python2.6/aquilon/locks.py | lib/python2.6/aquilon/locks.py | # ex: set expandtab softtabstop=4 shiftwidth=4: -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
import logging
from threading import Condition, Lock
from aquilon.exceptions_ import InternalError
LOGGER = logging.getLogger('aquilon.locks')
class LockQueue(object):
    """Provide a layered (namespaced?) locking mechanism.

    When a lock request comes in it is put into a queue. The lock
    request is made with an instance of LockKey. Each key is a set
    of ordered components that describes a heirarchy. The lock is
    granted once there are no conflicting lock requests preceeding
    it in the queue.

    As a convenience, ignore undefined keys. This essentially
    equates a key of None with a no-op request.
    """

    def __init__(self):
        # queue holds keys in arrival order; queue_condition guards it and
        # is notified on every release so blocked acquirers can re-check.
        self.queue_condition = Condition()
        self.queue = []

    def acquire(self, key):
        """Queue *key* and block until nothing ahead of it conflicts."""
        if key is None:
            return
        key.transition("acquiring", debug=True)
        with self.queue_condition:
            if key in self.queue:
                raise InternalError("Duplicate attempt to aquire %s with the "
                                    "same key." % key)
            self.queue.append(key)
            while self.blocked(key):
                key.log("requesting %s with %s others waiting",
                        key, key.blocker_count)
                self.queue_condition.wait()
            key.transition("acquired")

    def blocked(self, key):
        """Indicate whether the lock for this key can be acquired.

        As a side effect, reset the key's knowledge of external
        blockers and let it know if it is in line.
        """
        if key is None:
            return False
        key.reset_blockers()
        is_blocked = False
        for k in self.queue:
            if k == key:
                # Reached our own entry: only earlier keys can block us.
                return is_blocked
            if k.blocks(key):
                key.register_blocker(k)
                is_blocked = True
        # Can only get here if the key is not in the queue - seems
        # like a valid theoretical question to ask - in which case
        # the method is "would queued keys block this one?"
        return is_blocked

    def release(self, key):
        """Remove *key* from the queue and wake all waiters to re-check."""
        if key is None:
            return
        key.transition("releasing")
        with self.queue_condition:
            self.queue.remove(key)
            self.queue_condition.notifyAll()
        key.transition("released", debug=True)
class LockKey(object):
    """Create a key composed of an ordered list of components.

    The intent is that this class is subclassed to take a dictionary
    and provide validation before setting the components variable.
    """

    def __init__(self, components, logger=LOGGER, loglevel=logging.INFO):
        self.components = components
        self.logger = logger
        self.loglevel = loglevel
        # Number of queued keys ahead of this one that conflict with it;
        # maintained by LockQueue.blocked() via the methods below.
        self.blocker_count = None
        self.transition("initialized", debug=True)

    def __str__(self):
        if not self.components:
            return 'lock'
        if len(self.components) == 1:
            return '%s lock' % self.components[0]
        return '%s lock for %s' % (self.components[0],
                                   "/".join(self.components[1:]))

    def log(self, *args, **kwargs):
        """Log a message at this key's configured level."""
        self.logger.log(self.loglevel, *args, **kwargs)

    def transition(self, state, debug=False):
        """Record and log a lifecycle state change (acquiring/acquired/...)."""
        self.state = state
        if debug:
            self.logger.debug('%s %s', state, self)
        else:
            self.log('%s %s', state, self)

    def reset_blockers(self):
        # Called by LockQueue.blocked() before recounting conflicts.
        self.blocker_count = 0

    def register_blocker(self, key):
        self.blocker_count += 1

    def blocks(self, key):
        """Determine if this key blocks another.

        The algorithm exploits the zip() implementation. There are two
        basic cases:

        The keys are the same length. They only block each other if
        they match exactly. Using zip to interleave the parts for
        comparison should make sense here. If any of the parts are
        not equal then the two keys do not conflict.

        The keys are different length. Here, they block if one is
        a superset of the other. That is, if the shorter keys matches
        all the components of the longer key. The zip() method will
        truncate the comparison to the length of the shorter list.
        Thus the logic is the same - if all the paired components
        match then the keys are blocked.
        """
        for (a, b) in zip(self.components, key.components):
            if a != b:
                return False
        return True

    @staticmethod
    def merge(keylist):
        """Find the common root of a list of keys and make a new key.

        The new key will be in the LockKey class. Returning a key of
        the same class as the list (assuming they're all the same) is
        possible but more work for little gain.
        """
        keylist = [key for key in keylist if key is not None]
        if not keylist:
            return None
        components = []
        # Assume logger/loglevel is consistent across the list.
        logger = keylist[0].logger
        loglevel = keylist[0].loglevel
        for position in zip(*[key.components for key in keylist]):
            unique_elements = set(position)
            if len(unique_elements) == 1:
                components.append(unique_elements.pop())
            else:
                # First position where the keys diverge: keep the shared prefix.
                return LockKey(components, logger=logger, loglevel=loglevel)
        return LockKey(components, logger=logger, loglevel=loglevel)
| Python | 0 | |
173565f7f2b9ffa548b355a0cbc8f972f1445a50 | Add test coverage for rdopkg.guess version2tag and tag2version | tests/test_guess.py | tests/test_guess.py | from rdopkg import guess
from collections import namedtuple
import pytest
# Each case pairs the expected (version, version_tag_style) result with the
# raw input handed to guess.tag2version().
VersionTestCase = namedtuple('VersionTestCase', ('expected', 'input_data'))
# Well-formed tags: an optional v/V prefix is split off into the tag style;
# anything else passes through with no style.
data_table_good = [
    VersionTestCase(('1.2.3', None), '1.2.3'),
    VersionTestCase(('1.2.3', 'vX.Y.Z'), 'v1.2.3'),
    VersionTestCase(('1.2.3', 'VX.Y.Z'), 'V1.2.3'),
    VersionTestCase(('banana', None), 'banana'),
]
# Empty-ish inputs; the current implementation echoes them back unchanged
# (see test_table_data_bad_tag2version).
data_table_bad = [
    VersionTestCase((None, None), None),
    VersionTestCase((None, None), []),
    VersionTestCase((None, None), ()),
    VersionTestCase((None, None), ''),
    VersionTestCase((None, None), {}),
]
# Structured junk that makes tag2version raise.
data_table_ugly = [
    VersionTestCase((None, None), ('foo', 'bar', 'bah')),
    VersionTestCase((None, None), ['foo', 'bar', 'bah']),
    VersionTestCase((None, None), {'foo': 'bar'}),
]
def test_table_data_good_tag2version():
    """Every well-formed tag maps to its expected (version, style) pair."""
    for case in data_table_good:
        assert guess.tag2version(case.input_data) == case.expected
def test_table_data_bad_tag2version():
    """Empty-ish inputs currently come back unchanged with no style."""
    for case in data_table_bad:
        # Input validation arguably ought to return (None, None); the
        # current behaviour echoes the input back instead.
        assert guess.tag2version(case.input_data) == (case.input_data, None)
def test_table_data_ugly_tag2version():
    """Structured junk makes tag2version raise."""
    for case in data_table_ugly:
        # TODO: probably should be a more specific exception
        with pytest.raises(Exception):
            guess.tag2version(case.input_data)
def test_version2tag_simple():
    """No style: the version is the tag."""
    assert guess.version2tag('1.2.3') == '1.2.3'
def test_version2tag_type1():
    """Lowercase 'vX.Y.Z' style prepends 'v'."""
    assert guess.version2tag('1.2.3', 'vX.Y.Z') == 'v1.2.3'
def test_version2tag_type2():
    """Uppercase 'VX.Y.Z' style prepends 'V'."""
    assert guess.version2tag('1.2.3', 'VX.Y.Z') == 'V1.2.3'
| Python | 0 | |
7b5abb9bf56ed0da2b79f757bb27438d95589d52 | change in irpp 2010 real amount | tests/test_irpp1.py | tests/test_irpp1.py | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import nose
from datetime import datetime
import openfisca_france
openfisca_france.init_country()
from openfisca_core.simulations import ScenarioSimulation
def test_irpp():
    """
    Income-tax (IRPP) checks for a single person with wage income.

    Each entry pairs a gross amount with the expected (negative) IRPP
    result for that year; the simulated value must match within 1 euro.
    (Python 2 code: dict.iteritems and print statements.)
    """
    dico = {
        # Single earner with wage income only (declaration box 1AJ).
        "sali": [
            {"year" : 2012, "amount": 20000, "irpp":-1181 },
            {"year" : 2011, "amount": 20000, "irpp":-1181 },
            {"year" : 2010, "amount": 20000, "irpp":-1201 },
            ],
        }
    for revenu, test_list in dico.iteritems():
        for item in test_list:
            year = item["year"]
            amount = item["amount"]
            irpp = item["irpp"]
            simulation = ScenarioSimulation()
            simulation.set_config(year = year, nmen = 1)  # nmen: one household
            simulation.set_param()
            test_case = simulation.scenario
            if revenu in ["rsti", "sali"]:
                # Pensions and wages attach to the individual...
                test_case.indiv[0].update({revenu:amount})
            elif revenu in ["f2da", "f2dh", "f2dc", "f2ts", "f2tr", "f4ba", "f3vg", "f3vz"]:
                # ...capital/property income attaches to the declaration.
                test_case.declar[0].update({revenu:amount})
            else:
                assert False
            df = simulation.get_results_dataframe(index_by_code = True)
            if not abs(df.loc["irpp"][0] - irpp) < 1:
                # Print diagnostics before the assertion below fires.
                print year
                print revenu
                print amount
                print "OpenFisca :", abs(df.loc["irpp"][0])
                print "Real value :", irpp
            assert abs(df.loc["irpp"][0] - irpp) < 1
# TODO: The amounts are wrong
#
# def test_ppe():
# """
# test ppe pour un célibataire
# """
# dico = {
# # test pour un célibataire ayant un revenu salarial (1AJ)
# "sali": [
# {"year" : 2010, "amount": 12*1000/2, "ppe":-1181 },
# {"year" : 2010, "amount": 12*1000, "ppe":-1181 },
# {"year" : 2011, "amount": 12*1000/2, "ppe":-42338},
# {"year" : 2011, "amount": 12*1000, "ppe":-42338},
# ]
# }
# for revenu, test_list in dico.iteritems():
# for item in test_list:
# year = item["year"]
# amount = item["amount"]
# ppe = item["ppe"]
# simulation = ScenarioSimulation()
# simulation.set_config(year = year, nmen = 1)
# simulation.set_param()
# test_case = simulation.scenario
# if revenu in ["rsti", "sali"]:
# test_case.indiv[0].update({revenu:amount})
# test_case.indiv[0].update({"ppe_tp_sa":True})
# else:
# assert False
# df = simulation.get_results_dataframe(index_by_code=True)
# if not abs(df.loc["ppe"][0] - ppe) < 1:
# print year
# print revenu
# print amount
# print "OpenFisca :", abs(df.loc["ppe"][0])
# print "Real value :", ppe
# assert abs(df.loc["ppe"][0] - ppe) < 1
if __name__ == '__main__':
    # Run the suite directly; the commented lines preserve nose-based
    # invocations for reference.
    test_irpp()
#    test_ppe()
#    nose.core.runmodule(argv=[__file__, '-v', '-i test_*.py'])
#    nose.core.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'], exit=False)
| Python | 0 | |
e50060ca76c667b77db433ca03ef640140831dc9 | Add migration for dagman_metrics | migrations/004_add_dagman_metrics.py | migrations/004_add_dagman_metrics.py | import migrations
# Migration 004: create the dagman_metrics table.
conn = migrations.connect()
cur = conn.cursor()
# One row per DAGMan workflow run. `id` is both primary key and foreign key
# into raw_data, so a metrics row is removed with its submission (CASCADE).
cur.execute("""
create table dagman_metrics (
id INTEGER UNSIGNED NOT NULL,
ts DOUBLE,
remote_addr VARCHAR(15),
hostname VARCHAR(256),
domain VARCHAR(256),
version VARCHAR(10),
wf_uuid VARCHAR(36),
root_wf_uuid VARCHAR(36),
start_time DOUBLE,
end_time DOUBLE,
duration FLOAT,
exitcode SMALLINT,
dagman_id VARCHAR(32),
parent_dagman_id VARCHAR(32),
jobs INTEGER,
jobs_failed INTEGER,
jobs_succeeded INTEGER,
dag_jobs INTEGER,
dag_jobs_failed INTEGER,
dag_jobs_succeeded INTEGER,
dag_status INTEGER,
planner VARCHAR(1024),
planner_version VARCHAR(10),
rescue_dag_number INTEGER,
total_job_time DOUBLE,
total_jobs INTEGER,
total_jobs_run INTEGER,
PRIMARY KEY (id),
FOREIGN KEY (id) REFERENCES raw_data(id) ON DELETE CASCADE
) ENGINE=InnoDB DEFAULT CHARSET=utf8;
""")
conn.commit()
cur.close()
| Python | 0.000001 | |
dc314e50a573f3ecb2cf41d1e08df29ea991d3b6 | Add migrations versions | migrations/versions/d71a3e9499ef_.py | migrations/versions/d71a3e9499ef_.py | """empty message
Revision ID: d71a3e9499ef
Revises:
Create Date: 2017-11-21 23:19:12.740735
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'd71a3e9499ef'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
    """Create the initial schema: user 1-<many bucket_list 1-<many item."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('username', sa.String(length=50), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('email', sa.String(length=120), nullable=True),
    sa.Column('surname', sa.String(length=100), nullable=False),
    sa.Column('first_name', sa.String(length=100), nullable=False),
    sa.Column('active', sa.Boolean(), nullable=True),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('email'),
    sa.UniqueConstraint('username')
    )
    # Bucket lists belong to a user via created_by.
    op.create_table('bucket_list',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('interests', sa.String(length=120), nullable=True),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('created_by', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['created_by'], ['user.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # Items belong to a bucket list via the `bucketlists` column.
    op.create_table('item',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=100), nullable=True),
    sa.Column('description', sa.Text(), nullable=True),
    sa.Column('status', sa.Text(), nullable=True),
    sa.Column('date_accomplished', sa.DateTime(), nullable=True),
    sa.Column('date_created', sa.DateTime(), nullable=True),
    sa.Column('date_modified', sa.DateTime(), nullable=True),
    sa.Column('bucketlists', sa.Integer(), nullable=False),
    sa.ForeignKeyConstraint(['bucketlists'], ['bucket_list.id'], ),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop all tables, children first so foreign keys are respected."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('item')
    op.drop_table('bucket_list')
    op.drop_table('user')
    # ### end Alembic commands ###
| Python | 0.000001 | |
5bac311ac9da94edbd08b0b43c5214ba6b9fc1c8 | add scollable pages | app2.py | app2.py | from webkit import WebView
import pygtk
pygtk.require('2.0')
import gtk, threading, time
from nuimo import NuimoScanner, Nuimo, NuimoDelegate
class App:
    """Fullscreen three-pane WebKit carousel, scrolled by Nuimo rotation.

    Three WebViews are laid side by side inside a gtk.Fixed; rotate()
    slides them horizontally, wrapping modulo three screen widths.
    (Python 2 / PyGTK code.)
    """
    def __init__(self):
        window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        fixed = gtk.Fixed()
        views = [WebView(), WebView(), WebView()]
        width = gtk.gdk.screen_width()
        height = gtk.gdk.screen_height()
        for idx, view in enumerate(views):
            view.set_usize(width, height)
            # Panes side by side; pane 0 starts one screen to the left.
            fixed.put(views[idx], -width+(idx*width), 0)
        window.add(fixed)
        #self.loadUrls()
        window.fullscreen()
        window.show_all()
        views[0].open('http://google.com?q=page1')
        views[1].open('http://google.com?q=page2')
        views[2].open('http://google.com?q=page3')
        self.views = views
        self.fixed = fixed
        # Carousel scroll offset in pixels; wraps modulo 3*width.
        self.x = 0
        self.width = width
    def rotate(self, val):
        """Scroll the carousel by `val` pixels (called from the GTK loop)."""
        w = self.width
        x = self.x = (self.x - val) % (3 * w)
        for idx, view in enumerate(self.views):
            # Pane 0 wraps around to the right edge once scrolled past.
            if idx == 0 and x > w:
                self.fixed.move(view, ((idx+3)*w)-x, 0)
            else:
                self.fixed.move(view, (idx*w)-x, 0)
    def loadUrls(self):
        """Read one URL per line from urls.csv; fall back to a default."""
        self.current = 0
        try:
            with open('urls.csv') as f:
                self.urls = f.readlines()
            #remove empties
            self.urls = filter(None, self.urls)
        except:
            print 'failed to read urls.csv'
            self.urls = ['http://google.com']
    def next(self):
        # NOTE(review): self.urls is only set by loadUrls(), which is
        # commented out in __init__, and self.view is never assigned
        # anywhere (only self.views exists) -- as written this raises
        # AttributeError. Confirm intended behaviour before use.
        self.current = (self.current + 1) % len(self.urls)
        self.view.open(self.urls[self.current])
    def previous(self):
        # NOTE(review): same self.view / self.urls caveat as next().
        self.current = self.current - 1
        if self.current < 0:
            self.current = len(self.urls) - 1
        self.view.open(self.urls[self.current])
class CustomNuimoDelegate(NuimoDelegate):
    """Nuimo delegate that forwards rotation events to the App carousel."""
    def __init__(self, nuimo, app):
        NuimoDelegate.__init__(self, nuimo)
        # Keep a reference to the app whose carousel we drive.
        self.app = app
    def handleRotation(self, value):
        NuimoDelegate.handleRotation(self, value)
        # Bug fix: use the injected instance instead of the module-level
        # global `app`; idle_add marshals the call onto the GTK main loop
        # thread, since this runs on the BLE notification thread.
        gtk.idle_add(self.app.rotate, value)
def showImagesOnNuimo(nuimo):
    """Flash one 9x9 LED glyph for 2 s, then hold a second one for 20 s.

    Each string below is a row-major 9x9 bitmap; '*' lights an LED.
    """
    nuimo.displayLedMatrix(
        "         " +
        "   ***   " +
        "  *   *  " +
        " *     * " +
        "   ***   " +
        "     *   " +
        "    *    " +
        "   *     " +
        "         ", 2.0)
    # Give the first glyph its 2 seconds on screen before replacing it.
    time.sleep(2)
    nuimo.displayLedMatrix(
        " **   ** " +
        "* *  * * " +
        "  *****  " +
        " *     * " +
        " * * * * " +
        " *  *  * " +
        " * * * * " +
        "  *   *  " +
        "   ***   ", 20.0)
def main():
try:
gtk.main()
except Exception, e:
print '%s', e
return 0
if __name__ == "__main__":
app = App()
def nuimo_process():
def foundDevice(addr):
print 'found device: ' + addr
nuimo = Nuimo(addr)
nuimo.set_delegate(CustomNuimoDelegate(nuimo, app))
nuimo.connect()
showImagesOnNuimo(nuimo)
while True:
nuimo.waitForNotifications()
while True:
try:
NuimoScanner().start(foundDevice)
except Exception, e:
print 'failed to connect to nuimo: %s' % e
time.sleep(5)
thread = threading.Thread(target=nuimo_process)
thread.daemon = True
thread.start()
main()
| Python | 0 | |
4933e4ca107516a667ae3449337746bf7e002cc2 | Create bkvm.py | bkvm.py | bkvm.py | #!/usr/bin/python
import commands, time
def prepareTarget():
    """Mount the CIFS backup share at /bak and list its current contents."""
    print "prepare backup Target"
    print "---------------------"
    # NOTE(review): share credentials are hard-coded in plaintext; move
    # them to a credentials file (mount -o credentials=...) before real use.
    cmd = "mount -t cifs //10.0.0.9/public/BK\ VM\ XEN -o username=xxx,password=yyy /bak/"
    output = commands.getoutput(cmd)
    cmd = "ls -lht --time-style=\"long-iso\" /bak/"
    output = commands.getoutput(cmd)
    print output
    print "..."
def releaseTarget():
    """List the backup share one last time, then unmount /bak."""
    print "release backup Target"
    print "---------------------"
    cmd = "ls -lht --time-style=\"long-iso\" /bak/"
    output = commands.getoutput(cmd)
    print output
    cmd = "umount /bak/"
    output = commands.getoutput(cmd)
    print "..."
def get_backup_vms():
    """Return (uuid, name) pairs for every running, non-snapshot guest VM."""
    listing = commands.getoutput(
        "xe vm-list is-control-domain=false is-a-snapshot=false power-state=running")
    vms = []
    # `xe vm-list` separates records with two blank lines; each record's
    # first two lines carry the uuid and name-label values after a colon.
    for record in listing.split("\n\n\n"):
        fields = record.splitlines()
        vms.append((fields[0].split(":")[1][1:],
                    fields[1].split(":")[1][1:]))
    return vms
def backup_vm(uuid, filename, timestamp):
    """Snapshot a VM, export the snapshot to `filename`, then delete it.

    The export is written to `filename`.tmp and only promoted over the
    previous backup on success, so a failed export never clobbers the
    last good copy.
    """
    cmd = "xe vm-snapshot uuid=" + uuid + " new-name-label=" + timestamp
    snapshot_uuid = commands.getoutput(cmd)
    # A snapshot is created as a template; clear the flag so it is exportable.
    cmd = "xe template-param-set is-a-template=false ha-always-run=false uuid="
    cmd = cmd + snapshot_uuid
    commands.getoutput(cmd)
    # Remove any stale partial export from a previous failed run.
    cmd = "rm " + filename+".tmp"
    commands.getoutput(cmd)
    cmd = "xe vm-export vm=" + snapshot_uuid + " filename=" + filename+".tmp"
    (status,output)=commands.getstatusoutput(cmd)
    if (status==0):
        # Promote the fresh export over the previous backup.
        cmd = "rm " + filename + " ; mv " + filename+".tmp"+ " " + filename
        commands.getoutput(cmd)
    else:
        print "Error"
        print output
    # Always remove the snapshot, success or not.
    cmd = "xe vm-uninstall uuid=" + snapshot_uuid + " force=true"
    commands.getoutput(cmd)
# Main flow: mount the share, export every running VM, unmount.
prepareTarget()
print "Backup Running VMs"
print "------------------"
for (uuid, name) in get_backup_vms():
    timestamp = time.strftime("%Y%m%d-%H%M", time.gmtime())
#    filename = "\"/bak/" + timestamp + " " + name + ".xva\""
    # Fixed name (no timestamp): each run overwrites the previous backup.
    filename = "\"/bak/" + name + ".xva\""
    print timestamp, uuid, name," to ", filename
    backup_vm(uuid, filename, timestamp)
print "..."
releaseTarget()
| Python | 0.000002 | |
e050d9ce4fb4d63ec7857f581033258f87c805b0 | Create pyPdfMerger.py | pyPdfMerger.py | pyPdfMerger.py | # -*- coding: utf-8 -*-
"""
TITLE: pyPdfMerger.py
AUTHOR: John Himics
EMAIL: john@johnhimics.com
TIMEZONE: EST
VERSION: 0
DESCRIPTION: Merges pdf files together
DEPENDANCIES: PyPDF2
"""
from PyPDF2 import PdfFileMerger
#Global Variables
merger = PdfFileMerger()
#Methods
#Program starts here
if __name__ == "__main__":
input1 = open("C:\PFile\@ActiveProjects\1050LF Yeild Issues\Emails\All emails 11-18-13 2.pdf", "rb")
input2 = open("C:\PFile\@ActiveProjects\1050LF Yeild Issues\Emails\Wade 343005 [compatibility mode].pdf", "rb")
input3 = open("C:\PFile\@ActiveProjects\1050LF Yeild Issues\Emails\1050LF Mill Mix MDR.pdf", "rb")
# add the first 3 pages of input1 document to output
#merger.append(fileobj = input1, pages = (0,3))
# insert the first page of input2 into the output beginning after the second page
#merger.merge(position = 2, fileobj = input2, pages = (0,1))
# append entire input3 document to the end of the output document
merger.append(input1)
merger.append(input2)
merger.append(input3)
# Write to an output PDF document
output = open("C:\PFile\@ActiveProjects\1050LF Yeild Issues\Emails\document-output.pdf", "wb")
merger.write(output)
| Python | 0 | |
62e65ae978b703b6af0b594e958e79d467e83421 | add 63 | python/p063.py | python/p063.py | def g(power):
count = 0
i = 1
min = 10**(power - 1)
max = 10**power - 1
while True:
result = i**power
if result >= min:
if result <= max:
count += 1
else:
break
i += 1
return count
# Project Euler 63: total count of n-digit integers that are also nth powers.
# (Python 2 code: xrange and print statement.)
count = 0
for i in xrange(1, 1000):
    current = g(i)
    if current > 0:
        count += current
    else:
        # Stop at the first power with no hits; assumes counts never
        # recover once they reach zero (holds for this problem).
        break
print count
| Python | 0.99912 | |
600bf1bbce7db5f62d55537a33d4586fa2892d8a | Create conf.py | conf.py | conf.py | #OK
| Python | 0.000001 | |
66c8c6f587c49f587901cf6a9cf7e122d110d668 | Add migration to encrypt secrets | migrations/versions/3bac7f8ccfdb_encrypt_secrets.py | migrations/versions/3bac7f8ccfdb_encrypt_secrets.py | """encrypt_secrets
Revision ID: 3bac7f8ccfdb
Revises: 291237183b82
Create Date: 2019-01-14 17:35:58.872052
"""
# revision identifiers, used by Alembic.
revision = '3bac7f8ccfdb'
down_revision = '291237183b82'
from alembic import op, context
import sqlalchemy as sa
# def upgrade():
# op.add_column('secret',
# sa.Column('secret', sa.String(length=512), nullable=True))
# import nacl.secret
# import nacl.utils
# from inbox.ignition import engine, engine_manager
# from inbox.models.session import session_scope
# from inbox.config import config
# print engine_manager.engines
# _engine = engine_manager.engines[0]
# Base = sa.ext.declarative.declarative_base()
# Base.metadata.reflect(_engine)
# key = config.get_required('SECRET_ENCRYPTION_KEY')
# class Secret(Base):
# __table__ = Base.metadata.tables['secret']
# with session_scope(0, versioned=False) as db_session:
# secrets = db_session.query(Secret).filter(
# Secret.encryption_scheme == 0,
# Secret._secret.isnot(None)).order_by(Secret.id).all()
# for s in secrets:
# unencrypted = s._secret
# nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
# s.secret = nacl.secret.SecretBox(
# key=key,
# encoder=nacl.encoding.HexEncoder
# ).encrypt(
# plaintext=unencrypted,
# nonce=nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)
# )
# # Picked arbitrarily
# # s.acl_id = 0
# # s.type = 0
# db_session.add(s)
# db_session.commit()
# op.drop_column('secret', '_secret')
def upgrade():
    """Encrypt all plaintext secrets in one shard with libsodium SecretBox.

    Invoke with ``-x shard_id=<n>``; only that shard's database is
    touched. Also back-fills each secret's `type` ('password' vs 'token')
    from the account tables. (Python 2 code: uses `unicode`.)
    """
    from inbox.config import config
    import nacl.secret
    import nacl.utils
    from inbox.ignition import engine_manager
    from inbox.models.session import session_scope
    # Shard to migrate, passed on the alembic command line via -x.
    shard_id = int(context.get_x_argument(as_dictionary=True).get('shard_id'))
    engine = engine_manager.engines[shard_id]
    # Reflect the live schema instead of importing app models, so the
    # migration works regardless of current model code.
    Base = sa.ext.declarative.declarative_base()
    Base.metadata.reflect(engine)
    class Secret(Base):
        __table__ = Base.metadata.tables['secret']
    class GenericAccount(Base):
        __table__ = Base.metadata.tables['genericaccount']
    with session_scope(shard_id, versioned=False) as db_session:
        # Only secrets that still hold data and are not yet encrypted
        # (encryption_scheme == 0 means plaintext).
        secrets = db_session.query(Secret).filter(
            Secret._secret.isnot(None),
            Secret.encryption_scheme == 0).all()
        # Join on the genericaccount and optionally easaccount tables to
        # determine which secrets should have type 'password'.
        generic_query = db_session.query(Secret.id).join(
            GenericAccount, Secret.id == GenericAccount.password_id)
        password_secrets = [id_ for id_, in generic_query]
        if engine.has_table('easaccount'):
            class EASAccount(Base):
                __table__ = Base.metadata.tables['easaccount']
            eas_query = db_session.query(Secret.id).join(
                EASAccount).filter(Secret.id == EASAccount.password_id)
            password_secrets.extend([id_ for id_, in eas_query])
        for s in secrets:
            # SecretBox needs bytes; encode unicode values first.
            plain = s._secret.encode('utf-8') if isinstance(s._secret, unicode) \
                else s._secret
            if config.get_required('ENCRYPT_SECRETS'):
                s._secret = nacl.secret.SecretBox(
                    key=config.get_required('SECRET_ENCRYPTION_KEY'),
                    encoder=nacl.encoding.HexEncoder
                ).encrypt(
                    plaintext=plain,
                    nonce=nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE))
                # 1 is EncryptionScheme.SECRETBOX_WITH_STATIC_KEY
                s.encryption_scheme = 1
            else:
                s._secret = plain
            if s.id in password_secrets:
                s.type = 'password'
            else:
                s.type = 'token'
            db_session.add(s)
        db_session.commit()
def downgrade():
    # Decryption is not implemented; this migration is one-way.
    pass
| Python | 0 | |
45cb6df45df84cb9ae85fc8aa15710bde6a15bad | Add create image functional negative tests | nova/tests/functional/test_images.py | nova/tests/functional/test_images.py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.tests.functional.api import client
from nova.tests.functional import test_servers
class ImagesTest(test_servers.ServersTestBase):
    """Functional tests for server snapshot (createImage) behaviour."""

    def test_create_images_negative_invalid_state(self):
        """createImage succeeds on an ACTIVE server but 409s on a SHELVED one."""
        # Create server
        server = self._build_minimal_create_server_request()
        created_server = self.api.post_server({"server": server})
        server_id = created_server['id']
        found_server = self._wait_for_state_change(created_server, 'BUILD')
        self.assertEqual('ACTIVE', found_server['status'])
        # Create image while ACTIVE: must succeed.
        name = 'Snapshot 1'
        self.api.post_server_action(
            server_id, {'createImage': {'name': name}})
        self.assertEqual('ACTIVE', found_server['status'])
        # Confirm that the image was created
        images = self.api.get_images(detail=False)
        image_map = {image['name']: image for image in images}
        found_image = image_map.get(name)
        self.assertTrue(found_image)
        # Change server status from ACTIVE to SHELVED for negative test;
        # shelved_offload_time=-1 keeps the instance shelved (not offloaded).
        self.flags(shelved_offload_time = -1)
        self.api.post_server_action(server_id, {'shelve': {}})
        found_server = self._wait_for_state_change(found_server, 'ACTIVE')
        self.assertEqual('SHELVED', found_server['status'])
        # Create image in SHELVED (not ACTIVE, etc.): expect HTTP 409.
        name = 'Snapshot 2'
        ex = self.assertRaises(client.OpenStackApiException,
                               self.api.post_server_action,
                               server_id,
                               {'createImage': {'name': name}})
        self.assertEqual(409, ex.response.status_code)
        self.assertEqual('SHELVED', found_server['status'])
        # Confirm that the image was not created
        images = self.api.get_images(detail=False)
        image_map = {image['name']: image for image in images}
        found_image = image_map.get(name)
        self.assertFalse(found_image)
        # Cleanup
        self._delete_server(server_id)
| Python | 0.000005 | |
18e2263a636e97519272a21562cbba4b978fcf49 | Create EmailForm | headlines/forms.py | headlines/forms.py | from flask_wtf import FlaskForm
from wtforms import StringField, TextAreaField, SubmitField
from wtforms.validators import DataRequired, Email
class EmailForm(FlaskForm):
    """ Form used to submit messages to the admin. """
    # Sender's display name; optional (no validators attached).
    name = StringField('Name')
    # Reply address; must be present and a well-formed email.
    reply_to = StringField('Email', validators=[Email(), DataRequired()])
    # Message body; required.
    message = TextAreaField('Message', validators=[DataRequired()])
    submit = SubmitField('Submit')
61b21d1ec14e0be683f8da2b92b3ca2aa9fdcf59 | add sample for api caller | InvenTree/plugin/samples/integration/api_caller.py | InvenTree/plugin/samples/integration/api_caller.py | """
Sample plugin for calling an external API
"""
from django.utils.translation import ugettext_lazy as _
from plugin import IntegrationPluginBase
from plugin.mixins import APICallMixin, SettingsMixin
class SampleApiCallerPlugin(APICallMixin, SettingsMixin, IntegrationPluginBase):
    """
    A small api call sample
    """
    PLUGIN_NAME = "Sample API Caller"

    # User-editable settings surfaced by SettingsMixin.
    SETTINGS = {
        'API_TOKEN': {
            'name': 'API Token',
            'protected': True,  # treated as a secret value
        },
        'API_URL': {
            'name': 'External URL',
            'description': 'Where is your API located?',
            'default': 'https://reqres.in',
        },
    }

    # Tell APICallMixin which settings hold the base URL and the token.
    API_URL_SETTING = 'API_URL'
    API_TOKEN_SETTING = 'API_TOKEN'

    def get_external_url(self):
        """
        returns data from the sample endpoint
        """
        return self.api_call('api/users/2')
| Python | 0 | |
1a68a1a461a66c4a4aaf3a19a607ab64475cb05c | Create simpleExamples.py | simpleExamples.py | simpleExamples.py | import DSM as dsmx
import random as rnd
import copy
def example1():
myDSM=dsmx.DSM('example')
## adding components
myDSM.addComponent(['c1'])
myDSM.addComponent(['c2'])
myDSM.addComponent(['c3'])
#
myDSM.display()
print "--------"
## adding relations between existing components
myDSM.addRelation(['c1'], ['c2'], [1])
myDSM.addRelation(['c3'], ['c1'], [1])
myDSM.addRelation(['c2'], ['c3'], [1])
myDSM.display()
print "--------"
## adding relations with non existing elements
myDSM.addRelation(['c4'], ['c5'], [1.0])
myDSM.display()
#using pandas for better visualisation
myDSM.dispPDFrame()
def example2():
    """Same walkthrough as the directional case, but un-directional."""
    ### simple examples un-directional dsm
    myDSMU=dsmx.DSM('example undirectional','simple','no')
    ## adding components
    myDSMU.addComponent(['c1'])
    myDSMU.addComponent(['c2'])
    myDSMU.addComponent(['c3'])
    #
    myDSMU.display()
    print "--------"
    ## adding relations between existing components
    myDSMU.addRelation(['c1'], ['c2'], [1])
    myDSMU.addRelation(['c3'], ['c1'], [1])
    myDSMU.addRelation(['c2'], ['c3'], [1])
    myDSMU.display()
    print "--------"
    ## adding relations with non existing elements
    myDSMU.addRelation(['c4'], ['c5'], [1.0])
    myDSMU.display()
def example3():
### simple examples for array inputs
myDSM=dsmx.DSM('example array')
#print 'creating a list of elements'
myList=list(range(0,10))
#print myList
## adding components
myDSM.addComponent(myList)
#print 'creating two shuffled list'
rnd.shuffle(myList)
myList1=copy.copy(myList)
rnd.shuffle(myList)
myList2=copy.copy(myList)
#print myList1
#print myList2
#print "--------"
#
myDSM.display()
print "--------"
## adding relations between existing components
myDSM.addRelation(myList1, myList2, [1.0]*len(myList))
myDSM.display()
print "--------"
## adding relations with non existing elements
#using pandas for better visualisation
myDSM.dispPDFrame()
def example4():
    """Interaction-typed DSM after Pimmler & Eppinger (1994)."""
    ## Example using Interactions
    #Based on interactions of Pimmler and Eppinger (1994)
    ## http://web.mit.edu/eppinger/www/pdf/Pimmler_DTM1994.pdf
    ## required = 2
    ## desired = 1
    ## indifferent = 0 (default value)
    ## undesired = -1
    ## detrimental = -2
    ##
    ## create a dict of format [ S E
    ##                           I M ]
    ###########################################################
    myDSM2=dsmx.DSM(name='example 2', dsmType='interactions')
    #adding components
    myDSM2.addComponent(['c1'])
    myDSM2.addComponent(['c2'])
    myDSM2.addComponent(['c3'])
    #
    myDSM2.display()
    print "--------"
    ## adding relations between existing components
    # using complete interaction list
    myDSM2.addRelation(['c1'], ['c2'], [{'s':1, 'e':0, 'i':0 ,'m':-2}])
    myDSM2.addRelation(['c3'], ['c1'], [{'s':0, 'e':1, 'i':1 ,'m':0}])
    #one interaction at a time
    myDSM2.addRelation(['c2'], ['c3'], [{'s':2}])
    myDSM2.addRelation(['c2'], ['c3'], [{'e':-1}])
    myDSM2.addRelation(['c2'], ['c3'], [{'i':0}])
    myDSM2.addRelation(['c2'], ['c3'], [{'m':-1}])
    #using lists of components and interactions, and new components
    myDSM2.addRelation(['c4', 'c6'], ['c5', 'c4'], [{'s':1, 'e':1, 'i':1 ,'m':1},{'s':-1, 'e':1, 'i':-1 ,'m':-2}])
    myDSM2.display()
    print "--------"
    myDSM2.dispPDFrame()
| Python | 0.000001 | |
d5fcaf05d100d3fe709b34b8f6b839736773a130 | Create dict.py | dict.py | dict.py | import random
# 26-letter lowercase alphabet.
# NOTE(review): appears unused -- create() builds its own tables; confirm
# before removing.
a=["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s"\
,"t","u","v","w","x","y","z"]
def create():
    """Write dictionary.py: a random substitution cipher exposing ver()/ent().

    ver(letter) maps a plaintext character to its substitute; ent(letter)
    inverts the mapping; unmapped characters pass through unchanged.
    Substitutes are drawn without replacement, so the mapping is a
    bijection over the covered characters.

    Fixes: the file handle was never closed (now a context manager), and
    the generated module now uses consistent indentation so it is
    guaranteed to be importable.
    """
    lowercase = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l",
                 "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x",
                 "y", "z"]
    uppercase = [c.upper() for c in lowercase]
    # Plaintext domain: lowercase + space, then uppercase.
    tablewenn = lowercase + [" "]
    tablewennupper = list(uppercase)
    # Pool of substitutes, consumed without replacement.
    tabledann = lowercase + uppercase + [" "]
    entkeys = []
    with open("dictionary.py", "w") as dictionary:
        dictionary.write("def ver(letter):\n")
        for plain in tablewenn + tablewennupper:
            cipher = random.choice(tabledann)
            tabledann.remove(cipher)
            dictionary.write("    if letter == '" + plain + "' :\n        return '" + cipher + "'\n")
            entkeys.append([cipher, plain])
        # Final else binds to the last if: unmapped input passes through.
        dictionary.write("    else:\n        return letter\n")
        dictionary.write("def ent(letter):\n")
        for cipher, plain in entkeys:
            dictionary.write("    if letter == '" + cipher + "':\n        return '" + plain + "'\n")
        dictionary.write("    else:\n        return letter")
def debug():
    # Placeholder: no diagnostics implemented yet.
    pass
| Python | 0.000001 | |
2fba29b90156e844d7d61a15c9ad9c37e2b5dfe2 | load template | examples/aimsun/load_template.py | examples/aimsun/load_template.py | """
Load an already existing Aimsun template and run the simulation
"""
from flow.core.experiment import Experiment
from flow.core.params import AimsunParams, EnvParams, NetParams
from flow.core.params import VehicleParams
from flow.envs import TestEnv
from flow.scenarios.loop import Scenario
from flow.controllers.rlcontroller import RLController
# Simulation parameters: 0.1 s step, GUI rendering on, emissions written to
# ./data, restricted to the named Aimsun subnetwork.
sim_params = AimsunParams(
    sim_step=0.1,
    render=True,
    emission_path='data',
    subnetwork_name="Subnetwork 8028981")
env_params = EnvParams()
vehicles = VehicleParams()
# 22 RL-controlled vehicles.
vehicles.add(
    veh_id="rl",
    acceleration_controller=(RLController, {}),
    num_vehicles=22)
scenario = Scenario(
    name="test",
    vehicles=vehicles,
    # NOTE(review): hard-coded absolute template path; parameterize before sharing.
    net_params=NetParams(template="/Users/nathan/internship/I-210Pasadena/I-210subnetwork.ang")
)
env = TestEnv(env_params, sim_params, scenario, simulator='aimsun')
exp = Experiment(env)
# One run of 3000 simulation steps.
exp.run(1, 3000)
| Python | 0.000001 | |
c6f6278c1915ef90e8825f94cc33a4dea4124722 | Add http directory listing with content display | network/http_server_cat.py | network/http_server_cat.py | #!/bin/env python3
import html
import http.server
import os
import pathlib
import string
import urllib.parse

import click
@click.command()
# Bug fix: click passes untyped ARGUMENT values as strings, but
# http.server.HTTPServer requires an int port -- declare type=int.
@click.argument("port", required=False, type=int)
@click.option("-s", "--server", default="0.0.0.0")
def main(port, server):
    """Serve the working directory over HTTP on SERVER:PORT (default 8888)."""
    if not port:
        port = 8888
    http_server = http.server.HTTPServer((server, port), PostHandler)
    print('Starting server on {0}:{1}, use <Ctrl-C> to stop'.format(
        server, port))
    http_server.serve_forever()
class PostHandler(http.server.BaseHTTPRequestHandler):
    """Handler that renders directory listings and inlines file contents.

    GET on a file returns its text wrapped in <pre>; GET on a directory
    returns a linked listing. Served content and entry names are now
    HTML-escaped so a served file cannot inject markup into the page,
    and hrefs are percent-encoded so names with spaces produce valid links.

    NOTE(review): request paths are joined under `cwd` without
    normalisation, so "../" segments can escape the served tree --
    confirm whether traversal protection is needed before exposing this
    beyond a trusted network.
    """

    # Root of the served tree, relative to the process working directory.
    cwd = pathlib.Path(".")

    def do_GET(self):
        """Serve the file or directory named by the request path."""
        body_file_cat = string.Template("$content")
        body_dir_list = string.Template("""
<h1>Directory listing for $cwd</h1>
<ul>
$items
</ul>
""")
        page = string.Template("""<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Directory listing for $cwd</title>
</head>
<body>
$body
</body>
</html>
""")
        path = urllib.parse.urlparse(self.path)
        fs_path = pathlib.Path("{}{}".format(self.cwd, path.path))
        prefix_ref = "{}/".format(path.path)
        if fs_path.is_file():
            body = body_file_cat
            with fs_path.open() as f:
                content = f.read()
            # Escape so the file renders as text instead of live markup.
            body = body.substitute(
                content="<pre>{}</pre>".format(html.escape(content)))
        else:
            body = body_dir_list
            items = list()
            item_template = string.Template('<li><a href="$item_path">$item_name</a></li>')
            for p in fs_path.iterdir():
                # Percent-encode the href; the original emitted raw names.
                item_path = urllib.parse.urljoin(prefix_ref,
                                                 urllib.parse.quote(p.name))
                item_name = p.name
                if os.path.isdir(p):
                    item_name = "{}/".format(item_name)
                items.append(item_template.substitute(
                    item_path=item_path, item_name=html.escape(item_name)))
            body = body.substitute(cwd=fs_path, items="\n".join(items))
        page = page.substitute(cwd=fs_path, body=body)
        self.send_response(200)
        self.send_header("Content-type", "text/html")
        self.end_headers()
        self.wfile.write(page.encode("UTF-8"))
if __name__ == '__main__':
    main()  # click parses sys.argv and invokes the command
| Python | 0 | |
8fa81263cfcc63f6bf22ed2ad50103f91bc43b21 | Create hira.py | hira.py | hira.py | #coding:utf-8
import hashlib
# Python 2 script (print statements, unichr): enumerate every 4-character
# hiragana string from あ..ん and print it with its MD5 digest.
start = ord(u'あ')
end = ord(u'ん')
hira = []
print "Create hiragana"
# Build the UTF-8-encoded hiragana alphabet covering U+3042..U+3093.
for i in range(start, end+1, 1):
    hira.append(unichr(i).encode('utf-8'))
num = len(hira)
# Exhaustive 4-character cartesian product; i1 varies fastest, so the
# rightmost loop drives the last printed character's position 1.
for i4 in range(num):
    for i3 in range(num):
        for i2 in range(num):
            for i1 in range(num):
                msg = hira[i1] + hira[i2] + hira[i3] + hira[i4]
                print msg,
                print hashlib.md5(msg).hexdigest()
| Python | 0.000002 | |
92bc1ad22b6147f61ef4b51b16e115109bc04596 | add build.gyp | build.gyp | build.gyp | {
'targets':[
{
'target_name':'start_first',
'type':'executable',
'dependencies':[],
'defines':[],
'include_dirs':[],
'sources':[
'start_first/opengl_first.c',
],
'libraries':[
'-lGLU -lGL -lglut'
],
'conditions':[]
}
],
}
| Python | 0.000001 | |
45a0b65106f665872f14780e93ab9f09e65bbce3 | add genRandomGraph.py | ComplexCiPython/genRandomGraph.py | ComplexCiPython/genRandomGraph.py | import networkx
import sys
# Usage: python genRandomGraph.py [output folder]
if len(sys.argv) < 2:
    print ("python genRandomGraph.py [output folder]");
    input()  # pause so the usage message stays visible
    sys.exit(0);
outputPath = sys.argv[1]
# Erdos-Renyi G(n, p): 100k nodes, p = 3/100000 (expected mean degree ~3).
# NOTE(review): under Python 2, 3/100000 is integer division == 0, producing
# an empty graph — confirm this script always runs on Python 3.
G=networkx.erdos_renyi_graph(100000,3/100000)
networkx.write_edgelist(G, outputPath + "/genRandomGraph.csv", data=False , delimiter=',')
| Python | 0.000001 | |
3b15fb1d43bad6d6cf2112538d1de8c1710d0272 | add test for within_page_range | freelancefinder/freelancefinder/tests/test_within_page_range_templatetag.py | freelancefinder/freelancefinder/tests/test_within_page_range_templatetag.py | """Test the within_page_range function."""
from ..templatetags.within_page_range import within_filter
def test_in_range_above():
    """One page above current should be displayed."""
    assert within_filter(5, 4)
def test_in_range_below():
    """One page below current should be displayed."""
    assert within_filter(3, 4)
def test_out_of_range_above():
    """20 pages above current should not be displayed."""
    assert not within_filter(74, 54)
def test_out_of_range_below():
    """20 pages below current should not be displayed."""
    assert not within_filter(34, 54)
| Python | 0.000001 | |
0c315f766b31c105c60b39746db977d6702955ca | Remove unneeded model attributes | successstories/views.py | successstories/views.py | from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, ListView
from honeypot.decorators import check_honeypot
from .forms import StoryForm
from .models import Story, StoryCategory
class ContextMixin:
    """Adds the full StoryCategory queryset to the template context."""

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['category_list'] = StoryCategory.objects.all()
        return ctx


class StoryCreate(ContextMixin, CreateView):
    """Public submission form for success stories (honeypot-protected)."""

    model = Story
    form_class = StoryForm
    template_name = 'successstories/story_form.html'
    success_message = (
        'Your success story submission has been recorded. '
        'It will be reviewed by the PSF staff and published.'
    )

    @method_decorator(check_honeypot)
    def dispatch(self, *args, **kwargs):
        # Reject bot submissions before any view logic runs.
        return super().dispatch(*args, **kwargs)

    def get_success_url(self):
        # Redirect back to a fresh submission form rather than the story.
        return reverse('success_story_create')

    def form_valid(self, form):
        messages.add_message(self.request, messages.SUCCESS, self.success_message)
        return super().form_valid(form)


class StoryDetail(ContextMixin, DetailView):
    """Single story page; staff may preview unpublished stories."""

    template_name = 'successstories/story_detail.html'
    context_object_name = 'story'

    def get_queryset(self):
        if self.request.user.is_staff:
            return Story.objects.select_related()
        return Story.objects.select_related().published()


class StoryList(ListView):
    """All published stories."""

    template_name = 'successstories/story_list.html'
    context_object_name = 'stories'

    def get_queryset(self):
        return Story.objects.select_related().published()


class StoryListCategory(ContextMixin, DetailView):
    """Stories grouped under one StoryCategory (template renders the list)."""

    model = StoryCategory
| from django.contrib import messages
from django.core.urlresolvers import reverse
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, DetailView, ListView
from honeypot.decorators import check_honeypot
from .forms import StoryForm
from .models import Story, StoryCategory
class ContextMixin:
    """Adds the full StoryCategory queryset to the template context."""

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        ctx['category_list'] = StoryCategory.objects.all()
        return ctx


class StoryCreate(ContextMixin, CreateView):
    """Public submission form for success stories (honeypot-protected)."""

    model = Story
    form_class = StoryForm
    template_name = 'successstories/story_form.html'
    success_message = (
        'Your success story submission has been recorded. '
        'It will be reviewed by the PSF staff and published.'
    )

    @method_decorator(check_honeypot)
    def dispatch(self, *args, **kwargs):
        return super().dispatch(*args, **kwargs)

    def get_success_url(self):
        return reverse('success_story_create')

    def form_valid(self, form):
        messages.add_message(self.request, messages.SUCCESS, self.success_message)
        return super().form_valid(form)

    # NOTE(review): duplicate of the `model = Story` attribute declared at the
    # top of this class — redundant and should be removed.
    model = Story


class StoryDetail(ContextMixin, DetailView):
    """Single story page; staff may preview unpublished stories."""

    template_name = 'successstories/story_detail.html'
    context_object_name = 'story'

    def get_queryset(self):
        if self.request.user.is_staff:
            return Story.objects.select_related()
        return Story.objects.select_related().published()


class StoryList(ListView):
    """All published stories."""

    # NOTE(review): `model` is redundant here — get_queryset() below fully
    # determines the queryset.
    model = Story
    template_name = 'successstories/story_list.html'
    context_object_name = 'stories'

    def get_queryset(self):
        return Story.objects.select_related().published()


class StoryListCategory(ContextMixin, DetailView):
    """Stories grouped under one StoryCategory."""

    model = StoryCategory
| Python | 0.000001 |
9abb8108f62451fb993a398c8165a4605e40ec4a | Add tests for JSONPResponseMiddleware | mapit/tests/test_middleware.py | mapit/tests/test_middleware.py | from django.test import TestCase
from django.test.client import RequestFactory
from django.http import HttpResponse, HttpResponsePermanentRedirect
from ..middleware import JSONPMiddleware
class JSONPMiddlewareTest(TestCase):
    """Unit tests for JSONPMiddleware.process_response."""

    def setUp(self):
        self.middleware = JSONPMiddleware()
        self.factory = RequestFactory()

    def _request(self, **params):
        """Build a GET request to a throwaway URL with the given query params."""
        return self.factory.get("/dummy_url", params)

    def test_process_response_ignores_302_redirects(self):
        redirect = HttpResponsePermanentRedirect("/new_url")
        result = self.middleware.process_response(
            self._request(callback="xyz"), redirect)
        self.assertEqual(result, redirect)

    def test_process_response_uses_callback(self):
        result = self.middleware.process_response(
            self._request(callback="xyz"), HttpResponse(content="blah"))
        self.assertEqual(result.content, u'xyz(blah)')

    def test_process_response_uses_ignores_requests_without_callback(self):
        plain = HttpResponse(content="blah")
        result = self.middleware.process_response(self._request(), plain)
        self.assertEqual(result, plain)

    def test_process_response_callback_allowed_characters(self):
        # Letters, digits, '_', '$' and '.' are all legal in a callback name.
        result = self.middleware.process_response(
            self._request(callback="xyz123_$."), HttpResponse(content="blah"))
        self.assertEqual(result.content, u'xyz123_$.(blah)')
        # A '[' is not allowed, so the response must pass through untouched.
        untouched = HttpResponse(content="blah")
        result = self.middleware.process_response(
            self._request(callback="xyz123_$.["), untouched)
        self.assertEqual(result, untouched)
| Python | 0 | |
e20d3ff6147b857cb9a8efa32bfb4ee80610dd34 | Revert "dump" | dump/fastMessageReaderOriginal.py | dump/fastMessageReaderOriginal.py | #!/usr/bin/python
import sys
import re
# ============================================================================
class MessageReader:
    """Sequentially extracts messages (and their MDEntry lists) from a log file.

    Messages are lines starting with 'ApplVerID'; each is parsed into a dict
    of tag->value pairs, optionally followed by NoMDEntries[268] entry lines.
    """

    # Captures "tag[num]=value" pairs; the lookahead stops each value at the
    # next tag or at end of line.
    messageRegexp = r"s*(\w+)\[\d+\]=(.*?)(?=\s\w+\[\d+\]|$)";

    def __init__(self, fileName):
        self.fileName = fileName
        #self.file = open(fileName, encoding="utf8")
        self.file = open(fileName)
        # Holds a line read ahead of its message boundary (see getMessage).
        self.carryover = "";

    def __del__(self):
        # NOTE(review): closing in __del__ is fragile (ordering at interpreter
        # shutdown); a context-manager API would be safer — confirm usage.
        self.file.close()

    def getMessage(self):
        """Return the next message dict (with an 'entries' list), or {} at EOF."""
        # Prefer a line carried over from the previous call.
        if (self.carryover != ""):
            line = self.carryover
            self.carryover = ""
        else:
            line = self.file.readline()
        # Skip until the next message header line.
        while (line.startswith('ApplVerID') is not True):
            if not line: return {}
            line = self.file.readline()
        message = dict(re.findall(self.messageRegexp, line))
        message['entries'] = []
        line = self.file.readline();
        # Extract the entry count; if absent, the line belongs to the next
        # message, so stash it for the next call.
        noEntries = re.sub(".*?NoMDEntries\[268\]\s*=\s*(\d+)[^\d]*", r'\1', line)
        if (noEntries == line):
            self.carryover = line;
            return message
        # Each entry line is "<prefix>: tag[..]=value ..." — parse after the colon.
        for i in range(int(noEntries)):
            line = self.file.readline().split(':')[1].strip()
            entry = dict(re.findall(self.messageRegexp, line))
            message["entries"].append(entry)
        return message
# ============================================================================
| Python | 0.000002 | |
f917c7ccfbe22a50049e76957a05f35eaaa46b2a | migrate child table | polling_stations/apps/addressbase/migrations/0010_remove_onsud_ctry_flag.py | polling_stations/apps/addressbase/migrations/0010_remove_onsud_ctry_flag.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-02-15 14:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: drops the ctry_flag field from the Onsud model."""
    dependencies = [("addressbase", "0009_onsud_ced")]
    operations = [migrations.RemoveField(model_name="onsud", name="ctry_flag")]
| Python | 0.000002 | |
1553cdda2edc16368ba2281616923e849f09bdee | Create matching_{x,y}.py | hacker_rank/regex/repetitions/matching_{x,y}.py | hacker_rank/regex/repetitions/matching_{x,y}.py | Regex_Pattern = r'^\d{1,2}[a-zA-Z]{3,}\W{0,3}$' # Do not delete 'r'.
| Python | 0.998695 | |
527a53ee1e43f59462b94b50ea997058836a7031 | Create voicersss-inmoovservice-test.py | home/moz4r/Test/voicersss-inmoovservice-test.py | home/moz4r/Test/voicersss-inmoovservice-test.py | i01 = Runtime.createAndStart("i01", "InMoov")
i01.mouth = Runtime.createAndStart("i01.mouth", "voiceRSS")
python.subscribe(i01.mouth.getName(),"publishStartSpeaking")
python.subscribe(i01.mouth.getName(),"publishEndSpeaking")
def onEndSpeaking(text):
print "end speak"
def onStartSpeaking(text):
print "start speak"
i01.mouth.setKey("6b714718f09e48c9a7f260e385ca99a4")
i01.mouth.setVoice("fr-fr");
i01.mouth.speakBlocking(u"test accent utf8 : éléphant")
| Python | 0 | |
75980fc2e2f63e210f1e58e9a1d56c09072aa04e | add play_camera.py | python/video/play_camera.py | python/video/play_camera.py | #!/usr/bin/env python3
# encoding: utf-8
# pylint: disable=no-member
"""Play a video with OpenCV."""
import sys
import cv2
def main():
    """Grab frames from the default camera and show them until 'q' or read failure.

    BUG FIX: the file declares python3 but used a Python-2 ``print``
    statement, which is a SyntaxError under Python 3.
    """
    cv2.namedWindow('video', cv2.WINDOW_AUTOSIZE)
    cap = cv2.VideoCapture(0)
    i = 0
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:  # done: camera closed or frame grab failed
            break
        i += 1
        if i == 1:
            # Report the frame geometry once, on the first frame.
            print(frame.shape, frame.dtype, frame.size)
        cv2.imshow('video', frame)
        key = cv2.waitKey(30)
        if key & 0xFF == ord('q'):  # quit
            break
    cap.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    main()  # run only when executed as a script
| Python | 0.000002 | |
6dfc5a3d7845633570b83aac06c47756292cf8ac | Add tests for get_uid() method for common DB models. | st2common/tests/unit/test_db_model_uids.py | st2common/tests/unit/test_db_model_uids.py | # contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest2
from st2common.models.db.pack import PackDB
from st2common.models.db.sensor import SensorTypeDB
from st2common.models.db.action import ActionDB
from st2common.models.db.rule import RuleDB
from st2common.models.db.trigger import TriggerTypeDB
from st2common.models.db.trigger import TriggerDB
__all__ = [
'DBModelUIDFieldTestCase'
]
class DBModelUIDFieldTestCase(unittest2.TestCase):
    """Verifies get_uid() output for each common DB model type."""

    def test_get_uid(self):
        self.assertEqual(PackDB(ref='ma_pack').get_uid(),
                         'pack:ma_pack')
        self.assertEqual(SensorTypeDB(name='sname', pack='spack').get_uid(),
                         'sensor_type:spack:sname')
        self.assertEqual(ActionDB(name='aname', pack='apack', runner_info={}).get_uid(),
                         'action:apack:aname')
        self.assertEqual(RuleDB(name='rname', pack='rpack').get_uid(),
                         'rule:rpack:rname')
        self.assertEqual(TriggerTypeDB(name='ttname', pack='ttpack').get_uid(),
                         'trigger_type:ttpack:ttname')
        # Trigger UIDs end with a per-object hash, so only the prefix is fixed.
        self.assertTrue(
            TriggerDB(name='tname', pack='tpack').get_uid().startswith('trigger:tpack:tname:'))
| Python | 0 | |
5d64acfd475ca0bb0db2ef7c032fc4ee16df4f75 | remove highlight table | alembic/versions/186928676dbc_remove_highlights.py | alembic/versions/186928676dbc_remove_highlights.py | """remove_highlights
Revision ID: 186928676dbc
Revises: f163a00a02aa
Create Date: 2019-06-01 15:14:13.999836
"""
# revision identifiers, used by Alembic.
revision = '186928676dbc'
down_revision = 'f163a00a02aa'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mysql
def upgrade():
    """Drop the stream-chunk highlight table (feature removed)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('tb_stream_chunk_highlight')
    # ### end Alembic commands ###
def downgrade():
    """Recreate tb_stream_chunk_highlight exactly as it existed before upgrade()."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('tb_stream_chunk_highlight',
    sa.Column('id', mysql.INTEGER(display_width=11), autoincrement=True, nullable=False),
    sa.Column('stream_chunk_id', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
    sa.Column('created_at', mysql.DATETIME(), nullable=False),
    sa.Column('highlight_offset', mysql.INTEGER(display_width=11), autoincrement=False, nullable=False),
    sa.Column('description', mysql.VARCHAR(length=128), nullable=True),
    sa.Column('override_link', mysql.VARCHAR(length=256), nullable=True),
    sa.Column('thumbnail', mysql.TINYINT(display_width=1), autoincrement=False, nullable=True),
    sa.Column('created_by', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
    sa.Column('last_edited_by', mysql.INTEGER(display_width=11), autoincrement=False, nullable=True),
    sa.ForeignKeyConstraint(['stream_chunk_id'], ['tb_stream_chunk.id'], name='tb_stream_chunk_highlight_ibfk_1'),
    sa.PrimaryKeyConstraint('id'),
    mysql_default_charset='utf8mb4',
    mysql_engine='InnoDB'
    )
    # ### end Alembic commands ###
| Python | 0.000005 | |
8658ad72c74306617e58ca82ff0f3fdba35bd353 | implement auto build database interface | app/tools/dbautocreat.py | app/tools/dbautocreat.py | #-*- coding:utf-8 -*-
import asyncio
import aiomysql
from tools.config import Config
class AutoCreate(object):
    """Skeleton for automatically building the application's database schema.

    BUG FIX: the class subclassed ``obj``, an undefined name, so importing
    this module raised NameError; it now subclasses ``object``.
    All _create_* steps are placeholders to be filled in.
    """

    def __init__(self):
        pass

    def _create_db(self):
        # TODO: create the database itself.
        pass

    def _create_field_type(self):
        # TODO: emit column type declarations.
        pass

    def _create_field_primary_key(self):
        # TODO: emit PRIMARY KEY constraints.
        pass

    def _create_field_unique_key(self):
        # TODO: emit UNIQUE constraints.
        pass

    def _create_auto_increment(self):
        # TODO: emit AUTO_INCREMENT settings.
        pass

    def _create_default(self):
        # TODO: emit DEFAULT clauses.
        pass

    def _create_table(self):
        # TODO: assemble and run CREATE TABLE statements.
        pass

    def run(self):
        """Entry point: will orchestrate the _create_* steps above."""
        pass
@asyncio.coroutine
def auto_create():
    """Connect to MySQL (settings from Config.database) and list the databases.

    Uses the pre-3.5 generator-coroutine style (@asyncio.coroutine /
    yield from) — NOTE(review): deprecated in modern Python; consider
    async/await when the runtime allows.
    """
    conn=yield from aiomysql.connect(db=Config.database.database,
                    host=Config.database.host,
                    password=Config.database.password,
                    user=Config.database.user)
    cursor =yield from conn.cursor()
    yield from cursor.execute('show databases;')
    ret=yield from cursor.fetchall()
    print(ret)

if __name__=='__main__':
    # Smoke test: run the coroutine to completion on a fresh event loop.
    loop=asyncio.get_event_loop()
    loop.run_until_complete(asyncio.wait([auto_create()]))
    loop.close()
| Python | 0 | |
36b8c44f8c2554109ab4ab09add9ac10fae20781 | add entities orm | cliche/services/tvtropes/entities.py | cliche/services/tvtropes/entities.py | from sqlalchemy import Column, DateTime, ForeignKey, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship
Base = declarative_base()

# BUG FIX: __all__ must be a sequence of names; the bare string 'Entity'
# made star-imports iterate over its characters.  Relation is public too.
__all__ = ('Entity', 'Relation')
class Entity(Base):
    """A TVTropes page, keyed by (namespace, name)."""

    namespace = Column(String, primary_key=True)
    name = Column(String, primary_key=True)
    url = Column(String)
    # When the crawler last visited this page.
    last_crawled = Column(DateTime)
    type = Column(String)
    # All Relation rows whose origin is this entity; the join is spelled out
    # because both foreign-key columns are part of composite keys.
    relations = relationship('Relation', foreign_keys=[namespace, name],
                             primaryjoin='and_(Entity.namespace == \
                                          Relation.origin_namespace, \
                                          Entity.name == Relation.origin)',
                             collection_class=set)

    def __init__(self, namespace, name, url, last_crawled, type):
        self.namespace = namespace
        self.name = name
        self.url = url
        self.last_crawled = last_crawled
        self.type = type

    def __repr__(self):
        return "<Entity('%s', '%s', '%s', '%s', '%s')" % (
            self.namespace, self.name, self.url, str(self.last_crawled),
            self.type
        )

    __tablename__ = 'entities'
    __repr_columns__ = namespace, name
class Relation(Base):
    """A directed link from one entity to another (e.g. work -> trope).

    The destination is stored as a plain (namespace, name) pair rather than a
    foreign key, so dangling destinations are possible by design.
    """

    origin_namespace = Column(String, ForeignKey(Entity.namespace),
                              primary_key=True)
    origin = Column(String, ForeignKey(Entity.name), primary_key=True)
    destination_namespace = Column(String, primary_key=True)
    destination = Column(String, primary_key=True)
    # Back-reference to the origin Entity row.
    origin_entity = relationship('Entity',
                                 foreign_keys=[origin_namespace, origin])

    def __init__(self, origin_namespace, origin, destination_namespace,
                 destination):
        self.origin_namespace = origin_namespace
        self.origin = origin
        self.destination_namespace = destination_namespace
        self.destination = destination

    __tablename__ = 'relations'
    __repr_columns__ = origin_namespace, origin, destination_namespace, \
                       destination
| Python | 0.000406 | |
ad664a7722da63d783a2b9d73077d91a8a012057 | Create hello.py | Python/hello.py | Python/hello.py | print("hello world!!!")
| Python | 0.999979 | |
dfed8f837b5fe07445b3914b33c1dab1b0b5741b | add basic UAV object incl. very basic search algo | uav.py | uav.py | import random
class Uav:
    """A UAV on a 2-D grid that greedily moves toward its highest-valued neighbour."""

    def __init__(self, x, y, worldMap):
        # BUG FIX: the original signature was (x, y, worldMap) with no
        # ``self``, so every attribute assignment raised NameError.
        self.x = x
        self.y = y
        self.worldMap = worldMap
        self.sensorStrength = None  # not used yet; reserved for sensing range

    def setMap(self, newMap):
        """Replace the map the UAV searches over."""
        self.worldMap = newMap

    def nextStep(self):
        """Return the index (0-3) of the best neighbour, per surroundingValues().

        Ties between equally-valued neighbours are broken uniformly at random.
        BUG FIX: the original read an undefined name ``a`` instead of the
        ``options`` list it had just computed.
        """
        options = self.surroundingValues()
        best = max(options)
        maxIndexes = [i for i, value in enumerate(options) if value == best]
        return random.choice(maxIndexes)

    def surroundingValues(self):
        """Map values of the four neighbours, in (y+1, x+1, y-1, x-1) order."""
        return [self.worldMap[self.x][self.y + 1],
                self.worldMap[self.x + 1][self.y],
                self.worldMap[self.x][self.y - 1],
                self.worldMap[self.x - 1][self.y]]
| Python | 0 | |
8d1946c9656ea6c29d4730a68cbf4610152cd98b | make migrations | poll/migrations/0002_vote_user_id.py | poll/migrations/0002_vote_user_id.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds a required user_id integer to Vote (default used only for backfill)."""

    dependencies = [
        ('poll', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='vote',
            name='user_id',
            # preserve_default=False: the default only fills existing rows
            # during migration and is not kept on the field afterwards.
            field=models.IntegerField(default=None),
            preserve_default=False,
        ),
    ]
| Python | 0.000057 | |
64109dddedb7441456ae8e255c6a4b20ccaa6a73 | Create ReinhardNorm.py | ReinhardNorm.py | ReinhardNorm.py | import numpy
def ReinhardNorm(I, TargetMu, TargetSigma):
    '''
    Performs Reinhard color normalization to transform the color characteristics of an image to
    a desired standard. The standard is defined by the mean and standard deviations of
    the target image in LAB color space defined by Ruderman. The input image is converted to
    Ruderman's LAB space, the LAB channels are each centered and scaled to zero-mean unit
    variance, and then rescaled and shifted to match the target image statistics.
    *Inputs:
        I (rgbimage) - an RGB image of type unsigned char.
        TargetMu - a 3-element list containing the means of the target image channels in LAB
                   color space.
        TargetSigma - a 3-element list containing the standard deviations of the target image
                      channels in LAB color space.
    *Outputs:
        Normalized (rgbimage) - a normalized RGB image with corrected color characteristics.
    *Related functions:
        RudermanLABFwd, RudermanLABInv
    *References:
        Erik Reinhard, Michael Ashikhmin, Bruce Gooch, and Peter Shirley. 2001. Color Transfer between Images. IEEE Comput. Graph. Appl. 21, 5 (September 2001), 34-41.
        Daniel Ruderman, Thomas Cronin, Chuan-Chin Chiao, Statistics of Cone Responses to Natural Images: Implications for Visual Coding, J. Optical Soc. of America, vol. 15, no. 8, 1998, pp. 2036-2045.
    '''
    # get input image dimensions
    m = I.shape[0]
    n = I.shape[1]

    # convert input image to LAB color space
    # BUG FIX: the forward conversion is named RudermanLABFwd (see "Related
    # functions" above); RudermanLAB does not exist.
    LAB = RudermanLABFwd(I)

    # center and scale to zero-mean and unit variance
    # BUG FIX: the original used the channel *sums* as means and divided by
    # the *variance*; normalization requires the mean and the standard
    # deviation (sqrt of variance).
    Mu = LAB.sum(axis=0).sum(axis=0) / (m * n)
    LAB[:, :, 0] = LAB[:, :, 0] - Mu[0]
    LAB[:, :, 1] = LAB[:, :, 1] - Mu[1]
    LAB[:, :, 2] = LAB[:, :, 2] - Mu[2]
    Sigma = ((LAB * LAB).sum(axis=0).sum(axis=0) / (m * n - 1)) ** 0.5
    LAB[:, :, 0] = LAB[:, :, 0] / Sigma[0]
    LAB[:, :, 1] = LAB[:, :, 1] / Sigma[1]
    LAB[:, :, 2] = LAB[:, :, 2] / Sigma[2]

    # rescale and recenter to match target statistics
    LAB[:, :, 0] = LAB[:, :, 0] * TargetSigma[0] + TargetMu[0]
    LAB[:, :, 1] = LAB[:, :, 1] * TargetSigma[1] + TargetMu[1]
    LAB[:, :, 2] = LAB[:, :, 2] * TargetSigma[2] + TargetMu[2]

    # convert back to RGB colorspace
    Normalized = RudermanLABInv(LAB)
    return Normalized
| Python | 0.000001 | |
ebabfa0e14bdfd061e248285b8f7b5473f5a676e | Create convert_to_morse.py | morse_code/convert_to_morse.py | morse_code/convert_to_morse.py | from ConfigParser import SafeConfigParser
import string
target = 'target.txt'
def parse_ini():
    # Load the per-letter morse codes from conversion.ini's [CONVERSIONS]
    # section, one entry per uppercase letter, in A..Z order.
    parser = SafeConfigParser()
    parser.read('conversion.ini')
    return [parser.get('CONVERSIONS', letter)
            for letter in string.ascii_uppercase]
def convert_target():
with open(target, "r") as targetfile:
targetstring = targetfile.read()
for i in xrange(0, len(targetstring)):
print targetstring[i]
if any(character in targetstring)
pass
# Script body: load the conversion table and convert the target file.
morselist = parse_ini()
#print morselist
# NOTE(review): these alphabet lists are currently unused by the functions
# above — confirm whether they can be removed.
capital_alphabet = list(string.ascii_uppercase)
lower_alphabet = list(string.ascii_lowercase)
#print capital_alphabet
#print lower_alphabet
convert_target()
| Python | 0.998175 | |
fcb311ffd264821767f58c92e96101aa8086acf5 | rewrite DHKE.py as crypto.py | crypto.py | crypto.py | import random
import time
timestamp = int(time.time())
random.seed(timestamp)
def gen_check(n):
    """Return n if prime, otherwise a freshly generated random prime.

    BUG FIX: the original rebound the local ``n`` and returned nothing, so
    the corrected prime never reached the caller; use the return value
    (``b = gen_check(b)``), as ``gen_check(b)`` alone has no effect.
    """
    while not isprime(n):
        n = random.randint(0, timestamp)
    return n
def input_check(n):
    """Keep prompting until the value is prime; return the accepted value.

    BUG FIX: as with gen_check, the original discarded the corrected value —
    callers must use the return (``b = input_check(b)``).
    NOTE(review): input() yields a string; isprime() coerces with int(), but
    the returned value stays a string until the caller converts it.
    """
    while not isprime(n):
        n = input("Sorry, that number isn't prime. Please try another: ")
    return n
def isprime(n):
    '''check if integer n is a prime'''
    # work on the absolute value of the integer form of n
    n = abs(int(n))
    if n < 2:
        return False      # 0 and 1 are not prime
    if n == 2:
        return True       # the only even prime
    if n % 2 == 0:
        return False      # every other even number is composite
    # trial division by odd candidates up to sqrt(n)
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
def publicKey():
    """Interactively compute a Diffie-Hellman public key (base**secret mod modulus)."""
    resp = input("Do you have a shared base integer? (y/n): ")
    if resp.lower() == "y":
        b = input("Please enter your shared base integer: ")
        # NOTE(review): input_check/gen_check discard their corrected value
        # (they neither return it usably here nor mutate the argument), so a
        # non-prime entry is NOT actually replaced — b may stay composite.
        input_check(b)
    elif resp.lower() == "n":
        b = random.randint(0, timestamp)
        gen_check(b)
        print("Your shared base integer is: ", b)
    resp = input("Do you have a secret integer? (y/n): ")
    if resp.lower() == "y":
        alex = input("Please enter your secret integer: ")
        input_check(alex)
    elif resp.lower() == "n":
        alex = random.randint(0, timestamp)
        gen_check(alex)
        print("Your secret integer is: ", alex)
    resp = input("Do you have a shared modulus? (y/n): ")
    if resp.lower() == "y":
        mp = input("Please enter your shared modulus: ")
        input_check(mp)
    elif resp.lower() == "n":
        mp = random.randint(0, timestamp)
        gen_check(mp)
        print("Your shared modulus is: ", mp)
    b = int(b)
    alex = int(alex)
    mp = int(mp)
    # NOTE(review): pow(b, alex, mp) would avoid materializing the huge
    # intermediate power; behavior is identical.
    pubKey = b ** alex
    pubKey = pubKey % mp
    return pubKey

def sharedSecret():
    """Interactively compute the DH shared secret: publicKey**secret mod modulus."""
    pK = input("Please enter your public key: ")
    mp = input("Please enter your shared modulus: ")
    alex = input("Please enter your secret integer: ")
    sharedSec = (int(pK) ** int(alex)) % int(mp)
    return sharedSec

# Entry point: dispatch on the user's choice of operation.
answer = input("Would you like to calculate a public key, or a shared secret? ")
if answer.lower() == "public key":
    public = publicKey()
    print("Your public key is: ", public)
elif answer.lower() == "shared secret":
    shared = sharedSecret()
    print("Your shared secret is: ", shared)
| Python | 0.999999 | |
81c793870387910cd0c4eda02b2b95588a02cc7f | Add files via upload | cypher.py | cypher.py | #!/usr/bin/python
import argparse, sys
ALPHABET_SIZE = 26
# Command line: three rotor choices, a ring setting, and the input file.
parser = argparse.ArgumentParser(description='Encrypts a message from a text file.')
parser.add_argument('walzenlage1', metavar='w1', type=int, action='store',
                    help='')
parser.add_argument('walzenlage2', metavar='w2', type=int, action='store',
                    help='')
parser.add_argument('walzenlage3', metavar='w3', type=int, action='store',
                    help='')
parser.add_argument('ringstellung', metavar='rs', type=str, action='store',
                    help='')
#parser.add_argument('--decrypt', nargs='?', const=decrypt, default=encrypt,
#help='decrypts the message')
parser.add_argument('file', metavar='filename', type=str,
                    help='name or path to the file wich contains your message')

args = parser.parse_args()

# Read the message to encrypt.
# NOTE(review): msg/lenmsg are never used below — confirm they can go.
text = open(args.file, 'r')
msg = text.read()
lenmsg = len(msg)
# Rotor selections are ints; config keys in Rotor are strings ('1'..'3').
w1 = args.walzenlage1
w2 = args.walzenlage2
w3 = args.walzenlage3
rs1 = args.ringstellung
# initialize the rotors
class Rotor:
    """One Enigma rotor: a steppable permutation of the 26 letter indices."""

    # Wiring tables keyed by rotor id; values are 0-based letter indices.
    config = {'1': [13, 17, 21, 16, 15, 24, 9, 25, 4, 18, 14, 8, 0, 20, 10, 19, 11, 1, 12, 22, 3, 6, 23, 5, 7, 2],
              '2': [17, 8, 18, 2, 11, 1, 6, 19, 24, 10, 16, 14, 7, 4, 23, 13, 0, 25, 20, 12, 22, 5, 9, 15, 21, 3],
              '3': [24, 16, 13, 0, 18, 12, 3, 25, 21, 8, 10, 15, 22, 2, 6, 7, 5, 17, 14, 1, 9, 11, 20, 23, 4, 19],
              'Reflector': [14, 18, 1, 19, 25, 21, 5, 3, 24, 7, 8, 23, 4, 0, 9, 15, 6, 16, 12, 13, 10, 22, 20, 2, 17, 11]
              }

    def __init__(self, Id):
        # BUG FIX: copy the wiring; the original aliased the class-level
        # list, so rotating one rotor silently corrupted the shared table
        # for every later Rotor built from the same id.
        self.numbers = list(self.config[Id])
        self.len = len(self.numbers)

    def rotate(self):
        """Step the rotor one position (left-rotate the wiring by one)."""
        self.numbers.append(self.numbers.pop(0))

    def set(self, rs):
        """Rotate until the first contact equals ``rs``.

        BUG FIX: the original referenced ``self.rotate`` without calling it,
        so this loop never advanced (and spun forever).  A value not present
        in the wiring now raises instead of hanging.
        """
        if rs not in self.numbers:
            raise ValueError("ring setting %r not present in rotor wiring" % (rs,))
        while self.numbers[0] != rs:
            self.rotate()

    def do(self, previousOut):
        """Map an incoming contact index through the rotor wiring."""
        return self.numbers[previousOut]
#inicia a maquina baseada na configuração da chave
class Enigma:
    """Three rotors plus a reflector; encrypts one lowercase letter at a time."""

    def __init__(self, r1, r2, r3, ref):
        self.r1 = r1
        self.r2 = r2
        self.r3 = r3
        self.ref = ref
        # BUG FIX: counter was a class attribute, so every Enigma instance
        # shared (and corrupted) the same stepping state.
        self.counter = [0, 0, 0]

    def ringset(self, rs):
        """Apply a three-letter ring setting such as 'abc'.

        BUG FIX: the original called int() on the letters (ValueError) and
        used a 1-based offset that could never match the 0-25 wiring values;
        letters now map to 0-based indices via ord().
        """
        self.r1.set(ord(rs[0]) - ord('a'))
        self.r2.set(ord(rs[1]) - ord('a'))
        self.r3.set(ord(rs[2]) - ord('a'))

    def encrypt(self, message):
        """Encrypt ``message``; returns the list of output letter indices."""
        EncryptedMessage = []
        for ch in message:
            # BUG FIX: newLetter() takes a single index; the original also
            # passed `message`, raising TypeError on the first character.
            # Indices are 0-based ('a' -> 0) to match the rotor wiring.
            EncryptedMessage.append(self.newLetter(ord(ch.lower()) - ord('a')))
            self.rotateAll()
        return EncryptedMessage

    # def decrypt(self, message)

    def newLetter(self, num):
        """Run one index through r1-r2-r3, the reflector, and back out."""
        return self.r1.do(self.r2.do(self.r3.do(self.ref.do(self.r3.do(self.r2.do(self.r1.do(num)))))))

    def rotateAll(self):
        """Odometer stepping: r1 every char, r2 every full turn of r1, r3 likewise."""
        size = self.r1.len  # rotor period (26 for the standard alphabet)
        self.r1.rotate()
        self.counter[0] = self.counter[0] + 1
        if self.counter[0] == size:
            self.r2.rotate()
            self.counter[1] = self.counter[1] + 1
            self.counter[0] = 0
            if self.counter[1] == size:
                self.r3.rotate()
                self.counter[2] = self.counter[2] + 1
                self.counter[1] = 0
# BUG FIX: rotor ids are string keys in Rotor.config ('1'..'3'), but argparse
# delivers ints — convert; and the ring-setting variable is rs1, not rs
# (which was undefined and raised NameError).
E = Enigma(Rotor(str(w1)), Rotor(str(w2)), Rotor(str(w3)), Rotor('Reflector'))
E.ringset(rs1)
print(E.r1)
| Python | 0.000001 | |
47c8aa34eb9f4d2c4f702bc3957c87ef92cf7d28 | add simple learning switch app for OF1.3 | ryu/app/simple_switch_13.py | ryu/app/simple_switch_13.py | # Copyright (C) 2011 Nippon Telegraph and Telephone Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import struct
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import MAIN_DISPATCHER
from ryu.controller.handler import set_ev_cls
from ryu.ofproto import ofproto_v1_3
from ryu.lib.packet import packet
from ryu.lib.packet import ethernet
class SimpleSwitch13(app_manager.RyuApp):
    """A learning L2 switch for OpenFlow 1.3: floods unknown MACs, installs
    exact-match flows once a destination has been learned."""

    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    def __init__(self, *args, **kwargs):
        super(SimpleSwitch13, self).__init__(*args, **kwargs)
        # {dpid: {mac: in_port}} — per-switch MAC learning table.
        self.mac_to_port = {}

    def add_flow(self, datapath, port, dst, actions):
        """Install a flow matching (in_port, eth_dst) with the given actions."""
        ofproto = datapath.ofproto

        match = datapath.ofproto_parser.OFPMatch(in_port=port,
                                                 eth_dst=dst)
        inst = [datapath.ofproto_parser.OFPInstructionActions(
                ofproto.OFPIT_APPLY_ACTIONS, actions)]

        # Permanent flow (no timeouts) at priority 0 in table 0.
        mod = datapath.ofproto_parser.OFPFlowMod(
            datapath=datapath, cookie=0, cookie_mask=0, table_id=0,
            command=ofproto.OFPFC_ADD, idle_timeout=0, hard_timeout=0,
            priority=0, buffer_id=ofproto.OFP_NO_BUFFER,
            out_port=ofproto.OFPP_ANY,
            out_group=ofproto.OFPG_ANY,
            flags=0, match=match, instructions=inst)
        datapath.send_msg(mod)

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def _packet_in_handler(self, ev):
        """Learn the source MAC, then forward (or flood) the packet."""
        msg = ev.msg
        datapath = msg.datapath
        ofproto = datapath.ofproto
        in_port = msg.match['in_port']

        pkt = packet.Packet(msg.data)
        eth = pkt.get_protocols(ethernet.ethernet)[0]

        dst = eth.dst
        src = eth.src

        dpid = datapath.id
        self.mac_to_port.setdefault(dpid, {})

        self.logger.info("packet in %s %s %s %s", dpid, src, dst, in_port)

        # learn a mac address to avoid FLOOD next time.
        self.mac_to_port[dpid][src] = in_port

        if dst in self.mac_to_port[dpid]:
            out_port = self.mac_to_port[dpid][dst]
        else:
            out_port = ofproto.OFPP_FLOOD

        actions = [datapath.ofproto_parser.OFPActionOutput(out_port)]

        # install a flow to avoid packet_in next time
        if out_port != ofproto.OFPP_FLOOD:
            self.add_flow(datapath, in_port, dst, actions)

        # Forward the triggering packet itself (referencing the buffered copy).
        out = datapath.ofproto_parser.OFPPacketOut(
            datapath=datapath, buffer_id=msg.buffer_id, in_port=in_port,
            actions=actions)
        datapath.send_msg(out)
| Python | 0 | |
615e51ce1bf15c012a6c7cc2d026cb69bf0ce2b8 | Create MAIN.py | MAIN.py | MAIN.py | def pythagoras(SIDE, LEN1, LEN2):
from math import sqrt # This is function is needed to work, it **SHOULD** be included with the default install.
ANSWER = "Error Code 1" # This should not logicaly happen if the user is not an idiot and follows the usage.
if type(LEN1) is str or type(LEN2) is str: # This checks if the user didn't listen to the usage // Was the LEN a string?
ANWSER = "Error Code 2"
return ANWSER # This will return an error to the user that didn't listen.
if type(SIDE) is int or type(SIDE) is float: # This checks if the user didn't listen to the usage // Was the SIDE an integer or float?
ANWSER = "Error Code 4"
return ANWSER # This will return an error to the user that didn't listen.
#--SIDE C--
if SIDE.lower() == "c":
#SIDE C CALCULATION (Hypotenuse)
A_SIDE = LEN1
B_SIDE = LEN2
C_SIDE = sqrt(A_SIDE * A_SIDE + B_SIDE * B_SIDE)
ANSWER = C_SIDE # This sets the answer to be returned.
#--SIDE A--
elif SIDE.lower() == 'a':
if LEN1 < LEN2: # This will happen if the user did not follow instructions. See error below.
print("The hypotenues should be bigger")
anwser = "Error code 2"
return ANSWER # This will return an error to the user that didn't listen.
#SIDE A CALCULATION
B_SIDE = LEN2
C_SIDE = LEN1
ASIDE = sqrt((C_SIDE * C_SIDE) - (B_SIDE * B_SIDE))
ANSWER = A_SIDE # This sets the answer to be returned.
#--SIDE B--
elif SIDE.lower() == 'b':
if LEN1 < LEN2: # This will happen if the user did not follow instructions. See error below.
print("The hypotenues should be bigger")
ANSWER = "Error code 2"
return ANSWER # This will return an error to the user that didn't listen.
#SIDE B CALCULATION
A_SIDE = LEN2
C_SIDE = LEN1
B_SIDE = sqrt(C_SIDE * C_SIDE - A_SIDE * A_SIDE)
ANSWER = B_SIDE # This sets the answer to be returned.
return ANSWER # Returns the anwser for the user to use.
| Python | 0.000002 | |
cbe0d5b37d4055ea78568838c3fd4cc953342b80 | remove stale data | geoconnect/apps/gis_tabular/utils_stale_data.py | geoconnect/apps/gis_tabular/utils_stale_data.py | from datetime import datetime, timedelta
from apps.gis_tabular.models import TabularFileInfo # for testing
from apps.gis_tabular.models import WorldMapTabularLayerInfo,\
WorldMapLatLngInfo, WorldMapJoinLayerInfo
from apps.worldmap_connect.models import WorldMapLayerInfo
DEFAULT_STALE_AGE = 3 * 60 * 60 # 3 hours, in seconds
def remove_stale_map_data(stale_age_in_seconds=DEFAULT_STALE_AGE):
    """
    Remove stale map-info objects of every type.

    Sweeps the lat/lng, join-layer and classic WorldMap layer info tables,
    deleting any row older than ``stale_age_in_seconds``.

    :param stale_age_in_seconds: maximum allowed age before deletion.
    """
    current_time = datetime.now()

    for wm_info in WorldMapLatLngInfo.objects.all():
        remove_if_stale(wm_info, stale_age_in_seconds, current_time)

    # Bug fix: this loop previously re-scanned WorldMapLatLngInfo a second
    # time, so WorldMapJoinLayerInfo rows (imported above but never swept)
    # accumulated forever.
    for wm_info in WorldMapJoinLayerInfo.objects.all():
        remove_if_stale(wm_info, stale_age_in_seconds, current_time)

    for wm_info in WorldMapLayerInfo.objects.all():
        remove_if_stale(wm_info, stale_age_in_seconds, current_time)
def remove_if_stale(info_object, stale_age_in_seconds, current_time=None):
assert hasattr(info_object, 'modified'),\
'The info_object must have "modified" date'
if not current_time:
current_time = datetime.now()
mod_time = info_object.modified
if hasattr(mod_time, 'tzinfo'):
mod_time = mod_time.replace(tzinfo=None)
# Is this object beyond it's time limit
time_diff = (current_time - mod_time).total_seconds()
if time_diff > stale_age_in_seconds:
# Yes! delete it
print 'Removing: ', info_object
info_object.delete()
"""
from apps.gis_tabular.utils_stale_data import *
remove_stale_map_data()
"""
| Python | 0.002077 | |
cb56e0151b37a79e2ba95815555cde0633e167e7 | add client subscribe testing | samples/client_subscribe.py | samples/client_subscribe.py | import logging
from hbmqtt.client._client import MQTTClient
import asyncio
logger = logging.getLogger(__name__)
C = MQTTClient()
@asyncio.coroutine
def test_coro():
yield from C.connect(uri='mqtt://iot.eclipse.org:1883/', username=None, password=None)
yield from C.subscribe([
{'filter': '$SYS/broker/uptime', 'qos': 0x00},
])
logger.info("Subscribed")
yield from asyncio.sleep(60)
yield from C.disconnect()
if __name__ == '__main__':
formatter = "[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s"
logging.basicConfig(level=logging.DEBUG, format=formatter)
asyncio.get_event_loop().run_until_complete(test_coro()) | Python | 0 | |
c422b5019c6e638bce40a7fecef6977aa5e63ce0 | add __init__.py | python/18-package/parent/__init__.py | python/18-package/parent/__init__.py | #!/usr/bin/env python
#-*- coding=utf-8 -*-
# Report whether this package __init__ is being imported (the normal
# case) or executed directly.  Python 2 print statements.
if __name__ == "__main__":
    # Executed directly -- unusual for a package __init__.
    print "Package parent running as main program"
else:
    # Normal case: the package is being imported.
    print "Package parent initializing"
| Python | 0.00212 | |
8d6a5c4092d4f092416fc39fc7faa8bb20e701c3 | Add a manage command to sync reservations from external hook .. hard coded first product only atm (cherry picked from commit 63a80b711e1be9a6047965b8d0061b676d8c50ed) | cartridge/shop/management/commands/syncreshooks.py | cartridge/shop/management/commands/syncreshooks.py | from django.core.management.base import BaseCommand
from django.core.management.base import CommandError
from mezzanine.conf import settings
from cartridge.shop.models import *
class Command(BaseCommand):
    """Management command that syncs reservation data from the external hook.

    NOTE(review): only the first ``ReservableProduct`` is synced; presumably
    this should eventually iterate over every product -- confirm.
    """
    help = 'Sync reservations from external hook'

    def handle(self, *args, **options):
        # Slice instead of [0] so an empty table raises a friendly
        # CommandError (which was imported but unused) rather than an
        # unhandled IndexError.
        products = ReservableProduct.objects.all()[:1]
        if not products:
            raise CommandError('No ReservableProduct instances to sync')
        products[0].update_from_hook()
| Python | 0 | |
afab4bcd795da4395920eab6107bc33e401ed86a | Create PiWS.py | PiWS.py | PiWS.py | import time
import datetime
import csv
from math import log
from flask import Flask, render_template
from sense_hat import SenseHat
app = Flask(__name__)
def pick_bg_color(fahrenheit):
    """Return the LED background colour ``[R, G, B]`` for a Fahrenheit value.

    Bug fix: the original chain of independent ``if`` statements left
    unhandled gaps (80-81, 90-91, 100-101, 102-103, 104-105, 109-110) and
    its trailing ``else`` belonged only to the *last* test, so almost every
    reading below 110 ended up green.  A single inclusive ``elif`` ladder
    keeps the same colour bands without the gaps.
    """
    if fahrenheit <= 80:
        return [0, 0, 155]        # blue
    elif fahrenheit <= 90:
        return [0, 155, 0]        # green
    elif fahrenheit <= 100:
        return [155, 155, 0]      # yellow
    elif fahrenheit <= 102:
        return [255, 127, 0]      # orange
    elif fahrenheit <= 104:
        return [155, 0, 0]        # red
    elif fahrenheit <= 109:
        return [255, 0, 0]        # bright red
    elif fahrenheit <= 120:
        return [155, 155, 155]    # white
    return [0, 155, 0]            # out of range: fall back to green


def weather():
    """Read the Sense HAT sensors, log one CSV row, and scroll the reading
    across the LED matrix five times."""
    sense = SenseHat()
    sense.clear()
    celcius = round(sense.get_temperature(), 1)
    fahrenheit = round(1.8 * celcius + 32, 1)
    humidity = round(sense.get_humidity(), 1)
    pressure = round(sense.get_pressure(), 1)
    # Magnus-style dew point approximation from temperature and humidity.
    dewpoint = round(243.04 * (log(humidity / 100) + ((17.625 * celcius) / (243.04 + celcius))) / (17.625 - log(humidity / 100) - (17.625 * celcius) / (243.04 + celcius)), 1)

    # Orient the LED matrix according to how the Pi is being held.
    acceleration = sense.get_accelerometer_raw()
    x = round(acceleration['x'], 0)
    y = round(acceleration['y'], 0)
    z = round(acceleration['z'], 0)
    if x == -1:
        sense.set_rotation(90)
    elif y == 1:
        sense.set_rotation(0)
    else:
        # y == -1 and the fallback case both used 180 originally.
        sense.set_rotation(180)

    bg_color = pick_bg_color(fahrenheit)

    result = ' Temp. F ' + str(fahrenheit) + ' Temp. C ' + str(celcius) + ' Hum. ' + str(humidity) + ' Press. ' + str(pressure) + ' DewPoint ' + str(dewpoint)
    print(result)

    # Append one timestamped row per reading to the CSV log.
    result_list = [(datetime.datetime.now(), celcius, fahrenheit, humidity, pressure, dewpoint)]
    with open('weather_logs.csv', 'a', newline='') as csv_file:
        writer = csv.writer(csv_file)
        writer.writerows(result_list)

    for _ in range(5):
        sense.show_message(result, scroll_speed=0.10, back_colour=bg_color, text_colour=[155, 155, 155])
@app.route('/')
def index():
    """Flask view: render the current Sense HAT readings on the index page."""
    sense = SenseHat()
    sense.clear()
    celcius = round(sense.get_temperature(), 1)
    fahrenheit = round(1.8 * celcius + 32, 1)
    humidity = round(sense.get_humidity(), 1)
    pressure = round(sense.get_pressure(), 1)
    # Magnus-style dew point approximation from temperature and humidity.
    dewpoint = round(243.04 * (log(humidity / 100) + ((17.625 * celcius) / (243.04 + celcius))) / (17.625 - log(humidity / 100) - (17.625 * celcius) / (243.04 + celcius)), 1)
    # Raw accelerometer values, rounded to one decimal for display.
    acceleration = sense.get_accelerometer_raw()
    x = round(acceleration['x'], 1)
    y = round(acceleration['y'], 1)
    z = round(acceleration['z'], 1)
    return render_template('weather.html', celcius=celcius, fahrenheit=fahrenheit, humidity=humidity, pressure=pressure, dewpoint=dewpoint, x=x, y=y, z=z)
# NOTE(review): ``while`` (not ``if``) means weather() is re-run forever
# when the script is executed directly -- presumably intentional to keep
# the display loop alive; confirm, otherwise change to ``if``.
while __name__ == '__main__':
    weather()
    #app.run(host='0.0.0.0')
| Python | 0.000001 | |
7e71b21f655ec35bd5ebd79aeb5dbec6945a77a7 | Add purdue harvester | scrapi/harvesters/purdue.py | scrapi/harvesters/purdue.py | '''
Harvester for the Purdue University Research Repository for the SHARE project
Example API call: http://purr.purdue.edu/oaipmh?verb=ListRecords&metadataPrefix=oai_dc
'''
from __future__ import unicode_literals
from scrapi.base import OAIHarvester
class PurdueHarvester(OAIHarvester):
short_name = 'purdue'
long_name = 'PURR - Purdue University Research Repository'
url = 'http://purr.purdue.edu'
base_url = 'http://purr.purdue.edu/oaipmh'
property_list = ['date', 'relation', 'identifier', 'type', 'setSpec']
timezone_granularity = True
| Python | 0.999533 | |
7ecfe7d20f8708a1dada5761cdc02905b0e370e5 | use correct separator | scripts/ci/wheel_factory.py | scripts/ci/wheel_factory.py | #!/usr/bin/env python
import requirements
import argparse
import glob
import os
parser = argparse.ArgumentParser()
parser.add_argument('file', help="requirements.txt", type=str)
parser.add_argument('wheeldir', help="wheeldir location", type=str)
args = parser.parse_args()
req_file = open(args.file, 'r')
for req in requirements.parse(req_file):
print "Checking " + args.wheeldir + os.path.sep + req.name + "*.whl"
if not glob.glob(args.wheeldir + os.path.sep + req.name + "*.whl"):
os.system("pip wheel --wheel-dir=" + args.wheeldir + " " + req.name + "".join(req.specs) + "".join(req.extras))
| #!/usr/bin/env python
import requirements
import argparse
import glob
import os
parser = argparse.ArgumentParser()
parser.add_argument('file', help="requirements.txt", type=str)
parser.add_argument('wheeldir', help="wheeldir location", type=str)
args = parser.parse_args()
req_file = open(args.file, 'r')
for req in requirements.parse(req_file):
print "Checking " + args.wheeldir + os.path.pathsep + req.name + "*.whl"
if not glob.glob(args.wheeldir + os.path.pathsep + req.name + "*.whl"):
os.system("pip wheel --wheel-dir=" + args.wheeldir + " " + req.name + "".join(req.specs) + "".join(req.extras))
| Python | 0.000691 |
027a199924ee256170a2e369733a57fcc7483c88 | Add missing numeter namespace in poller | poller/numeter/__init__.py | poller/numeter/__init__.py | __import__('pkg_resources').declare_namespace(__name__)
| Python | 0.00005 | |
6557ef962a4147e5995347c11b8a8a14b26495f0 | Add a facility to delegate credentials to another principal. This is currently useless, since nobody can verify delegated credentials yet. | protogeni/test/delegate.py | protogeni/test/delegate.py | #! /usr/bin/env python
#
# EMULAB-COPYRIGHT
# Copyright (c) 2009 University of Utah and the Flux Group.
# All rights reserved.
#
# Permission to use, copy, modify and distribute this software is hereby
# granted provided that (1) source code retains these copyright, permission,
# and disclaimer notices, and (2) redistributions including binaries
# reproduce the notices in supporting documentation.
#
# THE UNIVERSITY OF UTAH ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
# CONDITION. THE UNIVERSITY OF UTAH DISCLAIMS ANY LIABILITY OF ANY KIND
# FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
import datetime
import os
import re
import sys
import tempfile
import uuid
import xml.dom.minidom
# Exactly one argument is required: the certificate of the principal the
# credential is being delegated to.
if len( sys.argv ) != 2:
    print >> sys.stderr, "usage: " + sys.argv[ 0 ] + " principal"
    sys.exit( 1 )
HOME = os.environ["HOME"]
# Path to my certificate
CERTIFICATE = HOME + "/.ssl/encrypted.pem"
XMLSEC1 = "xmlsec1"
CONFIGFILE = ".protogeni-config.py"
GLOBALCONF = HOME + "/" + CONFIGFILE
LOCALCONF = CONFIGFILE
# Allow per-user and per-directory overrides of the settings above.
# NOTE(review): execfile runs arbitrary Python from these config paths.
if os.path.exists( GLOBALCONF ):
    execfile( GLOBALCONF )
if os.path.exists( LOCALCONF ):
    execfile( LOCALCONF )
# Look up an element (which must exist exactly once) within a node.
def Lookup( node, name ):
    """Return the unique child of ``node`` whose tag name is ``name``.

    Exits the program with status 1 if the element is missing or occurs
    more than once (the credential is then malformed).
    """
    # List comprehension instead of filter(): keeps len() working on
    # Python 3, where filter() returns a lazy iterator.
    cnodes = [ x for x in node.childNodes if x.nodeName == name ]
    if len( cnodes ) != 1:
        # stderr.write instead of the Python-2-only ``print >>`` statement;
        # the extra "\n" reproduces print's trailing newline exactly.
        sys.stderr.write( sys.argv[ 0 ] + ": invalid credential\n" + "\n" )
        sys.exit( 1 )
    return cnodes[ 0 ]
def SimpleNode(d, elem, body):
    """Create element ``elem`` in document ``d`` containing text ``body``."""
    node = d.createElement(elem)
    text = d.createTextNode(body)
    node.appendChild(text)
    return node
# --- Read the new owner's certificate -------------------------------------
# Extract the base64 body between the PEM markers; this becomes the
# delegated credential's owner_gid.
principal = open( sys.argv[ 1 ] )
owner = re.search( r"^-----BEGIN CERTIFICATE-----\s*(.*)" +
                   "^-----END CERTIFICATE-----$", principal.read(),
                   re.MULTILINE | re.DOTALL ).group( 1 )
principal.close()
# --- Build the delegated credential ---------------------------------------
# The existing credential document is read from stdin; ``old`` is the
# credential being delegated.
doc = xml.dom.minidom.parse( sys.stdin )
old = Lookup( doc.documentElement, "credential" )
c = doc.createElement( "credential" )
# Pick the lowest "refN" xml:id not already used by another credential.
id = 1
while filter( lambda x: x.getAttribute( "xml:id" ) == "ref" + str( id ),
              doc.getElementsByTagName( "credential" ) ):
    id = id + 1
c.setAttribute( "xml:id", "ref" + str( id ) )
c.appendChild( Lookup( old, "type" ).cloneNode( True ) )
c.appendChild( SimpleNode( doc, "serial", "1" ) )
c.appendChild( SimpleNode( doc, "owner_gid", owner ) )
c.appendChild( Lookup( old, "target_gid" ).cloneNode( True ) )
c.appendChild( SimpleNode( doc, "uuid", str( uuid.uuid4() ) ) )
# Delegated credential expires six hours from now (UTC, whole seconds).
t = datetime.datetime.utcnow() + datetime.timedelta( hours = 6 )
t = t.replace( microsecond = 0 )
c.appendChild( SimpleNode( doc, "expires", t.isoformat() ) )
# FIXME allow an option to specify that only a proper subset of privileges
# are propagated (or even a a different set specified, even though that would
# presumably cause the credentials to be rejected).
for n in old.childNodes:
    if n.nodeName in ( "privileges", "capabilities", "ticket", "extensions" ):
        c.appendChild( n.cloneNode( True ) )
# Replace the old credential with the new one, keeping the old one as the
# new credential's <parent> so the chain can be verified.
doc.documentElement.replaceChild( c, old )
p = doc.createElement( "parent" )
p.appendChild( old )
c.appendChild( p )
# --- Build the XML-DSig signature template --------------------------------
# An empty enveloped-signature skeleton; xmlsec1 fills in the digest and
# signature values below.
signature = doc.createElement( "Signature" );
signature.setAttribute( "xml:id", "Sig_ref" + str( id ) )
signature.setAttribute( "xmlns", "http://www.w3.org/2000/09/xmldsig#" )
Lookup( doc.documentElement, "signatures" ).appendChild( signature )
signedinfo = doc.createElement( "SignedInfo" )
signature.appendChild( signedinfo )
canonmeth = doc.createElement( "CanonicalizationMethod" );
canonmeth.setAttribute( "Algorithm",
                        "http://www.w3.org/TR/2001/REC-xml-c14n-20010315" )
signedinfo.appendChild( canonmeth )
sigmeth = doc.createElement( "SignatureMethod" );
sigmeth.setAttribute( "Algorithm",
                      "http://www.w3.org/2000/09/xmldsig#rsa-sha1" )
signedinfo.appendChild( sigmeth )
# The Reference points at the new credential via its xml:id.
reference = doc.createElement( "Reference" );
reference.setAttribute( "URI", "#ref" + str( id ) )
signedinfo.appendChild( reference )
transforms = doc.createElement( "Transforms" )
reference.appendChild( transforms )
transform = doc.createElement( "Transform" );
transform.setAttribute( "Algorithm",
                        "http://www.w3.org/2000/09/xmldsig#" +
                        "enveloped-signature" )
transforms.appendChild( transform )
digestmeth = doc.createElement( "DigestMethod" );
digestmeth.setAttribute( "Algorithm",
                         "http://www.w3.org/2000/09/xmldsig#sha1" )
reference.appendChild( digestmeth )
digestvalue = doc.createElement( "DigestValue" );
reference.appendChild( digestvalue )
signaturevalue = doc.createElement( "SignatureValue" )
signature.appendChild( signaturevalue )
keyinfo = doc.createElement( "KeyInfo" )
signature.appendChild( keyinfo )
x509data = doc.createElement( "X509Data" )
keyinfo.appendChild( x509data )
x509subname = doc.createElement( "X509SubjectName" )
x509data.appendChild( x509subname )
x509serial = doc.createElement( "X509IssuerSerial" )
x509data.appendChild( x509serial )
x509cert = doc.createElement( "X509Certificate" )
x509data.appendChild( x509cert )
keyvalue = doc.createElement( "KeyValue" )
keyinfo.appendChild( keyvalue )
# --- Sign with xmlsec1 ----------------------------------------------------
# Grrr... it would be much nicer to open a pipe to xmlsec1, but it can't
# read from standard input, so we have to use a temporary file.
tmpfile = tempfile.NamedTemporaryFile()
doc.writexml( tmpfile )
tmpfile.flush()
# The signed document is written to stdout by xmlsec1 itself; we exit
# with its return code.
ret = os.spawnlp( os.P_WAIT, XMLSEC1, XMLSEC1, "--sign", "--node-id",
                  "Sig_ref" + str( id ), "--privkey-pem",
                  CERTIFICATE + "," + CERTIFICATE, tmpfile.name )
tmpfile.close()
sys.exit( ret )
95e09bf90e898092eaf841eac0330287f5627533 | Add files via upload | demo_video_args.py | demo_video_args.py | #!/usr/bin/env python
#./tools/demo_webcam.py --net output/faster_rcnn_end2end/train/ZF_faster_rcnn_iter_10000.caffemodel --prototxt models/FACEACT/ZF/faster_rcnn_end2end/test.prototxt
# --------------------------------------------------------
# Faster R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
"""
Vivek
This scripts has been tested with OpenCV 2.9. It will throw an error with OpenCV 3
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from fast_rcnn.nms_wrapper import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
# Vivek : DEFINE YOUR CLASS HERE
# n02974003 : Tyre; n04037443 : Car
# Index in this tuple == class index in the network output;
# '__background__' must stay first (index 0 is skipped during detection).
CLASSES = ('__background__',
           'n02974003','n04037443')
# Known model checkpoints keyed by the --net shorthand.
NETS = {'vgg16': ('VGG16',
                  'VGG16_faster_rcnn_final.caffemodel'),
        'zf': ('ZF',
                  'ZF_faster_rcnn_final.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
    """Draw detected bounding boxes.

    ``dets`` rows are [x1, y1, x2, y2, score]; only rows whose score
    clears ``thresh`` are drawn.  Renders into a new matplotlib figure.
    """
    # Indices of detections above the confidence threshold.
    inds = np.where(dets[:, -1] >= thresh)[0]
    if len(inds) == 0:
        return
    # Reverse the channel order (cv2 frames are BGR; matplotlib wants RGB).
    im = im[:, :, (2, 1, 0)]
    fig, ax = plt.subplots(figsize=(12, 12))
    ax.imshow(im, aspect='equal')
    for i in inds:
        bbox = dets[i, :4]
        score = dets[i, -1]

        ax.add_patch(
            plt.Rectangle((bbox[0], bbox[1]),
                          bbox[2] - bbox[0],
                          bbox[3] - bbox[1], fill=False,
                          edgecolor='red', linewidth=3.5)
            )
        # Class label and score just above the box's top-left corner.
        ax.text(bbox[0], bbox[1] - 2,
                '{:s} {:.3f}'.format(class_name, score),
                bbox=dict(facecolor='blue', alpha=0.5),
                fontsize=14, color='white')

    ax.set_title(('{} detections with '
                  'p({} | box) >= {:.1f}').format(class_name, class_name,
                                                  thresh),
                  fontsize=14)
    plt.axis('off')
    plt.tight_layout()
    plt.draw()
def demo(net, image_name):
    """Run the detector over every frame of the video file ``image_name``.

    Draws labelled boxes for detections above a fixed confidence
    threshold and shows the annotated frames until the stream ends or
    'q' is pressed.
    """
    # Bug fix: the capture source was hard-coded to 'TireWorks.mp4',
    # silently ignoring the --videoname value passed in from main().
    cap = cv2.VideoCapture(image_name)
    zeros = None
    while cap.isOpened():
        ret, frame = cap.read()
        # Check ret *before* touching the frame; the original read
        # frame.shape first and crashed on an unreadable stream.
        if not ret:
            break
        if zeros is None:
            (h, w) = frame.shape[:2]
            zeros = np.zeros((h, w), dtype="uint8")

        timer = Timer()
        timer.tic()
        scores, boxes = im_detect(net, frame)
        timer.toc()
        print ('Detection took {:.3f}s for '
               '{:d} object proposals').format(timer.total_time, boxes.shape[0])

        CONF_THRESH = 0.8
        NMS_THRESH = 0.3
        for cls_ind, cls in enumerate(CLASSES[1:]):
            cls_ind += 1  # because we skipped background
            cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]
            cls_scores = scores[:, cls_ind]
            dets = np.hstack((cls_boxes,
                              cls_scores[:, np.newaxis])).astype(np.float32)
            # Non-maximum suppression, then keep confident detections only.
            keep = nms(dets, NMS_THRESH)
            dets = dets[keep, :]
            inds = np.where(dets[:, -1] >= CONF_THRESH)[0]
            for i in inds:
                bbox = dets[i, :4]
                cv2.putText(frame, cls, (bbox[0], int(bbox[3] + 25)), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255), 2, cv2.CV_AA)
                cv2.rectangle(frame, (bbox[0], bbox[1]), (bbox[2], bbox[3]), (255,255,0), 3)

        # (Removed: dead B/G/R channel split and unused ``output`` buffer.)
        cv2.imshow('Deep Learning Demonstration', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()
def parse_args():
    """Build the demo's argument parser and return the parsed namespace."""
    parser = argparse.ArgumentParser(description='Faster R-CNN demo')
    parser.add_argument('--gpu', dest='gpu_id', type=int, default=0,
                        help='GPU device id to use [0]')
    parser.add_argument('--cpu', dest='cpu_mode', action='store_true',
                        help='Use CPU mode (overrides --gpu)')
    parser.add_argument('--net', dest='demo_net', default='vgg16',
                        help='caffe model')
    parser.add_argument('--prototxt', dest='prototxt',
                        default='models/FACEACT/ZF/faster_rcnn_end2end/test.prototxt',
                        help='Prototxt')
    parser.add_argument('--videoname', dest='videoname',
                        default='F1IndianGrandPrix.mp4',
                        help='Video Name')
    return parser.parse_args()
if __name__ == '__main__':
    cfg.TEST.HAS_RPN = True  # Use RPN for proposals
    args = parse_args()
    # CPU mode wins over any --gpu selection.
    if args.cpu_mode:
        caffe.set_mode_cpu()
    else:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu_id)
        cfg.GPU_ID = args.gpu_id
    # --net supplies the caffemodel path, --prototxt the network definition.
    net = caffe.Net(args.prototxt, args.demo_net, caffe.TEST)
    print '\n\nLoaded network {:s}'.format(args.demo_net)
    demo(net, args.videoname)
| Python | 0.999913 | |
2022245c9172037bf71be29d0da53f1c47f6aac9 | guess user number: python3 | guess_user_number/python3/guess_user_number.py | guess_user_number/python3/guess_user_number.py | #!/usr/bin/python
# Binary-search guessing game: the computer guesses the user's number.
# Renamed min/max -> low/high: the originals shadowed the builtins, and
# ``low = mid`` / ``high = mid`` never excluded the rejected guess, so the
# loop hung forever once the range narrowed to two numbers.
low = 1
high = 100
print("Think of a number from " + str(low) + " to " + str(high) + ". Then, tell the computer if its guess is correct (Y), lower than the number (L), or higher than the number (H).")
found = False
while not found:
    mid = (low + high) // 2
    response = input(str(mid) + "? >>> ")
    response = response.upper()
    if response == "Y":
        print("found it!!")
        found = True
    elif response == "L":
        # Guess was too low: the answer lies strictly above mid.
        low = mid + 1
    elif response == "H":
        # Guess was too high: the answer lies strictly below mid.
        high = mid - 1
| Python | 0.999999 | |
7420f49f8e1508fa2017c629d8d11a16a9e28c4a | add abstract biobox class | biobox_cli/biobox.py | biobox_cli/biobox.py | from abc import ABCMeta, abstractmethod
import biobox_cli.container as ctn
import biobox_cli.util.misc as util
import tempfile as tmp
class Biobox:
    """Abstract base class for biobox CLI sub-commands.

    Subclasses supply the docopt usage string, the container volume
    mapping, and any post-run output handling; ``run`` drives the shared
    container life cycle.
    """
    __metaclass__ = ABCMeta  # Python-2-style ABC declaration

    @abstractmethod
    def prepare_volumes(self, opts, host_dst_dir):
        """Return the container volume mapping for the parsed options.

        Signature fixed to match the call in ``run`` (the abstract
        declaration was missing ``self`` and ``host_dst_dir``).
        """
        pass

    @abstractmethod
    def get_doc(self):
        """Return the docopt usage string for this sub-command."""
        pass

    @abstractmethod
    def after_run(self, output, host_dst_dir):
        """Post-process results produced in ``host_dst_dir``.

        Signature fixed to match the call in ``run`` (the abstract
        declaration previously took only one argument).
        """
        pass

    def run(self, argv):
        """Parse ``argv``, run the biobox container, and return it."""
        opts = util.parse_docopt(self.get_doc(), argv, False)
        task = opts['--task']
        image = opts['<image>']
        output = opts['--output']
        host_dst_dir = tmp.mkdtemp()
        volumes = self.prepare_volumes(opts, host_dst_dir)
        ctn.exit_if_no_image_available(image)
        ctnr = ctn.create(image, task, volumes)
        ctn.run(ctnr)
        self.after_run(output, host_dst_dir)
        return ctnr

    def remove(self, container):
        """
        Removes a container
        Note this method is not tested due to limitations of circle ci
        """
        ctn.remove(container)
4d1b006e5ba559715d55a88528cdfc0bed755182 | add import script for Weymouth | polling_stations/apps/data_collection/management/commands/import_weymouth.py | polling_stations/apps/data_collection/management/commands/import_weymouth.py | from data_collection.management.commands import BaseXpressDCCsvInconsistentPostcodesImporter
class Command(BaseXpressDCCsvInconsistentPostcodesImporter):
    """Import polling station/address data for council E07000053 (Weymouth)."""
    council_id = 'E07000053'
    addresses_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WPBC.TSV'
    # The same TSV supplies both the addresses and the stations.
    stations_name = 'parl.2017-06-08/Version 1/Democracy_Club__08June2017WPBC.TSV'
    elections = ['parl.2017-06-08']
    # Tab-separated input, despite the "Csv" importer base class.
    csv_delimiter = '\t'
| Python | 0 | |
2ce80e667de438fca20de7b4ab6847751b683e33 | Add digikey command. | src/commands/digikey.py | src/commands/digikey.py | #
# Copyright (c) 2013 Joshua Hughes <kivhift@gmail.com>
#
import urllib
import webbrowser
import qmk
class DigikeyCommand(qmk.Command):
    """Look up a part on Digi-Key.

    A new tab will be opened in the default web browser that contains the
    search results.
    """
    def __init__(self):
        self._name = 'digikey'
        self._help = self.__doc__
        # {} is filled with the url-quoted search terms in action().
        self.__baseURL = 'http://www.digikey.com/product-search/en?KeyWords={}'

    @qmk.Command.actionRequiresArgument
    def action(self, arg):
        # Collapse runs of whitespace, UTF-8 encode, then quote for the URL.
        webbrowser.open_new_tab(self.__baseURL.format(urllib.quote_plus(
            ' '.join(arg.split()).encode('utf-8'))))
def commands(): return [ DigikeyCommand() ]
| Python | 0.000001 | |
8b4bbd23bf37fb946b664f5932e4903f802c6e0d | Add first pass at integration style tests | flake8/tests/test_integration.py | flake8/tests/test_integration.py | from __future__ import with_statement
import os
import unittest
try:
from unittest import mock
except ImportError:
import mock # < PY33
from flake8 import engine
class IntegrationTestCase(unittest.TestCase):
"""Integration style tests to exercise different command line options."""
def this_file(self):
"""Return the real path of this file."""
this_file = os.path.realpath(__file__)
if this_file.endswith("pyc"):
this_file = this_file[:-1]
return this_file
def check_files(self, arglist=[], explicit_stdin=False, count=0):
"""Call check_files."""
if explicit_stdin:
target_file = "-"
else:
target_file = self.this_file()
argv = ['flake8'] + arglist + [target_file]
with mock.patch("sys.argv", argv):
style_guide = engine.get_style_guide(parse_argv=True)
report = style_guide.check_files()
self.assertEqual(report.total_errors, count)
return style_guide, report
def test_no_args(self):
# assert there are no reported errors
self.check_files()
def _job_tester(self, jobs):
# mock stdout.flush so we can count the number of jobs created
with mock.patch('sys.stdout.flush') as mocked:
guide, report = self.check_files(arglist=['--jobs=%s' % jobs])
self.assertEqual(guide.options.jobs, jobs)
self.assertEqual(mocked.call_count, jobs)
def test_jobs(self):
self._job_tester(2)
self._job_tester(10)
def test_stdin(self):
self.count = 0
def fake_stdin():
self.count += 1
with open(self.this_file(), "r") as f:
return f.read()
with mock.patch("pep8.stdin_get_value", fake_stdin):
guide, report = self.check_files(arglist=['--jobs=4'],
explicit_stdin=True)
self.assertEqual(self.count, 1)
def test_stdin_fail(self):
def fake_stdin():
return "notathing\n"
with mock.patch("pep8.stdin_get_value", fake_stdin):
# only assert needed is in check_files
guide, report = self.check_files(arglist=['--jobs=4'],
explicit_stdin=True,
count=1)
| Python | 0 | |
0d2adfcce21dd2efb5d781babec3e6b03464b6d5 | Add basic tests | tests/app/main/test_request_header.py | tests/app/main/test_request_header.py | from tests.conftest import set_config_values
def test_route_correct_secret_key(app_, client):
    """A request carrying the valid forwarder key reaches the status page."""
    config = {
        'ROUTE_SECRET_KEY_1': 'key_1',
        'ROUTE_SECRET_KEY_2': '',
        'DEBUG': False,
    }
    with set_config_values(app_, config):
        response = client.get(
            path='/_status',
            headers=[('X-Custom-forwarder', 'key_1')]
        )
        assert response.status_code == 200
def test_route_incorrect_secret_key(app_, client):
    """A request with a wrong forwarder key is rejected with 403."""
    config = {
        'ROUTE_SECRET_KEY_1': 'key_1',
        'ROUTE_SECRET_KEY_2': '',
        'DEBUG': False,
    }
    with set_config_values(app_, config):
        response = client.get(
            path='/_status',
            headers=[('X-Custom-forwarder', 'wrong_key')]
        )
        assert response.status_code == 403
| Python | 0.000004 | |
77af87198d1116b77df431d9139b30f76103dd64 | Add migration for latitute and longitude of event | fellowms/migrations/0023_auto_20160617_1350.py | fellowms/migrations/0023_auto_20160617_1350.py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-06-17 13:50
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable ``lat``/``lon`` float fields to the Event model."""
    dependencies = [
        ('fellowms', '0022_event_report_url'),
    ]
    operations = [
        migrations.AddField(
            model_name='event',
            name='lat',
            field=models.FloatField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='event',
            name='lon',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
| Python | 0.000001 | |
fb07837db870a5fdea3a98aa1381793b1b20d2c0 | Create main.py | main.py | main.py | import webapp2
import jinja2
import os
import urllib
from google.appengine.api import users
from google.appengine.ext import ndb
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
def user_key(id):
    """Build the Datastore ancestor key for one user's grocery list."""
    kind = 'GroceryList'
    return ndb.Key(kind, id)
class GroceryItem(ndb.Model):
    """Datastore model for a single grocery-list entry."""
    name = ndb.StringProperty()      # item name as entered by the user
    cost = ndb.FloatProperty()       # unit price
    quantity = ndb.IntegerProperty() # number of units
    total = ndb.FloatProperty()      # cost * quantity, set on save
    picture = ndb.BlobProperty()     # raw uploaded image bytes
    time = ndb.DateTimeProperty(auto_now_add=True)  # creation timestamp
class MainHandler(webapp2.RequestHandler):
    """Render the grocery list page for the (possibly anonymous) visitor."""

    def get(self):
        user = users.get_current_user()
        # Bug fix: the original built the ancestor query with
        # users.get_current_user().user_id() *before* checking whether
        # anyone was signed in, crashing with an AttributeError for
        # anonymous visitors.
        if user:
            items_query = GroceryItem.query(
                ancestor=user_key(user.user_id())).order(-GroceryItem.time)
            items = items_query.fetch(10)
            url = users.create_logout_url(self.request.uri)
            url_linktext = 'Logout'
        else:
            items = []
            url = users.create_login_url(self.request.uri)
            url_linktext = 'Login'

        template_values = {
            'user': user,
            'items': items,
            'url': url,
            'url_linktext': url_linktext,
        }
        template = JINJA_ENVIRONMENT.get_template('index.html')
        self.response.write(template.render(template_values))
class GroceryList(webapp2.RequestHandler):
    """Handle POSTs that add an item to the signed-in user's list."""

    def post(self):
        user = users.get_current_user()
        item = GroceryItem(parent=user_key(user.user_id()))
        item.name = self.request.get('name')
        # Bug fix: request parameters arrive as strings; FloatProperty /
        # IntegerProperty need real numbers, and ``cost * quantity`` would
        # otherwise try to multiply two strings.
        item.cost = float(self.request.get('cost'))
        item.quantity = int(self.request.get('quantity'))
        item.picture = self.request.get('img')
        item.total = item.cost * item.quantity
        item.put()

        query_params = {'user': user_key(user.user_id())}
        self.redirect('/?' + urllib.urlencode(query_params))
# Bug fix: the two route tuples were missing a separating comma, which
# made Python *call* the first tuple with the second as its argument
# (a TypeError at import time).
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/add', GroceryList)
], debug=True)
| Python | 0.000001 | |
b920f5aeecf7843fcc699db4a70a9a0f124fa198 | Add unit test for protonate.py | tests/test_protonate.py | tests/test_protonate.py | import propka.atom
import propka.protonate
def test_protonate_atom():
    """Protonating a bare vanadium (VO4) atom should add six protons."""
    vanadium = propka.atom.Atom(
        "HETATM 4479  V    VO4 A1578     -19.097  16.967   0.500  1.00 17.21           V  "
    )
    assert not vanadium.is_protonated

    protonator = propka.protonate.Protonate()
    protonator.protonate_atom(vanadium)

    assert vanadium.is_protonated
    assert vanadium.number_of_protons_to_add == 6
| Python | 0 | |
f502b9bb7c0cddda05cd85cf60f88f0a801b43d1 | Add docstrings | flocker/common/script.py | flocker/common/script.py | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Helpers for flocker shell commands."""
import sys
from twisted.internet.task import react
from twisted.python import usage
from zope.interface import Interface
from .. import __version__
def flocker_standard_options(cls):
    """
    Add various standard command line options to flocker commands and
    subcommands: a ``--version`` that prints the package version and a
    stackable ``-v``/``--verbose`` counter.

    :param type cls: The `class` to decorate.
    :return: The decorated `class`.
    """
    base_init = cls.__init__

    def __init__(self, *args, **kwargs):
        """
        Initialise verbosity to `0`, capture an optional ``sys_module``
        test double, then defer to the wrapped ``__init__``.

        :param sys_module: An optional ``sys`` like fake module for use in
            testing. Defaults to ``sys``.
        """
        self._sys_module = kwargs.pop('sys_module', sys)
        self['verbosity'] = 0
        base_init(self, *args, **kwargs)

    def opt_version(self):
        """
        Print the program's version and exit.
        """
        self._sys_module.stdout.write(__version__.encode('utf-8') + b'\n')
        raise SystemExit(0)

    def opt_verbose(self):
        """
        Increase the verbosity.
        """
        self['verbosity'] += 1

    cls.__init__ = __init__
    cls.opt_version = opt_version
    cls.opt_verbose = opt_verbose
    cls.opt_v = opt_verbose
    return cls
class ICommandLineScript(Interface):
    """
    A script which can be run by ``FlockerScriptRunner``.
    """
    def main(reactor, options):
        """
        Run the script's main logic.  (zope.interface methods are declared
        without ``self``.)

        :param twisted.internet.reactor reactor: A Twisted reactor.
        :param dict options: A dictionary of configuration options.
        :return: A ``Deferred`` which fires when the script has completed.
        """
class FlockerScriptRunner(object):
    """
    An API for running standard flocker scripts.

    :ivar ICommandLineScript script: See ``script`` of ``__init__``.
    """
    def __init__(self, script, options, sys_module=None):
        """
        :param ICommandLineScript script: A script object with a ``main``
            method.
        :param usage.Options options: An option parser object.
        :param sys_module: An optional ``sys`` like fake module for use in
            testing. Defaults to ``sys``.
        """
        self.script = script
        self.options = options
        self.sys_module = sys if sys_module is None else sys_module

    def _parseOptions(self, arguments):
        """
        Parse the options defined in the script's options class.

        ``UsageError``s are caught, reported on `stderr`, and terminate the
        script with exit status 1.

        :param list arguments: The command line arguments to be parsed.
        :return: A ``dict`` of configuration options.
        """
        try:
            self.options.parseOptions(arguments)
        except usage.UsageError as e:
            err = self.sys_module.stderr
            err.write(unicode(self.options).encode('utf-8'))
            err.write(b'ERROR: ' + e.message.encode('utf-8') + b'\n')
            raise SystemExit(1)
        return self.options

    def main(self):
        """
        Parse arguments and run the script's main function via ``react``.
        """
        parsed = self._parseOptions(self.sys_module.argv[1:])
        react(self.script.main, (parsed,))
# Public API of this module.
__all__ = [
    'flocker_standard_options',
    'ICommandLineScript',
    'FlockerScriptRunner',
    ]
| # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""Helpers for flocker shell commands."""
import sys
from twisted.internet.task import react
from twisted.python import usage
from zope.interface import Interface
from .. import __version__
def flocker_standard_options(cls):
    """
    Add various standard command line options to flocker commands and
    subcommands.

    :param type cls: The `class` to decorate.
    :return: The decorated `class`.
    """
    original_init = cls.__init__
    def __init__(self, *args, **kwargs):
        """
        Set the default verbosity to `0` and then call the original
        ``cls.__init__``.

        :param sys_module: An optional ``sys`` like fake module for use in
            testing. Defaults to ``sys``.
        """
        self._sys_module = kwargs.pop('sys_module', sys)
        self['verbosity'] = 0
        original_init(self, *args, **kwargs)
    cls.__init__ = __init__
    def opt_version(self):
        """
        Print the program's version and exit.
        """
        self._sys_module.stdout.write(__version__.encode('utf-8') + b'\n')
        raise SystemExit(0)
    cls.opt_version = opt_version
    def opt_verbose(self):
        """
        Increase the verbosity.
        """
        self['verbosity'] += 1
    cls.opt_verbose = opt_verbose
    cls.opt_v = opt_verbose
    return cls
class ICommandLineScript(Interface):
    """
    A script which can be run by ``FlockerScriptRunner``.
    """
    def main(reactor, options):
        """
        Run the script's main logic.  (zope.interface methods are declared
        without ``self``.)

        :param twisted.internet.reactor reactor: A Twisted reactor.
        :param dict options: A dictionary of configuration options.
        :return: A ``Deferred`` which fires when the script has completed.
        """
class FlockerScriptRunner(object):
    """
    An API for running standard flocker scripts.

    :ivar ICommandLineScript script: See ``script`` of ``__init__``.
    """
    def __init__(self, script, options, sys_module=None):
        """
        :param ICommandLineScript script: A script object with a ``main``
            method.
        :param usage.Options options: An option parser object.
        :param sys_module: An optional ``sys`` like fake module for use in
            testing. Defaults to ``sys``.
        """
        self.script = script
        self.options = options
        if sys_module is None:
            sys_module = sys
        self.sys_module = sys_module
    def _parseOptions(self, arguments):
        """
        Parse the options defined in the script's options class.

        L{UsageErrors} are caught and printed to I{stderr} and the script then
        exits.

        @param arguments: The command line arguments to be parsed.
        @rtype: L{Options}
        """
        try:
            self.options.parseOptions(arguments)
        except usage.UsageError as e:
            self.sys_module.stderr.write(unicode(self.options).encode('utf-8'))
            self.sys_module.stderr.write(
                b'ERROR: ' + e.message.encode('utf-8') + b'\n')
            raise SystemExit(1)
        return self.options
    def main(self):
        """
        Parse arguments and run the script's main function via L{react}.
        """
        options = self._parseOptions(self.sys_module.argv[1:])
        return react(self.script.main, (options,))
__all__ = [
'flocker_standard_options',
'ICommandLineScript',
'FlockerScriptRunner',
]
| Python | 0.000005 |
addba07842f95e9b5bac3a97ddb4f81035bb2fc8 | Don't add args that return None names | ouimeaux/device/api/service.py | ouimeaux/device/api/service.py | import logging
from xml.etree import cElementTree as et
import requests
from .xsd import service as serviceParser
log = logging.getLogger(__name__)
REQUEST_TEMPLATE = """
<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:{action} xmlns:u="{service}">
{args}
</u:{action}>
</s:Body>
</s:Envelope>
"""
class Action(object):
def __init__(self, service, action_config):
self._action_config = action_config
self.name = action_config.get_name()
self.serviceType = service.serviceType
self.controlURL = service.controlURL
self.args = {}
self.headers = {
'Content-Type': 'text/xml',
'SOAPACTION': '"%s#%s"' % (self.serviceType, self.name)
}
arglist = action_config.get_argumentList()
if arglist is not None:
for arg in arglist.get_argument():
name = arg.get_name()
if name:
# TODO: Get type instead of setting 0
self.args[arg.get_name()] = 0
def __call__(self, **kwargs):
arglist = '\n'.join('<{0}>{1}</{0}>'.format(arg, value)
for arg, value in kwargs.iteritems())
body = REQUEST_TEMPLATE.format(
action=self.name,
service=self.serviceType,
args=arglist
)
response = requests.post(self.controlURL, body.strip(), headers=self.headers)
d = {}
for r in et.fromstring(response.content).getchildren()[0].getchildren()[0].getchildren():
d[r.tag] = r.text
return d
def __repr__(self):
return "<Action %s(%s)>" % (self.name, ", ".join(self.args))
class Service(object):
"""
Represents an instance of a service on a device.
"""
def __init__(self, service, base_url):
self._base_url = base_url.rstrip('/')
self._config = service
url = '%s/%s' % (base_url, service.get_SCPDURL().strip('/'))
xml = requests.get(url)
self.actions = {}
self._svc_config = serviceParser.parseString(xml.content).actionList
for action in self._svc_config.get_action():
act = Action(self, action)
name = action.get_name()
self.actions[name] = act
setattr(self, name, act)
@property
def hostname(self):
return self._base_url.split('/')[-1]
@property
def controlURL(self):
return '%s/%s' % (self._base_url,
self._config.get_controlURL().strip('/'))
@property
def serviceType(self):
return self._config.get_serviceType()
| import logging
from xml.etree import cElementTree as et
import requests
from .xsd import service as serviceParser
log = logging.getLogger(__name__)
REQUEST_TEMPLATE = """
<?xml version="1.0" encoding="utf-8"?>
<s:Envelope xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/">
<s:Body>
<u:{action} xmlns:u="{service}">
{args}
</u:{action}>
</s:Body>
</s:Envelope>
"""
class Action(object):
def __init__(self, service, action_config):
self._action_config = action_config
self.name = action_config.get_name()
self.serviceType = service.serviceType
self.controlURL = service.controlURL
self.args = {}
self.headers = {
'Content-Type': 'text/xml',
'SOAPACTION': '"%s#%s"' % (self.serviceType, self.name)
}
arglist = action_config.get_argumentList()
if arglist is not None:
for arg in arglist.get_argument():
# TODO: Get type instead of setting 0
self.args[arg.get_name()] = 0
def __call__(self, **kwargs):
arglist = '\n'.join('<{0}>{1}</{0}>'.format(arg, value)
for arg, value in kwargs.iteritems())
body = REQUEST_TEMPLATE.format(
action=self.name,
service=self.serviceType,
args=arglist
)
response = requests.post(self.controlURL, body.strip(), headers=self.headers)
d = {}
for r in et.fromstring(response.content).getchildren()[0].getchildren()[0].getchildren():
d[r.tag] = r.text
return d
def __repr__(self):
return "<Action %s(%s)>" % (self.name, ", ".join(self.args))
class Service(object):
"""
Represents an instance of a service on a device.
"""
def __init__(self, service, base_url):
self._base_url = base_url.rstrip('/')
self._config = service
url = '%s/%s' % (base_url, service.get_SCPDURL().strip('/'))
xml = requests.get(url)
self.actions = {}
self._svc_config = serviceParser.parseString(xml.content).actionList
for action in self._svc_config.get_action():
act = Action(self, action)
name = action.get_name()
self.actions[name] = act
setattr(self, name, act)
@property
def hostname(self):
return self._base_url.split('/')[-1]
@property
def controlURL(self):
return '%s/%s' % (self._base_url,
self._config.get_controlURL().strip('/'))
@property
def serviceType(self):
return self._config.get_serviceType()
| Python | 0.999303 |
2bf763e39e91ef989c121bba420e4ae09ea0a569 | Add Diagonal Difference HackerRank Problem | algorithms/diagonal_difference/kevin.py | algorithms/diagonal_difference/kevin.py | #!/usr/bin/env python
def get_matrix_row_from_input():
return [int(index) for index in input().strip().split(' ')]
n = int(input().strip())
primary_diag_sum = 0
secondary_diag_sum = 0
for row_count in range(n):
row = get_matrix_row_from_input()
primary_diag_sum += row[row_count]
secondary_diag_sum += row[-1 - row_count]
print(abs(primary_diag_sum - secondary_diag_sum))
| Python | 0.000006 | |
9098904ffcd47c4327594f8fc6ce8ce8694e5422 | Create getsubinterfaces.py | python/getsubinterfaces.py | python/getsubinterfaces.py | #Device subinterface data retrieval script. Copyright Ingmar Van Glabbeek ingmar@infoblox.com
#Licensed under Apache-2.0
#This script will pull all devices of a given device group and then list the devices management ip as well as the available management ips.
#By default it saves the output to "deviceinterfacedump.json"
#Tested on NetMRI 7.3.1 and 7.3.2
#Modules required:
import getpass
import requests
import json
import urllib3
from requests.auth import HTTPBasicAuth
from http.client import responses
import time
#You can hardcode credentials here, it's not safe. Don't do it.
#hostname = "netmri.infoblox.com"
#username = "admin"
#password = "infoblox"
#urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def main():
cookie_host = wapi_connect()
#print(cookie_host)
devicelist = getdevices(cookie_host)
filtered_data = devicedata(devicelist)
#uncomment next line if you want to write to console
#print(json.dumps(filtered_data,indent=4, sort_keys=True))
filename = open("deviceinterfacedump.json","w")
filename.write(json.dumps(filtered_data,indent=4))
filename.close()
print("Data retrieved successfully")
def devicedata(devicelist):
listload = json.loads(devicelist)
data = []
for e in listload['rows']:
if not e["if_addrs"]:
device = {"DeviceID":e["DeviceID"],"DeviceName":e["DeviceName"],"DeviceType":e["DeviceType"],"DeviceIPDotted":e["DeviceIPDotted"],"Other InterfaceIP":["none"]}
data.append(device)
else:
device = {"DeviceID": e['DeviceID'], "DeviceName": e["DeviceName"], "DeviceType": e["DeviceType"],
"DeviceIPDotted": e["DeviceIPDotted"], "Other InterfaceIP":[]}
for f in e["if_addrs"]:
i=1
interface = {"InterfaceIP":f["ifIPDotted"], "Interfacename":f["ifName"]}
device["Other InterfaceIP"].insert(i,interface)
data.append(device)
i=i+1
dataftw=json.dumps(data)
returndata=json.loads(dataftw)
return returndata
def getdevices(cookie_host):
if not cookie_host:
print("No connection established.")
return 0
#get current time
ts = time.time()
hostname=cookie_host[1]
#limits number of results
limit = input("Limit to this number of devices: ")
get_url = "https://" + hostname + "/api/3.3/device_groups/index"
response = requests.get(get_url, cookies=cookie_host[0], verify=False)
d=response.text
dl=json.loads(d)
print("List of DeviceGroups")
for e in dl["device_groups"]:
dglist={"GroupName":e["GroupName"],"GroupID":e["GroupID"]}
print(dglist)
devicegroup = input("Based on the output specify the devicegroup ID by its ID: ")
get_url = "https://" + hostname + "/api/3.3/discovery_statuses/static/current.extjs"
querystring = {"_dc": ts, "filename": "recent_activity.csv", "filter": "null", "limit": limit,
"GroupID": devicegroup}
response = requests.get(get_url, cookies=cookie_host[0], verify=False, params=querystring)
t=response.text
print("We are fetching a list of " + str(limit) +
" devices for devicegroup " + str(devicegroup) + ".")
return(t)
def wapi_connect():
hostname = input("Enter the NetMRI hostname or IP: ")
username = input("Enter your NetMRI username: ")
password = getpass.getpass("Enter your Password: ")
https_val = input("Disable SSL validations?(y/n) ")
if https_val in ("y", "Y"):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
print("SSL validation disabled")
if https_val in ("n", "N"):
print("SSL validation enabled")
login_url = "https://" + hostname + "/api/3.3/device_groups/index"
print("logging in to " + hostname)
try:
login_result = requests.get(
login_url,
auth=HTTPBasicAuth(username, password),
timeout=5,
verify=False)
except requests.exceptions.ConnectTimeout as e:
print("Connection time out after 5 seconds.")
exit(1)
except requests.exceptions.ConnectionError as e:
print("No route to host " + hostname)
exit(1)
if has_error(login_result):
exit(1)
else:
print("Login OK")
return(login_result.cookies,hostname)
def has_error(_result):
if _result.status_code == 200:
return 0
elif _result.status_code == 201:
return 0
try:
err_text = _result.json()['text']
except KeyError as e:
err_text = "Response contains no error text"
except json.decoder.JSONDecodeError as e:
err_text = "No JSON Response"
# print out the HTTP response code, description, and error text
http_code = _result.status_code
http_desc = responses[http_code]
print("HTTP Code [%3d] %s. %s" % (http_code, http_desc, err_text))
return 1
if __name__ == "__main__":
main()
| Python | 0.000001 | |
9b0b3c474e250193730308b555034d458697e01b | Fix dispatching of WeMo switch devices. | homeassistant/components/wemo.py | homeassistant/components/wemo.py | """
homeassistant.components.wemo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WeMo device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wemo/
"""
import logging
from homeassistant.components import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
REQUIREMENTS = ['pywemo==0.3.12']
DOMAIN = 'wemo'
DISCOVER_LIGHTS = 'wemo.light'
DISCOVER_MOTION = 'wemo.motion'
DISCOVER_SWITCHES = 'wemo.switch'
# mapping from Wemo model_name to service
WEMO_MODEL_DISPATCH = {
'Bridge': DISCOVER_LIGHTS,
'Insight': DISCOVER_SWITCHES,
'Maker': DISCOVER_SWITCHES,
'Motion': DISCOVER_MOTION,
'Socket': DISCOVER_SWITCHES,
}
WEMO_SERVICE_DISPATCH = {
DISCOVER_LIGHTS: 'light',
DISCOVER_MOTION: 'binary_sensor',
DISCOVER_SWITCHES: 'switch',
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, config):
"""Common set up for WeMo devices."""
import pywemo
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for WeMo discovery events."""
# name, model, location, mac
_, model_name, _, mac = discovery_info
# Only register a device once
if mac in KNOWN_DEVICES:
return
KNOWN_DEVICES.append(mac)
service = WEMO_MODEL_DISPATCH.get(model_name)
component = WEMO_SERVICE_DISPATCH.get(service)
if service is not None:
discovery.discover(hass, service, discovery_info,
component, config)
discovery.listen(hass, discovery.SERVICE_WEMO, discovery_dispatch)
_LOGGER.info("Scanning for WeMo devices.")
devices = [(device.host, device) for device in pywemo.discover_devices()]
# Add static devices from the config file
devices.extend((address, None) for address in config.get('static', []))
for address, device in devices:
port = pywemo.ouimeaux_device.probe_wemo(address)
if not port:
_LOGGER.warning('Unable to probe wemo at %s', address)
continue
_LOGGER.info('Adding wemo at %s:%i', address, port)
url = 'http://%s:%i/setup.xml' % (address, port)
if device is None:
device = pywemo.discovery.device_from_description(url, None)
discovery_info = (device.name, device.model_name, url, device.mac)
discovery.discover(hass, discovery.SERVICE_WEMO, discovery_info)
return True
| """
homeassistant.components.wemo
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
WeMo device discovery.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/wemo/
"""
import logging
from homeassistant.components import discovery
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
REQUIREMENTS = ['pywemo==0.3.12']
DOMAIN = 'wemo'
DISCOVER_LIGHTS = 'wemo.light'
DISCOVER_MOTION = 'wemo.motion'
DISCOVER_SWITCHES = 'wemo.switch'
# mapping from Wemo model_name to service
WEMO_MODEL_DISPATCH = {
'Bridge': DISCOVER_LIGHTS,
'Insight': DISCOVER_SWITCHES,
'Maker': DISCOVER_SWITCHES,
'Motion': DISCOVER_MOTION,
'Switch': DISCOVER_SWITCHES,
}
WEMO_SERVICE_DISPATCH = {
DISCOVER_LIGHTS: 'light',
DISCOVER_MOTION: 'binary_sensor',
DISCOVER_SWITCHES: 'switch',
}
SUBSCRIPTION_REGISTRY = None
KNOWN_DEVICES = []
_LOGGER = logging.getLogger(__name__)
# pylint: disable=unused-argument, too-many-function-args
def setup(hass, config):
"""Common set up for WeMo devices."""
import pywemo
global SUBSCRIPTION_REGISTRY
SUBSCRIPTION_REGISTRY = pywemo.SubscriptionRegistry()
SUBSCRIPTION_REGISTRY.start()
def stop_wemo(event):
"""Shutdown Wemo subscriptions and subscription thread on exit."""
_LOGGER.info("Shutting down subscriptions.")
SUBSCRIPTION_REGISTRY.stop()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_wemo)
def discovery_dispatch(service, discovery_info):
"""Dispatcher for WeMo discovery events."""
# name, model, location, mac
_, model_name, _, mac = discovery_info
# Only register a device once
if mac in KNOWN_DEVICES:
return
KNOWN_DEVICES.append(mac)
service = WEMO_MODEL_DISPATCH.get(model_name)
component = WEMO_SERVICE_DISPATCH.get(service)
if service is not None:
discovery.discover(hass, service, discovery_info,
component, config)
discovery.listen(hass, discovery.SERVICE_WEMO, discovery_dispatch)
_LOGGER.info("Scanning for WeMo devices.")
devices = [(device.host, device) for device in pywemo.discover_devices()]
# Add static devices from the config file
devices.extend((address, None) for address in config.get('static', []))
for address, device in devices:
port = pywemo.ouimeaux_device.probe_wemo(address)
if not port:
_LOGGER.warning('Unable to probe wemo at %s', address)
continue
_LOGGER.info('Adding wemo at %s:%i', address, port)
url = 'http://%s:%i/setup.xml' % (address, port)
if device is None:
device = pywemo.discovery.device_from_description(url, None)
discovery_info = (device.name, device.model_name, url, device.mac)
discovery.discover(hass, discovery.SERVICE_WEMO, discovery_info)
return True
| Python | 0 |
54a8a77c75660eeae314c410685243e2b5bc59ca | add sw infer wrapper | dltk/core/utils.py | dltk/core/utils.py | import numpy as np
from dltk.core.io.sliding_window import SlidingWindow
def sliding_window_segmentation_inference(session, ops_list, sample_dict, batch_size=1):
"""
Parameters
----------
session
ops_list
sample_dict
Returns
-------
"""
# TODO: asserts
pl_shape = list(sample_dict.keys()[0].get_shape().as_list())
pl_bshape = pl_shape[1:-1]
inp_shape = list(sample_dict.values()[0].shape)
inp_bshape = inp_shape[1:-1]
out_dummies = [np.zeros([inp_shape[0], ] + inp_bshape + [op.get_shape().as_list()[-1]]
if len(op.get_shape().as_list()) == len(inp_shape) else []) for op in ops_list]
out_dummy_counter = [np.zeros_like(o) for o in out_dummies]
op_shape = list(ops_list[0].get_shape().as_list())
op_bshape = op_shape[1:-1]
out_diff = np.array(pl_bshape) - np.array(op_bshape)
padding = [[0, 0]] + [[diff // 2, diff - diff // 2] for diff in out_diff] + [[0, 0]]
padded_dict = {k: np.pad(v, padding, mode='constant') for k,v in sample_dict.items()}
f_bshape = padded_dict.values()[0].shape[1:-1]
striding = list(np.array(op_bshape) // 2) if all(out_diff == 0) else op_bshape
sw = SlidingWindow(f_bshape, pl_bshape, striding=striding)
out_sw = SlidingWindow(inp_bshape, op_bshape, striding=striding)
if batch_size > 1:
slicers = []
out_slicers = []
done = False
while True:
try:
slicer = next(sw)
out_slicer = next(out_sw)
except StopIteration:
done = True
if batch_size == 1:
sw_dict = {k: v[slicer] for k,v in padded_dict.items()}
op_parts = session.run(ops_list, feed_dict=sw_dict)
for idx in range(len(op_parts)):
out_dummies[idx][out_slicer] += op_parts[idx]
out_dummy_counter[idx][out_slicer] += 1
else:
slicers.append(slicer)
out_slicers.append(out_slicer)
if len(slicers) == batch_size or done:
slices_dict = {k: np.concatenate([v[slicer] for slicer in slicers], 0) for k,v in padded_dict.items()}
all_op_parts = session.run(ops_list, feed_dict=slices_dict)
zipped_parts = zip(*[np.array_split(part, len(slicers)) for part in all_op_parts])
for out_slicer, op_parts in zip(out_slicers, zipped_parts):
for idx in range(len(op_parts)):
out_dummies[idx][out_slicer] += op_parts[idx]
out_dummy_counter[idx][out_slicer] += 1
slicers = []
out_slicers = []
if done:
break
return [o / c for o, c in zip(out_dummies, out_dummy_counter)]
| Python | 0.000012 | |
c262cc4cc18336257972105c1cd6c409da8ed5cd | Create mcmc.py | mcmc.py | mcmc.py | # MIT License
# Copyright (c) 2017 Rene Jean Corneille
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from numpy import zeros, reshape
from scipy.stats import rv_continuous
__all__ = ['Parameter','Parameters','MCMC']
class Parameter(object):
def __init__(self, name, distribution, size, current_value=None):
self.name = str(name)
self.distribution = distribution
self.size = size
self.current_value = current_value
@property
def current_value(self):
return self.__current_value
@current_value.setter
def current_value(self, current_value):
self.__current_value = current_value
def __str__(self):
return """
parameter name : %s
parameter distribution : %s
""" % (self.name, self.distribution.__str__())
def __len__(self):
return 1
def is_multivariate(self):
return self.size == (1,1)
class Parameters(object):
def __init__(self, list={}, hierarchy=[]):
self.list = list
self.hierarchy = hierarchy
@property
def parameters(self):
return self.__list
@parameters.setter
def parameters(self, list):
if not (list=={}):
self.__list = list
else:
self.__list = {}
@property
def hierarchy(self):
return self.__hierarchy
@hierarchy.setter
def hierarchy(self, hierarchy):
self.__hierarchy = hierarchy
def __len__(self):
return len(self.list)
def __str__(self):
descr = '(parameters: ----------------------- \n'
descr += ', \n'.join(['name: %s, distribution: %s, size: %s' % (str(l.name), l.distribution.__str__(), l.size) for l in self.list.values()])
descr += '\n ----------------------- )'
return descr
def append(self, parameter):
self.list[parameter.name] = parameter
self.hierarchy.append(parameter.name)
class Distribution(rv_continuous):
pass
class MCMC(object):
def __init__(self, data, strategy):
self.data = data
self.strategy = strategy
self.simulations = None
def summary(self):
smry = ""
return smry
def distribution_parameters(self, parameter_name, *args, **kwargs):
return self.strategy.distribution_parameters(parameter_name, *args, **kwargs) # returns a dictionary
def generate(self, parameter_name):
return self.strategy.generate(parameter_name)
def output(self, burn, parameter_name):
return self.strategy.output(self.simulations, burn, parameter_name)
def define_parameters(self):
return self.strategy.define_parameters()
def initial_value(self,parameter_name):
return self.strategy.initial_value(parameter_name)
def run(self, number_simulations=100):
self.simulations = {key : zeros((param.size[0],param.size[1],number_simulations)) for (key, param) in self.strategy.parameters.list.items()}
for name in self.strategy.parameters.hierarchy:
self.strategy.parameters.list[name].current_value = self.initial_value(name)
for i in range(number_simulations):
print("== step %i ==" % (int(i+1),))
restart_step = True
while restart_step:
for name in self.strategy.parameters.hierarchy:
print("== parameter %s ==" % name)
try:
self.strategy.parameters.list[name].current_value = self.generate(name)
self.simulations[name][:,:,i] = self.strategy.parameters.list[name].current_value.reshape(self.strategy.parameters.list[name].size)
restart_step = False
except:
print("== restart step %i ==" % i)
restart_step = True
break
class ConvergenceAnalysis(object):
| Python | 0.000031 | |
4b561d710e9ad72ad94ffb1ff3ae37db668899e4 | Add generate_examples script | seq2seq/scripts/generate_examples.py | seq2seq/scripts/generate_examples.py | #! /usr/bin/env python
"""
Generates a TFRecords file given sequence-aligned source and target files.
Example Usage:
python ./generate_examples.py --source_file <SOURCE_FILE> \
--target_file <TARGET_FILE> \
--output_file <OUTPUT_FILE>
"""
import tensorflow as tf
tf.flags.DEFINE_string('source_file', None,
'File containing content in source language.')
tf.flags.DEFINE_string(
'target_file', None,
'File containing content in target language, parallel line by line to the'
'source file.')
tf.flags.DEFINE_string('output_file', None,
'File to output tf.Example TFRecords.')
FLAGS = tf.flags.FLAGS
def build_example(pair_id, source, target):
"""Transforms pair of 'source' and 'target' strings into a tf.Example.
Assumes that 'source' and 'target' are already tokenized.
Args:
pair_id: id of this pair of source and target strings.
source: a pretokenized source string.
target: a pretokenized target string.
Returns:
a tf.Example corresponding to the 'source' and 'target' inputs.
"""
pair_id = str(pair_id)
source_tokens = source.strip().split(' ')
target_tokens = target.strip().split(' ')
ex = tf.train.Example()
ex.features.feature['pair_id'].bytes_list.value.append(pair_id.encode('utf-8'))
ex.features.feature['source_len'].int64_list.value.append(len(source_tokens))
ex.features.feature['target_len'].int64_list.value.append(len(target_tokens))
source_tokens = [t.encode('utf-8') for t in source_tokens]
target_tokens = [t.encode('utf-8') for t in target_tokens]
ex.features.feature['source_tokens'].bytes_list.value.extend(source_tokens)
ex.features.feature['target_tokens'].bytes_list.value.extend(target_tokens)
return ex
def write_tfrecords(examples, output_file):
"""Writes a list of tf.Examples to 'output_file'.
Args:
examples: An iterator of tf.Example records
outputfile: path to the output file
"""
writer = tf.python_io.TFRecordWriter(output_file)
print('Creating TFRecords file at {}...'.format(output_file))
for row in examples:
writer.write(row.SerializeToString())
writer.close()
print('Wrote to {}'.format(output_file))
def generate_examples(source_file, target_file):
"""Creates an iterator of tf.Example records given aligned source and target files.
Args:
source_file: path to file with newline-separated source strings
target_file: path to file with newline-separated target strings
Returns:
An iterator of tf.Example objects.
"""
with open(source_file) as source_records:
with open(target_file) as target_records:
for i, (source, target) in enumerate(zip(source_records, target_records)):
if i % 10000 == 0:
print('Processed {} records'.format(i))
yield build_example(i, source, target)
def main(unused_argv):
#pylint: disable=unused-argument
examples = generate_examples(
FLAGS.source_file, FLAGS.target_file)
write_tfrecords(examples, FLAGS.output_file)
if __name__ == '__main__':
tf.app.run()
| Python | 0.000002 | |
edb28fffe19e2b0de3113b43aeb075119c9e5830 | Work in progress. Creating new data migration. | emgapi/migrations/0019_auto_20200110_1455.py | emgapi/migrations/0019_auto_20200110_1455.py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.24 on 2020-01-10 14:55
from __future__ import unicode_literals
from django.db import migrations
def create_download_description(apps, schema_editor):
DownloadDescriptionLabel = apps.get_model("emgapi", "DownloadDescriptionLabel")
downloads = (
("Phylum level taxonomies UNITE (TSV)", "Phylum level taxonomies UNITE"),
("Phylum level taxonomies ITSoneDB (TSV)", "Phylum level taxonomies ITSoneDB"),
("Taxonomic assignments UNITE (TSV)", "Taxonomic assignments UNITE"),
("Taxonomic assignments ITSoneDB (TSV)", "Taxonomic assignments ITSoneDB"),
)
_downloads = list()
for d in downloads:
_downloads.append(
DownloadDescriptionLabel(
description=d[0],
description_label=d[1]
)
)
DownloadDescriptionLabel.objects.bulk_create(_downloads)
def create_group_types(apps, schema_editor):
DownloadGroupType = apps.get_model("emgapi", "DownloadGroupType")
group_types = (
"Taxonomic analysis ITS",
"Taxonomic analysis ITSoneDB",
"Taxonomic analysis UNITE",
"Pathways and Systems",
# TODO: Do we need sub groups for the function and pathways
)
_groups = list()
for group_type in group_types:
_groups.append(
DownloadGroupType(group_type=group_type)
)
DownloadGroupType.objects.bulk_create(_groups)
class Migration(migrations.Migration):
dependencies = [
('emgapi', '0018_auto_20191105_1052'),
]
operations = [
migrations.RunPython(create_download_description),
migrations.RunPython(create_group_types)
]
| Python | 0 | |
d41274ce2a54d37c35f23c8c78de196e57667b0a | add google translate plugin | plugins_examples/translate.py | plugins_examples/translate.py | #!/usr/bin/env python
import sys
import re
from googletrans import Translator
translator = Translator()
line = sys.stdin.readline()
while line:
match = re.search('^:([^\s]+) PRIVMSG (#[^\s]+) :(.+)', line)
if not match:
line = sys.stdin.readline()
continue
who = match.group(1)
chan = match.group(2)
what = match.group(3).strip().strip('\r\n')
def reply(text):
print("PRIVMSG %s :%s" % (chan, text))
sys.stdout.flush()
if what[:10] == ':translate':
m2 = re.search('^:translate (.*)', what)
if not m2:
line = sys.stdin.readline()
continue
try:
reply(translator.translate(m2.group(1), dest='fr').text)
except:
reply('Oups!')
elif what[:4] == ':tr ':
m2 = re.search('^:tr (\w+) (\w+) (.+)', what)
if not m2:
line = sys.stdin.readline()
continue
try:
reply(translator.translate(m2.group(3), src=m2.group(1), dest=m2.group(2)).text)
except:
reply('Oups!')
line = sys.stdin.readline()
| Python | 0 | |
b450734eea74f5f3536a44ed40c006c3da13656c | Add diff.py | diff.py | diff.py | # vim: set et ts=4 sw=4 fdm=marker
"""
MIT License
Copyright (c) 2016 Jesse Hogan
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
from diff_match_patch import diff_match_patch
from entities import entity
from pdb import set_trace; B=set_trace
# TODO Write test
class diff(entity):
def __init__(self, data1, data2):
self._data1 = data1
self._data2 = data2
self._ps = None
self._dmp = None
@property
def _diff_match_patch(self):
if not self._dmp:
self._dmp = diff_match_patch()
return self._dmp;
@property
def _patches(self):
if self._ps == None:
dmp = self._diff_match_patch
diffs = dmp.diff_main(self._data1, self._data2)
dmp.diff_cleanupSemantic(diffs)
self._ps = dmp.patch_make(diffs)
return self._ps
def apply(self, data):
return patch_apply(self._patches, data)[0]
def __str__(self):
dmp = self._diff_match_patch
return dmp.patch_toText(self._patches)
| Python | 0.000001 | |
176af82121da5282842fd7e77809da9780ac57a5 | implement server pool. | rsocks/pool.py | rsocks/pool.py | from __future__ import unicode_literals
import logging
import contextlib
from .eventlib import GreenPool
from .utils import debug
__all__ = ['ServerPool']
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG if debug() else logging.INFO)
logger.addHandler(logging.StreamHandler())
class ServerPool(object):
def __init__(self):
self.pool = GreenPool()
self.servers = {}
@contextlib.contextmanager
def new_server(self, name, server_class, *args, **kwargs):
server = server_class(*args, **kwargs)
yield server
self.servers[name] = server
def loop(self):
for name, server in self.servers.items():
logger.info('Prepared "%s"' % name)
self.pool.spawn(server.loop)
try:
self.pool.waitall()
except (SystemExit, KeyboardInterrupt):
logger.info('Exit')
| Python | 0 | |
416d2b0ffd617c8c6e58360fefe554ad7dc3057b | add example for discovering existing connections | examples/connections.py | examples/connections.py |
"""
Print out a list of existing Telepathy connections.
"""
import dbus.glib
import telepathy
prefix = 'org.freedesktop.Telepathy.Connection.'
if __name__ == '__main__':
for conn in telepathy.client.Connection.get_connections():
conn_iface = conn[telepathy.CONN_INTERFACE]
handle = conn_iface.GetSelfHandle()
print conn_iface.InspectHandles(
telepathy.CONNECTION_HANDLE_TYPE_CONTACT, [handle])[0]
print ' Protocol:', conn_iface.GetProtocol()
print ' Name:', conn.service_name[len(prefix):]
print
| Python | 0.000001 | |
ff53f699ac371266791487f0b863531dd8f5236a | Add hug 'hello_world' using to be developed support for optional URLs | examples/hello_world.py | examples/hello_world.py | import hug
@hug.get()
def hello_world():
return "Hello world"
| Python | 0.000009 | |
397ab61df61d5acac46cf60ede38fa928fdacd7c | Create solution.py | data_structures/linked_list/problems/pos_num_to_linked_list/solution.py | data_structures/linked_list/problems/pos_num_to_linked_list/solution.py | import LinkedList
# Linked List Node inside the LinkedList module is declared as:
#
# class Node:
# def __init__(self, val, nxt=None):
# self.val = val
# self.nxt = nxt
#
def ConvertPositiveNumToLinkedList(val: int) -> LinkedList.Node:
node = None
while True:
dig = val % 10
val //= 10
prev = LinkedList.Node(dig, node)
node = prev
if val == 0:
break
return node
| Python | 0.000018 | |
724bc46c85e6ea75ac8d786f4d1706b74df8f330 | Create dictid.py | dictid.py | dictid.py | a = (1,2)
b = [1,2]
c = {a: 1} # outcome: c= {(1,2): 1}
d = {b: 1} # outcome: error
| Python | 0.000001 | |
0fb7a5559f525ab1149ac41d4b399442f7649664 | add script to show statistics (number of chunks, data volume) | scale_stats.py | scale_stats.py | #! /usr/bin/env python3
#
# Copyright (c) 2016, 2017, Forschungszentrum Juelich GmbH
# Author: Yann Leprince <y.leprince@fz-juelich.de>
#
# This software is made available under the MIT licence, see LICENCE.txt.
import collections
import json
import math
import os
import os.path
import sys
import numpy as np
# Binary (IEC) multipliers paired with their prefix symbols, smallest first.
SI_PREFIXES = [
    (1024 ** 0, ""),
    (1024 ** 1, "ki"),
    (1024 ** 2, "Mi"),
    (1024 ** 3, "Gi"),
    (1024 ** 4, "Ti"),
    (1024 ** 5, "Pi"),
    (1024 ** 6, "Ei"),
]


def readable(count):
    """Format *count* with an IEC binary prefix, e.g. "5.0 Mi"."""
    for factor, prefix in SI_PREFIXES:
        # One decimal place for small quotients, none once the integer part
        # already has two or more digits.
        spec = ".0f" if count > 10 * factor else ".1f"
        text = format(count / factor, spec)
        if len(text) <= 3:
            return text + " " + prefix
    # Quotient too wide even for the largest prefix: fall back to that
    # prefix with thousands separators.
    factor, prefix = SI_PREFIXES[-1]
    return "{:,.0f} {}".format(count / factor, prefix)
def show_scales_info(info):
    """Print chunk/directory counts and raw sizes for every scale in *info*.

    *info* is a parsed Neuroglancer "info" dict with "data_type",
    "num_channels" and a list of "scales"; totals over all scales and all
    chunk layouts are printed at the end.
    """
    total_size = 0
    total_chunks = 0
    total_directories = 0
    # Force little-endian: the dtype only matters for its item size here.
    dtype = np.dtype(info["data_type"]).newbyteorder("<")
    num_channels = info["num_channels"]
    for scale in info["scales"]:
        scale_name = scale["key"]
        size = scale["size"]
        # Each chunk layout of a scale is reported (and totalled) separately.
        for chunk_size in scale["chunk_sizes"]:
            # Chunks along each axis, rounding up for partial chunks.
            size_in_chunks = [(s - 1) // cs + 1 for s, cs in zip(size, chunk_size)]
            num_chunks = np.prod(size_in_chunks)
            # x*(1 + y) = x top-level entries plus one entry per (x, y) pair
            # -- presumably the on-disk directory tree; confirm against the
            # chunk file layout.
            num_directories = size_in_chunks[0] * (1 + size_in_chunks[1])
            size_bytes = np.prod(size) * dtype.itemsize * num_channels
            print("Scale {}, chunk size {}:"
                  " {:,d} chunks, {:,d} directories, raw uncompressed size {}B"
                  .format(scale_name, chunk_size,
                          num_chunks, num_directories, readable(size_bytes)))
            total_size += size_bytes
            total_chunks += num_chunks
            total_directories += num_directories
    print("---")
    print("Total: {:,d} chunks, {:,d} directories, raw uncompressed size {}B"
          .format(total_chunks, total_directories, readable(total_size)))
def show_scale_file_info(input_info_filename):
    """Load a Neuroglancer "info" JSON file and print its scale statistics."""
    with open(input_info_filename) as info_file:
        show_scales_info(json.load(info_file))
def parse_command_line(argv):
    """Parse the script's command line (argv[0] is ignored)."""
    import argparse
    cli_parser = argparse.ArgumentParser(
        description="""\
Show information about a list of scales in Neuroglancer "info" JSON file format
""")
    cli_parser.add_argument("info_file", nargs="?", default="./info",
                            help="JSON file containing the information")
    return cli_parser.parse_args(argv[1:])
def main(argv):
    """The script's entry point."""
    options = parse_command_line(argv)
    # A falsy return (None) from the display function maps to exit code 0.
    status = show_scale_file_info(options.info_file)
    return status or 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
| Python | 0 | |
7d574c1f6d194df1f2b2009fb2e48fbaacaca873 | Add migration for_insert_base | oedb_datamodels/versions/6887c442bbee_insert_base.py | oedb_datamodels/versions/6887c442bbee_insert_base.py | """Add _insert_base
Revision ID: 6887c442bbee
Revises: 3886946416ba
Create Date: 2019-04-25 16:09:20.572057
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6887c442bbee'
down_revision = '3886946416ba'
branch_labels = None
depends_on = None
def upgrade():
    """Create the bookkeeping table public._insert_base.

    The columns track a submitted change: free-text message, submitting
    user, submission time (defaults to now()), automatic/human review
    flags, a short change type, and whether the change has been applied.
    """
    op.create_table('_insert_base',
    sa.Column('_id', sa.BigInteger(), autoincrement=True, nullable=False),
    sa.Column('_message', sa.Text(), nullable=True),
    sa.Column('_user', sa.String(length=50), nullable=True),
    sa.Column('_submitted', sa.DateTime(), server_default=sa.text('now()'), nullable=True),
    sa.Column('_autocheck', sa.Boolean(), server_default=sa.text('false'), nullable=True),
    sa.Column('_humancheck', sa.Boolean(), server_default=sa.text('false'), nullable=True),
    sa.Column('_type', sa.String(length=8), nullable=True),
    sa.Column('_applied', sa.Boolean(), server_default=sa.text('false'), nullable=True),
    sa.PrimaryKeyConstraint('_id'),
    schema='public'
    )
def downgrade():
    """Revert upgrade(): drop public._insert_base."""
    op.drop_table('_insert_base', schema='public')
| Python | 0.000002 | |
2ef707337adc3d0abc33ca638b2adb70a681bd12 | update for new API | doc/examples/filters/plot_denoise.py | doc/examples/filters/plot_denoise.py | """
====================
Denoising a picture
====================
In this example, we denoise a noisy version of the picture of the astronaut
Eileen Collins using the total variation and bilateral denoising filter.
These algorithms typically produce "posterized" images with flat domains
separated by sharp edges. It is possible to change the degree of posterization
by controlling the tradeoff between denoising and faithfulness to the original
image.
Total variation filter
----------------------
The result of this filter is an image that has a minimal total variation norm,
while being as close to the initial image as possible. The total variation is
the L1 norm of the gradient of the image.
Bilateral filter
----------------
A bilateral filter is an edge-preserving and noise reducing filter. It averages
pixels based on their spatial closeness and radiometric similarity.
"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, img_as_float
from skimage.restoration import denoise_tv_chambolle, denoise_bilateral

# Use a small crop of the astronaut image to keep the demo fast.
astro = img_as_float(data.astronaut())
astro = astro[220:300, 220:320]

# Add uniform noise scaled by the image's contrast, then clip back to the
# valid [0, 1] range for float images.
noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)

# 2x3 grid: top row mild denoising, bottom row stronger denoising + original.
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), sharex=True,
                       sharey=True, subplot_kw={'adjustable': 'box-forced'})
plt.gray()

ax[0, 0].imshow(noisy)
ax[0, 0].axis('off')
ax[0, 0].set_title('noisy')
ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
ax[0, 1].axis('off')
ax[0, 1].set_title('TV')
ax[0, 2].imshow(denoise_bilateral(noisy, sigma_color=0.05, sigma_spatial=15))
ax[0, 2].axis('off')
ax[0, 2].set_title('Bilateral')
# Larger weight / sigma_color values smooth more aggressively.
ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
ax[1, 0].axis('off')
ax[1, 0].set_title('(more) TV')
ax[1, 1].imshow(denoise_bilateral(noisy, sigma_color=0.1, sigma_spatial=15))
ax[1, 1].axis('off')
ax[1, 1].set_title('(more) Bilateral')
ax[1, 2].imshow(astro)
ax[1, 2].axis('off')
ax[1, 2].set_title('original')

fig.tight_layout()
plt.show()
| """
====================
Denoising a picture
====================
In this example, we denoise a noisy version of the picture of the astronaut
Eileen Collins using the total variation and bilateral denoising filter.
These algorithms typically produce "posterized" images with flat domains
separated by sharp edges. It is possible to change the degree of posterization
by controlling the tradeoff between denoising and faithfulness to the original
image.
Total variation filter
----------------------
The result of this filter is an image that has a minimal total variation norm,
while being as close to the initial image as possible. The total variation is
the L1 norm of the gradient of the image.
Bilateral filter
----------------
A bilateral filter is an edge-preserving and noise reducing filter. It averages
pixels based on their spatial closeness and radiometric similarity.
"""
import numpy as np
import matplotlib.pyplot as plt

from skimage import data, img_as_float
from skimage.restoration import denoise_tv_chambolle, denoise_bilateral

# Use a small crop of the astronaut image to keep the demo fast.
astro = img_as_float(data.astronaut())
astro = astro[220:300, 220:320]

# Add uniform noise scaled by the image's contrast, then clip back to the
# valid [0, 1] range for float images.
noisy = astro + 0.6 * astro.std() * np.random.random(astro.shape)
noisy = np.clip(noisy, 0, 1)

fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(8, 5), sharex=True,
                       sharey=True, subplot_kw={'adjustable': 'box-forced'})
plt.gray()

ax[0, 0].imshow(noisy)
ax[0, 0].axis('off')
ax[0, 0].set_title('noisy')
ax[0, 1].imshow(denoise_tv_chambolle(noisy, weight=0.1, multichannel=True))
ax[0, 1].axis('off')
ax[0, 1].set_title('TV')
# NOTE(review): `sigma_range` appears to be the older name of the bilateral
# filter's colour sigma (renamed `sigma_color` in later scikit-image
# releases) -- confirm against the pinned scikit-image version.
ax[0, 2].imshow(denoise_bilateral(noisy, sigma_range=0.05, sigma_spatial=15))
ax[0, 2].axis('off')
ax[0, 2].set_title('Bilateral')
ax[1, 0].imshow(denoise_tv_chambolle(noisy, weight=0.2, multichannel=True))
ax[1, 0].axis('off')
ax[1, 0].set_title('(more) TV')
ax[1, 1].imshow(denoise_bilateral(noisy, sigma_range=0.1, sigma_spatial=15))
ax[1, 1].axis('off')
ax[1, 1].set_title('(more) Bilateral')
ax[1, 2].imshow(astro)
ax[1, 2].axis('off')
ax[1, 2].set_title('original')

fig.tight_layout()
plt.show()
| Python | 0 |
9e6a016c5a59b25199426f6825b2c83571997e68 | Refactor buildbot tests so that they can be used downstream. | build/android/buildbot/tests/bb_run_bot_test.py | build/android/buildbot/tests/bb_run_bot_test.py | #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
# Directory containing bb_run_bot.py (the parent of this tests/ directory);
# it must be on sys.path before bb_run_bot can be imported.
BUILDBOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILDBOT_DIR)
import bb_run_bot
def RunBotProcesses(bot_process_map):
  """Wait for each bot subprocess and aggregate their exit codes.

  Args:
    bot_process_map: iterable of (bot_id, subprocess.Popen) pairs.

  Returns:
    0 if every bot exited cleanly, otherwise the bitwise OR of the
    non-zero return codes.
  """
  code = 0
  for bot, proc in bot_process_map:
    _, err = proc.communicate()
    # OR the codes so any single failure makes the overall result non-zero.
    code |= proc.returncode
    if proc.returncode != 0:
      print 'Error running the bot script with id="%s"' % bot, err
  return code
def main():
  """Launch bb_run_bot.py in --testing mode for every known bot id."""
  procs = [
      (bot, subprocess.Popen(
          [os.path.join(BUILDBOT_DIR, 'bb_run_bot.py'), '--bot-id', bot,
           '--testing'], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
      for bot in bb_run_bot.GetBotStepMap()]
  return RunBotProcesses(procs)


if __name__ == '__main__':
  sys.exit(main())
| #!/usr/bin/env python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
# Directory containing bb_run_bot.py (the parent of this tests/ directory);
# it must be on sys.path before bb_run_bot can be imported.
BUILDBOT_DIR = os.path.join(os.path.dirname(__file__), '..')
sys.path.append(BUILDBOT_DIR)
import bb_run_bot
def RunBotsWithTesting(bot_step_map):
  """Run bb_run_bot.py with --testing for each bot id and collect results.

  Args:
    bot_step_map: iterable of bot ids (the keys of the bot step map).

  Returns:
    0 if every bot exited cleanly, otherwise the bitwise OR of the
    non-zero return codes.
  """
  code = 0
  procs = [
      (bot, subprocess.Popen(
          [os.path.join(BUILDBOT_DIR, 'bb_run_bot.py'), '--bot-id', bot,
           '--testing'], stdout=subprocess.PIPE, stderr=subprocess.PIPE))
      for bot in bot_step_map]
  for bot, proc in procs:
    _, err = proc.communicate()
    # OR the codes so any single failure makes the overall result non-zero.
    code |= proc.returncode
    if proc.returncode != 0:
      print 'Error running bb_run_bot with id="%s"' % bot, err
  return code
def main():
  """Run every configured bot in testing mode; return combined exit code."""
  return RunBotsWithTesting(bb_run_bot.GetBotStepMap())


if __name__ == '__main__':
  sys.exit(main())
| Python | 0.000099 |
eb9f9d8bfa5ea278e1fb39c59ed660a223b1f6a9 | Add flask api app creation to init | api/__init__.py | api/__init__.py | from flask_sqlalchemy import SQLAlchemy
import connexion
from config import config
# Shared SQLAlchemy instance; bound to the application in create_app().
db = SQLAlchemy()
def create_app(config_name):
    """Build and return the Flask application wrapped by a Connexion API.

    *config_name* selects an entry of the ``config`` mapping imported from
    the project's config module (keys presumably "development"/"testing"
    etc. -- confirm in config.py).
    """
    # Connexion wires the routes from the OpenAPI spec under api/swagger/.
    app = connexion.FlaskApp(__name__, specification_dir='swagger/')
    app.add_api('swagger.yaml')
    # The underlying plain Flask app lives on .app; configure that object.
    application = app.app
    application.config.from_object(config[config_name])
    db.init_app(application)
    return application
from api.api import *
| Python | 0.000001 | |
c10eb3861daf48c13ec854bd210db5d5e1163b11 | Add LotGroupAutocomplete | livinglots_lots/autocomplete_light_registry.py | livinglots_lots/autocomplete_light_registry.py | from autocomplete_light import AutocompleteModelBase, register
from livinglots import get_lotgroup_model
class LotGroupAutocomplete(AutocompleteModelBase):
    """Autocomplete over lot group names, restricted to staff users."""
    autocomplete_js_attributes = {'placeholder': 'lot group name',}
    search_fields = ('name',)
    def choices_for_request(self):
        # Non-staff users get no suggestions at all.
        choices = super(LotGroupAutocomplete, self).choices_for_request()
        if not self.request.user.is_staff:
            choices = choices.none()
        return choices
register(get_lotgroup_model(), LotGroupAutocomplete)
| Python | 0 | |
a5081ac307e037caee6bbd1add49d4c0d9424353 | Fix wake_on_lan for german version of Windows 10 (#6397) (#6398) | homeassistant/components/switch/wake_on_lan.py | homeassistant/components/switch/wake_on_lan.py | """
Support for wake on lan.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.wake_on_lan/
"""
import logging
import platform
import subprocess as sp
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.const import (CONF_HOST, CONF_NAME)
REQUIREMENTS = ['wakeonlan==0.2.2']
_LOGGER = logging.getLogger(__name__)
CONF_MAC_ADDRESS = 'mac_address'
CONF_OFF_ACTION = 'turn_off'
DEFAULT_NAME = 'Wake on LAN'
# Ping timeout in seconds (milliseconds on Windows, converted at use site).
DEFAULT_PING_TIMEOUT = 1
# Platform config: MAC address is mandatory; host (for status polling),
# display name and a turn-off script are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MAC_ADDRESS): cv.string,
    vol.Optional(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up a wake on lan switch.

    Reads the validated platform config (name, optional host used for
    status polling, target MAC address, optional turn-off script) and
    registers a single WOLSwitch entity.
    """
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    mac_address = config.get(CONF_MAC_ADDRESS)
    off_action = config.get(CONF_OFF_ACTION)
    add_devices([WOLSwitch(hass, name, host, mac_address, off_action)])
class WOLSwitch(SwitchDevice):
    """Representation of a wake on lan switch.

    Turning on sends a WOL magic packet to the configured MAC address;
    turning off runs the optional user-supplied script.  State is polled
    by pinging the configured host.
    """

    def __init__(self, hass, name, host, mac_address, off_action):
        """Initialize the WOL switch."""
        from wakeonlan import wol
        self._hass = hass
        self._name = name
        self._host = host
        self._mac_address = mac_address
        # Optional script executed on turn_off; None disables turn_off.
        self._off_script = Script(hass, off_action) if off_action else None
        self._state = False
        self._wol = wol
        self.update()

    @property
    def should_poll(self):
        """Poll for status regularly."""
        return True

    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state

    @property
    def name(self):
        """The name of the switch."""
        return self._name

    def turn_on(self):
        """Turn the device on."""
        self._wol.send_magic_packet(self._mac_address)

    def turn_off(self):
        """Turn the device off if an off action is present."""
        if self._off_script is not None:
            self._off_script.run()

    def update(self):
        """Check if device is on and update the state.

        The device counts as on when a single ping to its host succeeds
        (exit status 0).
        """
        # Build the command as an argument list: subprocess.call() with a
        # plain string only works on Windows; on POSIX it would try to exec
        # a file literally named "ping -c 1 ...".  A list also keeps the
        # host string out of any shell interpretation.
        if platform.system().lower() == 'windows':
            ping_cmd = ['ping', '-n', '1', '-w',
                        str(DEFAULT_PING_TIMEOUT * 1000), self._host]
        else:
            ping_cmd = ['ping', '-c', '1', '-W',
                        str(DEFAULT_PING_TIMEOUT), self._host]
        status = sp.call(ping_cmd, stdout=sp.DEVNULL)
        self._state = not bool(status)
| """
Support for wake on lan.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.wake_on_lan/
"""
import logging
import platform
import subprocess as sp
import voluptuous as vol
from homeassistant.components.switch import (SwitchDevice, PLATFORM_SCHEMA)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.const import (CONF_HOST, CONF_NAME)
REQUIREMENTS = ['wakeonlan==0.2.2']
_LOGGER = logging.getLogger(__name__)
CONF_MAC_ADDRESS = 'mac_address'
CONF_OFF_ACTION = 'turn_off'
DEFAULT_NAME = 'Wake on LAN'
# Ping timeout in seconds (milliseconds on Windows, converted at use site).
DEFAULT_PING_TIMEOUT = 1
# Platform config: MAC address is mandatory; host (for status polling),
# display name and a turn-off script are optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Required(CONF_MAC_ADDRESS): cv.string,
    vol.Optional(CONF_HOST): cv.string,
    vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
    vol.Optional(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
})
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up a wake on lan switch.

    Reads the validated platform config (name, optional host used for
    status polling, target MAC address, optional turn-off script) and
    registers a single WOLSwitch entity.
    """
    name = config.get(CONF_NAME)
    host = config.get(CONF_HOST)
    mac_address = config.get(CONF_MAC_ADDRESS)
    off_action = config.get(CONF_OFF_ACTION)
    add_devices([WOLSwitch(hass, name, host, mac_address, off_action)])
class WOLSwitch(SwitchDevice):
    """Representation of a wake on lan switch.

    Turning on sends a WOL magic packet to the configured MAC address;
    turning off runs the optional user-supplied script.  State is polled
    by pinging the configured host.
    """
    def __init__(self, hass, name, host, mac_address, off_action):
        """Initialize the WOL switch."""
        from wakeonlan import wol
        self._hass = hass
        self._name = name
        self._host = host
        self._mac_address = mac_address
        # Optional script executed on turn_off; None disables turn_off.
        self._off_script = Script(hass, off_action) if off_action else None
        self._state = False
        self._wol = wol
        self.update()
    @property
    def should_poll(self):
        """Poll for status regularly."""
        return True
    @property
    def is_on(self):
        """Return true if switch is on."""
        return self._state
    @property
    def name(self):
        """The name of the switch."""
        return self._name
    def turn_on(self):
        """Turn the device on."""
        self._wol.send_magic_packet(self._mac_address)
    def turn_off(self):
        """Turn the device off if an off action is present."""
        if self._off_script is not None:
            self._off_script.run()
    def update(self):
        """Check if device is on and update the state."""
        if platform.system().lower() == 'windows':
            ping_cmd = 'ping -n 1 -w {} {}'.format(
                DEFAULT_PING_TIMEOUT * 1000, self._host)
        else:
            ping_cmd = 'ping -c 1 -W {} {}'.format(
                DEFAULT_PING_TIMEOUT, self._host)
        # getstatusoutput runs through the shell; exit status 0 means the
        # host answered.  NOTE(review): the host string is interpolated
        # unescaped into a shell command -- confirm it is validated upstream.
        status = sp.getstatusoutput(ping_cmd)[0]
        self._state = not bool(status)
| Python | 0 |
2527683522394c823bc100c75f1ce4885949136e | add paths module for other modules to find paths from one place | glim/paths.py | glim/paths.py | import os
from termcolor import colored
# Paths are resolved from the current working directory, so these values are
# only correct when the CLI is invoked from the project root.
PROJECT_PATH = os.getcwd()
APP_PATH = os.path.join(PROJECT_PATH, 'app')
EXT_PATH = os.path.join(PROJECT_PATH, 'ext')
# Root of the installed glim package itself (two levels above this module).
GLIM_ROOT_PATH = os.path.dirname(os.path.dirname(__file__))
PROTO_PATH = os.path.join(os.path.dirname(__file__), 'prototype')
# NOTE(review): mid-file imports; the `p` (pprint) alias looks like a
# debugging leftover -- it is not used in this module's visible code.
import sys
from pprint import pprint as p
def configure_sys_path():
    """Put the appropriate root directory at the front of sys.path.

    When the glim repository itself is the current project (framework
    development mode), drop sys.path[0] (normally the script's own
    directory) and prefer the glim root; otherwise prefer the user's
    project directory.  Note: Python 2 print statement below.
    """
    if GLIM_ROOT_PATH == PROJECT_PATH:
        print colored('Development mode is on, sys.path is being configured', 'yellow')
        sys.path.pop(0)
        sys.path.insert(0, GLIM_ROOT_PATH)
    else:
        sys.path.insert(0, PROJECT_PATH)
def controllers():
    """Return the filesystem path of the app's controllers module."""
    filename = 'controllers.py'
    return os.path.join(APP_PATH, filename)
def config(env):
    """Return the path of the config module for environment *env*."""
    filename = '%s.py' % env
    return os.path.join(APP_PATH, 'config', filename)
def start():
    """Return the filesystem path of the app's start module."""
    filename = 'start.py'
    return os.path.join(APP_PATH, filename)
def commands():
    """Return the filesystem path of the app's commands module."""
    filename = 'commands.py'
    return os.path.join(APP_PATH, filename)
def routes():
    """Return the filesystem path of the app's routes module."""
    filename = 'routes.py'
    return os.path.join(APP_PATH, filename)
def extensions(ext):
    """Return the path of extension *ext*'s main module: ext/<ext>/<ext>.py.

    *ext* is expected to be the extension's name as a string.
    """
    # Dropped the redundant '%s' % ext formatting -- plain concatenation is
    # equivalent for string names.
    return os.path.join(EXT_PATH, ext, ext + '.py')
def extension_commands(ext):
return os.path.join(EXT_PATH, '%s' % ext, 'commands.py') | Python | 0 | |
24f21146b01ff75a244df40d1626c54883abeb1a | Add helper-lib for json object conversion and split dicts | lib/helpers.py | lib/helpers.py | #! /usr/bin/env python2.7
import datetime
def typecast_json(o):
    """Return a JSON-serializable version of *o*.

    datetime.datetime and datetime.date instances become their ISO-8601
    string form; every other value is returned unchanged.  Suitable as the
    ``default=`` hook for ``json.dumps``.
    """
    # datetime.datetime subclasses datetime.date, so one check covers both.
    if isinstance(o, datetime.date):
        return o.isoformat()
    return o
def split_dict(src, keys):
    """Return a new dict with only the entries of *src* whose key is in *keys*."""
    wanted = set(src.keys()) & set(keys)
    return dict((k, src[k]) for k in wanted)
| Python | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.