commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
02156d3e9140b7f8f61b79816891ede2fff2cc49 | rename models to properties | properties.py | properties.py | import ConfigParser
import os
import sys
# Target subreddit the bot operates on.
subreddit = 'taigeilove'
# User agent string identifying the bot to the reddit API.
user_agent = 'Python:whalehelpbot:v1.0 (by /u/Noperative)'
# Keyword lists used to classify posts/comments; populated elsewhere
# (declared empty here).
general_words = []
first_time_words = []
expedition_words = []
quest_words = []
| Python | 0.000005 | |
e74c3273f840afbca25936083abdfb6577b4fdd0 | Devuelve lista de etiquetas y atributos | smallsmilhandler.py | smallsmilhandler.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#CELIA GARCIA FERNANDEz
from xml.sax import make_parser
from xml.sax.handler import ContentHandler
class SmallSMILHandler(ContentHandler):
    """SAX handler that collects SMIL tags of interest together with a
    fixed set of attributes per tag (missing attributes become "")."""

    def __init__(self):
        # Collected tag dictionaries, in document order.
        self.lista = []
        # Tag names we care about.
        self.etiquetas = ['root-layout', 'region', 'img', 'audio', 'textstream']
        # Attributes to extract for each tag.
        self.attributosD = {
            'root-layout': ['width', 'height'],
            'region': ['id', 'top', 'left'],
            # BUG FIX: the SMIL image source attribute is 'src', not 'scr'.
            'img': ['src', 'region', 'begin', 'dur'],
            'audio': ['src', 'begin', 'dur'],
            'textstream': ['src', 'region']
        }

    def startElement(self, name, attrs):
        """Record *name* and its relevant attributes if it is a known tag."""
        if name in self.etiquetas:
            diccionario = {"name": name}
            for key in self.attributosD[name]:
                diccionario[key] = attrs.get(key, "")
            self.lista.append(diccionario)

    def get_tags(self):
        """Return the list of collected tag dictionaries."""
        return self.lista
if __name__ == "__main__":
    # Parse the sample SMIL file and dump the collected tag dictionaries.
    # NOTE(review): Python 2 print statement; use print() under Python 3.
    parser = make_parser()
    small = SmallSMILHandler()
    parser.setContentHandler(small)
    parser.parse(open('karaoke.smil'))
    print small.get_tags()
| Python | 0 | |
024e7fe473a19a16b7e34203aef2841af7a3aad4 | add markreads script | etc/markreads.py | etc/markreads.py | #!/usr/bin/env python
import pysam
import sys
def markreads(bamfn, outfn):
    """Copy BAM file *bamfn* to *outfn*, appending a BS:1 tag to every read.

    :param bamfn: path to the input BAM (read in 'rb' mode).
    :param outfn: path for the output BAM (written with the same header).
    """
    bam = pysam.AlignmentFile(bamfn, 'rb')
    out = pysam.AlignmentFile(outfn, 'wb', template=bam)
    try:
        for read in bam.fetch(until_eof=True):
            # Append rather than overwrite so existing tags are kept.
            tags = read.tags
            tags.append(('BS', 1))
            read.tags = tags
            out.write(read)
    finally:
        # BUG FIX: the files were never closed; an unclosed output BAM may
        # be left without its EOF block / with unflushed buffers.
        out.close()
        bam.close()
# Command line entry point: markreads.py <input BAM> <output BAM>
if len(sys.argv) == 3:
    markreads(*sys.argv[1:])
else:
    # NOTE(review): Python 2 print statement.
    print 'usage:', sys.argv[0], '<input BAM> <output BAM>'
| Python | 0 | |
b86ad075f690718e528364bedce891a3a4debdaf | Add a basic example API | examples/api.py | examples/api.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
"""Simple REST API for Imap-CLI."""
import copy
import json
import logging
import re
from wsgiref import simple_server
from webob.dec import wsgify
from webob.exc import status_map
import imap_cli
from imap_cli import config
from imap_cli import const
from imap_cli import fetch
from imap_cli import search
ctx = config.new_context_from_file()
log = logging.getLogger('Imap-CLI API')
@wsgify
def read_controller(req):
    """GET /v1/read/<uid>: fetch one mail by UID and return it as JSON.

    The query parameter ``directory`` selects the mailbox (defaults to the
    configured default directory).  Binary attachment payloads are stripped
    from the response.
    """
    params = req.params
    inputs = {
        'directory': params.get('directory') or const.DEFAULT_DIRECTORY,
        'uid': req.urlvars.get('uid'),
    }
    if inputs['uid'] is None:
        return 'You need to specify an UID'

    imap_cli.change_dir(ctx, inputs['directory'] or const.DEFAULT_DIRECTORY)
    fetched_mail = fetch.read(ctx, inputs['uid'])
    if fetched_mail is None:
        return 'Mail was not fetched, an error occured'

    # Deep-copy before stripping so the object returned by fetch.read() is
    # not mutated.
    return_json = copy.deepcopy(fetched_mail)
    for part in return_json['parts']:
        if not part['content_type'].startswith('text'):
            # Drop non-text payloads to keep the JSON response small.
            del part['data']
    return json.dumps(return_json, indent=2)
@wsgify
def search_controller(req):
    """GET /v1/search (also bound to /v1/list): search mails by directory,
    tags and free text, returning a JSON list of mail summaries.

    Query parameters: ``directory`` (single), ``tag`` (repeatable),
    ``text`` (single).
    """
    params = req.params
    inputs = {
        'directory': params.get('directory') or const.DEFAULT_DIRECTORY,
        'tags': params.getall('tag') or None,
        'text': params.get('text') or None,
    }

    search_criterion = search.prepare_search(
        ctx,
        directory=inputs['directory'],
        tags=inputs['tags'],
        text=inputs['text'],
    )
    # An empty criterion list means "match everything".
    mail_set = search.fetch_uids(ctx, search_criterion=search_criterion or [])
    mails_info = list(
        search.fetch_mails_info(ctx, directory=inputs['directory'], mail_set=mail_set)
    )
    return json.dumps(mails_info, indent=2)
@wsgify
def status_controller(req):
    """GET /v1/status.json: dump the status of every mailbox as JSON."""
    statuses = list(imap_cli.status(ctx))
    return json.dumps(statuses, indent=2)
# (HTTP method, path regex, controller) table; compiled into the module
# global ``routes`` consumed by router() in the __main__ block below.
routings = [
    ('GET', '^/v1/status.json$', status_controller),
    ('GET', '^/v1/list/?$', search_controller),
    ('GET', '^/v1/search/?$', search_controller),
    ('GET', '^/v1/read/(?P<uid>.+)?$', read_controller),
]
@wsgify
def router(req):
    """Dispatch request to controllers."""
    split_path_info = req.path_info.split('/')
    assert not split_path_info[0], split_path_info
    # NOTE(review): iterates the module-global ``routes`` built in the
    # __main__ block below (not ``routings`` directly).
    for methods, regex, app, vars in routes:
        if methods is None or req.method in methods:
            match = regex.match(req.path_info)
            if match is not None:
                if getattr(req, 'urlvars', None) is None:
                    req.urlvars = {}
                # NOTE(review): ``value.decode('utf-8')`` and ``iteritems``
                # are Python 2 only; named groups are already str on Py3.
                req.urlvars.update(dict(
                    (name, value.decode('utf-8') if value is not None else None)
                    for name, value in match.groupdict().iteritems()
                ))
                req.urlvars.update(vars)
                # Shift the matched prefix from path_info to script_name so
                # the controller only sees the remainder of the path.
                req.script_name += req.path_info[:match.end()]
                req.path_info = req.path_info[match.end():]
                return req.get_response(app)
    return status_map[404]()
if __name__ == '__main__':
    # Compile the routing table: normalize a single method string to a
    # tuple, pre-compile path regexes, default the urlvars dict.
    routes = []
    for routing in routings:
        methods, regex, app = routing[:3]
        # NOTE(review): ``basestring`` is Python 2 only.
        if isinstance(methods, basestring):
            methods = (methods,)
        vars = routing[3] if len(routing) >= 4 else {}
        routes.append((methods, re.compile(regex), app, vars))

    # Connect once and serve forever on localhost:8000.
    imap_cli.connect(ctx)
    httpd = simple_server.make_server('127.0.0.1', 8000, router)
    httpd.serve_forever()
| Python | 0.000129 | |
0bd69e17d75cf1ecaa53153fd07abf2e139f57b7 | add function0-input.py | input/function0-input.py | input/function0-input.py | # -*- coding: utf-8 -*-
# Author Frank Hu
# iDoulist Function 0 - input
import urllib2
# Fetch the Douban list page and dump its raw HTML.
# NOTE(review): Python 2 only (urllib2 + print statement); use
# urllib.request and print() under Python 3.
response = urllib2.urlopen("http://www.douban.com/doulist/38390646/")
print response.read()
70d5b47a66d883187574c409ac08ece24277d292 | Add the test.py example that is cited in the cytomine.org documentation | examples/test.py | examples/test.py | # -*- coding: utf-8 -*-
# * Copyright (c) 2009-2020. Authors: see NOTICE file.
# *
# * Licensed under the Apache License, Version 2.0 (the "License");
# * you may not use this file except in compliance with the License.
# * You may obtain a copy of the License at
# *
# * http://www.apache.org/licenses/LICENSE-2.0
# *
# * Unless required by applicable law or agreed to in writing, software
# * distributed under the License is distributed on an "AS IS" BASIS,
# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# * See the License for the specific language governing permissions and
# * limitations under the License.
# * This script is just a simple example used to verify if the
# * Cytomine Python Client is correctly installed.
# * Using a correct Cytomine instance URL, and keys of a user, it will just fetch his username.
import sys
from argparse import ArgumentParser
__author__ = "Renaud Hoyoux <renaud.hoyoux@cytomine.coop>"
if __name__ == '__main__':
    # Imported here so the script fails with a clear error only when run.
    from cytomine import Cytomine
    from cytomine.models.user import *

    parser = ArgumentParser(prog="Cytomine Python client example")

    # Cytomine
    parser.add_argument('--cytomine_host', dest='host', default='demo.cytomine.be', help="The Cytomine host")
    parser.add_argument('--cytomine_public_key', dest='public_key', help="The Cytomine public key")
    parser.add_argument('--cytomine_private_key', dest='private_key', help="The Cytomine private key")
    params, other = parser.parse_known_args(sys.argv[1:])

    # Connect (context manager handles the session) and fetch the profile
    # of the authenticated user as an installation smoke test.
    with Cytomine(host=params.host, public_key=params.public_key, private_key=params.private_key) as cytomine:
        # Get the connected user
        user = CurrentUser().fetch()
        print(user)
1c2bde23ffc6188fe839b36011775663f86c8919 | Create config.py | config.py | config.py | # -*- coding: utf-8 -*-
import configparser
class Config:
    """Expose the sections of an INI file as class attributes.

    After ``Config.load()`` each section becomes a ``Config.<section>``
    dict mapping option names (lower-cased by configparser) to strings.
    """

    # Parser instance kept around after load(); None until load() runs.
    _cp = None

    @staticmethod
    def load(path="config.ini"):
        """Read *path* and attach one dict per section to the Config class.

        :param path: INI file to read; defaults to "config.ini" for
                     backward compatibility.  Missing files are silently
                     ignored (standard configparser.read semantics).
        """
        Config._cp = configparser.ConfigParser()
        Config._cp.read(path)
        for category in Config._cp.sections():
            options = {op: Config._cp[category][op]
                       for op in Config._cp.options(category)}
            setattr(Config, category, options)

Config.load()
| Python | 0.000002 | |
def cleanup_docstring(docstring):
    """Normalize *docstring* by stripping surrounding whitespace from every
    line while preserving the line structure.

    :param docstring: raw (possibly indented) docstring text.
    :return: the docstring with each line stripped, joined by newlines.
    """
    # Strip each line and rejoin; the original built this via a redundant
    # empty-string accumulator.
    stripped = (line.strip() for line in docstring.split("\n"))
    return "\n".join(stripped)
| Python | 0.000001 | |
a36e013e9b1d7133ed98cb2f087f3cb3dc53de69 | Add 5-2 to working dictionary. | models/tutorials/image/cifar10/5-2cnn_advance.py | models/tutorials/image/cifar10/5-2cnn_advance.py | import cifar10, cifar10_input
import tensorflow as tf
import numpy as np
import time
# Training hyper-parameters and the on-disk location of the CIFAR-10 binaries.
max_steps = 3000
batch_size = 128
data_dir = '/tmp/cifar10_data/cifar-10-batches-bin'
def variable_with_weight_loss(shape, stddev, wl):
    """Create a truncated-normal weight variable and, when *wl* is given,
    register an L2 weight-decay term (scaled by wl) in the 'losses'
    collection so loss() below can sum it into the total loss."""
    var = tf.Variable(tf.truncated_normal(shape, stddev = stddev))
    if wl is not None:
        weight_loss = tf.multiply(tf.nn.l2_loss(var), wl, name = 'weight_loss')
        tf.add_to_collection('losses', weight_loss)
    return var
# Download/extract CIFAR-10 if needed and build the input pipelines:
# distorted (augmented) batches for training, clean ones for evaluation.
cifar10.maybe_download_and_extract()
images_train, labels_train = cifar10_input.distorted_inputs(data_dir = data_dir, batch_size = batch_size)
images_test, labels_test = cifar10_input.inputs(eval_data = True, data_dir = data_dir, batch_size = batch_size)
# Placeholders for a batch of 24x24 RGB crops and their integer labels.
image_holder = tf.placeholder(tf.float32, [batch_size, 24, 24, 3])
label_holder = tf.placeholder(tf.int32, [batch_size])
# Conv layer 1: 5x5x3 -> 64 maps, ReLU, 3x3/2 max-pool, then LRN.
weight1 = variable_with_weight_loss(shape = [5, 5, 3, 64], stddev = 5e-2, wl = 0.0)
kernel1 = tf.nn.conv2d(image_holder, weight1, [1, 1, 1, 1], padding = 'SAME')
bias1 = tf.Variable(tf.constant(0.0, shape = [64]))
conv1 = tf.nn.relu(tf.nn.bias_add(kernel1, bias1))
pool1 = tf.nn.max_pool(conv1, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = 'SAME')
norm1 = tf.nn.lrn(pool1, 4, bias = 1.0, alpha = 0.001 / 9.0, beta = 0.75)
# Conv layer 2: note LRN comes before pooling here (order swapped vs. 1).
weight2 = variable_with_weight_loss(shape = [5, 5, 64, 64], stddev = 5e-2, wl = 0.0)
kernel2 = tf.nn.conv2d(norm1, weight2, [1, 1, 1, 1], padding = 'SAME')
bias2 = tf.Variable(tf.constant(0.1, shape = [64]))
conv2 = tf.nn.relu(tf.nn.bias_add(kernel2, bias2))
norm2 = tf.nn.lrn(conv2, 4, bias = 1.0, alpha = 0.001 / 9.0, beta = 0.75)
pool2 = tf.nn.max_pool(norm2, ksize = [1, 3, 3, 1], strides = [1, 2, 2, 1], padding = 'SAME')
# Flatten, then two fully connected layers with L2 weight decay (wl=0.004).
reshape = tf.reshape(pool2, [batch_size, -1])
dim = reshape.get_shape()[1].value
weight3 = variable_with_weight_loss(shape = [dim, 384], stddev = 0.04, wl = 0.004)
bias3 = tf.Variable(tf.constant(0.1, shape = [384]))
local3 = tf.nn.relu(tf.matmul(reshape, weight3) + bias3)
weight4 = variable_with_weight_loss(shape = [384, 192], stddev = 0.04, wl = 0.004)
bias4 = tf.Variable(tf.constant(0.1, shape = [192]))
local4 = tf.nn.relu(tf.matmul(local3, weight4) + bias4)
# Final linear layer producing unnormalized scores for the 10 classes.
weight5 = variable_with_weight_loss(shape = [192, 10], stddev = 1 / 192.0, wl = 0.0)
bias5 = tf.Variable(tf.constant(0.0, shape = [10]))
logits = tf.add(tf.matmul(local4, weight5), bias5)
def loss(logits, labels):
    """Total loss: mean softmax cross-entropy over the batch plus every
    weight-decay term previously added to the 'losses' collection."""
    labels = tf.cast(labels, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits = logits, labels = labels, name = 'cross_entropy_per_example')
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name = 'cross_entropy')
    tf.add_to_collection('losses', cross_entropy_mean)
    return tf.add_n(tf.get_collection('losses'), name = 'total_loss')
# NOTE(review): this rebinds the name ``loss`` from the function above to
# the resulting tensor, so the function cannot be called again afterwards.
loss = loss(logits, label_holder)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)
# Per-example top-1 correctness predicate.
top_k_op = tf.nn.in_top_k(logits, label_holder, 1)
sess = tf.InteractiveSession()
tf.global_variables_initializer().run()
# Start the input-pipeline queue threads feeding images_*/labels_*.
tf.train.start_queue_runners()
# Training loop with throughput logging every 10 steps.
for step in range(max_steps):
    start_time = time.time()
    image_batch, label_batch = sess.run([images_train, labels_train])
    _, loss_value = sess.run([train_op, loss], feed_dict = {image_holder: image_batch, label_holder: label_batch})
    duration = time.time() - start_time
    if step % 10 == 0:
        examples_per_sec = batch_size / duration
        sec_per_batch = float(duration)
        format_str = ('step %d, loss = %.2f (%.1f examples / sec; %.3f sec / batch)')
        print(format_str % (step, loss_value, examples_per_sec, sec_per_batch))
# Evaluate top-1 precision over (approximately) the 10000 test images.
num_examples = 10000
import math
num_iter = int(math.ceil(num_examples / batch_size))
true_count = 0
total_sample_count = num_iter * batch_size
step = 0
while step < num_iter:
    image_batch, label_batch = sess.run([images_test, labels_test])
    predictions = sess.run([top_k_op], feed_dict = {image_holder: image_batch, label_holder: label_batch})
    true_count += np.sum(predictions)
    step += 1
precision = true_count / total_sample_count
print('precision @ 1 = %.3f' % precision)
24bb92edc18ea65166873fa41cd8db3ed6d62b5d | Add tests for forms | td_biblio/tests/test_forms.py | td_biblio/tests/test_forms.py | from django.core.exceptions import ValidationError
from django.test import TestCase
from ..forms import text_to_list, EntryBatchImportForm
def test_text_to_list():
    """text_to_list should split on commas and newlines, trim whitespace
    and drop empty items, regardless of the separator style used."""
    raw_inputs = [
        'foo,bar,lol',
        'foo , bar, lol',
        'foo\nbar\nlol',
        'foo,\nbar,\nlol',
        'foo, \nbar,lol',
        'foo,,bar,\nlol',
    ]
    expected = ['bar', 'foo', 'lol']

    # Loop variable renamed: the original used ``input``, shadowing the
    # builtin of the same name.
    for raw in raw_inputs:
        result = text_to_list(raw)
        result.sort()
        assert result == expected
class EntryBatchImportFormTests(TestCase):
    """
    Tests for the EntryBatchImportForm
    """
    def test_clean_pmids(self):
        """Test PMIDs cleaning method"""
        # Different separator styles that must all normalize to the same
        # PMID list.  Loop variable renamed so the ``input`` builtin is
        # not shadowed.
        payloads = [
            {'pmids': '26588162\n19569182'},
            {'pmids': '19569182\n26588162'},
            {'pmids': '19569182,\n26588162'},
            {'pmids': '19569182,26588162'},
            {'pmids': '19569182,,26588162'},
            {'pmids': '19569182\n\n26588162'},
        ]
        expected = ['19569182', '26588162']

        for payload in payloads:
            form = EntryBatchImportForm(payload)
            assert form.is_valid()
            pmids = form.cleaned_data['pmids']
            pmids.sort()
            assert pmids == expected

    def test_clean_pmids_with_random_input(self):
        """Test PMIDs cleaning method with non PMIDs"""
        payloads = [
            {'pmids': 'lorem, ipsum'},
            {'pmids': 'lorem, 19569182'},
            {'pmids': 'lorem42\nipsum234'},
        ]
        for payload in payloads:
            form = EntryBatchImportForm(payload)
            self.assertFalse(form.is_valid())

    def test_clean_dois(self):
        """Test DOIs cleaning method"""
        payloads = [
            {'dois': '10.1093/nar/gks419\n10.1093/nar/gkp323'},
            {'dois': '10.1093/nar/gkp323\n10.1093/nar/gks419'},
            {'dois': '10.1093/nar/gkp323,\n10.1093/nar/gks419'},
            {'dois': '10.1093/nar/gkp323,10.1093/nar/gks419'},
            {'dois': '10.1093/nar/gkp323,,10.1093/nar/gks419'},
            {'dois': '10.1093/nar/gkp323\n\n10.1093/nar/gks419'},
        ]
        expected = ['10.1093/nar/gkp323', '10.1093/nar/gks419']

        for payload in payloads:
            form = EntryBatchImportForm(payload)
            assert form.is_valid()
            dois = form.cleaned_data['dois']
            dois.sort()
            assert dois == expected

    def test_clean_dois_with_random_input(self):
        """Test DOIs cleaning method with non DOIs"""
        payloads = [
            {'dois': 'lorem, ipsum'},
            {'dois': 'lorem, 19569182'},
            {'dois': 'lorem42\nipsum234'},
        ]
        for payload in payloads:
            form = EntryBatchImportForm(payload)
            self.assertFalse(form.is_valid())
| Python | 0 | |
1bf634bd24d94a7d7ff358cea3215bba5b59d014 | Create power_of_two.py in bit manipulation | bit_manipulation/power_of_two/python/power_of_two.py | bit_manipulation/power_of_two/python/power_of_two.py | # Check if given number is power of 2 or not
# Function to check if x is power of 2
def isPowerOfTwo(x):
    """Return True if integer *x* is a positive power of two, else False.

    Uses the classic bit trick: a power of two has exactly one set bit,
    so ``x & (x - 1)`` is zero.  The ``x > 0`` guard keeps the original
    behaviour of rejecting 0 (and negatives) while always returning a
    proper bool instead of a mixed int/bool truthy value.
    """
    return x > 0 and (x & (x - 1)) == 0
# Driver code
# Read an integer from stdin and report whether it is a power of two.
x = int(input("Enter a no:"))
if(isPowerOfTwo(x)):
    print('Yes')
else:
    print('No')
| Python | 0.000011 | |
00fa30068b36385c8b9b574074743af01aedff1f | find best parameters | mkTargeted/find_parameters.py | mkTargeted/find_parameters.py |
def common_elements(list1, list2):
    """Return the elements of *list1* that also occur in *list2*,
    preserving list1's order and duplicates.

    Builds a set from list2 once, turning the original O(n*m) membership
    scan into O(n + m).  Assumes the elements are hashable (they are
    integer halo IDs at the call site below).
    """
    lookup = set(list2)
    return [element for element in list1 if element in lookup]
# Grid-search the shifty-gapper parameters (ngap, glimit) for best member
# recovery.  NOTE(review): this fragment references names not defined in
# this file (t2, t, tZ, center, updateArray, findSeperationSpatial,
# findLOSV, shifty_gapper, findLOSVDgmm, findR200, findClusterRedshift);
# it is meant to run in a session where those already exist.
ngap_best = 0
glimit_best = 0
fit_best = -1
for ngap in range(5,50):
    for glimit in range(100,1500,100):
        data = t2
        data = updateArray(data)
        #data = findClusterRedshift(data)
        data['CLUSZ'] = tZ
        data = findSeperationSpatial(data, center)
        data = findLOSV(data)
        # make initial cuts
        mask = abs(data['LOSV']) < 5000
        data = data[mask]
        # Iterate membership selection until the sample size converges.
        # NOTE(review): ``size`` survives across (ngap, glimit) iterations,
        # so later combinations can break out immediately with a stale
        # value; consider resetting it before this loop.
        while True:
            try:
                if size == data.size:
                    break
            except NameError:
                pass
            size = data.size
            #print 'size', data.size
            #data = rejectInterlopers(data)
            flag = False
            try:
                x = shifty_gapper(data['SEP'], data['Z'], tZ, ngap=ngap,
                        glimit=glimit)
            except:
                # NOTE(review): bare except also hides programming errors.
                flag = True
                break
            data = data[x]
            #data = findLOSVD(data)
            data = findLOSVDgmm(data)
            data['LOSVD'] = data['LOSVDgmm']
            data = findR200(data)
            mask = data['SEP'] < data['R200'][0]
            data = data[mask]
            data = findClusterRedshift(data)
            data = findSeperationSpatial(data, center)
            data = findLOSV(data)
        if not flag:
            # Score: recovered-member fraction plus a compactness bonus.
            # NOTE(review): under Python 2 both terms are integer divisions
            # and truncate to 0 -- verify the intended semantics.
            matched = len(common_elements(t['HALOID'], data['HALOID']))
            fit = matched/t.size + 1/data.size
            if fit > fit_best:
                fit_best = fit
                ngap_best = ngap
                glimit_best = glimit
        else:
            pass
| Python | 0.999989 | |
5124d27adbaac0304b2b9a318461257ed9d678fc | valid number | python/valid_num.py | python/valid_num.py | #! /usr/bin/python
'''
Validate if a given string is numeric.
Some examples:
"0" => true
" 0.1 " => true
"abc" => false
"1 a" => false
"2e10" => true
Note: It is intended for the problem statement to be ambiguous. You should gather all requirements up front before implementing one.
'''
class Solution:
    # @param {string} s
    # @return {boolean}
    def isNumber(self, s):
        """Return True if *s* parses as a number (int, float or exponent
        notation, with optional surrounding whitespace), else False."""
        try:
            float(s)
        except ValueError:
            return False
        return True
if __name__ =='__main__':
    # Smoke-test against the examples from the problem statement;
    # expected output: [True, True, False, False, True].
    # NOTE(review): Python 2 print statement.
    solution = Solution()
    a = ['0', '0.1', 'abc', '1 a', '2e10']
    print [ solution.isNumber(string) for string in a ]
| Python | 0.999385 | |
8fddde260af6ea1e6de8491dd99dca671634327c | Add test for the matrix representation function. | test/operator/utility_test.py | test/operator/utility_test.py | # Copyright 2014, 2015 The ODL development group
#
# This file is part of ODL.
#
# ODL is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ODL is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ODL. If not, see <http://www.gnu.org/licenses/>.
# Imports for common Python 2/3 codebase
from __future__ import print_function, division, absolute_import
from future import standard_library
standard_library.install_aliases()
from builtins import str, super
# External module imports
import pytest
import numpy as np
# ODL imports
import odl
from odl.operator.utility import matrix_representation
from odl.util.testutils import almost_equal
class MultiplyOp(odl.Operator):

    """Linear operator applying a fixed matrix to input vectors."""

    def __init__(self, matrix, domain=None, range=None):
        """Store *matrix* and derive default Rn spaces from its shape."""
        if domain is None:
            domain = odl.Rn(matrix.shape[1])
        if range is None:
            range = odl.Rn(matrix.shape[0])
        self.matrix = matrix
        super().__init__(domain, range, linear=True)

    def _apply(self, rhs, out):
        # Write the matrix-vector product directly into the output buffer.
        np.dot(self.matrix, rhs.data, out=out.data)

    @property
    def adjoint(self):
        # The adjoint of a real matrix operator multiplies by the
        # transpose, with domain and range swapped.
        return MultiplyOp(self.matrix.T, self.range, self.domain)
def test_matrix_representation():
    # Verify that the matrix representation function returns the correct matrix
    A = np.random.rand(3, 3)

    Aop = MultiplyOp(A)

    the_matrix = matrix_representation(Aop)

    # BUG FIX: the original asserted almost_equal(sum(|A - M|), 1e-6),
    # i.e. that the deviation is approximately 1e-6 -- it should be
    # approximately zero.  Compare the matrices elementwise instead.
    assert np.allclose(A, the_matrix)
if __name__ == '__main__':
    # Run this module's tests verbosely through pytest when executed directly.
    pytest.main(str(__file__.replace('\\', '/')) + ' -v')
| Python | 0 | |
8b92e55fa202723f7859cd1ea22e835e5c693807 | Add some time handling functions | Instanssi/kompomaatti/misc/awesometime.py | Instanssi/kompomaatti/misc/awesometime.py | # -*- coding: utf-8 -*-
from datetime import datetime, timedelta
def todayhelper():
    """Return the current date as a datetime at midnight (00:00:00)."""
    now = datetime.today()
    return datetime(year=now.year, month=now.month, day=now.day)
def format_single_helper(t):
    """Render timestamp *t* as a relative Finnish phrase (lowercase)."""
    now = datetime.now()
    today = todayhelper()
    tomorrow = today + timedelta(days=1)
    the_day_after_tomorrow = today + timedelta(days=2)  # Must honor the movie!

    # Guard-clause chain: each branch handles one time window.
    if t < now:
        return "päättynyt"
    if t < tomorrow:
        return "tänään klo. " + t.strftime("%H:%M")
    if t < the_day_after_tomorrow:
        return "huomenna klo. " + t.strftime("%H:%M")
    if t < today + timedelta(days=3):
        return "ylihuomenna klo. " + t.strftime("%H:%M")
    return t.strftime("%d.%m.%Y klo. %H:%M")
def format_single(t):
    """Like format_single_helper, but with the first letter capitalized."""
    text = format_single_helper(t)
    return text.capitalize()
def format_between(t1, t2):
    """Render the time span *t1*..*t2* as a Finnish phrase.

    Three cases: the span is currently running (remaining time), it starts
    within the next three days (relative phrasing), or it is far away
    (absolute timestamps).
    """
    now = datetime.now()
    today = todayhelper()
    # Removed unused locals ``tomorrow`` and ``the_day_after_tomorrow``
    # that the original computed but never read.
    if t1 < now and t2 > now:
        left = t2 - now
        l_hours = int(left.total_seconds() / timedelta(hours=1).total_seconds())
        l_minutes = int((left.total_seconds() - timedelta(hours=l_hours).total_seconds()) / 60)
        if l_hours == 0:
            return "Menossa, aikaa jäljellä " + str(l_minutes) + " minuuttia"
        else:
            return "Menossa, aikaa jäljellä " + str(l_hours) + " tuntia ja " + str(l_minutes) + " minuuttia"
    elif t1 > now and t1 < today + timedelta(days=3):
        return "Alkaa " + format_single_helper(t1) + " ja päättyy " + format_single_helper(t2)
    else:
        return "Alkaa " + t1.strftime("%d.%m.%Y %H:%M") + " ja päättyy " + t2.strftime("%d.%m.%Y %H:%M") + "."
| Python | 0.000015 | |
bca4a0a0dda95306fe126191166e733c7ccea3ee | Add staff permissions for backup models | nodeconductor/backup/perms.py | nodeconductor/backup/perms.py | from nodeconductor.core.permissions import StaffPermissionLogic
# Grant staff users every permission on the backup models; consumed by
# nodeconductor's permission framework via this module-level constant.
PERMISSION_LOGICS = (
    ('backup.BackupSchedule', StaffPermissionLogic(any_permission=True)),
    ('backup.Backup', StaffPermissionLogic(any_permission=True)),
)
| Python | 0 | |
8f2d421242da11ab2b4fc3482ce6de5480b20070 | Improve documentation | bears/c_languages/ClangComplexityBear.py | bears/c_languages/ClangComplexityBear.py | from clang.cindex import Index, CursorKind
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from bears.c_languages.ClangBear import clang_available, ClangBear
class ClangComplexityBear(LocalBear):
    """
    Calculates cyclomatic complexity of each function and displays it to the
    user.
    """
    LANGUAGES = ClangBear.LANGUAGES
    check_prerequisites = classmethod(clang_available)
    # Cursor kinds that open an additional execution branch and therefore
    # contribute one decision point each.
    decisive_cursor_kinds = {
        CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
        CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}

    def function_key_points(self, cursor, top_function_level=False):
        """
        Calculates number of function's decision points and exit points.

        Walks the AST below *cursor* recursively, counting decisive
        statements and return statements.

        :param top_function_level: Whether cursor is in the top level of
                                   the function.
        """
        decisions, exits = 0, 0

        for child in cursor.get_children():
            if child.kind in self.decisive_cursor_kinds:
                decisions += 1
            elif child.kind == CursorKind.RETURN_STMT:
                exits += 1
                if top_function_level:
                    # There is no point to move forward, so just return.
                    return decisions, exits
            child_decisions, child_exits = self.function_key_points(child)
            decisions += child_decisions
            exits += child_exits

        if top_function_level:
            # Implicit return statement.
            exits += 1

        return decisions, exits

    def complexities(self, cursor, filename):
        """
        Calculates cyclomatic complexities of functions.

        Yields ``(cursor, complexity)`` for every function declared in
        *filename*.
        """
        file = cursor.location.file
        if file is not None and file.name != filename:
            # There is nothing to do in another file.
            return

        if cursor.kind == CursorKind.FUNCTION_DECL:
            # The function body is the first child that is not a parameter
            # declaration; a pure declaration (no body) yields nothing.
            child = next((child for child in cursor.get_children()
                          if child.kind != CursorKind.PARM_DECL),
                         None)
            if child:
                decisions, exits = self.function_key_points(child, True)
                # McCabe-style count: decisions - exits + 2, floored at 1.
                complexity = max(1, decisions - exits + 2)
                yield cursor, complexity
        else:
            for child in cursor.get_children():
                yield from self.complexities(child, filename)

    def run(self, filename, file, max_complexity: int=8):
        """
        Check for all functions if they are too complicated using the cyclomatic
        complexity metric.

        You can read more about this metric at
        <https://www.wikiwand.com/en/Cyclomatic_complexity>.

        :param max_complexity: Maximum cyclomatic complexity that is
                               considered to be normal. The value of 10 had
                               received substantial corroborating evidence.
                               But the general recommendation: "For each
                               module, either limit cyclomatic complexity to
                               [the agreed-upon limit] or provide a written
                               explanation of why the limit was exceeded."
        """
        root = Index.create().parse(filename).cursor
        for cursor, complexity in self.complexities(root, filename):
            if complexity > max_complexity:
                affected_code = (SourceRange.from_clang_range(cursor.extent),)
                yield Result(
                    self,
                    "The function '{function}' should be simplified. Its "
                    "cyclomatic complexity is {complexity} which exceeds "
                    "maximal recommended value "
                    "of {rec_value}.".format(
                        function=cursor.displayname,
                        complexity=complexity,
                        rec_value=max_complexity),
                    affected_code=affected_code,
                    additional_info=(
                        "The cyclomatic complexity is a metric that measures "
                        "how complicated a function is by counting branches "
                        "and exits of each function.\n\n"
                        "Your function seems to be complicated and should be "
                        "refactored so that it can be understood by other "
                        "people easily.\n\nSee "
                        "<http://www.wikiwand.com/en/Cyclomatic_complexity>"
                        " for more information."))
| from clang.cindex import Index, CursorKind
from coalib.bears.LocalBear import LocalBear
from coalib.results.Result import Result
from coalib.results.SourceRange import SourceRange
from bears.c_languages.ClangBear import clang_available
class ClangComplexityBear(LocalBear):
"""
Calculates cyclomatic complexity of each function and displays it to the
user.
"""
check_prerequisites = classmethod(clang_available)
decisive_cursor_kinds = {
CursorKind.IF_STMT, CursorKind.WHILE_STMT, CursorKind.FOR_STMT,
CursorKind.DEFAULT_STMT, CursorKind.CASE_STMT}
def function_key_points(self, cursor, top_function_level=False):
"""
Calculates number of function's decision points and exit points.
:param top_function_level: Whether cursor is in the top level of
the function.
"""
decisions, exits = 0, 0
for child in cursor.get_children():
if child.kind in self.decisive_cursor_kinds:
decisions += 1
elif child.kind == CursorKind.RETURN_STMT:
exits += 1
if top_function_level:
# There is no point to move forward, so just return.
return decisions, exits
child_decisions, child_exits = self.function_key_points(child)
decisions += child_decisions
exits += child_exits
if top_function_level:
# Implicit return statement.
exits += 1
return decisions, exits
def complexities(self, cursor, filename):
"""
Calculates cyclomatic complexities of functions.
"""
file = cursor.location.file
if file is not None and file.name != filename:
# There is nothing to do in another file.
return
if cursor.kind == CursorKind.FUNCTION_DECL:
child = next((child for child in cursor.get_children()
if child.kind != CursorKind.PARM_DECL),
None)
if child:
decisions, exits = self.function_key_points(child, True)
complexity = max(1, decisions - exits + 2)
yield cursor, complexity
else:
for child in cursor.get_children():
yield from self.complexities(child, filename)
def run(self, filename, file, max_complexity: int=8):
"""
Calculates cyclomatic complexity of functions in file.
:param max_complexity: Maximum cyclomatic complexity that is
considered to be normal. The value of 10 had
received substantial corroborating evidence.
But the general recommendation: "For each
module, either limit cyclomatic complexity to
[the agreed-upon limit] or provide a written
explanation of why the limit was exceeded."
"""
root = Index.create().parse(filename).cursor
for cursor, complexity in self.complexities(root, filename):
if complexity > max_complexity:
affected_code = (SourceRange.from_clang_range(cursor.extent),)
yield Result(
self,
"The function '{function}' should be simplified. Its "
"cyclomatic complexity is {complexity} which exceeds "
"maximal recommended value "
"of {rec_value}.".format(
function=cursor.displayname,
complexity=complexity,
rec_value=max_complexity),
affected_code=affected_code,
additional_info=(
"The cyclomatic complexity is a metric that measures "
"how complicated a function is by counting branches "
"and exits of each function.\n\n"
"Your function seems to be complicated and should be "
"refactored so that it can be understood by other "
"people easily.\n\nSee "
"<http://www.wikiwand.com/en/Cyclomatic_complexity>"
" for more information."))
| Python | 0 |
54b66e132137eb6abea0a5ae6571dbc52e309b59 | change all libraries to have module_main of 'index', and add an index.js if it doesn't have one | migrations/011-ensure_library_main_module.py | migrations/011-ensure_library_main_module.py | from jetpack.models import PackageRevision
LIB_MODULE_MAIN = 'index'
libs = PackageRevision.objects.filter(package__type='l', module_main='main')
.select_related('package', 'modules')
libs.update(module_main=LIB_MODULE_MAIN)
main_per_package = {}
for revision in libs:
if revision.modules.filter(filename=LIB_MODULE_MAIN).count() == 0:
mod = main_per_package.get(revision.package_id)
if not mod:
mod = Module(filename=LIB_MODULE_MAIN, author=revision.author)
mod.save()
main_per_package[revision.package_id] = mod
revision.modules.add(mod)
| Python | 0.000001 | |
2fda10a83aa5a4d3080a0ce8751e28a18fc9a3e0 | Add two-point example to serve as a regression test for gridline/plot distinguishing | examples/two_point.py | examples/two_point.py | """
Demonstrates plotting multiple linear features with a single ``ax.pole`` call.
The real purpose of this example is to serve as an implicit regression test for
some oddities in the way axes grid lines are handled in matplotlib and
mplstereonet. A 2-vertex line can sometimes be confused for an axes grid line,
and they need different handling on a stereonet.
"""
import matplotlib.pyplot as plt
import mplstereonet
fig, ax = mplstereonet.subplots(figsize=(7,7))
strike = [200, 250]
dip = [50, 60]
ax.pole(strike, dip, 'go', markersize=10)
ax.grid()
plt.show()
| Python | 0.000004 | |
84b5464f67e60e35f28e0548a362ea68b13265bf | Create fullwaveform.py | fullwaveform.py | fullwaveform.py | # -*- coding: utf-8 -*-
"""
Created on Mon May 20 20:08:46 2013
Stuff to pull out stuff for full waveform
@author: davstott
davstott@gmail.com
"""
import os
import numpy as np
from scipy import interpolate
from scipy import signal
import matplotlib.pyplot as plt
import fiona
from shapely.geometry import shape
from shapely.geometry import asPoint
import csv
#******************* PATHS****************************
dirpath = os.path.dirname(os.path.abspath('...'))
print ' running in: ', dirpath
datapath = os.path.join(dirpath,'data')
print 'data located in :', datapath
outputpath = os.path.join(dirpath,'output')
if not os.path.exists(outputpath):
os.mkdir(outputpath)
#poly_path = os.path.join(dirpath,'polys')
header = str('x'+','+
'y'+','+
'z'+','+
'intensity'+','+
'peak_start'+','+
'peak_end'+','+
'peak_location'+','+
'peak_width'+','+
'max_intensity'+','+
'peak_sum'+','+
'shoulder_location'+','+
'shoulder_intensity'+
'\n')
class ShpReader():
    """Load 'ARC'/'BAC' classified polygons from all shapefiles in *indir*
    and classify points against them (Python 2 code: print statements)."""
    def __init__(self,indir):
        print 'call to shape reader'
        # NOTE(review): chdir has process-wide effect and is never undone.
        os.chdir(indir)
        listindir = os.listdir(indir)
        self.arc = []
        self.bac = []
        self.poly = None  # NOTE(review): never read afterwards.
        for thing in listindir:
            # Crude extension check on the last four characters.
            item = thing[-4:]
            if item == '.shp':
                shapefile = fiona.open(thing)
                for pol in shapefile:
                    #print pol['geometry']
                    classification = pol['properties']['CLASS']
                    if classification == 'ARC':
                        #print 'ARC'
                        arc_poly = shape(pol['geometry'])
                        self.arc.append(arc_poly)
                    elif classification =='BAC':
                        bac_poly = shape(pol['geometry'])
                        self.bac.append(bac_poly)
                shapefile.close()
        print len(self.arc),len(self.bac)
    def classify_point(self,x,y):
        """Return 1 if (x, y) lies in an ARC polygon, 0 if in a BAC
        polygon, or None if in neither.  Note both loops always run, so a
        point inside both sets ends up classified as BAC (0)."""
        #print 'call to classify point'
        reading = asPoint(np.array([x,y]))
        #print reading.wkt
        classification = None
        for poly in self.arc:
            if reading.within(poly):
                classification = 1
                print 'arc'
                break
        for poly in self.bac:
            if reading.within(poly):
                classification = 0
                print 'bac'
                break
        #print classification
        return classification
#*******************functions*****************************
#a smoothing spline
def smoothing(waveform, kparam, sparam, weights):
    """Smooth *waveform* with a weighted univariate smoothing spline.

    Generalized to accept any waveform length (the original hard-coded
    256 samples); behaviour for 256-sample inputs is unchanged.

    :param waveform: 1-D array-like of intensity samples.
    :param kparam:   spline degree ``k`` (scipy expects an int in 1..5).
    :param sparam:   smoothing factor ``s``; 0 interpolates exactly.
    :param weights:  scalar weight applied uniformly to every sample.
    :return: ndarray of the smoothed waveform evaluated at the original
             1-based sample positions.
    """
    n = len(waveform)
    sm_x = np.arange(1, n + 1)
    sm_weights = np.zeros(n) + weights
    sm_spline = interpolate.UnivariateSpline(sm_x,
                                             waveform,
                                             k=kparam,
                                             w=sm_weights,
                                             s=sparam)
    return sm_spline(sm_x)
#***************** Parameters*********************************
#for spline
# NOTE(review): scipy's UnivariateSpline documents an integer degree k in
# 1..5 -- confirm the non-integer 1.3 used here is accepted/intended.
kparam = 1.3
sparam = 191
weights = 1.4
#x values
# Sample positions 1..256, matching the 256 waveform bins per return.
x_vals = np.arange(1,257,1)
#find the data
os.chdir(datapath)
indir = os.listdir(datapath)
#open the data in a for loop
#classes = ShpReader(poly_path)
# NOTE(review): the chdir/listdir pair below duplicates the two lines
# above -- harmless, but probably an editing leftover.
os.chdir(datapath)
indir = os.listdir(datapath)
# Per-file processing: each CSV row is one LiDAR return whose trailing
# columns (index 9 onward) are the full-waveform samples.  The waveform is
# smoothed, a peak region and shoulder are located, and one summary row is
# appended to an identically-named file in the output directory.
for file in indir:
    print file
    reading_count = 0
    with open(file) as csvfile:
        reader = csv.reader(csvfile, delimiter=',')
        j = 0
        for row in reader:
            #print len(row)
            if len(row)>8:
                #print row
                #print row.shape
                try:
                    j = j+1
                    #print 'File:',file,'Line',j
                    #print row.shape
                    #osgb eastng
                    x = np.asarray(row[0],dtype=np.float64)
                    #osgb northing
                    y = np.asarray(row[1],dtype=np.float64)
                    #osgb /newlyn elevation
                    z = np.asarray(row[2],dtype=np.float64)
                    #intensity derived from LAS tools
                    intensity = np.asarray(row[3],dtype=np.int32)
                    #number of returns identified by lastools
                    #returns = row[4]
                    #and lastly pull out the waveform
                    waveform = np.asarray(row[9:],dtype=np.int16)
                    print 'FWE',waveform.shape, waveform.dtype
                    #smooth the waveform using a univariate spline using the parameters above
                    smoothed2 = smoothing(waveform, kparam, sparam, weights)
                    #print smoothed2
                    #identify the peaks in the smoothed waveform
                    #peaks = signal.find_peaks_cwt(smoothed2, np.arange(19,27,1))
                    #first derivative of smoothed waveform
                    diff = np.diff(smoothed2, n=1)
                    #print diff
                    #second derivative of smoothed waveform
                    #diff2 = np.diff(smoothed2, n=2)
                    #find the maximal value in waveform
                    max_intensity = np.argmax(waveform)
                    #print 'MAX', max_intensity
                    #define the region of the returns
                    # True where the smoothed slope rises steeply (> 1.5) or
                    # falls steeply (< -0.75): the extent of the return.
                    diffreg = np.logical_or(diff>1.5,diff<-0.75)
                    #print diffreg
                    #get the x values for slicing the other arrays
                    diffx = x_vals[1:]
                    regx = diffx[diffreg]
                    #get the first value
                    reg_l = regx[0]
                    #get the last value
                    reg_r = regx[-1]
                    #print 'diffreg', reg_l, reg_r
                    # Shoulder: steepest falling slope inside the region
                    # (offset back to waveform coordinates further below).
                    shoulder = np.argmin(diff[reg_l:reg_r])
                    #print 'shoulder pos', shoulder
                    #print shoulder
                    peak_value = waveform[max_intensity]
                    #print peak_value
                    peak_width = reg_r-reg_l
                    print peak_width
                    print reg_r, reg_l
                    print waveform.shape, waveform[reg_l:reg_r].shape
                    print waveform[reg_l:reg_r]
                    peak_sum = np.sum(waveform[reg_l:reg_r])
                    print 'peak sum', peak_sum
                    shoulder_pos = shoulder+reg_l
                    shoulder_int = waveform[shoulder_pos]
                    print type(x)
                    print y
                    # NOTE(review): this ordering does not line up exactly
                    # with the 'header' string defined earlier (the
                    # peak_location / max_intensity columns are swapped
                    # relative to it) -- confirm the intended layout.
                    vlist = [x,
                             y,
                             z,
                             intensity,
                             reg_l,
                             reg_r,
                             max_intensity,
                             peak_width,
                             peak_value,
                             peak_sum,
                             shoulder_pos,
                             shoulder_int]
                    print vlist
                    os.chdir(outputpath)
                    with open(file, 'a+') as outfile:
                        writer = csv.writer(outfile, delimiter=',')
                        writer.writerow(vlist)
                    outfile.close()
                    os.chdir(datapath)
                    # Disabled block: would classify the return against the
                    # ARC/BAC shapefile polygons and dump the smoothed
                    # waveform into a per-class file.
                    '''waveform_class = classes.classify_point(x,y)
                    wv=[]
                    if waveform_class == 1:
                        os.chdir(outputpath)
                        wv=smoothed2.tolist()
                        wv.insert(0,np.around(y,2))
                        wv.insert(0,np.around(x,2))
                        print wv
                        with open(file+'wv'+'a', 'a+') as outfile:
                            writer = csv.writer(outfile, delimiter=',')
                            writer.writerow(wv)
                        outfile.close()
                        os.chdir(datapath)
                    elif waveform_class == 0:
                        os.chdir(outputpath)
                        wv=smoothed2.tolist()
                        wv.insert(0,np.around(y,2))
                        wv.insert(0,np.around(x,2))
                        with open(file+'wv'+'b', 'a+') as outfile:
                            writer = csv.writer(outfile, delimiter=',')
                            writer.writerow(wv)
                        outfile.close()
                        os.chdir(datapath)'''
                except:
                    # NOTE(review): the bare except silently skips any row
                    # that fails -- malformed data and genuine bugs are
                    # indistinguishable here.
                    continue
| Python | 0.00001 | |
ee85acb7f9f3af91db3bfb4bf766636883f07685 | Add an extra test for the OpalSerializer | opal/tests/test_core_views.py | opal/tests/test_core_views.py | """
Unittests for opal.core.views
"""
from opal.core import test
from opal.core import views
class SerializerTestCase(test.OpalTestCase):
    """Tests for opal.core.views.OpalSerializer."""
    def test_serializer_default_will_super(self):
        # default() should fall through to the superclass for values it
        # does not handle, which raises TypeError (here: None).
        s = views.OpalSerializer()
        with self.assertRaises(TypeError):
            s.default(None)
| Python | 0.000001 | |
1fdffc42c7ff7ea4339a58e8a19ffa07253e4149 | Add script to resolve conflicts | resolveconflicts.py | resolveconflicts.py | # Copyright (C) 2014 Igor Tkach
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import couchdb
from urlparse import urlparse
def parse_args():
    """Parse the command line: a positional CouchDB URL plus an optional
    -s/--start document id to resume scanning from."""
    parser = argparse.ArgumentParser()
    for spec in (('couch_url',), ('-s', '--start')):
        parser.add_argument(*spec)
    return parser.parse_args()
def mkclient(couch_url):
    """Build a couchdb Database client from a URL of the form
    scheme://user:password@host:port/dbname.

    The database name is taken from the URL path; when a password is
    present it is also installed on the server resource's credentials.
    """
    parsed_url = urlparse(couch_url)
    couch_db = parsed_url.path.lstrip('/')
    # netloc keeps any user:password@ portion, so credentials travel in
    # the server URL as well as via resource.credentials below.
    server_url = parsed_url.scheme + '://'+ parsed_url.netloc
    server = couchdb.Server(server_url)
    username = parsed_url.username
    password = parsed_url.password
    print "User %s%s at %s, database %s" % (
        username,
        '' if password else ' (no password)',
        server.resource.url,
        couch_db)
    if password:
        server.resource.credentials = (username, password)
    return server[couch_db]
def main():
    """Walk every document in the database, and for each one that has
    CouchDB conflicts keep the revision with the highest MediaWiki revid
    (merging all observed aliases into it) and delete the rest."""
    args = parse_args()
    db = mkclient(args.couch_url)
    viewoptions = {}
    if args.start:
        # Resume from a given document id (key == id in _all_docs).
        viewoptions['startkey'] = args.start
        viewoptions['startkey_docid'] = args.start
    for row in db.iterview('_all_docs', 100, **viewoptions):
        doc = db.get(row.id, conflicts=True)
        conflicts = doc.get('_conflicts')
        if conflicts:
            # Winner so far: the currently-live revision.
            best_mw_revid = doc['parse']['revid']
            docs = [doc]
            best_doc = doc
            print row.id, '\n', doc.rev, best_mw_revid, conflicts
            all_aliases = set(doc.get('aliases', ()))
            aliase_count = len(all_aliases)
            for conflict_rev in conflicts:
                conflict_doc = db.get(row.id, rev=conflict_rev)
                docs.append(conflict_doc)
                conflict_mw_revid = conflict_doc['parse']['revid']
                #print 'conflict mw revid:', conflict_mw_revid
                if conflict_mw_revid > best_mw_revid:
                    best_mw_revid = conflict_mw_revid
                    best_doc = conflict_doc
                # NOTE(review): this reads aliases from *doc*, not from
                # *conflict_doc* -- conflict revisions' aliases are never
                # actually collected; confirm whether that is intended.
                aliases = set(doc.get('aliases', ()))
                all_aliases.update(aliases)
                #print all_aliases
            new_aliases_count = len(all_aliases) - aliase_count
            #print 'New aliases found in conflict:', new_aliases_count
            #print 'Best doc: ', best_doc.rev
            if new_aliases_count > 0:
                print '+A', doc.id
            # NOTE(review): the chained comparison "a != b > 0" means
            # "a != b and b > 0", where b is a revision string -- confirm
            # this expresses the intended "winner differs" check.
            if best_doc.rev != doc.rev > 0:
                print '+R', doc.id
            for doc in docs:
                if doc.rev == best_doc.rev:
                    print 'Keeping ', doc.rev
                    doc['aliases'] = list(all_aliases)
                    db.save(doc)
                else:
                    print 'Discarding ', doc.rev
                    db.delete(doc)
if __name__ == '__main__':
main()
| Python | 0.000001 | |
a0a2017e05af986cd0a7207c429e7dc5e8b3fcd2 | Add missing tests for Variable | tests/test_solver_variable.py | tests/test_solver_variable.py | from gaphas.solver import Variable
def test_equality():
    """Variables compare equal by value, with either operand order."""
    three = Variable(3)
    same = Variable(3)
    other = Variable(2)
    # positive cases
    assert three == 3
    assert 3 == three
    assert three == same
    # negative cases
    assert not three == other
    assert three != 2
    assert 2 != three
    assert not 3 != three
    assert three != other
def test_add_to_variable():
    """Addition/subtraction with plain numbers on either side."""
    var = Variable(3)
    assert 4 == var + 1
    assert 2 == var - 1
    assert 4 == 1 + var
    assert 1 == 4 - var
def test_add_to_variable_with_variable():
    """Addition/subtraction when both operands are Variables."""
    left, right = Variable(3), Variable(1)
    assert left + right == 4
    assert left - right == 2
def test_mutiplication():
    """Multiplication and true/floor division against plain numbers."""
    var = Variable(3)
    # Variable on the left
    assert var * 2 == 6
    assert var / 2 == 1.5
    assert var // 2 == 1
    # Variable on the right
    assert 2 * var == 6
    assert 4.5 / var == 1.5
    assert 4 // var == 1
def test_mutiplication_with_variable():
    """Multiplication and division when both operands are Variables."""
    numerator, denominator = Variable(3), Variable(2)
    assert numerator * denominator == 6
    assert numerator / denominator == 1.5
    assert numerator // denominator == 1
def test_comparison():
    """Rich comparisons with a number on the right-hand side."""
    var = Variable(3)
    # bounds that should hold
    assert var > 2 and var < 4
    assert var >= 2 and var >= 3
    assert var <= 4 and var <= 3
    # bounds that should not hold
    assert not var > 3
    assert not var < 3
    assert not var <= 2
    assert not var >= 4
def test_inverse_comparison():
    """Rich comparisons with the Variable on the right-hand side."""
    var = Variable(3)
    # bounds that should hold
    assert 4 > var and 2 < var
    assert 4 >= var and 3 >= var
    assert 2 <= var and 3 <= var
    # bounds that should not hold
    assert not 3 > var
    assert not 3 < var
    assert not 4 <= var
    assert not 2 >= var
def test_power():
    """Exponentiation with numbers and Variables on either side."""
    base, exponent = Variable(3), Variable(2)
    assert base ** 2 == 9
    assert 2 ** base == 8
    assert base ** exponent == 9
def test_modulo():
    """Modulo and divmod with numbers and Variables on either side."""
    var, two = Variable(3), Variable(2)
    assert var % 2 == 1
    assert 4 % var == 1
    assert var % two == 1
    assert divmod(var, 2) == (1, 1)
    assert divmod(4, var) == (1, 1)
    assert divmod(var, two) == (1, 1)
| Python | 0.000005 | |
865356c5b7bbec2b9412ffd3d2a39fea19e4b01a | Create getcounts.py | usbcounter/getcounts.py | usbcounter/getcounts.py | import serial
import json
import os, sys
import time
| Python | 0.000001 | |
993b1af160e6ed7886c2c95770683fae72332aed | remove __debug__ | direct/src/task/Task.py | direct/src/task/Task.py | """ This module exists temporarily as a gatekeeper between
TaskOrig.py, the original Python implementation of the task system,
and TaskNew.py, the new C++ implementation. """
from pandac.libpandaexpressModules import ConfigVariableBool
wantNewTasks = ConfigVariableBool('want-new-tasks', False).getValue()
if wantNewTasks:
from TaskNew import *
else:
from TaskOrig import *
| """ This module exists temporarily as a gatekeeper between
TaskOrig.py, the original Python implementation of the task system,
and TaskNew.py, the new C++ implementation. """
wantNewTasks = False
if __debug__:
from pandac.PandaModules import ConfigVariableBool
wantNewTasks = ConfigVariableBool('want-new-tasks', False).getValue()
if wantNewTasks:
from TaskNew import *
else:
from TaskOrig import *
| Python | 0.000105 |
85cbec4f398c49a4903c7370f74deeae3d5adabf | Create ShowData.py | ShowData.py | ShowData.py | """
The MIT License (MIT)
Copyright (c) <2016> <Larry McCaig (aka: Larz60+ aka: Larz60p)>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import ReadRecord as RdRec
class ShowData:
    """Demo that loads the stock-market record via ReadRecord and prints
    every way of accessing its fields and per-column metadata."""
    def __init__(self):
        # ReadRecord wraps StockData.json; read_data_file() returns a
        # namedtuple-style record (it exposes _fields below).
        self.rr = RdRec.ReadRecord('StockData.json')
        self.stock_market_record = self.rr.read_data_file()
    def show_data(self):
        stkmktrec = self.stock_market_record
        # get a list of field names:
        print('Record fields: {}'.format(stkmktrec._fields))
        # List entire record
        print('\nEntire record: {}'.format(stkmktrec))
        # Get individual field
        print('\ndbtabledesc: {}'.format(stkmktrec.dbtabledesc))
        # Show database column entries
        print('\ndatabase column 0: {}'.format(stkmktrec.columns[0]))
        print('database column 1: {}'.format(stkmktrec.columns[1]))
        print('database column 2: {}'.format(stkmktrec.columns[2]))
        # Column data by key:
        for n in range(len(stkmktrec.columns)):
            column = stkmktrec.columns[n]
            print('\nColumn {} all: {}'.format(n, column))
            print('Column data {} field_name: {}'.format(n, column.field_name))
            print('Column data {} db_column_name: {}'.format(n, column.db_column_name))
            print('Column data {} db_column_desc: {}'.format(n, column.db_column_desc))
            print('Column data {} db_column_type: {}'.format(n, column.db_column_type))
        # Using get_field_item
        print('\nUsing get_field_item - Column 1, db_column_desc: {}'
              .format(self.rr.get_field_item(1, itemname='db_column_desc')))
        # same with bad data
        print('With bad data you get: {}'
              .format(self.rr.get_field_item(1, itemname='donkykong')))

if __name__ == '__main__':
    sd = ShowData()
    sd.show_data()
| Python | 0 | |
64130f988f2154870db540244a399a8297a103e9 | move hardcoded URL from email script to model definition. | dj/scripts/email_url.py | dj/scripts/email_url.py | #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
import itertools
from pprint import pprint
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video is posted:
{% for url in urls %} {{url}}
{% endfor %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
{{ep.approve_url}}
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
"""
    def context(self, ep):
        """Build the template context: the base context from email_ab plus
        any non-empty URLs where the episode's video may be found."""
        ctx = super(email_url, self).context(ep)
        # dig around for URLs that might be relevant
        urls = filter( None,
            [ep.public_url,
             ep.host_url,
             ep.archive_ogv_url,
             ep.archive_mp4_url] )
        ctx['urls'] = urls
        ctx['py_name'] = "email_url.py"
        return ctx
if __name__ == '__main__':
p=email_url()
p.main()
| #!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
import itertools
from pprint import pprint
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video is posted:
{% for url in urls %} {{url}}
{% endfor %}
Look at it, make sure the title is spelled right and the audio sounds reasonable.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
https://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
{% if ep.twitter_url %}
It has been tweeted: {{ ep.twitter_url }}
Re-tweet it, blog it, whatever it. No point in making videos if no one watches them.
{% endif %}
"""
def context(self, ep):
ctx = super(email_url, self).context(ep)
# dig around for URLs that might be relevant
urls = filter( None,
[ep.public_url,
ep.host_url,
ep.archive_ogv_url,
ep.archive_mp4_url] )
ctx['urls'] = urls
ctx['py_name'] = "email_url.py"
return ctx
if __name__ == '__main__':
p=email_url()
p.main()
| Python | 0 |
ce47fec10ccda45550625221c64322d89622c707 | Add libjpeg.gyp that wraps third_party/externals/libjpeg/libjpeg.gyp Review URL: https://codereview.appspot.com/5848046 | gyp/libjpeg.gyp | gyp/libjpeg.gyp | # Copyright 2012 The Android Open Source Project
#
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Depend on this wrapper to pick up libjpeg from third_party
{
'targets': [
{
'target_name': 'libjpeg',
'type': 'none',
'dependencies': [
'../third_party/externals/libjpeg/libjpeg.gyp:libjpeg',
],
},
],
}
# Local Variables:
# tab-width:2
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=2 shiftwidth=2:
| Python | 0.000001 | |
d4ff515df7e12d26c759adfafcacf82e47da71a1 | Add util | snapchat_fs/util.py | snapchat_fs/util.py | #!/usr/bin/env python
"""
util.py provides a set of nice utility functions that support the snapchat_fs pkg
"""
__author__ = "Alex Clemmer, Chad Brubaker"
__copyright__ = "Copyright 2013, Alex Clemmer and Chad Brubaker"
__credits__ = ["Alex Clemmer", "Chad Brubaker"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Alex Clemmer"
__email__ = "clemmer.alexander@gmail.com"
__status__ = "Prototype"
def bold(text):
    """Return *text* wrapped in the ANSI bold escape sequence."""
    ansi_bold = '\033[1m%s\033[0m'
    return ansi_bold % text
def green(text):
    """Return *text* wrapped in ANSI codes for bold green."""
    ansi_green = '\033[1;32m%s\033[0m'
    return ansi_green % text
def red(text):
    """Return *text* wrapped in ANSI codes for bold red."""
    ansi_red = '\033[1;31m%s\033[0m'
    return ansi_red % text
| Python | 0.000051 | |
7d9fd2eed72a2a65744259af1bd8580253f282d3 | Create a.py | abc067/a.py | abc067/a.py | a, b = map(int, input().split())
# Print 'Possible' when a, b, or a + b is divisible by 3, else 'Impossible'.
if a % 3 == 0 or b % 3 == 0 or (a + b) % 3 == 0:
    print('Possible')
else:
    print('Impossible')
| Python | 0.000489 | |
c4d5d04a957fed09228995aa7f84ed19c64e3831 | Add previously forgotten afterflight utilities module | af_utils.py | af_utils.py | #Copyright 2013 Aaron Curtis
#Licensed under the Apache License, Version 2.0 (the "License");
#you may not use this file except in compliance with the License.
#You may obtain a copy of the License at
#http://www.apache.org/licenses/LICENSE-2.0
#Unless required by applicable law or agreed to in writing, software
#distributed under the License is distributed on an "AS IS" BASIS,
#WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#See the License for the specific language governing permissions and
#limitations under the License.
import calendar, datetime, re, numpy
def dt2jsts(datetime):
    """Convert a datetime to a JavaScript timestamp (milliseconds since
    1970-01-01, no timezone offset applied) with microsecond precision.
    """
    whole_ms = calendar.timegm(datetime.timetuple()) * 1e3
    frac_ms = datetime.microsecond / 1e3
    return whole_ms + frac_ms
def logpath2dt(filepath):
    """Parse the download timestamp out of a Mission Planner dataflash
    log path.

    The text between the final '/' and the final space of the file name
    is interpreted as '%Y-%m-%d %H-%M'.
    """
    stamp = re.match(r'.*/(.*) .*$', filepath).groups()[0]
    return datetime.datetime.strptime(stamp, '%Y-%m-%d %H-%M')
class UTC(datetime.tzinfo):
    """A concrete UTC tzinfo (the stdlib provides no concrete timezone
    classes, so we roll our own)."""

    _ZERO = datetime.timedelta(0)

    def utcoffset(self, dt):
        return self._ZERO

    def tzname(self, dt):
        return "UTC"

    def dst(self, dt):
        return self._ZERO
utc=UTC()
def cross(series, cross=0, direction='cross'):
    """Return interpolated index positions where *series* crosses *cross*.

    direction selects 'rising' edges only, 'falling' edges only, or (any
    other value) both.  Crossing positions are linearly interpolated
    between the samples on either side of each edge.
    (Adapted from stackoverflow question 10475488.)
    """
    is_above = series.values > cross
    is_below = numpy.logical_not(is_above)
    # Edge detection: compare each sample with its right-hand neighbour.
    rising_edges = is_above[1:] & is_below[:-1]
    falling_edges = is_below[1:] & is_above[:-1]
    if direction == 'rising':
        edges = rising_edges
    elif direction == 'falling':
        edges = falling_edges
    else:
        edges = rising_edges | falling_edges
    left = edges.nonzero()[0]
    # Interpolate along the line between the bracketing samples.
    x1 = series.index.values[left]
    x2 = series.index.values[left + 1]
    y1 = series.values[left]
    y2 = series.values[left + 1]
    return (cross - y1) * (x2 - x1) / (y2 - y1) + x1
| Python | 0 | |
d08f06f124dead14d47e241c81a0b6c819b15d0e | Add setup script | Setup/setup.py | Setup/setup.py | # -*- coding: utf-8 -*-
import sys
import os
import shutil
import glob
import fnmatch
import codecs
import re
from lxml import etree
def get_buildtoolkit_path():
    """Return the absolute path of this file's parent directory (the
    build-toolkit root)."""
    here = os.path.split(__file__)[0]
    return os.path.abspath(os.path.join(here, ".."))
def show_usage():
    """Print the command-line help for setup.py."""
    print "[Usage] setup.py command"
    print "Command:"
    print "  INIT path [-b]   Copy initial files to path"
    print "  RULE path        Configure project to have analysis rule recursively"
    print "  LICENSE path     Configure project to have same license recursively"
def init():
    """INIT: seed the target directory with the toolkit's standard files
    (git config, editorconfig, analysis ruleset, optional build scripts)
    and remove superseded ReSharper/StyleCop settings files."""
    src_path = get_buildtoolkit_path()
    dst_path = sys.argv[-1]
    # "-b" given as the second argument also installs build.cmd/build.fsx.
    include_build = (len(sys.argv) > 3 and sys.argv[2].lower() == "-b")
    shutil.copy(src_path + "/.gitattributes", dst_path)
    shutil.copy(src_path + "/.gitignore", dst_path)
    shutil.copy(src_path + "/CodeStyle/.editorconfig", dst_path)
    shutil.copy(src_path + "/CodeStyle/CodeAnalysis.ruleset", dst_path)
    if include_build:
        shutil.copy(src_path + "/BuildScript/build.cmd", dst_path)
        # Never clobber an existing build.fsx.
        if not os.path.exists(dst_path + "/build.fsx"):
            shutil.copy(src_path + "/BuildScript/build.fsx", dst_path)
    for f in glob.glob(dst_path + "/*.DotSettings"):
        os.remove(f)
    if os.path.exists(dst_path + "/Settings.StyleCop"):
        os.remove(dst_path + "/Settings.StyleCop")
def load_proj(path):
    """Read a .csproj file and parse it, masking the default xmlns first
    so element tags stay unprefixed in the parsed tree."""
    with codecs.open(path, encoding="utf-8") as proj_file:
        raw_text = proj_file.read()
    masked = raw_text.replace("xmlns=", "__xmlns__=")
    return etree.fromstring(masked)
def save_proj(path, proj):
    """Serialise *proj* back to *path*, restoring the xmlns attribute that
    load_proj() masked and normalising the output to Visual Studio style
    (CRLF line endings, " />" self-closing tags, UTF-8 with BOM)."""
    proj_text = etree.tostring(proj, encoding="utf-8")
    # Undo the xmlns masking and re-apply the VS formatting conventions.
    t = proj_text.replace("__xmlns__=", "xmlns=").replace("/>", " />").replace("\n", "\r\n")
    with codecs.open(path, "w", encoding="utf-8-sig") as f:
        f.write('<?xml version="1.0" encoding="utf-8"?>\r\n')
        f.write(t)
def add_element(element, child):
    # add child element to element while keeping identation (>_<)
    # The tail/text fiddling preserves lxml's pretty whitespace: the last
    # existing sibling's tail gains one indent step, and the new child
    # inherits the parent's child-indent (element.text) minus one level.
    list(element)[-1].tail += "  "
    child.tail = element.text[0: -2]
    element.append(child)
def rule():
    """RULE: walk the path in argv[2] and wire every .csproj up to the
    shared CodeAnalysis.ruleset, enabling code analysis in Release."""
    dst_path = sys.argv[2]
    for root, dirnames, filenames in os.walk(dst_path):
        for filename in fnmatch.filter(filenames, '*.csproj'):
            path = os.path.join(root, filename)
            print path
            try:
                proj = load_proj(path)
                # The unconditional PropertyGroup holds global settings;
                # the Release-conditioned one gets RunCodeAnalysis.
                pgroup_base = [e for e in proj if e.tag == "PropertyGroup" and len(e.keys()) == 0][0]
                pgroup_rel = [e for e in proj if e.tag == "PropertyGroup" and e.get("Condition") != None and e.get("Condition").find("Release") != -1][0]
                # Ruleset path is made relative to each project's folder.
                ruleset_relpath = os.path.relpath(dst_path, root) + "\CodeAnalysis.ruleset"
                add_element(pgroup_base, etree.XML('<CodeAnalysisRuleSet>' + ruleset_relpath + '</CodeAnalysisRuleSet>'))
                add_element(pgroup_rel, etree.XML('<RunCodeAnalysis>true</RunCodeAnalysis>'))
                save_proj(path, proj)
            except Exception as e:
                print "!", e
def open_replace_save(path, transform):
    """Read *path* as UTF-8, run *transform* over the full text, and write
    the result back in place."""
    with codecs.open(path, encoding="utf-8") as source:
        contents = source.read()
    with codecs.open(path, "w", encoding="utf-8") as sink:
        sink.write(transform(contents))
def license():
    """LICENSE: normalise copyright/company strings to SaladLab across
    LICENSE files, AssemblyInfo.cs and .nuspec files under argv[2].

    NOTE(review): this shadows the builtin license() -- harmless here,
    but worth knowing.
    """
    dst_path = sys.argv[2]
    for root, dirnames, filenames in os.walk(dst_path):
        for filename in fnmatch.filter(filenames, 'LICENSE'):
            path = os.path.join(root, filename)
            print path
            open_replace_save(path, lambda x: re.sub(r"Copyright (c) .*", "Copyright (c) 2016 SaladLab", x))
        for filename in fnmatch.filter(filenames, 'AssemblyInfo.cs'):
            path = os.path.join(root, filename)
            print path
            open_replace_save(path, lambda x:
                re.sub(r"\[assembly: AssemblyCopyright(.*)\]", u'[assembly: AssemblyCopyright("Copyright © 2016 SaladLab")]',
                re.sub(r"\[assembly: AssemblyCompany.*\]", u'[assembly: AssemblyCompany("SaladLab")]', x)))
        for filename in fnmatch.filter(filenames, '*.nuspec'):
            path = os.path.join(root, filename)
            print path
            open_replace_save(path, lambda x: re.sub(r"\<copyright\>.*\</copyright\>", u'<copyright>Copyright © 2016 SaladLab</copyright>', x))
def main():
    """Dispatch argv[1] to the matching sub-command (init/rule/license)."""
    if len(sys.argv) <= 1:
        show_usage()
        return
    cmd = sys.argv[1].lower()
    if cmd == "init":
        return init()
    elif cmd == "rule":
        rule()
    elif cmd == "license":
        license()
    else:
        print "Wrong command: " + cmd
        sys.exit(1)
    # Reached after rule/license (init returns early above).
    print get_buildtoolkit_path()
if __name__ == "__main__":
main()
| Python | 0.000001 | |
229d5b93d6e5474dfcd125536c7744f6a7ec86d0 | Create blender tool | waflib/Tools/blender.py | waflib/Tools/blender.py | #!/usr/bin/env python
# encoding: utf-8
# Michal Proszek, 2014 (poxip)
"""
Detect the version of Blender, path
and install the extension:
def options(opt):
opt.load('blender')
def configure(cnf):
cnf.load('blender')
def build(bld):
bld(name='io_mesh_raw',
feature='blender',
files=['file1.py', 'file2.py']
)
If name variable is empty, files are installed in scripts/addons, otherwise scripts/addons/name
Use ./waf configure --system to set the installation directory to system path
"""
import os
import re
from sys import platform as _platform
from getpass import getuser
from waflib import Utils
from waflib.TaskGen import feature
from waflib.Configure import conf
def options(opt):
    """waf options hook: add -s/--system to install into Blender's
    system scripts directory instead of the per-user one."""
    opt.add_option(
        '-s', '--system',
        dest='directory_system',
        default=False,
        action='store_true',
        help='determines installation directory (default: user)'
    )
@conf
def find_blender(ctx):
    '''Return version number of blender, if not exist return None'''
    # Locate the blender binary, parse "Blender X.Y.Z" from its --version
    # output, and record the version in ctx.env['BLENDER_VERSION'].
    blender = ctx.find_program('blender')
    try:
        output = ctx.cmd_and_log(blender + ['--version'])
        m = re.search(r'Blender\s*((\d+(\.|))*)', output)
        blender_version = m.group(1)
    except Exception:
        ctx.fatal('Could not retrieve blender version')
    ctx.env['BLENDER_VERSION'] = blender_version
    return blender
@conf
def configure_paths(ctx):
    """Setup blender paths"""
    # Derive Blender's per-user and system config directories for the
    # current platform, append the detected version, and publish
    # BLENDER_CONFIG_DIR / BLENDER_ADDONS_DIR via ctx.env.
    # Get the username
    user = getuser()
    # Default: Linux
    config_path = {
        'user': '/home/%s/.config/blender/' % user,
        'system': '/usr/share/blender/'
    }
    if _platform == 'darwin':
        # MAC OS X
        config_path['user'] = \
            '/Users/%s/Library/Application Support/Blender/' % user
        config_path['system'] = '/Library/Application Support/Blender/'
    elif _platform == 'win32':
        # Windows
        appdata_path = ctx.getenv('APPDATA').replace('\\', '/')
        homedrive = ctx.getenv('HOMEDRIVE').replace('\\', '/')
        config_path['user'] = '%s/Blender Foundation/Blender/' % appdata_path
        # NOTE(review): HOMEDRIVE is typically "C:", so this produces
        # "C:All Users/..." with no path separator -- confirm intended.
        config_path['system'] = \
            '%sAll Users/AppData/Roaming/Blender Foundation/Blender/' % homedrive
    blender_version = ctx.env['BLENDER_VERSION']
    config_path['user'] += blender_version + '/'
    config_path['system'] += blender_version + '/'
    ctx.env['BLENDER_CONFIG_DIR'] = os.path.abspath(config_path['user'])
    if ctx.options.directory_system:
        # --system overrides the per-user default.
        ctx.env['BLENDER_CONFIG_DIR'] = config_path['system']
    ctx.env['BLENDER_ADDONS_DIR'] = os.path.join(
        ctx.env['BLENDER_CONFIG_DIR'], 'scripts/addons'
    )
    Utils.check_dir(ctx.env['BLENDER_ADDONS_DIR'])
def configure(ctx):
    """waf configure hook: locate blender, then derive install paths."""
    ctx.find_blender()
    ctx.configure_paths()
@feature('blender_list')
def blender(self):
    # Two ways to install a blender extension: as a module or just .py files
    # Installs the task-gen's 'files' into scripts/addons/<task name>.
    # NOTE(review): the module docstring's example uses feature='blender',
    # while this hook is registered as 'blender_list' -- confirm which.
    dest_dir = os.path.join(self.env.BLENDER_ADDONS_DIR, self.get_name())
    Utils.check_dir(dest_dir)
    self.bld.install_files(
        dest_dir,
        getattr(self, 'files', '.')
    )
| Python | 0 | |
f68b0bb1e1f10b10e58057f60e17377f027690f8 | add a util function for ungzip. | web/my_util/compress.py | web/my_util/compress.py | import gzip
from StringIO import StringIO
def ungzip(resp):
    """Return the body of *resp*, gunzipping it when the response headers
    declare Content-Encoding: gzip."""
    if resp.info().get('Content-Encoding') != 'gzip':
        return resp.read()
    compressed = StringIO(resp.read())
    return gzip.GzipFile(fileobj=compressed).read()
| Python | 0 | |
eab925debf62d4ba180b1a114841b7a4f0fe8e76 | add basic command line interface | basicInterface.py | basicInterface.py | import sys
import requests
import random
passchars = map(chr,range(ord('a'),ord('z')+1) + range(ord('A'),ord('Z')+1) + range(ord('0'),ord('9')+1) )
class User():
    """One contest registrant plus a freshly generated 8-char password."""
    def __init__(self,name,username,bracket):
        self.name = name
        self.username = username
        self.bracket = bracket
        self.password = self.gen_password(8)
    def gen_password(self,length):
        # NOTE(review): random.choice is not a CSPRNG; real passwords
        # should come from a cryptographically secure source.
        return "".join([random.choice(passchars) for i in range(length)])
    def to_csv(self):
        # One CSV line for the interface.log audit file.
        return "{},{},{},{}\n".format(self.name,self.username,self.bracket,self.password)
    def to_printable(self):
        return self.to_readable(32)
    def to_readable(self,width):
        # Three lines, each centred in a *width*-wide field (printed slip).
        return (
"""{: ^{width}}
{: ^{width}}
{: ^{width}}""").format(
            "{} <{}>".format(self.name,self.bracket),
            "username: {}".format(self.username),
            "password: {}".format(self.password),width=width)
    def to_comm(self,token):
        # POST payload for the registration endpoint.
        return {"name":self.name,"username":self.username,"password":self.password,"token":token,"bracket":self.bracket}
    def to_verify(self):
        return (
"""NAME     : {}
USERNAME : {}
BRACKET  : {}""").format(self.name,self.username,self.bracket)
def read_name(userin,userout):
    """Prompt until a non-empty name is entered and return it."""
    userout.write("WHAT... is your name? ")
    entered = userin.readline().strip()
    if entered:
        return entered
    userout.write("HEY!, you must have some sort of name.\n")
    return read_name(userin,userout)
def read_username(userin,userout):
    """Prompt until a non-empty username is entered and return it."""
    userout.write("WHAT... is your preferred username? ")
    entered = userin.readline().strip()
    if not entered:
        userout.write("Nothing is an unacceptable username\n")
        return read_username(userin,userout)
    return entered
def read_bracket(userin,userout):
    """Prompt until one of the valid brackets (1400/1620/open) is given;
    input is case-insensitive and returned lower-cased."""
    valid = ["1400","1620","open"]
    userout.write("WHAT... is your bracket? [1400/1620/open] ")
    answer = userin.readline().strip().lower()
    if answer in valid:
        return answer
    userout.write("Your bracket must be one of 1400, 1620, or open\n")
    return read_bracket(userin,userout)
def verify(user,userin,userout,first=True):
    """Interactively confirm *user*'s details, letting the operator fix
    one field at a time and re-confirm; returns the (updated) user."""
    if first :
        userout.write("\n{}\ndoes this look correct? [y/N] ".format(user.to_verify()))
    else :
        userout.write("\n{}\nis everything correct now? [y/N] ".format(user.to_verify()))
    if userin.readline().strip().lower().startswith("y"):
        return user
    # Menu label -> (attribute to overwrite, re-prompt function).
    # NOTE(review): dict iteration order decides the menu numbering here.
    thingmap = {"The name":("name",read_name),"The username":("username",read_username),"The bracket":("bracket",read_bracket)}
    thinglist = [ x for x in thingmap ]
    for x in range(len(thinglist)) :
        userout.write("{}) {}\n".format(x+1,thinglist[x]))
    val = len(thinglist)+1
    userout.write("{}) Nevermind, nothing was wrong.\n".format(val))
    num = numchoose(val,userin,userout)
    if num == val :
        userout.write("Okay.\n")
        return user
    tup = thingmap[thinglist[num-1]]
    user.__dict__[tup[0]] = tup[1](userin,userout)
    return verify(user,userin,userout,False)
def numchoose(maxnum,userin,userout):
    """Read a menu choice in 1..maxnum, re-prompting on non-integer or
    out-of-range input."""
    userout.write("choose the number of what was incorrect: ")
    try :
        choice = int(userin.readline().strip())
    except ValueError :
        userout.write("hey, that was not an integer!\n")
        return numchoose(maxnum,userin,userout)
    if 1 <= choice <= maxnum :
        return choice
    userout.write("that was not a valid choice\n")
    return numchoose(maxnum,userin,userout)
def finalize(user,userin,userout,url,token) :
    """Confirm *user*, POST the registration to *url*, and retry until
    the server answers "Success"; a "Duplicate" reply re-prompts for a
    different username."""
    user = verify(user,userin,userout)
    result = requests.post(url,data=user.to_comm(token))
    if result.text != "Success" :
        if "Duplicate" in result.text:
            userout.write("someone already has that username, please choose a different one.\n")
            user.username = read_username(userin,userout)
        else :
            userout.write("the server did not like your data, here is what it said:\n{}".format(result.text))
        return finalize(user,userin,userout,url,token)
    return user
def interface(userin,userout,printout,logout,url,token):
    """Registration loop: collect details, register with the server, then
    print a credential slip and append a CSV audit line -- forever."""
    while True :
        userout.write("STOP! who would enter the contest must answer me these questions three, ere contest site he see.\n")
        name = read_name(userin,userout)
        username = read_username(userin,userout)
        bracket = read_bracket(userin,userout)
        user = User(name,username,bracket)
        user = finalize(user,userin,userout,url,token)
        printout.write(user.to_printable())
        printout.write("\n\n\n\n")
        logout.write(user.to_csv())

# NOTE(review): "wa" is not a standard open() mode (probably meant "a"),
# and the API token is hard-coded in source control.
interface(sys.stdin,sys.stdout,sys.stdout,open("interface.log","wa"),"http://bdo.pw:5000/user/add","acmsecret")
| Python | 0.000002 | |
f66b799a22f2c74b88f867266c2e51eda1377b1c | Create find_the_mine.py | find_the_mine.py | find_the_mine.py | #Kunal Gautam
#Codewars : @Kunalpod
#Problem name: Find the Mine!
#Problem level: 6 kyu
def mineLocation(field):
    """Return [row, col] of the first cell equal to 1 in *field*.

    Generalised to rectangular grids: the original iterated columns with
    range(len(field)), which only worked for square fields.  Returns
    None when no mine is present (unchanged behaviour).
    """
    for row_idx, row in enumerate(field):
        for col_idx, cell in enumerate(row):
            if cell == 1:
                return [row_idx, col_idx]
    return None
| Python | 0.00075 | |
febc735e79f3cc1b5f2e5fe2882bf28c458f638a | Initialize init file | wikilink/db/__init__.py | wikilink/db/__init__.py | """
wikilink
~~~~~~~~
wiki-link is a web-scraping application to find minimum number
of links between two given wiki pages.
:copyright: (c) 2016 - 2018 by Tran Ly VU. All Rights Reserved.
:license: Apache License 2.0.
"""
__all__ = ["db", "base", "page", "link"]
__author__ = "Tran Ly Vu (vutransingapore@gmail.com)"
__version__ = "1.2.0"
__copyright__ = "Copyright (c) 2016 - 2019 Tran Ly Vu. All Rights Reserved."
__license__ = "Apache License 2.0"
| Python | 0 | |
3fdb673977de57e5555eafb18e36544f3ea8c056 | Solve the absurd problem with an absurd file | selection/absurd.py | selection/absurd.py | import kmeans
import numpy as np
kmeans = reload(kmeans)
# Run the kmeans experiment n_sample times and plot the sorted p-values.
n_sample = 100
p_array = []
for i in range(n_sample):
    if i%10 == 0:
        print i, " / ", n_sample
    # NOTE(review): reload() re-imports the module on every iteration --
    # presumably so kmeans.f sees fresh module-level state; confirm.
    kmeans = reload(kmeans)
    p = kmeans.f(10)
    p_array.append(p)
import matplotlib.pyplot as plt
p_array = sorted(p_array)
# NOTE(review): a float-step arange can occasionally produce a length
# different from len(p_array), which would break the plot call below.
x = np.arange(0, 1, 1./len(p_array));
plt.plot(x, p_array, 'ro')
| Python | 0.999963 | |
c2bce27530f9997bffcb04f80a8d78db65ff98b2 | Create GPS.py | home/kmcgerald/GPS.py | home/kmcgerald/GPS.py | from time import sleep
# MyRobotLab (Jython) script: connect to a serial GPS, print incoming
# fixes, and demo the geofence / distance helpers.
# The geofence and measure distance methods should be available in MRL > 1.0.86
gps1 = Runtime.start("gps1", "GPS")
gps1.connect("/dev/tty.palmOneGPS-GPSSerialOut")
sleep(1)
# define some points ...
# Lets use Nova Labs 1.0
lat1 = 38.950829
lon1 = -77.339502
# and Nova Labs 2.0
lat2 = 38.954471
lon2 = -77.338271
# and the nearest Metro station
lat3 = 38.947254
lon3 = -77.337844
# and the Sand Trap out back
lat4 = 38.954844
lon4 = -77.338797
# Callback invoked by MRL for each published GGA sentence.
# NOTE(review): the name shadows the builtin input(), and
# msg_gps1_publishGGAData is injected by the MRL runtime at call time.
def input():
    startingAngle = 0
    Latitude = msg_gps1_publishGGAData.data[0][2]
    Longitude = msg_gps1_publishGGAData.data[0][4]
    altitude = msg_gps1_publishGGAData.data[0][9]
    print "Lat: " + Latitude
    print "Long: " + Longitude
    print "Alt: " + altitude + "\n"
#have python listening to lidar
gps1.addListener("publishGGAData", python.name, "input")
print "Ready to receive Data from GPS..."
print "Let's put a 100 meter GeoFence around around Nova Labs 2.0"
# create a point based geofence with a 100m radius
geofence = gps1.setPointGeoFence(lat2, lon2, 100)
distance = gps1.calculateDistance(lon1, lat1, lon2, lat2)
# check if a GPS point is inside the fence
if (gps1.checkInside(geofence, lat1, lon1)):
    print "Inside the Fence"
else:
    print "Outside the Fence"
print "Distance (meters): ",distance," between Nova Labs 1.0 and Nova Labs 2.0\n"
distance = gps1.calculateDistance(lon2, lat2, lon3, lat3)
# check if a GPS point is inside the fence
if (gps1.checkInside(geofence, lat3, lon3)):
    print "Inside the Fence"
else:
    print "Outside the Fence"
print "Distance (meters): ",distance, " between NL 2 and the nearest Metro Station\n"
distance = gps1.calculateDistance(lon2, lat2, lon4, lat4)
# check if a GPS point is inside the fence
if (gps1.checkInside(geofence, lat4, lon4)):
    print "Inside the Fence"
else:
    print "Outside the Fence"
print "Distance (meters): ",distance, "between NL 2 and the nearest sand trap\n"
| Python | 0.000007 | |
ddc80392b17a3fadcbea09f82ea5f6936f0fd459 | add fbcode_builder_config for mvfst build in oss | build/fbcode_builder/specs/mvfst.py | build/fbcode_builder/specs/mvfst.py | #!/usr/bin/env python
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import specs.folly as folly
import specs.fizz as fizz
def fbcode_builder_spec(builder):
    """Return the fbcode_builder build spec for mvfst.

    mvfst depends on folly and fizz; the build itself is a CMake install
    driven from the facebookincubator/mvfst GitHub repository.
    """
    cmake_step = builder.fb_github_cmake_install(
        'mvfst/build',
        github_org='facebookincubator',
    )
    return {
        'depends_on': [folly, fizz],
        'steps': [cmake_step],
    }
| Python | 0 | |
84098985420d56d9db375531afb5083e7c7f0d08 | Add an example using pygame (pygameLiveView.py). Connects to camera and show LiveView images via X or console (fbcon/svglib/etc). | src/example/pygameLiveView.py | src/example/pygameLiveView.py |
from pysony import SonyAPI, common_header, payload_header
import argparse
import binascii
import io
import pygame
import os
# Global Variables
options = None
incoming_image = None
frame_sequence = None
frame_info = None
frame_data = None
done = False
parser = argparse.ArgumentParser(prog="pygameLiveView")
# General Options
parser.set_defaults(debug=None, file=None, width=None, height=None)
parser.add_argument("-l", "--large", action="store_true", dest="large", help="Use HighRes liveview (if available)" )
parser.add_argument("-i", "--info", action="store_true", dest="info", help="Enable LiveFrameInfo (if available)" )
parser.add_argument("-z", "--zoom", action="store_true", dest="zoom", help="Zoom image to fill screen" )
options = parser.parse_args()
# Connect and set-up camera
camera = SonyAPI()
#camera = SonyAPI(QX_ADDR='http://192.168.122.1:8080/')
# Check if we need to do 'startRecMode'
mode = camera.getAvailableApiList()
# Need a better method to check for the presence of a camera
if type(mode) != dict:
print "No camera found, aborting"
quit()
# For those cameras which need it
if 'startRecMode' in (mode['result'])[0]:
camera.startRecMode()
if 'setLiveviewFrameInfo' in (mode['result'])[0]:
if options.info:
camera.setLiveviewFrameInfo([{"frameInfo": True}])
else:
camera.setLiveviewFrameInfo([{"frameInfo": False}])
if 'getAvailableLiveviewSize' in (mode['result'])[0]:
if options.large and len((camera.getAvailableLiveviewSize()['result'])[0]) > 1:
incoming = camera.liveview(["L"])
else:
incoming = camera.liveview()
else:
incoming = camera.liveview()
# Use PyGame to display images full screen
disp_no = os.getenv("DISPLAY")
found = False
if disp_no:
pygame.display.init()
found = True
else:
drivers = ['directfb', 'fbcon', 'svgalib', 'dga', 'ggi', 'vgl', 'aalib']
for driver in drivers:
if not os.getenv('SDL_VIDEODRIVER'):
os.putenv('SDL_VIDEODRIVER', driver)
try:
pygame.display.init()
except pygame.error:
print 'Driver: {0} failed.'.format(driver)
continue
found = True
break
if not found:
raise Exception('No suitable video driver found!')
infoObject = pygame.display.Info()
screen = pygame.display.set_mode((infoObject.current_w, infoObject.current_h))
# Loop forever, or until user quits or presses a key
while not done:
for event in pygame.event.get():
if event.type == pygame.QUIT:
done = True
elif event.type == pygame.KEYDOWN:
done = True
# read next image
data = incoming.read(8)
common = common_header(data)
data = incoming.read(128)
if common['payload_type']==1:
payload = payload_header(data)
image_file = io.BytesIO(incoming.read(payload['jpeg_data_size']))
incoming_image = pygame.image.load(image_file)
if options.zoom:
incoming_image = pygame.transform.scale(incoming_image, \
(infoObject.current_w, infoObject.current_h))
incoming.read(payload['padding_size'])
elif common['payload_type']==2:
frame_info = payload_header(data, 2)
if frame_info['jpeg_data_size']:
frame_sequence = common['sequence_number']
frame_data = incoming.read(frame_info['jpeg_data_size'])
incoming.read(frame_info['padding_size'])
# copy image to the display
if incoming_image:
screen.fill((0,0,0))
screen.blit(incoming_image,(0,0))
if frame_info and frame_sequence >= common['sequence_number']-1 \
and payload['jpeg_data_size']:
(left, top, width, height) = incoming_image.get_rect()
left = int(binascii.hexlify(frame_data[0:2]), 16) * width / 10000
top = int(binascii.hexlify(frame_data[2:4]), 16) * height / 10000
right = int(binascii.hexlify(frame_data[4:6]), 16) * width / 10000
bottom = int(binascii.hexlify(frame_data[6:8]), 16) * height / 10000
pygame.draw.lines(screen, 0xffffff, True, \
[(left, top), (right, top), (right, bottom), (left, bottom)], 2)
pygame.display.flip()
| Python | 0 | |
20da50a3c6cee33caf2205562d0d05be6c6721fb | Create enforce_posting_limits.py | enforce_posting_limits.py | enforce_posting_limits.py | #!/usr/bin/python
import sys
import time
import logging
import praw
def main():
    """Poll a subreddit's /new feed and enforce a per-user posting limit.

    Runs forever (the ``running`` flag is never cleared -- stop with
    Ctrl-C), sleeping ``loop_delay`` seconds between polls and handing
    each newly seen submission to ``check_user_submissions``.
    """
    # SET THESE - reddit application configuration
    user_agent = ''
    client_id = ''
    client_secret = ''
    username = ''
    password = ''

    # SET THESE - Customize these for your subreddit.
    subreddit_name = ''
    post_limit_count = 2
    post_limit_hours = 4

    # Adjustable, but you shouldn't have to touch these.
    max_new_submissions = 25
    loop_delay = 119 # seconds

    logging.basicConfig(
        format='%(asctime)s %(levelname)s %(message)s',
        level=logging.INFO
    )
    logging.info('Watching subreddit: %s', subreddit_name)
    logging.info('Current limit set to %d posts in %d hours',
                 post_limit_count, post_limit_hours)

    reddit = praw.Reddit(user_agent=user_agent,
                         client_id=client_id,
                         client_secret=client_secret,
                         username=username,
                         password=password)
    subreddit = reddit.subreddit(subreddit_name)

    # Initial search range will start 10m ago.
    last_new_post_time = time.time() - (60*10)

    # The loop
    running = True
    while running:
        # Fetch the most recent submissions each pass; only those newer
        # than the last one we processed are considered "new".
        submissions = subreddit.new(limit=max_new_submissions)
        new_submissions = []
        for submission in submissions:
            # New submissions will come in newest first.
            # Save the ones newer than last_new_post_time.
            if submission.created_utc > last_new_post_time:
                new_submissions.append(submission)
        logging.debug("New submission count is %d", len(new_submissions))
        if len(new_submissions) > 0:
            new_submissions.reverse()
            # Now they should be oldest first, so last_new_post_time ends
            # up at the newest processed submission.
            for submission in new_submissions:
                stamp = time.strftime("%a, %d %b %Y %H:%M:%S %Z",
                                      time.gmtime(submission.created_utc))
                logging.info('New post "%s" by "%s" at %s',
                             submission.title, submission.author.name, stamp)
                check_user_submissions(subreddit, submission, post_limit_hours,
                                       post_limit_count)
                last_new_post_time = submission.created_utc
        time.sleep(loop_delay)
def check_user_submissions(subreddit, submission, limit_hours, limit_posts):
    """Count the author's recent posts and remove ``submission`` if the
    author exceeded ``limit_posts`` within the last ``limit_hours``.

    :param subreddit: praw Subreddit being moderated.
    :param submission: the newly observed praw Submission.
    :param limit_hours: size of the sliding time window, in hours.
    :param limit_posts: maximum allowed posts within that window.
    """
    start_time = submission.created_utc - (limit_hours * 60 * 60)
    # Exclude the current post from the range check since reddit sometimes
    # doesn't include it (cache?).  It is added back into the count below.
    stop_time = submission.created_utc - 1
    username = submission.author.name
    params = "author:'" + username + "'"
    user_submissions = list(subreddit.submissions(start_time, stop_time, params))
    # +1 accounts for the post excluded from the search range above.
    count = len(user_submissions) + 1
    logging.info('User "%s" post count is %d in the last %d hours.',
                 username, count, limit_hours)
    if count > limit_posts:
        logging.info('Removing the post')
        try:
            subreddit.mod.remove(submission)
        except Exception:
            # The logged-in user may lack removal permission; log the full
            # traceback (this was a bare print, inconsistent with the rest
            # of the module's logging) and keep the bot running.
            logging.exception('Failed to remove post by "%s"', username)
        else:
            msg_link = "/message/compose/?to=/"+subreddit._path
            reply_text = (
                "Your submission was automatically removed because you have "
                "exceeded **{}** submissions within the last **{}** hours.\n\n"
                "*I am a bot, and this action was performed automatically. "
                "Please [contact the moderators of this subreddit]"
                "("+msg_link+") if you have questions or "
                "concerns.*").format(limit_posts, limit_hours)
            submission.reply(reply_text)
if __name__ == '__main__':
main()
| Python | 0.000014 | |
82232f4c52b924c98e42bce0bd56ba604ca93555 | Add 3D Helmholtz problem class | cg-static-condensation/helmholtz.py | cg-static-condensation/helmholtz.py | from firedrake import *
from firedrake.utils import cached_property
class HelmholtzProblem(object):
    """3D Helmholtz model problem solved with Firedrake.

    Assembles ``inner(grad(v), grad(u))*dx + v*u*dx = v*f*dx`` on an
    N x N x N unit cube with continuous Lagrange elements of the requested
    degree, and exposes two solver configurations (plain hypre CG and
    static condensation + hypre) for benchmarking.
    """

    name = "Helmholtz"
    # Names of the solver-parameter properties below, in benchmark order.
    parameter_names = ["scpc_hypre", "hypre"]

    def __init__(self, mesh_size=None, degree=None):
        """Store the discretization parameters.

        :param mesh_size: number of cells per edge of the unit cube.
        :param degree: polynomial degree of the CG function space.
        """
        # Bug fix: this used to be ``super(object, self).__init__()``,
        # which starts the MRO search at the wrong class.
        super(HelmholtzProblem, self).__init__()
        self.degree = degree
        self.mesh_size = mesh_size

    def re_initialize(self, degree=None, mesh_size=None):
        """Reset cached state so the problem can be re-run with new
        parameters; passing ``None`` keeps the current value."""
        if degree is None:
            degree = self.degree
        if mesh_size is None:
            mesh_size = self.mesh_size

        degree_changed = degree != self.degree
        mesh_changed = mesh_size != self.mesh_size
        if not (degree_changed or mesh_changed):
            return
        # Drop every cached property that depends on degree or mesh size.
        for attr in ["function_space", "source",
                     "u", "F", "bcs", "Jp", "output"]:
            try:
                delattr(self, attr)
            except AttributeError:
                pass
        # The mesh itself only needs rebuilding when its size changed.
        if mesh_changed:
            try:
                delattr(self, "mesh")
            except AttributeError:
                pass
        self.degree = degree
        self.mesh_size = mesh_size

    @property
    def hypre(self):
        """Solver parameters: CG Krylov solve preconditioned with hypre
        BoomerAMG."""
        return {"snes_type": "ksponly",
                "ksp_type": "cg",
                "ksp_rtol": 1e-8,
                "ksp_monitor": True,
                "pc_type": "hypre",
                "pc_hypre_type": "boomeramg",
                "pc_hypre_boomeramg_no_CF": True,
                "pc_hypre_boomeramg_coarsen_type": "HMIS",
                "pc_hypre_boomeramg_interp_type": "ext+i",
                "pc_hypre_boomeramg_P_max": 4,
                "pc_hypre_boomeramg_agg_nl": 1}

    @property
    def scpc_hypre(self):
        """Solver parameters: static condensation (CGStaticCondensationPC)
        with the condensed system solved by hypre-preconditioned CG."""
        return {"snes_type": "ksponly",
                "mat_type": "matfree",
                "ksp_type": "preonly",
                "pc_type": "python",
                "pc_python_type": "firedrake.CGStaticCondensationPC",
                "static_condensation": {"ksp_type": "cg",
                                        "ksp_rtol": 1e-8,
                                        "ksp_monitor": True,
                                        "pc_type": "hypre",
                                        "pc_hypre_type": "boomeramg",
                                        "pc_hypre_boomeramg_no_CF": True,
                                        "pc_hypre_boomeramg_coarsen_type": "HMIS",
                                        "pc_hypre_boomeramg_interp_type": "ext+i",
                                        "pc_hypre_boomeramg_P_max": 4,
                                        "pc_hypre_boomeramg_agg_nl": 1}}

    @cached_property
    def mesh(self):
        """The unit-cube mesh, built lazily and cached."""
        return UnitCubeMesh(self.mesh_size,
                            self.mesh_size,
                            self.mesh_size)

    @property
    def comm(self):
        """Communicator of the mesh."""
        return self.mesh.comm

    @cached_property
    def function_space(self):
        """Continuous Lagrange space of the requested degree."""
        return FunctionSpace(self.mesh, "CG", self.degree)

    @cached_property
    def source(self):
        """Right-hand side f = (1 + 108*pi^2)*cos(6*pi*x)*cos(6*pi*y)*cos(6*pi*z),
        chosen so the exact solution is the plain cosine product."""
        x, y, z = SpatialCoordinate(self.mesh)
        f = (1 + 108*pi*pi)*cos(6*pi*x)*cos(6*pi*y)*cos(6*pi*z)
        source = Function(self.function_space, name="source")
        return source.interpolate(f)

    @cached_property
    def u(self):
        """Solution function (used as both unknown and output)."""
        return Function(self.function_space, name="solution")

    @cached_property
    def F(self):
        """Residual form of the Helmholtz problem."""
        v = TestFunction(self.function_space)
        f = self.source
        a = inner(grad(v), grad(self.u))*dx + v*self.u*dx
        L = inner(v, f)*dx
        return a - L

    @cached_property
    def bcs(self):
        """No strong (Dirichlet) boundary conditions are imposed."""
        return None

    @cached_property
    def Jp(self):
        """No separate preconditioning Jacobian."""
        return None

    def solver(self, parameters=None):
        """Build a NonlinearVariationalSolver for this problem.

        :param parameters: PETSc solver options dict, e.g. ``self.hypre``
            or ``self.scpc_hypre``.
        """
        problem = NonlinearVariationalProblem(self.F, self.u, bcs=self.bcs,
                                              Jp=self.Jp)
        solver = NonlinearVariationalSolver(problem,
                                            solver_parameters=parameters)
        return solver

    @cached_property
    def output(self):
        """Fields to write out (just the solution)."""
        return (self.u,)
| Python | 0.000024 | |
d8ff61b72c07a9f0b22e5cbaefe6277bf2697afc | Create project.py | project_surgery/project.py | project_surgery/project.py | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Gideoni Silva (Omnes)
# Copyright 2013-2014 Omnes Tecnologia
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm, osv, fields
class project(orm.Model):
    """Extend ``project.project`` with surgery-tracking fields:
    doctor, patient, hospital and the surgical boxes used."""

    _inherit = "project.project"

    _columns = {
        # Surgeon performing the operation; restricted to individuals.
        'doctor_id': fields.many2one(
            'res.partner', 'Doctor',
            domain = "[('is_company','=',False)]",
            required=True,change_default=True, select=True, track_visibility='always'
        ),
        # Patient undergoing surgery; restricted to individuals.
        'patient_id': fields.many2one(
            'res.partner', 'Patient',
            domain = "[('is_company','=',False)]",
            required=True,change_default=True, select=True, track_visibility='always'
        ),
        # Hospital where the surgery takes place; companies only.
        'hospital_id': fields.many2one(
            'res.partner', 'Hospital',
            domain = "[('is_company','=',True)]",
            required=True,change_default=True, select=True, track_visibility='always'),
        # Surgical boxes (stock.tracking records) used during the procedure.
        'box_ids': fields.many2many(
            'stock.tracking','project_stock_track_rel','project_id','stock_tracking_id',
            string='Used Surgical Boxes ',
            help="Selecione as Caixas Cirúrgicas para a Cirurgia"
        )
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| Python | 0.000001 | |
afb62cebced6bcbbbde2576be2d9b9d4b9ad3964 | add chisquare test comparing random sample with cdf (first try of commit) | scipy/stats/tests/test_discrete_chisquare.py | scipy/stats/tests/test_discrete_chisquare.py |
import numpy as np
from scipy import stats
debug = False
def check_discrete_chisquare(distname, arg, alpha=0.01, debug=None):
    '''perform chisquare test for a random sample of a discrete distribution

    Draws 50000 variates, bins them into intervals each holding at least
    1/20 of the probability mass, and chisquare-tests the observed
    frequencies against the expected cdf differences.

    Parameters
    ----------
    distname : string
        name of distribution function in scipy.stats
    arg : sequence
        shape parameters of the distribution
    alpha : float
        significance level, threshold for the p-value
    debug : bool or None
        print diagnostics; ``None`` falls back to the module-level
        ``debug`` flag for backwards compatibility.

    Raises
    ------
    AssertionError
        if the chisquare p-value falls below ``alpha``.
    '''
    if debug is None:
        debug = globals().get('debug', False)

    # define parameters for test
    n = 50000
    nsupp = 20
    wsupp = 1.0 / nsupp

    distfn = getattr(stats, distname)
    rvs = distfn.rvs(size=n, *arg)

    # construct intervals with minimum mass 1/nsupp;
    # intervals are left-half-open as in a cdf difference.
    # int() casts keep range() happy on Python 3 (xrange tolerated floats).
    distsupport = range(int(max(distfn.a, -1000)),
                        int(min(distfn.b, 1000)) + 1)
    last = 0
    distsupp = [max(distfn.a, -1000)]
    distmass = []
    for ii in distsupport:
        current = distfn.cdf(ii, *arg)
        if current - last >= wsupp - 1e-14:
            distsupp.append(ii)
            distmass.append(current - last)
            last = current
            if current > (1 - wsupp):
                break
    if distsupp[-1] < distfn.b:
        distsupp.append(distfn.b)
        distmass.append(1 - last)
    distsupp = np.array(distsupp)
    distmass = np.array(distmass)

    # convert intervals to right-half-open as required by histogram
    histsupp = distsupp + 1e-8
    histsupp[0] = distfn.a

    # find sample frequencies and perform chisquare test
    # (np.histogram's long-removed ``new=True`` keyword is dropped)
    freq, hsupp = np.histogram(rvs, histsupp)
    cdfs = distfn.cdf(distsupp, *arg)
    (chis, pval) = stats.chisquare(np.array(freq), n * distmass)

    # print and return results
    if debug:
        print('chis,pval:', chis, pval)
        print('len(distsupp), len(distmass), len(hsupp), len(freq)')
        print(len(distsupp), len(distmass), len(hsupp), len(freq))
        print('distsupp', distsupp)
        print('distmass', n * np.array(distmass))
        print('freq', freq)
        # np.unique replaces scipy's removed stats.itemfreq
        print('itemfreq', np.unique(rvs, return_counts=True))
        print('n*pmf', n * distfn.pmf(list(distsupport)[:10], *arg))

    assert (pval > alpha), 'chisquare - test for %s' \
           'at arg = %s' % (distname, str(arg))
def test_discrete_rvs_cdf():
    # nose-style generator test: yields one chisquare check per
    # (distribution name, shape parameters) pair below.
    # NOTE(review): generator tests need nose; pytest>=4 / unittest will
    # not execute the yielded checks -- confirm the intended runner.
    distdiscrete = [
        ['bernoulli',(0.3,)],
        ['binom', (5, 0.4)],
        ['boltzmann',(1.4, 19)],
        ['dlaplace', (0.8,)],
        ['geom', (0.5,)],
        ['hypergeom',(30, 12, 6)],
        ['logser', (0.6,)],
        ['nbinom', (5, 0.5)],
        ['planck', (4.1,)],
        ['poisson', (0.6,)],
        ['randint', (7, 31)],
        ['zipf', (2,)] ]

    for distname, arg in distdiscrete:
        if debug:
            # Python 2 print statement (module-level ``debug`` flag)
            print distname
        yield check_discrete_chisquare, distname, arg
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| Python | 0.000002 | |
e87982d03edeb7c16d3c183309adfff4be50d168 | Add Qt4 file to start on creating a Qt-based GUI | gui/qt.py | gui/qt.py | from lib.version import AMON_VERSION
from lib.keybase import KeybaseUser
from lib.gmail import GmailUser
from lib.addresses import AddressBook
import lib.gpg as gpg
import sys
import logging
import json
from PyQt4 import QtGui
class Amon(QtGui.QMainWindow):
    """Main window for the Amon Qt GUI; wires up the backend helpers."""

    def __init__(self):
        super(Amon, self).__init__()
        # Backend service wrappers, constructed eagerly with the window.
        self.keybase_user = KeybaseUser()   # Keybase account access
        self.gmail = GmailUser()            # Gmail account access
        self.address_book = AddressBook()   # contact/address storage
3972c4a16894732db418a2d04f36b5104e0fac86 | add rms code in own namespace | tkp/quality/rms.py | tkp/quality/rms.py | from tkp.utility import nice_format
def rms_invalid(rms, noise, low_bound=1, high_bound=50):
    """
    Check whether an image RMS value lies outside the acceptable band.

    :param rms: RMS value of an image, can be computed with
        tkp.quality.statistics.rms
    :param noise: theoretical noise level of the instrument, can be
        calculated with tkp.lofar.noise.noise_level
    :param low_bound: multiplied with noise to define the lower threshold
    :param high_bound: multiplied with noise to define the upper threshold
    :returns: False when rms is within [low_bound*noise, high_bound*noise];
        otherwise a (truthy) human-readable message string describing how
        many times the theoretical noise the RMS is.
    """
    lower = noise * low_bound
    upper = noise * high_bound
    if lower <= rms <= upper:
        return False
    return "rms value (%s) is %s times theoretical noise (%s)" % (
        nice_format(rms), nice_format(rms / noise), nice_format(noise))
5b34a265513381ec60def83885e754484f280c37 | Create create_html.py | create_html.py | create_html.py | import os
import webbrowser
def create_html():
#p = subprocess.Popen('cat '+CurrPath+' | grep -B 1 -A 1 '+email, stdout=subprocess.PIPE, shell=True)
#(output, err) = p.communicate()
html_top="""
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>Breach Miner</title>
<script type="text/javascript">
var collapseDivs, collapseLinks;
function createDocumentStructure (tagName) {
if (document.getElementsByTagName) {
var elements = document.getElementsByTagName(tagName);
collapseDivs = new Array(elements.length);
collapseLinks = new Array(elements.length);
for (var i = 0; i < elements.length; i++) {
var element = elements[i];
var siblingContainer;
if (document.createElement &&
(siblingContainer = document.createElement('div')) &&
siblingContainer.style)
{
var nextSibling = element.nextSibling;
element.parentNode.insertBefore(siblingContainer, nextSibling);
var nextElement = elements[i + 1];
while (nextSibling != nextElement && nextSibling != null) {
var toMove = nextSibling;
nextSibling = nextSibling.nextSibling;
siblingContainer.appendChild(toMove);
}
siblingContainer.style.display = 'none';
collapseDivs[i] = siblingContainer;
createCollapseLink(element, siblingContainer, i);
}
else {
// no dynamic creation of elements possible
return;
}
}
createCollapseExpandAll(elements[0]);
}
}
function createCollapseLink (element, siblingContainer, index) {
var span;
if (document.createElement && (span = document.createElement('span'))) {
span.appendChild(document.createTextNode(String.fromCharCode(160)));
var link = document.createElement('a');
link.collapseDiv = siblingContainer;
link.href = '#';
link.appendChild(document.createTextNode('expand'));
link.onclick = collapseExpandLink;
collapseLinks[index] = link;
span.appendChild(link);
element.appendChild(span);
}
}
function collapseExpandLink (evt) {
if (this.collapseDiv.style.display == '') {
this.parentNode.parentNode.nextSibling.style.display = 'none';
this.firstChild.nodeValue = 'expand';
}
else {
this.parentNode.parentNode.nextSibling.style.display = '';
this.firstChild.nodeValue = 'collapse';
}
if (evt && evt.preventDefault) {
evt.preventDefault();
}
return false;
}
function createCollapseExpandAll (firstElement) {
var div;
if (document.createElement && (div = document.createElement('div'))) {
var link = document.createElement('a');
link.href = '#';
link.appendChild(document.createTextNode('expand all'));
link.onclick = expandAll;
div.appendChild(link);
div.appendChild(document.createTextNode(' '));
link = document.createElement('a');
link.href = '#';
link.appendChild(document.createTextNode('collapse all'));
link.onclick = collapseAll;
div.appendChild(link);
firstElement.parentNode.insertBefore(div, firstElement);
}
}
function expandAll (evt) {
for (var i = 0; i < collapseDivs.length; i++) {
collapseDivs[i].style.display = '';
collapseLinks[i].firstChild.nodeValue = 'collapse';
}
if (evt && evt.preventDefault) {
evt.preventDefault();
}
return false;
}
function collapseAll (evt) {
for (var i = 0; i < collapseDivs.length; i++) {
collapseDivs[i].style.display = 'none';
collapseLinks[i].firstChild.nodeValue = 'expand';
}
if (evt && evt.preventDefault) {
evt.preventDefault();
}
return false;
}
</script>
<script type="text/javascript">
window.onload = function (evt) {
createDocumentStructure('h4');
}
</script>
<link rel="stylesheet" type="text/css" href="Resources/style.css" />
</head>
<body style="background-color:#000000">
<div style="color:#ff0000; height: 161px"><pre>
$$$$$$$\ $$\ $$\ $$\ $$\
$$ __$$\ $$ | $$$\ $$$ |\__|
$$ | $$ | $$$$$$\ $$$$$$\ $$$$$$\ $$$$$$$\ $$$$$$$\ $$$$\ $$$$ |$$\ $$$$$$$\ $$$$$$\ $$$$$$\
$$$$$$$\ |$$ __$$\ $$ __$$\ \____$$\ $$ _____|$$ __$$\ $$\$$\$$ $$ |$$ |$$ __$$\ $$ __$$\ $$ __$$\
$$ __$$\ $$ | \__|$$$$$$$$ | $$$$$$$ |$$ / $$ | $$ |$$ \$$$ $$ |$$ |$$ | $$ |$$$$$$$$ |$$ | \__|
$$ | $$ |$$ | $$ ____|$$ __$$ |$$ | $$ | $$ |$$ |\$ /$$ |$$ |$$ | $$ |$$ ____|$$ |
$$$$$$$ |$$ | \$$$$$$$\ \$$$$$$$ |\$$$$$$$\ $$ | $$ |$$ | \_/ $$ |$$ |$$ | $$ |\$$$$$$$\ $$ |
\_______/ \__| \_______| \_______| \_______|\__| \__|\__| \__|\__|\__| \__| \_______|\__|
</pre>
</div> <div style="color:#ff0000; text-align:left;"><pre>Author : @dH4wk</pre></div>
<div style="color:#ff0000; text-align:left;"><pre>Twitter : https://twitter.com/dH4wk</pre></div>
<br><div class="b"><i><b>Anlalysis Completed !!! See below for the details </i></b> :)</div><br>
"""
return html_top
def invokeBrowser():
path = os.getcwd()+'/Files/Results.html'
webbrowser.open(path, 2)
| Python | 0.000011 | |
5009b158f0c47ea885ba5fdcbd76dd1fc2bb6986 | Use a script to post metrics to an ingest endpoint. | bfclient.py | bfclient.py | #!/usr/bin/env python
import argparse
from os import environ
import datetime
import requests
import time
def get_unix_time(dt):
    """Convert a naive local-time datetime to an integer Unix timestamp."""
    seconds = time.mktime(dt.timetuple())
    return int(seconds)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
BF_URL = environ.get('BF_URL', None)
BF_TOKEN = environ.get('BF_TOKEN', None)
parser.add_argument('--debug', action='store_true',
help='Display additional info.')
parser.add_argument('--url', type=str, action='store', default=BF_URL,
help='The endpoint to send HTTP requests to.')
parser.add_argument('--token', type=str, action='store', default=BF_TOKEN,
help='The authentication token of the account making '
'the request')
subs = parser.add_subparsers(help='subparsers?', dest='command')
ingest_sub = subs.add_parser('ingest', help='Send metrics to blueflood.')
ingest_sub.add_argument('tenant')
ingest_sub.add_argument(metavar='metric-name', dest='metric_name')
ingest_sub.add_argument('unit',
choices=('minutes', 'hours', 'days', 'months',
'years', 'decades'))
ingest_sub.add_argument('value', type=int)
ingest_sub.add_argument('--ttl-seconds', type=int, default=172800)
ingest_sub.add_argument('--collection-time')
args = parser.parse_args()
print('args: {}'.format(args))
if args.command == 'ingest':
base_url = args.url
if not base_url:
print('Error: No url specified.')
exit(1)
tenant = args.tenant
metric_name = args.metric_name
unit = args.unit
value = args.value
ttl_seconds = args.ttl_seconds
collection_time = args.collection_time
if collection_time is None:
collection_time = datetime.datetime.now()
url = '{}/v2.0/{}/ingest/multi'.format(base_url, tenant)
payload = [{
'tenantId': str(tenant),
'metricName': metric_name,
'unit': unit,
'metricValue': value,
'ttlInSeconds': ttl_seconds,
'collectionTime': get_unix_time(collection_time) * 1000
}]
request = requests.Request('POST', url, json=payload)
if args.token:
request.headers['X-Auth-Token'] = args.token
preq = request.prepare()
if args.debug:
print('Sending:')
print(' {} {}'.format(preq.method, preq.path_url))
for name, value in preq.headers.iteritems():
print(' {}: {}'.format(name, value))
if preq.body:
print('')
print(' {}'.format(preq.body))
print('')
session = requests.session()
response = session.send(preq)
if args.debug:
print('')
print('Received:')
print(' {} {}'.format(response.status_code, response.reason))
for name, value in response.headers.iteritems():
print(' {}: {}'.format(name, value))
print('')
if response.text:
print(' {}'.format(response.text))
success = 200 <= response.status_code < 300
print(response.text)
exit(0 if success else 1)
# print(payload_dict)
# exit(0)
else:
print('Unknown command "{}"'.format(args.command))
| Python | 0 | |
1613bde53cfda3d38d7e62c6c91f3d6c5407fb9c | Add script inspect_checkpoint.py to check if a model checkpoint is corrupted with NaN/inf values | inspect_checkpoint.py | inspect_checkpoint.py | """
Simple script that checks if a checkpoint is corrupted with any inf/NaN values. Run like this:
python inspect_checkpoint.py model.12345
"""
import tensorflow as tf
import sys
import numpy as np
if __name__ == '__main__':
if len(sys.argv) != 2:
raise Exception("Usage: python inspect_checkpoint.py <file_name>\nNote: Do not include the .data .index or .meta part of the model checkpoint in file_name.")
file_name = sys.argv[1]
reader = tf.train.NewCheckpointReader(file_name)
var_to_shape_map = reader.get_variable_to_shape_map()
finite = []
all_infnan = []
some_infnan = []
for key in sorted(var_to_shape_map.keys()):
tensor = reader.get_tensor(key)
if np.all(np.isfinite(tensor)):
finite.append(key)
else:
if not np.any(np.isfinite(tensor)):
all_infnan.append(key)
else:
some_infnan.append(key)
print "\nFINITE VARIABLES:"
for key in finite: print key
print "\nVARIABLES THAT ARE ALL INF/NAN:"
for key in all_infnan: print key
print "\nVARIABLES THAT CONTAIN SOME FINITE, SOME INF/NAN VALUES:"
for key in some_infnan: print key
print ""
if not all_infnan and not some_infnan:
print "CHECK PASSED: checkpoint contains no inf/NaN values"
else:
print "CHECK FAILED: checkpoint contains some inf/NaN values"
| Python | 0.000001 | |
d5e16fdf73eb281da3541fa7a0e3f8792b83faeb | bump to 0.3.0 | tproxy/__init__.py | tproxy/__init__.py | # -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 3, 0)
__version__ = ".".join(map(str, version_info))
| # -*- coding: utf-8 -
#
# This file is part of tproxy released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 2, 4)
__version__ = ".".join(map(str, version_info))
| Python | 0.000019 |
bb5a94208bb3a96995182b773998dbec4ebf7667 | Test wrapper py script | py_scripts/EoSeval_test.py | py_scripts/EoSeval_test.py | # -*- coding: utf-8 -*-
"""
Code description goes in here
"""
import numpy
import EoSeq
from scipy.optimize import curve_fit
# Prompt user for filename string
# filename = raw_input("Please enter a file path for P and V data")
# Load in data file
# data = numpy.loadtxt(filename, delimiter = ',')
data = numpy.loadtxt("/Users/Grace/Documents/EoSeval/data/ferropericlase_Mao_2011_2000K.csv", delimiter = ',')
init_params = [0,0,0,0]
testfunc = BM3EOS(init_params)
BM3 = EOS(testfunc)
| Python | 0.000001 | |
9a082b04973a9927014df496aa31f5c05e8be6ca | add 143 | python/143_reorder_list.py | python/143_reorder_list.py | """
Given a singly linked list L: L0→L1→…→Ln-1→Ln,
reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…
You must do this in-place without altering the nodes' values.
For example,
Given {1,2,3,4}, reorder it to {1,4,2,3}.
"""
# Definition for singly-linked list.
# class ListNode(object):
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution(object):
    def reorderList(self, head):
        """
        Rearrange L0->L1->...->Ln in place into L0->Ln->L1->Ln-1->...

        :type head: ListNode
        :rtype: void Do not return anything, modify head in-place instead.
        """
        if not head or not head.next:
            return

        # Locate the end of the first half (it keeps ceil(n/2) nodes)
        # and detach the second half.
        slow, fast = head, head.next
        while fast and fast.next:
            slow = slow.next
            fast = fast.next.next
        second = slow.next
        slow.next = None

        # Reverse the detached second half iteratively.
        prev = None
        node = second
        while node:
            node.next, prev, node = prev, node, node.next
        second = prev

        # Interleave; the first half is never shorter than the reversed
        # second half, so the loop is driven by ``second``.
        first = head
        while second:
            first.next, second.next, first, second = (
                second, first.next, first.next, second.next)
from singlyLinkedList import singlyLinkedList
a = singlyLinkedList([1,2,3,4,5,6])
a.printNodes()
soln = Solution()
soln.reorderList(a.head)
a.printNodes()
| Python | 0.999998 | |
c91c8f56940ba60190f771ef7731169e68b2053e | Create functions.py | python/openCV/functions.py | python/openCV/functions.py | import numpy as np
import cv2
def nothing(x=None):
    """No-op callback for cv2.createTrackbar.

    OpenCV invokes trackbar callbacks with the new slider position, so the
    original zero-argument signature raised TypeError on every slider
    move; the optional parameter keeps both ``nothing()`` and
    ``nothing(pos)`` valid.
    """
    pass
def Rvalue(x):
    """Trackbar callback for the red channel: echo the position back."""
    return x


def Gvalue(x):
    """Trackbar callback for the green channel: echo the position back."""
    return x


def Bvalue(x):
    """Trackbar callback for the blue channel: echo the position back."""
    return x
img = np.zeros((512, 512, 3), np.uint8)
drawing = False # true if mouse is pressed
mode = True # if True, draw rectangle. Press 'm' to toggle to curve
ix,iy = -1,-1
def draw_circle(event,x,y,flags,param):
global ix,iy,drawing,mode
if event == cv2.EVENT_LBUTTONDOWN:
drawing = True
ix,iy = x,y
elif event == cv2.EVENT_MOUSEMOVE:
if drawing == True:
if mode == True:
cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
else:
cv2.circle(img,(x,y),5,(0,0,255),-1)
elif event == cv2.EVENT_LBUTTONUP:
drawing = False
if mode == True:
cv2.rectangle(img,(ix,iy),(x,y),(0,255,0),-1)
else:
cv2.circle(img,(x,y),5,(0,0,255),-1)
def freePint():
cv2.namedWindow('image')
switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'image', 0, 1, nothing)
cv2.setMouseCallback('image', draw_circle)
while (1):
cv2.imshow('image', img)
s = cv2.getTrackbarPos(switch, 'image')
k = cv2.waitKey(1)
if s == 0:
mode = False
if s == 1:
mode = True
elif k == 27:
break
cv2.destroyAllWindows()
def trackbar():
# Create a black image, a window
img = np.zeros((300,512,3), np.uint8)
cv2.namedWindow('image')
# create trackbars for color change
cv2.createTrackbar('R','image',0,255,Rvalue)
cv2.createTrackbar('G','image',0,255,Gvalue)
cv2.createTrackbar('B','image',0,255,Bvalue)
# create switch for ON/OFF functionality
switch = '0 : OFF \n1 : ON'
cv2.createTrackbar(switch, 'image',0,1,nothing)
while(1):
cv2.imshow('image',img)
k = cv2.waitKey(1)
if k == 27:
break
# get current positions of four trackbars
r = cv2.getTrackbarPos('R','image')
g = cv2.getTrackbarPos('G','image')
b = cv2.getTrackbarPos('B','image')
s = cv2.getTrackbarPos(switch,'image')
if s == 0:
img[:] = 0
else:
img[:] = [b,g,r]
cv2.destroyAllWindows();
def dcircle():
trackbar()
img = np.zeros((512, 512, 3), np.uint8)
img = cv2.circle(img, (447, 63), 63, (Rvalue, Gvalue, Bvalue), -1)
cv2.imshow('figer circle', img)
def print_func(par):
return ("Hello" , par);
def drowit():
# Create a black image
img = np.zeros((512, 512, 3), np.uint8)
# cv2.imshow('fig1',img)
# cv2.waitKey()
# Draw a diagonal blue line with thickness of 5 px
img = cv2.line(img, (0, 0), (511, 511), (255, 0, 0), 10)
img = cv2.rectangle(img, (384, 0), (510, 128), (0, 255, 0), 3)
img = cv2.circle(img, (447, 63), 63, (0, 120, 255), -1)
font = cv2.FONT_HERSHEY_SIMPLEX
cv2.putText(img, 'OpenCV', (10, 500), font, 4, (255, 255, 255), 2)
cv2.imshow('fig1', img)
def saveimage ():
cv2.imwrite("image_processed.png", img) #the name of new image
| Python | 0.000006 | |
38756d3fd7ac1d858d45f256e8d4ad118ecbf531 | add basic admin file | emencia/django/socialaggregator/admin.py | emencia/django/socialaggregator/admin.py | """Admin for parrot.gallery"""
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from emencia.django.socialaggregator.models import Feed
from emencia.django.socialaggregator.models import Aggregator
from emencia.django.socialaggregator.models import Ressource
class FeedAdmin(admin.ModelAdmin):
    """Default admin interface for the Feed model (no customizations yet)."""
    pass
admin.site.register(Feed, FeedAdmin)
class AggregatorAdmin(admin.ModelAdmin):
    """Default admin interface for the Aggregator model (no customizations yet)."""
    pass
admin.site.register(Aggregator, AggregatorAdmin)
class RessourceAdmin(admin.ModelAdmin):
    """Default admin interface for the Ressource model (no customizations yet)."""
    pass
admin.site.register(Ressource, RessourceAdmin)
| Python | 0 | |
d53ec3fefddda14e6d7fad466f5e81d3ed369330 | Add sfp_numverify module | modules/sfp_numverify.py | modules/sfp_numverify.py | #-------------------------------------------------------------------------------
# Name: sfp_numverify
# Purpose: SpiderFoot plug-in to search numverify.com API for a phone number
# and retrieve location and carrier information.
#
# Author: <bcoles@gmail.com>
#
# Created: 2019-05-25
# Copyright: (c) bcoles 2019
# Licence: GPL
#-------------------------------------------------------------------------------
import json
import re
import urllib
import time
from sflib import SpiderFoot, SpiderFootPlugin, SpiderFootEvent
class sfp_numverify(SpiderFootPlugin):
    """numverify:Footprint,Investigate,Passive:Real World::Lookup phone number location and carrier information."""
    # NOTE: the docstring above is SpiderFoot's machine-parsed module
    # descriptor (name:use cases:group::description) -- do not reformat it.
    # Default options
    opts = {
        'api_key': ''
    }
    # Option descriptions
    optdescs = {
        'api_key': 'numverify API key.'
    }
    # Per-scan set of already-handled phone numbers (re-created in setup()).
    results = dict()
    # Latched True on fatal API errors so later events are skipped.
    errorState = False
    def setup(self, sfc, userOpts=dict()):
        """Reset per-scan state and merge user-supplied options."""
        self.sf = sfc
        self.__dataSource__ = "numverify"
        self.results = dict()
        self.errorState = False
        for opt in userOpts.keys():
            self.opts[opt] = userOpts[opt]
    # What events is this module interested in for input
    def watchedEvents(self):
        return ['PHONE_NUMBER']
    # What events this module produces
    def producedEvents(self):
        return ['RAW_RIR_DATA', 'GEOINFO', 'PROVIDER_TELCO']
    # Query numverify API for the specified phone number
    # https://numverify.com/documentation
    def query(self, qry):
        """Return numverify's parsed JSON for ``qry``, or None on any error
        (fatal API errors also latch self.errorState)."""
        # str.strip() only removes leading/trailing characters: this drops a
        # leading '+' and surrounding parentheses from the phone number.
        number = qry.strip('+').strip('(').strip(')')
        params = {
            'number': number.encode('raw_unicode_escape'),
            'country_code': '',
            'format': '0', # set to "1" for prettified debug output
            'access_key': self.opts['api_key']
        }
        # Free API does not support HTTPS for no adequately explained reason
        res = self.sf.fetchUrl("http://apilayer.net/api/validate?" + urllib.urlencode(params),
                               timeout=self.opts['_fetchtimeout'],
                               useragent=self.opts['_useragent'])
        # Be polite to the rate-limited API.
        time.sleep(1)
        if res['content'] is None:
            self.sf.debug('No response from apilayer.net')
            return None
        # NOTE(review): 101/102/104 are numverify *API* error codes; confirm
        # fetchUrl surfaces them in res['code'] rather than only the HTTP
        # status, otherwise these branches never trigger.
        if res['code'] == '101':
            self.sf.error('API error: invalid API key', False)
            self.errorState = True
            return None
        if res['code'] == '102':
            self.sf.error('API error: user account deactivated', False)
            self.errorState = True
            return None
        if res['code'] == '104':
            self.sf.error('API error: usage limit exceeded', False)
            self.errorState = True
            return None
        try:
            data = json.loads(res['content'])
        except BaseException as e:
            self.sf.debug('Error processing JSON response: ' + str(e))
            return None
        if data.get('error') is not None:
            self.sf.error('API error: ' + str(data.get('error')), False)
            return None
        return data
    # Handle events sent to this module
    def handleEvent(self, event):
        """Look up a PHONE_NUMBER event and emit raw data, location and
        carrier events for whatever the API returned."""
        eventName = event.eventType
        srcModuleName = event.module
        eventData = event.data
        if self.errorState:
            return None
        if self.opts['api_key'] == "":
            self.sf.error("You enabled sfp_numverify but did not set an API key!", False)
            self.errorState = True
            return None
        # Only process each phone number once per scan.
        if eventData in self.results:
            return None
        self.results[eventData] = True
        self.sf.debug("Received event, " + eventName + ", from " + srcModuleName)
        data = self.query(eventData)
        if data is None:
            self.sf.debug("No phone information found for " + eventData)
            return None
        # Always emit the raw response for downstream modules.
        evt = SpiderFootEvent("RAW_RIR_DATA", str(data), self.__name__, event)
        self.notifyListeners(evt)
        if data.get('location') is not None and data.get('country_code') is not None:
            location = data.get('location') + ', ' + data.get('country_code')
            evt = SpiderFootEvent("GEOINFO", location, self.__name__, event)
            self.notifyListeners(evt)
        else:
            self.sf.debug("No location information found for " + eventData)
        if data.get('carrier') is not None:
            evt = SpiderFootEvent("PROVIDER_TELCO", data.get('carrier'), self.__name__, event)
            self.notifyListeners(evt)
        else:
            self.sf.debug("No carrier information found for " + eventData)
# End of sfp_numverify class
| Python | 0.000002 | |
c2f96872e92c0cbe6f484e85a75ad32132473a13 | Migrate in from rdfextras. | rdflib_jsonld/ldcontext.py | rdflib_jsonld/ldcontext.py | # -*- coding: utf-8 -*-
"""
Implementation of a Linked Data Context structure based on the JSON-LD
definition of contexts. See:
http://json-ld.org/
"""
# from __future__ import with_statement
from urlparse import urljoin
try:
import json
except ImportError:
import simplejson as json
from rdflib.namespace import RDF, split_uri
from rdflib.parser import create_input_source
# Expanded IRI of rdf:type, used by Context.shrink() as a compaction target.
RDF_TYPE = unicode(RDF.type)
# JSON-LD keyword strings (see http://json-ld.org/).
CONTEXT_KEY = '@context'
LANG_KEY = '@language'
ID_KEY = '@id'
TYPE_KEY = '@type'
LITERAL_KEY = '@value'
LIST_KEY = '@list'
CONTAINER_KEY = '@container' # EXPERIMENTAL
SET_KEY = '@set' # EXPERIMENTAL
REV_KEY = '@rev' # EXPERIMENTAL
# Keywords that a context is allowed to alias (e.g. {"iri": "@id"}).
KEYS = set([LANG_KEY, ID_KEY, TYPE_KEY, LITERAL_KEY, LIST_KEY, REV_KEY])
class Context(object):
    """A JSON-LD context: a two-way mapping between short terms/CURIEs and
    full IRIs, tracking per-term type coercion, container behavior and the
    default language (see http://json-ld.org/).
    """

    def __init__(self, source=None):
        self._key_map = {}   # keyword (e.g. u'@id') -> user-defined alias
        self._iri_map = {}   # expanded IRI -> Term
        self._term_map = {}  # short key -> Term
        self.lang = None
        if source:
            self.load(source)

    terms = property(lambda self: self._term_map.values())

    # Each *_key property resolves to the user-supplied alias for the JSON-LD
    # keyword if this context defined one, else the keyword itself.
    context_key = CONTEXT_KEY
    lang_key = property(lambda self: self._key_map.get(LANG_KEY, LANG_KEY))
    id_key = property(lambda self: self._key_map.get(ID_KEY, ID_KEY))
    type_key = property(lambda self: self._key_map.get(TYPE_KEY, TYPE_KEY))
    literal_key = property(lambda self: self._key_map.get(LITERAL_KEY, LITERAL_KEY))
    list_key = property(lambda self: self._key_map.get(LIST_KEY, LIST_KEY))
    container_key = CONTAINER_KEY
    set_key = SET_KEY
    rev_key = property(lambda self: self._key_map.get(REV_KEY, REV_KEY))

    def load(self, source, base=None, visited_urls=None):
        """Merge context definitions from ``source`` (a dict, a list of
        dicts/URL strings, or anything carrying an ``@context`` entry).

        ``base`` resolves relative remote-context URLs; ``visited_urls``
        tracks already-fetched URLs to avoid re-fetching them.
        """
        if CONTEXT_KEY in source:
            source = source[CONTEXT_KEY]
        if isinstance(source, list):
            sources = source
        else:
            sources = [source]
        terms, simple_terms = [], []
        for obj in sources:
            if isinstance(obj, basestring):
                # A remote context reference: fetch it and merge recursively.
                url = urljoin(base, obj)
                visited_urls = visited_urls or []
                if url in visited_urls:
                    # Already merged; skipping prevents cyclic remote
                    # contexts from recursing forever (the original only
                    # recorded URLs without ever checking them).
                    continue
                visited_urls.append(url)
                sub_defs = source_to_json(url)
                self.load(sub_defs, base, visited_urls)
                continue
            for key, value in obj.items():
                if key == LANG_KEY:
                    self.lang = value
                elif isinstance(value, unicode) and value in KEYS:
                    # A user alias for a JSON-LD keyword, e.g. {"iri": "@id"}.
                    self._key_map[value] = key
                else:
                    term = self._create_term(key, value)
                    if term.coercion:
                        terms.append(term)
                    else:
                        simple_terms.append(term)
        # Register simple (prefix-like) terms first so coercing terms can
        # resolve their CURIEs against them.
        for term in simple_terms + terms:
            # TODO: expansion for these should be done by recursively looking
            # up keys in source (would also avoid this use of simple_terms).
            if term.iri:
                term.iri = self.expand(term.iri)
            if term.coercion:
                term.coercion = self.expand(term.coercion)
            self.add_term(term)

    def _create_term(self, key, dfn):
        """Build a Term from either an expanded definition dict or a plain
        IRI/CURIE string."""
        if isinstance(dfn, dict):
            iri = dfn.get(ID_KEY)
            coercion = dfn.get(TYPE_KEY)
            container = dfn.get(CONTAINER_KEY)
            if not container and dfn.get(LIST_KEY) is True:
                # Legacy form: {"@list": true} implies a list container.
                container = LIST_KEY
            return Term(iri, key, coercion, container)
        else:
            iri = self.expand(dfn)
            return Term(iri, key)

    def add_term(self, term):
        """Index ``term`` by both its expanded IRI and its short key."""
        self._iri_map[term.iri] = term
        self._term_map[term.key] = term

    def get_term(self, iri):
        """Return the Term registered for ``iri``, or None."""
        return self._iri_map.get(iri)

    def shrink(self, iri):
        """Compact ``iri`` to a term key, a CURIE, or (failing both) return
        it unchanged."""
        iri = unicode(iri)
        term = self._iri_map.get(iri)
        if term:
            return term.key
        if iri == RDF_TYPE:
            # NOTE: only if no term for the rdf:type IRI is defined
            return self.type_key
        try:
            ns, name = split_uri(iri)
            term = self._iri_map.get(ns)
            if term:
                return ":".join((term.key, name))
        except Exception:
            # split_uri raises for IRIs it cannot partition; fall through and
            # return the IRI as-is. (Was a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
        return iri

    def expand(self, term_curie_or_iri):
        """Expand a term or CURIE to a full IRI; unknown input is returned
        unchanged."""
        term_curie_or_iri = unicode(term_curie_or_iri)
        if ':' in term_curie_or_iri:
            pfx, term = term_curie_or_iri.split(':', 1)
            ns = self._term_map.get(pfx)
            if ns and ns.iri:
                return ns.iri + term
        else:
            term = self._term_map.get(term_curie_or_iri)
            if term:
                return term.iri
        return term_curie_or_iri

    def to_dict(self):
        """Serialize this context back into a JSON-LD ``@context`` dict."""
        data = {}
        if self.lang:
            data[LANG_KEY] = self.lang
        for key, alias in self._key_map.items():
            if key != alias:
                data[alias] = key
        for term in self.terms:
            obj = term.iri
            if term.coercion:
                # BUG FIX: this previously used the undefined name IRI_KEY
                # (a NameError at runtime); the expanded term form keys the
                # IRI on '@id'.
                obj = {ID_KEY: term.iri}
                if term.coercion == REV_KEY:
                    obj = {REV_KEY: term.iri}
                else:
                    obj[TYPE_KEY] = term.coercion
            if term.container:
                if not isinstance(obj, dict):
                    # BUG FIX: a container annotation requires the expanded
                    # dict form; item assignment on the plain IRI string
                    # raised a TypeError before.
                    obj = {ID_KEY: term.iri}
                obj[CONTAINER_KEY] = term.container
                if term.container == LIST_KEY:
                    obj[LIST_KEY] = True  # TODO: deprecated form?
            if obj:
                data[term.key] = obj
        return data
class Term(object):
    """One context term: a short key bound to an IRI, optionally carrying a
    type coercion and a container behavior."""

    def __init__(self, iri, key, coercion=None, container=None):
        self.iri, self.key = iri, key
        self.coercion, self.container = coercion, container
def source_to_json(source):
    """Open ``source`` (path, URL or stream) via rdflib and parse its byte
    stream as JSON, always closing the stream."""
    # TODO: conneg for JSON (fix support in rdflib's URLInputSource!)
    source = create_input_source(source)
    stream=source.getByteStream()
    try:
        return json.load(stream)
    finally:
        stream.close()
| Python | 0 | |
def hidden_palindrome(n):
    """Return True if *n* is not itself a palindrome but n plus its digit
    reversal is one (a "hidden" palindrome)."""
    digits = str(n)
    reversed_digits = digits[::-1]
    if digits == reversed_digits:
        return False
    total = str(n + int(reversed_digits))
    return total == total[::-1]
def test_hidden_palindrome():
    """Spot-checks: 38 + 83 == 121 is a palindrome; 49 + 94 == 143 is not."""
    assert hidden_palindrome(38)
    assert not hidden_palindrome(49)
if __name__ == '__main__':
    # Sum every hidden palindrome from 1 up to (and including) the palindrome
    # 123454321, printing progress every million numbers (full scan is slow).
    s = 0
    for x in range(1, 123454321+1):
        if x % 1000000 == 0:
            print(x)
        s += x if hidden_palindrome(x) else 0
    print(s)
f5d4fa76c7ea97af5cd30a3840835e6b97dd0721 | Add release script. (#162) | dev/release.py | dev/release.py | #!/usr/bin/env python
import click
from datetime import datetime
from subprocess import call, check_call, check_output, PIPE
import sys
# Default git remote to push the release branch and tag to.
DATABRICKS_REMOTE = "git@github.com:databricks/tensorframes.git"
# Maps each --publish-to choice to the sbt task that performs it.
PUBLISH_MODES = {
    "local": "tfs_testing/publishLocal",
    "m2": "tfs_testing/publishM2",
    "spark-package-publish": "distribution/spPublish",
}
# Throwaway branch name template: filled with (release_version, timestamp).
WORKING_BRANCH = "WORKING_BRANCH_RELEASE_%s_@%s"
# lower case "z" puts the branch at the end of the github UI.
# NOTE(review): the comment above looks stale -- no "z" appears in the names.
RELEASE_TAG = "v%s"
def prominentPrint(x):
    """Echo *x* underlined so it stands out in the console output."""
    click.echo(click.style(x, underline=True))
def verify(prompt, interactive):
    """Ask the user to confirm *prompt*; in non-interactive mode the answer
    is always True."""
    if interactive:
        return click.confirm(prompt, show_default=True)
    return True
@click.command()
@click.argument("release-version", type=str)
@click.argument("next-version", type=str)
@click.option("--publish-to", default="local", show_default=True,
              help="Where to publish artifact, one of: %s" % list(PUBLISH_MODES.keys()))
@click.option("--no-prompt", is_flag=True, help="Automated mode with no user prompts.")
@click.option("--git-remote", default=DATABRICKS_REMOTE,
              help="Push current branch and docs to this git remote.")
def main(release_version, next_version, publish_to, no_prompt, git_remote):
    """Cut a release on a temporary branch: run the sbt release (tag +
    version bump), build and publish, fast-forward the current branch onto
    the result, and optionally push branch and tag to the remote."""
    interactive = not no_prompt
    # Timestamp keeps the throwaway working-branch name unique.
    time = datetime.now().strftime("%Y-%m-%dT%H-%M-%S")
    if publish_to not in PUBLISH_MODES:
        modes = list(PUBLISH_MODES.keys())
        prominentPrint("Unknown publish target, --publish-to should be one of: %s." % modes)
        sys.exit(1)
    if not next_version.endswith("SNAPSHOT"):
        next_version += "-SNAPSHOT"
    if not verify("Publishing version: %s\n"
                  "Next version will be: %s\n"
                  "Continue?" % (release_version, next_version), interactive):
        sys.exit(1)
    current_branch = check_output(["git", "rev-parse", "--abbrev-ref", "HEAD"]).strip()
    # NOTE(review): check_output returns bytes on Python 3, so comparing with
    # the str "HEAD" below can never be true there, while the b"master"
    # comparison further down does use bytes -- confirm the intended Python
    # version and make these consistent.
    if current_branch == "HEAD":
        prominentPrint("Cannot build from detached head state. Please make a branch.")
        sys.exit(1)
    if current_branch != b"master":
        if not verify("You're not on the master branch do you want to continue?",
                      interactive):
            sys.exit(1)
    # Refuse to release with uncommitted changes in the working tree.
    uncommitted_changes = check_output(["git", "diff", "--stat"])
    if uncommitted_changes != b"":
        prominentPrint("There seem to be uncommitted changes on your current branch. Please commit or "
                       "stash them and try again.")
        prominentPrint(uncommitted_changes)
        sys.exit(1)
    # protoc is required by the sbt build to generate protobuf sources.
    if call(["which", "protoc"], stdout=PIPE, stderr=PIPE) != 0:
        prominentPrint("Cannot find protoc, protoc is required to build tensorfames. See README.md.")
        sys.exit(1)
    working_branch = WORKING_BRANCH % (release_version, time)
    release_tag = RELEASE_TAG % release_version
    target_tags = [release_tag]
    existing_tags = check_output(["git", "tag"]).decode().split()
    conflict_tags = list(filter(lambda a: a in existing_tags, target_tags))
    if conflict_tags:
        msg = ("The following tags already exist:\n"
               "    %s\n"
               "Please delete them and try.")
        msg = msg % "\n    ".join(conflict_tags)
        prominentPrint(msg)
        sys.exit(1)
    prominentPrint("Creating working branch for this release.")
    check_call(["git", "checkout", "-b", working_branch])
    prominentPrint("Creating release tag and updating snapshot version.")
    # sbt-release command: tags release_version, then bumps to next_version.
    update_version = "release release-version %s next-version %s" % (release_version, next_version)
    check_call(["./build/sbt", update_version])
    prominentPrint("Building and testing with sbt.")
    check_call(["git", "checkout", release_tag])
    publish_target = PUBLISH_MODES[publish_to]
    check_call(["./build/sbt", "clean", publish_target])
    prominentPrint("Updating local branch: %s" % current_branch)
    check_call(["git", "checkout", current_branch])
    # Fast-forward onto the working branch, then delete it.
    check_call(["git", "merge", "--ff", working_branch])
    check_call(["git", "branch", "-d", working_branch])
    prominentPrint("Local branch updated")
    if verify("Would you like to push local branch & version tag to remote: %s?" % git_remote,
              interactive):
        check_call(["git", "push", git_remote, current_branch])
        check_call(["git", "push", git_remote, release_tag])
if __name__ == "__main__":
    main()  # click parses sys.argv and invokes the decorated command
| Python | 0 | |
58d19ea654e0c8d250f46b0d72191e48b4bc8588 | add tests for encryption/decryption in awx.main.utils.common | awx/main/tests/unit/common/test_common.py | awx/main/tests/unit/common/test_common.py | from awx.conf.models import Setting
from awx.main.utils import common
def test_encrypt_field():
    """Round-trip a Setting value through encrypt/decrypt; the ciphertext
    depends on the instance pk (compare test_encrypt_field_without_pk)."""
    field = Setting(pk=123, value='ANSIBLE')
    encrypted = common.encrypt_field(field, 'value')
    assert encrypted == '$encrypted$AES$Ey83gcmMuBBT1OEq2lepnw=='
    assert common.decrypt_field(field, 'value') == 'ANSIBLE'
def test_encrypt_field_without_pk():
    """Encryption must still round-trip for unsaved instances (no pk)."""
    field = Setting(value='ANSIBLE')
    encrypted = common.encrypt_field(field, 'value')
    assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw=='
    assert common.decrypt_field(field, 'value') == 'ANSIBLE'
def test_encrypt_subfield():
    """A nested dict entry can be encrypted/decrypted via ``subfield``."""
    field = Setting(value={'name': 'ANSIBLE'})
    encrypted = common.encrypt_field(field, 'value', subfield='name')
    assert encrypted == '$encrypted$AES$8uIzEoGyY6QJwoTWbMFGhw=='
    assert common.decrypt_field(field, 'value', subfield='name') == 'ANSIBLE'
def test_encrypt_field_with_ask():
    """With ask=True the literal 'ASK' sentinel is passed through unencrypted."""
    encrypted = common.encrypt_field(Setting(value='ASK'), 'value', ask=True)
    assert encrypted == 'ASK'
def test_encrypt_field_with_empty_value():
    """A None value must stay None rather than being encrypted."""
    encrypted = common.encrypt_field(Setting(value=None), 'value')
    assert encrypted is None
| Python | 0.000001 | |
714c339d098bddf276df71c94ca62ab19fc45c2b | Add working LSTM tagger example | examples/lstm_tagger.py | examples/lstm_tagger.py | from __future__ import print_function, division
import plac
import numpy
import time
from timeit import default_timer as timer
import dill as pickle
import spacy
from spacy.attrs import ORTH, LOWER, PREFIX, SUFFIX, SHAPE
from spacy.tokens.doc import Doc
from thinc.i2v import Embed, HashEmbed
from thinc.v2v import Model, Maxout, ReLu, Affine, Softmax
from thinc.t2t import ExtractWindow, BiLSTM
from thinc.misc import BatchNorm as BN
from thinc.misc import LayerNorm as LN
from thinc.misc import Residual
from thinc.api import with_flatten
from thinc.api import layerize, chain, concatenate, clone, add
from thinc.neural.util import flatten_sequences, remap_ids, to_categorical
from thinc.neural.ops import NumpyOps, CupyOps
from thinc.neural.optimizers import SGD
from thinc.extra.datasets import ancora_pos_tags
#from thinc.api import FeatureExtracter
try:
import cupy
except ImportError:
print("Could not import cupy")
cupy = None
def FeatureExtracter(lang, attrs=[LOWER, SHAPE, PREFIX, SUFFIX], tokenized=True):
    """Build a thinc layer turning texts (or pre-tokenized word lists) into
    one feature array per document, with one column per attribute in *attrs*.

    NOTE(review): the mutable default ``attrs`` list is shared across calls;
    harmless as long as no caller mutates it, but worth confirming.
    """
    nlp = spacy.blank(lang)
    # Override spaCy's prefix/suffix with fixed 3-character affixes.
    nlp.vocab.lex_attr_getters[PREFIX] = lambda string: string[:3]
    nlp.vocab.lex_attr_getters[SUFFIX] = lambda string: string[-3:]
    def forward(texts, drop=0.):
        if tokenized:
            docs = [Doc(nlp.vocab, words) for words in texts]
        else:
            docs = [nlp(text) for text in texts]
        features = [doc.to_array(attrs) for doc in docs]
        def backward(d_features, sgd=None):
            # Features are inputs, not parameters: pass the gradient through.
            return d_features
        return features, backward
    return layerize(forward)
# Running training-accuracy accumulator, reset at the end of every epoch.
epoch_train_acc = 0.
def track_progress(**context):
    """Build an end-of-epoch callback that prints dev accuracy (raw and with
    averaged parameters), train accuracy, dropout, and words-per-second."""
    model = context['model']
    dev_X = context['dev_X']
    dev_y = model.ops.flatten(context['dev_y'])
    n_train = context['n_train']
    trainer = context['trainer']
    n_dev = len(dev_y)
    # Mutable cell: closing over a list lets each_epoch() record epoch times.
    epoch_times = [timer()]
    def each_epoch():
        global epoch_train_acc
        epoch_start = epoch_times[-1]
        epoch_end = timer()
        wps_train = n_train / (epoch_end-epoch_start)
        dev_start = timer()
        acc = model.evaluate(dev_X, dev_y)
        dev_end = timer()
        wps_run = n_dev / (dev_end-dev_start)
        # Evaluate again using the optimizer's averaged parameters.
        with model.use_params(trainer.optimizer.averages):
            avg_acc = model.evaluate(dev_X, dev_y)
        stats = (acc, avg_acc, float(epoch_train_acc) / n_train, trainer.dropout,
                 wps_train, wps_run)
        print("%.3f (%.3f) dev acc, %.3f train acc, %.4f drop, %d wps train, %d wps run" % stats)
        epoch_train_acc = 0.
        epoch_times.append(timer())
    return each_epoch
def preprocess(ops, get_feats, data, nr_tag, npad=4):
    """Split (words, tags) pairs, extract per-token feature arrays, and
    one-hot encode the tag sequences.

    NOTE(review): ``npad`` is accepted but never used in this body --
    confirm whether padding was meant to be applied.
    """
    Xs, ys = zip(*data)
    Xs = [ops.asarray(x) for x in get_feats(Xs)]
    ys = [ops.asarray(to_categorical(y, nb_classes=nr_tag)) for y in ys]
    return Xs, ys
# Call counter for the debug layer below.
_debug_calls = 0


def debug(X, drop=0.):
    """Identity thinc layer: passes X through unchanged, printing the mean
    and variance of every 1000th batch it sees."""
    global _debug_calls
    if not _debug_calls % 1000:
        print(X.mean(), X.var())
    _debug_calls += 1

    def backward(d, sgd):
        return d

    return X, backward
@plac.annotations(
    width=("Width of the hidden layers", "option", "w", int),
    vector_length=("Width of the word vectors", "option", "V", int),
    depth=("Depth of the hidden layers", "option", "d", int),
    min_batch_size=("Minimum minibatch size during training", "option", "b", int),
    max_batch_size=("Maximum minibatch size during training", "option", "B", int),
    learn_rate=("Learning rate", "option", "e", float),
    momentum=("Momentum", "option", "m", float),
    dropout=("Dropout rate", "option", "D", float),
    dropout_decay=("Dropout decay", "option", "C", float),
    nb_epoch=("Maximum passes over the training data", "option", "i", int),
    L2=("L2 regularization penalty", "option", "L", float),
)
def main(width=100, depth=4, vector_length=64,
         min_batch_size=1, max_batch_size=32, learn_rate=0.001,
         momentum=0.9, dropout=0.5, dropout_decay=1e-4,
         nb_epoch=20, L2=1e-6):
    """Train a BiLSTM POS tagger on the AnCora Spanish corpus and pickle the
    trained model to /tmp/model.pickle.

    NOTE(review): several hyperparameters (depth, vector_length, momentum,
    nb_epoch, L2) are only passed indirectly through ``cfg`` to
    ``begin_training`` -- confirm thinc actually honors them.
    """
    # Snapshot all CLI hyperparameters before any other locals are defined.
    cfg = dict(locals())
    print(cfg)
    if cupy is not None:
        print("Using GPU")
        Model.ops = CupyOps()
    train_data, check_data, nr_tag = ancora_pos_tags()
    extracter = FeatureExtracter('es', attrs=[LOWER, SHAPE, PREFIX, SUFFIX])
    Model.lsuv = True
    with Model.define_operators({'**': clone, '>>': chain, '+': add,
                                 '|': concatenate}):
        # Hash-embed four lexical attributes, concatenate, Maxout, then
        # BiLSTM followed by a per-token Softmax over the tagset.
        lower_case = HashEmbed(width, 100, column=0)
        shape = HashEmbed(width//2, 200, column=1)
        prefix = HashEmbed(width//2, 100, column=2)
        suffix = HashEmbed(width//2, 100, column=3)
        model = (
            with_flatten(
                (lower_case | shape | prefix | suffix)
                >> LN(Maxout(width, pieces=3))
                #>> (ExtractWindow(nW=1) >> LN(Maxout(width, width*3)))
            )
            >> BiLSTM(width, width)
            >> with_flatten(Softmax(nr_tag))
        )
    train_X, train_y = preprocess(model.ops, extracter, train_data, nr_tag)
    dev_X, dev_y = preprocess(model.ops, extracter, check_data, nr_tag)
    n_train = float(sum(len(x) for x in train_X))
    global epoch_train_acc
    with model.begin_training(train_X[:5000], train_y[:5000], **cfg) as (trainer, optimizer):
        trainer.each_epoch.append(track_progress(**locals()))
        trainer.batch_size = min_batch_size
        batch_size = float(min_batch_size)
        for X, y in trainer.iterate(train_X, train_y):
            yh, backprop = model.begin_update(X, drop=trainer.dropout)
            gradient = [yh[i]-y[i] for i in range(len(yh))]
            backprop(gradient, optimizer)
            # Grow the batch size ~0.1% per step up to the configured cap.
            trainer.batch_size = min(int(batch_size), max_batch_size)
            batch_size *= 1.001
        # Final evaluation and serialization with averaged parameters.
        with model.use_params(trainer.optimizer.averages):
            print(model.evaluate(dev_X, model.ops.flatten(dev_y)))
        with open('/tmp/model.pickle', 'wb') as file_:
            pickle.dump(model, file_)
if __name__ == '__main__':
    # Flip the constant below to 0 to profile a training run instead.
    if 1:
        plac.call(main)
    else:
        import cProfile
        import pstats
        cProfile.runctx("plac.call(main)", globals(), locals(), "Profile.prof")
        s = pstats.Stats("Profile.prof")
        s.strip_dirs().sort_stats("time").print_stats()
| Python | 0 | |
0565270790e12318139d6231ab15102d52f9b2ba | Add example script to build a cooler | examples/make_cooler.py | examples/make_cooler.py | from __future__ import division, print_function
from multiprocessing import Pool
from collections import OrderedDict
import numpy as np
import pandas
import h5py
import Bio.Restriction as biorst
import Bio.Seq as bioseq
import pyfaidx
import cooler
def digest(fasta_records, enzyme):
    """In-silico restriction digest: return one DataFrame with columns
    (chrom, start, end) listing every fragment produced by *enzyme* over
    every sequence in *fasta_records*."""
    # http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
    try:
        finder = getattr(biorst, enzyme).search
    except AttributeError:
        raise ValueError('Unknown enzyme name: {}'.format(enzyme))

    def _fragments_for(name):
        sequence = bioseq.Seq(str(fasta_records[name]))
        # Cut positions plus both sequence ends delimit the fragments.
        cut_sites = np.r_[0, np.array(finder(sequence)) + 1, len(sequence)].astype(int)
        count = len(cut_sites) - 1
        return pandas.DataFrame(
            {'chrom': [name] * count,
             'start': cut_sites[:-1],
             'end': cut_sites[1:]},
            columns=['chrom', 'start', 'end'])

    return pandas.concat(
        [_fragments_for(c) for c in fasta_records.keys()],
        axis=0, ignore_index=True)
if __name__ == '__main__':
    # --- Configuration: edit these paths/values before running. ---
    ENZYME = 'HindIII'
    FASTA_PATH = 'fasta file'
    CHROMINFO_PATH = 'UCSC chromInfo-like file'
    HIC_PATH = ('filtered, merged and **SORTED** input HDF5 file containing'
                'datasets: chrms1 cuts1 chrms2 cuts2')
    COOLER_PATH = 'output binned sparse contact map file path'
    BINSIZE = 'integer or "frag"'
    N_CPUS = 4
    # Index and read a single FASTA file
    # If using multiple files, read them separately and put the records into one
    # ordered dictionary.
    # Pyfaidx will autogenerate fai index files.
    fasta_records = OrderedDict(pyfaidx.Fasta(FASTA_PATH))
    # Need a chromInfo.txt style tab-separated file
    # Two columns: 1) chromosome label and 2) length in bp.
    # (the fai file usually works)
    # Chromosomes should be listed in the same order as in the fasta records.
    chromtable = pandas.read_csv(
        CHROMINFO_PATH, sep='\t', usecols=[0, 1], names=['name', 'length'])
    chromtable.index = chromtable['name']
    if BINSIZE == 'frag':
        # Make a fragment-level "bin table"
        fragtable = digest(fasta_records, ENZYME)
        # Bin the data (non-uniform fragment-level binning),
        # i.e. bintable == fragtable
        # Note that matrix balancing does not yet support non-uniformly binned
        # data
        h5opts = {'compression': 'gzip', 'compression_opts': 6}
        chunksize = int(100e6)
        with h5py.File(HIC_PATH, 'r') as h5read:
            with h5py.File(COOLER_PATH, 'w') as h5binned:
                cooler.io.from_readhdf5(
                    h5binned,
                    chromtable,
                    fragtable,
                    h5read,
                    info={'genome-assembly': 'myAssembly'},
                    h5opts=h5opts,
                    chunksize=chunksize)
    else:
        # For uniform bins, no need to assign fragments, just use:
        bintable = cooler.make_bintable(chromtable['length'], BINSIZE)
        # Bin the data
        h5opts = {'compression': 'gzip', 'compression_opts': 6}
        chunksize = int(100e6)
        # BUG FIX: this handle was previously bound as `h5frag` while the
        # call below passed the undefined name `h5read`, so the uniform-bin
        # branch always crashed with a NameError.
        with h5py.File(HIC_PATH, 'r') as h5read:
            with h5py.File(COOLER_PATH, 'w') as h5binned:
                cooler.io.from_readhdf5(
                    h5binned,
                    chromtable,
                    bintable,
                    h5read,
                    binsize=BINSIZE,
                    info={'genome-assembly': 'myAssembly'},
                    h5opts=h5opts,
                    chunksize=chunksize)
    # Compute a genome-wide balancing/bias/normalization vector
    # *** assumes uniform binning ***
    from cooler import balancing
    chunksize = int(100e6)
    # Create the pool outside the try block: if Pool() itself failed inside,
    # the finally clause would hit a NameError on `pool`.
    pool = Pool(N_CPUS)
    try:
        with h5py.File(COOLER_PATH, 'a') as h5:
            bias = balancing.iterative_correction(
                h5, chunksize=chunksize, tol=1e-05, min_nnz=100,
                cis_only=False, ignore_diags=3, map=pool.map)
        # add the bias column to the file (optional)
        #h5['bins'].create_dataset('weight', data=bias, **h5opts)
    finally:
        pool.close()
    """
    # example range query + applying balancing weights
    c = cooler.Cooler(COOLER_PATH)
    # fetch a scipy sparse matrix
    mat = c.matrix().fetch('chr1:20,000,000-40,000,000')
    # apply the balancing weights
    i0, i1 = c.extent('chr1')
    b = bias[i0:i1]
    mat.data = b[mat.row] * b[mat.col] * mat.data
    # convert to dense numpy array
    A = mat.toarray()
    np.save('chr1.npy', A)
    """
| Python | 0 | |
8ae3e44b0a43f382c98194b9caa097b62de899ef | Add script to save ner data to a csv file | nlpppln/save_ner_data.py | nlpppln/save_ner_data.py | #!/usr/bin/env python
import click
import os
import codecs
import json
import pandas as pd
@click.command()
@click.argument('input_dir', type=click.Path(exists=True))
@click.argument('output_file', type=click.Path())
def nerstats(input_dir, output_file):
    """Collect named-entity tokens from every SAF JSON file in *input_dir*
    into one CSV at *output_file* (columns: word, ner, w_id, text).

    Tokens are those carrying an 'ne' (named entity) label; 'text' records
    the source file name for each token.
    """
    output_dir = os.path.dirname(output_file)
    # BUG FIX: os.path.dirname() returns '' for a bare file name and
    # os.makedirs('') raises, so only create a directory when one was given.
    if output_dir and not os.path.exists(output_dir):
        os.makedirs(output_dir)
    frames = []
    for fi in os.listdir(input_dir):
        with codecs.open(os.path.join(input_dir, fi), encoding='utf-8') as f:
            saf = json.load(f)
        # Filter the token list once instead of four separate scans.
        ne_tokens = [t for t in saf['tokens'] if 'ne' in t]
        frames.append(pd.DataFrame(data={
            'word': [t['word'] for t in ne_tokens],
            'ner': [t['ne'] for t in ne_tokens],
            'w_id': [t['id'] for t in ne_tokens],
            'text': [fi] * len(ne_tokens),
        }))
    df = pd.concat(frames, ignore_index=True)
    df.to_csv(output_file)
if __name__ == '__main__':
    nerstats()  # click parses sys.argv and supplies the arguments
| Python | 0 | |
46a40e7e8fc424cc7e7a601fc99ab2d852cd0980 | Add example GCP CLI tool. (#69) | examples/gcp_cli.py | examples/gcp_cli.py | # -*- coding: utf-8 -*-
# Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Demo CLI tool for GCP."""
import argparse
from libcloudforensics import gcp
def ListInstances(args):
  """List GCE instances in GCP project.

  Args:
    args (dict): Arguments from ArgumentParser.
  """
  project = gcp.GoogleCloudProject(args.project)
  instances = project.ListInstances()
  print('Instances found:')
  for name in instances:
    boot_disk = instances[name].GetBootDisk()
    print('Name: {0:s}, Bootdisk: {1:s}'.format(name, boot_disk.name))
def ListDisks(args):
  """List GCE disks in GCP project.

  Args:
    args (dict): Arguments from ArgumentParser.
  """
  project = gcp.GoogleCloudProject(args.project)
  disks = project.ListDisks()
  print('Disks found:')
  for name in disks:
    print('Name: {0:s}, Zone: {1:s}'.format(name, disks[name].zone))
def CreateDiskCopy(args):
  """Copy GCE disks to other GCP project.

  Args:
    args (dict): Arguments from ArgumentParser.
  """
  copied_disk = gcp.CreateDiskCopy(
      args.project, args.dstproject, args.instancename, args.zone)
  print('Disk copy completed.')
  print('Name: {0:s}'.format(copied_disk.name))
if __name__ == '__main__':
  parser = argparse.ArgumentParser(description='Demo CLI tool for GCP')
  parser.add_argument('--project', help='The GCP project name')
  subparsers = parser.add_subparsers()

  parser_listdisks = subparsers.add_parser('listdisks')
  parser_listdisks.set_defaults(func=ListDisks)

  # BUG FIX: this sub-parser previously reused the `parser_listdisks`
  # variable name, which worked but obscured which parser was configured.
  parser_listinstances = subparsers.add_parser('listinstances')
  parser_listinstances.set_defaults(func=ListInstances)

  parser_creatediskcopy = subparsers.add_parser('creatediskcopy')
  parser_creatediskcopy.add_argument(
      '--dstproject', help='Destination GCP project')
  parser_creatediskcopy.add_argument('--zone', help='Zone to create disk in')
  parser_creatediskcopy.add_argument(
      '--instancename', help='Instance to copy disk from')
  parser_creatediskcopy.set_defaults(func=CreateDiskCopy)

  parsed_args = parser.parse_args()
  # BUG FIX: argparse only sets `func` when a sub-command was given, so
  # `parsed_args.func` raised AttributeError when the tool was run without
  # one; getattr makes "no sub-command" a graceful no-op.
  func = getattr(parsed_args, 'func', None)
  if func:
    func(parsed_args)
| Python | 0 | |
9cc4067d581f6a97136e0f186dc8aa1dbc734e47 | verify that the dynamic oracle for ArcEager can reach all projective parses | hals/transition_system/arc_eager_test.py | hals/transition_system/arc_eager_test.py | from copy import copy, deepcopy
import numpy as np
from unittest import TestCase
from transition_system.arc_eager import ArcEager, ArcEagerDynamicOracle
def generate_all_projective_parses(size):
    """Enumerate every projective dependency parse of a *size*-token
    sentence by depth-first search over ArcEager transition sequences.

    Returns a set of head tuples, one per distinct final parse."""
    system = ArcEager(1)
    pending = [system.state(size)]
    parses = set()
    while pending:
        state = pending.pop()
        if system.is_final(state):
            heads, _labels = system.extract_parse(state)
            parses.add(tuple(heads))
            continue
        # Branch on every legal action; deepcopy keeps states independent.
        for action in system.allowed(state):
            successor = deepcopy(state)
            system.perform(successor, action)
            pending.append(successor)
    return parses
class MockSentence:
    """Bare-bones sentence double exposing only the boolean adjacency matrix
    the oracle tests read, where adjacency[u, v] means token u heads token v."""

    def __init__(self, num_tokens):
        shape = (num_tokens, num_tokens)
        self.adjacency = np.full(shape, False)
class TestArcEager(TestCase):
    def test_dynamic_oracle_is_complete(self):
        """For every projective parse, greedily following zero-cost oracle
        actions must reconstruct exactly that parse."""
        SIZE = 4
        arc_eager = ArcEager(1)
        dyn_oracle = ArcEagerDynamicOracle()
        valid_parses = generate_all_projective_parses(SIZE)
        for valid_parse in valid_parses:
            # Build the gold adjacency matrix: valid_parse[v] is v's head.
            sent = MockSentence(len(valid_parse) + 1)
            for v, u in enumerate(valid_parse):
                sent.adjacency[u, v] = True
            state = arc_eager.state(SIZE)
            while not arc_eager.is_final(state):
                allowed_actions = arc_eager.allowed(state)
                costs = dyn_oracle(state, sent, allowed_actions)
                # A complete oracle offers at least one zero-cost action
                # from any state on the gold path.
                self.assertEqual(costs.min(), 0)
                index = costs.argmin()
                arc_eager.perform(state, allowed_actions[index])
            heads, labels = arc_eager.extract_parse(state)
            self.assertEqual(tuple(heads), valid_parse)
db380d8e6a8dfa5444f82a0978fad3494d923278 | Add tests of generate_matrix | tests/chainer_tests/testing_tests/test_matrix.py | tests/chainer_tests/testing_tests/test_matrix.py | import unittest
import numpy
from chainer import testing
from chainer.testing import condition
@testing.parameterize(*testing.product({
    'dtype': [
        numpy.float16, numpy.float32, numpy.float64,
        numpy.complex64, numpy.complex128,
    ],
    'x_s_shapes': [
        ((2, 2), (2,)),
        ((2, 3), (2,)),
        ((3, 2), (2,)),
        ((2, 3, 4), (2, 3)),
        ((2, 4, 3), (2, 3)),
        ((0, 2, 3), (0, 2)),
    ],
}))
class TestGenerateMatrix(unittest.TestCase):
    """generate_matrix must honor the requested shape and singular values
    for every supported dtype, including batched and empty shapes."""
    def test_generate_matrix(self):
        dtype = self.dtype
        x_shape, s_shape = self.x_s_shapes
        # Singular values drawn from [0.5, 1.5) so none are near zero.
        sv = 0.5 + numpy.random.random(s_shape).astype(dtype().real.dtype)
        x = testing.generate_matrix(x_shape, dtype=dtype, singular_values=sv)
        assert x.shape == x_shape
        # Compute the reference SVD in complex128 for precision.
        s = numpy.linalg.svd(
            x.astype(numpy.complex128), full_matrices=False, compute_uv=False,
        )
        # numpy.linalg.svd returns singular values sorted descending.
        sv_sorted = numpy.sort(sv, axis=-1)[..., ::-1]
        # float16 inputs warrant a much looser tolerance.
        rtol = 1e-3 if dtype == numpy.float16 else 1e-7
        numpy.testing.assert_allclose(s, sv_sorted, rtol=rtol)
class TestGenerateMatrixInvalid(unittest.TestCase):
    """generate_matrix must reject missing or invalid arguments."""
    def test_no_singular_values(self):
        # singular_values is a required argument.
        with self.assertRaises(TypeError):
            testing.generate_matrix((2, 2))
    def test_invalid_shape(self):
        # A matrix shape needs at least two dimensions.
        with self.assertRaises(ValueError):
            testing.generate_matrix((2,), singular_values=1)
    def test_invalid_dtype(self):
        # Integer dtypes are not supported.
        with self.assertRaises(ValueError):
            testing.generate_matrix(
                (2, 2), dtype=numpy.int32, singular_values=1)
    def test_shape_mismatch(self):
        # Three singular values cannot fit a 2x2 matrix.
        with self.assertRaises(ValueError):
            testing.generate_matrix(
                (2, 2), singular_values=numpy.ones(3))
testing.run_module(__name__, __file__)
| Python | 0.000003 | |
3cad51e08ef4c1dcfb11cbb8c32272328b31015a | Prepare v1.2.306.dev | flexget/_version.py | flexget/_version.py | """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.306.dev'
| """
Current FlexGet version.
This is contained in a separate file so that it can be easily read by setup.py, and easily edited and committed by
release scripts in continuous integration. Should (almost) never be set manually.
The version should always be set to the <next release version>.dev
The jenkins release job will automatically strip the .dev for release,
and update the version again for continued development.
"""
__version__ = '1.2.305'
| Python | 0.000002 |
daa11c0f69412f57cc097ec188e7287a096de277 | add Landsat 8 QA mask function | functions/Landsat8QA.py | functions/Landsat8QA.py | import numpy as np
class Landsat8QA():
def __init__(self):
self.name = "Landsat 8 Collection 2 QA Mask"
self.description = "This function creates masks based on Landsat 8 Collection 2 QA band."
self.bit_index = {'fill': 0, 'diluted': 1, 'cirrus': 2, 'cloud': 3, 'shadow': 4, 'snow': 5, 'clear': 6, 'water': 7}
def getParameterInfo(self):
return [
{
'name': 'r',
'dataType': 'raster',
'value': None,
'required': True,
'displayName': "Input Landsat 8 QA band",
'description': "The input QA raster."
},
{
'name': 'fill',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask fill data",
'description': "Set fill data pixels to 1"
},
{
'name': 'diluted',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask dilated cloud",
'description': "Set dilated cloud pixels to 1"
},
{
'name': 'cirrus',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask cirrus cloud",
'description': "Set cirrus cloud pixels to 1"
},
{
'name': 'cloud',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask cloud",
'description': "Set cloud pixels to 1"
},
{
'name': 'shadow',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask cloud shadow",
'description': "Set cloud shadow pixels to 1"
},
{
'name': 'snow',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask snow",
'description': "Set snow pixels to 1"
},
{
'name': 'clear',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask clear",
'description': "Set clear pixels to 1"
},
{
'name': 'water',
'dataType': 'boolean',
'value': False,
'required': False,
'displayName': "Mask water",
'description': "Set water pixels to 1"
},
]
def getConfiguration(self, **scalars):
return {
'compositeRasters': False,
'inheritProperties': 2 | 4 | 8, # inherit all from the raster but raster type
'invalidateProperties': 2 | 4 | 8, # reset stats, histogram, key properties
'inputMask': False
}
def updateRasterInfo(self, **kwargs):
kwargs['output_info']['bandCount'] = 1
kwargs['output_info']['histogram'] = () # reset histogram
kwargs['output_info']['pixelType'] = 'u1'
kwargs['output_info']['statistics'] = ({'minimum': 0, 'maximum': 1.0}, )
fill = kwargs.get('fill')
diluted = kwargs.get('diluted')
cirrus = kwargs.get('cirrus')
cloud = kwargs.get('cloud')
shadow = kwargs.get('shadow')
snow = kwargs.get('snow')
clear = kwargs.get('clear')
water = kwargs.get('water')
self.bit_mask = (fill << self.bit_index['fill']) + (diluted << self.bit_index['diluted']) + (cirrus << self.bit_index['cirrus']) + (cloud << self.bit_index['cloud']) + (shadow << self.bit_index['shadow']) + (snow << self.bit_index['snow']) + (clear << self.bit_index['clear']) + (water << self.bit_index['water'])
return kwargs
def updatePixels(self, tlc, shape, props, **pixelBlocks):
pix_blocks = pixelBlocks['r_pixels']
pix_array = np.asarray(pix_blocks)
z_dim, x_dim, y_dim = pix_array.shape
out_mask = np.zeros(pix_array.shape)
for num_x in range(x_dim):
for num_y in range(y_dim):
if pix_array[0, num_x, num_y] & self.bit_mask:
out_mask[0, num_x, num_y] = 1 # set pixels that have a flag set to 1, otherwise 0
pixelBlocks['output_pixels'] = out_mask.astype(props['pixelType'], copy=False)
return pixelBlocks
| Python | 0 | |
0ef5aa5abaf220579915e4068fd61513114b0be6 | Fix evolver get_mif() | joommf/drivers/evolver.py | joommf/drivers/evolver.py | import textwrap
class Minimiser(object):
def __init__(self, m_init, Ms, name, d_mxHxm=0.1):
self.m_init = m_init
self.Ms = Ms
self.name = name
self.d_mxHxm = d_mxHxm
def get_mif(self):
mif = textwrap.dedent("""\
Specify Oxs_CGEvolve:evolver {{}}
Specify Oxs_MinDriver {{
evolver :evolve
mesh :mesh
Ms {}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
stopping_mxHxm {}
basename {}
vector_field_output_format {{text \%#.8g}}
}}
""")
return mif.format(
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.d_mxHxm,
self.name
)
class LLG(object):
def __init__(self, t, m_init, Ms, alpha, gamma,
name, solver='rkf54', dm=0.01):
"""
Note:
solver options passed as a string - options
rk2, rk4, rkf54, rkf54m, rkf54s
"""
self.t = t
self.m_init = m_init
self.Ms = Ms
self.alpha = alpha
self.gamma = gamma
self.name = name
self.solver = solver
self.dm = dm
def get_mif(self):
llg_mif = textwrap.dedent("""\
Specify Oxs_RungeKuttaEvolve:evolve {{
method ${} alpha {:.5f}
gamma_G {:.5f}
start_dm {:.5f}
}}
Specify Oxs_TimeDriver [subst {{
evolver :evolve
stopping_time {:.2e}
stage_count 1
mesh :mesh
Ms {:.5e}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
basename {}
vector_field_output_format {{text \%#.8g}}
}}]
""")
return llg_mif.format(self.solver,
self.alpha,
self.gamma,
self.dm,
self.t,
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.name
)
if __name__ == '__main__':
llg = LLG(1e-9, (0, 0, 1), 1e6, 0.1, 2.21e5, 'test')
f = open('test_llg.mif', 'w')
f.write(llg.get_mif())
f.close()
| import textwrap
class Minimiser(object):
def __init__(self, m_init, Ms, name, d_mxHxm=0.1):
self.m_init = m_init
self.Ms = Ms
self.name = name
self.d_mxHxm = d_mxHxm
def get_mif(self):
mif = textwrap.dedent("""\
Specify Oxs_CGEvolve:evolver {}
Specify Oxs_MinDriver {{
evolver :evolve
mesh :mesh
Ms {}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
stopping_mxHxm {}
basename {}
vector_field_output_format {{text \%#.8g}}
}}
""")
return mif.format( # self.solver,
# self.alpha,
# self.gamma,
self.dm,
self.t,
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.stopping_mxHxm,
self.name
)
class LLG(object):
def __init__(self, t, m_init, Ms, alpha, gamma,
name, solver='rkf54', dm=0.01):
"""
Note:
solver options passed as a string - options
rk2, rk4, rkf54, rkf54m, rkf54s
"""
self.t = t
self.m_init = m_init
self.Ms = Ms
self.alpha = alpha
self.gamma = gamma
self.name = name
self.solver = solver
self.dm = dm
def get_mif(self):
llg_mif = textwrap.dedent("""\
Specify Oxs_RungeKuttaEvolve:evolve {{
method ${} alpha {:.5f}
gamma_G {:.5f}
start_dm {:.5f}
}}
Specify Oxs_TimeDriver [subst {{
evolver :evolve
stopping_time {:.2e}
stage_count 1
mesh :mesh
Ms {:.5e}
m0 {{ Oxs_UniformVectorField {{
vector {{{:.5f}, {:.5f}, {:.5f}}}
}} }}
basename {}
vector_field_output_format {{text \%#.8g}}
}}]
""")
return llg_mif.format(self.solver,
self.alpha,
self.gamma,
self.dm,
self.t,
self.Ms,
self.m_init[0],
self.m_init[1],
self.m_init[2],
self.name
)
if __name__ == '__main__':
llg = LLG(1e-9, (0, 0, 1), 1e6, 0.1, 2.21e5, 'test')
f = open('test_llg.mif', 'w')
f.write(llg.get_mif())
f.close()
| Python | 0.000001 |
05e7db377b7f0224ec97d5f96c387d711e1e0f23 | Add problem | src/SRM-144/time.py | src/SRM-144/time.py |
class Time:
def whatTime(self, seconds):
hours = seconds / 3600
a = 3600
leftover = seconds - hours * 3600
minutes = leftover / 60
final_sec = seconds - hours * 3600 - minutes * 60
final = str(hours) + ":" + str(minutes)+ ":" + str(final_sec)
return final
| Python | 0.03246 | |
61f542c215c0b45bf8b4121bc4705c760c334aa9 | Add a SetObjectExtruderOperation class | cura/Settings/SetObjectExtruderOperation.py | cura/Settings/SetObjectExtruderOperation.py | # Copyright (c) 2017 Ultimaker B.V.
# Cura is released under the terms of the AGPLv3 or higher.
from UM.Scene.SceneNode import SceneNode
from UM.Operations.Operation import Operation
from cura.Settings.SettingOverrideDecorator import SettingOverrideDecorator
## Simple operation to set the extruder a certain object should be printed with.
class SetObjectExtruderOperation(Operation):
def __init__(self, node: SceneNode, extruder_id: str) -> None:
self._node = node
self._extruder_id = extruder_id
self._previous_extruder_id = None
self._decorator_added = False
def undo(self):
if self._previous_extruder_id:
self._node.callDecoration("setActiveExtruder", self._previous_extruder_id)
def redo(self):
stack = self._node.callDecoration("getStack") #Don't try to get the active extruder since it may be None anyway.
if not stack:
self._node.addDecorator(SettingOverrideDecorator())
self._previous_extruder_id = self._node.callDecoration("getActiveExtruder")
self._node.callDecoration("setActiveExtruder", self._extruder_id)
| Python | 0 | |
cec802ac35cdc26912be882f53cb2f93aa67c022 | Add clock tamer utility | greo/clock_tamer.py | greo/clock_tamer.py | #!/usr/bin/env python
# Currently just a test to see if we can send data to the clock tamer.
# Import things to know about clock tamer
# It implements SPI with CPOL=1 and CPHA=0
# meaning we read on falling edge of SCK and change on rising edge
# Here are our pinouts:
# nSS = io_rx_08 = 0x0100 : active low
# SCK = io_rx_09 = 0x0200
# MOSI = io_rx_10 = 0x0400 : USRP->ClockTamer
# MISO = io_rx_11 = 0x0800 : ClockTamer->USRP
# nRST = io_rx_12 = 0x1000 : active low
from gnuradio import usrp
from struct import *
from time import sleep
import optparse, sys
class ClockTamer:
def __init__(self, usrp, which):
self.__usrp = usrp
self.__side = which
self.NSS = 0x0100
self.SCK = 0x0200
self.MOSI = 0x0400
self.MISO = 0x0800
self.NRST = 0x1000
self.__usrp._write_oe(self.__side, self.NSS | self.SCK | self.MOSI | self.NRST, self.NSS | self.SCK | self.MOSI | self.MISO | self.NRST )
self.set_hi(self.NSS)
self.set_hi(self.NRST)
# Cycle the reset
self.set_lo(self.NRST)
self.set_hi(self.NRST)
sleep(0.5)
def set_lo( self, pin ):
self.__usrp.write_io(self.__side, 0x0000, pin )
def set_hi( self, pin ):
self.__usrp.write_io(self.__side, 0xFFFF, pin )
def get_pin( self, pin ):
return self.__usrp.read_io(self.__side) & pin
def clean( self, text ):
return "".join(i for i in text if ord(i)<128 and ord(i)>31)
def write( self, text ):
result_string = ""
for c in text:
b = ord(c)
char_buffer = 0x00
# Prep the first bit
value = (b >> 7) & 1
if ( value > 0 ):
self.set_hi(self.MOSI)
else:
self.set_lo(self.MOSI)
# Start sending
self.set_hi(self.SCK)
self.set_lo(self.NSS)
for i in xrange(7,-1,-1):
self.set_hi(self.SCK)
value = (b >> i) & 1
if ( value > 0 ):
self.set_hi(self.MOSI)
else:
self.set_lo(self.MOSI)
self.set_lo(self.SCK)
bit = self.get_pin(self.MISO)
if ( bit > 0 ):
char_buffer = (char_buffer << 1) | 0x01
else:
char_buffer = char_buffer << 1
self.set_hi(self.SCK)
self.set_hi(self.NSS)
result_string += chr(char_buffer)
return result_string
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main():
try:
try:
usage = "usage: clock_tamer.py [--reset][--set_clock N][--cmd \"\"]"
parser = optparse.OptionParser(usage=usage)
parser.set_defaults(reset=False)
parser.set_defaults(clock=0)
parser.set_defaults(cmd="")
parser.add_option("-r","--reset", dest="reset",
action="store_true",
help="Only reset the Clock Tamer")
parser.add_option("--set_clock", dest="clock",
help="Set the output clock",type="int")
parser.add_option("--cmd", dest="cmd",
help="Commands to send to Clock Tamer",type="string")
(options, args) = parser.parse_args()
if not options.reset and options.clock < 1 and options.cmd == "":
parser.error("Need input arguments")
except optparse.OptionError, msg:
raise Usage(msg)
clock = ClockTamer(usrp.source_c(0), 0)
if not options.reset:
if not options.cmd == "":
cmds = options.cmd.split(";")
for cmd in cmds:
print "Wrote: "+cmd
clock.write(cmd+"\r")
result = clock.write("".center(48))
print "Response: "+clock.clean(result)
else:
cmd = "SET,,OUT,"+str(options.clock)
print "Wrote: "+cmd
clock.write(cmd+"\r")
result = clock.write("".center(48))
print "Response: "+clock.clean(result)
else:
print "ClockTamer cycled"
except Usage, err:
print >>sys.stderr, err.msg
return 2
# except Exception, err:
# sys.stderr.write( str(err) + '\n' )
# return 1
if __name__ == "__main__":
sys.exit(main())
| Python | 0 | |
57c29ec11b91505cade24670cc45726a8689bb9a | add needed util module | hera_mc/cm_utils.py | hera_mc/cm_utils.py | # -*- mode: python; coding: utf-8 -*-
# Copyright 2016 the HERA Collaboration
# Licensed under the 2-clause BSD license.
"""Some dumb low-level configuration management utility functions.
"""
from __future__ import print_function
import datetime
def _get_datetime(_date,_time):
if _date.lower() == 'now':
dt_d = datetime.datetime.now()
else:
data = _date.split('/')
dt_d = datetime.datetime(int(data[2])+2000,int(data[0]),int(data[1]))
if _time.lower() == 'now':
dt_t = datetime.datetime.now()
else:
data = _time.split(':')
dt_t = datetime.datetime(dt_d.year,dt_d.month,dt_d.day,int(data[0]),int(data[1]),0)
dt = datetime.datetime(dt_d.year,dt_d.month,dt_d.day,dt_t.hour,dt_t.minute)
return dt
def _get_stopdate(_stop_date):
if _stop_date:
return _stop_date
else:
return datetime.datetime(2020,12,31)
def _is_active(current, _start_date, _stop_date):
_stop_date = _get_stopdate(_stop_date)
if current > _start_date and current < _stop_date:
is_active=True
else:
is_active=False
return is_active | Python | 0.000001 | |
860b7b30f393622dac9badd15d65bf59679580e2 | Create utils.py | image_gnip/utils.py | image_gnip/utils.py | import os
import sys
import time
import logging.config
import json
class Utils:
@staticmethod
def insert_record(client, dataset_id, table_id, record):
result = client.push_rows(dataset_id, table_id, [record], None)
if result.get('insertErrors', None):
print "Record: %s" % (json.dumps(record))
print "Error result: %s" % result
return False
return True
@staticmethod
def import_from_file(client, dataset_id, table_id, filename, single_tweet=False):
if single_tweet:
record = json.loads(Utils.read_file(SAMPLE_TWEET_FILE))
success = Utils.insert_record(client, dataset_id, table_id, record)
return success
row = 0
with open(filename, "r") as f:
for tweet in f:
record = json.loads(tweet)
# ignore delete records for now
if record.get("delete", None):
continue
record_scrubbed = Utils.scrub(record)
success = Utils.insert_record(client, dataset_id, table_id, record_scrubbed)
if not success:
print "Failed row: %s %s" % (row, json.dumps(record))
return
else:
print "Processed row: %s" % row
row = row + 1
@staticmethod
def scrub(d):
# d.iteritems isn't used as you can't del or the iterator breaks.
for key, value in d.items():
if value is None:
del d[key]
elif key == 'coordinates':
del d[key]
elif key == 'attributes': # in 'place' object
del d[key]
elif key == 'bounding_box': # in 'place' object
del d[key]
elif key == 'retweeted_status':
del d[key]
elif key == 'created_at':
d[key] = Utils.convert_timestamp(value)
elif isinstance(value, dict):
Utils.scrub(value)
return d # For convenience
@staticmethod
def convert_timestamp(str):
ts = time.strptime(str,'%a %b %d %H:%M:%S +0000 %Y')
ts = time.strftime('%Y-%m-%d %H:%M:%S', ts)
return ts
@staticmethod
def read_file(fn):
data = ""
with open(fn, "r") as f:
for line in f:
data = data + line
return data
@staticmethod
def generate_schema_from_tweet():
record_str = Utils.read_file(SAMPLE_TWEET_FILE)
record = json.loads(record_str)
schema_str = schema_from_record(record)
return schema_str
@staticmethod
def enable_logging():
LOGGING_CONFIG = os.path.join(os.path.dirname(__file__), "logging.conf")
print "LOGGING_CONFIG" + str(LOGGING_CONFIG)
logging.config.fileConfig(LOGGING_CONFIG)
root = logging.getLogger("root")
return root
| Python | 0.000001 | |
7d7277b034fd8368d30cef3273514dedc9acae69 | Create decode.py | decode.py | decode.py | #!/usr/bin/env python
#Coding: UTF-8
from StringIO import StringIO
import hmac
from hashlib import sha1
import base64
from lxml import etree as ET
import uuid
def c14n(xml, exclusive=True):
io_msg = StringIO(xml)
et = ET.parse(io_msg)
io_output = StringIO()
et.write_c14n(io_output, exclusive=exclusive)
return io_output.getvalue()
def psha1(clientSecret, serverSecret, sizeBits=256, decodeSecrets=False):
if decodeSecrets:
clientSecret = base64.b64decode(clientSecret)
serverSecret = base64.b64decode(serverSecret)
sizeBytes = sizeBits / 8
hashSize = 160 # HMAC_SHA1 length is always 160
i = 0
b1 = serverSecret
b2 = ""
temp = None
psha = ""
while i < sizeBytes:
b1 = hmac.new(clientSecret, b1, sha1).digest()
b2 = b1 + serverSecret
temp = hmac.new(clientSecret, b2, sha1).digest()
for j in xrange(0, len(temp)):
if i < sizeBytes:
psha += temp[j]
i += 1
else:
break
return base64.b64encode(psha)
## ONE ########################################################################
key_store = (
'9DKqiWZPOvQuXIk5cuupIxzKVoz6BZ0X1gB1OwZ/G8E=',
'icMFRGjveOK8LfW6QNw/5iLaknjWidTL3KEUT9sniDE=',
)
ts_store = (
'''<u:Timestamp u:Id="_0" xmlns:u="http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd"><u:Created>2014-10-07T21:25:16.810Z</u:Created><u:Expires>2014-10-07T21:30:16.810Z</u:Expires></u:Timestamp>''',
'''<SignedInfo><CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"></CanonicalizationMethod><SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#hmac-sha1"></SignatureMethod><Reference URI="#_0"><Transforms><Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"></Transform></Transforms><DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"></DigestMethod><DigestValue>zYIcnsphp4lPCK7REFYo4zT4tBU=</DigestValue></Reference></SignedInfo>''',
'''<SignedInfo><CanonicalizationMethod Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/><SignatureMethod Algorithm="http://www.w3.org/2000/09/xmldsig#hmac-sha1"/><Reference URI="#_0"><Transforms><Transform Algorithm="http://www.w3.org/2001/10/xml-exc-c14n#"/></Transforms><DigestMethod Algorithm="http://www.w3.org/2000/09/xmldsig#sha1"/><DigestValue>zYIcnsphp4lPCK7REFYo4zT4tBU=</DigestValue></Reference></SignedInfo>''',
)
known_signatures = (
'zYIcnsphp4lPCK7REFYo4zT4tBU=', # request 2 digest value
'U3B7nhKHc0prHvjHcRsWqP8bZcI=', # request 2 signature value
)
def decode_hmac(key, msg, decode_key=True):
'''
key - base64 encoded key
msg - XML message
'''
if decode_key:
try:
key_raw = base64.b64decode(key)
except:
return ''
else:
key_raw = key
canon = c14n(msg)
sig = hmac.new(key_raw, canon, sha1)
return base64.b64encode(sig.digest())
def decode_sha1(msg):
'''
msg - XML message
'''
canon = c14n(msg)
sig = sha1(canon)
return base64.b64encode(sig.digest())
def decode_multi(key_store, msg_store):
for msg in msg_store:
yield decode_sha1(msg)
def decode_multi_inline():
for sig in decode_multi(key_store, ts_store):
print sig
if sig in known_signatures:
print " MATCH!!! %s" % (sig,)
def decode_psha1():
key = key_store[0]
seed = key_store[1]
size = 256
keys = []
keys.append(psha1(key, seed, size, True))
keys.append(psha1(key, seed, size, False))
keys.append(psha1(seed, key, size, True))
keys.append(psha1(seed, key, size, False))
keys.append(psha1(key, '', size, True))
keys.append(psha1(key, '', size, False))
keys.append(psha1('', key, size, True))
keys.append(psha1('', key, size, False))
keys.append(psha1(seed, '', size, True))
keys.append(psha1(seed, '', size, False))
keys.append(psha1('', seed, size, True))
keys.append(psha1('', seed, size, False))
keys.append(psha1('', '', size, True))
keys.append(psha1('', '', size, False))
keys.append(psha1(seed, seed, size, True))
keys.append(psha1(seed, seed, size, False))
keys.append(psha1(key, key, size, True))
keys.append(psha1(key, key, size, False))
for h in keys:
for store in ts_store:
sig = decode_hmac(h, store)
print sig
if sig in known_signatures:
print " MATCH!!", sig
| Python | 0.00003 | |
0080b6744b0ed9603ecf28b826e03aef01a58d2c | add editmate extension | editmate.py | editmate.py | """
Use TextMate as the editor
Usage: %load_ext editmate
Now when you %edit something, it opens in textmate.
This is only necessary because the textmate command-line entrypoint
doesn't support the +L format for linenumbers, it uses `-l L`.
"""
from subprocess import Popen, list2cmdline
from IPython.core.error import TryNext
def edit_in_textmate(self, filename, linenum=None, wait=True):
cmd = ['mate']
if wait:
cmd.append('-w')
if linenum is not None:
cmd.extend(['-l', str(linenum)])
cmd.append(filename)
proc = Popen(list2cmdline(cmd), shell=True)
if wait and proc.wait() != 0:
raise TryNext()
def load_ipython_extension(ip):
ip.set_hook('editor', edit_in_textmate)
| Python | 0 | |
2ecf595b29b3b45769ab0934be6d095a4f80ad56 | Add mmtl unit teset | tests/metal/mmtl/test_mmtl.py | tests/metal/mmtl/test_mmtl.py | import unittest
from metal.mmtl.BERT_tasks import create_tasks
from metal.mmtl.metal_model import MetalModel
from metal.mmtl.trainer import MultitaskTrainer
class MMTLTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
task_names = [
"COLA",
"SST2",
"MNLI",
"RTE",
"WNLI",
"QQP",
"MRPC",
"STSB",
"QNLI",
]
cls.tasks = create_tasks(
task_names, max_datapoints=100, dl_kwargs={"batch_size": 8}
)
def test_mmtl_training(self):
model = MetalModel(self.tasks)
trainer = MultitaskTrainer()
trainer.train_model(
model,
self.tasks,
checkpoint_metric="train/loss",
checkpoint_metric_mode="min",
n_epochs=1,
verbose=False,
)
| Python | 0 | |
34b1eb53ffbca24a36c103f2017b8780405c48f4 | add prod wsgi to code | find.wsgi | find.wsgi | from openspending import core
application = core.create_web_app() | Python | 0 | |
59de1a12d44245b69ade0d4703c98bf772681751 | Add tests for User admin_forms | user_management/models/tests/test_admin_forms.py | user_management/models/tests/test_admin_forms.py | from django.core.exceptions import ValidationError
from django.test import TestCase
from .. import admin_forms
from . factories import UserFactory
class UserCreationFormTest(TestCase):
def test_clean_email(self):
email = 'test@example.com'
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': email}
self.assertEqual(form.clean_email(), email)
def test_clean_duplicate_email(self):
user = UserFactory.create()
form = admin_forms.UserCreationForm()
form.cleaned_data = {'email': user.email}
with self.assertRaises(ValidationError):
form.clean_email()
def test_clean(self):
data = {'password1': 'pass123', 'password2': 'pass123'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
self.assertEqual(form.clean(), data)
def test_clean_mismatched(self):
data = {'password1': 'pass123', 'password2': 'pass321'}
form = admin_forms.UserCreationForm()
form.cleaned_data = data
with self.assertRaises(ValidationError):
form.clean()
class UserChangeFormTest(TestCase):
def test_clean_password(self):
password = 'pass123'
data = {'password': password}
user = UserFactory.build()
form = admin_forms.UserChangeForm(data, instance=user)
self.assertNotEqual(form.clean_password(), password)
| Python | 0 | |
706e8a6318b50466ee00ae51f59ec7ab76f820d6 | Create forecast.py | forecast.py | forecast.py | # -*- coding: utf-8 -*-
# Weather Twitter Bot - AJBBB - 7/8/2015 v2.*
import urllib2
import json
from birdy.twitter import UserClient
import tweepy
#Twitter Keys
CONSUMER_KEY = "YOUR CONSUMER KEY HERE"
CONSUMER_SECRET = "YOUR CONSUMER SECRET HERE"
ACCESS_TOKEN = "YOUR ACCESS TOKEN HERE"
ACCESS_TOKEN_SECRET = "YOUR ACCESS TOKEN SECRET"
#Get the wundergound json file to be read
f = urllib2.urlopen("http://api.wunderground.com/api/YOUR-WUNDERGROUND-API-KEY-HERE/geolookup/conditions/q/GB/London.json")
#read from the json file
json_string = f.read()
#parse the json file
parsed_json = json.loads(json_string)
#get info from current_observation in json file
temp_c = parsed_json['current_observation']['temp_c']
wind = parsed_json['current_observation']['wind_kph']
winddir = parsed_json['current_observation']['wind_dir']
windstr = parsed_json['current_observation']['wind_string']
weather = parsed_json['current_observation']['weather']
#Define the degree symbol
degree = u'\N{DEGREE SIGN}'
#Connect Using Tweepy
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
#oAuth Client Info
client = UserClient(CONSUMER_KEY,
CONSUMER_SECRET,
ACCESS_TOKEN,
ACCESS_TOKEN_SECRET)
def tweet(message):
#Simple tweet function to tweet whatever is passed to message.
client.api.statuses.update.post(status=message)
if wind > 0.0:
#Tweet out the current weather with numerical wind speed.
tweet("Current weather in London, UK: " + str(temp_c) +
degree + "C" + " and " + str(weather) + ". Wind: " + str(wind) +
" KPH #weather #london #news #UK http://is.gd/UyLFWz")
else:
#Tweet out the current weather with text.
tweet("Current weather in London, UK: " + str(temp_c) +
degree + "C" + " and " + str(weather) +
". Little to no wind. #weather #london #news #UK http://is.gd/UyLFWz")
| Python | 0 | |
ae1aaaddb8adbbe4167e9b2a073493df90f6fd60 | Remove unused CACHE_VERSION | subliminal/cache.py | subliminal/cache.py | # -*- coding: utf-8 -*-
import datetime
from dogpile.cache import make_region
#: Expiration time for show caching
SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=3).total_seconds()
#: Expiration time for episode caching
EPISODE_EXPIRATION_TIME = datetime.timedelta(days=3).total_seconds()
region = make_region()
| # -*- coding: utf-8 -*-
import datetime
from dogpile.cache import make_region
#: Subliminal's cache version
CACHE_VERSION = 1
#: Expiration time for show caching
SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=3).total_seconds()
#: Expiration time for episode caching
EPISODE_EXPIRATION_TIME = datetime.timedelta(days=3).total_seconds()
region = make_region()
| Python | 0.000065 |
1d5227941c4839ff781fb944f425865b8afdc01f | Add lc0732_my_calendar_iii.py | lc0732_my_calendar_iii.py | lc0732_my_calendar_iii.py | """Leetcode 732. My Calendar III
Hard
URL: https://leetcode.com/problems/my-calendar-iii/
Implement a MyCalendarThree class to store your events. A new event can always be added.
Your class will have one method, book(int start, int end). Formally, this represents a
booking on the half open interval [start, end), the range of real numbers x such that
start <= x < end.
A K-booking happens when K events have some non-empty intersection (ie., there is some
time that is common to all K events.)
For each call to the method MyCalendar.book, return an integer K representing the
largest integer such that there exists a K-booking in the calendar.
Your class will be called like this:
MyCalendarThree cal = new MyCalendarThree();
MyCalendarThree.book(start, end)
Example 1:
MyCalendarThree();
MyCalendarThree.book(10, 20); // returns 1
MyCalendarThree.book(50, 60); // returns 1
MyCalendarThree.book(10, 40); // returns 2
MyCalendarThree.book(5, 15); // returns 3
MyCalendarThree.book(5, 10); // returns 3
MyCalendarThree.book(25, 55); // returns 3
Explanation:
The first two events can be booked and are disjoint, so the maximum K-booking is a 1-booking.
The third event [10, 40) intersects the first event, and the maximum K-booking is a 2-booking.
The remaining events cause the maximum K-booking to be only a 3-booking.
Note that the last event locally causes a 2-booking, but the answer is still 3 because
eg. [10, 20), [10, 40), and [5, 15) are still triple booked.
Note:
- The number of calls to MyCalendarThree.book per test case will be at most 400.
- In calls to MyCalendarThree.book(start, end), start and end are integers in the range [0, 10^9].
"""
class MyCalendarThree(object):
def __init__(self):
pass
def book(self, start, end):
"""
:type start: int
:type end: int
:rtype: int
"""
pass
def main():
pass
if __name__ == '__main__':
main()
| Python | 0.000225 | |
5dc8e70bc081646fdeb37e9af1090a78e016d91b | add script inserting initial datas in selected database | insert_initial_datas.py | insert_initial_datas.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import psycopg2
import sys
import argparse
POSTGRESQL_connection = u"host='localhost' port=5432 user='postgres' password='postgres'"
def main():
parser = argparse.ArgumentParser(description="Script d'insertion des données initiales d'une base ENDIV.")
parser.add_argument("database", help="Spécifie le nom de la base de données")
args = parser.parse_args()
try:
connection_string = POSTGRESQL_connection
connection_string += u"dbname='{0}'".format(args.database)
connection = psycopg2.connect(connection_string)
except psycopg2.Error as e:
print u"connection à la base de données impossible {0}".format(e)
sys.exit(1)
query = ''
try:
cursor = connection.cursor()
try:
cursor.execute(open("insert_initial_data.sql", "r").read())
cursor.execute("COMMIT")
except psycopg2.Error, e:
print "error while inserting initial datas: {0}".format(e)
sys.exit(1)
finally:
if cursor:
cursor.close()
if connection:
connection.close()
print u'insertions OK'
sys.exit(0)
if __name__ == "__main__":
main()
| Python | 0 | |
2aa90b34951bde36696bbcb773940a6adc245f23 | Add Authenticater plugin | plugins/Authenticater.py | plugins/Authenticater.py | from ts3observer.models import Plugin, Action
import MySQLdb
class Meta:
author_name = 'Tim Fechner'
author_email = 'tim.b.f@gmx.de'
version = '1.0'
class Config:
enable = False
interval = 5
yaml = {
'general': {
'servergroup_id': 0,
'remove_if_deleted': True,
},
'database': {
'hostname': 'localhost',
'username': '',
'password': '',
'database': '',
'table': '',
},
}
class Authenticater(Plugin):
def setup(self):
self.connection = MySQLdb.connect(
host=self.config['database']['hostname'],
user=self.config['database']['username'],
passwd=self.config['database']['password'],
db=self.config['database']['database']
)
self.cursor = self.connection.cursor(MySQLdb.cursors.DictCursor)
def run(self, clients, channels, server_info):
auth_list = self.get_authenticated_users()
for clid, client in clients.items():
if (client.unique_identifier, True) in auth_list:
if not self.already_has_group(client):
self.add_group(client)
else:
if self.already_has_group(client):
self.remove_group(client)
def get_authenticated_users(self):
self.cursor.execute('''SELECT ts3o_uid, ts3o_active FROM {}'''.format(self.config['database']['table']))
self.connection.commit()
users = self.cursor.fetchall()
return [(pair['ts3o_uid'], bool(pair['ts3o_active'])) for pair in users]
def already_has_group(self, client):
for group in client.servergroups:
if group == self.config['general']['servergroup_id']:
return True
return False
def add_group(self, client):
self._register_action(client, 'add')
def remove_group(self, client):
self._register_action(client, 'remove')
def shutdown(self):
self.connection.close()
def _register_action(self, client, atype):
Action(
'Authenticater',
ts3o.run_id,
client,
'{}_group'.format(atype),
function_kwargs = {
'servergroup_id': self.config['general']['servergroup_id'],
},
reason=atype
).register()
| Python | 0 | |
571dbf74bfc9f893d25ad7d626de800b2b3d6c73 | move load document functionality to deserializer. prepare for post/put methods | jsonapi/deserializer.py | jsonapi/deserializer.py | """ Deserializer definition."""
class DeserializerMeta(object):
pass
class Deserializer(object):
Meta = DeserializerMeta
@classmethod
def load_document(cls, document):
""" Given document get model.
:param dict document: Document
:return django.db.models.Model model: model instance
"""
pass
| Python | 0.000002 | |
6537dc8853bb7f8d9fb93b0fb2b1c0241bb08b6b | Create client.py | python-scripts/client.py | python-scripts/client.py | import socket
from datetime import datetime, time
s=socket.socket(socket.AF_INET, socket.SOCK_STREAM) # create a client socket
port=9999
# get the current date-time
time1=datetime.now()
s.connect(("10.0.0.2", port)) # connect to server socket which is at address 10.0.0.2 and port 9999
tm=s.recv(1024) # this will read atmost 1024 bytes
# get the current date-time (after receiving current time from server)
time2=datetime.now()
serverTime=datetime.strptime(tm, "%Y-%m-%d %H:%M:%S.%f")
# terminate client socket
s.close()
# printing out time received from the time-server in console
print("The time got from the server is: \n")
print "Hour: %d \n" % serverTime.hour
print "Minute: %d \n" % serverTime.minute
print "Second: %d \n" % serverTime.second
print "Microsecond: %d \n" %serverTime.microsecond
# Applying Cristian`s algorithm
t1=time1.second*1000000+time1.microsecond
t2=time2.second*1000000+time2.microsecond
diff=(t2-t1)/2
# computed value of actual micro-sec time to be added to obtained server time
newMicro = serverTime.microsecond+diff
# printing out actual time in console after application of Cristian`s algorithm
print("Applying Cristian`s algorithm the actual time is: \n")
print "Hour: %d \n" % serverTime.hour
print "Minute: %d \n" % serverTime.minute
print "Second: %d \n" % serverTime.second
print "Microsecond: %d \n" % newMicro
| Python | 0 | |
bae50495106ce5c9cb39143a58e0e73a4e823d29 | Implement DispatchLoader (metapath import hook) | loader.py | loader.py | from __future__ import print_function, absolute_import, unicode_literals, division
from stackable.stack import Stack
from stackable.utils import StackablePickler
from stackable.network import StackableSocket, StackablePacketAssembler
from sys import modules
from types import ModuleType
class DispatchLoader(object):
	"""Meta-path importer (PEP 302) that fetches module source over a socket.

	A pickled ``{'load': name}`` request is written to the remote side,
	which replies with ``{'module': <source or None>}``.  Fetched sources
	are cached locally.  NOTE(review): Python 2 only -- see the ``exec``
	statement in load_module().
	"""
	def __init__(self, ip, port):
		# Pickler-over-packet-assembler protocol stacked on a TCP socket.
		self.stack = Stack((StackableSocket(ip=ip, port=port),
		                    StackablePacketAssembler(),
		                    StackablePickler()))
		self.cache = {}
	def get_module(self, name):
		# Return the remote source for *name*, consulting the cache first.
		# A None reply (module unknown remotely) is deliberately not cached.
		if name in self.cache:
			return self.cache[name]
		else:
			self.stack.write({'load': name})
			o = self.stack.read()
			if o['module'] != None:
				self.cache[name] = o['module']
			return o['module']
	def find_module(self, fullname, path=None):
		# Finder half of the import-hook protocol: claim the module
		# (return self) only if the remote side actually has it.
		if self.get_module(fullname) != None:
			self.path = path
			return self
		return None
	def load_module(self, name):
		# Loader half: register the module in sys.modules *before*
		# executing its source, so recursive imports during execution
		# can see the partially-initialized module.
		if name in modules:
			return modules[name]
		m = ModuleType(name, name)
		modules[name] = m
		mod = self.get_module(name)
		if mod == None:
			raise ImportError("No such module")
		exec mod in m.__dict__
		return m
| Python | 0 | |
0f79cf1d15292476f2bead6d85d15e6f0db6ebbf | Revert "Remove manage.py in the root" | manage.py | manage.py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Point Django at the sandbox settings used by the test project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "tests.sandbox.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        # Django itself imports fine, so surface the original error
        # (e.g. a broken settings module) unchanged.
        raise
    execute_from_command_line(sys.argv)
| Python | 0 | |
77e13247b63af4dc2355bab2fdc64e2b38ec777a | Create manage.py | manage.py | manage.py | #!/usr/bin/python
import argparse
from json import dump, load
from os import remove, rename, system
# Command-line interface: only --init is wired up; the commented-out
# arguments below are leftover argparse examples from the docs.
parser = argparse.ArgumentParser(description='This tool is Chaincode Development Manager.')
parser.add_argument("--init", action="store_true",help="Initialise the Chaincode environment")
# parser.add_argument('integers', metavar='N', type=int, nargs='+', help='an integer for the accumulator')
# parser.add_argument('--bootstrap', dest='bootstrap', action='store_const', const=sum, default=max, help='sum the integers (default: find the max)')
args = parser.parse_args()
############################### UTILS #############################
def bash(cmd):
	# Run *cmd* through os.system, wrapped in loud BEGIN/END banners so
	# shell output is easy to spot in the console.  (Python 2 prints;
	# the command's exit status is discarded.)
	print '\n'+'%'*32,"BASHING-BEGINS-HERE",'%'*32,'\n'
	print "Command: ",cmd,'\n'
	print "Output:\n"
	system(cmd)
	print '\n'
	print '%'*32,"BASHING-ENDS-HERE",'%'*32,'\n'
####################################################################
specification_template={
"specification":{
"participants":[],
"ccis": {
"init": {
},
"invoke": {
},
"query": {
}
},
"models": {
}
},
"hashes": {
"specification":'',
"entities.go":'',
"ccis.go":''
}
}
init=False
if args.init:
name=raw_input()
author=raw_input()
with open("specification.json","w") as f:
dump(specification_template,f)
specification=None
# hashes=None
# with open("specification.json","r") as f:
# dic=load(f)
# specification=dic["specification"]
# hashes=dic["hashes"]
generate=False
if generate:
pass
struct="""\
type %s struct {
%s
}
"""
# with open("tmp_entities.go","w") as f:
# print >> f, "package main\n"
# for entity,attributes in models.items():
# print >> f, struct%(entity,'')
# for file in ["tmp_entities.go"]:
# system("gofmt "+file)
# remove(file[4:])
# rename(file,file[4:])
#setup=True
# NOTE(review): 'setup', 'build' and 'test' are never assigned (their
# assignments are commented out above/inline), so reaching this point
# raises NameError at runtime -- confirm the intended toggles before use.
if setup:
	#build=True
	if build:
		print "You know this is not really required, but just running for Knitty Gritty."
		bash("go tool fix -r .")
	#test=True
	if test:
		print "Starting Unittests."
		bash("go test -v")
		print "Generating Test Coverage reports."
		bash("go tool cover -html=count.out -o test/coverage.html")
		browser=''
		bash(browser+" test/coverage.out")
credits=True
if credits:
print """\
##########################################################################
################ HYPERLEDGER CHAINCODE DEVLOPMENT MANAGER ################
##########################################################################
Author: Neela Krishna Teja Tadikonda
Thanks to my team for the procurement project for the support and encouragement.
*
*
*
*
*
*
*
*
*
*
*
*
*
*
Finally special thanks to itpc - Mohan (CTO) and Sai (CEO) for supporting and encouraging me with the development of this tool.
"""
| Python | 0.000001 | |
540ab945736486ce78452750486ea73128b29d7b | Add parse_xml.py | parse_xml.py | parse_xml.py | import sys
import os
from bs4 import BeautifulSoup
from zipfile import ZipFile
def main(argv):
    """Extract presenter notes from an ODF presentation (.odp).

    Reads ``content.xml`` from the zip archive named by ``argv[1]``, finds
    every notes frame, and writes the notes to ``<basename>.script.txt``
    while echoing them to stdout with one ``_Slide N`` header per slide.
    """
    root, ext = os.path.splitext(argv[1])
    with ZipFile(argv[1]) as myzip:
        with myzip.open("content.xml") as f:
            # NOTE(review): "lxml" is bs4's HTML parser (lower-cases tag
            # names); it happens to work for these queries, but the strict
            # choice for XML would be "lxml-xml" -- confirm before changing,
            # since tag matching behaves differently.
            soup = BeautifulSoup(f.read(), "lxml")
    notes = soup.findAll("draw:frame", {"presentation:class": "notes"})
    with open("{}.script.txt".format(root), "w") as f:
        for index, note in enumerate(notes):
            # Replace ODF <text:s> whitespace placeholders with a real
            # space.  Fix: operate on each found element directly instead
            # of repeatedly re-searching the tree with note.find(), which
            # silently depended on replaced nodes dropping out of the tree.
            for bit in note.findAll("text:s"):
                bit.replace_with(" ")
            print("_Slide {}".format(index))
            f.write("_Slide {}\n".format(index))
            print(note.text)
            f.write("{}\n".format(note.text))
if __name__ == "__main__":
    main(sys.argv)
| Python | 0.000263 | |
269b779fe560fb85ca527cdda2ebd4e5e9b3a89c | Add monkeyrunner script to common operations | monkey/common.py | monkey/common.py | from com.android.monkeyrunner import MonkeyDevice as mkd
from com.android.monkeyrunner import MonkeyRunner as mkr
_ddmm_pkg = 'br.ufpe.emilianofirmino.ddmm'
def open_dev():
    """Establish a MonkeyDevice connection to the attached Android device.

    NOTE(review): the 1000 passed to waitForConnection is presumably the
    timeout in seconds -- confirm against the monkeyrunner API docs.
    """
    return mkr.waitForConnection(1000)
def open_app(device, package, activity='.MainActivity'):
    """Launch the given activity of *package* on *device*.

    The component uses the standard ``package/activity`` notation
    expected by MonkeyDevice.startActivity.
    """
    component = '%s/%s' % (package, activity)
    device.startActivity(component=component)
def press_back(device):
    """Press the hardware/navigation Back button on *device*."""
    device.press('KEYCODE_BACK', mkd.DOWN_AND_UP)
def lock_screen(device):
    """Press the power key to lock the device.

    NOTE(review): this toggles the power key, so it only locks if the
    screen is currently on -- confirm callers guarantee that state.
    """
    device.press('KEYCODE_POWER', mkd.DOWN_AND_UP)
def unlock_screen(device):
    """Wake the device and swipe across the lock screen to dismiss it."""
    device.wake()
    # Horizontal swipe near the bottom of a 768-wide screen layout.
    swipe_start = (768/2, 1000)
    swipe_end = (50, 1000)
    device.drag(swipe_start, swipe_end, duration=1.0, steps=50)
def start_ddmm(device):
    """Start the DDMM Profiler app and leave it running in the background.

    Drives the app's UI by absolute touch coordinates, so this assumes a
    fixed screen layout/resolution -- TODO confirm per target device.
    """
    open_app(device, _ddmm_pkg)
    mkr.sleep(2)
    device.touch(20, 200, mkd.DOWN_AND_UP) # check prevent sleep
    device.touch(384, 300, mkd.DOWN_AND_UP) # start ddmm
    mkr.sleep(2)
    press_back(device) # close app
def stop_ddmm(device):
    """Stop the DDMM Profiler via its UI.

    Same absolute-coordinate caveat as start_ddmm(): assumes a fixed
    screen layout -- TODO confirm per target device.
    """
    open_app(device, _ddmm_pkg)
    mkr.sleep(2)
    device.touch(384, 300, mkd.DOWN_AND_UP) # stop ddmm
    press_back(device) # close app
| Python | 0 | |
e0d075661677b4b02fa29d108472e80b9fbcad02 | Add quote fixture | SoftLayer/testing/fixtures/Billing_Order_Quote.py | SoftLayer/testing/fixtures/Billing_Order_Quote.py | getObject = {
'accountId': 1234,
'id': 1234,
'name': 'TestQuote1234',
'quoteKey': '1234test4321',
}
# Canned SoftLayer_Billing_Order_Quote.getRecalculatedOrderContainer response.
# Values (packageId 50, price id 1921, ...) are arbitrary fixture data --
# confirm against the tests that consume this before changing them.
getRecalculatedOrderContainer = {
    'orderContainers': [{
        'presetId': '',
        'prices': [{
            'id': 1921
        }],
        'quantity': 1,
        'packageId': 50,
        'useHourlyPricing': '',
    }],
}
| Python | 0 | |
b517150810e3757ef5cd1aeb03088187efaa134f | Add unit tests for Mixture node | bayespy/inference/vmp/nodes/tests/test_mixture.py | bayespy/inference/vmp/nodes/tests/test_mixture.py | ######################################################################
# Copyright (C) 2014 Jaakko Luttinen
#
# This file is licensed under Version 3.0 of the GNU General Public
# License. See LICENSE for a text of the license.
######################################################################
######################################################################
# This file is part of BayesPy.
#
# BayesPy is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# BayesPy is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BayesPy. If not, see <http://www.gnu.org/licenses/>.
######################################################################
"""
Unit tests for mixture module.
"""
import numpy as np
from ..gaussian import GaussianARD
from ..gamma import Gamma
from ..mixture import Mixture
from ..categorical import Categorical
from bayespy.utils import random
from bayespy.utils import linalg
from bayespy.utils import utils
from bayespy.utils.utils import TestCase
class TestMixture(TestCase):
    """Unit tests for the Mixture node: construction and VMP messages."""
    def test_init(self):
        """
        Test the creation of Mixture node
        """
        # Do not accept non-negative cluster plates
        z = Categorical(np.ones(2))
        self.assertRaises(ValueError,
                          Mixture,
                          z,
                          GaussianARD,
                          GaussianARD(0, 1, plates=(2,)),
                          Gamma(1, 1, plates=(2,)),
                          cluster_plate=0)
        # Try constructing a mixture without any of the parents having the
        # cluster plate axis
        z = Categorical(np.ones(2))
        self.assertRaises(ValueError,
                          Mixture,
                          z,
                          GaussianARD,
                          GaussianARD(0, 1, plates=()),
                          Gamma(1, 1, plates=()))
    def test_message_to_child(self):
        """
        Test the message to child of Mixture node.
        """
        K = 3
        #
        # Estimate statistics from parents only
        #
        # Simple case
        mu = GaussianARD([0,2,4], 1,
                         ndim=0,
                         plates=(K,))
        alpha = Gamma(1, 1,
                      plates=(K,))
        z = Categorical(np.ones(K))
        X = Mixture(z, GaussianARD, mu, alpha)
        self.assertEqual(X.plates, ())
        self.assertEqual(X.dims, ( (), () ))
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            2)
        self.assertAllClose(u[1],
                            2**2+1)
        # Broadcasting the moments on the cluster axis
        mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        alpha = Gamma(1, 1,
                      plates=(K,))
        z = Categorical(np.ones(K))
        X = Mixture(z, GaussianARD, mu, alpha)
        self.assertEqual(X.plates, ())
        self.assertEqual(X.dims, ( (), () ))
        u = X._message_to_child()
        self.assertAllClose(u[0],
                            2)
        self.assertAllClose(u[1],
                            2**2+1)
        #
        # Estimate statistics with observed children
        #
        # (not implemented yet)
        pass
    def test_message_to_parent(self):
        """
        Test the message to parents of Mixture node.
        """
        K = 3
        # Broadcasting the moments on the cluster axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K))
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0],
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0))
        m = X._message_to_parent(1)
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))
        # Some parameters do not have cluster plate axis
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1) # Note: no cluster plate axis!
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K))
        X = Mixture(z, GaussianARD, Mu, Alpha)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0],
                            random.gaussian_logpdf(xx*alpha,
                                                   x*alpha*mu,
                                                   mumu*alpha,
                                                   logalpha,
                                                   0))
        m = X._message_to_parent(1)
        self.assertAllClose(m[0],
                            1/K * (alpha*x) * np.ones(3))
        self.assertAllClose(m[1],
                            -0.5 * 1/K * alpha * np.ones(3))
        # Cluster assignments do not have as many plate axes as parameters.
        M = 2
        Mu = GaussianARD(2, 1,
                         ndim=0,
                         plates=(K,M))
        (mu, mumu) = Mu._message_to_child()
        Alpha = Gamma(3, 1,
                      plates=(K,M))
        (alpha, logalpha) = Alpha._message_to_child()
        z = Categorical(np.ones(K))
        X = Mixture(z, GaussianARD, Mu, Alpha, cluster_plate=-2)
        tau = 4
        Y = GaussianARD(X, tau)
        y = 5 * np.ones(M)
        Y.observe(y)
        (x, xx) = X._message_to_child()
        m = X._message_to_parent(0)
        self.assertAllClose(m[0]*np.ones(K),
                            np.sum(random.gaussian_logpdf(xx*alpha,
                                                          x*alpha*mu,
                                                          mumu*alpha,
                                                          logalpha,
                                                          0) *
                                   np.ones((K,M)),
                                   axis=-1))
        m = X._message_to_parent(1)
        self.assertAllClose(m[0] * np.ones((K,M)),
                            1/K * (alpha*x) * np.ones((K,M)))
        self.assertAllClose(m[1] * np.ones((K,M)),
                            -0.5 * 1/K * alpha * np.ones((K,M)))
| Python | 0 | |
07b6e59a5c7f581bd3e67f6ce254a8388e8b97e1 | add test | minitds/test_minitds.py | minitds/test_minitds.py | #!/usr/bin/env python3
##############################################################################
# The MIT License (MIT)
#
# Copyright (c) 2016 Hajime Nakagami
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
##############################################################################
import unittest
import minitds
class TestMiniTds(unittest.TestCase):
    """Smoke test against a live SQL Server instance (integration test).

    NOTE(review): requires a server listening on localhost:14333 with the
    credentials below -- these are not self-contained unit tests.
    """
    # Connection parameters for the local test server.
    host = 'localhost'
    user = 'sa'
    password = 'secret'
    database = 'test'
    def setUp(self):
        # Fresh connection per test; closed again in tearDown().
        self.connection = minitds.connect(
            host=self.host,
            user=self.user,
            password=self.password,
            database=self.database,
            port=14333,
        )
    def tearDown(self):
        self.connection.close()
    def test_basic(self):
        # Only checks that a trivial query executes without raising; no
        # assertions are made on the returned rows yet.
        cur = self.connection.cursor()
        cur.execute("select 1 n, @@version version")
if __name__ == "__main__":
    unittest.main()
| Python | 0.000002 | |
9b7817e4c4583ddecf2586b595bce9e2e126f4f0 | Add test for image.py | tests/image_test.py | tests/image_test.py | #!/usr/bin/env python
# encoding: utf-8
from unittest import main
from vimiv_testcase import VimivTestCase
class ImageTest(VimivTestCase):
    """Image mode Test.

    Exercises zooming, image-to-image navigation and setting toggles of
    vimiv's image mode against the bundled test image.
    """
    @classmethod
    def setUpClass(cls):
        cls.init_test(cls, ["vimiv/testimages/arch_001.jpg"])
        cls.image = cls.vimiv["image"]
    def test_zoom_percent(self):
        """Test getting the fitting image zoom."""
        # Panorama image: fit-zoom is image width over the 1920 px window.
        width = 1920
        im_width = self.image.imsize[0]
        perc = self.image.get_zoom_percent_to_fit()
        self.assertEqual(im_width/width, perc)
    def test_zooming(self):
        """Zooming of images."""
        width = 1920
        # Zoom in by 30 %
        perc_before = self.image.zoom_percent
        self.image.zoom_delta(0.3)
        self.assertEqual(self.image.zoom_percent, perc_before * 1.3)
        # Zoom to a size representing half the image size
        self.image.zoom_to(0.5)
        self.assertEqual(self.image.zoom_percent, 0.5)
        pixbuf = self.image.image.get_pixbuf()
        self.assertEqual(width * 0.5, pixbuf.get_width())
        # Zoom by keyhandler: numeric prefix "03" means zoom to 1/3
        self.vimiv["keyhandler"].num_str = "03"
        self.image.zoom_to(0)
        self.assertEqual(self.image.zoom_percent, 1/3)
        pixbuf = self.image.image.get_pixbuf()
        self.assertEqual(width * (1/3), pixbuf.get_width())
        # Zoom back to fit
        self.image.zoom_to(0)
        self.assertEqual(self.image.zoom_percent,
                         self.image.get_zoom_percent_to_fit())
        pixbuf = self.image.image.get_pixbuf()
        self.assertEqual(width * self.image.get_zoom_percent_to_fit(),
                         pixbuf.get_width())
        # Unreasonable zoom: warning shown, zoom level left unchanged
        self.image.zoom_to(1000)
        message = self.vimiv["statusbar"].left_label.get_text()
        self.assertEqual(message, "Warning: Object cannot be zoomed (further)")
        pixbuf = self.image.image.get_pixbuf()
        self.assertEqual(width * self.image.get_zoom_percent_to_fit(),
                         pixbuf.get_width())
        # Non parseable percentage
        self.vimiv["keyhandler"].num_str = "vimiv"
        self.image.zoom_to(0)
        message = self.vimiv["statusbar"].left_label.get_text()
        self.assertEqual(message, "Error: Zoom percentage not parseable")
    def test_move(self):
        """Move from image to image."""
        self.assertEqual(0, self.vimiv.index)
        self.image.move_index()
        self.assertEqual(1, self.vimiv.index)
        self.image.move_index(forward=False)
        self.assertEqual(0, self.vimiv.index)
        self.image.move_index(delta=2)
        self.assertEqual(2, self.vimiv.index)
        self.image.move_pos()
        self.assertEqual(len(self.vimiv.paths) - 1, self.vimiv.index)
        self.image.move_pos(forward=False)
        self.assertEqual(0, self.vimiv.index)
    def test_toggles(self):
        """Toggle image.py settings."""
        # Rescale svg: toggling twice must restore the original value
        before = self.image.rescale_svg
        self.image.toggle_rescale_svg()
        self.assertFalse(before == self.image.rescale_svg)
        self.image.toggle_rescale_svg()
        self.assertTrue(before == self.image.rescale_svg)
        # Overzoom
        before = self.image.overzoom
        self.image.toggle_overzoom()
        self.assertFalse(before == self.image.overzoom)
        self.image.toggle_overzoom()
        self.assertTrue(before == self.image.overzoom)
        # Animations should be tested in animation_test.py
    def test_check_for_edit(self):
        """Check if an image was edited."""
        # check_for_edit appears to detect edits via an "-EDIT" marker in
        # the file name -- confirm against image.py.
        path = self.vimiv.paths[self.vimiv.index]
        self.assertEqual(0, self.image.check_for_edit(False))
        self.vimiv.paths[self.vimiv.index] = "some-EDIT.jpg"
        self.assertEqual(1, self.image.check_for_edit(False))
        self.assertEqual(0, self.image.check_for_edit(True))
        # Reset path
        self.vimiv.paths[self.vimiv.index] = path
if __name__ == '__main__':
    main()
| Python | 0.000002 | |
a13ee62b02d3fe1958f2cbecd903c3e8b32562da | Add dummy test file #2 | tests/test_dummy.py | tests/test_dummy.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2017 Jun-ya HASEBA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
def test_dummy():
    """Trivial sanity check that the test infrastructure runs at all."""
    expected = 2
    assert 1 + 1 == expected
| Python | 0.000001 | |
5d795253180ef11117ae27447fa597fa15b40734 | Add testing for graphing code | tests/test_graph.py | tests/test_graph.py | import os
from click.testing import CliRunner
from cli.script import cli
def get_graph_code():
    """Return a small Python source snippet for the CLI under test to graph.

    NOTE(review): the snippet appears to be only analysed, never executed
    (``string1.add(...)`` would raise AttributeError on a str if run) --
    confirm the CLI merely parses it.
    """
    return '''
from copy import deepcopy as dc
class StringCopier(object):
    def __init__(self):
        self.copied_strings = set()
    def copy(self):
        string1 = 'this'
        string2 = dc(string1)
        string1.add(string1)
        return string2
class DoSomething(object):
    def something(self):
        copier = StringCopier()
        copied_string = copier.copy()
'''
def test_produce_graph():
    """Running the CLI on a source file yields the graph and its PDF render."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('code.py', 'w') as f:
            f.write(get_graph_code())
        runner.invoke(cli, ['code.py', '--output', 'code_output'])
        produced = os.listdir(os.path.curdir)
        assert 'code_output' in produced
        assert 'code_output.pdf' in produced
def test_file_extension():
    """An explicit --output-format controls the rendered file's extension."""
    runner = CliRunner()
    with runner.isolated_filesystem():
        with open('code.py', 'w') as f:
            f.write(get_graph_code())
        runner.invoke(cli, ['code.py', '--output', 'code_output', '--output-format', 'png'])
        produced = os.listdir(os.path.curdir)
        assert 'code_output' in produced
        assert 'code_output.png' in produced
        assert 'code_output.pdf' not in produced
| Python | 0 | |
66989005b6e9443c65c082ea1c2e4386ffae1330 | Add a few basic pages tests ahead of #406 | tests/test_pages.py | tests/test_pages.py | from gittip.testing import serve_request, load, setup_tips
def test_homepage():
    """The homepage mentions the weekly payday."""
    body = serve_request('/').body
    assert "Gittip happens every Thursday." in body, body
def test_profile():
    """A participant's profile page shows the gratefulness blurb."""
    with load(*setup_tips(("cheese", "puffs", 0))):
        body = serve_request('/cheese/').body
        assert "I’m grateful for tips" in body, body
def test_widget():
    """The embeddable widget page contains the window-opening script."""
    with load(*setup_tips(("cheese", "puffs", 0))):
        body = serve_request('/cheese/widget.html').body
        assert "javascript: window.open" in body, body
# These hit the network.
def test_github_proxy():
    """The GitHub proxy page notes the user has not joined (hits the network)."""
    body = serve_request('/on/github/lgtest/').body
    assert "<b>lgtest</b> has not joined" in body, body
def test_twitter_proxy():
    """The Twitter proxy page notes the account has not joined (hits the network)."""
    body = serve_request('/on/twitter/twitter/').body
    assert "<b>Twitter</b> has not joined" in body, body
| Python | 0 | |
7a5b46d5a9d0e45b928bcadfeb91a6285868d8f3 | Create medium_RunLength.py | medium_RunLength.py | medium_RunLength.py | """
Determine the run length
of a string
ex: aaabbrerr > 3a2b1r1e2r
"""
def RunLength(string):
    """Run-length encode *string*.

    Each maximal run of a repeated character is emitted as the run length
    followed by the character, e.g. "aaabbrerr" -> "3a2b1r1e2r".

    Fix over the original: the empty string now yields "" instead of
    raising IndexError on string[0].

    :param string: text to encode
    :return: the run-length encoded string
    """
    # Local import: this script has no top-level import section.
    from itertools import groupby
    return "".join(str(len(list(group))) + char
                   for char, group in groupby(string))
# keep this function call here
# Python 2 entry point: reads one line from stdin and prints its run-length
# encoding (on Python 3 use input() and print(...) instead).
print RunLength(raw_input())
| Python | 0.000004 | |
f3c8117755537ca96c3c8c72d5f54b8c244c260b | add top-level class | mwdust/DustMap3D.py | mwdust/DustMap3D.py | ###############################################################################
#
# DustMap3D: top-level class for a 3D dust map; all other dust maps inherit
# from this
#
###############################################################################
class DustMap3D:
    """Abstract base for 3D dust maps; concrete maps must subclass this
    and override ``__call__``.

    History: 2013-11-24 - Started - Bovy (IAS)
    """

    def __init__(self):
        """Set up the dust map.

        The base class holds no state; subclasses perform their own
        initialization.
        """

    def __call__(self, *args, **kwargs):
        """Evaluate the dust map.

        Not implemented here -- every concrete subclass must provide its
        own evaluation; calling the base class always raises.
        """
        raise NotImplementedError("'__call__' for this DustMap3D not implemented yet")
| Python | 0.000566 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.