commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
921b0adf8b93ccad54eb0a82e42ff4b742e176db | Add label_wav_dir.py (#14847) | tensorflow/examples/speech_commands/label_wav_dir.py | tensorflow/examples/speech_commands/label_wav_dir.py | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Runs a trained audio graph against WAVE files and reports the results.
The model, labels and .wav files specified in the arguments will be loaded, and
then the predictions from running the model against the audio data will be
printed to the console. This is a useful script for sanity checking trained
models, and as an example of how to use an audio model from Python.
Here's an example of running it:
python tensorflow/examples/speech_commands/label_wav_dir.py \
--graph=/tmp/my_frozen_graph.pb \
--labels=/tmp/speech_commands_train/conv_labels.txt \
--wav_dir=/tmp/speech_dataset/left
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import glob
import tensorflow as tf
# pylint: disable=unused-import
from tensorflow.contrib.framework.python.ops import audio_ops as contrib_audio
# pylint: enable=unused-import
FLAGS = None
def load_graph(filename):
  """Load a frozen GraphDef from *filename* into the default graph."""
  graph_def = tf.GraphDef()
  with tf.gfile.FastGFile(filename, 'rb') as f:
    graph_def.ParseFromString(f.read())
  tf.import_graph_def(graph_def, name='')
def load_labels(filename):
  """Return one stripped label string per line of the label file."""
  labels = []
  for line in tf.gfile.GFile(filename):
    labels.append(line.rstrip())
  return labels
def run_graph(wav_dir, labels, input_layer_name, output_layer_name,
              num_top_predictions):
  """Runs the audio data through the graph and prints predictions.

  Args:
    wav_dir: directory whose *.wav files are classified one by one.
    labels: list of label strings, index-aligned with the model's output.
    input_layer_name: graph tensor fed with the raw WAV file bytes.
    output_layer_name: name of the softmax output tensor.
    num_top_predictions: how many top-scoring labels to print per file.

  Returns:
    0 (always; problems are only reported through tf.logging).
  """
  with tf.Session() as sess:
    # Feed the audio data as input to the graph.
    # predictions will contain a two-dimensional array, where one
    # dimension represents the input image count, and the other has
    # predictions per class
    for wav_path in glob.glob(wav_dir + "/*.wav"):
      if not wav_path or not tf.gfile.Exists(wav_path):
        # NOTE(review): tf.logging.fatal only logs -- execution continues
        # and the open() below will raise IOError. Confirm intended.
        tf.logging.fatal('Audio file does not exist %s', wav_path)
      with open(wav_path, 'rb') as wav_file:
        wav_data = wav_file.read()
      softmax_tensor = sess.graph.get_tensor_by_name(output_layer_name)
      # Trailing comma unpacks the single-row batch of class scores.
      predictions, = sess.run(softmax_tensor, {input_layer_name: wav_data})
      # Sort to show labels in order of confidence
      print('\n%s' % (wav_path.split('/')[-1]))
      top_k = predictions.argsort()[-num_top_predictions:][::-1]
      for node_id in top_k:
        human_string = labels[node_id]
        score = predictions[node_id]
        print('%s (score = %.5f)' % (human_string, score))
    return 0
def label_wav(wav_dir, labels, graph, input_name, output_name, how_many_labels):
  """Loads the model and labels, and runs the inference to print predictions.

  Args:
    wav_dir: directory of .wav files to classify.
    labels: path to the text file with one label per line.
    graph: path to the frozen GraphDef (.pb).
    input_name / output_name: tensor names for input bytes / softmax output.
    how_many_labels: number of top predictions to print.
  """
  if not labels or not tf.gfile.Exists(labels):
    # NOTE(review): tf.logging.fatal does not abort; the code falls through.
    tf.logging.fatal('Labels file does not exist %s', labels)
  if not graph or not tf.gfile.Exists(graph):
    tf.logging.fatal('Graph file does not exist %s', graph)
  labels_list = load_labels(labels)
  # load graph, which is stored in the default session
  load_graph(graph)
  run_graph(wav_dir, labels_list, input_name, output_name, how_many_labels)


def main(_):
  """Entry point for script, converts flags to arguments."""
  label_wav(FLAGS.wav_dir, FLAGS.labels, FLAGS.graph, FLAGS.input_name,
            FLAGS.output_name, FLAGS.how_many_labels)
if __name__ == '__main__':
  parser = argparse.ArgumentParser()
  # Input locations: directory of .wav files, frozen graph, label list.
  parser.add_argument(
      '--wav_dir', type=str, default='', help='Audio file to be identified.')
  parser.add_argument(
      '--graph', type=str, default='', help='Model to use for identification.')
  parser.add_argument(
      '--labels', type=str, default='', help='Path to file containing labels.')
  # Graph tensor names; defaults match the speech_commands training graph.
  parser.add_argument(
      '--input_name',
      type=str,
      default='wav_data:0',
      help='Name of WAVE data input node in model.')
  parser.add_argument(
      '--output_name',
      type=str,
      default='labels_softmax:0',
      help='Name of node outputting a prediction in the model.')
  parser.add_argument(
      '--how_many_labels',
      type=int,
      default=3,
      help='Number of results to show.')
  # Unrecognized flags are forwarded to tf.app.run unchanged.
  FLAGS, unparsed = parser.parse_known_args()
  tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| Python | 0.000002 | |
a3a48824b36ef62edaf128379f1baec5482166e7 | Save error_message for resources (SAAS-982) | src/nodeconductor_saltstack/migrations/0005_resource_error_message.py | src/nodeconductor_saltstack/migrations/0005_resource_error_message.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Adds an ``error_message`` text field to Domain and Site so failures
    can be stored on the resource itself (SAAS-982)."""

    dependencies = [
        ('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),
    ]

    operations = [
        # Blank-allowed free text; empty string means "no error recorded".
        migrations.AddField(
            model_name='domain',
            name='error_message',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='site',
            name='error_message',
            field=models.TextField(blank=True),
            preserve_default=True,
        ),
    ]
| Python | 0 | |
bfb51cadc66f34a67686bef3b15e9197c9d0617b | Create ping_help.py | ping_help.py | ping_help.py | import time
import subprocess
import os
# Shell-quote the user-supplied hostname before interpolating it into a
# command line.  Fix: the original concatenated the raw input straight into
# os.system(), so an input like "; rm -rf ~" would have been executed.
import pipes  # Python 2 stdlib helper for shell quoting

# Read the target host from stdin, then ping it and post-process the output.
hostname = raw_input('')
safe_host = pipes.quote(hostname)
#while 1:
os.system("ping -c 10 -i 5 " + safe_host + " >1.txt")
# Extract the "icmp_seq" and "time" columns from the ping output ...
os.system("awk -F'[= ]' '{print $6,$10}' < 1.txt >final.txt")
# ... and keep only lines that actually contain numbers.
os.system("grep [0-9] final.txt >final1.txt")
dacffcb3e79877e1ea5e71d1a2e67bd4edd865bf | Add SettingOverrideModel that exposes a SettingOverrideDecorator to QML | plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py | plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py | # Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.
from PyQt5.QtCore import Qt, pyqtSlot, QUrl
from UM.Application import Application
from UM.Qt.ListModel import ListModel
class SettingOverrideModel(ListModel):
    """Qt list model that exposes the per-object setting overrides held by a
    SettingOverrideDecorator, so QML can display and edit them."""

    # Custom QML role ids, one per column exposed by the model.
    KeyRole = Qt.UserRole + 1
    LabelRole = Qt.UserRole + 2
    DescriptionRole = Qt.UserRole + 3
    ValueRole = Qt.UserRole + 4
    TypeRole = Qt.UserRole + 5
    UnitRole = Qt.UserRole + 6
    ValidRole = Qt.UserRole + 7

    def __init__(self, decorator, parent = None):
        super().__init__(parent)

        # Keep the model in sync with the decorator's set of overrides.
        self._decorator = decorator
        self._decorator.settingAdded.connect(self._onSettingsChanged)
        self._decorator.settingRemoved.connect(self._onSettingsChanged)
        self._decorator.settingValueChanged.connect(self._onSettingValueChanged)
        self._onSettingsChanged()

        self.addRoleName(self.KeyRole, "key")
        self.addRoleName(self.LabelRole, "label")
        self.addRoleName(self.DescriptionRole, "description")
        self.addRoleName(self.ValueRole,"value")
        self.addRoleName(self.TypeRole, "type")
        self.addRoleName(self.UnitRole, "unit")
        self.addRoleName(self.ValidRole, "valid")

    def _onSettingsChanged(self):
        # Full rebuild on any add/remove; cheap for the small override sets.
        self.clear()

        active_instance = Application.getInstance().getMachineManager().getActiveMachineInstance()

        for key, value in self._decorator.getAllSettings().items():
            setting = active_instance.getSettingByKey(key)
            if not setting:
                # Override refers to a setting the active machine does not
                # know about; skip rather than show a broken row.
                continue

            self.appendItem({
                "key": key,
                "label": setting.getLabel(),
                "description": setting.getDescription(),
                "value": value,
                "type": setting.getType(),
                "unit": setting.getUnit(),
                "valid": setting.validate()
            })

    def _onSettingValueChanged(self, key, value):
        # Cheap in-place update when only a value changed.
        index = self.find("key", key)
        if index != -1:
            self.setProperty(index, "value", value)
| Python | 0 | |
3652f1c666f3bf482862727838f0b4bbc9fea5e9 | fix bug 1076270 - add support for Windows 10 | alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py | alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py | """bug 1076270 - support windows 10
Revision ID: 17e83fdeb135
Revises: 52dbc7357409
Create Date: 2014-10-03 14:03:29.837940
"""
# revision identifiers, used by Alembic.
revision = '17e83fdeb135'
down_revision = '52dbc7357409'
from alembic import op
from socorro.lib import citexttype, jsontype, buildtype
from socorro.lib.migrations import fix_permissions, load_stored_proc
import sqlalchemy as sa
from sqlalchemy import types
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql import table, column
def upgrade():
    """Register Windows 10 in the os_versions lookup table.

    NOTE(review): kernel version 6.4 corresponds to early Windows 10
    preview builds (final builds report 10.0) -- confirm intended.
    """
    op.execute("""
        INSERT INTO os_versions
        (major_version, minor_version, os_name, os_version_string)
        VALUES (6, 4, 'Windows', 'Windows 10')
    """)
def downgrade():
    """Revert the upgrade: remove the Windows 10 row from os_versions.

    Fix: the original statement read ``DELEE FROM`` -- invalid SQL that
    made every downgrade through this revision fail.
    """
    op.execute("""
        DELETE FROM os_versions
        WHERE major_version = 6
        AND minor_version = 4
        AND os_name = 'Windows'
        AND os_version_string = 'Windows 10'
    """)
| Python | 0 | |
dcdd783859da957ae92c04fd22fcb70c48d3144f | Create MultipartPostHandler.py | Slack.indigoPlugin/Contents/Server-Plugin/MultipartPostHandler.py | Slack.indigoPlugin/Contents/Server-Plugin/MultipartPostHandler.py | #!/usr/bin/python
####
# 02/2006 Will Holcomb <wholcomb@gmail.com>
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# 7/26/07 Slightly modified by Brian Schneider
# in order to support unicode files ( multipart_encode function )
# http://peerit.blogspot.com/2007/07/multipartposthandler-doesnt-work-for.html
"""
Usage:
Enables the use of multipart/form-data for posting forms
Inspirations:
Upload files in python:
http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/146306
urllib2_file:
Fabien Seisen: <fabien@seisen.org>
Example:
import MultipartPostHandler, urllib2, cookielib
cookies = cookielib.CookieJar()
opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookies),
MultipartPostHandler.MultipartPostHandler)
params = { "username" : "bob", "password" : "riviera",
"file" : open("filename", "rb") }
opener.open("http://wwww.bobsite.com/upload/", params)
Further Example:
The main function of this file is a sample which downloads a page and
then uploads it to the W3C validator.
"""
import urllib
import urllib2
import mimetools, mimetypes
import os, stat
from cStringIO import StringIO
class Callable:
    """Wrap *anycallable* so it can be invoked through an instance's
    ``__call__`` attribute -- the old Python 2 substitute for staticmethod
    used by this recipe."""

    def __init__(self, anycallable):
        self.__call__ = anycallable
# Controls how sequences are uncoded. If true, elements may be given multiple values by
# assigning a sequence.
doseq = 1
class MultipartPostHandler(urllib2.BaseHandler):
    """urllib2 handler (Python 2) that re-encodes POST mappings containing
    file objects as multipart/form-data uploads."""

    # Run before the default HTTPHandler so the body is rewritten first.
    handler_order = urllib2.HTTPHandler.handler_order - 10 # needs to run first

    def http_request(self, request):
        """Rewrite the request body: url-encode plain mappings, or build a
        multipart body when any value is an open file object."""
        data = request.get_data()
        if data is not None and type(data) != str:
            v_files = []
            v_vars = []
            try:
                # Split the mapping into plain variables and file uploads.
                for(key, value) in data.items():
                    if type(value) == file:
                        v_files.append((key, value))
                    else:
                        v_vars.append((key, value))
            except TypeError:
                # NOTE(review): ``sys`` is never imported in this module, so
                # this branch would raise NameError instead -- confirm.
                systype, value, traceback = sys.exc_info()
                raise TypeError, "not a valid non-string sequence or mapping object", traceback

            if len(v_files) == 0:
                data = urllib.urlencode(v_vars, doseq)
            else:
                boundary, data = self.multipart_encode(v_vars, v_files)

                contenttype = 'multipart/form-data; boundary=%s' % boundary
                if(request.has_header('Content-Type')
                   and request.get_header('Content-Type').find('multipart/form-data') != 0):
                    print "Replacing %s with %s" % (request.get_header('content-type'), 'multipart/form-data')
                request.add_unredirected_header('Content-Type', contenttype)

            request.add_data(data)
        return request

    def multipart_encode(vars, files, boundary = None, buf = None):
        """Encode *vars* (name/value pairs) and *files* (name/file pairs) as
        a multipart/form-data body; return (boundary, body_string)."""
        if boundary is None:
            boundary = mimetools.choose_boundary()
        if buf is None:
            buf = StringIO()
        for(key, value) in vars:
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"' % key)
            buf.write('\r\n\r\n' + value + '\r\n')
        for(key, fd) in files:
            file_size = os.fstat(fd.fileno())[stat.ST_SIZE]
            filename = fd.name.split('/')[-1]
            contenttype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
            buf.write('--%s\r\n' % boundary)
            buf.write('Content-Disposition: form-data; name="%s"; filename="%s"\r\n' % (key, filename))
            buf.write('Content-Type: %s\r\n' % contenttype)
            # buffer += 'Content-Length: %s\r\n' % file_size
            # Rewind before reading so partially-consumed files upload fully.
            fd.seek(0)
            buf.write('\r\n' + fd.read() + '\r\n')
        buf.write('--' + boundary + '--\r\n\r\n')
        buf = buf.getvalue()
        return boundary, buf
    # Callable wrapper lets multipart_encode be used without an instance
    # (Python 2 staticmethod substitute -- see the Callable class above).
    multipart_encode = Callable(multipart_encode)

    https_request = http_request
def main():
    """Demo driver: download each URL given on argv (or google.com) and
    submit it to the W3C validator via a multipart file upload."""
    import tempfile, sys
    validatorURL = "http://validator.w3.org/check"
    opener = urllib2.build_opener(MultipartPostHandler)

    def validateFile(url):
        # Save the page to a temp file, then upload that file handle.
        temp = tempfile.mkstemp(suffix=".html")
        os.write(temp[0], opener.open(url).read())
        params = { "ss" : "0", # show source
                   "doctype" : "Inline",
                   "uploaded_file" : open(temp[1], "rb") }
        print opener.open(validatorURL, params).read()
        os.remove(temp[1])

    if len(sys.argv[1:]) > 0:
        for arg in sys.argv[1:]:
            validateFile(arg)
    else:
        validateFile("http://www.google.com")
if __name__=="__main__":
main()
| Python | 0 | |
f5ada694fae30f15498c775e8c4aa14a08459251 | Add slogan plugin | plugins/slogan.py | plugins/slogan.py | import re
import requests
import urllib.parse
class Plugin:
    """SmartBot plugin that generates slogans via sloganizer.net."""

    def __call__(self, bot):
        # Register the responder and its help entry with the bot core.
        bot.on_respond(r"slogan(?:ise)? (.*)$", self.on_respond)
        bot.on_help("slogan", self.on_help)

    def on_respond(self, bot, msg, reply):
        query = urllib.parse.quote(msg["match"][0])
        url = "http://www.sloganizer.net/en/outbound.php?slogan={0}".format(query)
        page = requests.get(url, headers={"User-Agent": "SmartBot"})
        # The service wraps the slogan in markup; strip every tag.
        reply(re.sub("<.*?>", "", page.text))

    def on_help(self, bot, msg, reply):
        reply("Syntax: slogan[ise] <thing>")
| Python | 0.000001 | |
779fb015913a17fcb8fb290515845e6b47c3ae50 | Create the converter (with span-conversion functionality) | latex2markdown.py | latex2markdown.py | """
A Very simple tool to convert latex documents to markdown documents
"""
import re
# (LaTeX-regex, Markdown-replacement) pairs applied in order to each line.
# Fix: the original patterns used greedy ``.+``, so a line holding two span
# commands (e.g. ``\emph{a} and \emph{b}``) was swallowed up to the *last*
# closing delimiter.  Non-greedy ``.+?`` confines each match to its own
# argument.
span_substitutions = [
    (r'\\emph\{(.+?)\}', r'*\1*'),           # \emph{x}   -> *x*
    (r'\\textbf\{(.+?)\}', r'**\1**'),       # \textbf{x} -> **x**
    (r'\\verb;(.+?);', r'`\1`'),             # \verb;x;   -> `x`
    (r'\\includegraphics\{(.+?)\}', r''),    # images are dropped
]
def convert_span_elements(line):
    """Apply every span substitution in order and return the converted line."""
    for pattern, replacement in span_substitutions:
        line = re.compile(pattern).sub(replacement, line)
    return line
# This next bit is to test the conversion as it builds
from sys import stdin

if __name__=="__main__":
    # Filter mode: convert stdin line by line, echoing straight to stdout
    # (lines keep their own newlines, hence end='').
    for line in stdin:
        print(convert_span_elements(line),end='')
| Python | 0 | |
2fdabf544c75096efafe2d14988efa28619643ab | add scheme | app/scheme_mongodb.py | app/scheme_mongodb.py | import pymongo
import bson
from bson import json_util
import warnings
from cStringIO import StringIO
from pymongo import Connection, uri_parser
import bson.son as son
import json
import logging
def open(url=None, task=None):
    """Parse a mongodb:// URI and return a MongoWrapper over the cursor.

    The URI may carry '&'-separated options after '?'; an option named
    ``query`` is decoded from JSON (with bson's json_util object hook)
    into a SON document, anything else is kept verbatim in ``options``.

    NOTE(review): shadows the builtin ``open`` (conventional for disco
    scheme modules); *task* and the parsed ``options`` are never used.
    """
    #parses a mongodb uri and returns the database
    #"mongodb://localhost/test.in?query='{"key": value}'"
    uri = url if url else "mongodb://localhost/test.in"
    #print 'uri: ' + uri
    params = uri.split('?', 1)
    uri = params[0]
    uri_info = uri_parser.parse_uri(uri)
    query = None
    #TODO test flow from a query
    #parse json to a dict = q_d
    # ^^ this is where we use json_util.object_hook
    #SON()['query'] = q_d['query']
    #for k,v in q_d.iteritems:
    #    if k not "query":
    #        SON[k] = v
    options = {}
    if len(params) > 1:
        params = params[1]
        # NOTE(review): ``list_of_params`` is never used and the loop below
        # iterates the *characters* of the raw option string while always
        # splitting the whole string -- only a single 'name=json' option can
        # ever have worked.  Confirm before relying on multi-option URIs.
        list_of_params = params.split('&', 1)
        for p in params:
            name, json_obj = params.split('=')
            if name == 'query':
                query = son.SON(json.loads(json_obj, object_hook=json_util.object_hook))
            else:
                options[name] = json_obj
    '''
    query = son.SON()
    li_q = json.loads(json_query)
    for tupl in li_q:
        if tupl[0] == "$max" or tupl[0] == "$min":
            obj_id = bson.objectid.ObjectId(tupl[1])
            query[tupl[0]] = {u'_id' : obj_id}
        else:
            query[tupl[0]] = tupl[1]
    '''
    if not query:
        query = {}
    #go around: connect to the sonnection then choose db by ['dbname']
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        connection = Connection(uri)
    database_name = uri_info['database']
    collection_name = uri_info['collection']
    db = connection[database_name]
    collection = db[collection_name]
    cursor = collection.find(query, None)
    wrapper = MongoWrapper(cursor)
    return wrapper
#WRAPPED!
class MongoWrapper(object):
    """Iterable wrapper around a pymongo cursor.

    Exposes the minimal interface the disco input layer expects:
    iteration, ``len()``, ``close()`` and ``read(size)``.
    """

    def __init__(self, cursor):
        self.cursor = cursor
        self.offset = 0

    def __iter__(self):
        # Primary access path: stream records straight off the cursor.
        for rec in self.cursor:
            yield rec

    def __len__(self):
        # may need to do this more dynamically (see lib/disco/comm.py ln 163)
        return self.cursor.count()

    def close(self):
        self.cursor.close()

    def read(self, size=-1):
        """Return up to *size* records (all remaining records if size < 0).

        Fixes two defects in the original: ``read`` was decorated with
        ``@property`` while still taking a ``size`` argument, so it could
        never be called as a method; and the body appended
        ``self.cursor.__iter__()`` (the iterator object itself) instead of
        the records, returning None whenever ``size <= 0``.
        """
        records = []
        it = iter(self.cursor)
        while size < 0 or len(records) < size:
            try:
                records.append(next(it))
            except StopIteration:
                break
        return records
def input_stream(stream, size, url, params):
    """Disco input_stream hook: wrap *url* and return the record source.

    *stream*, *size* and *params* are part of the disco hook signature and
    are unused here.  NOTE: ``open`` below is this module's open() (which
    shadows the builtin) and returns a MongoWrapper over a mongodb:// URI.
    """
    return open(url)
| Python | 0.000032 | |
ea06c46d0c79846fada6175dff7ea085ed9fed7d | Add test script for sv_comp | run_benchmark.py | run_benchmark.py | #!/usr/bin/env python
"""Usage: run_benchmark.py <symdivine_dir> <benchmark> [options]
Arguments:
<benchmark> input *.i file
<symdivine_dir> location of symdivine
Options:
--cheapsimplify Use only cheap simplification methods
--dontsimplify Disable simplification
--disabletimeout Disable timeout for Z3
-p --partialstore Use partial SMT store (better caching)
-c --enablecaching Enable caching of Z3 formulas
-s --statistics Enable output of statistics
-v --verbose Enable verbose mode
-w --vverbose Enable extended verbose mode
-o level of optimalizations [default: 2]
"""
import atexit
import os
import resource
import shutil
import signal
import subprocess
import sys
from tempfile import mkdtemp
from time import sleep

import docopt
class Timeout(Exception):
    """Raised by the SIGALRM handler when the benchmark time budget runs out."""
def start_timeout(sec):
    """Arrange for Timeout to be raised in *sec* seconds (via SIGALRM)."""
    def alarm_handler(signum, data):
        raise Timeout
    signal.signal(signal.SIGALRM, alarm_handler)
    signal.alarm(sec)


def stop_timeout():
    """Cancel any pending alarm and restore default SIGALRM handling."""
    # turn off timeout
    signal.signal(signal.SIGALRM, signal.SIG_DFL)
    signal.alarm(0)
def run_symdivine(symdivine_location, benchmark, symdivine_params = None):
    """Launch symdivine's 'reachability' analysis on *benchmark*.

    Extra command-line flags may be passed in *symdivine_params*.
    Returns the Popen handle with stdout/stderr piped (caller collects
    output via communicate()).
    """
    cmd = [os.path.join(symdivine_location, "symdivine")]
    cmd.append('reachability')
    cmd.append(benchmark)
    if symdivine_params:
        cmd = cmd + symdivine_params
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    return p
def compile_benchmark(src, opt_level, tmpdir):
    """Compile *src* to LLVM IR with clang; return the path to the .ll file.

    Fix: the original printed "ERROR" when clang failed but then returned
    the (nonexistent) output path anyway, so symdivine was launched against
    a model that was never produced.  We now exit; the enclosing try/finally
    in the driver still runs its cleanup.
    """
    out = os.path.join(tmpdir, "model.ll")
    cmd = "clang -S -emit-llvm {0} -o {1} {2}".format(opt_level, out, src)
    if os.system(cmd) != 0:
        print("ERROR")
        print("Compilation failed")
        sys.exit(1)
    return out
def printTimeConsumed():
    """Print total user+system CPU seconds used by child processes."""
    usage = resource.getrusage(resource.RUSAGE_CHILDREN)
    print('=== TIME CONSUMED')
    print(usage.ru_utime + usage.ru_stime)
    sys.stdout.flush()
def printMemoryUsage():
    """Print the peak RSS (in MB) among this process's children."""
    usage = resource.getrusage(resource.RUSAGE_CHILDREN)
    print('=== MEMORY USAGE')
    print(usage.ru_maxrss / 1024.0)
    sys.stdout.flush()
def rmrf_tmp_dir(d):
    """Recursively delete the temporary working directory *d*.

    Uses shutil.rmtree instead of shelling out to ``rm -rf`` with an
    unquoted, interpolated path.  Returns 0 to stay compatible with the
    original ``os.system`` exit-status return value.
    """
    shutil.rmtree(d, ignore_errors=True)
    return 0
def copy_source_to_tmp(src):
    """Copy *src* into a fresh ./symdivine.* temp directory.

    Returns (tmpdir, copied_path).  On copy failure prints the SV-COMP
    'ERROR' verdict, removes the temp dir and exits(1).
    """
    # create temporary directory and copy sources there
    tmpdir = mkdtemp(prefix='symdivine.', dir='.')
    basename = os.path.basename(src)
    if os.system('cp {0} {1}/{2}'.format(src, tmpdir, basename)) != 0:
        # cp already gave error message
        rmrf_tmp_dir(tmpdir)
        print('=== RESULT')
        print('ERROR')
        sys.exit(1)
    return (tmpdir, '{0}/{1}'.format(tmpdir, basename))
def signal_childs(sig):
    """Send *sig* to every process in our process group except ourselves
    (our own handler is temporarily set to ignore so we survive it)."""
    signal.signal(sig, signal.SIG_IGN)
    os.kill(0, sig)
    signal.signal(sig, signal.SIG_DFL)


def sigpipe_handler(signum, data):
    """SIGPIPE handler: tear down the whole process group -- two polite
    SIGINTs first, then SIGKILL for anything still alive."""
    signal_childs(signal.SIGINT)
    signal_childs(signal.SIGINT)
    signal_childs(signal.SIGKILL)
def parse_args():
    """Parse the docopt options; return (symdivine_dir, benchmark,
    clang -oN flag, timeout seconds, flags to forward to symdivine).
    """
    arguments = docopt.docopt(__doc__)
    # Drop options the user did not supply (docopt yields None/False).
    arguments = {k: v for k, v in arguments.items() if v}

    if not "-o" in arguments:
        arguments["-o"] = 2
    if not "--timeout" in arguments:
        # NOTE(review): --timeout is not declared in the usage string above,
        # so it can only ever take this default (900 s). Confirm intended.
        arguments["--timeout"] = 900

    benchmark = arguments["<benchmark>"]
    opt = "-o" + str(arguments["-o"])
    loc = arguments["<symdivine_dir>"]
    timeout = arguments["--timeout"]

    del arguments["-o"]
    del arguments["<benchmark>"]
    del arguments["<symdivine_dir>"]
    del arguments["--timeout"]

    # Forward every remaining flag (plus its value, if any) verbatim.
    res = []
    for key, value in arguments.iteritems():  # Python 2 (basestring below)
        res.append(key)
        if isinstance(value, basestring):
            res.append(value)
    return (loc, benchmark, opt, timeout, res)
if __name__ == "__main__":
location, benchmark, opt_level, timeout, symdivine_params = parse_args()
tmpdir, src = copy_source_to_tmp(benchmark)
signal.signal(signal.SIGPIPE, sigpipe_handler)
start_timeout(timeout)
print('=== RESULT')
sys.stdout.flush()
try:
benchmark_comp = compile_benchmark(src, opt_level, tmpdir)
p = run_symdivine(location, benchmark_comp, symdivine_params)
(out, err) = p.communicate()
stop_timeout()
if p.returncode != 0:
print('ERROR')
if not out is None:
if "Safe." in out:
print("TRUE")
elif "Error state" in out:
print("FALSE")
else:
print("UNKNOWN")
if not err is None:
print(err)
sys.stdout.flush()
except Timeout:
print('TIMEOUT')
sys.stdout.flush()
finally:
stop_timeout()
printTimeConsumed()
printMemoryUsage()
signal_childs(signal.SIGINT)
signal_childs(signal.SIGTERM)
rmrf_tmp_dir(tmpdir)
| Python | 0 | |
03ad7302f75ea5de0870c798ec70f1a1912288ca | Add main.py file Description for 'hello! hosvik' | src/main.py | src/main.py | import sys
# Report the running platform, then greet.
print(sys.platform)
print('Hello hosvik!')
| Python | 0.000001 | |
054c75ce1a63732be7a58ec1150e9f8aaff2aedb | Create test.py | plugins/test.py | plugins/test.py | @bot.message_handler(commands=['test', 'toast'])
def send_test(message):
    # Reply in the originating chat with the canned TEST_MSG (both `bot`
    # and TEST_MSG are expected to be defined by the plugin loader before
    # this file is evaluated), encoded to UTF-8 bytes for the bot API.
    bot.send_message(message.chat.id, TEST_MSG.encode("utf-8"))
| Python | 0.000005 | |
5c9ffaaa8e244bb9db627a0408258750cc0e81d6 | Create ping.py | src/ping.py | src/ping.py | nisse
| Python | 0.000003 | |
1473e0f4f1949349ef7212e0755fa8ffa6401cbe | Create process_htk_mlf_zh.py | process_htk_mlf_zh.py | process_htk_mlf_zh.py | #!/usr/bin/env python
#
# This script reads in a HTK MLF format label file and converts the
# encoded contents to GBK encoding.
#
import string, codecs
fin=open('vom_utt_wlab.mlf')
fout=codecs.open('vom_utt_wlab.gbk.mlf', encoding='gbk', mode='w')
# Copy the MLF stream line by line until EOF, decoding label sections.
while True:
    sr=fin.readline()
    if sr=='':break
    sr=sr.strip()
    if sr.endswith('.lab"'):
        # Start of a label section: echo the header, then decode its body
        # until the terminating '.' line.
        print >>fout, sr
        while True:
            sr=(fin.readline()).strip()
            if sr=='.':break
            if sr.startswith('\\'):
                # Line of \nnn octal escapes, one escape per raw byte.
                lst=(sr.strip('\\')).split('\\') # get the list of octal representation of each byte
                bins=bytearray()
                for itm in lst:
                    val=0
                    for ii in range(3): # each octal number will have exactly 3 numbers, i.e. of the form \nnn
                        val=val*8
                        val=val+int(itm[ii])
                    bins.append(val)
                # Reinterpret the accumulated raw bytes as GBK text.
                print >>fout, bins.decode('gbk')
            else:
                print >>fout, sr
        # '.' terminates a label section in the MLF format.
        print >>fout, '.'
    else:
        print >>fout, sr
fin.close()
fout.close()
| Python | 0 | |
c8c807cfcb4422edc0e2dbe3a4673a62fa37cbfa | Add extra migration triggered by updated django / parler (#501) | djangocms_blog/migrations/0037_auto_20190806_0743.py | djangocms_blog/migrations/0037_auto_20190806_0743.py | # Generated by Django 2.1.11 on 2019-08-06 05:43
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import parler.fields
import taggit_autosuggest.managers
class Migration(migrations.Migration):
    """Field-definition resync triggered by updated Django/parler: swaps
    plain ForeignKeys for parler's TranslationsForeignKey on the
    translation models and refreshes the M2M/taggit field definitions
    (no real schema change beyond what makemigrations detected)."""

    dependencies = [
        ('djangocms_blog', '0036_auto_20180913_1809'),
    ]

    operations = [
        migrations.AlterField(
            model_name='authorentriesplugin',
            name='authors',
            field=models.ManyToManyField(limit_choices_to={'djangocms_blog_post_author__publish': True}, to=settings.AUTH_USER_MODEL, verbose_name='authors'),
        ),
        # Translation masters move to parler's TranslationsForeignKey.
        migrations.AlterField(
            model_name='blogcategorytranslation',
            name='master',
            field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogCategory'),
        ),
        migrations.AlterField(
            model_name='blogconfigtranslation',
            name='master',
            field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogConfig'),
        ),
        migrations.AlterField(
            model_name='latestpostsplugin',
            name='tags',
            field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Show only the blog articles tagged with chosen tags.', related_name='djangocms_blog_latest_post', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='filter by tag'),
        ),
        migrations.AlterField(
            model_name='post',
            name='tags',
            field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='djangocms_blog_tags', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),
        ),
        migrations.AlterField(
            model_name='posttranslation',
            name='master',
            field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.Post'),
        ),
    ]
| Python | 0 | |
e082c803bf5ce31c4948d0d512e9ec0366cf0adc | Create politeusersbot.py | politeusersbot.py | politeusersbot.py | #Polite Users Bot created by Kooldawgstar
import praw
from time import sleep
import random
USERNAME = "USERNAME"
PASSWORD = "PASSWORD"
LIMIT = 100
RESPONSES = ["Thanks for being a nice user and thanking people for help!",
"Thank you for being a nice user and thanking people for help!",
]
responded = set()
r = praw.Reddit(user_agent="Enter in Useragent here")
r.login(USERNAME, PASSWORD)
subreddit = r.get_subreddit("Polite_Users_Bot")
def meets_criteria(responded, comment):
    """Return True when *comment* should receive a reply from the bot.

    A comment qualifies when it was not written by the bot itself, has not
    been replied to already, and contains one of the thank-you phrases.

    Fix: the original condition built the tuple
    ``("thanks", "thx", "thank you", "thank u" in body)`` -- only the last
    element performed a membership test, and a non-empty tuple is always
    truthy, so *every* unseen comment matched.
    """
    body = comment.body.lower()
    thanked = any(phrase in body
                  for phrase in ("thanks", "thx", "thank you", "thank u"))
    return (str(comment.author) != USERNAME
            and comment.id not in responded
            and thanked)
def generate_response(comment):
    """Pick the reply text for *comment* (currently a random canned phrase;
    *comment* is accepted so responses can become context-specific later)."""
    return random.choice(RESPONSES)
# Main polling loop: scan the newest comments, reply to any that qualify.
while True:
    for comment in subreddit.get_comments(limit=LIMIT):
        if meets_criteria(responded, comment):
            print (comment.body)
            print (comment.id)
            print (str(comment.author))
            # Retry until the reply sticks (rate limiting), bail out on
            # anything unexpected.
            while True: #continue to try responding to the comment until it works, unless something unknown occurs
                try:
                    comment.reply(generate_response(comment))
                    print ("Breaking out after responding, and adding to the list")
                    responded.add(comment.id)
                    break
                except praw.errors.RateLimitExceeded:
                    print ("Sleeping, rate limit :(")
                    sleep(10*60) #sleep for 10 minutes, that's the timer limit
                except:
                    # NOTE(review): bare except silently drops the comment;
                    # consider logging the exception before breaking.
                    print ("Some unknown error has occurred, bail out...")
                    break
            print ("---------------------------------------\n\n")
    print ("sleeping")
    sleep(60) #sleep for a minute for new comments to show up
| Python | 0.00002 | |
94481f656690956b2a4eb5a1227948d24ba4cc05 | Add actual command line python function (#7) | bin/CCDSingleEpochStile.py | bin/CCDSingleEpochStile.py | #!/usr/bin/env python
from stile.lsst.base_tasks import CCDSingleEpochStileTask
# Parse command-line arguments and run the per-CCD single-epoch Stile task.
CCDSingleEpochStileTask.parseAndRun()
| Python | 0.00002 | |
225d5232cca6bb42e39959b2330758225a748477 | add little script to retrieve URLs to PS1-DR1 images | py/legacyanalysis/get-ps1-skycells.py | py/legacyanalysis/get-ps1-skycells.py | import requests
from astrometry.util.fits import *
from astrometry.util.multiproc import *
def get_cell(cell):
    """Fetch the PS1-DR1 image file list for one (skycell, subcell) pair.

    *cell* is a ``(skycell, subcell)`` tuple kept as a single argument so
    ``multiproc.map`` can pass it through unchanged; the original used the
    Python-2-only tuple-parameter syntax ``def get_cell((skycell, subcell))``.
    Returns a fits_table with one row per file the service reports.
    """
    skycell, subcell = cell
    url = ('http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?skycell=%i.%03i'
           % (skycell, subcell))
    print('Getting', url)
    r = requests.get(url)
    raw_lines = r.text.split('\n')
    cols = 'projcell subcell ra dec filter mjd type filename shortname'
    assert(raw_lines[0] == cols)
    # Drop the header row and any blank trailing lines (r.text typically
    # ends with a newline, which the original code did not guard against
    # and which produced empty word lists and IndexErrors below).
    lines = [l.split() for l in raw_lines[1:] if l.strip()]
    T = fits_table()
    # Columns parsed as something other than str; None means "skip column".
    types = dict(projcell=np.int16, subcell=np.int16,
                 ra=np.float64, dec=np.float64, mjd=None)
    types['type'] = None
    for i, col in enumerate(cols.split()):
        tt = types.get(col, str)
        if tt is None:
            continue
        vals = [words[i] for words in lines]
        # HACK -- base-10 parsing for integer subcell (zero-padded values
        # like "012" must not be read as octal).
        if col == 'subcell':
            vals = [int(v, 10) for v in vals]
        T.set(col, np.array([tt(v) for v in vals], dtype=tt))
    return T
# Fetch the file lists for every subcell of every projection cell, using
# 8 worker processes, checkpointing one FITS table per skycell.
mp = multiproc(8)
TT = []
for skycell in range(635, 2643+1):
    args = []
    for subcell in range(100):
        args.append((skycell, subcell))
    TTi = mp.map(get_cell, args)
    Ti = merge_tables(TTi)
    # Per-skycell checkpoint so an interrupted run loses little work.
    Ti.writeto('ps1skycells-%i.fits' % skycell)
    TT.extend(TTi)

# Final merged catalog across all skycells.
T = merge_tables(TT)
T.writeto('ps1skycells.fits')
| Python | 0 | |
0f5ecc42485d4f0e89fbe202b57a2e7735ea69cc | Create product_images.py | product_images.py | product_images.py | from openerp.osv import osv, fields
class product_template(osv.Model):
    """Extends product.template with a second product image
    (old OpenERP osv API)."""
    _inherit = 'product.template'

    _columns = {
        # Stored as base64 binary, same convention as the standard image
        # field; the size limit comes from the stock help text.
        'x_secondpicture': fields.binary("Second Image",
            help="This field holds the second image used as image for the product, limited to 1024x1024px."),
    }

# Old-API model registration call.
product_template()
| Python | 0.000012 | |
e81f6e01ac55723e015c4d7d9d8f61467378325a | Add autoincrement to ZUPC.id | migrations/versions/e187aca7c77a_zupc_id_autoincrement.py | migrations/versions/e187aca7c77a_zupc_id_autoincrement.py | """ZUPC.id autoincrement
Revision ID: e187aca7c77a
Revises: ccd5b0142a76
Create Date: 2019-10-21 14:01:10.406983
"""
# revision identifiers, used by Alembic.
revision = 'e187aca7c77a'
down_revision = '86b41c3dbd00'
from alembic import op
from sqlalchemy.dialects import postgresql
from sqlalchemy.schema import Sequence, CreateSequence, DropSequence
import sqlalchemy as sa
def upgrade():
    """Attach an auto-increment sequence to ZUPC.id so inserts no longer
    need an explicit id.

    NOTE(review): the sequence starts at 1; if the table already contains
    rows it should be setval()'d past max(id) or inserts will collide with
    existing ids -- confirm.
    """
    op.execute('''
        CREATE SEQUENCE ZUPC_id_seq;
        ALTER TABLE "ZUPC" ALTER COLUMN id SET DEFAULT nextval('ZUPC_id_seq');
    ''')


def downgrade():
    """Drop the column default and the sequence again."""
    op.execute('''
        ALTER TABLE "ZUPC" ALTER COLUMN id DROP DEFAULT;
        DROP SEQUENCE ZUPC_id_seq
    ''')
| Python | 0.00001 | |
cc8b3f8a7fb6af29f16d47e4e4caf56f17605325 | Add command handler. | src/server/commandHandler.py | src/server/commandHandler.py | from src.shared.encode import decodePosition
class CommandHandler(object):
    """Translates per-player network events/commands into game-state
    changes and broadcasts the resulting world updates."""

    def __init__(self, gameState, connectionManager):
        self.gameState = gameState
        self.connectionManager = connectionManager

    def broadcastMessage(self, *args, **kwargs):
        # Thin pass-throughs so the rest of the class never touches the
        # connection manager directly.
        self.connectionManager.broadcastMessage(*args, **kwargs)

    def sendMessage(self, *args, **kwargs):
        self.connectionManager.sendMessage(*args, **kwargs)

    def createConnection(self, playerId):
        playerX, playerY = self.gameState.getPos(playerId)
        self.sendMessage(playerId, "your_id_is", [playerId])
        # Fix: the new_obelisk broadcast was missing the player id, so
        # clients could not tell whose obelisk had appeared.
        self.broadcastMessage("new_obelisk", [playerId, playerX, playerY])
        for otherId in self.gameState.positions:
            # We already broadcast this one to everyone, including ourself.
            if otherId == playerId:
                continue
            otherX, otherY = self.gameState.getPos(otherId)
            # Fix: sendMessage needs the recipient id as its first argument;
            # the original dropped it and sent "new_obelisk" as the id.
            self.sendMessage(playerId, "new_obelisk", [otherId, otherX, otherY])

    def removeConnection(self, playerId):
        self.broadcastMessage("delete_obelisk", [playerId])
        self.gameState.removePlayer(playerId)

    def stringReceived(self, playerId, data):
        command = data.strip().lower()

        STEP_SIZE = 1.0
        RELATIVE_MOVES = {
            'n': [0.0, STEP_SIZE],
            's': [0.0, -STEP_SIZE],
            'e': [STEP_SIZE, 0.0],
            'w': [-STEP_SIZE, 0.0],
        }

        if command in RELATIVE_MOVES:
            self.gameState.movePlayerBy(playerId, RELATIVE_MOVES[command])
        else:
            newPos = decodePosition(command)
            if newPos is not None:
                self.gameState.movePlayerTo(playerId, newPos)

        # TODO: Maybe only broadcast the new position if we handled a valid
        # command? Else the position isn't changed....
        playerX, playerY = self.gameState.getPos(playerId)
        # Fix: the original referenced an undefined name ``myY`` here,
        # raising NameError on every received command.
        self.broadcastMessage("set_pos", [playerId, playerX, playerY])
| Python | 0 | |
be67baac2314408b295bddba3e5e4b2ca9bfd262 | Add ffs.exceptions | ffs/exceptions.py | ffs/exceptions.py | """
ffs.exceptions
Base and definitions for all exceptions raised by FFS
"""
class Error(Exception):
    """Base class for every exception raised by FFS."""


class DoesNotExistError(Error):
    """Raised when a filesystem object that should exist does not."""
| Python | 0.00354 | |
2737e1d46263eff554219a5fa5bad060b8f219d3 | Add CLI script for scoring huk-a-buk. | score_hukabuk.py | score_hukabuk.py | import json
import os
import time
DATA = {'turns': {}}  # whole persisted game state, written out by save_game()
class Settings(object):
    # Mutable run-time configuration shared by the helper functions below.
    FILENAME = None  # save-file path, set by set_filename()
    CURRENT_TURN = 0  # index of the hand currently being played
    NAME_CHOICES = None  # newline-joined "<index>: <name>" menu, set by enter_names()
def set_filename():
    """Ask the user for a save-file name; default to the current unix time."""
    chosen = raw_input('Set the filename? ').strip()
    if not chosen:
        chosen = str(int(time.time()))
    Settings.FILENAME = '%s.json' % chosen
def save_game():
    """Persist the whole game state to the current save file as JSON."""
    with open(Settings.FILENAME, 'w') as handle:
        json.dump(DATA, handle)
def enter_names():
    """Prompt for player names until a blank line; seed every score at -5.

    Keeps an ordered list of entry so the menu numbering shown later by
    get_bidder() is stable -- iterating a plain dict's keys gives an
    arbitrary order on Python 2, so the displayed indices could change
    between runs.
    """
    names = {}
    ordered_names = []
    while True:
        name = raw_input('Enter name: ')
        if name.strip() == '':
            break
        if name not in names:
            ordered_names.append(name)
        names[name] = -5
    DATA['names'] = names
    Settings.NAME_CHOICES = '\n'.join([
        '%d: %s' % (i, name)
        for i, name in enumerate(ordered_names)
    ])
    save_game()
def game_over():
    """Return True when the user answers 'y' (any case) to the prompt."""
    answer = raw_input('Is the game over? [y/n] ')
    return answer.strip().lower() == 'y'
def get_bidder():
    """Prompt until a valid bidder is chosen and return that player's name.

    Accepts either the number shown next to a name or the exact name.
    The previous version indexed the newline-joined menu STRING with the
    entered number (yielding one character) and did a substring membership
    test (matching partial names); both are fixed by parsing the menu
    back into its "<index>: <name>" entries.
    """
    menu_lines = Settings.NAME_CHOICES.split('\n')
    names = [line.split(': ', 1)[1] for line in menu_lines]
    actual_bidder = None
    while actual_bidder is None:
        print(Settings.NAME_CHOICES)
        bidder = raw_input('Who won the bid? ')
        try:
            actual_bidder = names[int(bidder)]
        except (ValueError, IndexError):
            # Not a (valid) menu number; accept an exact name instead.
            if bidder in names:
                actual_bidder = bidder
    return actual_bidder
def get_bid():
    """Prompt until a legal bid (2-5) is entered and return it as an int."""
    actual_bid = None
    while actual_bid is None:
        bid = raw_input('Bid amount? ')
        try:
            bid = int(bid)
            if bid in (2, 3, 4, 5):
                actual_bid = bid
        except ValueError:
            # Not a number at all; re-prompt.  (Was a bare except, which
            # also swallowed KeyboardInterrupt/SystemExit.)
            pass
    return actual_bid
def get_points():
    """Collect this turn's score for every player.

    A per-turn score is -5 (got set) or 0-5 tricks.  Updates the running
    totals in DATA['names'] and returns the per-player scores for the turn.
    """
    result = {}
    # Parenthesized print works identically on Python 2 for a single
    # argument and keeps the file importable under Python 3.
    print('=' * 60)
    print('Scores for turn %d:' % (Settings.CURRENT_TURN,))
    for name in DATA['names'].keys():
        msg = 'Score for %r: ' % (name,)
        actual_score = None
        while actual_score is None:
            score = raw_input(msg)
            try:
                score = int(score)
                if score in (-5, 0, 1, 2, 3, 4, 5):
                    actual_score = score
            except ValueError:
                # Not an integer; re-prompt.  (Was a bare except.)
                pass
        result[name] = actual_score
        DATA['names'][name] += actual_score
    return result
def play_turn():
    """Record one hand -- bidder, bid and per-player points -- then save."""
    record = DATA['turns'].setdefault(Settings.CURRENT_TURN, {})
    record['bidder'] = get_bidder()
    record['bid'] = get_bid()
    record['points'] = get_points()
    Settings.CURRENT_TURN += 1
    save_game()
def print_scores():
    """Print the running total for every player."""
    # Parenthesized single-argument print behaves identically on Python 2
    # and keeps the file syntactically valid under Python 3.
    print('=' * 60)
    print('Current scores:')
    print('-' * 60)
    for name, score in DATA['names'].items():
        print('%r -> %d' % (name, score))
    print('=' * 60)
def play_game():
    """Main loop: show the scoreboard and play hands until the user quits."""
    while not game_over():
        print_scores()
        play_turn()
def main():
    """Set up a new save file, register the players and play until done."""
    set_filename()
    enter_names()
    play_game()
if __name__ == '__main__':
    main()
| Python | 0 | |
7f64a56a17fc6d73da4ac2987d42931885925db0 | Create server.py | server/server.py | server/server.py | import http.server
import socketserver
# NOTE: port 80 is privileged on most systems; the script must run as root
# (or the port be changed) to bind successfully.
PORT = 80

# Serves files from the current working directory over HTTP.
Handler = http.server.SimpleHTTPRequestHandler

# Allow quick restarts: without this, a socket lingering in TIME_WAIT
# makes re-binding fail with "Address already in use".
socketserver.TCPServer.allow_reuse_address = True
httpd = socketserver.TCPServer(("", PORT), Handler)
httpd.serve_forever()
| Python | 0.000001 | |
d599e5a35d4ac056dbefa8ec8af6c8be242c12f1 | Add test case for input pipeline. | linen_examples/wmt/input_pipeline_test.py | linen_examples/wmt/input_pipeline_test.py | # Copyright 2021 The Flax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pathlib
import tempfile
from absl.testing import absltest
import input_pipeline
from configs import default
import tensorflow_datasets as tfds
# We just use different values here to verify that the input pipeline uses
# the correct value for the 3 different datasets.
_TARGET_LENGTH = 32  # max sequence length for the packed training dataset
_EVAL_TARGET_LENGTH = 48  # max sequence length for the eval dataset
_PREDICT_TARGET_LENGTH = 64  # max sequence length for the decode/predict dataset
class InputPipelineTest(absltest.TestCase):

  def _get_datasets(self):
    """Build small mocked train/eval/predict WMT datasets for 2 devices."""
    config = default.get_config()
    config.per_device_batch_size = 1
    config.vocab_size = 32
    config.max_corpus_chars = 1000
    config.max_target_length = _TARGET_LENGTH
    config.max_eval_target_length = _EVAL_TARGET_LENGTH
    config.max_predict_length = _PREDICT_TARGET_LENGTH
    vocab_path = os.path.join(tempfile.mkdtemp(), 'sentencepiece_model')
    # Go two directories up to the root of the flax directory.
    flax_root_dir = pathlib.Path(__file__).parents[2]
    data_dir = str(flax_root_dir) + '/.tfds/metadata'  # pylint: disable=unused-variable
    # mock_data makes tfds return deterministic fake examples instead of
    # downloading the real WMT corpus.
    with tfds.testing.mock_data(num_examples=128, data_dir=data_dir):
      train_ds, eval_ds, predict_ds, _ = input_pipeline.get_wmt_datasets(
          n_devices=2,
          config=config,
          shard_idx=0,
          shard_count=1,
          vocab_path=vocab_path)
    return train_ds, eval_ds, predict_ds

  def test_train_ds(self):
    """Training batches carry packed examples with position/segmentation."""
    train_ds = self._get_datasets()[0]
    expected_shape = [2, _TARGET_LENGTH]  # 2 devices.
    # For training we pack multiple short examples in one example.
    # *_position and *_segmentation indicate the boundaries.
    for batch in train_ds.take(3):
      self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
          'inputs': expected_shape,
          'inputs_position': expected_shape,
          'inputs_segmentation': expected_shape,
          'targets': expected_shape,
          'targets_position': expected_shape,
          'targets_segmentation': expected_shape,
      })

  def test_eval_ds(self):
    """Eval batches are unpacked and use the eval target length."""
    eval_ds = self._get_datasets()[1]
    expected_shape = [2, _EVAL_TARGET_LENGTH]  # 2 devices.
    for batch in eval_ds.take(3):
      self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
          'inputs': expected_shape,
          'targets': expected_shape,
      })

  def test_predict_ds(self):
    """Predict batches use the (longest) decode target length."""
    predict_ds = self._get_datasets()[2]
    expected_shape = [2, _PREDICT_TARGET_LENGTH]  # 2 devices.
    for batch in predict_ds.take(3):
      self.assertEqual({k: v.shape.as_list() for k, v in batch.items()}, {
          'inputs': expected_shape,
          'targets': expected_shape,
      })
if __name__ == '__main__':
  absltest.main()  # run through absl so its flags are parsed correctly
| Python | 0.998969 | |
be6997772bd7e39dd1f68d96b3d52a82372ad216 | update migartions | tracpro/supervisors/migrations/0002_auto_20141102_2231.py | tracpro/supervisors/migrations/0002_auto_20141102_2231.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations: adds human-readable
    # verbose names to Supervisor and documents/help-texts the region field.
    # Do not edit applied migrations by hand.

    dependencies = [
        ('supervisors', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='supervisor',
            options={'verbose_name': 'Supervisor', 'verbose_name_plural': 'Supervisors'},
        ),
        migrations.AlterField(
            model_name='supervisor',
            name='region',
            field=models.CharField(help_text='The name of the Region or State this supervisor belongs to, this should map to the Contact group on RapidPro', max_length=64, verbose_name='Region'),
            preserve_default=True,
        ),
    ]
| Python | 0 | |
class Pque(object):
    """Singly-linked priority queue; priority scale is 0 through -99.

    0 has the greatest priority; ties are first-come-first-popped.
    Node objects are plain Pque instances linked through next_node.
    """
    def __init__(self):
        self.next_node = None
        self.priority = 0
        self.value = None
        self.tail = None
        self.head = None
        self.size = 0

    def insert(self, value, priority=-99):
        """Insert *value* into the queue; default priority is -99.

        Fixes over the original: the scan is a while-loop over the actual
        list (the old ``for x in range(self.size - 1)`` ran zero times for
        a one-element queue, silently dropping the new node), the
        ``current.node`` attribute typo is gone, and the tail pointer is
        updated when inserting at the end mid-list.
        """
        new_pque = Pque()
        new_pque.priority = priority
        new_pque.value = value
        if self.size == 0:
            self.head = new_pque
            self.tail = new_pque
        else:
            pre_node = None
            current_node = self.head
            # Walk past every node with priority >= the new one so that
            # equal priorities keep FIFO order.
            while (current_node is not None
                   and current_node.priority >= priority):
                pre_node = current_node
                current_node = current_node.next_node
            if pre_node is None:
                # Higher priority than the current head.
                new_pque.next_node = self.head
                self.head = new_pque
            else:
                new_pque.next_node = current_node
                pre_node.next_node = new_pque
                if current_node is None:
                    self.tail = new_pque
        self.size += 1

    def peek(self):
        """Return the value at the head of the queue without removing it."""
        if self.head is None:
            raise IndexError('que is empty')
        return self.head.value  # was "slef.head.value" (NameError)

    def pop(self):
        """Remove and return the value at the head of the queue."""
        if self.head is None:
            raise IndexError('que is empty')
        temp_val = self.head.value
        self.head = self.head.next_node
        if self.head is None:
            # Queue emptied: keep tail consistent.
            self.tail = None
        self.size -= 1
        return temp_val
| Python | 0 | |
06235d5913cd5eb54d3767f6a7cf60acb1966b39 | Create prettyrpc.py | prettyrpc.py | prettyrpc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from xmlrpclib import ServerProxy
class PrettyProxy(object):
    """Thin wrapper around xmlrpclib.ServerProxy.

    NOTE(review): every proxied call forwards the positional args as ONE
    tuple and the keyword args as ONE dict, i.e. remote(args, kwargs) --
    presumably the server side unpacks this two-argument convention to
    emulate keyword arguments over XML-RPC (which has no kwargs).  Confirm
    before "fixing" it to getattr(...)(*args, **kwargs).
    """
    def __init__(self, *args, **kwargs):
        self._real_proxy = ServerProxy(*args, **kwargs)
    def __getattr__(self, name):
        # Late-bound dispatch: look the method up on the real proxy at call
        # time; only unknown attributes reach __getattr__.
        return lambda *args, **kwargs: getattr(self._real_proxy, name)(args, kwargs)
| Python | 0 | |
# Project Euler problem 5: the smallest positive number evenly divisible
# by all of the numbers from 1 to 20.
#
# The original brute force incremented n and re-tested 20 divisions until
# it hit the answer (232,792,560 iterations); the answer is simply the
# least common multiple of 1..20, computed directly here.
from functools import reduce
from math import gcd


def _lcm(a, b):
    """Least common multiple of two positive integers."""
    return a * b // gcd(a, b)


n = reduce(_lcm, range(1, 21), 1)
print(n)
| Python | 0.000911 | |
d326391f6412afb54ee05a02b3b11e075f703765 | fix value < 0 or higher than max. closes #941 | kivy/uix/progressbar.py | kivy/uix/progressbar.py | '''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize progress of some task.
Only horizontal mode is supported, vertical mode is not available yet.
The progress bar has no interactive elements, It is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done):
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
    '''Class for creating a Progress bar widget.

    See module documentation for more details.
    '''

    def __init__(self, **kwargs):
        # Backing storage for the `value` AliasProperty below.
        self._value = 0.
        super(ProgressBar, self).__init__(**kwargs)

    def _get_value(self):
        return self._value

    def _set_value(self, value):
        # Clamp into [0, self.max] before storing.
        value = max(0, min(self.max, value))
        if value != self._value:
            self._value = value
            # Returning True tells AliasProperty the value changed so that
            # observers get dispatched; no return (None) means "unchanged".
            return True

    value = AliasProperty(_get_value, _set_value)
    '''Current value used for the slider.

    :data:`value` is a :class:`~kivy.properties.AliasProperty`, that returns the
    value of the progressbar. If the value is < 0 or > :data:`max`, it will be
    normalized to those boundaries.

    .. versionchanged:: 1.5.2
        The value is now limited between 0 to :data:`max`
    '''

    def get_norm_value(self):
        d = self.max
        if d == 0:
            # Avoid division by zero when max has been set to 0.
            return 0
        return self.value / float(d)

    def set_norm_value(self, value):
        self.value = value * self.max

    value_normalized = AliasProperty(get_norm_value, set_norm_value,
                                     bind=('value', 'max'))
    '''Normalized value inside the 0-max to 0-1 range::

        >>> pb = ProgressBar(value=50, max=100)
        >>> pb.value
        50
        >>> slider.value_normalized
        0.5

    :data:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    max = NumericProperty(100.)
    '''Maximum value allowed for :data:`value`.

    :data:`max` is a :class:`~kivy.properties.NumericProperty`, default to 100.
    '''
if __name__ == '__main__':
    from kivy.base import runTouchApp
    # Quick manual demo: show a progress bar at 50%.
    runTouchApp(ProgressBar(value=50))
| '''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize progress of some task.
Only horizontal mode is supported, vertical mode is not available yet.
The progress bar has no interactive elements, It is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done):
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
    '''Class for creating a Progress bar widget.

    See module documentation for more details.
    '''

    value = NumericProperty(0.)
    '''Current value used for the slider.

    :data:`value` is a :class:`~kivy.properties.NumericProperty`, default to 0.
    '''
    # NOTE: a plain NumericProperty -- this (pre-fix) version performs no
    # clamping of `value` against 0/`max`.

    def get_norm_value(self):
        d = self.max
        if d == 0:
            # Avoid division by zero when max has been set to 0.
            return 0
        return self.value / float(d)

    def set_norm_value(self, value):
        self.value = value * self.max

    value_normalized = AliasProperty(get_norm_value, set_norm_value,
                                     bind=('value', 'max'))
    '''Normalized value inside the 0-max to 0-1 range::

        >>> pb = ProgressBar(value=50, max=100)
        >>> pb.value
        50
        >>> slider.value_normalized
        0.5

    :data:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
    '''

    max = NumericProperty(100.)
    '''Maximum value allowed for :data:`value`.

    :data:`max` is a :class:`~kivy.properties.NumericProperty`, default to 100.
    '''
if __name__ == '__main__':
    from kivy.base import runTouchApp
    # Quick manual demo: show a progress bar at 50%.
    runTouchApp(ProgressBar(value=50))
| Python | 0.000019 |
5e2e5eed760fdc40d474e511662cf7c22b1ea29b | add usbwatch.py | usbwatch.py | usbwatch.py | #!/usr/bin/env python3
# usbwatch.py - monitor addition/removal of USB devices
#
#
import pyudev
class UsbDevice:
    """Immutable snapshot of one USB device's identifying attributes."""

    @staticmethod
    def fromUdevDevice(udev):
        """Build a UsbDevice from a pyudev device.

        Returns None when a mandatory sysfs attribute is missing;
        'manufacturer' is optional and defaults to None.
        """
        def read(name):
            return udev.attributes.asstring(name)
        try:
            try:
                manufacturer = read('manufacturer')
            except KeyError:
                manufacturer = None
            info = (read('busnum') + '-' + read('devpath'),
                    read('idVendor') + ":" + read('idProduct'),
                    manufacturer,
                    read('product'))
        except KeyError:
            return None
        return UsbDevice(udev.device_path, info)

    def __init__(self, devicePath, info):
        self.path = devicePath
        self.bus, self.id, self.manufacturer, self.product = info

    def __repr__(self):
        fields = (self.bus, self.id, self.manufacturer, self.product)
        return "UsbDevice(%s, %s)" % (self.path, fields)

    def __str__(self):
        return "%s (%s): %s, %s" % (
            self.id, self.bus, self.manufacturer, self.product)
# UsbWatcher monitors the connection status of USB devices.
# It remembers the devices which are "known" to be connected to the system.
class UsbWatcher:
    """Tracks which USB devices are currently connected ("known")."""

    def __init__(self):
        self.ctx = pyudev.Context()
        self.mon = pyudev.Monitor.from_netlink(self.ctx)
        self.mon.filter_by('usb')
        self.knowns = dict()  # udev device_path -> UsbDevice

    # Query the currently connected USB devices
    # Forcefully updates the list of "known" devices
    def poll(self):
        old_knowns = self.knowns
        self.knowns = dict()
        for udev in self.ctx.list_devices(subsystem="usb"):
            dev = UsbDevice.fromUdevDevice(udev)
            if dev is not None:
                self.knowns[udev.device_path] = dev
                if udev.device_path in old_knowns:
                    old_knowns.pop(udev.device_path)
                else:
                    self.onAdd(dev)
        # Whatever is left in old_knowns disappeared since the last poll.
        for path, dev in old_knowns.items():
            self.onRemove(dev)

    # Monitor newly added devices. Any devices connected beforehand are ignored
    def watch(self):
        # Blocks forever, consuming netlink events as they arrive.
        for action, udev in iter(self.mon):
            if action == 'add':
                dev = UsbDevice.fromUdevDevice(udev)
                if dev is not None:
                    self.knowns[udev.device_path] = dev
                    self.onAdd(dev)
            elif action == 'remove':
                if udev.device_path in self.knowns:
                    dev = self.knowns.pop(udev.device_path)
                    self.onRemove(dev)

    # Called upon a device is added to the system
    # Override this
    def onAdd(self,dev):
        print("add %s " % str(dev))

    # Called upon a device is removed from the system
    # Override this
    def onRemove(self,dev):
        print("remove %s " % str(dev))
if __name__ == "__main__":
    try:
        wat = UsbWatcher()
        wat.poll()   # report everything already connected
        wat.watch()  # then block, reporting hotplug events
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the monitor; exit quietly.
        pass
| Python | 0.000003 | |
def parse_dat(f_in, ret):
    """Parse one CrystFEL shell table (compare_hkl/check_hkl .dat) into *ret*.

    Depending on the header line the file carries per-shell Rsplit or CC
    values (compare_hkl) or per-shell counting statistics (check_hkl).
    Resolution columns are 1/nm; they are converted to d-spacings in
    Angstrom via d = 10/s.  Results are appended to ret under the keys
    "table_rsplit"/"table_cc"/"table" plus overall "cmpl"/"redun".

    Fixes: the file handle is now always closed, the warning-message typo
    is corrected, and prints use the (py2-compatible) function form.
    """
    ifs = open(f_in)
    try:
        first = ifs.readline()
        if first.startswith(" 1/d centre"):
            key = ""
            if "Rsplit/%" in first:
                key = "table_rsplit"
            elif first.split()[2] == "CC":
                key = "table_cc"
            else:
                return None
            if key in ret:
                print("Warning: duplicated %s" % key)
            ret[key] = {}
            for l in ifs:
                _, fom, nref, d, dmax, dmin = l.split()
                dmax, dmin = 10. / float(dmax), 10. / float(dmin)
                ret[key].setdefault("dmax", []).append("%7.3f" % dmax)
                ret[key].setdefault("dmin", []).append("%7.3f" % dmin)
                if key == "table_rsplit":
                    # Rsplit is given in percent; store as a fraction.
                    fom = "%.4f" % (float(fom) / 100.)
                elif key == "table_cc":
                    fom = "%.4f" % (float(fom))
                ret[key].setdefault("value", []).append(fom)
        elif first.startswith("Center 1/nm"):
            if "table" in ret:
                print("Warning: duplicated table")
            ret["table"] = {}
            nposs_all = 0
            nref_all = 0
            nmeas_all = 0
            for l in ifs:
                _, nref, nposs, cmpl, nmeas, red, snr, _, _, d, dmax, dmin = l.split()
                dmax, dmin = 10. / float(dmax), 10. / float(dmin)
                ret["table"].setdefault("dmax", []).append("%7.3f" % dmax)
                ret["table"].setdefault("dmin", []).append("%7.3f" % dmin)
                ret["table"].setdefault("snr", []).append(snr)
                ret["table"].setdefault("cmpl", []).append(cmpl)
                ret["table"].setdefault("redun", []).append(red)
                ret["table"].setdefault("nuniq", []).append(nref)
                ret["table"].setdefault("nposs", []).append(nposs)
                nposs_all += int(nposs)
                nref_all += int(nref)
                nmeas_all += int(nmeas)
            ret["cmpl"] = "%6.2f" % (100. * nref_all / nposs_all)
            ret["redun"] = "%6.1f" % (nmeas_all / nref_all)
    finally:
        ifs.close()
def parse_file(logfile, ret):
    """Extract overall statistics from one CrystFEL log file into *ret*.

    First delegates to parse_dat() for the per-shell table, then scans the
    free-text lines for the overall Rsplit/CC/SNR figures, the measurement
    and reflection counts, and the accepted resolution range.

    Fixes: the file handle is now closed (via ``with``), the
    duplicate-check key for measurements matches the key actually stored
    ("nmes"), and prints use the (py2-compatible) function form.
    """
    parse_dat(logfile, ret)
    with open(logfile) as ifs:
        for l in ifs:
            if "Overall Rsplit = " in l:
                if "rsplit" in ret:
                    print("Warning: duplicated Rsplit")
                # Given in percent; store as a fraction.
                ret["rsplit"] = "%.3f" % (float(l[l.rindex("=")+1:l.rindex("%")]) / 100.)
            elif "Overall CC = " in l:
                if "cc12" in ret:
                    print("Warning: duplicated CC")
                ret["cc12"] = "%.4f" % (float(l[l.rindex("=")+1:].strip()))
            elif "overall <snr> = " in l:
                if "snr" in ret:
                    print("Warning: duplicated SNR")
                ret["snr"] = "%.2f" % (float(l[l.rindex("=")+1:].strip()))
            elif "measurements in total" in l:
                # Was checked against "nmeas" while storing "nmes", so the
                # warning could never fire.
                if "nmes" in ret:
                    print("Warning: duplicated measurements")
                ret["nmes"] = l.split()[0]
            elif "reflections in total" in l:
                if "nuniq" in ret:
                    print("Warning: duplicated reflections")
                ret["nuniq"] = l.split()[0]
            elif "Accepted resolution range" in l:
                ret["dmin"] = l[l.rindex("to")+3:l.rindex("Ang")].strip()
                ret["dmax"] = l[l.rindex("(")+1:l.rindex(" to")].strip()
def run(log_files):
    """Merge statistics from *log_files* and print an mmCIF block
    (_reflns + _reflns_shell) to stdout.  Python 2 script (print
    statement, xrange)."""
    ret = {}
    for f in log_files:
        parse_file(f, ret)
    # Overall statistics; the %(...)s fields are filled by parse_file().
    s = """\
_reflns.entry_id UNNAMED
_reflns.d_resolution_low %(dmax)s
_reflns.d_resolution_high %(dmin)s
_reflns.number_all ?
_reflns.number_obs %(nuniq)s
_reflns.observed_criterion ?
_reflns.observed_criterion_F_max ?
_reflns.observed_criterion_F_min ?
_reflns.observed_criterion_I_max ?
_reflns.observed_criterion_I_min ?
_reflns.observed_criterion_sigma_F ?
_reflns.observed_criterion_sigma_I ?
_reflns.percent_possible_obs %(cmpl)s
_reflns.pdbx_redundancy %(redun)s
_reflns.pdbx_netI_over_sigmaI %(snr)s
_reflns.pdbx_number_measured_all ?
_reflns.pdbx_diffrn_id 1
_reflns.pdbx_ordinal 1
_reflns.pdbx_CC_half %(cc12)s
_reflns.pdbx_R_split %(rsplit)s
""" % ret
    # Per-resolution-shell loop_ header.
    s += """\
#
loop_
_reflns_shell.d_res_high
_reflns_shell.d_res_low
_reflns_shell.meanI_over_sigI_obs
_reflns_shell.number_unique_all
_reflns_shell.percent_possible_all
_reflns_shell.pdbx_redundancy
_reflns_shell.pdbx_diffrn_id
_reflns_shell.pdbx_CC_half
_reflns_shell.pdbx_R_split
"""
    # One row per shell; assumes the three tables are shell-aligned (they
    # come from the same logs) -- TODO confirm for mixed log sets.
    for i in xrange(len(ret["table"]["dmax"])):
        tmp = dict(dmin=ret["table"]["dmin"][i],
                   dmax=ret["table"]["dmax"][i],
                   snr=ret["table"]["snr"][i],
                   nuniq=ret["table"]["nuniq"][i],
                   cmpl=ret["table"]["cmpl"][i],
                   redun=ret["table"]["redun"][i],
                   cc12=ret["table_cc"]["value"][i],
                   rsplit=ret["table_rsplit"]["value"][i])
        s += "%(dmax)s %(dmin)s %(snr)s %(nuniq)s %(cmpl)s %(redun)s 1 %(cc12)s %(rsplit)s\n" % tmp
    print s
if __name__ == "__main__":
    import sys
    # Every command-line argument is a CrystFEL log file to merge.
    log_files = sys.argv[1:]
    run(log_files)
| Python | 0 | |
ac7c3ccfdbd02eed6b2b7070160ef08f725c3578 | test migration | migrations/versions/5dc51870eece_initial_migration.py | migrations/versions/5dc51870eece_initial_migration.py | """initial migration
Revision ID: 5dc51870eece
Revises: None
Create Date: 2016-08-19 03:16:41.577553
"""
# revision identifiers, used by Alembic.
revision = '5dc51870eece'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Create the initial schema: categorys, roles, users, follows,
    posts and comments (alembic autogenerated)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.create_table('categorys',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('tag', sa.String(length=64), nullable=True),
    sa.Column('count', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_table('roles',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('default', sa.Boolean(), nullable=True),
    sa.Column('permissions', sa.Integer(), nullable=True),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('name')
    )
    op.create_index(op.f('ix_roles_default'), 'roles', ['default'], unique=False)
    op.create_table('users',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('email', sa.String(length=64), nullable=True),
    sa.Column('username', sa.String(length=64), nullable=True),
    sa.Column('role_id', sa.Integer(), nullable=True),
    sa.Column('password_hash', sa.String(length=128), nullable=True),
    sa.Column('confirmed', sa.Boolean(), nullable=True),
    sa.Column('name', sa.String(length=64), nullable=True),
    sa.Column('location', sa.String(length=64), nullable=True),
    sa.Column('about_me', sa.Text(), nullable=True),
    sa.Column('member_since', sa.DateTime(), nullable=True),
    sa.Column('last_seen', sa.DateTime(), nullable=True),
    sa.Column('avatar_hash', sa.String(length=32), nullable=True),
    sa.ForeignKeyConstraint(['role_id'], ['roles.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_username'), 'users', ['username'], unique=True)
    op.create_table('follows',
    sa.Column('follower_id', sa.Integer(), nullable=False),
    sa.Column('followed_id', sa.Integer(), nullable=False),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.ForeignKeyConstraint(['followed_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['follower_id'], ['users.id'], ),
    sa.PrimaryKeyConstraint('follower_id', 'followed_id')
    )
    op.create_table('posts',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('title', sa.Text(), nullable=True),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('body_html', sa.Text(), nullable=True),
    sa.Column('summary', sa.Text(), nullable=True),
    sa.Column('summary_html', sa.Text(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('category_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['category_id'], ['categorys.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_posts_timestamp'), 'posts', ['timestamp'], unique=False)
    op.create_table('comments',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('body', sa.Text(), nullable=True),
    sa.Column('body_html', sa.Text(), nullable=True),
    sa.Column('timestamp', sa.DateTime(), nullable=True),
    sa.Column('disabled', sa.Boolean(), nullable=True),
    sa.Column('author_id', sa.Integer(), nullable=True),
    sa.Column('post_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['author_id'], ['users.id'], ),
    sa.ForeignKeyConstraint(['post_id'], ['posts.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_comments_timestamp'), 'comments', ['timestamp'], unique=False)
    ### end Alembic commands ###
def downgrade():
    """Drop everything created by upgrade(); children (comments, posts)
    are removed before their parents to satisfy foreign keys."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_comments_timestamp'), table_name='comments')
    op.drop_table('comments')
    op.drop_index(op.f('ix_posts_timestamp'), table_name='posts')
    op.drop_table('posts')
    op.drop_table('follows')
    op.drop_index(op.f('ix_users_username'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    op.drop_index(op.f('ix_roles_default'), table_name='roles')
    op.drop_table('roles')
    op.drop_table('categorys')
    ### end Alembic commands ###
| Python | 0.000001 | |
61a4d783f2c16a7b8f4fbf5a79f9588c58c8f618 | Add a serial port emulation module for debug/development use | systemd/emulator_serialport.py | systemd/emulator_serialport.py |
class SerialPortEmurator:
    """Fake modem serial port for debug/development.

    write() records an AT command; read_line() then replays the canned
    response lines one at a time, returning None when exhausted.
    Fixes over the original: read_line() no longer crashes before the
    first write(), write() no longer raises KeyError on unknown commands,
    the bare except is narrowed, and the parameter shadowing the ``str``
    builtin is renamed.
    """

    def __init__(self):
        # Canned responses keyed by AT command; commands with parameters
        # are keyed up to and including the '='.  Slot 0 of each list is
        # a placeholder overwritten with the echoed command by write().
        self.res = {
            'AT+CGDCONT?': [
                "(ECHO_BACK)",
                "",
                "",
                "+CGDCONT: 1,\"IPV4V6\",\"access_point_name\",\"0.0.0.0\",0,0",
                "",
                "OK",
                ""
            ],
            'AT$QCPDPP?': [
                "(ECHO_BACK)",
                "",
                "",
                "$QCPDPP: 1,3,\"user_id\"",
                "$QCPDPP: 2,0",
                "$QCPDPP: 3,0",
                "$QCPDPP: 4,0",
                "$QCPDPP: 5,0",
                "$QCPDPP: 6,0",
                "$QCPDPP: 7,0",
                "$QCPDPP: 8,0",
                "$QCPDPP: 9,0",
                "$QCPDPP: 10,0",
                "$QCPDPP: 11,0",
                "$QCPDPP: 12,0",
                "$QCPDPP: 13,0",
                "$QCPDPP: 14,0",
                "$QCPDPP: 15,0",
                "$QCPDPP: 16,0",
                "",
                "OK",
                ""
            ],
            'AT+CGDCONT=': [
                "(ECHO_BACK)",
                "",
                "",
                "OK",
                ""
            ],
            'AT$QCPDPP=': [
                "(ECHO_BACK)",
                "",
                "",
                "OK",
                ""
            ],
            'AT+CSQ': [
                "(ECHO_BACK)",
                "",
                "",
                "+CSQ: 4,99",  # "+CSQ: 99,99"
                "",
                "OK",
                ""
            ],
            'AT+CNUM': [
                "(ECHO_BACK)",
                "",
                "",
                "+CNUM: ,\"09099999999\",129",  # "+CNUM: ,\"\",129"
                "",
                "OK",
                ""
            ],
            'AT+CIMI': [
                "(ECHO_BACK)",
                "",
                "",
                "440111111111111",  # "+CME ERROR: operation not allowed"
                "",
                "OK",
                ""
            ],
            'AT+CPAS': [
                "(ECHO_BACK)",
                "",
                "",
                "+CPAS: 4",  # "+CPAS: 0"
                "",
                "OK",
                ""
            ],
            'ATI': [
                "(ECHO_BACK)",
                "",
                "",
                "Manufacturer: MAN",
                "Model: MOD",
                "Revision: REV",
                "IMEI: 999999999999999",
                "+GCAP: +CGSM",
                "",
                "OK",
                ""
            ]
        }
        # No command received yet: read_line() returns None until write()
        # is called (previously this raised AttributeError).
        self.cmd = None
        self.line = -1

    def read_line(self):
        """Return the next canned response line, or None when exhausted
        or when the last command was unknown."""
        if self.line < 0:
            return None
        try:
            text = self.res[self.cmd][self.line]
        except (KeyError, IndexError):
            # Unknown command or response fully consumed.
            self.line = -1
            return None
        self.line += 1
        return text

    def write(self, data):
        """Accept one AT command string and arm the canned response."""
        print("W:[%s]" % data)
        command = data.strip()
        if '=' in command:
            # Parameterised commands are keyed up to and including '='.
            command = command[:command.index('=') + 1]
        self.cmd = command
        self.line = 0
        if command in self.res:
            # Slot 0 echoes back exactly what was written (stripped).
            self.res[command][0] = data.strip()
| Python | 0 | |
c85d867614ae9cd4e21a912acee83bdd9cf5cb8e | Create pypylayer.py | pypylayer.py | pypylayer.py | import subprocess
import atexit
import os
import functools
import time
import select
import sys
try:
import queue
except ImportError:
import Queue as queue
class MPlayerCasting(object):
    """Maps mplayer property type names to Python casting callables."""

    types = {
        "Flag": bool,
        "Integer": int,
        "Position": int,
        "Float": float,
        "Time": float,
        "String": str,
        "String list": dict
    }

    @classmethod
    def get_cast(cls, mplayer_type):
        """Return the Python type for *mplayer_type*, or raise for
        unknown type names."""
        if mplayer_type in cls.types:
            return cls.types[mplayer_type]
        else:
            # Was "{0]" -- a malformed format field that made the error
            # path itself raise ValueError instead of this Exception.
            raise Exception("{0} is not a valid mplayer data type".format(mplayer_type))
class Player(object):
    """Slave-mode wrapper around an external mplayer process.

    Properties reported by ``mplayer -list-properties`` are installed on
    the class at construction time as Python properties.
    """
    _base_args = ['-slave', '-idle', '-quiet']
    ignored_props = ["pause"]
    renamed_props = {"pause": "paused"}
    read_only_props = ['length', 'pause', 'stream_end', 'stream_length',
                       'stream_start', 'stream_time_pos']

    def __init__(self, exec_path='./mplayer'):
        self.properties = []
        self.exec_path = exec_path
        # NOTE(review): this mutates the CLASS attribute, so creating a
        # second Player prepends another exec_path to the shared list --
        # looks safe only for single-instance use; confirm.
        self._base_args.insert(0, exec_path)
        self._generate_properties()
        self._process = subprocess.Popen(self._base_args,
            stdin=subprocess.PIPE, stdout=subprocess.PIPE)
        #Make subprocess quit with python
        # NOTE(review): registering a bound method keeps this instance
        # alive until interpreter exit.
        atexit.register(self.quit)
        self._process.stdout.flush()

    def _get_getter(self, name, type):
        # NOTE(review): appears to be dead code -- returns None and is
        # never called.
        return

    def _generate_properties(self):
        """Run ``mplayer -list-properties`` and install one Python
        property per reported mplayer property."""
        cmd = [self.exec_path, "-list-properties"]
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for line in proc.stdout:
            line = line.split()
            # Property rows start with a lowercase name; skip headers.
            if not line or not line[0].islower():
                continue
            alias = line[0].strip()
            pname = line[0].strip()
            ptype = line[1].strip()
            pmin = line[2].strip()
            pmax = line[3].strip()
            #Check the property shouldn't be ignored
            if pname in self.ignored_props:
                continue
            #Check if the property should be renamed
            if pname in self.renamed_props:
                alias = self.renamed_props[pname]
            #Check the name isn't already in use
            if hasattr(self, alias):
                continue
            # 'No' in the min/max columns means "no bound".
            if pmin == 'No':
                pmin = None
            else:
                pmin = MPlayerCasting.get_cast(ptype)(pmin)
            if pmax == 'No':
                pmax = None
            else:
                pmax = MPlayerCasting.get_cast(ptype)(pmax)
            #Check if the property should be readonly
            if (pmin is None and pmax is None and pname != 'sub_delay') or (pname in self.read_only_props):
                self._add_property(pname, ptype, pmin, pmax, read_only=True, alias=alias)
            else:
                self._add_property(pname, ptype, pmin, pmax)
            #Add to the properties list
            self.properties.append(alias)

    def _add_property(self, pname, ptype, pmin, pmax, read_only=False, alias=None):
        """Install a property named *alias* (default *pname*) on the class."""
        getter = lambda self: self._get_property(pname, ptype)
        setter = None
        if not alias: alias = pname
        if not read_only:
            setter = lambda self, value: self._set_property(value, pname, ptype, pmin, pmax)
        # Set on the class so every instance shares the descriptor.
        setattr(self.__class__, alias, property(getter, setter))

    def _run_command(self, command, wait=True):
        """Send one slave-mode command; if *wait*, block for the reply."""
        #TODO: Currently timeout is not implemented, this means that if for some reason the command fails,
        # the loop will not break.
        is_loadfile = command.startswith("loadfile")
        self._process.stdin.write(command + "\n")
        if not wait:
            return
        while self._process.poll() is None:
            output = self._process.stdout.readline()
            output = output.strip()
            if is_loadfile and output.startswith("Starting playback"):
                return True
            if output.startswith("ANS"):
                # Replies look like "ANS_<name>=<value>".
                result = output.partition('=')[2].strip('\'"')
                if result == "PROPERTY_UNAVAILABLE":
                    return None
                else:
                    return result

    def _get_property(self, prop_name, prop_type):
        """Query one property and cast the textual reply to Python."""
        cmd = "get_property {0}".format(prop_name)
        result = self._run_command(cmd)
        cast = MPlayerCasting.get_cast(prop_type)
        if cast == bool:
            # mplayer prints flags as "yes"/"no".
            if result == "no":
                return False
            else:
                return True
        if not result:
            # Empty string or None: return as-is rather than casting.
            return result
        else:
            return cast(result)

    def _set_property(self, value, pname, ptype, pmin, pmax):
        """Range-check *value* against mplayer's reported bounds, then set."""
        if pmin is not None and value < pmin:
            raise ValueError('value must be at least {0}'.format(pmin))
        if pmax is not None and value > pmax:
            raise ValueError('value must be at most {0}'.format(pmax))
        cmd = "set_property {0} {1}".format(pname, value)
        self._run_command(cmd, wait=False)

    @property
    def paused(self):
        return self._get_property("pause", "Flag")

    @paused.setter
    def paused(self, value):
        if value is True and not self.paused:
            self.pause()
        else:
            self.resume()

    def resume(self):
        """Unpause only if currently paused ("pause" toggles in mplayer)."""
        if self.paused:
            self._run_command("pause", wait=False)

    def pause(self):
        """Pause only if currently playing ("pause" toggles in mplayer)."""
        if not self.paused:
            self._run_command("pause", wait=False)

    def quit(self):
        self._process.kill()

    def loadfile(self, path):
        """Start playback of a local file; raises for missing paths."""
        if not os.path.isfile(path):
            raise Exception("Not a valid file path")
        self._run_command('loadfile "{0}"\n'.format(path))

    def stop(self):
        self._run_command('stop', wait=False)
| Python | 0.000026 | |
144541a563a4f05a762aea82f39bdd63f33c19d5 | Add tests for new membrane | tartpy/tests/test_membrane2.py | tartpy/tests/test_membrane2.py | import pytest
from tartpy.runtime import SimpleRuntime, behavior
from tartpy.eventloop import EventLoop
from tartpy.membrane2 import Membrane
def test_membrane_protocol():
    """End-to-end exercise of proxying between two membranes: a message
    crosses m1 -> m2, the reply_to actor is wrapped in a proxy on the far
    side, replies cross back, and proxies are reused rather than
    re-created."""
    runtime = SimpleRuntime()
    evloop = EventLoop()
    m1 = Membrane({'protocol': 'membrane'}, runtime)
    m2 = Membrane({'protocol': 'membrane'}, runtime)
    result1 = None
    @behavior
    def actor1_beh(self, msg):
        nonlocal result1
        result1 = msg
    result2 = None
    @behavior
    def actor2_beh(self, msg):
        nonlocal result2
        result2 = msg
    actor1 = runtime.create(actor1_beh)
    actor2 = runtime.create(actor2_beh)
    uid_for_2_at_mb2 = m2.get_uid(actor2)
    proxy_for_2_at_mb1 = m1.create_proxy(uid_for_2_at_mb2,
                                         m2.config)
    proxy_for_2_at_mb1 << {'foo': 5,
                           'reply_to': actor1}
    # Drain the event loop so the asynchronous delivery completes.
    evloop.run()
    # test message from m1 to m2
    assert result2['foo'] == 5
    # test that 'reply_to' is a proxy at m2
    proxy_for_1_at_mb2 = result2['reply_to']
    assert proxy_for_1_at_mb2 is not actor1
    proxy_for_1_at_mb2 << {'bar': 3,
                           'reply_to': actor2}
    evloop.run()
    # test message back from m2 to m1
    assert result1['bar'] == 3
    # test that proxy at m1 is reused
    assert result1['reply_to'] is proxy_for_2_at_mb1
    # test a string message across Membranes
    proxy_for_2_at_mb1 << 'a string message'
    evloop.run()
    assert result2 == 'a string message'
def test_dos():
    """Delivery to an unknown uid must raise KeyError instead of silently
    creating (or leaking) an actor."""
    runtime = SimpleRuntime()
    m = Membrane({'protocol': 'membrane'}, runtime)
    with pytest.raises(KeyError):
        m.local_delivery(0, {})
def test_marshall_unmarshall():
    """Plain values round-trip unchanged; actors are replaced by
    marshalled stand-ins (also inside dicts and lists) that unmarshall
    back to the identical actor object."""
    runtime = SimpleRuntime()
    m = Membrane({'protocol': 'membrane'}, runtime)
    # Scalars and plain containers pass through untouched.
    assert m.marshall_message(5) == 5
    assert m.marshall_message('foo') == 'foo'
    assert m.marshall_message([1, 2, 'bar']) == [1, 2, 'bar']
    assert m.marshall_message({'foo': 5, 'bar': 'baz'}) == {'foo': 5, 'bar': 'baz'}
    assert m.unmarshall_message(5) == 5
    assert m.unmarshall_message('foo') == 'foo'
    assert m.unmarshall_message([1, 2, 'bar']) == [1, 2, 'bar']
    assert m.unmarshall_message({'foo': 5, 'bar': 'baz'}) == {'foo': 5, 'bar': 'baz'}
    @behavior
    def sink_beh(self, msg):
        pass
    sink = runtime.create(sink_beh)
    # A bare actor...
    s = m.marshall_message(sink)
    assert m.is_marshalled_actor(s)
    assert m.unmarshall_message(s) is sink
    # ...an actor nested in a dict...
    s = m.marshall_message({'foo': sink})
    assert m.is_marshalled_actor(s['foo'])
    assert m.unmarshall_message(s)['foo'] is sink
    # ...and an actor nested in a list.
    s = m.marshall_message([sink])
    assert m.is_marshalled_actor(s[0])
    assert m.unmarshall_message(s)[0] is sink
| Python | 0 | |
9c2be5533dc14443a67ed22c34e2f059992e43cb | Create camera.py | Camera/camera.py | Camera/camera.py | from SimpleCV import Camera
# Initialize the camera
cam = Camera()
# Loop to continuously get images
# NOTE: this loop never terminates; stop the script with Ctrl-C.
while True:
    # Get Image from camera
    img = cam.getImage()
    # Make image black and white
    img = img.binarize()
    # Draw the text "Hello World" on image
    img.drawText("Hello World!")
    # Show the image
    img.show()
| Python | 0.000002 | |
6014dab06ed2275c5703ab9f9e63272656733c69 | Add retrieve_all_pages util method from mtp-cashbook | moj_utils/rest.py | moj_utils/rest.py | from django.conf import settings
def retrieve_all_pages(api_endpoint, **kwargs):
    """
    Some MTP apis are paginated, this method loads all pages into a single results list

    :param api_endpoint: slumber callable, e.g. `[api_client].cashbook.transactions.locked.get`
    :param kwargs: additional arguments to pass into api callable
    :return: list of results aggregated across every page
    """
    loaded_results = []
    offset = 0
    while True:
        response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,
                                **kwargs)
        count = response.get('count', 0)
        results = response.get('results', [])
        loaded_results += results
        # Stop once all advertised results are loaded. Also stop on an empty
        # page: previously, if the API reported a `count` larger than the
        # number of results it actually returned, this looped forever.
        if not results or len(loaded_results) >= count:
            break
        offset += settings.REQUEST_PAGE_SIZE
    return loaded_results
| Python | 0 | |
8d10e0e2db81023cb435b047f5c1da793e4b992e | Add python/matplotlib_.py | python/matplotlib_.py | python/matplotlib_.py | # matplotlib_.py
# Imports
from matplotlib import ticker
# label_axis
def label_axis(ax, x_or_y, axis_labels, flip, **props):
    """Put fixed categorical tick labels on one axis of *ax*.

    :param ax: matplotlib Axes to modify
    :param x_or_y: ``'x'`` or ``'y'``, selecting which axis to label
    :param axis_labels: sequence of tick-label strings; one tick per label
    :param flip: if True, reverse the axis direction
    :param props: optional text properties applied to the major tick labels
    """
    axis_ticks = range(0, len(axis_labels))
    axis = getattr(ax, '%saxis' % x_or_y)
    axis.set_major_locator(ticker.FixedLocator(axis_ticks))
    axis.set_minor_locator(ticker.NullLocator())
    axis.set_major_formatter(ticker.FixedFormatter(axis_labels))
    axis.set_minor_formatter(ticker.NullFormatter())
    # Pad half a category on each side so markers sit inside the frame.
    lim = (-0.5, len(axis_labels) - 0.5)
    if flip:
        lim = lim[::-1]
    set_lim = getattr(ax, 'set_%slim' % x_or_y)
    set_lim(*lim)
    if props:
        # Bug fix: only ``ticker`` is imported at module level, so the
        # original ``plt.setp(...)`` raised NameError whenever text
        # properties were supplied. Import pyplot lazily here.
        from matplotlib import pyplot as plt
        plt.setp(axis.get_majorticklabels(), **props)
# label_xaxis
def label_xaxis(ax, xaxis_labels, flip=False, **props):
    """Convenience wrapper: apply fixed categorical labels to the x axis."""
    label_axis(ax, 'x', xaxis_labels, flip, **props)
# label_yaxis
def label_yaxis(ax, yaxis_labels, flip=False, **props):
    """Convenience wrapper: apply fixed categorical labels to the y axis."""
    label_axis(ax, 'y', yaxis_labels, flip, **props)
| Python | 0.00352 | |
de6d7c2531f59d407864c737468ae50de38ba9ac | Add some spanish bad words | revscoring/languages/spanish.regex.py | revscoring/languages/spanish.regex.py | # import re
# import warnings
# import enchant
# from nltk.corpus import stopwords
# from nltk.stem.snowball import SnowballStemmer
# from .language import Language, LanguageUtility
# STEMMER = SnowballStemmer("english")
# STOPWORDS = set(stopwords.words('english'))
# Regex fragments matching Spanish profanity/slurs, grouped alphabetically.
# NOTE(review): despite the name these are patterns, not literals — the
# commented-out code below joins them with "|" into a single compiled regex.
BAD_REGEXES = set([
    'ano',
    'bastardo', 'bollo', 'boludo', 'bugarr[óo]n',
    'ca(gar(ro)?|ca)', 'cabr[óo]n', 'cacas', 'capullo', 'carajo',
    'chingar', 'chino', 'choch[oa]', 'cholo', 'chucha', 'chupar',
    'chupapollas', 'chupamedias', 'cipote', 'clamidia', 'coger',
    'cojones', 'concha', 'conejo', 'consolador', 'coño', 'cuca',
    'culear', 'culo', 'cundango',
    'drogata',
    'facha', 'follar', 'fornicar', 'fulana', 'furcia',
    'gabacho', 'gay', 'gilipollas', 'gitano', 'gonorrea', 'gordo',
    'gringo', 'guiri',
    'herpes', 'homosexual', 'huevos', '(huev|we)[óo]n',
    'imb[ée]cil',
    'japo', 'joder', 'joto', 'jud[íi]o',
    'lesbiana',
    'mach(orra|etorra)', 'maldito', 'mamada', 'manola',
    'maric(a|[óo]n)', 'marimach[ao]', 'maripos[óo]n',
    'mea(r|da)', 'mam[óo]n', 'mierda', 'minga', 'moro',
    'nazi', 'negrata',
    'ojete',
    'paja', 'paki', 'pedo', 'pelao', 'pelotas', 'pendejo', 'pene', 'picha',
    'pinche', 'pito', 'polla', 'polvo', 'poto', 'prostituta', 'put[ao]',
    'puñal',
    'rabo', 'ramera',
    'sida', 'skin(head)?', 'subnormal', 'sudaca', 's[íi]filis',
    'tonto', 'torta', 'tortillera', 'tranca', 'tranny',
    'travesti', 'travolo', 'trolo',
    'verga', 'vibrador', 'vulva',
    'zapatona', 'zorra'
])
# BAD_REGEX = re.compile("|".join(BAD_REGEXES))
# DICTIONARY = enchant.Dict("en")
# def stem_word_process():
# def stem_word(word):
# return STEMMER.stem(word).lower()
# return stem_word
# stem_word = LanguageUtility("stem_word", stem_word_process)
# def is_badword_process():
# def is_badword(word):
# return bool(BAD_REGEX.match(word.lower()))
# return is_badword
# is_badword = LanguageUtility("is_badword", is_badword_process)
# def is_misspelled_process():
# def is_misspelled(word):
# return not DICTIONARY.check(word)
# return is_misspelled
# is_misspelled = LanguageUtility("is_misspelled", is_misspelled_process)
# def is_stopword_process():
# def is_stopword(word):
# return word.lower() in STOPWORDS
# return is_stopword
# is_stopword = LanguageUtility("is_stopword", is_stopword_process)
# english = Language("revscoring.languages.english",
# [stem_word, is_badword, is_misspelled, is_stopword])
| Python | 0.999999 | |
326ef75042fc1d3eeeb6834fd5ff80a2bd1a2be1 | Add incoreect_regex.py solution | HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py | HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py | #!/usr/bin/env python3
import re
if __name__ == '__main__':
    # Read the number of test cases, then for each line report whether it
    # compiles as a valid Python regular expression.
    for _ in range(int(input())):
        try:
            re.compile(input())
            print('True')
        except re.error:
            # Catch only regex-syntax failures; the original bare ``except``
            # also swallowed unrelated errors such as EOFError/KeyboardInterrupt.
            print('False')
| Python | 0.000002 | |
eea09f501e957ede24c8ca830fd488ebc34f3d9f | change question values based on answer for subtraction and division - related to issue #52 | questions/migrations/0010_recompute_values.py | questions/migrations/0010_recompute_values.py | # -*- coding: utf-8 -*-
import json
from south.v2 import DataMigration
class Migration(DataMigration):
    def forwards(self, orm):
        # One-off data migration: recompute Question.value for every row.
        # NOTE(review): if a level-4 subtraction/division question's JSON
        # payload lacks an "answer" key, its old value is kept — confirm
        # that is intended rather than resetting it to None.
        for q in orm.Question.objects.all():
            # Level-4 subtraction/division questions store the expected
            # result inside their JSON ``data`` under "answer".
            if q.skill.level == 4 and (q.skill.parent.parent.name == "subtraction" or q.skill.parent.parent.name == "division"):
                question_data = json.loads(q.data)
                if "answer" in question_data.keys():
                    value = question_data["answer"]
                    q.value = value
            elif q.skill.level == 4:
                # Other level-4 skills use the skill name itself as the value.
                q.value = q.skill.name
            else:
                # Non-leaf (level != 4) questions carry no value.
                q.value = None
            q.save()
    def backwards(self, orm):
        "Write your backwards methods here."
        # Intentionally a no-op: the previous ``value`` contents are not
        # preserved anywhere, so this data migration cannot be reversed.
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'model.skill': {
'Meta': {'object_name': 'Skill'},
'children_list': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'level': ('django.db.models.fields.IntegerField', [], {}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['model.Skill']"})
},
u'questions.answer': {
'Meta': {'object_name': 'Answer'},
'correctly_solved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'log': ('django.db.models.fields.TextField', [], {}),
'question': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'answers'", 'to': u"orm['questions.Question']"}),
'solving_time': ('django.db.models.fields.IntegerField', [], {}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'questions.question': {
'Meta': {'object_name': 'Question'},
'data': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'player': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['questions.Simulator']"}),
'skill': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['model.Skill']"}),
'type': ('django.db.models.fields.CharField', [], {'default': "'c'", 'max_length': '1'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
},
u'questions.simulator': {
'Meta': {'object_name': 'Simulator'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'note': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['questions']
symmetrical = True
| Python | 0.000002 | |
c675fe2a82733ef210bf287df277f8ae956a4295 | Add beginning of main script | rarbg-get.py | rarbg-get.py | #!env /usr/bin/python3
import sys
import urllib.parse
import urllib.request
def main():
    """Search rarbg.to for the first CLI argument and fetch the result page.

    Returns the raw response body (bytes). Previously the body was read
    into an unused local and discarded.
    """
    search = sys.argv[1]
    # Bug fix: the search term was concatenated raw into the URL even though
    # urllib.parse is imported; spaces/&/= in the query corrupted the request.
    query = urllib.parse.urlencode({
        'order': 'seeders',
        'by': 'DESC',
        'search': search,
    })
    url = 'http://rarbg.to/torrents.php?' + query
    print(url)
    # A browser-like User-Agent sidesteps the site's trivial bot filtering.
    req = urllib.request.Request(url, headers={'User-Agent': 'Magic Browser'})
    resp = urllib.request.urlopen(req)
    return resp.read()


if __name__ == '__main__':
    main()
| Python | 0.000005 | |
552f082168b0243cc3998a2027c326031879e869 | add observables | robosuite/utils/observables.py | robosuite/utils/observables.py | import numpy as np
from robosuite.utils.buffers import DelayBuffer
class Observable:
    """
    Base class for all observables -- defines interface for interacting with sensors
    Args:
        name (str): Name for this observable
        sensor (function): Method to grab raw sensor data for this observable. Should take in
            no arguments and return the raw sensor data for the current timestep
        corrupter (function): Method to corrupt the raw sensor data for this observable. Should take in
            the output of @sensor and return the same type (corrupted data)
        delayer (function): Method to delay the raw sensor data when polling this observable. Should take in
            no arguments and return an integer, for the number of timesteps to delay
        data_size (int): Size of data array per sensor reading
        history_size (int): Size of the internal history buffer to store recent values
    """
    def __init__(self, name, sensor, corrupter, delayer, data_size, history_size=50):
        # Set all internal variables and methods
        self.name = name
        self.sensor = sensor
        self.corrupter = corrupter
        self.delayer = delayer
        self._current_observed_value = None          # Will be modified later
        self._data_size = data_size
        self._history_size = history_size
        # Ring buffer of the last `history_size` corrupted readings; delayed
        # reads are served out of this buffer.
        self._history = DelayBuffer(dim=data_size, length=history_size)
        # Enabled by default
        self._enabled = True
    def update(self):
        """
        Updates internal values for this observable, if enabled
        """
        if self._enabled:
            # Get newest value, corrupt it, and store it in the history buffer
            self._history.push(self.corrupter(self.sensor()))
            # Update current observed value
            # NOTE(review): assumes get_delayed_value returns an indexable
            # array of length data_size — confirm against DelayBuffer.
            obs = self._history.get_delayed_value(delay=self.delayer())
            # Make sure to convert to single number if data_size is 1
            self._current_observed_value = obs[0] if self._data_size == 1 else obs
    def set_enabled(self, enabled):
        """
        Sets whether this observable is active or not
        Args:
            enabled (bool): True if this observable should be enabled
        """
        self._enabled = enabled
    def set_sensor(self, sensor):
        """
        Sets the sensor for this observable.
        Args:
            sensor (function): Method to grab raw sensor data for this observable. Should take in
                no arguments and return the raw sensor data for the current timestep
        """
        self.sensor = sensor
    def set_corrupter(self, corrupter):
        """
        Sets the corrupter for this observable.
        Args:
            corrupter (function): Method to corrupt the raw sensor data for this observable. Should take in
                the output of self.sensor and return the same type (corrupted data)
        """
        self.corrupter = corrupter
    def set_delayer(self, delayer):
        """
        Sets the delayer for this observable.
        Args:
            delayer (function): Method to delay the raw sensor data when polling this observable. Should take in
                no arguments and return an integer, for the number of timesteps to delay
        """
        self.delayer = delayer
    @property
    def observation(self):
        """
        Current observation from this observable
        Returns:
            float or np.array: Current observed value from this observable
        """
        return self._current_observed_value
class ImageObservable(Observable):
    """
    Class for images (multi-dimensional sensor readings)
    Args:
        name (str): Name for this observable
        sensor (function): Method to grab raw sensor data for this observable. Should take in
            no arguments and return the raw sensor data for the current timestep
        corrupter (function): Method to corrupt the raw sensor data for this observable. Should take in
            the output of @sensor and return the same type (corrupted data)
        delayer (function): Method to delay the raw sensor data when polling this observable. Should take in
            no arguments and return an integer, for the number of timesteps to delay
        image_shape (tuple): Shape of the image, e.g. for RGB, could be (H x W x 3)
        history_size (int): Size of the internal history buffer to store recent values
    """
    def __init__(self, name, sensor, corrupter, delayer, image_shape, history_size=50):
        # Store image shape and find flattened image_shape to store data
        # (the flat history buffer holds one 1-D vector per reading).
        self.image_shape = np.array(image_shape)
        flattened_shape = np.product(self.image_shape)
        # Run super init
        super().__init__(name=name, sensor=sensor, corrupter=corrupter, delayer=delayer,
                         data_size=flattened_shape, history_size=history_size)
    def update(self):
        """
        Updates internal values for this observable, if enabled. Overrides superclass method to make sure
        images are returned appropriately in their original multi-dimensional form
        """
        if self._enabled:
            # Get newest value, corrupt it, flatten it, and store it in the history buffer
            self._history.push(self.corrupter(self.sensor()).flatten())
            # Update current observed value
            obs = self._history.get_delayed_value(delay=self.delayer())
            # Convert back to multi-dimensional size (inverse of flatten above)
            self._current_observed_value = obs.reshape(self.image_shape)
def create_uniform_noise_corrupter(low, high):
    """
    Creates a corrupter that applies uniform noise to a given input within range @low to @high
    Args:
        low (float): Low-end of noise to apply
        high (float): High-end of noise to apply
    """
    def corrupter(inp):
        data = np.asarray(inp)
        # i.i.d. uniform samples in [low, high), one per input element
        perturbation = np.random.random_sample(data.shape) * (high - low) + low
        return data + perturbation
    return corrupter
def create_gaussian_noise_corrupter(mean, std):
    """
    Creates a corrupter that applies gaussian noise to a given input with mean @mean and std dev @std
    Args:
        mean (float): Mean of the noise to apply
        std (float): Standard deviation of the noise to apply
    """
    def corrupter(inp):
        data = np.asarray(inp)
        # Scale standard-normal draws by std and shift by the requested mean
        return data + (np.random.randn(*data.shape) * std + mean)
    return corrupter
def create_determinstic_delayer(delay):
    """
    Create a deterministic delayer that always returns the same delay value
    Args:
        delay (int): Delay value to return
    """
    def delayer():
        return delay
    return delayer
def create_uniform_sampled_delayer(low, high):
    """
    Creates uniformly sampled delayer, with minimum delay @low and maximum delay @high, both inclusive
    Args:
        low (int): Minimum possible delay
        high (int): Maxmimum possible delay
    """
    def delayer():
        # randint's upper bound is exclusive, so shift by one so @high
        # can be drawn as well
        return np.random.randint(low=low, high=high + 1)
    return delayer
def create_gaussian_sampled_delayer(mean, std):
    """
    Creates a gaussian sampled delayer, with average delay @mean which varies by standard deviation @std
    Args:
        mean (float): Average delay
        std (float): Standard deviation of the delay variation
    """
    def delayer():
        sample = mean + std * np.random.randn()
        # Delays are integer timesteps, so round to the nearest int
        return int(np.round(sample))
    return delayer
| Python | 0.00209 | |
480ae590ea1116fdbb5c6601d7466408f274c433 | Implement for GNOME activateAutoLoginCommand | src/nrvr/el/gnome.py | src/nrvr/el/gnome.py | #!/usr/bin/python
"""nrvr.el.gnome - Manipulate Enterprise Linux GNOME
Classes provided by this module include
* Gnome
To be improved as needed.
Idea and first implementation - Leo Baschy <srguiwiz12 AT nrvr DOT com>
Public repository - https://github.com/srguiwiz/nrvr-commander
Copyright (c) Nirvana Research 2006-2013.
Modified BSD License"""
import re
class Gnome():
    """Utilities for manipulating a Gnome installation."""
    @classmethod
    def activateAutoLoginCommand(cls, username=None):
        """Build command to activate auto-login into GNOME.
        username
            defaults to None, which effects deactivateAutoLoginCommand.
        Return command to activate auto-login into GNOME."""
        # Always strip any existing auto-login settings first.
        command = cls.deactivateAutoLoginCommand()
        if not username:
            return command
        escaped = re.escape(username)  # precaution against sed metacharacters
        insertion = (r" ; sed -i -e '/^\[daemon\]/ a \AutomaticLoginEnable=true\nAutomaticLogin="
                     + escaped
                     + r"' /etc/gdm/custom.conf")
        return command + insertion
    @classmethod
    def deactivateAutoLoginCommand(cls):
        """Build command to deactivate auto-login into GNOME.
        Return command to deactivate auto-login into GNOME."""
        # Delete both auto-login keys from GDM's custom.conf.
        return (r"sed -i"
                r" -e '/^\s*AutomaticLoginEnable\s*=/ d'"
                r" -e '/^\s*AutomaticLogin\s*=/ d'"
                r" /etc/gdm/custom.conf")
if __name__ == "__main__":
print Gnome.activateAutoLoginCommand("joe")
print Gnome.deactivateAutoLoginCommand()
print Gnome.activateAutoLoginCommand()
| Python | 0.000001 | |
d135f84307a9ff7225938f3fae5314b8b5fa3d40 | add local store | rqbacktest/data/data_source.py | rqbacktest/data/data_source.py | import pytz
import pandas as pd
from ..instruments import Instrument
class LocalDataSource:
    """Filesystem-backed market data source.

    Reads bcolz tables (daily bars, dividends, yield curve, trading dates)
    and a pickled instrument list from *root_dir*.
    """
    # File names of the bundled data sets inside root_dir.
    DAILY = 'daily.bcolz'
    INSTRUMENTS = 'instruments.pk'
    DIVIDEND = 'dividend.bcolz'
    TRADING_DATES = 'trading_dates.bcolz'
    YIELD_CURVE = 'yield_curve.bcolz'
    # Maps a duration in days to the matching yield-curve column name.
    YIELD_CURVE_TENORS = {
        0 : 'S0',
        30: 'M1',
        60: 'M2',
        90: 'M3',
        180: 'M6',
        270: 'M9',
        365: 'Y1',
        365*2: 'Y2',
        365*3: 'Y3',
        365*4: 'Y4',
        365*5: 'Y5',
        365*6: 'Y6',
        365*7: 'Y7',
        365*8: 'Y8',
        365*9: 'Y9',
        365*10: 'Y10',
        365*15: 'Y15',
        365*20: 'Y20',
        365*30: 'Y30',
        365*40: 'Y40',
        365*50: 'Y50',
    }
    YIELD_CURVE_DURATION = sorted(YIELD_CURVE_TENORS.keys())
    # Prices are stored as scaled integers; divide by this to get yuan.
    PRICE_SCALE = 1000.
    def __init__(self, root_dir):
        self._root_dir = root_dir
        # Imports are local so the module can be imported without bcolz.
        import bcolz
        import os
        import pickle
        self._daily_table = bcolz.open(os.path.join(root_dir, LocalDataSource.DAILY))
        self._instruments = {d['order_book_id']: Instrument(d)
                             for d in pickle.load(open(os.path.join(root_dir, LocalDataSource.INSTRUMENTS), 'rb'))}
        self._dividend = bcolz.open(os.path.join(root_dir, LocalDataSource.DIVIDEND))
        self._yield_curve = bcolz.open(os.path.join(root_dir, LocalDataSource.YIELD_CURVE))
        # Dates are stored as yyyymmdd ints; convert to UTC timestamps.
        self._trading_dates = pd.Index(pd.Timestamp(str(d), tz=pytz.utc) for d in
                                       bcolz.open(os.path.join(root_dir, LocalDataSource.TRADING_DATES)))
    def instruments(self, order_book_ids):
        """Look up one Instrument (str arg) or a list of Instruments (list arg)."""
        if type(order_book_ids) == str:
            try:
                return self._instruments[order_book_ids]
            except KeyError:
                # NOTE(review): prints and returns None instead of raising —
                # callers must handle the None.
                print('ERROR: order_book_id {} not exists!'.format(order_book_ids))
                return None
        # Unknown ids are silently dropped in the list form.
        return [self._instruments[ob] for ob in order_book_ids
                if ob in self._instruments]
    def all_instruments(self, itype='CS'):
        """Return a DataFrame of all instruments of the given type
        (or a summary of every instrument when itype is None)."""
        if itype is None:
            return pd.DataFrame([[v.order_book_id, v.symbol, v.abbrev_symbol, v.type]
                                 for v in self._instruments.values()],
                                columns=['order_book_id', 'symbol', 'abbrev_symbol', 'type'])
        if itype not in ['CS', 'ETF', 'LOF', 'FenjiA', 'FenjiB', 'FenjiMu', 'INDX', 'Future']:
            raise ValueError('Unknown type {}'.format(itype))
        return pd.DataFrame([v.__dict__ for v in self._instruments.values() if v.type == itype])
    def sector(self, code):
        """Order-book ids of all stocks in the given sector."""
        return [v.order_book_id for v in self._instruments.values()
                if v.type == 'CS' and v.sector_code == code]
    def industry(self, code):
        """Order-book ids of all stocks in the given industry."""
        return [v.order_book_id for v in self._instruments.values()
                if v.type == 'CS' and v.industry_code == code]
    def concept(self, *concepts):
        """Order-book ids of stocks tagged with any of the given concepts."""
        return [v.order_book_id for v in self._instruments.values()
                if v.type == 'CS' and any(c in v.concept_names.split('|') for c in concepts)]
    def get_trading_dates(self, start_date, end_date):
        """Trading dates within [start_date, end_date], inclusive."""
        d = self._trading_dates
        return d[(d >= start_date) & (d <= end_date)]
    def get_yield_curve(self, start_date, end_date):
        """Risk-free rate for the period, from the largest tenor <= duration."""
        duration = (end_date - start_date).days
        tenor = 0
        for t in LocalDataSource.YIELD_CURVE_DURATION:
            if duration >= t:
                tenor = t
            else:
                break
        # Encode the date as a yyyymmdd int to match the stored format;
        # take the latest curve on or before start_date. Stored value is
        # in basis points * 100, hence the 10000 divisor.
        d = start_date.year*10000 + start_date.month*100 + start_date.day
        return self._yield_curve.fetchwhere('date<={}'.format(d)).cols[self.YIELD_CURVE_TENORS[tenor]][-1] / 10000.0
    def get_dividends(self, order_book_id):
        """Dividend history as a DataFrame indexed by announcement date,
        or None when the id is unknown."""
        try:
            # NOTE(review): local name shadows the ``id`` builtin.
            id = self._dividend.attrs['stock_id'][order_book_id]
        except KeyError:
            return None
        dividends = self._dividend.fetchwhere('id=={}'.format(id))
        return pd.DataFrame({
            'book_closure_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['closure_date']),
            'ex_dividend_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['ex_date']),
            'payable_date': pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['payable_date']),
            'dividend_cash_before_tax': dividends.cols['cash_before_tax'][:] / 10000.0,
            'round_lot': dividends.cols['round_lot']
        }, index=pd.Index(pd.Timestamp(str(d)) for d in dividends.cols['announcement_date']))
    def get_all_bars(self, order_book_id):
        """All daily OHLCV bars for one instrument, indexed by date.

        Raises RuntimeError for unknown ids (unlike get_dividends, which
        returns None).
        """
        try:
            id = self._daily_table.attrs['id_map'][order_book_id]
        except KeyError:
            raise RuntimeError('No data for {}'.format(order_book_id))
        bars = self._daily_table.fetchwhere('id=={}'.format(id))
        # Prices are stored as scaled ints; un-scale and round to fen.
        return pd.DataFrame({
            'open': (bars.cols['open'][:] / self.PRICE_SCALE).round(2),
            'close': (bars.cols['close'][:] / self.PRICE_SCALE).round(2),
            'high': (bars.cols['high'][:] / self.PRICE_SCALE).round(2),
            'low': (bars.cols['low'][:] / self.PRICE_SCALE).round(2),
            'volume': bars.cols['volume'],
        }, index=pd.Index(pd.Timestamp(str(d)) for d in bars.cols['date']))
| Python | 0.000296 | |
6affa7946bafc418423c8e1857c6f2b55066c31a | this will find all the primes below a given number | generateprimes.py | generateprimes.py | #!/usr/bin/env python 3.1
#doesn't work for numbers less than 4
from math import sqrt
def nextprime(number):
    """Return the smallest prime that is >= number.

    Fixes for the original version (whose header admitted "doesn't work for
    numbers less than 4"): inputs below 2 are rounded up to 2 instead of
    being returned unchanged even though they are not prime, and the
    docstring now matches the actual >= semantics the caller relies on.
    """
    candidate = max(2, number)
    while True:
        divisor = 2
        is_prime = True
        # Trial division by every integer up to sqrt(candidate).
        while divisor <= sqrt(candidate):
            if candidate % divisor == 0:
                is_prime = False
                break
            divisor = divisor + 1
        if is_prime:
            return candidate
        candidate = candidate + 1
# Main script (Python 2: uses print statements and raw_input).
# Start scanning from 4; 2 and 3 are printed explicitly below.
number =4
rawstop = raw_input('I want to find all the prime numbers below ')
stop = int(rawstop)
print 2
print 3
while number < stop:
    number = nextprime(number)
    if number<stop:
        print number
        # Step past the found prime; skipping by 2 would also skip the
        # pointless even candidates (author's note: "now its faster").
        number= number +1 #would changing the one to a two allow me to skip testing even numbers? yes, now its faster
        continue
# NOTE(review): this trailing call recomputes a prime after the loop has
# already finished and its result is never printed — appears to be dead code.
number = nextprime(number)
print "done"
| Python | 0.999869 | |
6d12624e094ec58118d39c4340438c4a814d404f | add wildcard, this is just a string contain problem | wildcard.py | wildcard.py | class Solution:
# @param s, an input string
# @param p, a pattern string
# @return a boolean
def shrink(self, pattern):
shrinked = []
i = 0
while i < len(pattern):
stars = 0
questions = 0
while i < len(pattern) and pattern[i] in ['*', '?']:
if pattern[i] == '*':
stars += 1
else:
questions += 1
i += 1
if stars == 0:
if questions > 0:
shrinked.extend(['?'] * questions)
else:
shrinked.append(('*', questions))
if i < len(pattern):
shrinked.append(pattern[i])
i += 1
return shrinked
def compress_string_score(self, string_score, pattern_list):
compressed = []
i = 0
while i < len(string_score):
p = pattern_list[string_score[i]]
compressed.append(p)
repeat = 0
while p != '?' and i < len(string_score)-1 and\
string_score[i + 1] == string_score[i]:
repeat += 1
i += 1
if repeat:
compressed.append(('*', repeat))
i += 1
return compressed
def isMatch(self, s, p):
pl = self.shrink(p)
string_score = []
cursor = 0
for c in s:
try:
while cursor < len(pl) and isinstance(pl[cursor], tuple):
cursor += 1
if cursor >= len(pl):
# pattern exhausted, while string exists
break
# cursor is not a star
if c == pl[cursor] or pl[cursor] == '?':
string_score.append(cursor)
# move on until meets with an alphabetic
cursor += 1
else:
if string_score:
string_score.append(string_score[-1])
else:
return False
except:
print "%s: %s vs %s: %s" % (s, c, pl, cursor)
print string_score
raise
compressed = self.compress_string_score(string_score, pl)
print "%s %s vs %s" % (string_score, compressed, pl)
for c_single, p_single in zip(compressed, pl):
if c_single != p_single:
if isinstance(c_single, tuple) and isinstance(p_single, tuple)\
and c_single[1] > p_single[1]:
continue
else:
return False
return True
# Ad-hoc driver (Python 2): pairs each string with a pattern and prints the
# match verdict. Expected answers for the standard problem are:
# F, T, F, T, T, T, F, T — eyeball the output against these.
so = Solution()
ls = ["aa", "aa", "aaa", "aa", "aa", "ab", "aab", "axxbxxycxxde"]
lp = ["a", "aa", "aa", "*", "a*", "?*", "c*a*b", "a**?*??b???c?d*?*e"]
for s,p in zip(ls, lp):
    line = "%s, %s -> %s" % (s, p, so.isMatch(s, p))
    print line
| Python | 0.000001 | |
00c86aff808ecc5b6f015da5977265cfa76826bb | add fixtures that start related worker for tests | livewatch/tests/conftest.py | livewatch/tests/conftest.py | import pytest
import time
import django_rq
from celery.signals import worker_ready
from .celery import celery
# Flag list appended to by the worker_ready signal; truthy once the celery
# worker has finished booting.
WORKER_READY = list()


@worker_ready.connect
def on_worker_ready(**kwargs):
    """Called when the Celery worker thread is ready to do work.
    This is to avoid race conditions since everything is in one python process.
    """
    WORKER_READY.append(True)
@pytest.yield_fixture
def celery_worker(request):
    """Fixture starting a celery worker in background"""
    from multiprocessing import Process
    # -C no color, -q quiet, one solo-pool worker, gossip disabled.
    celery_args = ['-C', '-q', '-c', '1', '-P', 'solo', '--without-gossip']
    proc = Process(target=lambda: celery.worker_main(celery_args))
    def cleanup():
        proc.terminate()
    # Finalizer guards against the fixture never reaching its own teardown.
    request.addfinalizer(cleanup)
    proc.start()
    # Wait for worker to finish initializing to avoid a race condition I've been experiencing.
    # NOTE(review): the worker runs in a *separate process*, so the signal
    # handler above mutates that process's WORKER_READY, not this one's —
    # this loop likely always sleeps the full 5 seconds. Confirm.
    for i in range(5):
        if WORKER_READY:
            break
        time.sleep(1)
    yield proc
    # Normal teardown; the addfinalizer terminate() is then a harmless no-op.
    proc.terminate()
    time.sleep(1)
@pytest.yield_fixture
def rq_worker(request):
    """Fixture starting a rq worker in background"""
    from multiprocessing import Process
    def _proc_target(env):
        # Runs in the child: point Django at the test settings before
        # django_rq touches the configuration.
        import os
        os.environ.update(env)
        worker = django_rq.get_worker()
        worker.work()
    proc = Process(target=_proc_target, kwargs={
        'env': {'DJANGO_SETTINGS_MODULE': 'livewatch.tests.settings'}
    })
    def cleanup():
        proc.terminate()
    # Finalizer guards against the fixture never reaching its own teardown.
    request.addfinalizer(cleanup)
    proc.start()
    # Crude startup grace period; there is no readiness signal for rq here.
    time.sleep(1)
    yield proc
    proc.terminate()
    time.sleep(1)
| Python | 0 | |
275cddfa56501868787abeef10fc515102ffd11d | make setup.py find all packages, now in src | python/setup.py | python/setup.py | from distutils.core import setup
from setuptools import find_packages
setup(name='fancontrol',
      version='0.1.0',
      # Bug fix: 'modules' is not a valid distutils/setuptools keyword (the
      # intended spelling is 'py_modules'); it was ignored with an "Unknown
      # distribution option" warning and is redundant now that the code is
      # discovered as packages under src/.
      packages=find_packages(where="src"),
      package_dir={"": "src"},
      )
| Python | 0 | |
a14ac8fb2f10124a4978db19049bdf932e91c49d | Add avahi based beacon for zeroconf announcement | salt/beacons/avahi_announce.py | salt/beacons/avahi_announce.py | # -*- coding: utf-8 -*-
'''
Beacon to announce via avahi (zeroconf)
'''
# Import Python libs
from __future__ import absolute_import
import logging
# Import 3rd Party libs
try:
import avahi
HAS_PYAVAHI = True
except ImportError:
HAS_PYAVAHI = False
import dbus
log = logging.getLogger(__name__)
__virtualname__ = 'avahi_announce'
# Grains observed on the previous beacon() run; used to detect txt changes.
LAST_GRAINS = {}
# NOTE(review): connecting to the system D-Bus and creating the entry group
# happens at import time — importing this module on a host without avahi's
# D-Bus service will raise. Consider lazy initialization.
BUS = dbus.SystemBus()
SERVER = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER),
                        avahi.DBUS_INTERFACE_SERVER)
GROUP = dbus.Interface(BUS.get_object(avahi.DBUS_NAME, SERVER.EntryGroupNew()),
                       avahi.DBUS_INTERFACE_ENTRY_GROUP)
def __virtual__():
    """Only load this beacon when the avahi python bindings are importable."""
    return __virtualname__ if HAS_PYAVAHI else False
def validate(config):
    '''
    Validate the beacon configuration

    Returns a ``(valid, message)`` tuple.
    '''
    if not isinstance(config, dict):
        # Bug fix: the message previously said 'avahi_announcement', which
        # does not match __virtualname__ ('avahi_announce').
        return False, ('Configuration for avahi_announce '
                       'beacon must be a dictionary')
    # Membership testing works directly on the dict; no need to build a
    # list of keys first.
    elif not all(x in config for x in ('servicetype', 'port', 'txt')):
        return False, ('Configuration for avahi_announce beacon '
                       'must contain servicetype, port and txt items')
    return True, 'Valid beacon configuration'
def beacon(config):
    '''
    Broadcast values via zeroconf
    If the announced values are static, it is adviced to set run_once: True
    (do not poll) on the beacon configuration. Grains can be used to define
    txt values using the syntax: grains.<grain_name>
    The default servicename its the hostname grain value.
    Example Config
    .. code-block:: yaml
       beacons:
          avahi_announce:
             run_once: True
             servicetype: _demo._tcp
             txt:
                ProdName: grains.productname
                SerialNo: grains.serialnumber
                Comments: 'this is a test'
    '''
    ret = []
    changes = {}
    txt = {}
    # Module-level cache of the grains seen on the previous invocation;
    # empty dict means this is the first run.
    global LAST_GRAINS
    _validate = validate(config)
    if not _validate[0]:
        log.warning('Beacon {0} configuration invalid, '
                    'not adding. {1}'.format(__virtualname__, _validate[1]))
        return ret
    if 'servicename' in config:
        servicename = config['servicename']
    else:
        # Fall back to the minion's hostname grain.
        servicename = __grains__['host']
    for item in config['txt']:
        if config['txt'][item].startswith('grains.'):
            # Resolve 'grains.<name>' indirection against the live grains.
            grain = config['txt'][item][7:]
            txt[item] = __grains__[grain]
            # Re-announce only when the grain value changed since last run.
            if LAST_GRAINS and (LAST_GRAINS[grain] != __grains__[grain]):
                changes[str('txt.' + item)] = txt[item]
        else:
            txt[item] = config['txt'][item]
            # Static values only count as a change on the first run.
            if not LAST_GRAINS:
                changes[str('txt.' + item)] = txt[item]
    if changes:
        if not LAST_GRAINS:
            # First announcement: include the service identity in the event.
            changes['servicename'] = servicename
            changes['servicetype'] = config['servicetype']
            changes['port'] = config['port']
        else:
            # Re-announcement: drop the old registration first.
            GROUP.Reset()
        GROUP.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0),
                         servicename, config['servicetype'], '', '',
                         dbus.UInt16(config['port']), avahi.dict_to_txt_array(txt))
        GROUP.Commit()
        ret.append({'tag': 'result', 'changes': changes})
    # Remember the grains for change detection on the next poll.
    LAST_GRAINS = __grains__
    return ret
| Python | 0 | |
9c5de3b667a8e98b0304fb64e30113f551b33404 | Create getTwitterData.py | getTwitterData.py | getTwitterData.py | from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
import time
ckey = 'dNATh8K9vGwlOSR2phVzaB9fh'
csecret = 'LmBKfyfoZmK1uIu577yFR9jYkVDRC95CXcKZQBZ8jWx9qdS4Vt'
atoken = '2165798475-nuQBGrTDeCgXTOneasqSFZLd3SppqAJDmXNq09V'
asecret = 'FOVzgXM0NJO2lHFydFCiOXCZdkhHlYBkmPNsWbRhLk8xd'
class Listener(StreamListener):
    """tweepy stream listener that appends tweet texts to twitterData.txt.

    NOTE(review): Python-2-style print in the except branch; the comma makes
    it print a tuple. The tweet text is extracted by naive string slicing of
    the raw JSON instead of json.loads — this breaks on tweets where the
    text field is absent or ordered differently.
    """
    def on_data(self, data):
        try:
            #print data
            # Slice the text field out of the raw JSON payload.
            tweet = data.split(',"text":"')[1].split('","source')[0]
            #print tweet
            #saveThis = str(time.time())+'::'+tweet
            saveFile = open('twitterData.txt','a')
            saveFile.write(tweet)
            saveFile.write('\n')
            saveFile.close()
        except BaseException as e:
            # Very broad catch: any parse/IO failure just pauses the stream.
            print ('failed ondata'), str(e)
            time.sleep(5)
    def on_error(self, status):
        # Called by tweepy with the HTTP status code on stream errors.
        print (status)
#to authorize
auth = OAuthHandler(ckey, csecret)
auth.set_access_token(atoken, asecret)
twitterStream= Stream(auth, Listener())
# Track tweets mentioning 'Apple'. NOTE(review): filter() blocks by default,
# so the timing loop below is probably only reached after the stream ends.
twitterStream.filter(track=['Apple'])
# Busy-wait ~5 seconds of CPU time, then disconnect.
# NOTE(review): time.clock is deprecated (removed in Python 3.8) and this
# loop spins at 100% CPU; time.monotonic plus time.sleep would be preferable.
start_time = time.clock()
while True:
    if time.clock() - start_time > 5:
        break
twitterStream.disconnect()
| Python | 0.000001 | |
44cee3df6cf5a1ce567df3cec0298c3e5145a95d | Add script query-vector.py, generate vector for query/bidword from word-vector.tsv | query-vector.py | query-vector.py | #!/usr/bin/env python
# -*- coding: utf-8
from sys import stdout
from threading import Thread
from codecs import open
from json import loads
from numpy import array, zeros, dot, sqrt
from hashlib import md5
from argparse import ArgumentParser
from progressbar import ProgressBar, Bar
from relevence_db import *
class DBLoadBidwordKernel(Thread):
    """Worker thread that averages word vectors per bidword and prints TSV.

    Output line format: md5(bidword) <TAB> bidword <TAB> space-joined vector.
    Threads shard the input file round-robin by line index.
    """
    def __init__(self, seg_file, word_dict, base_number, serial_number):
        self.seg_file = seg_file
        self.word_dict = word_dict
        self.base_number = base_number
        self.serial_number = serial_number
        super(DBLoadBidwordKernel, self).__init__()
    def run(self):
        with open(self.seg_file, "r", encoding = "utf-8") as fp:
            counter = 0
            iterator = 0
            for line in fp:
                # Round-robin sharding: handle only our residue class.
                if iterator % self.base_number == self.serial_number:
                    iterator += 1
                else:
                    iterator += 1
                    continue
                bidword_str = line.strip()
                word_list = bidword_str.split()
                if word_list is None or len(word_list) == 0:
                    continue
                word_count = 0
                # NOTE(review): assumes 200-dimensional word vectors.
                vector = zeros(200, dtype = "float32")
                for word in word_list:
                    if word in self.word_dict:
                        vector += array(self.word_dict[word], dtype = "float32")
                        word_count += 1
                if word_count > 0:
                    vector /= word_count
                else:
                    # Skip bidwords with no known words at all.
                    continue
                bidword_hash = md5(bidword_str.encode("utf-8")).hexdigest()
                stdout.write("%s\t%s\t%s\n" % (bidword_hash, bidword_str.encode("utf-8"), " ".join([str(i) for i in vector])))
                stdout.flush()
class DBLoadQueryKernel(Thread):
    """Worker thread that vectorizes queries and stores them in the database.

    Threads shard the input file round-robin: thread ``serial_number`` of
    ``base_number`` handles every line whose index is congruent to it.
    """
    def __init__(self, seg_file, base_number, serial_number):
        self.seg_file = seg_file
        self.base_number = base_number
        self.serial_number = serial_number
        super(DBLoadQueryKernel, self).__init__()
    def run(self):
        # Phase 1: average all word vectors in the DB to get a fallback
        # vector for queries containing no known words.
        engine, session = connect_database()
        print "computing mean_vec ..."
        mean_vec = None
        word_query = session.query(Word)
        word_count = word_query.count()
        current_count = 0
        progress = ProgressBar(maxval = word_count).start()
        for word in word_query:
            if mean_vec is None:
                # Size the accumulator from the first stored vector.
                mean_vec = zeros(len(loads(word.vector)), dtype = "float32")
            mean_vec += loads(word.vector)
            current_count += 1
            progress.update(current_count)
        mean_vec /= word_count
        progress.finish()
        print "word count", word_query.count()
        # Phase 2: vectorize this thread's share of the query file.
        with open(self.seg_file, "r", encoding = "utf-8") as fp:
            counter = 0
            iterator = 0
            for line in fp:
                # Round-robin sharding across threads.
                if iterator % self.base_number == self.serial_number:
                    iterator += 1
                else:
                    iterator += 1
                    continue
                query_str = line.strip()
                word_list = query_str.split()
                if word_list is None or len(word_list) == 0:
                    continue
                # NOTE(review): word_count is reused here as a per-line
                # counter, clobbering the DB total computed above.
                word_count = 0
                vector = zeros(mean_vec.shape, dtype = "float32")
                for word in word_list:
                    # NOTE(review): one DB round-trip per word -- a bulk
                    # lookup would be far faster.
                    word_record = session.query(Word).filter(Word.context == word.encode("utf-8")).first()
                    if word_record is not None:
                        vector += loads(word_record.vector)
                        word_count += 1
                if word_count > 0:
                    vector /= word_count
                else:
                    # Fall back to the corpus mean for fully unknown queries.
                    vector = mean_vec
                query = Query(context = query_str.encode("utf-8"), vector = str(vector))
                session.add(query)
                counter += 1
                print self.serial_number, iterator, counter
                # Commit in batches of 1000 to bound transaction size.
                if (counter % 1000 == 0):
                    session.commit()
        session.commit()
def load_dict(input_file):
    """Load and return a JSON object from *input_file* (UTF-8 text).

    Parameters
    ----------
    input_file : str
        Path to a JSON file, e.g. a word -> vector mapping.
    """
    with open(input_file, "r", encoding="utf-8") as fp:
        # Bug fix: json.loads() no longer accepts an ``encoding`` argument
        # (deprecated in 3.1, removed in Python 3.9); the file handle
        # already decodes the bytes as UTF-8.
        return loads(fp.read())
def db_load_word(json_file):
    """Insert every word -> vector pair from *json_file* into the Word table."""
    word_dict = load_dict(json_file)
    engine, session = connect_database()
    print "db loading word ..."
    progress = ProgressBar(maxval = len(word_dict)).start()
    counter = 0
    for key in word_dict:
        # Vectors are persisted as their str() representation.
        word = Word(context = key, vector = str(word_dict[key]))
        session.add(word)
        counter += 1
        progress.update(counter)
    # Single commit at the end: the whole load is one transaction.
    session.commit()
    progress.finish()
def hive_word_to_word(json_file):
    """Print, for each word, up to 50 neighbours with cosine similarity > 0.5.

    Output: md5(source) <TAB> source <TAB> target <TAB> score, sorted by
    descending similarity.  O(n^2) in vocabulary size.
    """
    word_dict = load_dict(json_file)
    for source_word in word_dict:
        source_vector = array(word_dict[source_word], dtype = "float32")
        source_hash = md5(source_word.encode("utf-8")).hexdigest()
        sim_list = []
        for target_word in word_dict:
            target_vector = array(word_dict[target_word], dtype = "float32")
            # Cosine similarity between the two word vectors.
            sim_score = dot(source_vector, target_vector) / sqrt(dot(source_vector, source_vector)) / sqrt(dot(target_vector, target_vector))
            if sim_score > 0.5:
                sim_list.append((target_word, sim_score))
        # NOTE(review): cmp-style sort is Python 2 only; under Python 3 use
        # sim_list.sort(key=lambda pair: pair[1], reverse=True).
        sim_list.sort(lambda x, y: cmp(x[1], y[1]), reverse = True)
        for i in range(50):
            if i < len(sim_list):
                print "%s\t%s\t%s\t%f" % (source_hash, source_word.encode("utf-8"), sim_list[i][0].encode("utf-8"), sim_list[i][1])
# NOTE(review): this re-definition shadows the full hive_word_to_word
# implementation defined directly above; after import only this stub
# exists.  Rename or remove one of the two definitions.
def hive_word_to_word(json_file):
    word_dict = load_dict(json_file)
    for word in word_dict:
        # Placeholder body: prints a literal marker, not the word itself.
        print "word"
def hive_load_bidword(seg_file, dict_file, thread_number = 1):
    """Fan *seg_file* out over worker threads that emit bidword vectors.

    *thread_number* workers each process an interleaved share of the file.
    """
    thread_list = []
    word_dict = load_dict(dict_file)
    for i in range(thread_number):
        thread_list.append(DBLoadBidwordKernel(seg_file, word_dict, thread_number, i))
    for thread in thread_list:
        thread.start()
    # Block until every worker has finished.
    for thread in thread_list:
        thread.join()
def hive_load_query(seg_file, thread_number = 2):
    """Fan *seg_file* out over worker threads that load query vectors into the DB."""
    thread_list = []
    for i in range(thread_number):
        thread_list.append(DBLoadQueryKernel(seg_file, thread_number, i))
    for thread in thread_list:
        thread.start()
    # Block until every worker has finished.
    for thread in thread_list:
        thread.join()
def main():
    """CLI entry point: vectorize a segmented query/bidword file."""
    parser = ArgumentParser()
    parser.add_argument("seg_file", help = "segment file one query/bidword per line")
    parser.add_argument("dict_file", help = "word2vec file in json format")
    args = parser.parse_args()
    seg_file = args.seg_file
    dict_file = args.dict_file
    # Single-threaded bidword processing by default.
    hive_load_bidword(seg_file, dict_file)
if __name__ == "__main__":
    main()
| Python | 0.000103 | |
e84640c5c67759be3de1a934d974c250d7b73a0c | Split kernels into their own name space | scikits/statsmodels/sandbox/kernel.py | scikits/statsmodels/sandbox/kernel.py | # -*- coding: utf-8 -*-
"""
This models contains the Kernels for Kernel smoothing.
Hopefully in the future they may be reused/extended for other kernel based method
"""
class Kernel(object):
    """
    Generic 1D Kernel object.
    Can be constructed by selecting a standard named Kernel,
    or providing a lambda expression and domain.
    The domain allows some algorithms to run faster for finite domain kernels.
    """
    # MC: Not sure how this will look in the end - or even still exist.
    # Main purpose of this is to allow custom kernels and to allow speed up
    # from finite support.
    # NOTE(review): this module never imports numpy, yet evaluate() uses
    # ``np`` -- an ``import numpy as np`` must exist in the importing
    # context (or be added here) before this class is usable.
    def __init__(self, shape, h = 1.0, domain = None):
        """
        shape should be a lambda taking and returning numeric type.
        For sanity it should always return positive or zero.
        """
        # domain is treated by evaluate() as a 2-sequence (lower, upper) in
        # bandwidth-rescaled units; None is only safe for code paths that
        # never call evaluate().
        self.domain = domain
        # TODO: Add checking code that shape is valid
        self._shape = shape
        # h is the bandwidth (smoothing parameter).
        self.h = h
    def evaluate(self, xs, ys, x):
        """Kernel-weighted average of ys at point x (Nadaraya-Watson style)."""
        # TODO: make filtering more efficient
        # NOTE(review): self.domain is subscripted below, so evaluate()
        # requires a finite domain; domain=None raises TypeError here.
        filtered = [(xx,yy) for xx,yy in zip(xs,ys) if (xx-x)/self.h >= self.domain[0] and (xx-x)/self.h <= self.domain[1]]
        if len(filtered) > 0:
            xs,ys = zip(*filtered)
            # w: total kernel weight; v: kernel-weighted sum of the ys.
            w = np.sum([self((xx-x)/self.h) for xx in xs])
            v = np.sum([yy*self((xx-x)/self.h) for xx, yy in zip(xs,ys)])
            return v/w
        else:
            return 0
    def __call__(self, x):
        return self._shape(x)
class Gaussian(Kernel):
    """Gaussian kernel with shape exp(-x**2 / 2) and unbounded support."""
    def __init__(self, h=1.0):
        # Bug fix: the original assigned h/_shape directly and never set
        # ``domain``, so Kernel.evaluate raised AttributeError on Gaussian
        # instances.  Delegate to Kernel.__init__ so every attribute the
        # base class relies on is initialised.
        super(Gaussian, self).__init__(lambda x: np.exp(-x**2/2.0),
                                       h=h, domain=None)
| Python | 0.999848 | |
54187d401656061b0f17f2b84ab5a114c25e4137 | add a scoring script used in the KDD'12 track2 example. | examples/kdd2012track2/scoreKDD.py | examples/kdd2012track2/scoreKDD.py | """
Scoring Metrics for KDD Cup 2012, Track 2
Reads in a solution/subission files
Scores on the following three metrics:
-NWMAE
-WRMSE
-AUC
Author: Ben Hamner (kdd2012@benhamner.com)
"""
def scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr, elementwise_metric):
    """Impression-weighted average of a per-instance error metric.

    Parameters
    ----------
    num_clicks : list of click counts
    num_impressions : list of impression counts (used as weights)
    predicted_ctr : list of predicted click-through rates
    elementwise_metric : callable(clicks, impressions, p_ctr) -> float

    Returns
    -------
    float : the weighted mean of the metric over the whole set
    """
    # Collect (weighted error, weight) pairs, then reduce in one pass each.
    contributions = [
        (elementwise_metric(clicks, impressions, p_ctr) * impressions, impressions)
        for clicks, impressions, p_ctr
        in zip(num_clicks, num_impressions, predicted_ctr)
    ]
    weighted_error = sum(err for err, _ in contributions)
    total_weight = sum(weight for _, weight in contributions)
    return weighted_error / total_weight
def scoreWRMSE(num_clicks, num_impressions, predicted_ctr):
    """Weighted Root Mean Squared Error (WRMSE) of predicted CTRs.

    The per-impression squared error is averaged (impression-weighted) by
    scoreElementwiseMetric, and the square root of that mean is returned.
    """
    import math

    def squared_error(clicks, impressions, p_ctr):
        # Per-instance squared deviation of prediction from observed CTR.
        return math.pow(clicks / impressions - p_ctr, 2.0)

    return math.sqrt(
        scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr,
                               squared_error))
def scoreNWMAE(num_clicks, num_impressions, predicted_ctr):
    """Normalized Weighted Mean Absolute Error (NWMAE) of predicted CTRs.

    The per-instance absolute error |clicks/impressions - p_ctr| is
    averaged with impressions as weights via scoreElementwiseMetric.
    """
    def absolute_error(clicks, impressions, p_ctr):
        return abs(clicks / impressions - p_ctr)

    return scoreElementwiseMetric(num_clicks, num_impressions, predicted_ctr,
                                  absolute_error)
def scoreClickAUC(num_clicks, num_impressions, predicted_ctr):
    """
    Calculates the area under the ROC curve (AUC) for click rates
    Parameters
    ----------
    num_clicks : a list containing the number of clicks
    num_impressions : a list containing the number of impressions
    predicted_ctr : a list containing the predicted click-through rates
    Returns
    -------
    auc : the area under the ROC curve (AUC) for click rates
    """
    # Indices ordered by predicted CTR, highest first, so the ROC curve is
    # traced from the most confident positive predictions downwards.
    i_sorted = sorted(range(len(predicted_ctr)),key=lambda i: predicted_ctr[i],
                      reverse=True)
    auc_temp = 0.0
    click_sum = 0.0          # running positive (click) count
    old_click_sum = 0.0      # click count at the start of the current tie bucket
    no_click = 0.0           # negatives (non-clicks) inside the current bucket
    no_click_sum = 0.0       # total negatives seen so far
    # treat all instances with the same predicted_ctr as coming from the
    # same bucket
    # Initialising last_ctr above any real prediction forces the first
    # iteration to open a fresh bucket.
    last_ctr = predicted_ctr[i_sorted[0]] + 1.0
    #last_ctr = float("nan")
    for i in range(len(predicted_ctr)):
        if last_ctr != predicted_ctr[i_sorted[i]]:
            # Close the previous bucket: add its trapezoid (mean of the
            # click counts at the bucket edges times its negative width).
            auc_temp += (click_sum+old_click_sum) * no_click / 2.0
            old_click_sum = click_sum
            no_click = 0.0
            last_ctr = predicted_ctr[i_sorted[i]]
        no_click += num_impressions[i_sorted[i]] - num_clicks[i_sorted[i]]
        no_click_sum += num_impressions[i_sorted[i]] - num_clicks[i_sorted[i]]
        click_sum += num_clicks[i_sorted[i]]
    # Close the final bucket, then normalise by positives * negatives to
    # map the accumulated area onto [0, 1].
    auc_temp += (click_sum+old_click_sum) * no_click / 2.0
    auc = auc_temp / (click_sum * no_click_sum)
    return auc
def read_solution_file(f_sol_name):
    """
    Reads in a solution file
    Parameters
    ----------
    f_sol_name : solution file name (CSV: clicks,impressions per line)
    Returns
    -------
    num_clicks : a list of clicks
    num_impressions : a list of impressions
    """
    num_clicks = []
    num_impressions = []
    # "with" ensures the file handle is closed (the original leaked it).
    with open(f_sol_name) as f_sol:
        for line_no, line in enumerate(f_sol):
            fields = line.strip().split(",")
            try:
                # IndexError is caught too, so a short or blank line is
                # skipped instead of crashing (the original only caught
                # ValueError and would raise on a one-field line).
                clicks = float(fields[0])
                impressions = float(fields[1])
            except (ValueError, IndexError) as e:
                # Skip over the header silently; report any later bad line
                # with its actual position in the file (the original
                # printed the count of parsed rows instead).
                if line_no != 0:
                    print("parse error at line: %d" % line_no)
                    print(e)
                continue
            num_clicks.append(clicks)
            num_impressions.append(impressions)
    print("submission length=%d" % len(num_clicks))
    return (num_clicks, num_impressions)
def read_submission_file(f_sub_name):
    """
    Reads in a submission file
    Parameters
    ----------
    f_sub_name : submission file name (one predicted CTR per line)
    Returns
    -------
    predicted_ctr : a list of predicted click-through rates
    """
    predicted_ctr = []
    # "with" ensures the file handle is closed (the original leaked it).
    with open(f_sub_name) as f_sub:
        for line in f_sub:
            # Only the first comma-separated field is the prediction.
            predicted_ctr.append(float(line.strip().split(",")[0]))
    return predicted_ctr
def main():
    """Command-line entry point: score a submission against a solution file."""
    import sys
    # Expect exactly two positional arguments: solution CSV, submission CSV.
    if len(sys.argv) != 3:
        print("Usage: python scoreKDD.py solution_file.csv submission_file.csv")
        sys.exit(2)
    num_clicks, num_impressions = read_solution_file(sys.argv[1])
    predicted_ctr = read_submission_file(sys.argv[2])
    # Sanity-check the three parallel lists before scoring.
    print("num_clicks : %d" % len(num_clicks))
    print("num_impressions : %d" % len(num_impressions))
    print("num_predicted_ctrs: %d" % len(predicted_ctr))
    # Report all three competition metrics.
    auc = scoreClickAUC(num_clicks, num_impressions, predicted_ctr)
    print("AUC : %f" % auc)
    nwmae = scoreNWMAE(num_clicks, num_impressions, predicted_ctr)
    print("NWMAE: %f" % nwmae)
    wrmse = scoreWRMSE(num_clicks, num_impressions, predicted_ctr)
    print("WRMSE: %f" % wrmse)
if __name__=="__main__":
    main()
| Python | 0 | |
632056eef0666808d16740f434a305d0c8995132 | Create magooshScraper.py | magooshScraper.py | magooshScraper.py | import scrapy
from bs4 import BeautifulSoup
class magooshSpider(scrapy.Spider):
    """Spider that logs in to gre.magoosh.com and collects Math lesson video URLs."""
    name = 'magoosh'
    start_urls = ['http://gre.magoosh.com/login']
    def parse(self, response):
        """Submit the login form found on the start page.

        NOTE: replace the placeholder credentials below with your own
        registered email and password on http://gre.magoosh.com.
        """
        # Bug fix: the original embedded the note above as a bare string
        # literal between the positional and keyword arguments, with no
        # comma -- a SyntaxError that prevented the module from importing.
        return scrapy.FormRequest.from_response(
            response,
            formdata={'session[login]': 'abc@xyz.com', 'session[password]': 'somepassword'},
            callback=self.after_login
        )
    def after_login(self, response):
        """On successful login, request the lessons index page."""
        # NOTE(review): on Python 3, response.body is bytes, so this str
        # membership test may need b'Dashboard' -- confirm scrapy version.
        if 'Dashboard' in response.body:
            self.logger.info('Logged in successfully!')
            return scrapy.Request('http://gre.magoosh.com/lessons',
                callback=self.lessonsPage_loaded)
    def lessonsPage_loaded(self, response):
        """Yield a Request for every lesson link under the Math section."""
        self.logger.info('Lessons page opened.')
        soup = BeautifulSoup(response.body)
        for categ in soup.find_all('h2'):
            # Set the Subject name to crawl
            # In this example, Maths section is scraped.
            if 'Math' in categ:
                self.logger.info('Math section found.')
                cgparent = categ.parent.parent
                for vu in cgparent.find_all('a'):
                    link = str(vu.get('href'))
                    if '/lessons/' in link:
                        s = 'http://gre.magoosh.com' + str(link) + "\n"
                        req = scrapy.Request(s, callback=self.videoPage_loaded)
                        yield req
        return
    def videoPage_loaded(self, response):
        """Append each video URL (data-file attribute) to scrapedVideoLinks.txt."""
        self.logger.info('Fetching video...')
        soup = BeautifulSoup(response.body)
        for div in soup.find_all('div'):
            if div.get('data-file'):
                vl = div.get('data-file')
                # Context manager so the file is closed even on error.
                with open('scrapedVideoLinks.txt', 'a') as f:
                    f.write(str(vl) + '\n')
| Python | 0.000001 | |
dbfc033fdfaad5820765a41766a5342831f3c4f9 | add util script to dump twitter oauth tokens | scripts/remove_twuser_oauth.py | scripts/remove_twuser_oauth.py | """Remove a twitter user's oauth tokens and reload iembot"""
from __future__ import print_function
import json
import sys
import psycopg2
import requests
def main(argv):
    """Run for a given username"""
    # argv[1] is the Twitter screen name whose OAuth tokens are purged.
    screen_name = argv[1]
    # NOTE(review): the settings file handle is never closed; a "with"
    # block would be safer.
    settings = json.load(open("../settings.json"))
    pgconn = psycopg2.connect(database=settings['databaserw']['openfire'],
                              user=settings['databaserw']['user'],
                              host=settings['databaserw']['host'])
    cursor = pgconn.cursor()
    # Parameterized query: screen_name is bound separately, never
    # interpolated into the SQL string.
    cursor.execute("""
    DELETE from iembot_twitter_oauth where screen_name = %s
    """, (screen_name, ))
    print(("Removed %s entries from the database for screen name '%s'"
           ) % (cursor.rowcount, screen_name))
    cursor.close()
    pgconn.commit()
    # Ask the running iembot instance to reload its configuration.
    uri = "http://iembot:9003/reload"
    req = requests.get(uri, timeout=30)
    print("reloading iembot %s" % (repr(req.content), ))
if __name__ == '__main__':
    main(sys.argv)
| Python | 0 | |
77aa24bbea447d8684614f0d089320d134412710 | Test ini-configured app. | test_app.py | test_app.py | from flask import Flask
# NOTE(review): the flask.ext.* namespace was removed in Flask 1.0; modern
# code imports flask_iniconfig directly -- confirm the pinned Flask version.
from flask.ext.iniconfig import INIConfig
# Minimal smoke test: create the app, attach the INI loader, and read the
# settings file inside an application context.
app = Flask(__name__)
INIConfig(app)
with app.app_context():
    app.config.from_inifile('settings.ini')
| Python | 0 | |
d1df2b573c515d3ea18ce46ccc58c8bc9e788915 | Clean commit | src/pymatgen_pars.py | src/pymatgen_pars.py | import pymatgen as mg
from pymatgen.matproj.rest import MPRester
import pandas as pd
from pymatgen import Element,Composition
import multiprocessing as mp
import pickle
import json
from monty.json import MontyEncoder,MontyDecoder
import numpy as np
def ret_struct_obj(i):
    """Parse a CIF-format string into a pymatgen Structure (used via Pool.map)."""
    return mg.Structure.from_str(i,fmt="cif")
def return_struct_list():
    """Unpickle the cached CIF strings and parse them into Structures in parallel."""
    # Bug fix: pickle data must be read in binary mode ('rb'); text mode
    # raises on Python 3 and can corrupt data on Windows.
    with open("ternaries_from_mg.pickle", 'rb') as f:
        cif_strings = pickle.load(f)
    # Context-manage the Pool so the four worker processes are cleaned up
    # instead of being leaked.
    with mp.Pool(4) as pool:
        return pool.map(ret_struct_obj, cif_strings)
def read_ternaries():
    """Query the Materials Project for all ternary compounds and cache their CIFs."""
    with MPRester() as m:
        ternaries1 = m.query(criteria={"nelements": 3}, properties=['icsd_ids', 'pretty_formula', 'cif'])
    list_cif = [i['cif'] for i in ternaries1]
    # Bug fix: pickle files must be opened in binary mode ('wb'); the
    # context manager also guarantees the file is flushed and closed
    # (the original closed it manually and used text mode).
    with open("ternaries_from_mg.pickle", 'wb') as outfile:
        pickle.dump(list_cif, outfile)
def read_unique_data(filename):
    """Deserialize Monty-encoded JSON (e.g. pymatgen Structures) from *filename*."""
    with open(filename, 'r') as handle:
        return json.load(handle, cls=MontyDecoder)
def get_space_groups(strts):
    """Return a numpy array holding the space-group symbol of each structure."""
    symbols = [structure.get_spacegroup_info()[0] for structure in strts]
    return np.array(symbols)
def read_data(filename):
    """
    :argument
    filename - The filename of the csv file to read from
    :returns
    DataFrame - Pandas Dataframe containing the formatted parsed data
    """
    # NOTE(review): despite the docstring, the input is Monty-encoded JSON
    # (see read_unique_data), not CSV.
    uniq_data=read_unique_data(filename)
    space_groups=get_space_groups(uniq_data)
    (comps,stoich_coeffs,at_nos,eneg)=get_comp_data(uniq_data)
    # Columns: per element slot (ternaries -> 3 slots) the atomic number Z,
    # stoichiometric coefficient, and electronegativity.
    # NOTE(review): the local name DataFrame shadows pandas' class name.
    DataFrame = pd.DataFrame({"Z1": at_nos[:, 0]})
    DataFrame["Z2"] = at_nos[:, 1]
    DataFrame["Z3"] = at_nos[:, 2]
    DataFrame["St_coeff1"] = stoich_coeffs[:, 0]
    DataFrame["St_coeff2"] = stoich_coeffs[:, 1]
    DataFrame["St_coeff3"] = stoich_coeffs[:, 2]
    DataFrame["Eneg1"] = eneg[:, 0]
    DataFrame["Eneg2"] = eneg[:, 1]
    DataFrame["Eneg3"] = eneg[:, 2]
    DataFrame["Space Group"] = space_groups
    DataFrame["Composition"] = comps
    return DataFrame
def get_comp_data(un_data):
    """Extract composition-derived feature arrays from a list of structures.

    Returns (compositions, stoichiometric coefficients, atomic numbers,
    electronegativities); the three numeric arrays have one row per
    structure and three columns (assumes ternary compounds).
    """
    element_universe = [str(e) for e in Element]
    # NOTE(review): dict_element is built here but never used below.
    dict_element = {}
    for i, j in enumerate(element_universe):
        dict_element[str(j)] = i
    stoich_array = np.zeros((len(un_data), 3), dtype=float)
    at_num_array = np.zeros((len(un_data), 3), dtype=int)
    electroneg_array = np.zeros((len(un_data), 3), dtype=float)
    comp_array=[a.composition for a in un_data]
    temp_dict_list = [dict(comp.get_el_amt_dict()) for comp in comp_array]
    for index,temp_dict in enumerate(temp_dict_list):
        # NOTE(review): a composition with more than 3 distinct elements
        # would raise IndexError; dict key order decides column assignment.
        for count, key in enumerate(temp_dict.keys()):
            stoich_array[index][count] = temp_dict[key]
            # Treat deuterium/tritium as hydrogen for Z and electronegativity.
            if key not in ['D', 'T']:
                at_num_array[index][count] = Element(key).Z
                electroneg_array[index][count] = Element(key).X
            else:
                at_num_array[index][count] = Element('H').Z
                electroneg_array[index][count] = Element('H').X
    del(dict_element)
    del(temp_dict_list)
    return (comp_array,stoich_array,at_num_array,electroneg_array)
#if __name__=="__main__":
| Python | 0 | |
74bde8878aa9b336046374ce75fc4c7bc63eaba7 | add test for VampSimpleHost | tests/test_vamp_simple_host.py | tests/test_vamp_simple_host.py | #! /usr/bin/env python
from unit_timeside import unittest, TestRunner
from timeside.decoder.file import FileDecoder
from timeside.core import get_processor
from timeside import _WITH_VAMP
from timeside.tools.test_samples import samples
@unittest.skipIf(not _WITH_VAMP, 'vamp-simple-host library is not available')
class TestVampsimpleHost(unittest.TestCase):
    """Smoke test for the vamp_simple_host analyzer plugin."""
    def setUp(self):
        self.analyzer = get_processor('vamp_simple_host')()
    def testOnC4_scale(self):
        "runs on C4_scale"
        self.source = samples["C4_scale.wav"]
    # NOTE(review): the actual decode+analyze pipeline runs in tearDown,
    # so failures are reported as cleanup errors rather than test
    # failures -- consider moving this into the test method itself.
    def tearDown(self):
        decoder = FileDecoder(self.source)
        (decoder | self.analyzer).run()
        results = self.analyzer.results
        print results.keys()
        #print results
        #print results.to_yaml()
        #print results.to_json()
        #print results.to_xml()
| Python | 0.000001 | |
0d596f8c7148c2ac13c2b64be09ca1e20719cdb9 | add dumper of flowpaths to shapefile | scripts/util/dump_flowpaths.py | scripts/util/dump_flowpaths.py | """Dump flowpaths to a shapefile."""
from geopandas import read_postgis
from pyiem.util import get_dbconn
def main():
    """Go Main Go."""
    # Read-only connection to the IDEP database.
    pgconn = get_dbconn('idep')
    # Select all scenario-0 flowpaths whose HUC12 touches Iowa, with the
    # geometry reprojected to WGS84 (EPSG:4326) for the shapefile.
    df = read_postgis("""
    SELECT f.fpath, f.huc_12, ST_Transform(f.geom, 4326) as geo from
    flowpaths f, huc12 h WHERE h.scenario = 0 and f.scenario = 0
    and h.huc_12 = f.huc_12 and h.states ~* 'IA'
    """, pgconn, index_col=None, geom_col='geo')
    df.to_file("ia_flowpaths.shp")
if __name__ == '__main__':
    main()
| Python | 0 | |
345af55938baef0da1f0793d8a109fcee63692dd | Add files via upload | tokenize.py | tokenize.py | #!/usr/bin/python
#-*-coding:utf-8 -*-
# author: mld
# email: miradel51@126.com
# date : 2017/9/28
import sys
import string
import re
def tokenizestr(original_str):
    """Separate punctuation from words by inserting spaces next to symbols.

    Every symbol in ``space_before`` gets a space inserted in front of it;
    '~' is the one character that gets its space *after* instead.  Returns
    the retokenized string.
    """
    space_before = ['[', '!', '%', '#', '@', '&', '*', '.', ';', ',', '^',
                    '(', ')', '{', '>', '?', '}', '-', ':', '=', '+']
    for symbol in space_before:
        original_str = original_str.replace(symbol, ' ' + symbol)
    # '~' gets its space on the right-hand side.
    original_str = original_str.replace('~', '~ ')
    return original_str
if __name__ == '__main__':
	# Usage: tokenize.py <input_file> <output_file>
	ori_ = sys.argv[1]
	tok_ = sys.argv[2]
	# NOTE(review): consider 'with open(...)' so both files are always
	# closed even if an exception is raised mid-loop.
	ori_file = open(ori_,"r")
	tok_file = open(tok_,"w")
	context = ""
	# Tokenize the input line by line, writing one output line per input line.
	for eachline in ori_file:
		context = eachline.strip()
		#need to tokenization (just separate symboles from words in current line)
		context = tokenizestr(context)
		tok_file.write(context)
		tok_file.write("\n")
	ori_file.close()
tok_file.close() | Python | 0 | |
f7768b10df84a4b3bb784ee1d449e380b93d88bb | add a simple scan example | data/scan_example.py | data/scan_example.py | import numpy
import theano
from theano import tensor
# some numbers
n_steps = 10
n_samples = 5
dim = 10
input_dim = 20
output_dim = 2
# one step function that will be used by scan
def oneStep(x_t, h_tm1, W_x, W_h, W_o):
    """One recurrence step used by theano.scan.

    h_t = tanh(x_t @ W_x + h_tm1 @ W_h); o_t = h_t @ W_o.
    Returns (new hidden state, output).
    """
    pre_activation = tensor.dot(x_t, W_x) + tensor.dot(h_tm1, W_h)
    h_t = tensor.tanh(pre_activation)
    o_t = tensor.dot(h_t, W_o)
    return h_t, o_t
# spawn theano tensor variable, our symbolic input
# a 3D tensor (n_steps, n_samples, dim)
x = tensor.tensor3(dtype='float32')
# initial state of our rnn
init_state = tensor.alloc(0., n_samples, dim)
# create parameters that we will use,
# note that, parameters are theano shared variables
# parameters for input to hidden states
W_x_ = numpy.random.randn(input_dim, dim).astype('float32')
W_x = theano.shared(W_x_)
# parameters for hidden state transition
W_h_ = numpy.random.randn(dim, dim).astype('float32')
W_h = theano.shared(W_h_)
# parameters from hidden state to output
W_o_ = numpy.random.randn(dim, output_dim).astype('float32')
W_o = theano.shared(W_o_)
# scan function
# outputs_info: init_state seeds the recurrent h; None means o_t is an
# output only and is not fed back into the next step.
# strict=True requires every shared variable used by oneStep to be passed
# explicitly via non_sequences.
([h_vals, o_vals], updates) = theano.scan(
    fn=oneStep,
    sequences=[x],
    outputs_info=[init_state, None],
    non_sequences=[W_x, W_h, W_o],
    n_steps=n_steps,
    strict=True)
# let us now compile a function to get the output
f = theano.function([x], [h_vals, o_vals])
# now we will call the compiled function with actual input
actual_input = numpy.random.randn(
    n_steps, n_samples, input_dim).astype('float32')
h_vals_, o_vals_ = f(actual_input)
# print the shapes
# (Python 2 print statements, matching the rest of this example.)
print 'shape of input :', actual_input.shape
print 'shape of h_vals:', h_vals_.shape
print 'shape of o_vals:', o_vals_.shape
| Python | 0.000001 | |
9737f8b1551adb5d3be62b1922de27d867ac2b24 | Add forwarding script for build-bisect.py. | build/build-bisect.py | build/build-bisect.py | #!/usr/bin/python
# Copyright (c) 2010 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
# Deprecation shim: tell the user where the script moved, then exit
# non-zero so any automation depending on this path notices.
# (Python 2 print statements, as in the original Chromium tree.)
print "This script has been moved to tools/bisect-builds.py."
print "Please update any docs you're working from!"
sys.exit(1)
| Python | 0.000002 | |
4c2663939008285c395ee5959c38fab280f43e58 | Create 03.PracticeCharsAndStrings.py | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py | TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py | print(input())
# Echo four lines read from standard input straight back to standard output.
print(input())
print(input())
print(input())
print(input())
| Python | 0 | |
782e4da9d04c656b3e5290269a4f06328ee5d508 | add file | main.py | main.py | import numpy as np
@np.vectorize
def F(n):
    """Return the n-th Fibonacci number via Binet's closed-form formula."""
    sqrt5 = np.sqrt(5.)
    phi = (1. + sqrt5) / 2.
    psi = (1. - sqrt5) / 2.
    return (phi ** n - psi ** n) / sqrt5
# Evaluate F over the first 10 indices (0..9).
n = np.arange(10)
# NOTE(review): rebinding F to the result array shadows the function above.
F = F(n)
# Persist one value per line.
np.savetxt("F.txt", F)
| Python | 0.000002 | |
ea0f02d6be95d0d8ef3081d49743702605467b51 | Add toy box | royalword.py | royalword.py | # Public: Toybox verison of W3 program.
#
# w3_dict - dictionary of interesting words.
# w3_defs - dictionary of definitions.
#
# Keys for both dicts are matching integers for word/defintion.
#
# word_display() - Displays a randomly generated word and its definition.
# learning_listed - Removes displayed word from main list.
# Returns random sets of a word from the list and it's definition.
##########################################################################
# DICTIONARIES
w3_dict = {
1: 'amorous',
2: 'amorphous',
3: 'antithesis',
4: 'apostate',
5: 'apotheosis',
6: 'belligerent',
7: 'beneficent',
8: 'bromide',
9: 'callipygian',
10: 'censorious',
11: 'cistern',
12: 'codicil',
13: 'cognizant',
14: 'cognomen',
15: 'concise',
16: 'corollary',
17: 'debonair'
}
w3_defs = {
1: '1: inclined toward or displaying love 2: expressive of or exciting sexual love or romance',
2: 'formless, shapeless, having no definite form or distinct shape',
3: '1: exact opposite 2: the juxtaposition of contrasting words or ideas to give a feeling of balance',
4: '1: a disloyal person who betrays or deserts his cause or religion or political party or friend etc. 2: not faithful to religion or party or cause',
5: '1: model of excellence or perfection of a kind; one having no equal 2: the elevation of a person ie. as to the status of a god',
6: 'characteristic of an enemy or one eager to fight',
7: 'doing or producing good',
8: '1: any of the salts of hydrobromic acid; formerly used as a sedative but now generally replaced by safer drugs 2: a trite or obvious remark',
9: 'pertaining to or having finely developed buttocks',
10: 'harshly critical or expressing censure',
11: 'an artificial reservoir for storing liquids; especially an underground tank for storing rainwater',
12: 'a supplement to a will; a testamentary instrument intended to alter an already executed will',
13: 'having or showing knowledge or understanding or realization or perception',
14: 'a familiar name for a person',
15: 'expressing much in few words',
16: '1: a practical consequence that follows naturally 2: an inference that follows directly from the proof of another proposition',
17: '1: having a sophisticated charm 2: having a cheerful, lively, and self-confident air'
}
##########################################################################
from random import randint
##########################################################################
# INTIALIZING VARIABLES
n = len(w3_dict)
choice = ''
# Words to be re-displayed at algorithmic intervals until learnt
# * currently useless haha *
learning_list = {}
learning_list_defs = {}
# Words no longer needing to be displayed
# * currently even more useless ;_; *
learnt_list = {}
learnt_list_defs = {}
##########################################################################
# MAKING METHODS
def word_display():
    """Pick a random word id, print the word and its definition, then retire it."""
    # NOTE(review): randint may produce an id already moved to
    # learning_list, which raises KeyError (handled by the caller's loop).
    word = randint(1,n)
    print w3_dict[word]
    print w3_defs[word]
    learning_listed(word)
def learning_listed(word):
    """Move *word* (an integer id) from the main dicts into the learning lists."""
    new_word = w3_dict.pop(word)
    new_def = w3_defs.pop(word)
    learning_list.update({word: new_word})
    learning_list_defs.update({word: new_def})
def learning_time():
    """Outputs words from learning_list or returns false"""
    # NOTE(review): stub -- no body implemented yet, so it always returns None.
##########################################################################
# MAIN PROGRAM
print("W3")
print('Press E to Exit or any key to reload.\n')
if w3_dict:
while choice != 'E':
try:
word_display()
choice = raw_input('\n ')
print(' ')
except KeyError:
# Random integer is removed key
print('Fetching...\n')
else:
# Main dictionary is emptied!
print('All done!')
| Python | 0.999799 | |
60f13bdfb97e83ac1bf2f72e3eec2e2c2b88cbb3 | add tests for potential density computation | biff/tests/test_bfe.py | biff/tests/test_bfe.py | # coding: utf-8
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Third-party
import astropy.units as u
from astropy.constants import G as _G
G = _G.decompose([u.kpc,u.Myr,u.Msun]).value
import numpy as np
# Project
from .._bfe import density
# Check that we get A000=1. for putting in hernquist density
def hernquist_density(xyz, M, r_s):
    """Hernquist (1990) density profile evaluated at Cartesian positions.

    xyz is coerced to shape (n, 3); returns one density per position:
    rho(r) = M r_s / (2 pi r (r + r_s)^3).
    """
    pos = np.atleast_2d(xyz)
    r = np.sqrt((pos ** 2).sum(axis=-1))
    return M * r_s / (2 * np.pi * r * (r + r_s) ** 3)
def hernquist_potential(xyz, M, r_s):
    """Hernquist gravitational potential -G*M/(r + r_s) at Cartesian positions.

    Uses the module-level G (gravitational constant in kpc/Myr/Msun units).
    """
    pos = np.atleast_2d(xyz)
    r = np.sqrt((pos ** 2).sum(axis=-1))
    return -G * M / (r + r_s)
def test_hernquist():
    """A000=1 with all other coefficients zero must reproduce Hernquist density."""
    nmax = 6
    lmax = 2
    # Expansion coefficients: only the leading (0,0,0) term is non-zero.
    Anlm = np.zeros((nmax+1,lmax+1,lmax+1))
    Anlm[0,0,0] = 1.
    M = 1E10
    r_s = 3.5
    # Sample 128 points along the x-axis between 0.1 and 10.
    nbins = 128
    rr = np.linspace(0.1,10.,nbins)
    xyz = np.zeros((nbins,3))
    xyz[:,0] = rr
    bfe_dens = density(xyz, M, r_s, Anlm, nmax, lmax)
    true_dens = hernquist_density(xyz, M, r_s)
    np.testing.assert_allclose(bfe_dens, true_dens)
| Python | 0.000001 | |
1e808aa70882cd30cd0ac7a567d12efde99b5e61 | Create runserver.py | runserver.py | runserver.py | from ucwa.http import app
# Start Flask's built-in development server for the UCWA app.
# NOTE(review): debug=True enables the interactive debugger; never use it
# in production.
app.run(debug=True)
| Python | 0.000002 | |
98eb8c1bb013106108e239c7bc8b6961a2f321cd | Allow debug mode from the CLI | blaze/server/spider.py | blaze/server/spider.py | #!/usr/bin/env python
from __future__ import absolute_import
import os
import sys
import argparse
import yaml
from odo import resource
from odo.utils import ignoring
from .server import Server, DEFAULT_PORT
__all__ = 'spider', 'from_yaml'
def _spider(resource_path, ignore, followlinks, hidden):
    """Recursive worker for spider(): map basenames to odo resources.

    Hidden entries (dot-prefixed) are skipped unless *hidden* is true;
    symlinks are skipped unless *followlinks* is true.  Files whose
    resource() call raises one of *ignore* are silently omitted.
    """
    resources = {}
    for filename in (os.path.join(resource_path, x)
                     for x in os.listdir(resource_path)):
        basename = os.path.basename(filename)
        if (basename.startswith(os.curdir) and not hidden or
                os.path.islink(filename) and not followlinks):
            continue
        if os.path.isdir(filename):
            # Recurse; empty directories are dropped from the result.
            new_resources = _spider(filename, ignore=ignore,
                                    followlinks=followlinks, hidden=hidden)
            if new_resources:
                resources[basename] = new_resources
        else:
            with ignoring(*ignore):
                resources[basename] = resource(filename)
    return resources
def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
           hidden=False):
    """Traverse a directory tree and call ``odo.resource`` on its contents.

    Parameters
    ----------
    path : str
        Directory of resources to load.
    ignore : tuple of Exception, optional
        Exception types to swallow when calling resource.
    followlinks : bool, optional
        Whether to follow symbolic links.
    hidden : bool, optional
        Whether to load hidden (dot-prefixed) files.

    Returns
    -------
    dict
        Maps the directory's basename to a (possibly nested) dict of
        resources keyed by file basename.
    """
    tree = _spider(path, ignore=ignore, followlinks=followlinks,
                   hidden=hidden)
    return {os.path.basename(path): tree}
def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
              hidden=False):
    """Construct a dictionary of resources from a YAML specification.
    Parameters
    ----------
    path : str
        Path to a YAML specification of resources to load
    ignore : tuple of Exception, optional
        Ignore these exceptions when calling resource
    followlinks : bool, optional
        Follow symbolic links
    hidden : bool, optional
        Load hidden files
    Returns
    -------
    dict
        A dictionary mapping top level keys in a YAML file to resources.
    See Also
    --------
    spider : Traverse a directory tree for resources
    """
    # NOTE(review): despite the docstring, *path* is used as an open file
    # object (path.read()), not a path string -- see _parse_args.
    # SECURITY NOTE(review): yaml.load without an explicit Loader can
    # execute arbitrary YAML tags; prefer yaml.safe_load for untrusted
    # specifications.
    resources = {}
    for name, info in yaml.load(path.read()).items():
        # Every entry must declare where its data lives.
        if 'source' not in info:
            raise ValueError('source key not found for data source named %r' %
                             name)
        source = info['source']
        if os.path.isdir(source):
            # Directories are walked recursively via spider().
            resources[name] = spider(os.path.expanduser(source),
                                     ignore=ignore,
                                     followlinks=followlinks,
                                     hidden=hidden)
        else:
            resources[name] = resource(source, dshape=info.get('dshape'))
    return resources
def _parse_args():
    """Parse command-line arguments for the blaze-server spider CLI."""
    p = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The YAML spec may come from a file argument or, by default, stdin.
    p.add_argument('path', type=argparse.FileType('r'), nargs='?',
                   default=sys.stdin,
                   help='A YAML file specifying the resources to load')
    p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                   help='Port number')
    p.add_argument('-H', '--host', type=str, default='127.0.0.1',
                   help='Host name. Use 0.0.0.0 to listen on all public IPs')
    p.add_argument('-l', '--follow-links', action='store_true',
                   help='Follow links when listing files')
    p.add_argument('-e', '--ignored-exception', nargs='*',
                   default=['Exception'],
                   help='Exceptions to ignore when calling resource on a file')
    p.add_argument('-d', '--hidden', action='store_true',
                   help='Call resource on hidden files')
    p.add_argument('-D', '--debug', action='store_true',
                   help='Start the Flask server in debug mode')
    return p.parse_args()
def _main():
    """Entry point: load resources described by the YAML spec and serve them."""
    args = _parse_args()
    # NOTE(review): ``__builtins__`` is the builtins *module* when this file
    # runs as __main__, but a plain dict when the module is imported, in
    # which case getattr() here would raise -- confirm this function is only
    # reached via the script entry point.
    ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
    resources = from_yaml(args.path,
                          ignore=ignore,
                          followlinks=args.follow_links,
                          hidden=args.hidden)
    Server(resources).run(host=args.host, port=args.port, debug=args.debug)
if __name__ == '__main__':
_main()
| #!/usr/bin/env python
from __future__ import absolute_import
import os
import sys
import argparse
import yaml
from odo import resource
from odo.utils import ignoring
from .server import Server, DEFAULT_PORT
__all__ = 'spider', 'from_yaml'
def _spider(resource_path, ignore, followlinks, hidden):
    """Recursively map the entries of ``resource_path`` to odo resources.

    Entries whose basename starts with '.' are skipped unless ``hidden``
    is true; symlinks are skipped unless ``followlinks`` is true.  Files
    whose ``resource()`` call raises one of the ``ignore`` exceptions are
    silently omitted.
    """
    resources = {}
    for filename in (os.path.join(resource_path, x)
                     for x in os.listdir(resource_path)):
        basename = os.path.basename(filename)
        # os.curdir is '.'; this is the hidden-file / symlink filter.
        if (basename.startswith(os.curdir) and not hidden or
                os.path.islink(filename) and not followlinks):
            continue
        if os.path.isdir(filename):
            new_resources = _spider(filename, ignore=ignore,
                                    followlinks=followlinks, hidden=hidden)
            # Directories that yielded nothing are pruned from the result.
            if new_resources:
                resources[basename] = new_resources
        else:
            with ignoring(*ignore):
                resources[basename] = resource(filename)
    return resources
def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
           hidden=False):
    """Traverse a directory and call ``odo.resource`` on its contents.
    Parameters
    ----------
    path : str
        Path to a directory of resources to load
    ignore : tuple of Exception, optional
        Ignore these exceptions when calling resource
    followlinks : bool, optional
        Follow symbolic links
    hidden : bool, optional
        Load hidden files
    Returns
    -------
    dict
        Possibly nested dictionary containing basenames mapping to resources
    """
    # The whole tree is keyed under the directory's own basename.
    return {
        os.path.basename(path): _spider(path, ignore=ignore,
                                        followlinks=followlinks,
                                        hidden=hidden)
    }
def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
              hidden=False):
    """Construct a dictionary of resources from a YAML specification.
    Parameters
    ----------
    path : file-like object
        Open file with a YAML specification of resources to load; it is
        consumed via ``path.read()``
    ignore : tuple of Exception, optional
        Ignore these exceptions when calling resource
    followlinks : bool, optional
        Follow symbolic links
    hidden : bool, optional
        Load hidden files
    Returns
    -------
    dict
        A dictionary mapping top level keys in a YAML file to resources.
    See Also
    --------
    spider : Traverse a directory tree for resources
    """
    resources = {}
    # NOTE(review): yaml.load() without a Loader argument can construct
    # arbitrary objects; yaml.safe_load() is preferable for untrusted specs.
    for name, info in yaml.load(path.read()).items():
        if 'source' not in info:
            raise ValueError('source key not found for data source named %r' %
                             name)
        source = info['source']
        if os.path.isdir(source):
            # Directories are walked recursively into a nested mapping.
            resources[name] = spider(os.path.expanduser(source),
                                     ignore=ignore,
                                     followlinks=followlinks,
                                     hidden=hidden)
        else:
            resources[name] = resource(source, dshape=info.get('dshape'))
    return resources
def _parse_args():
    """Parse the resource server's command-line arguments."""
    p = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # The YAML spec may come from a file argument or from stdin.
    p.add_argument('path', type=argparse.FileType('r'), nargs='?',
                   default=sys.stdin,
                   help='A YAML file specifying the resources to load')
    p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
                   help='Port number')
    p.add_argument('-H', '--host', type=str, default='127.0.0.1',
                   help='Host name. Use 0.0.0.0 to listen on all public IPs')
    p.add_argument('-l', '--follow-links', action='store_true',
                   help='Follow links when listing files')
    # Exception *names* are resolved against builtins later in _main().
    p.add_argument('-e', '--ignored-exception', nargs='*',
                   default=['Exception'],
                   help='Exceptions to ignore when calling resource on a file')
    p.add_argument('-d', '--hidden', action='store_true',
                   help='Call resource on hidden files')
    return p.parse_args()
def _main():
    """Entry point: build the resource mapping and run the server."""
    args = _parse_args()
    # NOTE(review): ``__builtins__`` is only a module in __main__; it is a
    # dict when this module is imported, and getattr() would then fail.
    ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
    resources = from_yaml(args.path,
                          ignore=ignore,
                          followlinks=args.follow_links,
                          hidden=args.hidden)
    Server(resources).run(host=args.host, port=args.port)
if __name__ == '__main__':
_main()
| Python | 0.000001 |
f737a8be41111f65944b00eb85a76687653fc8c0 | Create sort_fpkm.py | sort_fpkm.py | sort_fpkm.py | import os
import fnmatch
import sys, csv ,operator
# Walk the results tree and, for every cufflinks isoforms FPKM table found,
# write a copy sorted by its first column into the same directory, named
# after the sample directory.
for root, dirnames, filenames in os.walk('/Users/idriver/RockLab-files/test'):
    for filename in fnmatch.filter(filenames, '*.fpkm_tracking'):
        if filename =='isoforms.fpkm_tracking':
            # NOTE(review): the input handle is never closed and mode 'rU'
            # is deprecated -- a ``with`` block would be safer.
            data = csv.reader(open(os.path.join(root, filename), 'rU'),delimiter='\t')
            header = next(data, None) # returns the headers or `None` if the input is empty
            # Sort all remaining rows by the first column (tracking id).
            sortedlist = sorted(data, key=operator.itemgetter(0))
            # Write the sorted result into a new tab-separated file
            # (binary mode 'wb' is the Python 2 csv convention).
            with open(root+'/'+root.split('/')[-1]+'_isoforms.fpkm_tracking', "wb") as f:
                fileWriter = csv.writer(f, delimiter='\t')
                fileWriter.writerow(header)
                for row in sortedlist:
                    fileWriter.writerow(row)
d42aad6a15dfe9cc5a63dbb19efe112534b91a5e | Add autoexec script for reference (already bundled in config) | resources/autoexec.py | resources/autoexec.py | # place at ~/.kodi/userdata/autoexec.py
import xbmc
import time
xbmc.executebuiltin("XBMC.ReplaceWindow(1234)")
time.sleep(0.1)
xbmc.executebuiltin('PlayMedia("/storage/videos/SSL","isdir")')
xbmc.executebuiltin('xbmc.PlayerControl(repeatall)')
xbmc.executebuiltin("Action(Fullscreen)")
| Python | 0 | |
159ed7dd9dd5ade6c4310d2aa106b13bf94aa903 | Add empty cloner | stoneridge_cloner.py | stoneridge_cloner.py | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
# TODO - This will run on the central server and download releases from
# ftp.mozilla.org (ftp.m.o) to a local directory, from which they are served
# to the clients; clients fetch the necessary files via stoneridge_downloader.py
| Python | 0.000007 | |
9c3682ec717fd4de5555874ad3665c6f7be479b8 | improve ecom | netforce_sale/netforce_sale/models/payment_method.py | netforce_sale/netforce_sale/models/payment_method.py | from netforce.model import Model,fields,get_model
from netforce import database
from netforce.logger import audit_log
class PaymentMethod(Model):
    """Extends the base payment method with e-commerce gateway callbacks.

    Each handler looks up the sales order by the gateway's transaction
    number and returns ``{"next_url": ...}``, the URL the shopper should
    be redirected to afterwards.
    """
    _inherit="payment.method"
    def payment_received(self,context={}):
        """Handle a successful payment notification from the gateway.

        If the matching sales order is not yet paid, validates the payment
        type against the order's configured method and records the payment.
        Returns None when no order matches the transaction number.
        NOTE(review): ``context={}`` is a mutable default argument; it is
        only read here, but super()/sale calls receive the same object.
        """
        res=super().payment_received(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        audit_log("Payment received: transaction_no=%s"%transaction_no)
        pay_type=context.get("type")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            print("Sales order not found for transaction_no=%s"%transaction_no)
            return
        sale_id=res[0]
        print("Found sales order %d for transaction_no=%s"%(sale_id,transaction_no))
        sale=get_model("sale.order").browse(sale_id)
        if not sale.is_paid:
            method=sale.pay_method_id
            if not method:
                raise Exception("Missing sales order payment method")
            # The gateway's payment type must match the method configured
            # on the order before any payment is recorded.
            if method.type!=pay_type:
                raise Exception("Received sales order payment with wrong method (pmt: %s, sale: %s)"%(pay_type,method.type))
            audit_log("Creating payment for sales order %s: transaction_no=%s"%(sale.number,transaction_no))
            sale.payment_received(context=context)
        settings=get_model("ecom2.settings").browse(1) # XXX: change this
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            # Fall back to the back-office form view of the order.
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
    def payment_pending(self,context={}):
        """Handle a 'payment pending' callback: only resolve the redirect
        URL for the order matching the transaction number."""
        res=super().payment_pending(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            return
        sale_id=res[0]
        settings=get_model("ecom2.settings").browse(1) # XXX: change this
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
    def payment_error(self,context={}):
        """Handle a failed-payment callback: only resolve the redirect URL
        for the order matching the transaction number."""
        res=super().payment_error(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            return
        sale_id=res[0]
        settings=get_model("ecom2.settings").browse(1)
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
PaymentMethod.register()
| from netforce.model import Model,fields,get_model
from netforce import database
from netforce.logger import audit_log
class PaymentMethod(Model):
    """Gateway-callback extension of payment.method (earlier revision with
    an additional currency consistency check in payment_received)."""
    _inherit="payment.method"
    def payment_received(self,context={}):
        """Record a successful gateway payment against the matching sales
        order and return the redirect URL as ``{"next_url": ...}``."""
        res=super().payment_received(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        audit_log("Payment received: transaction_no=%s"%transaction_no)
        # ``amount`` is extracted but never used below.
        amount=context.get("amount")
        currency_id=context.get("currency_id")
        pay_type=context.get("type")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            print("Sales order not found for transaction_no=%s"%transaction_no)
            return
        sale_id=res[0]
        print("Found sales order %d for transaction_no=%s"%(sale_id,transaction_no))
        sale=get_model("sale.order").browse(sale_id)
        if not sale.is_paid:
            # Reject payments whose currency differs from the order's.
            if currency_id and currency_id!=sale.currency_id.id:
                raise Exception("Received sales order payment in wrong currency (pmt: %s, sale: %s)"%(currency_id,sale.currency_id.id))
            method=sale.pay_method_id
            if not method:
                raise Exception("Missing sales order payment method")
            if method.type!=pay_type:
                raise Exception("Received sales order payment with wrong method (pmt: %s, sale: %s)"%(pay_type,method.type))
            audit_log("Creating payment for sales order %s: transaction_no=%s"%(sale.number,transaction_no))
            sale.payment_received()
        settings=get_model("ecom2.settings").browse(1) # XXX: change this
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
    def payment_pending(self,context={}):
        """Resolve the redirect URL for a pending payment notification."""
        res=super().payment_pending(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            return
        sale_id=res[0]
        settings=get_model("ecom2.settings").browse(1) # XXX: change this
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
    def payment_error(self,context={}):
        """Resolve the redirect URL for a failed payment notification."""
        res=super().payment_error(context=context)
        if res:
            return res
        transaction_no=context.get("transaction_no")
        res=get_model("sale.order").search([["transaction_no","=",transaction_no]])
        if not res:
            return
        sale_id=res[0]
        settings=get_model("ecom2.settings").browse(1)
        if settings.ecom_return_url:
            url=settings.ecom_return_url+str(sale_id)
        else:
            url="/ui#name=sale&mode=form&active_id=%d"%sale_id
        return {
            "next_url": url,
        }
PaymentMethod.register()
| Python | 0.000035 |
3f6a08d92f46c606e99c14eb12849e1386704cf3 | Bump version to 0.6 | oscar/__init__.py | oscar/__init__.py | import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 6, 0, 'final', 0)
def get_short_version():
    """Return only the ``major.minor`` part of the version."""
    major, minor = VERSION[0], VERSION[1]
    return '{0}.{1}'.format(major, minor)
def get_version():
    """Return the full version string, including any pre-release suffix."""
    major, minor, patch, stage, build = VERSION
    version = '%s.%s' % (major, minor)
    # A non-zero patch level becomes a third component.
    if patch:
        version += '.%s' % patch
    # 'alpha' with build 0 is the special pre-alpha marker; any other
    # non-final stage is spelled out with its build number.
    if stage == 'alpha' and build == 0:
        version += ' pre-alpha'
    elif stage != 'final':
        version = '%s %s %s' % (version, stage, build)
    return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.analytics',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.catalogue.reviews',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.promotions',
'oscar.apps.search',
'oscar.apps.voucher',
'oscar.apps.wishlists',
'oscar.apps.dashboard',
'oscar.apps.dashboard.reports',
'oscar.apps.dashboard.users',
'oscar.apps.dashboard.orders',
'oscar.apps.dashboard.promotions',
'oscar.apps.dashboard.catalogue',
'oscar.apps.dashboard.offers',
'oscar.apps.dashboard.partners',
'oscar.apps.dashboard.pages',
'oscar.apps.dashboard.ranges',
'oscar.apps.dashboard.reviews',
'oscar.apps.dashboard.vouchers',
'oscar.apps.dashboard.communications',
# 3rd-party apps that oscar depends on
'haystack',
'treebeard',
'sorl.thumbnail',
]
def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides.

    Each override is matched against the tail of a core app path (e.g.
    ``'myproject.shipping'`` replaces ``'oscar.apps.shipping'``); dashboard
    apps can only be overridden by other dashboard apps.
    """
    if not overrides:
        # Return a copy so callers can mutate the result (e.g. append their
        # own apps) without corrupting the module-level OSCAR_CORE_APPS.
        return list(OSCAR_CORE_APPS)

    def get_app_label(app_label, overrides):
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if override.endswith(pattern):
                # Prevent a non-dashboard override from hijacking a
                # dashboard app whose name merely ends the same way.
                if 'dashboard' in override and 'dashboard' not in pattern:
                    continue
                return override
        return app_label

    apps = []
    for app_label in OSCAR_CORE_APPS:
        apps.append(get_app_label(app_label, overrides))
    return apps
| import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 6, 0, 'beta', 1)
def get_short_version():
    """Return only the 'major.minor' part of the version."""
    return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
    """Return the full version string, e.g. '0.6' or '0.6 beta 1'."""
    version = '%s.%s' % (VERSION[0], VERSION[1])
    if VERSION[2]:
        # Append 3rd digit if > 0
        version = '%s.%s' % (version, VERSION[2])
    # ('alpha', 0) is the special pre-alpha marker; any other non-final
    # stage is spelled out together with its build number.
    if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
    elif VERSION[3] != 'final':
        version = '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
'oscar',
'oscar.apps.analytics',
'oscar.apps.checkout',
'oscar.apps.address',
'oscar.apps.shipping',
'oscar.apps.catalogue',
'oscar.apps.catalogue.reviews',
'oscar.apps.partner',
'oscar.apps.basket',
'oscar.apps.payment',
'oscar.apps.offer',
'oscar.apps.order',
'oscar.apps.customer',
'oscar.apps.promotions',
'oscar.apps.search',
'oscar.apps.voucher',
'oscar.apps.wishlists',
'oscar.apps.dashboard',
'oscar.apps.dashboard.reports',
'oscar.apps.dashboard.users',
'oscar.apps.dashboard.orders',
'oscar.apps.dashboard.promotions',
'oscar.apps.dashboard.catalogue',
'oscar.apps.dashboard.offers',
'oscar.apps.dashboard.partners',
'oscar.apps.dashboard.pages',
'oscar.apps.dashboard.ranges',
'oscar.apps.dashboard.reviews',
'oscar.apps.dashboard.vouchers',
'oscar.apps.dashboard.communications',
# 3rd-party apps that oscar depends on
'haystack',
'treebeard',
'sorl.thumbnail',
]
def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides
    """
    if not overrides:
        return OSCAR_CORE_APPS
    def get_app_label(app_label, overrides):
        # Match overrides against the app path with the 'oscar.apps.'
        # prefix stripped; a dashboard app may only be replaced by another
        # dashboard app.
        pattern = app_label.replace('oscar.apps.', '')
        for override in overrides:
            if override.endswith(pattern):
                if 'dashboard' in override and 'dashboard' not in pattern:
                    continue
                return override
        return app_label
    apps = []
    for app_label in OSCAR_CORE_APPS:
        apps.append(get_app_label(app_label, overrides))
    return apps
| Python | 0 |
d9710fa2af26ab4ab5fef62adc5be670437bea68 | Create logistics_regression.py | logistics_regression.py | logistics_regression.py | #!/usr/bin/python
# -*-coding:utf-8 -*-
from math import exp
import random
import data_tool
#y = x1*a1 + x2*a2 + x3*a3 + ... + xn*an + b
def predict(data,
            coef,
            bias):
    """Return sigmoid(data . coef + bias) for a single sample.

    Parameters
    ----------
    data : sequence of float
        Feature values for one sample (must have at least len(coef) items).
    coef : sequence of float
        One weight per feature.
    bias : float
        Intercept term, added exactly once.
    """
    # Fix: previously the bias was added once per feature
    # (pred += data[i]*coef[i] + bias), contradicting the stated model
    # y = x1*a1 + x2*a2 + ... + xn*an + b.  Add it a single time.
    pred = bias
    for index in range(len(coef)):
        pred += data[index] * coef[index]
    return sigmoid(pred)
def sigmoid(x):
    """Numerically safe logistic function 1 / (1 + e**-x).

    The exponent is clamped to [-60, 60], so ``exp`` can never overflow;
    beyond that range the sigmoid is already saturated to within ~1e-26
    of 0 or 1.  This replaces the previous dead try/except (with its
    ``print`` side effect), which could never fire because the input was
    already clamped before ``exp`` was called.
    """
    if x > 60:
        x = 60
    elif x < -60:
        x = -60
    return 1.0 / (1.0 + exp(-x))
def sgd(train,
        labels,
        coef,
        bias,
        learn_rate,
        nepoch):
    """Gradient-descent training loop over ``nepoch`` passes of the data.

    Mutates ``coef`` in place and returns the updated ``(coef, bias)``.
    """
    for epoch in range(nepoch):
        sum_error = 0.0
        for index in range(len(train)):
            pred = predict(train[index], coef, bias)
            # NOTE(review): sum_error accumulates over ALL samples seen so
            # far in the epoch, so each update uses the running total
            # rather than the per-sample error -- confirm this is intended.
            sum_error += (labels[index] - pred)
            bias = (bias + learn_rate * sum_error * pred * (1 - pred))
            for i in range(len(coef)):
                coef[i] = (coef[i] + learn_rate * sum_error * pred * (1 - pred) * train[index][i])
    return coef, bias
#generate standard normal distribution
def param_gauss(size):
    """Draw ``size`` samples from N(0, 0.05) for parameter initialisation."""
    return [random.gauss(mu=0, sigma=0.05) for _ in range(size)]
def logistic_regression(features_train, labels_train,
                        features_test, labels_test,
                        learn_rate, nepoch):
    """Train a logistic model on the training set and score the test set.

    Returns ``(pred, coef, bias)`` where ``pred`` contains the raw sigmoid
    probability for each test sample (not thresholded class labels).
    Note: ``labels_test`` is accepted but never used in this function.
    """
    # Randomly initialise weights and bias from a narrow Gaussian.
    coef = param_gauss(len(features_train[0]))
    bias = param_gauss(1)[0]
    coef, bias = sgd(features_train, labels_train, coef, bias, learn_rate, nepoch)
    pred = []
    for index in range(len(features_test)):
        pred.append(predict(features_test[index], coef, bias=bias))
    return pred, coef, bias
def accuracy(pred, y_true):
    """Return the fraction of positions where ``pred`` equals ``y_true``."""
    matches = sum(1.0 for index, guess in enumerate(pred)
                  if guess == y_true[index])
    return matches / len(pred)
# Smoke test: train 5 models on the same 70/30 split and report accuracy.
features_train, labels_train, features_test, labels_test = data_tool.train_test_split(
                                                            data_tool.load_data(),
                                                            test_rate=0.3)
for i in range(5):
    print 'cycle +++++++++++++++++++++++++++++++++++++++++++++++++++++ ', i
    pred, coef, bias = logistic_regression(features_train, labels_train, features_test, labels_test,
                                           learn_rate=0.02, nepoch=100)
    # NOTE(review): ``pred`` holds raw sigmoid probabilities while the
    # labels are discrete digits, so the exact-equality comparison inside
    # accuracy() will rarely match -- predictions likely need thresholding
    # or argmax first; confirm.
    score = accuracy(pred, labels_test)
    print 'coef is: ', coef
    print 'bias is: ', bias
    print 'accuracy is: ', score
| Python | 0.000024 | |
9fc090e5ea669630e3a9932d21f28285b6aa0dc5 | add widget with nltk and Stanford taggers | orangecontrib/text/widgets/owpostagging.py | orangecontrib/text/widgets/owpostagging.py | from PyQt4 import QtGui
from Orange.widgets import gui
from Orange.widgets import settings
from Orange.widgets.widget import OWWidget
from orangecontrib.text.corpus import Corpus
from orangecontrib.text.tag.pos import taggers, StanfordPOSTagger
from orangecontrib.text.widgets.utils import ResourceLoader
class Input:
    """Names of the widget's input signals."""
    CORPUS = 'Corpus'
class Output:
    """Names of the widget's output signals."""
    CORPUS = 'Corpus'
class OWPOSTagger(OWWidget):
    """Marks up words with corresponding part of speech."""
    name = 'POS Tagging'
    icon = 'icons/POSTagging.svg'
    priority = 50
    inputs = [
        (Input.CORPUS, Corpus, 'set_data'),
    ]
    outputs = [
        (Output.CORPUS, Corpus)
    ]
    want_main_area = False
    resizing_enabled = False
    # Settings persisted between sessions
    autocommit = settings.Setting(True)            # re-tag on every change
    tagger_index = settings.Setting(0)             # selected radio button index
    recent_stanford_models = settings.Setting([])  # previously used model paths
    path_to_stanford_jar = settings.Setting(None)
    # Index of the Stanford option: it is appended after the built-in taggers.
    STANFORD = len(taggers)
    # (error slot, message) shown when no tagger has been configured.
    NOT_CONFIGURED = 1, "Tagger wasn't configured"
    def __init__(self):
        """Build the GUI: one radio button per tagger, the Stanford
        resource loader box, and the report/commit buttons."""
        super().__init__()
        self.corpus = None
        self.tagger = None
        self.stanford_tagger = None
        button_group = QtGui.QButtonGroup(self, exclusive=True)
        button_group.buttonClicked[int].connect(self.change_tagger)
        layout = QtGui.QVBoxLayout()
        layout.setSpacing(15)
        self.controlArea.layout().addLayout(layout)
        # One radio button per available tagger, Stanford last.
        for i, tagger in enumerate(taggers + [StanfordPOSTagger]):
            rb = QtGui.QRadioButton(text=tagger.name)
            rb.setChecked(i == self.tagger_index)
            button_group.addButton(rb, i)
            layout.addWidget(rb)
        box = QtGui.QGroupBox('Stanford')
        layout = QtGui.QVBoxLayout(box)
        layout.setMargin(0)
        stanford_tagger = ResourceLoader(self.recent_stanford_models,
                                         model_format='Stanford model (*.model *.tagger)',
                                         provider_format='Java file (*.jar)',
                                         model_button_label='Trained Model',
                                         provider_button_label='Stanford POS Tagger')
        # Try to restore the most recently used model/jar pair, if any.
        self.set_stanford_tagger((self.recent_stanford_models[0] if len(self.recent_stanford_models) else None,
                                  self.path_to_stanford_jar))
        stanford_tagger.valueChanged.connect(self.set_stanford_tagger)
        layout.addWidget(stanford_tagger)
        self.controlArea.layout().addWidget(box)
        buttons_layout = QtGui.QHBoxLayout()
        buttons_layout.addWidget(self.report_button)
        buttons_layout.addSpacing(15)
        buttons_layout.addWidget(
            gui.auto_commit(None, self, 'autocommit', 'Commit', box=False)
        )
        self.controlArea.layout().addLayout(buttons_layout)
    def change_tagger(self, i):
        """Radio-button slot: switch to tagger ``i`` and re-run tagging."""
        if i != self.tagger_index:
            self.tagger_index = i
            self.on_change()
    def set_data(self, data):
        """Input slot: store the corpus and re-run tagging."""
        self.corpus = data
        self.on_change()
    def commit(self):
        """Pick the currently selected tagger and apply it."""
        if self.tagger_index == self.STANFORD:
            self.tagger = self.stanford_tagger
        else:
            self.tagger = taggers[self.tagger_index]
        self.apply()
    def apply(self):
        """Tag the corpus and send it downstream, or report why not.

        Error slot 2 holds Stanford configuration errors, slot 1 the
        generic 'not configured' message.
        """
        if self.corpus is not None:
            if not self.tagger:
                if self.tagger_index == self.STANFORD:
                    self.error(2, self.stanford_error)
                else:
                    self.error(*self.NOT_CONFIGURED)
            else:
                # Clear the 'not configured' error before tagging.
                self.error(self.NOT_CONFIGURED[0])
                new_corpus = self.tagger.tag_corpus(self.corpus)
                self.send(Output.CORPUS, new_corpus)
    def on_change(self):
        """Clear both error slots and recompute the output."""
        self.error(1)
        self.error(2)
        self.commit()
    def set_stanford_tagger(self, paths=None):
        """Validate the (model, jar) pair and (re)build the Stanford tagger.

        On a failed check the error text is kept in ``self.stanford_error``
        for apply() to display; ``stanford_tagger`` stays None.
        """
        self.stanford_tagger = None
        try:
            StanfordPOSTagger.check(*paths)
            self.stanford_tagger = StanfordPOSTagger(*paths)
        except ValueError as e:
            self.stanford_error = str(e)
        self.on_change()
    def send_report(self):
        """Report which tagger produced the output."""
        self.report_items('Tagger', (('Name', self.tagger.name),))
if __name__ == '__main__':
    # Manual smoke test: show the widget on the bundled 'deerwester' corpus.
    app = QtGui.QApplication([])
    widget = OWPOSTagger()
    widget.set_data(Corpus.from_file('deerwester'))
    widget.show()
    app.exec()
    widget.saveSettings()
| Python | 0 | |
8ee7798af73f374485c1a97e82a98fd5ff8b3c48 | Add module for loading specific classes | nova/loadables.py | nova/loadables.py | # Copyright (c) 2011-2012 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Generic Loadable class support.
Meant to be used by such things as scheduler filters and weights where we
want to load modules from certain directories and find certain types of
classes within those modules. Note that this is quite different than
generic plugins and the pluginmanager code that exists elsewhere.
Usage:
Create a directory with an __init__.py with code such as:
class SomeLoadableClass(object):
pass
class MyLoader(nova.loadables.BaseLoader)
def __init__(self):
super(MyLoader, self).__init__(SomeLoadableClass)
If you create modules in the same directory and subclass SomeLoadableClass
within them, MyLoader().get_all_classes() will return a list
of such classes.
"""
import inspect
import os
import sys
from nova import exception
from nova.openstack.common import importutils
class BaseLoader(object):
    """Finds and loads classes of a given type from the package in which
    a subclass of this loader is defined (see the module docstring for
    the intended usage pattern)."""
    def __init__(self, loadable_cls_type):
        # The subclass must be defined in a package's __init__.py: we read
        # the package's filesystem path (__path__) and dotted name from
        # the module that defines it.
        mod = sys.modules[self.__class__.__module__]
        self.path = mod.__path__[0]
        self.package = mod.__package__
        self.loadable_cls_type = loadable_cls_type
    def _is_correct_class(self, obj):
        """Return whether an object is a class of the correct type and
        is not prefixed with an underscore.
        """
        return (inspect.isclass(obj) and
                (not obj.__name__.startswith('_')) and
                issubclass(obj, self.loadable_cls_type))
    def _get_classes_from_module(self, module_name):
        """Get the classes from a module that match the type we want."""
        classes = []
        module = importutils.import_module(module_name)
        for obj_name in dir(module):
            # Skip objects that are meant to be private.
            if obj_name.startswith('_'):
                continue
            itm = getattr(module, obj_name)
            if self._is_correct_class(itm):
                classes.append(itm)
        return classes
    def get_all_classes(self):
        """Get the classes of the type we want from all modules found
        in the directory that defines this class.
        """
        classes = []
        for dirpath, dirnames, filenames in os.walk(self.path):
            # Translate the directory position into a relative sub-package.
            relpath = os.path.relpath(dirpath, self.path)
            if relpath == '.':
                relpkg = ''
            else:
                relpkg = '.%s' % '.'.join(relpath.split(os.sep))
            for fname in filenames:
                root, ext = os.path.splitext(fname)
                # Only real .py modules; the package __init__ is skipped.
                if ext != '.py' or root == '__init__':
                    continue
                module_name = "%s%s.%s" % (self.package, relpkg, root)
                mod_classes = self._get_classes_from_module(module_name)
                classes.extend(mod_classes)
        return classes
    def get_matching_classes(self, loadable_class_names):
        """Get loadable classes from a list of names. Each name can be
        a full module path or the full path to a method that returns
        classes to use. The latter behavior is useful to specify a method
        that returns a list of classes to use in a default case.
        """
        classes = []
        for cls_name in loadable_class_names:
            obj = importutils.import_class(cls_name)
            if self._is_correct_class(obj):
                classes.append(obj)
            elif inspect.isfunction(obj):
                # Get list of classes from a function
                for cls in obj():
                    classes.append(cls)
            else:
                error_str = 'Not a class of the correct type'
                raise exception.ClassNotFound(class_name=cls_name,
                                              exception=error_str)
        return classes
| Python | 0.000023 | |
a8412ea3d8d72499db48e4a55c6f7b416d307025 | Add plan tests | tests/test_plan.py | tests/test_plan.py | import datetime
from fbchat._plan import GuestStatus, Plan
def test_plan_properties():
    """The invited/going/declined properties partition Plan.guests by status."""
    plan = Plan(time=..., title=...)
    plan.guests = {
        "1234": GuestStatus.INVITED,
        "2345": GuestStatus.INVITED,
        "3456": GuestStatus.GOING,
        "4567": GuestStatus.DECLINED,
    }
    assert set(plan.invited) == {"1234", "2345"}
    assert plan.going == ["3456"]
    assert plan.declined == ["4567"]
def test_plan_from_pull():
    """Plan._from_pull parses a pull-channel event payload (note that
    guest_state_list arrives as a JSON-encoded string)."""
    data = {
        "event_timezone": "",
        "event_creator_id": "1234",
        "event_id": "1111",
        "event_type": "EVENT",
        "event_track_rsvp": "1",
        "event_title": "abc",
        "event_time": "1500000000",
        "event_seconds_to_notify_before": "3600",
        "guest_state_list": (
            '[{"guest_list_state":"INVITED","node":{"id":"1234"}},'
            '{"guest_list_state":"INVITED","node":{"id":"2356"}},'
            '{"guest_list_state":"DECLINED","node":{"id":"3456"}},'
            '{"guest_list_state":"GOING","node":{"id":"4567"}}]'
        ),
    }
    # 1500000000 epoch seconds == 2017-07-14 02:40 UTC.
    plan = Plan(
        time=datetime.datetime(2017, 7, 14, 2, 40, tzinfo=datetime.timezone.utc),
        title="abc",
    )
    plan.uid = "1111"
    plan.author_id = "1234"
    plan.guests = {
        "1234": GuestStatus.INVITED,
        "2356": GuestStatus.INVITED,
        "3456": GuestStatus.DECLINED,
        "4567": GuestStatus.GOING,
    }
    assert plan == Plan._from_pull(data)
def test_plan_from_fetch():
    """Plan._from_fetch parses a fetch-API payload, where ids are ints and
    guest states live under the event_members mapping."""
    data = {
        "message_thread_id": 123456789,
        "event_time": 1500000000,
        "creator_id": 1234,
        "event_time_updated_time": 1450000000,
        "title": "abc",
        "track_rsvp": 1,
        "event_type": "EVENT",
        "status": "created",
        "message_id": "mid.xyz",
        "seconds_to_notify_before": 3600,
        "event_time_source": "user",
        "repeat_mode": "once",
        "creation_time": 1400000000,
        "location_id": 0,
        "location_name": None,
        "latitude": "",
        "longitude": "",
        "event_id": 0,
        "trigger_message_id": "",
        "note": "",
        "timezone_id": 0,
        "end_time": 0,
        "list_id": 0,
        "payload_id": 0,
        "cu_app": "",
        "location_sharing_subtype": "",
        "reminder_notif_param": [],
        "workplace_meeting_id": "",
        "genie_fbid": 0,
        "galaxy": "",
        "oid": 1111,
        "type": 8128,
        "is_active": True,
        "location_address": None,
        "event_members": {
            "1234": "INVITED",
            "2356": "INVITED",
            "3456": "DECLINED",
            "4567": "GOING",
        },
    }
    plan = Plan(
        time=datetime.datetime(2017, 7, 14, 2, 40, tzinfo=datetime.timezone.utc),
        title="abc",
        location="",
        location_id="",
    )
    plan.uid = 1111
    plan.author_id = 1234
    plan.guests = {
        "1234": GuestStatus.INVITED,
        "2356": GuestStatus.INVITED,
        "3456": GuestStatus.DECLINED,
        "4567": GuestStatus.GOING,
    }
    assert plan == Plan._from_fetch(data)
def test_plan_from_graphql():
    """Plan._from_graphql parses a GraphQL payload, where guests arrive as
    edges under event_reminder_members."""
    data = {
        "id": "1111",
        "lightweight_event_creator": {"id": "1234"},
        "time": 1500000000,
        "lightweight_event_type": "EVENT",
        "location_name": None,
        "location_coordinates": None,
        "location_page": None,
        "lightweight_event_status": "CREATED",
        "note": "",
        "repeat_mode": "ONCE",
        "event_title": "abc",
        "trigger_message": None,
        "seconds_to_notify_before": 3600,
        "allows_rsvp": True,
        "related_event": None,
        "event_reminder_members": {
            "edges": [
                {"node": {"id": "1234"}, "guest_list_state": "INVITED"},
                {"node": {"id": "2356"}, "guest_list_state": "INVITED"},
                {"node": {"id": "3456"}, "guest_list_state": "DECLINED"},
                {"node": {"id": "4567"}, "guest_list_state": "GOING"},
            ]
        },
    }
    plan = Plan(
        time=datetime.datetime(2017, 7, 14, 2, 40, tzinfo=datetime.timezone.utc),
        title="abc",
        location="",
        location_id="",
    )
    plan.uid = "1111"
    plan.author_id = "1234"
    plan.guests = {
        "1234": GuestStatus.INVITED,
        "2356": GuestStatus.INVITED,
        "3456": GuestStatus.DECLINED,
        "4567": GuestStatus.GOING,
    }
    assert plan == Plan._from_graphql(data)
| Python | 0.000001 | |
5211117033f596bd506e81e8825ddfb08634c25e | Create battery.py | client/iOS/battery.py | client/iOS/battery.py | # coding: utf-8
import collections, objc_util
battery_info = collections.namedtuple('battery_info', 'level state')
def get_battery_info():
    """Return a ``battery_info(level, state)`` tuple for this iOS device.

    ``level`` is the charge as an int percentage; ``state`` is one of
    'unknown', 'unplugged', 'charging' or 'full', indexed by UIKit's
    batteryState value.
    """
    device = objc_util.ObjCClass('UIDevice').currentDevice()
    # Monitoring is enabled for the duration of the read (presumably
    # required for valid readings -- see UIDevice docs) and switched back
    # off in the finally block even if a lookup fails.
    device.setBatteryMonitoringEnabled_(True)
    try:
        return battery_info(int(device.batteryLevel() * 100),
                            'unknown unplugged charging full'.split()[device.batteryState()])
    finally:
        device.setBatteryMonitoringEnabled_(False)
def battery_is_low(threshold = 20):
    """Return True when the charge is at or below ``threshold`` percent and
    the device is not charging.

    'unknown' and 'unplugged' states (both start with 'un') count as not
    charging.
    """
    # Use a local name that does not shadow the module-level
    # ``battery_info`` namedtuple class.
    info = get_battery_info()
    return (info.level <= threshold
            and info.state.startswith('un'))
# Public API of this module.
__all__ = 'get_battery_info battery_is_low'.split()
if __name__ == '__main__':
    # Quick manual check when run directly (e.g. inside Pythonista).
    print(get_battery_info())
    print(battery_is_low(15))
| Python | 0.000028 | |
840d4d555b7b2858ca593251f1593943b10b135b | Add setup_egg.py | setup_egg.py | setup_egg.py | #!/usr/bin/env python
"""Wrapper to run setup.py using setuptools."""
from setuptools import setup
################################################################################
# Call the setup.py script, injecting the setuptools-specific arguments.
# Arguments understood only by setuptools (not by plain distutils).
extra_setuptools_args = dict(
    tests_require=['nose'],
    test_suite='nose.collector',
    zip_safe=False,
)
if __name__ == '__main__':
    # Run the regular setup.py with the setuptools extras injected into its
    # namespace.  NOTE(review): execfile() exists only in Python 2; this
    # wrapper raises NameError under Python 3.
    execfile('setup.py', dict(__name__='__main__',
                              extra_setuptools_args=extra_setuptools_args))
| Python | 0.000001 | |
71a6c671f802e3b1c123b083ef34f81efeb55750 | Create MakeMaskfiles.py | MakeMaskfiles.py | MakeMaskfiles.py | import gzip
import sys
from collections import defaultdict
def readFasta(infile):
    """Read a FASTA file (optionally gzip-compressed) and return its
    sequence data as one concatenated string.

    Header lines (any line containing '>') are skipped; spaces inside
    sequence lines are removed; all records are concatenated together.

    Parameters
    ----------
    infile : str
        Path to a FASTA file. A path containing '.gz' is treated as
        gzip-compressed (matching the original check).
    """
    # Pick the opener once instead of duplicating the whole parse loop for
    # the compressed and plain cases.  Opening in text mode ('rt') also
    # fixes the gzip branch under Python 3, where the default binary mode
    # would yield bytes and make the '>' membership test raise TypeError.
    opener = gzip.open if '.gz' in infile else open
    parts = []
    with opener(infile, 'rt') as data:
        for line in data:
            if '>' in line:
                continue  # header line; the record name itself is unused
            parts.append(line.strip().replace(' ', ''))
    # join() avoids the quadratic cost of += on chromosome-sized strings.
    return ''.join(parts)
# Command line: repeatmask.fa callable_mask.fa window_size chrom outprefix
_, repeatmask_file, callable_mask_file, window_size, chrom, outprefix = sys.argv
window_size = int(window_size)
#repeatmask_file = "helperfiles/RepeatMasks/chr{}.fa.masked"
#callable_mask_file = "helperfiles/AccessibilityMasks/20140520.chr{}.strict_mask.fasta.gz"
# NOTE(review): bases_called is initialised but never updated or used.
bases_called = 0
# Mask file for repetitive regions
repeatmask = readFasta(repeatmask_file)
callable_mask = readFasta(callable_mask_file)
# Emit two files: <prefix>.bed with runs of callable ('Called') bases and
# <prefix>.txt with the callable fraction per fixed-size window.
with open(outprefix + '.bed','w') as outbed, open (outprefix + '.txt','w') as out:
    d = defaultdict(int)
    prev_base = 'Notcalled'
    start = 0
    for i in range(len(callable_mask)):
        repeat_base = repeatmask[i]
        callable_base = callable_mask[i]
        # Round down to nearest window start
        window = i - i%window_size
        # Adding 0 forces the key into the defaultdict so empty windows
        # still appear in the per-window output.
        d[window] += 0
        # A base is 'Called' when it is not repeat-masked ('N') and the
        # strict accessibility mask marks it passing ('P').
        if repeat_base != 'N' and callable_base == 'P':
            current_base = 'Called'
            d[window] += 1
        else:
            current_base = 'Notcalled'
        # extend the current run
        if current_base == prev_base:
            end = i
        # Start a new run; flush the previous one if it was callable.
        if current_base != prev_base:
            if prev_base == 'Called':
                outbed.write('{}\t{}\t{}\t{}\n'.format(chrom, start, end, prev_base))
            start = i
            end = i
            prev_base = current_base
    # Flush a trailing callable run that reached the end of the mask.
    if prev_base == 'Called':
        outbed.write('{}\t{}\t{}\t{}\n'.format(chrom, start, end, prev_base))
    # Write the per-window callable fraction (max(d) raises on an empty mask).
    for window in range(0, max(d)+window_size, window_size):
        out.write('{}\t{}\t{}\n'.format(chrom, window, d[window] / float(window_size)))
| Python | 0.000001 | |
8e4cbd3dd09aac90cf2d71adb5ad841274b60575 | Create convolution_digit_recognition.py | Convolution_Neural_Networks/convolution_digit_recognition.py | Convolution_Neural_Networks/convolution_digit_recognition.py | # Optimise a neural network
# Python 2 script (Queue module, print statements): trains a small
# convolution-style network on MNIST-like CSV data with stochastic gradient
# descent, spawning one worker thread per sample in each mini-batch.
import threading
from Queue import Queue
from numpy import *
from create_MEGA_THETA import *
from RLU_forward_backward import *
from get_overlaps import *
# get data from csv file into array (label in column 0, pixels after it)
data = genfromtxt('train2.csv', delimiter=',')
# get a subset for testing
num_cpus = 4
N = 5*num_cpus # batch size of data subset
y_vals = data[:,0] # outputs
x_vals = data[:,1:].T # inputs
#x_vals = (x_vals > 0)*ones(shape(x_vals)) # make inputs 0/1
data_size = size(y_vals)
# define some NN layers
base_in = 783  # highest raw pixel index (28x28 image -> indices 0..783)
xi = array([1525,10])  # layer sizes: convolution features in, 10 digits out
mangle_upper = 30  # max number of pixels randomly corrupted per batch
# randomly initiate NN values
MT = create_MEGA_THETA(xi)
#MT = genfromtxt('convltn_to_10_backup.csv', delimiter=',')
MTDIM = shape(MT)
DELTA = zeros([MTDIM[0],MTDIM[1]])  # accumulated gradient for the batch
# learning rate
alpha = 1*N**2
beta = .02  # NOTE(review): regularisation term using beta is commented out below
# create some Queues for DELTA, time, good — shared mutable state the
# forwardBackward workers read/update
time_queue = Queue()
good_queue = Queue()
DELTA_queue = Queue()
# initiate queues: running sample count and running correct count
time_queue.put(1.0)
good_queue.put(0.0)
success = 0 # ouch
counter = 0 # meh
# execute backprop procedure until accuracy is high
while success < .9999999: # success rate goal
    selection = random.random_integers(0,data_size-1,N) # stochastic gradient descent
    # get some pixel labels to mangle (input-corruption regularisation)
    mangle0 = random.randint(0,mangle_upper)
    mangle1 = random.randint(0,mangle_upper)
    random_mangle0 = random.random_integers(0,base_in,mangle0)
    random_mangle1 = random.random_integers(0,base_in,mangle1)
    batch_x_vals = x_vals[:,selection] # SGD batch
    batch_y_vals = y_vals[selection] # SGD batch
    # mangle pixels, some --> 0 some --> 1
    batch_x_vals[random_mangle0,:] = 0
    batch_x_vals[random_mangle1,:] = 1
    DELTA_queue.put(DELTA)
    DELTA_queue.task_done()
    # run backprop optimisation
    threads = [] # use multi-threading
    for i in range(0,N):
        # input vector
        x = batch_x_vals[:,i]
        overlaps = get() # from get_overlaps import
        # replicate overlapping pixel groups to build the convolution input
        x1 = x[overlaps[0]]
        x2 = x[overlaps[1]]
        x4 = x[overlaps[2]]
        x = hstack((x1, x2, x2, x4, x4, x4, x4))
        y = zeros(10) # output of NN (row-less): one-hot target vector
        y[batch_y_vals[i]] = 1
        thr = threading.Thread(target=forwardBackward,
            args=(xi, x, y, MT, time_queue, good_queue, DELTA_queue))
        threads.append(thr)
    print 'StArTiNg ThReAdS'
    # start all threads
    for i in range(0,N):
        threads[i].start()
    # spin until all threads finish
    for i in range(0,N):
        threads[i].join()
    # running accuracy = correct classifications / total classified
    good = good_queue.get()
    time = time_queue.get()
    success = good/time
    print 'Success: ',success
    good_queue.put(good)
    good_queue.task_done()
    time_queue.put(time)
    time_queue.task_done()
    DELTA = DELTA_queue.get()
    print 'Queue size: ',DELTA_queue.qsize()
    # gradient step (weight-decay term intentionally commented out)
    MT = MT - (alpha/N)*DELTA #- N*beta*MT
    DELTA = 0*DELTA
    # periodically checkpoint the weights to disk
    if counter == 50:
        savetxt('convltn_to_10.csv', MT, delimiter=',')
        print 'Success: ',success
        print '\n### SAVED DATA to convltn_to_10.csv ###\n'
        counter = 0 # reset
    counter += 1
# final save once the accuracy target has been reached
savetxt('convltn_to_10.csv', MT, delimiter=',')
| Python | 0.999784 | |
675de92e16e268badd8c6f5de992c3901cc8f2ce | Update Category Model | apps/shop/migrations/0004_category_parent_category.py | apps/shop/migrations/0004_category_parent_category.py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-11 19:34
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add an optional self-referential ``parent_category`` FK to Category,
    allowing categories to be organised as a tree."""

    dependencies = [
        ('shop', '0003_product_model_name'),
    ]

    operations = [
        migrations.AddField(
            model_name='category',
            name='parent_category',
            # blank/null so existing rows and top-level categories need no parent
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category'),
        ),
    ]
| Python | 0 | |
f2d1421555f00f7bcb77f43cd010c221045c6bfd | Add tests for nd-shifty | tests/console/test_shifty.py | tests/console/test_shifty.py | import click.testing
from netdumplings.console.shifty import shifty
from netdumplings.exceptions import NetDumplingsError
class TestShifty:
    """
    Test the nd-shifty commandline tool.
    """
    def test_shifty(self, mocker):
        """
        Test that the DumplingHub is instantiated as expected and that run()
        is called.
        """
        # Patch the hub class so no real server is started.
        mock_hub = mocker.patch('netdumplings.DumplingHub')

        runner = click.testing.CliRunner()
        result = runner.invoke(
            shifty,
            [
                '--address', 'testhost',
                '--in-port', 1001,
                '--out-port', 1002,
                '--status-freq', 99,
            ],
        )

        # The CLI flags must be passed through to DumplingHub verbatim.
        mock_hub.assert_called_once_with(
            address='testhost',
            in_port=1001,
            out_port=1002,
            status_freq=99,
        )

        mock_hub.return_value.run.assert_called_once()
        assert result.exit_code == 0

    def test_shifty_with_error(self, mocker):
        """
        Test that a NetDumplingsError in DumplingHub.run() results in shifty
        exiting with status code 1.
        """
        mock_hub = mocker.patch('netdumplings.DumplingHub')
        # Simulate the hub failing at runtime.
        mock_hub.return_value.run.side_effect = NetDumplingsError

        runner = click.testing.CliRunner()
        result = runner.invoke(shifty)

        assert result.exit_code == 1
| Python | 0 | |
dd983ae232829559766bcdf4d2ea58861b8a47ad | Bring your own daemon. | varnish_statd.py | varnish_statd.py | #!/usr/bin/env python
import time
import os
from pprint import pprint
import varnishapi
def stat(name=None):
    """Return a flat ``{counter_name: value}`` dict of varnishstat counters.

    Args:
        name: Optional varnish instance name (passed as ``-n``); the default
            instance is queried when omitted.
    """
    if name is None:
        vsc = varnishapi.VarnishStat()
    else:
        vsc = varnishapi.VarnishStat(opt=["-n", name])
    try:
        r = vsc.getStats()
        # .items() (not the Python-2-only .iteritems()) keeps this consistent
        # with the rest of the file, which already uses print() calls.
        # Each stat entry is a dict carrying the counter in its 'val' field.
        values = {k: v['val'] for k, v in r.items()}
    finally:
        # Always release the varnish API handle, even if getStats() fails.
        vsc.Fini()
    return values
# --- Configuration from the environment ---
names = os.getenv('VARNISH_STATD_NAMES')
if names:
    names = names.split(',')
else:
    # a single None entry means "poll the default varnish instance"
    names = (None,)
wait = int(os.getenv('VARNISH_STATD_WAIT', 60))  # seconds between polls
# NOTE(review): CARBON_HOST is read but never used below — the carbon/graphite
# submission appears to be unimplemented; confirm before relying on it.
carbon = os.getenv('CARBON_HOST', '127.0.0.1')
stats = os.getenv("VARNISH_STATD_STATS", "hitmisspass").split(',')
# Poll each configured varnish instance forever, printing the selected
# counter group(s) each cycle.
while True:
    for n in names:
        s = stat(n)
        if 'hitmisspass' in stats:
            for k in ['cache_hit', 'cache_hitpass', 'cache_miss']:
                v = s['MAIN.%s' % k]
                print("%s: %s" % (k, v))
        #pprint(s)
    time.sleep(wait)
| Python | 0 | |
c1d4525d5f43a5c2bfbfd88ab0dd943eb2452574 | add 127 | vol3/127.py | vol3/127.py | from fractions import gcd
if __name__ == "__main__":
LIMIT = 120000
rad = [1] * LIMIT
for i in range(2, LIMIT):
if rad[i] == 1:
for j in range(i, LIMIT, i):
rad[j] *= i
ele = []
for i in range(1, LIMIT):
ele.append([rad[i], i])
ele = sorted(ele)
ans = 0
for c in range(3, LIMIT):
chalf = c / 2
for [ra, a] in ele:
if ra * rad[c] > chalf:
break
b = c - a
if a >= b:
continue
if ra * rad[b] * rad[c] >= c:
continue
if gcd(ra, rad[b]) != 1:
continue
ans += c
print ans
| Python | 0.999999 | |
8e7b57c8bc7be6a061d0c841700291a7d85df989 | add 174 | vol4/174.py | vol4/174.py | if __name__ == "__main__":
    # Project Euler 174: count tile totals t <= 10^6 that form a hollow
    # square lamina in 1..10 distinct ways.  (Python 2 print statement;
    # the enclosing ``if __name__`` header sits on the previous line.)
    L = 10 ** 6
    # count[t] = number of (inner, outer) square pairs using exactly t tiles
    count = [0] * (L + 1)
    for inner in range(1, L / 4 + 1):
        outer = inner + 2
        used = outer * outer - inner * inner  # tiles in the hollow frame
        while used <= L:
            count[used] += 1
            outer += 2  # step by 2 keeps inner/outer parity matched (centred hole)
            used = outer * outer - inner * inner
    # each t with N(t) in 1..10 contributes 1 to the answer
    print sum(map(lambda x: 1 if 1 <= x <= 10 else 0, count))
| Python | 0.999959 | |
86ae2fbda8974c093770a9a3563bd40975202d15 | add display image | displayImage/main.py | displayImage/main.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''display an image using opengl'''
import sys
import PySide
from PySide.QtGui import *
from PySide.QtCore import *
from PySide.QtOpenGL import *
from OpenGL.GL import *
from OpenGL.GL import shaders
import numpy as np
from PIL import Image
def shaderFromFile(shaderType, shaderFile):
    """Read GLSL source from *shaderFile* and compile it as *shaderType*."""
    with open(shaderFile) as handle:
        source = handle.read()
    return shaders.compileShader(source, shaderType)
class MyGLWidget(QGLWidget):
    """QGLWidget that displays one image as a textured quad, rendered with
    a minimal GLSL shader pair (shader.vert / shader.frag)."""

    def __init__(self, gformat, parent=None):
        super(MyGLWidget, self).__init__(gformat, parent)
        # GL object ids / resources, created lazily in initializeGL()
        self.vaoID = None
        self.vboVerticesID = None
        self.vboIndicesID = None
        self.textureID = None
        self.sprogram = None
        self.vertices = None
        self.indices = None
        self.loadImage('Lenna.png')

    def loadImage(self, imageFile):
        """Load *imageFile* with Pillow and size the window to match it."""
        # BUG FIX: previously this opened ``self.textFileName``, an attribute
        # that is never assigned anywhere, so loadImage always raised
        # AttributeError.  Open the argument that was actually passed in.
        self.im = Image.open(imageFile)
        # set window size to the image's size
        self.setGeometry(40, 40, self.im.size[0], self.im.size[1])
        # set window title (typo "Dispaly" fixed)
        self.setWindowTitle('Display - ' + imageFile)

    def initializeGL(self):
        """One-time GL setup: shaders, quad geometry, and the image texture."""
        glClearColor(0, 0, 0, 0)
        # create shader from file
        vshader = shaderFromFile(GL_VERTEX_SHADER, 'shader.vert')
        fshader = shaderFromFile(GL_FRAGMENT_SHADER, 'shader.frag')
        # compile shaders
        self.sprogram = shaders.compileProgram(vshader, fshader)
        # get attribute location and bind the sampler uniform to unit 0
        glUseProgram(self.sprogram)
        self.vertexAL = glGetAttribLocation(self.sprogram, 'pos')
        self.tmUL = glGetUniformLocation(self.sprogram, 'textureMap')
        glUniform1i(self.tmUL, 0)
        glUseProgram(0)
        # two triangles forming a unit quad in [0,1]x[0,1]
        self.vertices = np.array((0.0, 0.0,
                                  1.0, 0.0,
                                  1.0, 1.0,
                                  0.0, 1.0), dtype=np.float32)
        self.indices = np.array((0, 1, 2,
                                 0, 2, 3), dtype=np.ushort)
        # set up vertex array and buffers
        self.vaoID = glGenVertexArrays(1)
        self.vboVerticesID = glGenBuffers(1)
        self.vboIndicesID = glGenBuffers(1)
        glBindVertexArray(self.vaoID)
        glBindBuffer(GL_ARRAY_BUFFER, self.vboVerticesID)
        # copy vertex data from host memory to GPU memory
        glBufferData(GL_ARRAY_BUFFER, self.vertices.nbytes, self.vertices, GL_STATIC_DRAW)
        # tell opengl how to interpret the vertex data (2 floats per vertex)
        glEnableVertexAttribArray(self.vertexAL)
        glVertexAttribPointer(self.vertexAL, 2, GL_FLOAT, GL_FALSE, 0, None)
        # send the index data too
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vboIndicesID)
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, self.indices.nbytes, self.indices, GL_STATIC_DRAW)
        # flip the image in the Y axis (GL texture origin is bottom-left)
        im = self.im.transpose(Image.FLIP_TOP_BOTTOM)
        # set up texture on unit 0
        self.textureID = glGenTextures(1)
        glActiveTexture(GL_TEXTURE0)
        glBindTexture(GL_TEXTURE_2D, self.textureID)
        # set filters
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
        # set uv coords mode
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)
        # send the image data to GPU memory
        # NOTE(review): Image.tostring() was removed in Pillow 3.0; newer
        # Pillow needs im.tobytes() — kept as-is for the PIL version used here.
        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB, self.im.size[0], self.im.size[1],
                     0, GL_RGB, GL_UNSIGNED_BYTE, im.tostring())
        print("Initialization successfull")

    def resizeGL(self, w, h):
        """Keep the viewport covering the whole widget."""
        glViewport(0, 0, w, h)

    def paintGL(self, *args, **kwargs):
        """Draw the textured quad."""
        # clear the buffers
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
        # activate shader
        glUseProgram(self.sprogram)
        # draw the two indexed triangles
        glDrawElements(GL_TRIANGLES, self.indices.size, GL_UNSIGNED_SHORT, None)
        glUseProgram(0)
if __name__ == '__main__':
    app = QApplication(sys.argv)
    # request an OpenGL 4.3 core-profile context
    gformat = QGLFormat()
    gformat.setVersion(4, 3)
    gformat.setProfile(QGLFormat.CoreProfile)
    mywidget = MyGLWidget(gformat)
    mywidget.show()
    # print information on screen
    # NOTE(review): glGetString needs a current GL context — this relies on
    # show() having realized the widget's context; verify on all platforms.
    print("\tUsing PySide " + PySide.__version__)
    print("\tVendor: " + glGetString(GL_VENDOR))
    print("\tRenderer: " + glGetString(GL_RENDERER))
    print("\tVersion: " + glGetString(GL_VERSION))
    print("\tGLSL: " + glGetString(GL_SHADING_LANGUAGE_VERSION))
    sys.exit(app.exec_())
| Python | 0.000001 | |
a5b4fa261750fa79d61fc16b6061d449aa7e3523 | Add missing block.py | rasterio/block.py | rasterio/block.py | """Raster Blocks"""
from collections import namedtuple
# Lightweight immutable record for one raster block: its (row, col) position
# in the block grid, the window covering it, and its size.
BlockInfo = namedtuple('BlockInfo', 'row col window size')
| Python | 0.000088 | |
65843b537e45b98068566c6cc57e4a3ad139d607 | add variant.py | cendr/views/api/variant.py | cendr/views/api/variant.py | # NEW API
from cendr import api, cache, app
from cyvcf2 import VCF
from flask import jsonify
import re
import sys
from subprocess import Popen, PIPE
def get_region(region):
    """Parse a ``chrom:start-end`` region string.

    Returns:
        A ``(chrom, start, end)`` tuple with ``start``/``end`` as ints, or
        ``None`` when the string does not match the expected format.
    """
    m = re.match("^([0-9A-Za-z]+):([0-9]+)-([0-9]+)$", region)
    if not m:
        # BUG FIX: this branch previously called an undefined name ``msg``,
        # so any malformed region raised NameError.  Return None so callers
        # can handle the failure explicitly.
        return None
    chrom = m.group(1)
    start = int(m.group(2))
    end = int(m.group(3))
    return chrom, start, end
@app.route('/api/variant/<region>')
def variant_from_region(region):
    """Flask endpoint: return raw bcftools VCF lines for a region (max 100 kb)
    of the hosted WI release VCF, as JSON {"out": [...], "comm": "..."}."""
    vcf = "http://storage.googleapis.com/elegansvariation.org/releases/{version}/WI.{version}.vcf.gz".format(version = 20170312)
    # validate the chrom:start-end format before shelling out
    m = re.match("^([0-9A-Za-z]+):([0-9]+)-([0-9]+)$", region)
    if not m:
        return "Error - malformed region.", 400
    start = int(m.group(2))
    end = int(m.group(3))
    if start >= end:
        return "Invalid start and end region values", 400
    if end - start > 1e5:
        return "Maximum region size is 100 kb", 400
    # bcftools streams just this region from the remote, indexed VCF
    comm = ["bcftools", "view", vcf, region]
    out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()
    # NOTE(review): stderr is captured but deliberately ignored for now
    #if err:
    #    return err, 400
    #v = VCF(out)
    return jsonify({"out": out.splitlines(), "comm": ' '.join(comm)})
| Python | 0.000001 | |
d2bcba204d36a8ffd1e6a1ed79b89fcb6f1c88c5 | Add file to test out kmc approach. Dump training k-mers to fasta file | ideas/test_kmc.py | ideas/test_kmc.py | # This code will test out the idea of using kmc to
# 1. quickly enumerate the k-mers
# 2. intersect these with the training database, output as fasta
# 3. use that reduced fasta of intersecting kmers as the query to CMash
####################################################################
# First, I will need to dump the training database to a fasta file
from CMash import MinHash as MH
import os
import blist
training_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'
training_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'
training_file_names = "/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt"
# collect the basenames of (at most 1001) training sketches
file_names = []
with open(training_file_names, 'r') as fid:
    iter = 0  # NOTE(review): shadows the builtin ``iter``
    for line in fid.readlines():
        line = line.strip()
        file_names.append(os.path.basename(line))
        iter += 1
        if iter > 1000:
            break
# gather every k-mer from every selected sketch (blist gives cheap appends)
all_kmers = blist.blist()
for file_name in file_names:
    sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=[file_name])[0]
    all_kmers += sketch._kmers
# de-duplicate across sketches
all_kmers_set = set(all_kmers)
# write one FASTA record per unique training k-mer
with open(training_out_file, 'w') as fid:
    iter = 0
    for kmer in all_kmers_set:
        fid.write(">seq_%d\n" % iter)
        fid.write("%s\n" % kmer)
        iter += 1
##########################################################################
| Python | 0 | |
c584bca2f9ac7bc005128d22b4e81a6b4885724c | allow Fabric to infrastructure config from YAML data files | templates/fabfile.py | templates/fabfile.py | import yaml
from fabric.api import env, run
def import_inf(data='web_app_basic.yml'):
    """Load the infrastructure description from a YAML data file.

    Args:
        data: Path to the YAML inventory file.

    Returns:
        The parsed YAML structure (a list of box dicts).
    """
    # BUG FIX: the file handle was previously closed on a line placed *after*
    # the return statement, so close() never ran and the handle leaked; a
    # ``with`` block guarantees it is closed.  safe_load replaces the
    # Loader-less yaml.load call, which modern PyYAML rejects and which would
    # otherwise execute arbitrary YAML tags.
    with open(data, 'r') as inf_data:
        return yaml.safe_load(inf_data)
# Load the infrastructure description and point Fabric at the second box's IP.
inf = import_inf()
# Fabric connection settings (Vagrant defaults)
env.hosts = [inf[1]['ip']]
env.user = 'vagrant'
env.password = 'vagrant'
def hostinf():
    """Fabric task: print basic host information for each configured host."""
    run('hostname')
    run('ip a sh dev eth1')
    run('uname -a')
| Python | 0 | |
f657a02a560af1a5860f9a532052f54330018620 | Build "shell" target with chromium_code set. | ui/shell/shell.gyp | ui/shell/shell.gyp | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'variables': {
'chromium_code': 1,
},
'targets': [
{
'target_name': 'shell',
'type': 'static_library',
'dependencies': [
'../../skia/skia.gyp:skia',
'../aura/aura.gyp:aura',
'../views/views.gyp:views',
],
'sources': [
'minimal_shell.cc',
'minimal_shell.h',
],
},
],
}
| # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'shell',
'type': 'static_library',
'dependencies': [
'../aura/aura.gyp:aura',
'../views/views.gyp:views',
'../../skia/skia.gyp:skia',
],
'sources': [
'minimal_shell.cc',
'minimal_shell.h',
],
},
],
}
| Python | 0.999994 |
bddfeeec193d9fb61d99c70be68093c854e541f7 | Add initial check thorium state | salt/thorium/check.py | salt/thorium/check.py | '''
The check Thorium state is used to create gateways to commands, the checks
make it easy to make states that watch registers for changes and then just
succeed or fail based on the state of the register, this creates the pattern
of having a command execution get gated by a check state via a requisite.
'''
def gt(name, value):
    '''
    Only succeed if the value in the given register location is greater than
    the given value
    '''
    # Standard Thorium state return structure.
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}
    # ``__reg__`` is the Thorium register, injected by the salt loader.
    # A missing key yields result None (unresolved), not a failure.
    if name not in __reg__:
        ret['result'] = None
        ret['comment'] = 'Value {0} not in register'.format(name)
        return ret
    if __reg__[name]['val'] > value:
        ret['result'] = True
    return ret
def lt(name, value):
    '''
    Only succeed if the value in the given register location is less than
    the given value
    '''
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}
    # Missing register entry yields result None (unresolved), not a failure.
    if name not in __reg__:
        ret['result'] = None
        ret['comment'] = 'Value {0} not in register'.format(name)
        return ret
    if __reg__[name]['val'] < value:
        ret['result'] = True
    return ret
def contains(name, value):
    '''
    Only succeed if the value in the given register location is contained
    within the given value
    '''
    ret = {'name': name,
           'result': False,
           'comment': '',
           'changes': {}}
    # Missing register entry yields result None (unresolved), not a failure.
    if name not in __reg__:
        ret['result'] = None
        ret['comment'] = 'Value {0} not in register'.format(name)
        return ret
    try:
        if __reg__[name]['val'] in value:
            ret['result'] = True
    except TypeError:
        # value is not a container; leave result as False
        pass
    return ret
| Python | 0 | |
6c08a3d795f9bd2f2d0850fb4c2b7f20474908a9 | Add test for scrunch block | test/test_scrunch.py | test/test_scrunch.py | # Copyright (c) 2016, The Bifrost Authors. All rights reserved.
# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of The Bifrost Authors nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Test all aspects of `bifrost.blocks.scrunch`"""
import unittest
import bifrost as bf
import bifrost.pipeline as bfp
import bifrost.blocks as blocks
class CallbackBlock(blocks.CopyBlock):
    """Testing-only block which calls user-defined
    functions on sequence and on data"""
    def __init__(self, iring, seq_callback, data_callback, *args, **kwargs):
        super(CallbackBlock, self).__init__(iring, *args, **kwargs)
        self.seq_callback = seq_callback
        self.data_callback = data_callback

    def on_sequence(self, iseq):
        # invoke the user hook, then behave exactly like CopyBlock
        self.seq_callback(iseq)
        return super(CallbackBlock, self).on_sequence(iseq)

    def on_data(self, ispan, ospan):
        # invoke the user hook, then behave exactly like CopyBlock
        self.data_callback(ispan, ospan)
        return super(CallbackBlock, self).on_data(ispan, ospan)
class TestScrunchBlock(unittest.TestCase):
    """Pipeline tests for blocks.scrunch over a small sigproc filterbank
    file, using CallbackBlock to assert on headers/spans before and after."""
    def setUp(self):
        """Create settings shared between tests"""
        self.fil_file = "./data/2chan4bitNoDM.fil"
        self.gulp_nframe = 101
        self.shape_settings = [-1, 1, 2]

    def check_sequence_before(self, seq):
        """Function passed to `CallbackBlock`, which
        checks sequence before scrunch"""
        tensor = seq.header['_tensor']
        # input is always the raw file layout: (time, 1 pol, 2 freq channels)
        self.assertEqual(tensor['shape'], [-1,1,2])
        self.assertEqual(tensor['dtype'], 'u8')
        self.assertEqual(tensor['labels'], ['time', 'pol', 'freq'])
        self.assertEqual(tensor['units'], ['s', None, 'MHz'])

    def check_data_before(self, ispan, ospan):
        """Function passed to `CallbackBlock`, which
        checks data before scrunch"""
        self.assertLessEqual(ispan.nframe, self.gulp_nframe)
        self.assertEqual(ospan.nframe, ispan.nframe)
        self.assertEqual(ispan.data.shape, (ispan.nframe,1,2))
        self.assertEqual(ospan.data.shape, (ospan.nframe,1,2))

    def check_sequence_after(self, seq):
        """Function passed to `CallbackBlock`, which
        checks sequence after scrunch"""
        tensor = seq.header['_tensor']
        # after scrunching, the shape must match the per-test settings
        self.assertEqual(tensor['shape'], self.shape_settings)
        self.assertEqual(tensor['dtype'], 'u8')
        self.assertEqual(tensor['labels'], ['time', 'pol', 'freq'])
        self.assertEqual(tensor['units'], ['s', None, 'MHz'])

    def check_data_after(self, ispan, ospan):
        """Function passed to `CallbackBlock`, which
        checks data after scrunch"""
        # NOTE(review): identical to check_data_before — with a factor-1
        # scrunch the spans are unchanged, so the same checks apply
        self.assertLessEqual(ispan.nframe, self.gulp_nframe)
        self.assertEqual(ospan.nframe, ispan.nframe)
        self.assertEqual(ispan.data.shape, (ispan.nframe,1,2))
        self.assertEqual(ospan.data.shape, (ospan.nframe,1,2))

    def test_null_scrunch(self):
        """Check that scrunching no spans leaves header intact"""
        self.shape_settings = [-1, 1, 2]
        with bfp.Pipeline() as pipeline:
            data = blocks.sigproc.read_sigproc([self.fil_file], self.gulp_nframe)
            call_data = CallbackBlock(
                data, self.check_sequence_before, self.check_data_before)
            # a factor-1 scrunch should be a no-op on the stream
            scrunched = blocks.scrunch(data, 1)
            call_data = CallbackBlock(
                scrunched, self.check_sequence_after, self.check_data_after)
            pipeline.run()
| Python | 0 | |
0b0647a0537c3c325f5cf57cae933e06f7997ea9 | add "_" prefix to plot names | crosscat/tests/timing_analysis.py | crosscat/tests/timing_analysis.py | import argparse
def _generate_parser():
default_num_rows = [100, 400, 1000, 4000]
default_num_cols = [8, 16, 32]
default_num_clusters = [1, 2]
default_num_views = [1, 2]
#
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='timing_analysis', type=str)
parser.add_argument('--num_rows', nargs='+', default=default_num_rows, type=int)
parser.add_argument('--num_cols', nargs='+', default=default_num_cols, type=int)
parser.add_argument('--num_clusters', nargs='+', default=default_num_clusters, type=int)
parser.add_argument('--num_views', nargs='+', default=default_num_views, type=int)
parser.add_argument('--no_plots', action='store_true')
return parser
def _munge_args(args):
kwargs = args.__dict__.copy()
dirname = kwargs.pop('dirname')
generate_plots = not kwargs.pop('no_plots')
return kwargs, dirname, generate_plots
if __name__ == '__main__':
    from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
    from crosscat.utils.timing_test_utils import reader, read_all_configs, \
            read_results, writer, runner, gen_configs
    import crosscat.utils.timing_test_utils as ttu
    import experiment_runner.experiment_utils as eu

    # parse args
    parser = _generate_parser()
    args = parser.parse_args()
    kwargs, dirname, generate_plots = _munge_args(args)

    # every combination of the sweep dimensions, one kernel run of 10 steps
    config_list = ttu.gen_configs(
        kernel_list = ttu._kernel_list,
        n_steps=[10],
        **kwargs
    )

    with Timer('experiments') as timer:
        with MapperContext(Pool=NoDaemonPool) as mapper:
            # use non-daemonic mapper since run_geweke spawns daemonic processes
            eu.do_experiments(config_list, runner, writer, dirname, mapper)
            pass
        pass

    if generate_plots:
        # read the data back in
        all_configs = read_all_configs(dirname)
        _all_results = read_results(all_configs, dirname)
        # keep only runs whose model dimensions didn't change during inference
        is_same_shape = lambda result: result['start_dims'] == result['end_dims']
        use_results = filter(is_same_shape, _all_results)
        # add plot_prefix so plots show up at top of list of files/folders
        ttu.plot_results(use_results, plot_prefix='_', dirname=dirname)
| import argparse
def _generate_parser():
default_num_rows = [100, 400, 1000, 4000]
default_num_cols = [8, 16, 32]
default_num_clusters = [1, 2]
default_num_views = [1, 2]
#
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='timing_analysis', type=str)
parser.add_argument('--num_rows', nargs='+', default=default_num_rows, type=int)
parser.add_argument('--num_cols', nargs='+', default=default_num_cols, type=int)
parser.add_argument('--num_clusters', nargs='+', default=default_num_clusters, type=int)
parser.add_argument('--num_views', nargs='+', default=default_num_views, type=int)
parser.add_argument('--no_plots', action='store_true')
return parser
def _munge_args(args):
kwargs = args.__dict__.copy()
dirname = kwargs.pop('dirname')
generate_plots = not kwargs.pop('no_plots')
return kwargs, dirname, generate_plots
if __name__ == '__main__':
from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
from crosscat.utils.timing_test_utils import reader, read_all_configs, \
read_results, writer, runner, gen_configs
import crosscat.utils.timing_test_utils as ttu
import experiment_runner.experiment_utils as eu
# parse args
parser = _generate_parser()
args = parser.parse_args()
kwargs, dirname, generate_plots = _munge_args(args)
config_list = ttu.gen_configs(
kernel_list = ttu._kernel_list,
n_steps=[10],
**kwargs
)
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
eu.do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
if generate_plots:
# read the data back in
all_configs = read_all_configs(dirname)
_all_results = read_results(all_configs, dirname)
is_same_shape = lambda result: result['start_dims'] == result['end_dims']
use_results = filter(is_same_shape, _all_results)
ttu.plot_results(use_results, dirname=dirname)
| Python | 0 |
42e0504933d6b9e55cdb6edb9931ba080baab136 | add 408, replace print in test cases into assert | python/408_valid_word_abbreviation.py | python/408_valid_word_abbreviation.py | """
Given a non-empty string s and an abbreviation abbr, return whether the string
matches with the given abbreviation.
A string such as "word" contains only the following valid abbreviations:
["word", "1ord", "w1rd", "wo1d", "wor1", "2rd", "w2d", "wo2", "1o1d", "1or1",
"w1r1", "1o2", "2r1", "3d", "w3", "4"]
Notice that only the above abbreviations are valid abbreviations of the string
"word". Any other string is not a valid abbreviation of "word".
Note:
Assume s contains only lowercase letters and abbr contains only lowercase
letters and digits.
Example 1:
Given s = "internationalization", abbr = "i12iz4n":
Return true.
Example 2:
Given s = "apple", abbr = "a2e":
Return false.
"""
class Solution(object):
    def validWordAbbreviation(self, word, abbr):
        """
        :type word: str
        :type abbr: str
        :rtype: bool
        """
        # Two cursors: wi walks through word, ai walks through abbr.
        wi = 0
        ai = 0
        length = len(abbr)
        while ai < length:
            ch = abbr[ai]
            if ch.isdigit():
                # A digit run skips that many characters of word; a number
                # may never start with '0' (e.g. "w01d" is invalid).
                if ch == '0':
                    return False
                num = 0
                while ai < length and abbr[ai].isdigit():
                    num = num * 10 + int(abbr[ai])
                    ai += 1
                wi += num
            else:
                # Literal characters must match word exactly, in bounds.
                if wi >= len(word) or word[wi] != ch:
                    return False
                wi += 1
                ai += 1
        # Valid iff the abbreviation consumed word exactly.
        return wi == len(word)
# Smoke tests: executed at import time; any failure raises AssertionError.
assert Solution().validWordAbbreviation("a", "2") == False
assert Solution().validWordAbbreviation("word", "w2d") == True
assert Solution().validWordAbbreviation("internationalization", "i12iz4n") == True
assert Solution().validWordAbbreviation("apple", "a3e") == True
assert Solution().validWordAbbreviation("apple", "a2e") == False
print("all cases passed")
| Python | 0.000002 | |
5e91e3b2c7e4cbc9f14067a832b87c336c0811e7 | update add test for c4 | redis_i_action/c4-process-log-and-replication/test.py | redis_i_action/c4-process-log-and-replication/test.py | class TestCh04(unittest.TestCase):
def setUp(self):
import redis
self.conn = redis.Redis(db=15)
self.conn.flushdb()
def tearDown(self):
self.conn.flushdb()
del self.conn
print
print
def test_list_item(self):
import pprint
conn = self.conn
print "We need to set up just enough state so that a user can list an item"
seller = 'userX'
item = 'itemX'
conn.sadd('inventory:' + seller, item)
i = conn.smembers('inventory:' + seller)
print "The user's inventory has:", i
self.assertTrue(i)
print
print "Listing the item..."
l = list_item(conn, item, seller, 10)
print "Listing the item succeeded?", l
self.assertTrue(l)
r = conn.zrange('market:', 0, -1, withscores=True)
print "The market contains:"
pprint.pprint(r)
self.assertTrue(r)
self.assertTrue(any(x[0] == 'itemX.userX' for x in r))
def test_purchase_item(self):
self.test_list_item()
conn = self.conn
print "We need to set up just enough state so a user can buy an item"
buyer = 'userY'
conn.hset('users:userY', 'funds', 125)
r = conn.hgetall('users:userY')
print "The user has some money:", r
self.assertTrue(r)
self.assertTrue(r.get('funds'))
print
print "Let's purchase an item"
p = purchase_item(conn, 'userY', 'itemX', 'userX', 10)
print "Purchasing an item succeeded?", p
self.assertTrue(p)
r = conn.hgetall('users:userY')
print "Their money is now:", r
self.assertTrue(r)
i = conn.smembers('inventory:' + buyer)
print "Their inventory is now:", i
self.assertTrue(i)
self.assertTrue('itemX' in i)
self.assertEquals(conn.zscore('market:', 'itemX.userX'), None)
def test_benchmark_update_token(self):
benchmark_update_token(self.conn, 5)
if __name__ == '__main__':
unittest.main()
| Python | 0 | |
5fd556bc01fdd5d3c9690a56a70557fbd6eb73f8 | print the to calc statistical test | MachineLearning/print_ensemble_precisions.py | MachineLearning/print_ensemble_precisions.py | #
# This program is distributed without any warranty and it
# can be freely redistributed for research, classes or private studies,
# since the copyright notices are not removed.
#
# This file just read the data to calculate the statistical test
#
# Jadson Santos - jadsonjs@gmail.com
#
# to run this exemple install pyhton modules:
#
# pip3 install pandas
#
# Python Data Analysis Library
# https://pandas.pydata.org
import pandas as pd
# This module provides functions for calculating mathematical statistics of numeric (Real-valued) data.
# https://docs.python.org/3/library/statistics.html
import statistics
#
# PUT THE RESULT DIRECTORY AND ENSEMBLE ALGORITHM GENEREATED BY WEKA ON HERE
#
# read the CSV file with your data base and put into a Pandas DataFrame
# https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/
#
directory = '/Users/jadson/tmp/results/' # where are the files generated by weka
#
# prints the data of all homogeneous ensemble
#
def printHomogeneo():
    """Print the IR_precision column of every homogeneous-ensemble result
    file, one value per line, grouped per model/ensemble combination.

    Reads ``<directory><ensemble>_<model>_<n>.csv`` for every base model,
    ensemble technique and classifier count, where ``directory`` is the
    module-level results path.
    """
    for model in ['knn', 'ad', 'nb', 'mlp']:
        for ensemble in ['bagging', 'boosting', 'stacking_homogeneo']:
            print(' -------------------- ')
            print(model + ' --> ' + ensemble)
            print(' -------------------- ')
            for num_classifiers in [10, 15, 20]:
                result_file = '{}{}_{}_{}.csv'.format(
                    directory, ensemble, model, num_classifiers)
                df = pd.read_csv(result_file)
                # one precision value per repetition of the experiment;
                # '{0:.4}' prints it with 4 significant digits (the old
                # comment wrongly said "4 decimals")
                for precision in df['IR_precision'].values:
                    print('{0:.4}'.format(precision))
#
# prints the data of all heterogeneous ensemble
#
def printHeterogeneo():
    """Print the IR_precision column of every heterogeneous-ensemble
    (stacking) result file, one value per line.

    Reads ``<directory><ensemble>_<model>_<n>.csv`` for every model
    combination and classifier count, where ``directory`` is the
    module-level results path.
    """
    for ensemble in ['stacking_heterogeneo']:
        print(' -------------------- ')
        print(ensemble)
        print(' -------------------- ')
        for model in ['MLP_AD', 'MLP_NB', 'MLP_NB_AD', 'NB_AD']:
            for num_classifiers in [10, 15, 20]:
                result_file = '{}{}_{}_{}.csv'.format(
                    directory, ensemble, model, num_classifiers)
                df = pd.read_csv(result_file)
                # one precision value per repetition, 4 significant digits
                for precision in df['IR_precision'].values:
                    print('{0:.4}'.format(precision))
printHomogeneo()
printHeterogeneo()
| Python | 0.999999 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.