| commit (string, 40 chars) | subject (string, 1-3.25k chars) | old_file (string, 4-311 chars) | new_file (string, 4-311 chars) | old_contents (string, 0-26.3k chars) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k chars) |
|---|---|---|---|---|---|---|---|
18428461f230d7d57056bd1a6a0fc2a66cedb5f1
|
increment version
|
grizli/version.py
|
grizli/version.py
|
# git describe --tags
__version__ = "0.8.0-10-g15486e6"
|
Python
| 0.000004
|
@@ -41,16 +41,16 @@
.0-1
-0
+4
-g
-1
548
-6e6
+f692
%22%0A
|
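Note: the diff column holds character-level git diffs with URL-encoded whitespace (%22 is a double quote, %0A a newline, %09 a tab; `-`/`+` fragments are removed/added characters). Decoded, this hunk appears to bump the git-describe string to roughly:

```python
# git describe --tags
__version__ = "0.8.0-14-g1548f692"
```

The exact new hash fragment is a best-effort reading; the flattened character hunks are ambiguous about which fragments are context.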
b66c987170d8d0b6b83d25f4214208b9251c644c
|
Document error handling
|
code/document.py
|
code/document.py
|
from boto.s3.key import Key
import socket
import sys
import xmltodict
from collections import OrderedDict
import gzip
class Document(object):
"""
This class represents a subtitle file.
"""
def __init__(self, key, label):
self.label = label
self.key = key
self.parsed_xml = self.parse_xml()
self.contents = self.extract_sub()
def get_sub(self):
"""Returns subtitle from file if it exists."""
try:
return self.contents['document']['s']
except KeyError:
            print self.contents.keys()
def load_file(self):
if type(self.key) == Key:
filename = 'file.xml'
self.key.get_contents_to_filename('file.xml')
if self.key.name.endswith('.gz'):
return gzip.GzipFile(fileobj=open(filename, 'rb'))
else:
return open(filename,'r')
else:
filename = self.key
if filename.endswith('.gz'):
return gzip.GzipFile(fileobj=open(filename, 'rb'))
else:
return open(filename,'r')
def parse_xml(self):
"""
Loads XML file and converts to OrderedDict
"""
f = self.load_file()
# if self.key.name == 'subtitle_project,data/xml/en/1969/65063/59688.xml.gz':
# print f.read()
# try:
# return xmltodict.parse(f.read())
# except:
# return {}
xml=''
line = f.readline()
while line:
xml += line.strip()
line = f.readline()
try:
xml_dict = xmltodict.parse(xml)
except:
print xml
return
f.close()
return xml_dict
def extract_row(self, row):
"""Returns informations attached to one row of a subtitle.
"""
row_id, times, words = [], [], []
if '@id' in row:
row_id = row['@id']
if 'time' in row:
times = self.flatten_row(row['time'], '@value')
if 'w' in row:
words = self.flatten_row(row['w'], '#text')
return row_id, times, words
def extract_sub(self):
"""
Returns subtitle as a list of triplets (id, timestamps, words).
"""
if 'document' in self.parsed_xml.keys():
doc = self.parsed_xml['document']
else:
return []
sentences = []
if 's' in doc.keys():
for row in doc['s']:
sentences.append(self.extract_row(row))
return sentences
def flatten_row(self, elem, field):
"""Flattens nested dictionaries in the XML file."""
if type(elem) == list:
return [e[field] for e in elem]
elif type(elem) == OrderedDict:
return [elem[field]]
def get_bag_of_words(self):
"""Returns list of all words."""
return [word for id, t, sentence in self.contents for word in sentence]
def parse_nb(self):
"""
Parameters
--------
Returns RDD of LabeledPoint objects to be trained.
"""
return (self.filename, LabeledPoint(self.label, self.vec))
|
Python
| 0.000001
|
@@ -1456,17 +1456,17 @@
urn %7B%7D%0A%0A
-
+#
x
@@ -1474,17 +1474,17 @@
l=''%0A%0A%0A%0A
-
+#
l
@@ -1499,25 +1499,25 @@
readline()%0A%0A
-
+#
while
@@ -1523,17 +1523,17 @@
e line:%0A
-
+#
@@ -1555,16 +1555,37 @@
strip()%0A
+# try:%0A#
@@ -1608,34 +1608,242 @@
dline()%0A
-%0A try:%0A
+# except:%0A# ip = socket.gethostbyname(socket.gethostname())%0A# filename = self.key.name%0A# raise IOError('Unzipping error - IP: %25s, file: %25s' %25 (ip, filename))%0A%0A# try:%0A
@@ -1873,13 +1873,12 @@
rse(
-xml
+f
)%0A
+#
@@ -1881,32 +1881,33 @@
except:%0A
+#
prin
@@ -1912,16 +1912,17 @@
int xml%0A
+#
@@ -2524,47 +2524,34 @@
-if 'document' in self.parsed_xml.keys()
+sentences = %5B%5D%0A try
:%0A
@@ -2595,35 +2595,62 @@
ment'%5D%0A e
-lse
+xcept AttributeError, KeyError
:%0A re
@@ -2654,27 +2654,16 @@
return
-%5B%5D%0A
sentence
@@ -2663,21 +2663,16 @@
entences
- = %5B%5D
%0A%0A
|
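Decoded, this commit comments out the manual line-by-line XML read and rewrites extract_sub to catch failures instead of pre-checking the keys. Reconstructed (indentation assumed), the new method reads roughly:

```python
    def extract_sub(self):
        """
        Returns subtitle as a list of triplets (id, timestamps, words).
        """
        sentences = []
        try:
            doc = self.parsed_xml['document']
        # Note: in Python 2 this clause catches only AttributeError and binds
        # it to the name KeyError; catching both would require a tuple,
        # i.e. except (AttributeError, KeyError):
        except AttributeError, KeyError:
            return sentences
        if 's' in doc.keys():
            for row in doc['s']:
                sentences.append(self.extract_row(row))
        return sentences
```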
7aa5c99682a626eb1a189d29e609bee290451f14
|
Create trace_dir if not exists. Other minor changes.
|
utils/config_import.py
|
utils/config_import.py
|
import yaml
import os
import sqlite3
class DistillerConfig:
def __init__(self, config_file, section):
self.config = read_config(config_file, section)
try:
self.project_name = self.config['name']
self.trace_queue = "%s-trace-queue" % self.project_name
self.trace_results = "%s-trace-results" % self.project_name
self.min_queue = "%s-min-queue" % self.project_name
self.min_results = "%s-min-results" % self.project_name
except KeyError:
raise Exception(" Project name not defined.")
try:
self.operations = self.config['operations']
if len(self.operations) == 0:
raise Exception("You must select atleast one mode of operation.")
except:
raise Exception("You must select atleast one mode of operation.")
try:
self.mode = self.config['filter']['mode']
self.modules = self.config['filter']['modules']
except KeyError:
# Optional arguments
self.mode = None
self.modules = None
if section == "server":
try:
self.db_path = self.config['db_path']
action = None
if os.path.isfile(self.db_path) and ("reduce" in self.operations or "trace" in self.operations):
while action != "R" and action != "A":
action = raw_input("Database Exists! [R]eplace or [A]ppend? ").upper()
if action == "R":
os.remove(self.db_path)
sql = sqlite3.connect(self.db_path)
c = sql.cursor()
c.execute('''CREATE TABLE IF NOT EXISTS modules
(num INTEGER PRIMARY KEY, name TEXT, UNIQUE (name))''')
c.execute('''CREATE TABLE IF NOT EXISTS seeds
(num INTEGER PRIMARY KEY, name TEXT, ublock_cnt, UNIQUE (name))''')
c.execute('''CREATE TABLE IF NOT EXISTS master_lookup
(bblock TEXT PRIMARY KEY)''')
# Results are calculated using the full data set
# Wipe if they exist
c.execute('''DROP TABLE IF EXISTS results''')
c.execute('''CREATE TABLE results (name TEXT PRIMARY KEY, ublock_cnt INT)''')
sql.commit()
except KeyError:
raise Exception("No database path defined.")
try:
self.seed_dir = self.config['seed_dir']
except KeyError:
raise Exception("No seed dir defined.")
try:
self.trace_dir = self.config['trace_dir']
except KeyError:
raise Exception("No trace dir defined.")
try:
if "reduce" in self.operations or "minimize" in self.operations:
self.output_dir = self.config['output_dir']
try:
self.min_dir = os.path.join(self.output_dir, "minimized")
os.makedirs(self.min_dir)
except os.error:
# Ignore if dir already exists
pass
else:
self.output_dir = None
self.min_dir = None
except KeyError:
raise Exception("No output path defined.")
elif section == "client":
try:
self.host = self.config['host']
except KeyError:
raise Exception("No host defined.")
try:
self.drio_path = self.config['drio_path']
if not os.path.isfile(self.drio_path):
raise Exception("Can not find DynamoRio - %s" % self.drio_path)
except KeyError:
raise Exception("No DynamoRio path defined.")
try:
self.target_path = self.config['target_path']
if not os.path.isfile(self.target_path):
raise Exception("Can not find target - %s" % self.target_path)
except KeyError:
raise Exception("No target path defined.")
try:
self.w_time = self.config['wait_time']
except KeyError:
raise Exception("No wait time defined.")
try:
self.m_time = self.config['max_timeout']
except KeyError:
raise Exception("No max timeout defined.")
# Optional args
try:
self.target_args = self.config['target_args']
if self.target_args is None:
self.target_args = ''
except KeyError:
self.target_args = None
try:
self.pre_cmd = self.config['pre_cmd']
except KeyError:
self.pre_cmd = None
try:
self.post_cmd = self.config['post_cmd']
except KeyError:
self.post_cmd = None
def read_config(config_file, section):
sections = ['project', section]
with open(config_file, 'r') as stream:
data = yaml.load(stream)
config = {}
try:
for section in sections:
for k, v in data[section].iteritems():
config[k] = v
except KeyError:
raise Exception(" Unable to find section %s" % section)
return config
|
Python
| 0
|
@@ -2663,24 +2663,30 @@
%22No seed dir
+ectory
defined.%22)%0A
@@ -2757,24 +2757,326 @@
trace_dir'%5D%0A
+ if not os.path.isdir(self.trace_dir):%0A try:%0A os.makedirs(self.trace_dir)%0A except os.error:%0A pass%0A except:%0A raise Exception(%22Could not create trace directory!%22)%0A
@@ -3137,16 +3137,22 @@
race dir
+ectory
defined
@@ -3773,20 +3773,25 @@
output
-path
+directory
defined
|
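Decoded, the error messages now say "directory" and the trace directory is created when missing; the core of the change (indentation reconstructed) is roughly:

```python
        try:
            self.trace_dir = self.config['trace_dir']
            if not os.path.isdir(self.trace_dir):
                try:
                    os.makedirs(self.trace_dir)
                except os.error:
                    # ignore if the directory already exists
                    pass
        except KeyError:
            raise Exception("No trace directory defined.")
```

The raw hunk also inserts a bare `except:` raising "Could not create trace directory!" ahead of the KeyError handler; that ordering would not compile in Python 2 (a default except must come last), so it is omitted from this sketch.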
e31d0a25e15ebba565fc7466a9d307829a75201b
|
Update script to put things in supportfiles/ now and reference supportfiles.tar.bz2 for better autocomplete.
|
autogallery.py
|
autogallery.py
|
#!/usr/bin/env python
import os, sys, getopt #for path navigation and argument handling
import Image #for thumbnailing, Python Image Library
import zipfile, tarfile #for archive reading/writing
import ConfigParser #for parsing config file
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["rex", "regen", "regenall"])
except getopt.GetoptError as e:
print "Argument error: ", e
print " Use --rex to re-extract support files."
sys.exit(2)
rex, regen = 0, 0
for opt, arg in opts:
if opt in ("--rex"):
rex = 1
if opt in ("--regen"):
regen = 1
if opt in ("--regenall"):
rex = 1
regen = 1
if os.path.isfile('template.html') == 0 or rex == 1:
print "extracting supporting files ..."
support = tarfile.open('autogallerystuff.tar.bz2', 'r:bz2')
support.extractall()
support.close()
del support
else:
print "seems files have been extracted already, use --rex to unpack anyway and overwrite"
pwd = os.getcwdu()
dircontents = os.listdir(pwd)
images = []
config = ConfigParser.ConfigParser()
config.readfp(open('config.cfg'))
filetypes = config.get('autogallery', 'filetypes')
filetypes = [element.strip() for element in filetypes.split(',')]
filetypes = tuple(filetypes)
print "finding images in directory ..."
for item in dircontents:
for filetype in filetypes:
if item.find(filetype) != -1:
images.append(item)
else:
pass
print "checking for zip file..."
if os.path.isfile('images.zip') == 0 or regen == 1:
print "zip not found, creating"
zippy = zipfile.ZipFile('images.zip', 'w')
for image in images:
zippy.write(image.encode('CP437'))
zippy.close()
del zippy
print "zip created"
else:
print "zip exists, not recreating"
zipfilesize = os.path.getsize('images.zip')
ziplinks = '<a href="images.zip">zipfile</a> ('+ str(zipfilesize/1024/1024) +' MB)'
print "creating thumbs dir ..."
thumbsdir = str(pwd + '/' + 'thumbs')
try:
os.mkdir(thumbsdir)
except OSError, e:
if e.errno == 17:
print "\nthumbs directory already exists\n"
else:
raise
size = 200, 200
for image in images:
if os.path.isfile(pwd + '/thumbs/' + 's_' + image) == 0 or regen == 1:
print "creating image thumbnail for " + image + " ... \t"
im = Image.open(pwd + '/' + image)
im.thumbnail(size)
im.save(pwd + '/thumbs/' + 's_' + image, "JPEG")
else:
print "thumb exists for " + image + ", not rebuilding"
dirnamelist = pwd.split('/')
dirname = dirnamelist[-1]
del dirnamelist
imagelinks = ''
nofxlinks = ''
for image in images:
imagelinks += config.get('autogallery', 'imagefxlink', vars={'image': image, 'dirname': dirname}) + "\n\t\t"
# imagelinks += '<a href="' + image + '" rel="prettyPhoto['+ dirname +']" title="'+ image +'"><img src="thumbs/s_'+ image +'" /></a>'
title = config.get('autogallery', 'title', vars={'dirname': dirname})
fxtags = config.get('autogallery', 'fxtags')
print "opening and writing files ..."
template = open('template.html', 'r') #open template file
index = open('index.html', 'w') #open index file
templatecontents = template.read()
templatecontents = templatecontents.replace('{DIRNAME}', title)
templatecontents = templatecontents.replace('{ZIPS}', ziplinks)
buildnofx = config.get('autogallery', 'buildnofx')
if buildnofx == "1":
for image in images:
nofxlinks += config.get('autogallery', 'imagenofxlink', vars={'image': image, 'dirname': dirname}) + "\n\t\t"
nofx = open('nofx.html', 'w') #open effectless index file
nofxcontents = templatecontents.replace('{FXTAGS}', fxtags)
nofxcontents = nofxcontents.replace('{IMAGECODE}', nofxlinks)
nofx.write(nofxcontents)
del nofxcontents
templatecontents = templatecontents.replace('{FXTAGS}', fxtags)
templatecontents = templatecontents.replace('{IMAGECODE}', imagelinks)
index.write(templatecontents)
|
Python
| 0
|
@@ -14,16 +14,40 @@
v python
+%0A# -*- coding: utf-8 -*-
%0A%0Aimport
@@ -641,16 +641,29 @@
isfile('
+supportfiles/
template
@@ -758,24 +758,20 @@
en('
-autogallerystuff
+supportfiles
.tar
@@ -1057,16 +1057,29 @@
p(open('
+supportfiles/
config.c
@@ -2923,16 +2923,29 @@
= open('
+supportfiles/
template
|
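Decoded, the script gains a coding declaration and reads its support files from a supportfiles/ directory and a renamed tarball; the touched lines become roughly (non-contiguous excerpt):

```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# ...
if os.path.isfile('supportfiles/template.html') == 0 or rex == 1:
    # ...
    support = tarfile.open('supportfiles.tar.bz2', 'r:bz2')
# ...
config.readfp(open('supportfiles/config.cfg'))
# ...
template = open('supportfiles/template.html', 'r')  # open template file
```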
68eaa885e15b98bc05376d9ddca6926258be2c46
|
make header fields with dates (e.g. last-modified) comparable
|
httoop/header/conditional.py
|
httoop/header/conditional.py
|
# -*- coding: utf-8 -*-
from httoop.header.element import HeaderElement
class ETag(HeaderElement):
pass
class LastModified(HeaderElement):
__name__ = 'Last-Modified'
class IfMatch(HeaderElement):
__name__ = 'If-Match'
class IfModifiedSince(HeaderElement):
__name__ = 'If-Modified-Since'
class IfNoneMatch(HeaderElement):
__name__ = 'If-None-Match'
class IfUnmodifiedSince(HeaderElement):
__name__ = 'If-Unmodified-Since'
|
Python
| 0
|
@@ -68,16 +68,142 @@
ement%0A%0A%0A
+class _DateComparable(object):%0A%09from httoop.date import Date%0A%09def sanitize(self):%0A%09%09self.value = self.Date.parse(self.value)%0A%0A
class ET
@@ -248,16 +248,33 @@
odified(
+_DateComparable,
HeaderEl
@@ -380,32 +380,49 @@
IfModifiedSince(
+_DateComparable,
HeaderElement):%0A
@@ -543,16 +543,33 @@
edSince(
+_DateComparable,
HeaderEl
|
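Decoded, the commit introduces a mixin that parses the header value into a Date object, which is what makes these fields comparable, and mixes it into the date-valued header classes (the %09 escapes show the file is tab-indented; spaces used here):

```python
class _DateComparable(object):
    from httoop.date import Date

    def sanitize(self):
        self.value = self.Date.parse(self.value)


class LastModified(_DateComparable, HeaderElement):
    __name__ = 'Last-Modified'


class IfModifiedSince(_DateComparable, HeaderElement):
    __name__ = 'If-Modified-Since'


class IfUnmodifiedSince(_DateComparable, HeaderElement):
    __name__ = 'If-Unmodified-Since'
```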
683842330c3a6fa8974d141fc4c3095a8c095510
|
add total variation loss to summary
|
examples/style-transfer/style_transfer.py
|
examples/style-transfer/style_transfer.py
|
# -*- coding: utf-8 -*-
import argparse
import os
import sys
import numpy as np
from scipy.misc import imread, imresize, imsave
import tensorflow as tf
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
import tfmodel
# Parse arguments
parser = argparse.ArgumentParser()
parser.add_argument("--content", type=str, default="img/tensorflow_logo.png")
parser.add_argument("--style", type=str, default="img/chouju_sumou.jpg")
parser.add_argument("--output_dir", type=str, default="outputs")
parser.add_argument("--content_weight", type=float, default=0.1)
parser.add_argument("--style_weight", type=float, default=0.9)
parser.add_argument("--tv_weight", type=float, default=0.001)
parser.add_argument("--iterations", type=int, default=1000)
parser.add_argument("--learning_rate", type=float, default=2e0)
args, unknown_args = parser.parse_known_args()
CONTENT = args.content
STYLE = args.style
OUTPUT_DIR = args.output_dir
CONTENT_WEIGHT = args.content_weight
STYLE_WEIGHT = args.style_weight
TV_WEIGHT = args.tv_weight
LEARNING_RATE = args.learning_rate
ITERATIONS = args.iterations
content_img = np.array([imresize(imread(CONTENT, mode="RGB"), [224, 224])], dtype=np.float32)
style_img = np.array([imresize(imread(STYLE, mode="RGB"), [224, 224])], dtype=np.float32)
# Compute target content and target style
with tf.Graph().as_default() as g1:
img_ph = tf.placeholder(tf.float32, [1, 224, 224, 3])
net = tfmodel.vgg.Vgg16(img_tensor=img_ph)
content_layer_tensors = [net.h_conv4_2, net.h_conv5_2]
style_layer_tensors = [net.h_conv1_1, net.h_conv2_1, net.h_conv3_1, net.h_conv4_1, net.h_conv5_1]
with tf.Session() as sess:
net.restore_pretrained_variables(session=sess)
content_layers = sess.run(content_layer_tensors, feed_dict={img_ph: content_img})
style_layers = sess.run(style_layer_tensors, feed_dict={img_ph: style_img})
with tf.Graph().as_default() as g2:
img_tensor = tf.Variable(tf.random_normal([1, 224, 224, 3], stddev=0.256))
tf.summary.image("generated_image", img_tensor, max_outputs=100)
tf.summary.image("content", content_img)
tf.summary.image("style", style_img)
net = tfmodel.vgg.Vgg16(img_tensor=img_tensor, trainable=False)
content_layer_tensors = [net.h_conv4_2, net.h_conv5_2]
style_layer_tensors = [net.h_conv1_1, net.h_conv2_1, net.h_conv3_1, net.h_conv4_1, net.h_conv5_1]
# Build content loss
with tf.name_scope("content_loss"):
content_losses = []
for i in range(len(content_layers)):
content_losses.append(tf.reduce_mean(tf.squared_difference(content_layer_tensors[i], content_layers[i])))
content_loss = tf.reduce_sum(content_losses) * tf.constant(CONTENT_WEIGHT, name="content_weight")
tf.summary.scalar("content_loss", content_loss)
# Build style loss
with tf.name_scope("style_loss"):
style_losses = []
for i in range(len(style_layers)):
# Compute target gram matrix
features = np.reshape(style_layers[i], (-1, style_layers[i].shape[3]))
style_gram = np.matmul(features.T, features) / features.size
# Build style tensor
_, height, width, number = map(lambda x: x.value, style_layer_tensors[i].get_shape())
size = height * width * number
feats = tf.reshape(style_layer_tensors[i], (-1, number))
gram = tf.matmul(tf.transpose(feats), feats) / size
style_losses.append(tf.nn.l2_loss(gram - style_gram) / size)
style_loss = tf.reduce_sum(style_losses) * tf.constant(STYLE_WEIGHT, name="style_weight")
tf.summary.scalar("style_loss", style_loss)
# Build total variation loss
with tf.name_scope("total_variation_loss"):
h = img_tensor.get_shape()[1].value
w = img_tensor.get_shape()[2].value
tv_loss = tf.reduce_mean([
        tf.nn.l2_loss(img_tensor[:, 1:, :, :] - img_tensor[:, :h-1, :, :]),
tf.nn.l2_loss(img_tensor[:, :, 1:, :] - img_tensor[:, :, :w-1, :])
]) * tf.constant(TV_WEIGHT, name="tv_weight")
# Build total loss
with tf.name_scope("total_loss"):
total_loss = content_loss + style_loss + tv_loss
tf.summary.scalar("total_loss", total_loss)
optim = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE).minimize(total_loss)
init_op = tf.global_variables_initializer()
summary_writer = tf.summary.FileWriter("summary/neuralstyle", graph=g2)
merged = tf.summary.merge_all()
with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
sess.run(init_op)
net.restore_pretrained_variables(session=sess)
res = sess.run(content_layer_tensors)
var = sess.run(img_tensor)
for i in range(ITERATIONS):
if i % 20 == 0:
if not os.path.exists(OUTPUT_DIR):
os.mkdir(OUTPUT_DIR)
imsave(os.path.join(OUTPUT_DIR, "output-{}.jpg".format(i)), sess.run(img_tensor)[0])
summary = sess.run(merged)
summary_writer.add_summary(summary, i)
_, t, c, s, tv = sess.run([optim, total_loss, content_loss, style_loss, tv_loss])
print(
"Iter: {0} TotalLoss: {1} ContentLoss: {2} StyleLoss: {3} TotalVariationLoss: {4}".format(
i, t, c, s, tv
)
)
|
Python
| 0.000009
|
@@ -773,17 +773,17 @@
default=
-1
+3
000)%0Apar
@@ -4124,16 +4124,75 @@
weight%22)
+%0A tf.summary.scalar(%22total_variation_loss%22, tv_loss)
%0A%0A #
|
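Two small decoded changes: the default iteration count rises from 1000 to 3000, and the total variation loss is logged alongside the other scalar summaries:

```python
parser.add_argument("--iterations", type=int, default=3000)

# ... appended inside the "total_variation_loss" name scope:
    tf.summary.scalar("total_variation_loss", tv_loss)
```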
82bc502cf7bb64236feba6e140d98bb9e555f4ca
|
Fix assert_raises for catching parents of exceptions.
|
tests/backport_assert_raises.py
|
tests/backport_assert_raises.py
|
from __future__ import unicode_literals
"""
Patch courtesy of:
https://marmida.com/blog/index.php/2012/08/08/monkey-patching-assert_raises/
"""
# code for monkey-patching
import nose.tools
# let's fix nose.tools.assert_raises (which is really unittest.assertRaises)
# so that it always supports context management
# in order for these changes to be available to other modules, you'll need
# to guarantee this module is imported by your fixture before either nose or
# unittest are imported
try:
nose.tools.assert_raises(Exception)
except TypeError:
# this version of assert_raises doesn't support the 1-arg version
class AssertRaisesContext(object):
def __init__(self, expected):
self.expected = expected
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, tb):
self.exception = exc_val
nose.tools.assert_equal(exc_type, self.expected)
# if you get to this line, the last assertion must have passed
# suppress the propagation of this exception
return True
def assert_raises_context(exc_type):
return AssertRaisesContext(exc_type)
nose.tools.assert_raises = assert_raises_context
|
Python
| 0
|
@@ -877,16 +877,96 @@
exc_val%0A
+ if issubclass(exc_type, self.expected):%0A return True%0A
|
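Decoded, `__exit__` now accepts subclasses of the expected exception instead of requiring an exact type match:

```python
        def __exit__(self, exc_type, exc_val, tb):
            self.exception = exc_val
            # children of the expected exception type now count as a match
            if issubclass(exc_type, self.expected):
                return True
            nose.tools.assert_equal(exc_type, self.expected)
            # if you get to this line, the last assertion must have passed
            # suppress the propagation of this exception
            return True
```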
bea4752dea1e7f01257b38faef9e21ba0e946983
|
Implement psutil within blackbox tests
|
tests/blackbox/testlib/utils.py
|
tests/blackbox/testlib/utils.py
|
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Utility functions for blackbox testing.
"""
# isort: STDLIB
import os
import random
import string
from subprocess import PIPE, Popen
# Name prefix, so that we hopefully don't destroy any end user data by mistake!
TEST_PREF = os.getenv("STRATIS_UT_PREFIX", "STRATI$_DE$TROY_ME!_")
def p_n():
"""
Return a random pool name
:return: Random String
"""
return TEST_PREF + "pool" + random_string()
def fs_n():
"""
Return a random FS name
:return: Random String
"""
return TEST_PREF + "fs" + random_string()
def random_string(length=4):
"""
Generates a random string
:param length: Length of random string
:return: String
"""
return "{0}".format(
"".join(random.choice(string.ascii_uppercase) for _ in range(length))
)
def process_exists(name):
"""
Walk the process table looking for executable 'name', returns pid if one
found, else return None
"""
for pid in [pid for pid in os.listdir("/proc") if pid.isdigit()]:
try:
exe_name = os.readlink(os.path.join("/proc/", pid, "exe"))
except OSError:
continue
if exe_name and exe_name.endswith(os.path.join("/", name)):
return pid
return None
def umount_mdv():
"""
Locate and umount any stratis mdv mounts
:return: None
"""
with open("/proc/self/mounts", "r") as mounts:
for line in mounts.readlines():
if "/stratis/.mdv-" in line:
mountpoint = line.split()[1]
exec_command(["umount", mountpoint])
def exec_command(cmd):
"""
Executes the specified infrastructure command.
:param cmd: command to execute
:type cmd: list of str
:returns: standard output
:rtype: str
:raises AssertionError: if exit code is non-zero
"""
exit_code, stdout_text, stderr_text = exec_test_command(cmd)
expected_exit_code = 0
if expected_exit_code != exit_code:
print("cmd = %s [%d != %d]" % (str(cmd), expected_exit_code, exit_code))
print("STDOUT= %s" % stdout_text)
print("STDERR= %s" % stderr_text)
assert expected_exit_code == exit_code
return stdout_text
def exec_test_command(cmd):
"""
Executes the specified test command
:param cmd: Command and arguments as list
:type cmd: list of str
:returns: (exit code, std out text, std err text)
:rtype: triple of int * str * str
"""
process = Popen(cmd, stdout=PIPE, stderr=PIPE, close_fds=True, env=os.environ)
result = process.communicate()
return (
process.returncode,
bytes(result[0]).decode("utf-8"),
bytes(result[1]).decode("utf-8"),
)
|
Python
| 0.000002
|
@@ -708,16 +708,51 @@
Popen%0A%0A
+# isort: THIRDPARTY%0Aimport psutil%0A%0A
# Name p
@@ -1448,24 +1448,28 @@
-Wal
+Loo
k th
-e
+rough
process
tab
@@ -1468,366 +1468,408 @@
cess
- table looking for executable 'name', returns pid if one%0A found, else return None%0A %22%22%22%0A for pid in %5Bpid for pid in os.listdir(%22/
+es, using their pids, to find one matching 'name'.%0A Return None if no such process found, else return the pid.%0A :param name: name of process to check%0A :type name: str%0A :return: pid or None%0A :rtype: int or NoneType%0A %22%22%22%0A for
proc
-%22)
i
-f pid.isdigit()%5D:%0A try:%0A exe_name = os.readlink(os.path.join(%22/proc/%22, pid, %22exe%22))%0A except OSError:%0A continue%0A if exe_name and exe_name.endswith(os.path.join(%22/%22, name))
+n psutil.process_iter(%5B%22name%22%5D):%0A try:%0A if proc.name() == name:%0A return proc.pid%0A except psutil.NoSuchProcess
:%0A
@@ -1882,18 +1882,13 @@
-return pid
+pass%0A
%0A
|
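Decoded, process_exists switches from walking /proc by hand to psutil, with a rewritten docstring:

```python
# isort: THIRDPARTY
import psutil


def process_exists(name):
    """
    Look through processes, using their pids, to find one matching 'name'.
    Return None if no such process found, else return the pid.
    :param name: name of process to check
    :type name: str
    :return: pid or None
    :rtype: int or NoneType
    """
    for proc in psutil.process_iter(["name"]):
        try:
            if proc.name() == name:
                return proc.pid
        except psutil.NoSuchProcess:
            pass
    return None
```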
fc94bda4cb840b74fbd1226d69bf0aafc5e16e61
|
return when not installed (#283)
|
pwndbg/commands/rop.py
|
pwndbg/commands/rop.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import re
import subprocess
import tempfile
import gdb
import pwndbg.commands
import pwndbg.vmmap
parser = argparse.ArgumentParser(description="Dump ROP gadgets with Jon Salwan's ROPgadget tool.",
epilog="Example: rop --grep 'pop rdi' -- --nojop")
parser.add_argument('--grep', type=str,
help='String to grep the output for')
parser.add_argument('argument', nargs='*', type=str,
help='Arguments to pass to ROPgadget')
@pwndbg.commands.ArgparsedCommand(parser)
@pwndbg.commands.OnlyWithFile
def rop(grep, argument):
with tempfile.NamedTemporaryFile() as corefile:
# If the process is running, dump a corefile so we get actual addresses.
if pwndbg.proc.alive:
filename = corefile.name
gdb.execute('gcore %s' % filename)
else:
filename = pwndbg.proc.exe
# Build up the command line to run
cmd = ['ROPgadget',
'--binary',
filename]
cmd += argument
try:
io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except Exception:
print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")
(stdout, stderr) = io.communicate()
stdout = stdout.decode('latin-1')
if not grep:
print(stdout)
return
for line in stdout.splitlines():
if re.search(grep, line):
print(line)
@pwndbg.commands.Command
def ropgadget(*a):
return rop(*a)
|
Python
| 0
|
@@ -1438,16 +1438,35 @@
$PATH.%22)
+%0A return
%0A%0A
|
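The fix decodes to a single added `return`, so the handler no longer falls through to `io.communicate()` on an undefined `io` when ROPgadget is missing:

```python
    try:
        io = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    except Exception:
        print("Could not run ROPgadget. Please ensure it's installed and in $PATH.")
        return
```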
0a5b7c606a711307bdc41179cf94c0a72c15ee92
|
Make BaseCommandTest automatically instantiate commands using decoration magic.
|
hypebot/commands/hypetest.py
|
hypebot/commands/hypetest.py
|
# Copyright 2019 The Hypebot Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities for testing commands.
This file will be a dependency of all tests within hypebot, but will not be
included in the main binary.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from hypebot import basebot
from hypebot import hypecore
from hypebot.core import params_lib
from hypebot.interfaces import interface_factory
from hypebot.protos import channel_pb2
TEST_CHANNEL = channel_pb2.Channel(
id='#test', name='Test', visibility=channel_pb2.Channel.PUBLIC)
class BaseCommandTestCase(unittest.TestCase):
# Set the default bot params (used by core) to something sane for testing.
BOT_PARAMS = params_lib.MergeParams(basebot.BaseBot.DEFAULT_PARAMS, {
'interface': {
'type': 'CaptureInterface',
},
'storage': {
'type': 'MemStore',
'cached_type': 'MemStore',
},
'execution_mode': {
# This currently sets the command prefix to `!`. We should figure out
# a better long-term solution for the command prefix though since this
# can in theory change other behavior within core, but currently
# should have no other impacts.
'dev': False,
},
'commands': {},
'subscriptions': {},
})
def setUp(self):
super(BaseCommandTestCase, self).setUp()
self.interface = interface_factory.CreateFromParams(
self.BOT_PARAMS.interface)
self.core = hypecore.Core(self.BOT_PARAMS, self.interface)
|
Python
| 0
|
@@ -1200,16 +1200,224 @@
BLIC)%0A%0A%0A
+def ForCommand(command_cls):%0A %22%22%22Decorator to enable setting the command for each test class.%22%22%22%0A%0A def _Internal(test_cls):%0A test_cls._command_cls = command_cls%0A return test_cls%0A%0A return _Internal%0A%0A%0A
class Ba
@@ -2159,16 +2159,499 @@
,%0A %7D)%0A%0A
+ @classmethod%0A def setUpClass(cls):%0A super(BaseCommandTestCase, cls).setUpClass()%0A if not hasattr(cls, '_command_cls'):%0A raise AttributeError(%0A ('%25s is missing command initializer. All BaseCommandTestCases must'%0A ' be decorated with @ForCommand and given the command they are'%0A ' testing. For example:%5Cn%5Cn@ForCommand(simple_commands.HelpCommand'%0A ')%5Cnclass HelpCommandTest(BaseCommandTestCase):%5Cn ...') %25%0A cls.__name__)%0A%0A
def se
@@ -2861,9 +2861,146 @@
erface)%0A
+ # We disable ratelimiting for tests.%0A self.command = self._command_cls(%7B'ratelimit': %7B%0A 'enabled': False%0A %7D%7D, self.core)
%0A
|
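Decoded, the commit adds a class decorator that records which command a test case exercises, a setUpClass check that the decorator was applied, and automatic instantiation in setUp. The decorator and the new setUp tail read roughly (two-space indent per the hunk):

```python
def ForCommand(command_cls):
  """Decorator to enable setting the command for each test class."""

  def _Internal(test_cls):
    test_cls._command_cls = command_cls
    return test_cls

  return _Internal


# ... appended to BaseCommandTestCase.setUp():
    # We disable ratelimiting for tests.
    self.command = self._command_cls({'ratelimit': {
        'enabled': False
    }}, self.core)
```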
6cef7f841fc34321d68e8c85ff7f78682c59eae2
|
Add help and version text; check for IO errors
|
py-chrome-bookmarks.py
|
py-chrome-bookmarks.py
|
#!/usr/bin/python
# py-chrome-bookmarks
#
# A script to convert Google Chrome's bookmarks file to the standard HTML-ish
# format.
#
# (c) Benjamin Esham, 2011. See the accompanying README for this file's
# license and other information.
import json, sys, os, re
# html escaping code from http://wiki.python.org/moin/EscapingHtml
html_escape_table = {
    "&": "&amp;",
    '"': "&quot;",
    "'": "&#39;",
    ">": "&gt;",
    "<": "&lt;",
}
def html_escape(text):
return ''.join(html_escape_table.get(c,c) for c in text)
def sanitize(string):
res = ''
string = html_escape(string)
for i in range(len(string)):
if ord(string[i]) > 127:
res += '&#x%x;' % ord(string[i])
else:
res += string[i]
return res
def html_for_node(node):
if 'url' in node:
return html_for_url_node(node)
elif 'children' in node:
return html_for_parent_node(node)
else:
return ''
def html_for_url_node(node):
if not re.match("javascript:", node['url']):
return '<dt><a href="%s">%s</a>\n' % (sanitize(node['url']), sanitize(node['name']))
else:
return ''
def html_for_parent_node(node):
return '<dt><h3>%s</h3>\n<dl><p>%s</dl><p>\n' % (sanitize(node['name']),
''.join([html_for_node(n) for n in node['children']]))
in_file = os.path.expanduser(sys.argv[1])
out_file = os.path.expanduser(sys.argv[2])
f = open(in_file, 'r')
j = json.loads(f.read())
f.close()
out = open(out_file, 'w')
out.write("""<!DOCTYPE NETSCAPE-Bookmark-file-1>
<meta http-equiv='Content-Type' content='text/html; charset=UTF-8' />
<title>Bookmarks</title>
<h1>Bookmarks</h1>
<dl><p>
<dl>%(bookmark_bar)s</dl>
<dl>%(other)s</dl>
"""
% {'bookmark_bar': html_for_node(j['roots']['bookmark_bar']),
'other': html_for_node(j['roots']['other'])})
out.close()
|
Python
| 0
|
@@ -1214,16 +1214,764 @@
n'%5D%5D))%0A%0A
+def version_text():%0A%09old_out = sys.stdout%0A%09sys.stdout = sys.stderr%0A%0A%09print %22py-chrome-bookmarks%22%0A%09print %22(c) 2011, Benjamin Esham%22%0A%09print %22https://github.com/bdesham/py-chrome-bookmarks%22%0A%0A%09sys.stdout = old_out%0A%0Adef help_text():%0A%09version_text()%0A%0A%09old_out = sys.stdout%0A%09sys.stdout = sys.stderr%0A%0A%09print%0A%09print %22usage: python py-chrome-bookmarks input-file output-file%22%0A%09print %22 input-file is the Chrome bookmarks file%22%0A%09print %22 output-file is the destination for the generated HTML bookmarks file%22%0A%0A%09sys.stdout = old_out%0A%0A# check for help or version requests%0A%0Aif len(sys.argv) != 3 or %22-h%22 in sys.argv or %22--help%22 in sys.argv:%0A%09help_text()%0A%09exit()%0A%0Aif %22-v%22 in sys.argv or %22--version%22 in sys.argv:%0A%09version_text()%0A%09exit()%0A%0A# the actual code here...%0A%0A
in_file
@@ -2048,16 +2048,22 @@
gv%5B2%5D)%0A%0A
+try:%0A%09
f = open
@@ -2077,16 +2077,143 @@
e, 'r')%0A
+except IOError, e:%0A%09print %3E%3E sys.stderr, %22py-chrome-bookmarks: error opening the input file.%22%0A%09print %3E%3E sys.stderr, e%0A%09exit()%0A%0A
j = json
@@ -2240,16 +2240,22 @@
lose()%0A%0A
+try:%0A%09
out = op
@@ -2271,16 +2271,143 @@
le, 'w')
+%0Aexcept IOError, e:%0A%09print %3E%3E sys.stderr, %22py-chrome-bookmarks: error opening the output file.%22%0A%09print %3E%3E sys.stderr, e%0A%09exit()
%0A%0Aout.wr
|
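Decoded, the script gains version_text()/help_text() helpers, argument checks, and IOError handling around both open() calls (tab-indented Python 2, per the %09 escapes). The input-file guard, for example, becomes roughly:

```python
try:
    f = open(in_file, 'r')
except IOError, e:
    print >> sys.stderr, "py-chrome-bookmarks: error opening the input file."
    print >> sys.stderr, e
    exit()
```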
98467f55ef8526d343065da7d6a896b16539fa53
|
use consistent hash for etag
|
http_agent/utils/etag.py
|
http_agent/utils/etag.py
|
def make_entity_tag(body):
checksum = hash(body) + (1 << 64)
return '"{checksum}"'.format(checksum=checksum)
|
Python
| 0.000001
|
@@ -1,8 +1,35 @@
+from zlib import adler32%0A%0A%0A
def make
@@ -66,29 +66,29 @@
m =
-hash(body) + (1 %3C%3C 64
+adler32(body.encode()
)%0A
|
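Decoded in full, the new implementation is:

```python
from zlib import adler32


def make_entity_tag(body):
    checksum = adler32(body.encode())
    return '"{checksum}"'.format(checksum=checksum)
```

Unlike the built-in hash(), which is salted per process for strings on modern Pythons, adler32 yields the same checksum across processes and restarts, hence the "consistent hash" in the subject.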
82f6a4cf6e1e5ceef2c48811eceb93e8a7ce13e3
|
Add handler for demo.
|
filestore/file_readers.py
|
filestore/file_readers.py
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from .retrieve import HandlerBase
import six
import logging
import h5py
import numpy as np
import os.path
logger = logging.getLogger(__name__)
class _HdfMapsHandlerBase(HandlerBase):
"""
Reader for XRF data stored in hdf5 files.
The data set is assumed to be in a group called MAPS and stored
as a 3D array ordered [energy, x, y].
Parameters
----------
filename : str
Path to physical location of file
dset_path : str
The path to the dataset inside of 'MAPS'
"""
def __init__(self, filename, dset_path):
self._filename = filename
self._dset_path = dset_path
self._file = None
self._dset = None
self.open()
def open(self):
"""
Open the file for reading.
Provided as a stand alone function to allow re-opening of the handler
"""
if self._file:
return
self._file = h5py.File(self._filename, mode='r')
self._dset = self._file['/'.join(['MAPS', self._dset_path])]
def close(self):
"""
Close the underlying file
"""
super(_HdfMapsHandlerBase, self).close()
self._file.close()
def __call__(self):
if not self._file:
raise RuntimeError("File is not open")
class HDFMapsSpectrumHandler(_HdfMapsHandlerBase):
def __call__(self, x, y):
"""
Return the spectrum at the x, y position
Parameters
----------
x : int
raster index in the x direction
y : int
raster index in the y direction
Returns
-------
spectrum : ndarray
The MCA channels
"""
super(HDFMapsSpectrumHandler, self).__call__()
return self._dset[:, x, y]
class HDFMapsEnergyHandler(_HdfMapsHandlerBase):
def __call__(self, e_index):
"""
Return the raster plane at a fixed energy
Parameters
----------
e_index : int
The index of the engery
Returns
-------
plane : ndarray
The raster image at a fixed energy.
"""
super(HDFMapsEnergyHandler, self).__call__()
return self._dset[e_index, :, :]
class NpyHandler(HandlerBase):
"""
Class to deal with reading npy files
Parameters
----------
fpath : str
Path to file
mmap_mode : {'r', 'r+', c}, optional
memmap mode to use to open file
"""
def __init__(self, filename, mmap_mode=None):
self._mmap_mode = mmap_mode
if not os.path.exists(filename):
raise IOError("the requested file {fpath} does not exst")
self._fpath = filename
def __call__(self):
return np.load(self._fpath, self._mmap_mode)
|
Python
| 0
|
@@ -245,24 +245,774 @@
__name__)%0A%0A%0A
+class _HDF5HandlerBase(HandlerBase):%0A%0A def open(self):%0A if self._file:%0A return%0A self._file = h5py.File(self._filename)%0A%0A def close(self):%0A super(HDF5HandlerBase, self).close()%0A self._file.close()%0A%0A%0Aclass HDF5DatasetSliceHandler(_HDF5HandlerBase):%0A %22Handler for Stuart's first detector demo%22%0A def __init__(self, filename, frame_per_point):%0A self._filename = filename%0A self.open()%0A%0A def __call__(self, point_number):%0A dataset_name = '/entry/data/data'%0A # Don't read out the dataset until it is requested for the first time.%0A if not hasattr(self, '_dataset'):%0A self._dataset = self._file%5Bdataset_name%5D%0A return self._dataset%5Bpoint_number, :, :%5D%0A%0A%0A
class _HdfMa
@@ -1017,32 +1017,37 @@
MapsHandlerBase(
+_HDF5
HandlerBase):%0A
|
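Decoded, the commit adds a small HDF5 base class plus the demo handler, and rebases _HdfMapsHandlerBase onto it. Two apparent slips in the committed code are flagged in comments:

```python
class _HDF5HandlerBase(HandlerBase):

    def open(self):
        if self._file:
            return
        self._file = h5py.File(self._filename)

    def close(self):
        # as committed; 'HDF5HandlerBase' is undefined and presumably
        # should read _HDF5HandlerBase
        super(HDF5HandlerBase, self).close()
        self._file.close()


class HDF5DatasetSliceHandler(_HDF5HandlerBase):
    "Handler for Stuart's first detector demo"
    def __init__(self, filename, frame_per_point):
        # frame_per_point is accepted but never stored
        self._filename = filename
        self.open()

    def __call__(self, point_number):
        dataset_name = '/entry/data/data'
        # Don't read out the dataset until it is requested for the first time.
        if not hasattr(self, '_dataset'):
            self._dataset = self._file[dataset_name]
        return self._dataset[point_number, :, :]
```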
ec668c693051f70026360ac2f3bc67ced6c01a21
|
fix little bug
|
src/fb_messenger/test/test_attachements.py
|
src/fb_messenger/test/test_attachements.py
|
import unittest
class FirstTest(unittest.TestCase):
def test_first(self):
self.assertEqual(True, False, 'incorrect types')
|
Python
| 0.000001
|
@@ -108,12 +108,11 @@
ue,
-Fals
+Tru
e, '
|
72fa89b1b6052f71873f25af2feb9f77c64dd3b0
|
use calculation to build the team lists
|
mue/integration_test.py
|
mue/integration_test.py
|
from random import choice
import json
import sys
import pymue
MAX_WAY = 7.8
def way_cost(way_length):
if way_length <= 1:
return way_length * 100
elif way_length < MAX_WAY:
return (way_length * 100) ** 2
return sys.float_info.max
print "read data...."
pref = ""
if len(sys.argv) > 1:
pref = "_%s" % sys.argv[1]
with open("teams%s.json" % pref, "r") as f:
team_data = json.load(f)
with open("distances%s.json" % pref, "r") as f:
distance_data = json.load(f)
print "map teams...."
team_map = dict()
team_map_reverse = dict()
for (idx, team) in enumerate(team_data):
team_map[team["id"]] = idx
team_map_reverse[idx] = team["id"]
cnt = len(team_data)
teams = sorted(team_map.values())
print "build distance matrix..."
distance_matrix = pymue.DistanceMatrix(cnt)
for distance_run in distance_data:
for src in distance_run:
for dst in distance_run[src]:
distance_matrix.set_cost(team_map[int(src)],
team_map[int(dst)],
way_cost(distance_run[src][dst]))
calculation = pymue.Calculation(cnt, distance_matrix)
print "calculate best routes...."
cnt_hosts = cnt / 3
seen = pymue.SeenTable(cnt)
def add_meeting(host, guests, iteration_data, meetings):
meeting = {host, guests.first, guests.second}
meetings.append(meeting)
i = 0
best_distance = sys.float_info.max
best_plan = None
def deploy_host(host_idx, current_round, meetings_list, iteration_data, round_data):
"""
:param seen_table: dict of set
"""
if host_idx == cnt_hosts:
if current_round < 2:
new_round = current_round + 1
round_data = calculation.next_round_data(round_data, iteration_data)
iteration_data.clear_round_data()
return deploy_host(0, new_round, meetings_list, iteration_data, round_data)
else:
global i, best_distance, best_plan
i += 1
#print "ENDPOINT", meetings_list
if iteration_data.distance < best_distance:
print "new best (%i)" % i, iteration_data.distance, meetings_list
best_distance = iteration_data.distance
calculation.update_best(best_distance)
best_plan = meetings_list
return
tests = cnt_hosts * 3
actual_host = round_data.hosts[host_idx]
if current_round > 0:
iteration_data.distance = calculation.host_distance(round_data, actual_host) + iteration_data.distance
if iteration_data.distance >= best_distance:
return
possible_guests = calculation.determine_guest_candidates(round_data, iteration_data, actual_host)
if current_round > 0:
tests = cnt_hosts / 3
for candidate in possible_guests[:tests]:
if current_round == 0:
actual_distance = iteration_data.distance
else:
actual_distance = candidate.distance
guests = candidate.guests
new_iteration_data = pymue.IterationData(iteration_data)
new_iteration_data.distance = actual_distance
new_iteration_data.set_station(actual_host, guests.first, guests.second)
actual_meetings = list(meetings_list) # XXX copy.deepcopy(meetings)
meetings = list(actual_meetings[current_round])
actual_meetings[current_round] = meetings
add_meeting(actual_host, guests, new_iteration_data, meetings)
deploy_host(host_idx + 1, current_round, actual_meetings, new_iteration_data, round_data)
def test():
round_data = calculation.initial_round_data()
iteration_data = pymue.IterationData(cnt)
deploy_host(0, 0, [[], [], []], iteration_data, round_data)
print ""
print "======best plan======"
print "1st round:", best_plan[0]
print "2nd round:", best_plan[1]
print "3rd round:", best_plan[2]
test()
print ""
print "teams:", cnt
print "solutions that where calculated:", i
|
Python
| 0
|
@@ -54,16 +54,52 @@
rt pymue
+%0Afrom collections import defaultdict
%0A%0AMAX_WA
@@ -1412,16 +1412,372 @@
ing)%0A%0A%0A%0A
+def generate_plan(round_data, iteration_data):%0A plan = %5B%5D%0A for round_num in range(3):%0A round_set = defaultdict(set)%0A for (team_id, station) in enumerate(calculation.round_stations(pymue.Round(round_num), round_data, iteration_data)):%0A round_set%5Bstation%5D.add(team_id)%0A plan.append(round_set.values())%0A return plan%0A%0A%0A
%0Ai = 0%0Ab
@@ -2532,29 +2532,57 @@
stance,
-meetings_list
+generate_plan(round_data, iteration_data)
%0A
|
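Decoded, a generate_plan() helper rebuilds the team lists from the calculation's per-round stations, and the "new best" print reports generate_plan(round_data, iteration_data) instead of meetings_list:

```python
from collections import defaultdict


def generate_plan(round_data, iteration_data):
    plan = []
    for round_num in range(3):
        round_set = defaultdict(set)
        for (team_id, station) in enumerate(calculation.round_stations(
                pymue.Round(round_num), round_data, iteration_data)):
            round_set[station].add(team_id)
        plan.append(round_set.values())
    return plan
```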
a7bfebe4bc1cdeca333675ccdfd19e385dbc864b
|
improve leak checker flag description
|
jax/config.py
|
jax/config.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
def bool_env(varname: str, default: bool) -> bool:
"""Read an environment variable and interpret it as a boolean.
True values are (case insensitive): 'y', 'yes', 't', 'true', 'on', and '1';
false values are 'n', 'no', 'f', 'false', 'off', and '0'.
Args:
varname: the name of the variable
default: the default boolean value
Raises: ValueError if the environment variable is anything else.
"""
val = os.getenv(varname, str(default))
val = val.lower()
if val in ('y', 'yes', 't', 'true', 'on', '1'):
return True
elif val in ('n', 'no', 'f', 'false', 'off', '0'):
return False
else:
raise ValueError("invalid truth value %r for environment %r" % (val, varname))
def int_env(varname: str, default: int) -> int:
"""Read an environment variable and interpret it as an integer."""
return int(os.getenv(varname, default))
class Config:
def __init__(self):
self.values = {}
self.meta = {}
self.FLAGS = NameSpace(self.read)
self.use_absl = False
self.omnistaging_enabled = bool_env('JAX_OMNISTAGING', True)
self._omnistaging_disablers = []
def update(self, name, val):
if self.use_absl:
setattr(self.absl_flags.FLAGS, name, val)
else:
self.check_exists(name)
if name not in self.values:
raise Exception("Unrecognized config option: {}".format(name))
self.values[name] = val
def read(self, name):
if self.use_absl:
return getattr(self.absl_flags.FLAGS, name)
else:
self.check_exists(name)
return self.values[name]
def add_option(self, name, default, opt_type, meta_args, meta_kwargs):
if name in self.values:
raise Exception("Config option {} already defined".format(name))
self.values[name] = default
self.meta[name] = (opt_type, meta_args, meta_kwargs)
def check_exists(self, name):
if name not in self.values:
raise AttributeError("Unrecognized config option: {}".format(name))
def DEFINE_bool(self, name, default, *args, **kwargs):
self.add_option(name, default, bool, args, kwargs)
def DEFINE_integer(self, name, default, *args, **kwargs):
self.add_option(name, default, int, args, kwargs)
def DEFINE_string(self, name, default, *args, **kwargs):
self.add_option(name, default, str, args, kwargs)
def DEFINE_enum(self, name, default, *args, **kwargs):
self.add_option(name, default, 'enum', args, kwargs)
def config_with_absl(self):
# Run this before calling `app.run(main)` etc
import absl.flags as absl_FLAGS # noqa: F401
from absl import app, flags as absl_flags
self.use_absl = True
self.absl_flags = absl_flags
absl_defs = { bool: absl_flags.DEFINE_bool,
int: absl_flags.DEFINE_integer,
str: absl_flags.DEFINE_string,
'enum': absl_flags.DEFINE_enum }
for name, val in self.values.items():
flag_type, meta_args, meta_kwargs = self.meta[name]
absl_defs[flag_type](name, val, *meta_args, **meta_kwargs)
app.call_after_init(lambda: self.complete_absl_config(absl_flags))
def complete_absl_config(self, absl_flags):
for name, _ in self.values.items():
self.update(name, getattr(absl_flags.FLAGS, name))
def parse_flags_with_absl(self):
global already_configured_with_absl
if not already_configured_with_absl:
import absl.flags
self.config_with_absl()
absl.flags.FLAGS(sys.argv, known_only=True)
self.complete_absl_config(absl.flags)
already_configured_with_absl = True
if not FLAGS.jax_omnistaging:
self.disable_omnistaging()
def register_omnistaging_disabler(self, disabler):
if self.omnistaging_enabled:
self._omnistaging_disablers.append(disabler)
else:
disabler()
def enable_omnistaging(self):
if not self.omnistaging_enabled:
raise Exception("can't re-enable omnistaging after it's been disabled")
def disable_omnistaging(self):
if self.omnistaging_enabled:
for disabler in self._omnistaging_disablers:
disabler()
self.omnistaging_enabled = False
class NameSpace(object):
def __init__(self, getter):
self._getter = getter
def __getattr__(self, name):
return self._getter(name)
config = Config()
flags = config
FLAGS = flags.FLAGS
already_configured_with_absl = False
flags.DEFINE_bool(
'jax_enable_checks',
bool_env('JAX_ENABLE_CHECKS', False),
help='Turn on invariant checking (core.skip_checks = False)'
)
flags.DEFINE_bool(
'jax_omnistaging',
bool_env('JAX_OMNISTAGING', True),
help='Enable staging based on dynamic context rather than data dependence.'
)
flags.DEFINE_integer(
'jax_tracer_error_num_traceback_frames',
int_env('JAX_TRACER_ERROR_NUM_TRACEBACK_FRAMES', 5),
help='Set the number of stack frames in JAX tracer error messages.'
)
flags.DEFINE_bool(
'jax_check_tracer_leaks',
bool_env('JAX_CHECK_TRACER_LEAKS', False),
help='Turn on checking for leaked tracers as soon as a trace completes.'
)
|
Python
| 0.000003
|
@@ -5540,32 +5540,33 @@
alse),%0A help=
+(
'Turn on checkin
@@ -5615,12 +5615,153 @@
mpletes.
-'
+ '%0A 'Enabling leak checking may have performance impacts: some caching '%0A 'is disabled, and other overheads may be added.'),
%0A)%0A
|
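Decoded, the help string becomes a parenthesized multi-part literal:

```python
flags.DEFINE_bool(
    'jax_check_tracer_leaks',
    bool_env('JAX_CHECK_TRACER_LEAKS', False),
    help=('Turn on checking for leaked tracers as soon as a trace completes. '
          'Enabling leak checking may have performance impacts: some caching '
          'is disabled, and other overheads may be added.'),
)
```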
4979e8e5ee8ac6cb86ab260f44f052b27381eeb6
|
bump version
|
giddy/__init__.py
|
giddy/__init__.py
|
__version__ = "1.2.0"
# __version__ has to be defined in the first line
"""
:mod:`giddy` --- Spatial Dynamics and Mobility
==============================================
"""
from . import directional
from . import ergodic
from . import markov
from . import mobility
from . import rank
from . import util
|
Python
| 0
|
@@ -12,11 +12,11 @@
= %22
-1.2
+2.0
.0%22%0A
|
522906d2842d90722776f898015fde060c967401
|
Update cam.py
|
pyCam/build_0.3/cam.py
|
pyCam/build_0.3/cam.py
|
import cv2
import numpy as np
from twilio.rest import TwilioRestClient
import time
#importing modules ^^
body_cascade = cv2.CascadeClassifier('haarcascade_fullbody.xml')
#importing cascade-classfiers ^^
vc = cv2.VideoCapture(0)
#finding default camera ^^
while True:
ret, img = vc.read()
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
bodies = body_cascade.detectMultiScale(gray, 1.2, 2)
#converting img frame by frame to suitable type ^^
for (x,y,w,h) in bodies:
cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,171),2)
roi_gray = gray[y:y+h, x:x+w]
roi_color = img[y:y+h, x:x+w]
client = TwilioRestClient("AC47b13b617c5806614265237ce06fa110", "e4e74dbdf6719d769422a90225dd8814") #account_sid, auth_token for twilio accaount.
client.messages.create(to="+15122997254", from_="+15125807197", #my number, twilio number
body="Alert: person(s) on property.") #messege
time.sleep(300)
#look for features, draw box on features, sends sms upon sight of features ^^
cv2.imshow('WebDetect',img)
k = cv2.waitKey(30) & 0xff
if k == 27:
vc.release()
cv2.destroyAllWindows()
#shows video feed, ESC key kills program ^^
|
Python
| 0.000001
|
@@ -195,17 +195,16 @@
lassfier
-s
%5E%5E%0A%0Avc
@@ -654,78 +654,82 @@
nt(%22
-AC47b13b617c5806614265237ce06fa110%22, %22e4e74dbdf6719d769422a90225dd8814
+xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx%22, %22xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
%22) #
@@ -851,10 +851,12 @@
%22, #
-my
+user
num
|
10ee1e1fc58153644bb027c0534dc8f80d09e9bc
|
Define font size value as the class constant
|
pyavagen/generators.py
|
pyavagen/generators.py
|
import os
import abc
import math
from random import randint, choice
from PIL import Image, ImageDraw, ImageFilter, ImageFont
from pyavagen.validators import (
TypeValidator,
ColorValidator,
MinValueValidator,
)
from pyavagen.utils import get_random_color
class AvatarField(object):
def __init__(self, validators=None, default=None):
self.validators = validators
self.default = default
def __set_name__(self, owner, name):
self.name = name
def __get__(self, instance, owner):
return instance.__dict__[self.name]
def __set__(self, instance, value):
value = self.get_default() if not value else value
self.run_validators(value)
instance.__dict__[self.name] = value
def get_default(self):
"""
Returns the default value for this field.
"""
if self.default:
if callable(self.default):
return self.default()
return self.default
return None
def run_validators(self, value):
"""
Runs validators for a passed value.
"""
if value and self.validators:
for validator in self.validators:
validator(value, self.name)
class BaseAvatar(metaclass=abc.ABCMeta):
SIZE_MIN = 4
size = AvatarField(
validators=[
TypeValidator(int),
MinValueValidator(SIZE_MIN),
]
)
def __init__(self, size):
self.size = size
self.img = self.get_initial_img()
def get_initial_img(self):
"""
Returns new PIL.Image.Image object for self.img from __init__ method.
"""
return Image.new(
mode='RGB',
size=tuple([self.size]) * 2,
)
@abc.abstractmethod
def generate(self):
"""
Generates an image and must returns the PIL.Image.Image object.
"""
pass
class SquareAvatar(BaseAvatar):
BLUR_RADIUS_MIN = 0
BLUR_RADIUS_DEFAULT = 1
squares_quantity_on_axis = AvatarField(
validators=[
TypeValidator(int),
MinValueValidator(1),
]
)
blur_radius = AvatarField(
default=BLUR_RADIUS_DEFAULT,
validators=[
TypeValidator(int),
MinValueValidator(BLUR_RADIUS_MIN),
]
)
rotate = AvatarField(
validators=[
TypeValidator(int),
]
)
square_border = AvatarField(
validators=[
TypeValidator(str),
ColorValidator(),
]
)
def __init__(self, squares_quantity_on_axis=None, blur_radius=None,
rotate=None, square_border=None, *args, **kwargs):
super(SquareAvatar, self).__init__(*args, **kwargs)
self.blur_radius = blur_radius
self.square_border = square_border
self.rotate = rotate if rotate else randint(0, 360)
self.squares_quantity_on_axis = (
squares_quantity_on_axis if
squares_quantity_on_axis else
randint(3, 4)
)
def get_initial_img(self):
return Image.new(
mode='RGB',
size=tuple([self.size * 2]) * 2,
)
def generate(self):
draw = ImageDraw.Draw(self.img)
size2x = self.size * 2
square_side_length = size2x // self.squares_quantity_on_axis
for i in range(size2x // square_side_length):
for j in range(size2x // square_side_length):
draw.rectangle(
xy=(
i * square_side_length,
j * square_side_length,
(i + 1) * square_side_length,
(j + 1) * square_side_length
),
outline=self.square_border,
fill=get_random_color())
self.img = self.img.rotate(self.rotate)
distance_a = math.sqrt(2) * self.size / 2
distance_b = size2x - self.size - distance_a
x0 = choice([distance_a, distance_b])
y0 = choice([distance_a, distance_b])
x1 = size2x - (size2x - self.size - x0)
y1 = size2x - (size2x - self.size - y0)
self.img = self.img.crop(box=(x0, y0, x1, y1))
self.img = self.img.filter(ImageFilter.GaussianBlur(self.blur_radius))
return self.img
class CharAvatar(BaseAvatar):
DEFAULT_BACKGROUND_COLOR = get_random_color
DEFAULT_FONT = os.path.join(
os.path.abspath(os.path.dirname(__file__)), 'fonts/Comfortaa-Regular.ttf'
)
string = AvatarField(
validators=[
TypeValidator(str),
]
)
font = AvatarField(
default=DEFAULT_FONT,
validators=[
TypeValidator(str),
]
)
background_color = AvatarField(
default=DEFAULT_BACKGROUND_COLOR,
validators=[
TypeValidator(str),
ColorValidator(),
]
)
font_size = AvatarField(
validators=[
TypeValidator(int),
MinValueValidator(1)
]
)
def __init__(self, string, font=None, background_color=None,
font_size=None, *args, **kwargs):
self.background_color = background_color
super(CharAvatar, self).__init__(*args, **kwargs)
self.font_size = font_size if font_size else int(0.6 * self.size)
self.font = font
self.string = string
def get_initial_img(self):
img = Image.new(
mode='RGB',
size=tuple([self.size]) * 2,
color=self.background_color,
)
return img
def generate(self):
draw = ImageDraw.Draw(self.img)
img_width, img_height = self.img.size
font = ImageFont.truetype(font=self.font, size=self.font_size)
char = self.string[0].upper()
char_width, char_height = font.getsize(char)
char_offset_by_height = font.getoffset(char)[1]
char_position = (
(img_width - char_width) / 2,
((img_height - char_height) / 2) - char_offset_by_height / 2
)
draw.text(xy=char_position, text=char, font=font)
return self.img
class CharSquareAvatar(SquareAvatar, CharAvatar):
def generate(self):
self.img = SquareAvatar.generate(self)
self.img = CharAvatar.generate(self)
return self.img
|
Python
| 0.000527
|
@@ -4570,24 +4570,46 @@
r.ttf'%0A )
+%0A FONT_SIZE_MIN = 1
%0A%0A string
@@ -5105,17 +5105,29 @@
lidator(
-1
+FONT_SIZE_MIN
)%0A
|
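Decoded, the magic number moves into a class constant next to the other CharAvatar defaults:

```python
class CharAvatar(BaseAvatar):
    DEFAULT_BACKGROUND_COLOR = get_random_color
    DEFAULT_FONT = os.path.join(
        os.path.abspath(os.path.dirname(__file__)), 'fonts/Comfortaa-Regular.ttf'
    )
    FONT_SIZE_MIN = 1

    font_size = AvatarField(
        validators=[
            TypeValidator(int),
            MinValueValidator(FONT_SIZE_MIN)
        ]
    )
```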
64c08dfc40240c7b1b4b876b12bdb57ace22d675
|
remove print statement
|
gippy/__init__.py
|
gippy/__init__.py
|
#!/usr/bin/env python
################################################################################
# GIPPY: Geospatial Image Processing library for Python
#
# AUTHOR: Matthew Hanson
# EMAIL: matt.a.hanson@gmail.com
#
# Copyright (C) 2015 Applied Geosolutions
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
from .version import __version__
def mac_update():
""" update search path on mac """
import sys
print 'here'
if sys.platform == 'darwin':
import os
from subprocess import check_output
lib = 'libgip.so'
path = os.path.dirname(__file__)
for f in ['_gippy.so', '_algorithms.so']:
fin = os.path.join(path, f)
cmd = ['install_name_tool', '-change', lib, os.path.join(path, lib), fin]
print cmd
check_output(cmd)
mac_update()
from gippy import init, DataType, GeoImage, GeoVector, Options
# register GDAL and OGR formats
init()
# cleanup functions
del gippy
del version
del init
del mac_update
|
Python
| 0.999999
|
@@ -1032,25 +1032,8 @@
sys%0A
- print 'here'%0A
|
30f89eacb428af7091d238d39766d6481735c670
|
fix for BASH_FUNC_module on qstat output
|
igf_airflow/hpc/hpc_queue.py
|
igf_airflow/hpc/hpc_queue.py
|
import json
import subprocess
from collections import defaultdict
from tempfile import TemporaryFile
def get_pbspro_job_count(job_name_prefix=''):
'''
A function for fetching running and queued job information from a PBSPro HPC cluster
:param job_name_prefix: A text to filter running jobs, default ''
:returns: A defaultdict object with the following structure
{ job_name: {'Q': counts, 'R': counts }}
'''
try:
with TemporaryFile() as tmp_file:
subprocess.\
check_call(
['qstat','-t','-f','-F','json'],
stdout=tmp_file)
tmp_file.seek(0)
json_data = tmp_file.read()
json_data = json.loads(json_data)
jobs = json_data.get('Jobs')
active_jobs = dict()
if jobs is not None:
active_jobs = defaultdict(lambda: defaultdict(int))
if len(jobs) > 0:
for _,job_data in jobs.items():
job_name = job_data.get('Job_Name')
job_state = job_data.get('job_state')
if job_name.startswith(job_name_prefix):
if job_state == 'Q':
active_jobs[job_name]['Q'] += 1
if job_state == 'R':
active_jobs[job_name]['R'] += 1
return active_jobs
except Exception as e:
raise ValueError('Failed to get job counts from hpc, error: {0}'.format(e))
|
Python
| 0.000001
|
@@ -522,39 +522,139 @@
-%5B
'qstat
-','-t','-f','-F','json'%5D
+ -t -f -F json%7Cgrep -v BASH_FUNC_module', # this can fix or break pipeline as well%0A shell=True
,%0A
|
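Decoded, the qstat invocation becomes a shell pipeline so grep can strip the exported BASH_FUNC_module environment lines that corrupt qstat's JSON output:

```python
            subprocess.\
              check_call(
                'qstat -t -f -F json|grep -v BASH_FUNC_module', # this can fix or break pipeline as well
                shell=True,
                stdout=tmp_file)
```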
8998d0f617791f95b1ed6b4a1fffa0f71752b801
|
Update docs/params for initialization methods.
|
pybo/bayesopt/inits.py
|
pybo/bayesopt/inits.py
|
"""
Implementation of methods for sampling initial points.
"""
# future imports
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
# global imports
import numpy as np
# local imports
from ..utils import ldsample
# exported symbols
__all__ = ['init_middle', 'init_uniform', 'init_latin', 'init_sobol']
def init_middle(bounds):
return np.mean(bounds, axis=1)[None, :]
def init_uniform(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.random(bounds, n, rng)
return X
def init_latin(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.latin(bounds, n, rng)
return X
def init_sobol(bounds, rng=None):
n = 3*len(bounds)
X = ldsample.sobol(bounds, n, rng)
return X
|
Python
| 0
|
@@ -383,16 +383,96 @@
ounds):%0A
+ %22%22%22%0A Initialize using a single query in the middle of the space.%0A %22%22%22%0A
retu
@@ -525,32 +525,40 @@
_uniform(bounds,
+ n=None,
rng=None):%0A
@@ -557,33 +557,222 @@
e):%0A
-n = 3*len(bounds)
+%22%22%22%0A Initialize using %60n%60 uniformly distributed query points. If %60n%60 is %60None%60%0A then use 3D points where D is the dimensionality of the input space.%0A %22%22%22%0A n = 3*len(bounds) if (n is None) else n
%0A X =
@@ -833,32 +833,40 @@
it_latin(bounds,
+ n=None,
rng=None):%0A
@@ -865,33 +865,220 @@
e):%0A
-n = 3*len(bounds)
+%22%22%22%0A Initialize using a Latin hypercube design of size %60n%60. If %60n%60 is %60None%60%0A then use 3D points where D is the dimensionality of the input space.%0A %22%22%22%0A n = 3*len(bounds) if (n is None) else n
%0A X =
@@ -1146,16 +1146,24 @@
(bounds,
+ n=None,
rng=Non
@@ -1170,33 +1170,214 @@
e):%0A
-n = 3*len(bounds)
+%22%22%22%0A Initialize using a Sobol sequence of length %60n%60. If %60n%60 is %60None%60 then use%0A 3D points where D is the dimensionality of the input space.%0A %22%22%22%0A n = 3*len(bounds) if (n is None) else n
%0A X =
|
cfc13f7e98062a2eb5a9a96298ebc67ee79d9602
|
Use path for urls
|
src/clarityv2/deductions/admin.py
|
src/clarityv2/deductions/admin.py
|
from django.conf.urls import url
from django.contrib import admin
from django.db.models import Sum
from clarityv2.utils.views.private_media import PrivateMediaView
from .models import Deduction
class DeductionPrivateMediaView(PrivateMediaView):
model = Deduction
permission_required = 'invoices.can_view_invoice'
file_field = 'receipt'
@admin.register(Deduction)
class DeductionAdmin(admin.ModelAdmin):
list_display = ('name', 'date', 'amount')
search_fields = ('name', 'notes')
change_list_template = 'admin/deductions/deduction/change_list.html'
def changelist_view(self, request, extra_context=None):
response = super().changelist_view(request, extra_context=None)
if hasattr(response, 'context_data'):
cl = response.context_data.get('cl')
if cl:
queryset = cl.get_queryset(request)
amount = queryset.aggregate(Sum('amount'))['amount__sum']
response.context_data['total_amount'] = amount
return response
def get_urls(self):
extra = [
url(
r'^(?P<pk>.*)/file/$',
self.admin_site.admin_view(DeductionPrivateMediaView.as_view()),
name='deductions_deduction_receipt'
),
]
return extra + super().get_urls()
|
Python
| 0.000002
|
@@ -9,13 +9,8 @@
ngo.
-conf.
urls
@@ -17,19 +17,20 @@
import
-url
+path
%0Afrom dj
@@ -1085,19 +1085,20 @@
-url
+path
(%0A
@@ -1111,28 +1111,19 @@
-r'%5E(?P
+'
%3Cpk%3E
-.*)
/file/
-$
',%0A
|
a8d7afe076c14115f3282114cecad216e46e7353
|
Update scipy_effects.py
|
pydub/scipy_effects.py
|
pydub/scipy_effects.py
|
"""
This module provides scipy versions of high_pass_filter, and low_pass_filter
as well as an additional band_pass_filter.
Of course, you will need to install scipy for these to work.
When this module is imported the high and low pass filters are used when calling
audio_segment.high_pass_filter() and audio_segment.low_pass_filter() instead
of the slower, less powerful versions provided by pydub.effects.
"""
from scipy.signal import butter, sosfilt
from .utils import register_pydub_effect
def _mk_butter_filter(freq, type, order):
"""
Args:
freq: The cutoff frequency for highpass and lowpass filters. For
band filters, a list of [low_cutoff, high_cutoff]
type: "lowpass", "highpass", or "band"
order: nth order butterworth filter (default: 5th order). The
attenuation is -6dB/octave beyond the cutoff frequency (for 1st
order). A Higher order filter will have more attenuation, each level
adding an additional -6dB (so a 3rd order butterworth filter would
be -18dB/octave).
Returns:
function which can filter a mono audio segment
"""
def filter_fn(seg):
assert seg.channels == 1
nyq = 0.5 * seg.frame_rate
try:
freqs = [f / nyq for f in freq]
except TypeError:
freqs = freq / nyq
sos = butter(order, freqs, btype=type, output='sos')
y = sosfilt(sos, seg.get_array_of_samples())
return seg._spawn(y.astype(seg.array_type).tostring())
return filter_fn
@register_pydub_effect
def band_pass_filter(seg, low_cutoff_freq, high_cutoff_freq, order=5):
filter_fn = _mk_butter_filter([low_cutoff_freq, high_cutoff_freq], 'band', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def high_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'highpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
@register_pydub_effect
def low_pass_filter(seg, cutoff_freq, order=5):
filter_fn = _mk_butter_filter(cutoff_freq, 'lowpass', order=order)
return seg.apply_mono_filter_to_each_channel(filter_fn)
|
Python
| 0.000002
|
@@ -239,18 +239,39 @@
filters
-ar
+from this module%0Awill b
e used w
@@ -281,17 +281,17 @@
calling
-%0A
+
audio_se
@@ -319,16 +319,17 @@
r() and
+%0A
audio_se
@@ -360,17 +360,17 @@
instead
-%0A
+
of the s
@@ -399,16 +399,17 @@
ersions
+%0A
provided
|
7508d20bd3d6af0b2e5a886c8ea2f895d9e69935
|
Bump version: 0.2.1 → 0.2.2
|
pyfilemail/__init__.py
|
pyfilemail/__init__.py
|
# -*- coding: utf-8 -*-
__title__ = 'pyfilemail'
__version__ = '0.2.1'
__author__ = 'Daniel Flehner Heen'
__license__ = 'MIT'
__copyright__ = 'Copyright 2016 Daniel Flehner Heen'
import os
import logging
from functools import wraps
import appdirs
# Init logger
logger = logging.getLogger('pyfilemail')
level = os.getenv('PYFILEMAIL_DEBUG') and logging.DEBUG or logging.INFO
logger.setLevel(level)
# Formatter
format_string = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
formatter = logging.Formatter(format_string)
# File logger setup
datadir = appdirs.user_data_dir(appname='pyfilemail', version=__version__)
if not os.path.exists(datadir):
os.makedirs(datadir)
logfile = os.path.join(datadir, 'pyfilemail.log')
filehandler = logging.FileHandler(logfile)
filehandler.setLevel(level)
filehandler.setFormatter(formatter)
# Stream logger
streamhandler = logging.StreamHandler()
streamhandler.setLevel(logging.WARNING)
# Add handler
logger.addHandler(filehandler)
logger.addHandler(streamhandler)
# Decorator to make sure user is logged in
from errors import FMBaseError
def login_required(f):
"""Decorator function to check if user is loged in.
:raises: :class:`FMBaseError` if not logged in
"""
@wraps(f)
def check_login(cls, *args, **kwargs):
if not cls.logged_in:
raise FMBaseError('Please login to use this method')
return f(cls, *args, **kwargs)
return check_login
from users import User # lint:ok
from transfer import Transfer # lint:ok
|
Python
| 0.000001
|
@@ -62,17 +62,17 @@
= '0.2.
-1
+2
'%0A__auth
|
954043ded2d23072fd55d2a9e10db058959b9921
|
Add docstring to the Boa lexer.
|
pygments/lexers/boa.py
|
pygments/lexers/boa.py
|
# -*- coding: utf-8 -*-
import re
from pygments.lexer import RegexLexer, words
from pygments.token import *
__all__ = ['BoaLexer']
line_re = re.compile('.*?\n')
class BoaLexer(RegexLexer):
"""
http://boa.cs.iastate.edu/docs/
"""
name = 'Boa'
aliases = ['boa']
filenames = ['*.boa']
reserved = words(
('input', 'output', 'of', 'weight', 'before', 'after', 'stop', 'ifall', 'foreach', 'exists', 'function',
'break', 'switch', 'case', 'visitor', 'default', 'return', 'visit', 'while', 'if', 'else'),
suffix=r'\b', prefix=r'\b')
keywords = words(
('bottom', 'collection', 'maximum', 'mean', 'minimum', 'set', 'sum', 'top', 'string', 'int', 'bool', 'float',
'time', 'false', 'true', 'array', 'map', 'stack', 'enum', 'type'), suffix=r'\b', prefix=r'\b')
classes = words(
('Project', 'ForgeKind', 'CodeRepository', 'Revision', 'RepositoryKind', 'ChangedFile', 'FileKind', 'ASTRoot',
'Namespace', 'Declaration', 'Type', 'Method', 'Variable', 'Statement', 'Expression', 'Modifier',
'StatementKind', 'ExpressionKind', 'ModifierKind', 'Visibility', 'TypeKind', 'Person', 'ChangeKind'),
suffix=r'\b', prefix=r'\b')
operators = ('->', ':=', ':', '=', '<<', '!', '++', '||', '&&', '+', '-', '*', ">", "<")
string_sep = ('`', '\"')
built_in_functions = words(
(
# Array functions
'new', 'sort',
# Date & Time functions
'yearof', 'dayofyear', 'hourof', 'minuteof', 'secondof', 'now', 'addday', 'addmonth', 'addweek', 'addyear',
'dayofmonth', 'dayofweek', 'dayofyear', 'formattime', 'trunctoday', 'trunctohour', 'trunctominute',
'trunctomonth', 'trunctosecond', 'trunctoyear',
# Map functions
'clear', 'haskey', 'keys', 'lookup', 'remove', 'values',
# Math functions
'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'ceil', 'cos', 'cosh', 'exp', 'floor',
'highbit', 'isfinite', 'isinf', 'isnan', 'isnormal', 'log', 'log10', 'max', 'min', 'nrand', 'pow', 'rand',
'round', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc',
# Other functions
'def', 'hash', 'len',
# Set functions
'add', 'contains', 'remove',
# String functions
'format', 'lowercase', 'match', 'matchposns', 'matchstrs', 'regex', 'split', 'splitall', 'splitn',
'strfind', 'strreplace', 'strrfind', 'substring', 'trim', 'uppercase',
# Type Conversion functions
'bool', 'float', 'int', 'string', 'time',
# Domain-Specific functions
'getast', 'getsnapshot', 'hasfiletype', 'isfixingrevision', 'iskind', 'isliteral',
),
prefix=r'\b',
suffix=r'\(')
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(reserved, Keyword.Reserved),
(built_in_functions, Name.Function),
(keywords, Keyword.Type),
(classes, Name.Classes),
(words(operators), Operator),
(r'[][(),;{}\\.]', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String),
(r'`(\\\\|\\`|[^`])*`', String),
(words(string_sep), String.Delimeter),
(r'[a-zA-Z_]+', Name.Variable),
(r'[0-9]+', Number.Integer),
(r'\s+?', Text), # Whitespace
]
}
|
Python
| 0
|
@@ -18,16 +18,234 @@
-8 -*-%0D%0A
+%22%22%22%0D%0A pygments.lexers.boa%0D%0A ~~~~~~~~~~~~~~~~~~~~%0D%0A%0D%0A Lexers for the Boa language.%0D%0A%0D%0A :copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.%0D%0A :license: BSD, see LICENSE for details.%0D%0A%22%22%22%0D%0A%0D%0A
import r
@@ -430,16 +430,36 @@
%22%22%0D%0A
+Lexer for the %60Boa %3C
http://b
@@ -481,16 +481,58 @@
du/docs/
+%3E%60_ language.%0D%0A%0D%0A .. versionadded:: 2.4
%0D%0A %22%22
|
08f9d84cd32d89bec89ee600b70bdbe6e3909070
|
version bump
|
gooey/__init__.py
|
gooey/__init__.py
|
import os
from gooey.python_bindings.gooey_decorator import Gooey
from gooey.python_bindings.gooey_parser import GooeyParser
# from gooey.gui import application
version_file = os.path.join(os.path.dirname(__file__), 'version')
__version__ = '0.8.15.2'
|
Python
| 0.000001
|
@@ -253,10 +253,8 @@
.8.1
-5.2
+6
'%0D%0A
|
dc5a30caf4e2668e0e89e0800787930a3fb40dd1
|
add myself to docs authors in latex
|
pylons/docs/en/conf.py
|
pylons/docs/en/conf.py
|
# -*- coding: utf-8 -*-
#
# Pylons documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 21 20:41:33 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
# If your extensions are in another directory, add it here.
#sys.path.append('some/directory')
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']
intersphinx_mapping = {'http://www.sqlalchemy.org/docs/05/': None}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = 'Pylons'
copyright = '2008, 2009, Ben Bangert, James Gardner, Philip Jenvey'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents.
#
# The short X.Y version.
version = '0.9.7'
# The full version, including alpha/beta/rc tags.
release = '0.9.7'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
html_style = 'default.css'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Content template for the index page.
#html_index = ''
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_use_modindex = True
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pylonsdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Pylons.tex', 'Pylons Reference Documentation',
'Ben Bangert, Graham Higgins, James Gardner', 'manual', 'toctree_only'),
]
# Additional stuff for the LaTeX preamble.
latex_preamble = '''
\usepackage{palatino}
\definecolor{TitleColor}{rgb}{0.7,0,0}
\definecolor{InnerLinkColor}{rgb}{0.7,0,0}
\definecolor{OuterLinkColor}{rgb}{0.8,0,0}
\definecolor{VerbatimColor}{rgb}{0.985,0.985,0.985}
\definecolor{VerbatimBorderColor}{rgb}{0.8,0.8,0.8}
'''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
|
Python
| 0
|
@@ -4108,16 +4108,31 @@
Gardner
+, Philip Jenvey
', 'manu
@@ -4135,16 +4135,19 @@
manual',
+%0A
'toctre
|
f7066d6bdd4fefbf517cd8ab44951955bb9f3a2a
|
Fix min/max for None types
|
gpaw/setup/gcc.py
|
gpaw/setup/gcc.py
|
#!/usr/bin/env python3
"""Wrapper for the GNU compiler that converts / removes incompatible
compiler options and allows for file-specific tailoring."""
import sys
from subprocess import call
# Default compiler and options
compiler = 'gcc'
args2change = {}
fragile_files = ['c/xc/tpss.c']
# Default optimisation settings
default_level = 3
default_flags = ['-funroll-loops']
fragile_level = 2
fragile_flags = []
# Puhti (Bull Sequana X1000)
if True:
compiler = 'mpicc'
default_flags += ['-march=cascadelake']
# Sisu (Cray XC40)
if not True:
compiler = 'cc'
default_flags += ['-march=haswell -mtune=haswell -mavx2']
fragile_files += ['c/xc/revtpss.c']
# Taito (HP cluster)
if not True:
compiler = 'mpicc'
default_flags += ['-ffast-math -march=sandybridge -mtune=haswell']
optimise = None # optimisation level 0/1/2/3 (None == default)
debug = False # use -g or not
fragile = False # use special flags for current file?
sandwich = True # use optimisation flag twice (= no override possible)
# process arguments
args = []
for arg in sys.argv[1:]:
arg = arg.strip()
if arg.startswith('-O'):
level = int(arg.replace('-O',''))
if not optimise or level > optimise:
optimise = level
elif arg == '-g':
debug = True
elif arg in args2change:
if args2change[arg]:
args.append(args2change[arg])
else:
if arg in fragile_files:
fragile = True
args.append(arg)
# set default optimisation level and flags
if fragile:
optimise = min(fragile_level, optimise)
flags = fragile_flags
else:
optimise = max(default_level, optimise)
flags = default_flags
# add optimisation level to flags
if optimise is not None:
flags.insert(0, '-O{0}'.format(optimise))
if sandwich:
args.append('-O{0}'.format(optimise))
# make sure -g is always the _first_ flag, so it doesn't mess e.g. with the
# optimisation level
if debug:
flags.insert(0, '-g')
# construct and execute the compile command
cmd = '{0} {1} {2}'.format(compiler, ' '.join(flags), ' '.join(args))
print(cmd)
call(cmd, shell=True)
|
Python
| 0.000072
|
@@ -1540,16 +1540,49 @@
ragile:%0A
+ if optimise is not None:%0A
opti
@@ -1645,24 +1645,57 @@
flags%0Aelse:%0A
+ if optimise is not None:%0A
optimise
|
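The guard added above exists because the script declares #!/usr/bin/env python3, and on Python 3 min() and max() refuse None operands (Python 2 silently ordered None below numbers). A minimal reproduction of the pattern, reusing the snippet's names:

fragile_level, default_level = 2, 3
optimise = None  # stays None when no -O flag was parsed

# min(fragile_level, None) would raise TypeError on Python 3,
# so the comparison only happens once a level is known
if optimise is not None:
    optimise = min(fragile_level, optimise)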
0b6b7ab518362445f3901f8d3b0d6281e2671c3f
|
Make code python3 compatible
|
drivers/python/setup.py
|
drivers/python/setup.py
|
# Copyright 2010-2012 RethinkDB, all rights reserved.
from setuptools import setup, Extension
from distutils.command.build_ext import build_ext
from distutils.errors import DistutilsPlatformError, CCompilerError, DistutilsExecError
import sys
class build_ext_nofail(build_ext):
# This class can replace the build_ext command with one that does not fail
# when the extension fails to build.
def run(self):
try:
build_ext.run(self)
except DistutilsPlatformError, e:
self._failed(e)
def build_extension(self, ext):
try:
build_ext.build_extension(self, ext)
except (CCompilerError, DistutilsExecError), e:
self._failed(e)
else:
try:
import google.protobuf.internal.cpp_message
except ImportError:
print >> sys.stderr, "*** WARNING: The installed protobuf library does not seem to include the C++ extension"
print >> sys.stderr, "*** WARNING: The RethinkDB driver will fallback to using the pure python implementation"
def _failed(self, e):
print >> sys.stderr, "*** WARNING: Unable to compile the C++ extension"
print >> sys.stderr, e
print >> sys.stderr, "*** WARNING: Defaulting to the python implementation"
setup(name="rethinkdb"
,version="1.7.0-2"
,description="This package provides the Python driver library for the RethinkDB database server."
,url="http://rethinkdb.com"
,maintainer="RethinkDB Inc."
,maintainer_email="bugs@rethinkdb.com"
,packages=['rethinkdb']
,install_requires=['protobuf']
,ext_modules=[Extension('rethinkdb_pbcpp', sources=['./rethinkdb/pbcpp.cpp', './rethinkdb/ql2.pb.cc'],
include_dirs=['./rethinkdb'], libraries=['protobuf'])]
,cmdclass={"build_ext":build_ext_nofail}
)
|
Python
| 0.000222
|
@@ -495,17 +495,19 @@
ormError
-,
+ as
e:%0A
@@ -680,17 +680,19 @@
ecError)
-,
+ as
e:%0A
|
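For readers unfamiliar with the change above: `except E, e:` is Python 2-only syntax and a SyntaxError on Python 3, while the `as` form works on Python 2.6+ and every Python 3 release. A one-line illustration:

try:
    int('not a number')
except ValueError as e:  # portable spelling; "except ValueError, e:" fails on Python 3
    print(e)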
7418379d959cba0e96161c9e61f340541b82d85f
|
clean up xor a bit
|
python/examples/xor.py
|
python/examples/xor.py
|
# Copyright Hugh Perkins 2015
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at http://mozilla.org/MPL/2.0/.
from __future__ import print_function
# import sys
# import array
import PyDeepCL
import random
import numpy as np
def go():
print('xor')
random.seed(1)
cl = PyDeepCL.DeepCL()
net = PyDeepCL.NeuralNet(cl, 2, 1)
# net.addLayer(PyDeepCL.InputLayerMaker().numPlanes(2).imageSize(1))
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())
net.addLayer(PyDeepCL.ActivationMaker().sigmoid())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())
# net.addLayer( PyDeepCL.FullyConnectedMaker().numPlanes(10).imageSize(1).biased().linear() )
#net.addLayer( PyDeepCL.SquareLossMaker() )
net.addLayer(PyDeepCL.SoftMaxMaker())
print(net.asString())
data = [
{'data': [-1, -1], 'label': 0},
{'data': [1, -1], 'label': 1},
{'data': [-1, 1], 'label': 1},
{'data': [1, 1], 'label': 0}
]
N = len(data)
planes = 2
size = 1
images = np.zeros((N, planes, size, size), dtype=np.float32)
labels = np.zeros((N,), dtype=np.int32)
for n in range(N):
images[n,0,0,0] = data[n]['data'][0]
images[n,1,0,0] = data[n]['data'][1]
labels[n] = data[n]['label']
sgd = PyDeepCL.SGD(cl, 0.1, 0.0)
netLearner = PyDeepCL.NetLearner(
sgd, net,
N, images.reshape((images.size,)), labels,
N, images.reshape((images.size,)), labels,
N)
netLearner.setSchedule(2000)
netLearner.run()
if __name__ == '__main__':
go()
|
Python
| 0
|
@@ -25,9 +25,9 @@
201
-5
+6
%0A#%0A#
@@ -226,16 +226,45 @@
L/2.0/.%0A
+%22%22%22%0ASimple example of xor%0A%22%22%22
%0Afrom __
@@ -297,37 +297,8 @@
tion
-%0A%0A# import sys%0A# import array
%0Aimp
@@ -376,869 +376,8 @@
')%0A%0A
- random.seed(1)%0A cl = PyDeepCL.DeepCL()%0A net = PyDeepCL.NeuralNet(cl, 2, 1)%0A # net.addLayer(PyDeepCL.InputLayerMaker().numPlanes(2).imageSize(1))%0A net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())%0A net.addLayer(PyDeepCL.ActivationMaker().sigmoid())%0A net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())%0A net.addLayer(PyDeepCL.ActivationMaker().sigmoid())%0A # net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())%0A # net.addLayer(PyDeepCL.FullyConnectedMaker().numPlanes(2).imageSize(1).biased().relu())%0A # net.addLayer( PyDeepCL.FullyConnectedMaker().numPlanes(10).imageSize(1).biased().linear() )%0A #net.addLayer( PyDeepCL.SquareLossMaker() )%0A net.addLayer(PyDeepCL.SoftMaxMaker())%0A print(net.asString())%0A%0A
@@ -565,16 +565,34 @@
n(data)%0A
+ batchSize = N%0A
plan
@@ -611,16 +611,506 @@
ize = 1%0A
+ learningRate = 0.1%0A numEpochs = 4000%0A%0A cl = PyDeepCL.DeepCL()%0A net = PyDeepCL.NeuralNet(cl, planes, size)%0A net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())%0A net.addLayer(PyDeepCL.ActivationMaker().sigmoid())%0A net.addLayer(PyDeepCL.ConvolutionalMaker().numFilters(2).filterSize(1).padZeros().biased())%0A net.addLayer(PyDeepCL.ActivationMaker().sigmoid())%0A net.addLayer(PyDeepCL.SoftMaxMaker())%0A print(net.asString())%0A%0A
imag
@@ -1245,45 +1245,40 @@
-images%5Bn,0,0,0%5D = data%5Bn%5D%5B'data'%5D%5B0%5D%0A
+for plane in range(planes):%0A
@@ -1290,17 +1290,21 @@
mages%5Bn,
-1
+plane
,0,0%5D =
@@ -1319,17 +1319,21 @@
'data'%5D%5B
-1
+plane
%5D%0A
@@ -1391,19 +1391,28 @@
SGD(cl,
-0.1
+learningRate
, 0.0)%0A
@@ -1576,17 +1576,25 @@
-N
+batchSize
)%0A ne
@@ -1618,12 +1618,17 @@
ule(
-2000
+numEpochs
)%0A
|
9729c3aecccfa8130db7b5942c423c0807726f81
|
Add feature importance bar chart.
|
python/gbdt/_forest.py
|
python/gbdt/_forest.py
|
from libgbdt import Forest as _Forest
class Forest:
def __init__(self, forest):
if type(forest) is str or type(forest) is unicode:
self._forest = _Forest(forest)
elif type(forest) is _Forest:
self._forest = forest
else:
raise TypeError, 'Unsupported forest type: {0}'.format(type(forest))
def predict(self, data_store):
"""Computes prediction scores for data_store."""
return self._forest.predict(data_store._data_store)
def feature_importance(self):
"""Outputs list of feature importances in descending order."""
return self._forest.feature_importance()
def __str__(self):
return self._forest.as_json()
|
Python
| 0
|
@@ -656,16 +656,631 @@
ance()%0A%0A
+ def feature_importance_bar_chart(self, color='blue'):%0A try:%0A from matplotlib import pyplot as plt%0A import numpy%0A except ImportError:%0A raise ImportError('Please install matplotlib and numpy.')%0A%0A fimps = self.feature_importance()%0A importances = %5Bv for _, v in fimps%5D%0A features = %5Bf for f,_ in fimps%5D%0A ind = -numpy.arange(len(fimps))%0A%0A _, ax = plt.subplots()%0A plt.barh(ind, importances, align='center', color=color)%0A ax.set_yticks(ind)%0A ax.set_yticklabels(features)%0A ax.set_xlabel('Feature importance')%0A%0A
def
|
f9a4a4a79896a3864f7c80702292c8bcc1ddd860
|
Make rrdtool support optional
|
python/nagcat/trend.py
|
python/nagcat/trend.py
|
# Copyright 2008-2009 ITA Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RRDTool Trending"""
import os
import re
from twisted.python import logfile
import rrdtool
from nagcat import log, util
_rradir = None
def init(dir):
global _rradir
assert dir
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError, ex:
raise util.InitError("Cannot create %s: %s" % (repr(dir), ex))
if not os.path.isdir(dir):
raise util.InitError("%s is not a directory!" % repr(dir))
if not os.access(dir, os.R_OK | os.W_OK | os.X_OK):
raise util.InitError("%s is not readable and/or writeable!" % repr(dir))
_rradir = dir
def Trend(config):
"""Generator for Trend objects, will return either a trending
object or None depending on if trending is enabled.
"""
if _rradir:
return _Trend(config, _rradir)
else:
return None
class _Trend(object):
#: Valid rrdtool data source types
TYPES = ("GAUGE", "COUNTER", "DERIVE", "ABSOLUTE")
#: RRAs required for graphing, (period, interval) in seconds
RRAS = ((1440, 60), # 4 hours of 1 minute intervals
(86400, 300), # 1 day of 5 minute intervals
(604800, 1800), # 7 days of 30 minute intervals
(2678400, 7200), # 31 days of 2 hour intervals
(31622400, 86400)) # 366 days of 1 day intervals
def __init__(self, config, dirname, start=None):
self.conf = config
self.conf.expand()
self.type = self.conf.get('type', "").upper()
self.step = int(self.conf.get('repeat'))
self.alerts = bool(self.conf.get('alerts', False))
self.season = int(util.Interval(self.conf.get('season', '1d')))
self.alpha = float(self.conf.get('alpha', 0.001))
self.beta = float(self.conf.get('beta', 0.0001))
self.gamma = float(self.conf.get('gamma', 0.2))
self.start = start
self.dirname = dirname
self.basename = "%s-%s" % (
re.sub("[^a-z0-9_\.]", "", self.conf['host'].lower()),
re.sub("[^a-z0-9_\.]", "", self.conf['name'].lower()))
self.rrdfile = os.path.join(self.dirname, "%s.rrd" % self.basename)
self.logfile = logfile.LogFile("%s.log" % self.basename, self.dirname,
rotateLength=1024*1024, maxRotatedFiles=2)
# Default to a 1 minute step when repeat is useless
if self.step == 0:
self.step = 60
if self.type not in self.TYPES:
raise util.KnownError("Invalid data source type: %s" % self.type)
if os.path.exists(self.rrdfile):
self.validate()
else:
self.create()
def create(self):
# For now just create archives with the minimal data required
# to generate cacti graphs since that's where the data will be
# displayed. More options can be added later...
log.info("Creating RRA: %s", self.rrdfile)
args = ["--step", str(self.step)]
if self.start:
args += ["--start", str(self.start)]
# Don't allow more than 1 missed update
# TODO: support more than one data source
args.append("DS:default:%s:%d:U:U" % (self.type, self.step*2))
for period, interval in self.RRAS:
if interval < self.step:
continue
steps = interval // self.step
rows = period // steps
args.append("RRA:MAX:0.5:%d:%d" % (steps, rows))
args.append("RRA:AVERAGE:0.5:%d:%d" % (steps, rows))
# The seasonal period is defined in terms of data points.
# Save 5 seasons of data for now, not sure what the best value is...
season_rows = self.season // self.step
record_rows = season_rows * 5
args.append("RRA:HWPREDICT:%d:%f:%f:%d" %
(record_rows, self.alpha, self.beta, season_rows))
rrdtool.create(self.rrdfile, *args)
rrdtool.tune(self.rrdfile, "--gamma", str(self.gamma),
"--gamma-deviation", str(self.gamma))
self.validate()
def validate(self):
info = rrdtool.info(self.rrdfile)
assert info['step'] == self.step
assert info['ds']['default']['type'] == self.type
assert info['ds']['default']['minimal_heartbeat'] == self.step*2
for rra in info['rra']:
if rra['cf'] in ('AVERAGE', 'MAX'):
assert rra['xff'] == 0.5
def update(self, time, value):
try:
float(value)
except ValueError:
# Value is not a number so mark it unknown.
value = "U"
log.debug("Updating %s with %s", self.rrdfile, value)
self.logfile.write("%s %s\n" % (time, value))
rrdtool.update(self.rrdfile, "%s:%s" % (time, value))
|
Python
| 0
|
@@ -661,16 +661,25 @@
logfile%0A
+try:%0A
import r
@@ -684,16 +684,55 @@
rrdtool
+%0Aexcept ImportError:%0A rrdtool = None
%0A%0Afrom n
@@ -805,16 +805,16 @@
_rradir%0A
-
asse
@@ -821,16 +821,118 @@
rt dir%0A%0A
+ if rrdtool is None:%0A raise util.InitError(%22The python module 'rrdtool' is not installed%22)%0A%0A
if n
|
6c036e6ba44793261dea6017091423af3624050c
|
Fix SettingWithCopyWarning
|
pytrthree/dataframe.py
|
pytrthree/dataframe.py
|
import io
import re
from typing import Sequence, Union
import numpy as np
import pandas as pd
import pytz
from . import utils
logger = utils.make_logger('pytrthree')
TRTHFile = Union[str, io.TextIOWrapper]
class TRTHIterator:
"""
Helper class to parse a set of TRTH .csv.gz files
and yield DataFrame grouped by RIC.
"""
def __init__(self, files, chunksize=10 ** 6):
"""
Validates input files and initializes iterator.
:param files: Compressed CSV files downloaded from the TRTH API
:param chunksize: Number of rows to be parsed per iteration.
Higher number causes higher memory usage.
"""
self.files = self._validate_input(files)
self.chunksize = chunksize
self.iter = self.make_next()
def __iter__(self):
return self
def __next__(self):
return next(self.iter)
@staticmethod
def _validate_input(files: Union[TRTHFile, Sequence[TRTHFile]]) -> Sequence[TRTHFile]:
if isinstance(files, (str, io.TextIOWrapper)):
files = [files]
for f in files:
if not isinstance(f, (str, io.TextIOWrapper)):
raise ValueError(f'Invalid input: {f}')
output = []
for file in files:
fname = file.name if isinstance(file, io.TextIOWrapper) else file
try:
_, ftype = utils.parse_rid_type(fname)
except (IndexError, ValueError):
logger.debug(f'Ignoring {fname}')
continue
if ftype in {'confirmation', 'report'}:
logger.debug(f'Ignoring {fname}')
continue
else:
output.append(file)
return sorted(output)
def make_next(self):
"""Iterates over input files and generates single-RIC DataFrames"""
for file in self.files:
lastrow = None
chunks = pd.read_csv(file, iterator=True, chunksize=self.chunksize)
for i, chunk in enumerate(chunks):
fname = file.name if isinstance(file, io.TextIOWrapper) else file
logger.info('{} chunk #{}'.format(fname.split('/')[-1], i+1))
for ric, df in chunk.groupby('#RIC'):
processed_df = self.pre_process(df.copy(), lastrow)
yield (ric, processed_df)
lastrow = None
lastrow = processed_df.iloc[-1]
@staticmethod
def pre_process(df, lastrow=None) -> pd.DataFrame:
"""Generates a unique DateTimeIndex and drops datetime-related columns"""
def find_columns(df, pattern):
try:
return [i for i in df.columns if re.search(pattern, i)][0]
except IndexError:
return None
# Remove characters that cause problems in MongoDB/Pandas (itertuples)
df.columns = [re.sub('\.|-|#', '', col) for col in df.columns]
df = df.dropna(axis=1, how='all')
        # Make DateTimeIndex
date_col = find_columns(df, 'Date')
time_col = find_columns(df, 'Time')
gmt_col = find_columns(df, 'GMT')
if time_col:
df.index = pd.to_datetime(df[date_col].astype(str) + ' ' + df[time_col])
df.drop([date_col, time_col], axis=1, inplace=True)
else:
df.index = pd.to_datetime(df[date_col].astype(str))
df.drop(date_col, axis=1, inplace=True)
return df
# Add small offset to repeated timestamps to make timeseries index unique.
offset = pd.DataFrame(df.index).groupby(0).cumcount() * np.timedelta64(1, 'us')
df.index += offset.values
# Make DateTimeIndex timezone-aware
if gmt_col:
assert len(df[gmt_col].value_counts()) == 1
df.index = df.index + pd.Timedelta(hours=df.ix[0, gmt_col])
df.index = df.index.tz_localize(pytz.FixedOffset(9 * 60))
df.drop(gmt_col, axis=1, inplace=True)
else:
df.index = df.index.tz_localize(pytz.timezone('utc'))
# Make sure rows separated by chunks have different timestamps
if lastrow is not None:
if lastrow['RIC'] == df.ix[0, 'RIC'] and lastrow.name == df.index[0]:
logger.debug(f'Adjusting first row timestamp: {df.ix[0, "RIC"]}')
df.index.values[0] += np.timedelta64(1, 'us')
return df
|
Python
| 0
|
@@ -3271,32 +3271,37 @@
ol%5D)%0A
+ df =
df.drop(%5Bdate_c
@@ -3317,38 +3317,24 @@
col%5D, axis=1
-, inplace=True
)%0A el
@@ -3404,32 +3404,37 @@
tr))%0A
+ df =
df.drop(date_co
@@ -3438,38 +3438,24 @@
_col, axis=1
-, inplace=True
)%0A
|
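The substitution above is the usual way to silence pandas' SettingWithCopyWarning: calling drop(..., inplace=True) on a frame that may be a view of another frame mutates through the view, whereas rebinding the name to the return value always operates on a fresh object. An illustrative sketch (frame and subset are made-up names, not from the source):

import pandas as pd

frame = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
subset = frame[frame['a'] > 1]            # may be a view of frame
# subset.drop('b', axis=1, inplace=True)  # can trigger SettingWithCopyWarning
subset = subset.drop('b', axis=1)         # rebinding avoids mutating a possible view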
851fe0dad512dbf9888638566135d1f8cd0dd853
|
fix #4036
|
gui/qt/qrtextedit.py
|
gui/qt/qrtextedit.py
|
from electrum.i18n import _
from electrum.plugins import run_hook
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import QFileDialog
from .util import ButtonsTextEdit, MessageBoxMixin, ColorScheme
class ShowQRTextEdit(ButtonsTextEdit):
def __init__(self, text=None):
ButtonsTextEdit.__init__(self, text)
self.setReadOnly(1)
self.addButton(":icons/qrcode.png", self.qr_show, _("Show as QR code"))
run_hook('show_text_edit', self)
def qr_show(self):
from .qrcodewidget import QRDialog
try:
s = str(self.toPlainText())
except:
s = self.toPlainText()
QRDialog(s).exec_()
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Show as QR code"), self.qr_show)
m.exec_(e.globalPos())
class ScanQRTextEdit(ButtonsTextEdit, MessageBoxMixin):
def __init__(self, text="", allow_multi=False):
ButtonsTextEdit.__init__(self, text)
self.allow_multi = allow_multi
self.setReadOnly(0)
self.addButton(":icons/file.png", self.file_input, _("Read file"))
icon = ":icons/qrcode_white.png" if ColorScheme.dark_scheme else ":icons/qrcode.png"
self.addButton(icon, self.qr_input, _("Read QR code"))
run_hook('scan_text_edit', self)
def file_input(self):
fileName, __ = QFileDialog.getOpenFileName(self, 'select file')
if not fileName:
return
with open(fileName, "r") as f:
data = f.read()
self.setText(data)
def qr_input(self):
from electrum import qrscanner, get_config
try:
data = qrscanner.scan_barcode(get_config().get_video_device())
except BaseException as e:
self.show_error(str(e))
data = ''
if not data:
data = ''
if self.allow_multi:
new_text = self.text() + data + '\n'
else:
new_text = data
self.setText(new_text)
return data
def contextMenuEvent(self, e):
m = self.createStandardContextMenu()
m.addAction(_("Read QR code"), self.qr_input)
m.exec_(e.globalPos())
|
Python
| 0.000001
|
@@ -1495,16 +1495,33 @@
return%0A
+ try:%0A
@@ -1559,24 +1559,28 @@
+
+
data = f.rea
@@ -1583,16 +1583,139 @@
.read()%0A
+ except BaseException as e:%0A self.show_error(_('Error opening file') + ':%5Cn' + str(e))%0A else:%0A
|
50d44ec25eb102451a495dd645ffb2a6f77012ae
|
Add a shortcut for imports
|
queue_util/__init__.py
|
queue_util/__init__.py
|
Python
| 0.000002
|
@@ -0,0 +1,82 @@
+from queue_util.consumer import Consumer%0Afrom queue_util.producer import Producer%0A
|
|
bbc250fa9e961ad273ad90152e42500ae4241890
|
Add hotfix for slow joiner symptom
|
queuing/zmq-simplex.py
|
queuing/zmq-simplex.py
|
# -*- coding: utf8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4 expandtab
'''Hands-on simplex chat implementation.
The module supports command line invocation (any order of invocation):
TerminalA> python3 -m zmq-simplex -s
TerminalB> python3 -m zmq-simplex -p
(TerminalB)> go type stuff
(TerminalA){'id': datetime.datetime(...), 'message': 'go type stuff'}
'''
import zmq
class Simplexer:
'''Rudimentary simplex publisher-subscriber implementation (unsolicited
message exchange).
This is a simple, one-directional chat application without indicators
for publisher initiated line termination. See zmq-one-off-handshake.py
for details.
ØMQ essentials:
Publishing:
zmq.Context().socket(PUB)
socket.bind()
socket.send_pyobj()
Subscription:
zmq.Context().socket(SUB)
socket.connect()
socket.recv_pyobj()
'''
def __init__(self):
'''Initialise ØMQ context and define socket options for publishing and
subscription.
'''
# The zmq_connect() function connects the socket to an endpoint and
# then accepts incoming connections on that endpoint.
#
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Context
# http://api.zeromq.org/4-0:zmq-connect
self.context = zmq.Context()
# The endpoint is a string consisting of a transport :// followed by
# an address. The transport specifies the underlying protocol to use.
# The address specifies the transport-specific address to connect to.
        # ØMQ provides the following transports:
# http://api.zeromq.org/4-0:zmq_tcp
# http://api.zeromq.org/4-0:zmq_ipc (local inter-process)
# http://api.zeromq.org/4-0:zmq_inproc (local in-process)
# http://api.zeromq.org/4-0:zmq_pgm (multicast)
self.address = 'tcp://127.0.0.1:5555'
# A socket of type ZMQ_SUB is used by a subscriber to subscribe to
# data distributed by a publisher. Initially a ZMQ_SUB socket is not
# subscribed to any messages, use the ZMQ_SUBSCRIBE option of
# zmq_setsockopt(3) to specify which messages to subscribe to. The
# zmq_send() function is not implemented for this socket type.
#
# http://api.zeromq.org/4-0:zmq-socket#toc10
self.subscribe_sock = zmq.SUB
self.publish_sock = zmq.PUB
# The ZMQ_SUBSCRIBE option shall establish a new message filter on a
# ZMQ_SUB socket. Newly created ZMQ_SUB sockets shall filter out all
# incoming messages, therefore you should call this option to establish
# an initial message filter.
#
# An empty option_value of length zero shall subscribe to all incoming
# messages. A non-empty option_value shall subscribe to all messages
# beginning with the specified prefix. Multiple filters may be
# attached to a single ZMQ_SUB socket, in which case a message shall
# be accepted if it matches at least one filter.
#
# See http://api.zeromq.org/4-0:zmq-setsockopt#toc6
self.subscribe_allmsg = (zmq.SUBSCRIBE, b'')
# Register a simple termination signal
# Note: Signalling in general is a tricky thing so don't expect much
# at this point.
def terminate(signal, frame):
'''Signal callback to terminate the program.'''
print("Terminating")
import sys
self.__del__()
sys.exit() # Apparently this is the only way to abort input()
from signal import signal, SIGINT
signal(SIGINT, terminate)
def __del__(self):
'''Close all open resources on object disposal.'''
if self.context.closed:
return
self.sock.close()
self.context.term()
def publish(self):
'''Create and open sending endpoint of channel. Also read text from
STDIN and publish it to the channel.
Beware that subscribe() and publish() are mutually exclusive, here.
'''
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Context.socket
self.sock = self.context.socket(self.publish_sock)
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.bind
self.sock.bind(self.address)
print("Publishing on {}. Ctrl-C to abort.".format(self.address))
from datetime import datetime
while True:
text = input("> ")
# Note: Common Python data structures have build-in support
msg = {"topic": "chat", "id": datetime.now(), "message": text}
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.send_pyobj
# Send Flags: http://api.zeromq.org/4-0:zmq-send#toc2
self.sock.send_pyobj(msg)
def subscribe(self):
'''Create and open receiving endpoint of channel. Also subscribe to
messages from the channel and write then to STDOUT.
Beware that subscribe() and publish() are mutually exclusive, here.
'''
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Context.socket
self.sock = self.context.socket(self.subscribe_sock)
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.setsockopt
self.sock.setsockopt(*self.subscribe_allmsg)
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.connect
self.sock.connect(self.address)
print("Subscribing to {}. Ctrl-C to abort.".format(self.address))
while True:
# https://zeromq.github.io/pyzmq/api/zmq.html#zmq.Socket.recv_pyobj
# Receive Flags: http://api.zeromq.org/4-0:zmq-recv#toc2
msg = self.sock.recv_pyobj()
print(msg)
# Some trivial CLI
if __name__ == '__main__':
from argparse import ArgumentParser
p = ArgumentParser(
description='send a message through a simplex channel using 0mq'
)
modes = p.add_mutually_exclusive_group(required=True)
modes.add_argument(
'-p', '--publish',
action='store_true',
help='assume publisher position in simplex channel',
)
modes.add_argument(
'-s', '--subscribe',
action='store_true',
help='assume subscriber position in simplex channel',
)
channel = Simplexer()
args = p.parse_args()
if args.publish:
channel.publish()
elif args.subscribe:
channel.subscribe()
|
Python
| 0
|
@@ -4449,32 +4449,188 @@
t(self.address))
+%0A import time%0A time.sleep(1) # HOTFIX for slow joiner symptom%0A # http://zguide.zeromq.org/page:all#sockets-and-patterns
%0A%0A from d
|
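The one-second sleep added above is the classic band-aid for ZeroMQ's slow-joiner symptom: a PUB socket starts sending as soon as it is bound, and anything published before a SUB finishes connecting is silently dropped. A stripped-down illustration of the workaround (address reused from the snippet):

import time
import zmq

ctx = zmq.Context()
pub = ctx.socket(zmq.PUB)
pub.bind('tcp://127.0.0.1:5555')
time.sleep(1)  # crude fix: give subscribers a moment to connect
pub.send_pyobj({'message': 'hello'})  # sent any earlier, this could be dropped

The zguide link in the diff describes the robust alternative: an explicit REQ/REP handshake with subscribers before publishing begins.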
325c5a8f407340fa8901f406c301fa8cbdac4ff8
|
bump version to 0.13.0
|
gunicorn/__init__.py
|
gunicorn/__init__.py
|
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.
version_info = (0, 12, 2)
__version__ = ".".join(map(str, version_info))
SERVER_SOFTWARE = "gunicorn/%s" % __version__
|
Python
| 0
|
@@ -146,12 +146,12 @@
0, 1
-2, 2
+3, 0
)%0A__
|
d35c9bb68d786e06738bbc80f0e79964887cdd1d
|
Revert changing of -, needs to be fixed by the user interface
|
rabbitmq_monitoring.py
|
rabbitmq_monitoring.py
|
#!/usr/bin/env python
"""
This script extracts metrics from a RabbitMQ instance.
The usage of this script is as follows:
rabbitmq_monitoring.py
"""
import json
from time import sleep
import collections
import sys
from os.path import basename
import urllib2
from base64 import b64encode
from string import replace
#
# Maps the API path names to Boundary Metric Identifiers
#
KEY_MAPPING = [
("object_totals_queues", "RABBITMQ_OBJECT_TOTALS_QUEUES"),
("object_totals_channels", "RABBITMQ_OBJECT_TOTALS_CHANNELS"),
("object_totals_exchanges", "RABBITMQ_OBJECT_TOTALS_EXCHANGES"),
("object_totals_consumers", "RABBITMQ_OBJECT_TOTALS_CONSUMERS"),
("object_totals_connections", "RABBITMQ_OBJECT_TOTALS_CONNECTIONS"),
("message_stats_deliver", "RABBITMQ_MESSAGE_STATS_DELIVER"),
("message_stats_deliver_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_DETAILS_RATE"),
("message_stats_deliver_no_ack", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK"),
("message_stats_deliver_no_ack_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_NO_ACK_DETAILS_RATE"),
("message_stats_deliver_get", "RABBITMQ_MESSAGE_STATS_DELIVER_GET"),
("message_stats_deliver_get_details_rate", "RABBITMQ_MESSAGE_STATS_DELIVER_GET_DETAILS_RATE"),
("message_stats_redeliver", "RABBITMQ_MESSAGE_STATS_REDELIVER"),
("message_stats_redeliver_details_rate", "RABBITMQ_MESSAGE_STATS_REDELIVER_DETAILS_RATE"),
("message_stats_publish", "RABBITMQ_MESSAGE_STATS_PUBLISH"),
("message_stats_publish_details_rate", "RABBITMQ_MESSAGE_STATS_PUBLISH_DETAILS_RATE"),
("queue_totals_messages", "RABBITMQ_QUEUE_TOTALS_MESSAGES"),
("queue_totals_messages_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_DETAILS_RATE"),
("queue_totals_messages_ready", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY"),
("queue_totals_messages_ready_details_rate", "RABBITMQ_QUEUE_TOTALS_MESSAGES_READY_DETAILS_RATE"),
("queue_totals_messages_unacknowledged", "RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED"),
("queue_totals_messages_unacknowledged_details_rate","RABBITMQ_QUEUE_TOTALS_MESSAGES_UNACKNOWLEDGED_DETAILS_RATE"),
("mem_used","RABBITMQ_MEM_USED"),
("disk_free","RABBITMQ_DISK_FREE")
]
class RabitMQMonitoring():
def __init__(self):
self.pollInterval = None
self.hostname = None
self.port = None
self.user = None
self.password = None
self.url = None
def send_get(self,url):
response = requests.get(url, auth=(self.user, self.password))
return response.json()
def call_api(self, endpoint):
url = self.url + endpoint
auth = b64encode(self.user + ":" + self.password)
headers = {
"Accept": "application/json",
"Authorization": "Basic %s" % auth,
}
request = urllib2.Request(url,headers=headers)
try:
response = urllib2.urlopen(request)
except urllib2.URLError as e:
sys.stderr.write("Error connecting to host: {0} ({1}), Error: {2}".format(self.host,e.errno,e.message))
raise
except urllib2.HTTPError as e:
sys.stderr.write("Error getting data from AWS Cloud Watch API: %s (%d), Error: %s",
getattr(h, "reason", "Unknown Reason"),h.code, h.read())
raise
return json.load(response)
def print_dict(self, dic):
for (key, value) in KEY_MAPPING:
if dic.get(key,"-") != "-":
name = replace(dic.get("name"),"@",":")
print("%s %s %s" % (value.upper(), dic.get(key, "-"), name))
def get_details(self):
overview = self.call_api("overview")
nodes = self.call_api("nodes")
if nodes:
overview.update(nodes[0])
if overview:
data = self.flatten_dict(overview)
self.print_dict(data)
def flatten_dict(self, dic, parent_key='', sep='_'):
items = []
for k, v in dic.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, collections.MutableMapping):
items.extend(self.flatten_dict(v, new_key, sep).items())
else:
items.append((new_key, v))
return dict(items)
def extractMetrics(self):
self.get_details()
def get_configuration(self):
'''
1) Reads the param.json file that contains the configuration of the plugin.
2) Sets the values to member variables of the class instance.
'''
with open('param.json') as f:
parameters = json.loads(f.read())
self.hostname = parameters['hostname']
self.port = parameters['port']
self.pollInterval = float(parameters['pollInterval'])/1000.0
self.user = parameters['user']
self.password = parameters['password']
self.url = "http://" + self.hostname + ":" + self.port + "/api/"
def continuous_monitoring(self):
while True:
self.get_details()
sleep(float(self.pollInterval))
if __name__ == "__main__":
monitor = RabitMQMonitoring()
monitor.get_configuration()
monitor.continuous_monitoring()
|
Python
| 0
|
@@ -3320,16 +3320,8 @@
e =
-replace(
dic.
@@ -3335,17 +3335,8 @@
me%22)
-,%22@%22,%22:%22)
%0A
|
8368aa5da5382a49d04b0d956503ca3eb6e797ce
|
Add overloading __len__()
|
ds_binary_heap_tuple.py
|
ds_binary_heap_tuple.py
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
class BinaryHeap(object):
"""Binary Min Heap class with (key, val)."""
def __init__(self):
self.heap_ls = [(0, 0)]
self.current_size = 0
def _percolate_up(self, i):
while i // 2 > 0:
if self.heap_ls[i][0] < self.heap_ls[i // 2][0]:
tmp = self.heap_ls[i // 2]
self.heap_ls[i // 2] = self.heap_ls[i]
self.heap_ls[i] = tmp
i = i // 2
def insert(self, new_node):
self.heap_ls.append(new_node)
self.current_size += 1
self._percolate_up(self.current_size)
def _get_min_child(self, i):
if (i * 2 + 1) > self.current_size:
return i * 2
else:
if self.heap_ls[i * 2][0] < self.heap_ls[i * 2 + 1][0]:
return i * 2
else:
return i * 2 + 1
def _percolate_down(self, i):
while (i * 2) <= self.current_size:
min_child = self._get_min_child(i)
if self.heap_ls[i][0] > self.heap_ls[min_child][0]:
tmp = self.heap_ls[i]
self.heap_ls[i] = self.heap_ls[min_child]
self.heap_ls[min_child] = tmp
else:
pass
i = min_child
def find_min(self):
return self.heap_ls[1]
def delete_min(self):
val_del = self.heap_ls[1]
self.heap_ls[1] = self.heap_ls[self.current_size]
self.current_size -= 1
self.heap_ls.pop()
self._percolate_down(1)
return val_del
def is_empty(self):
return self.current_size == 0
def size(self):
return self.current_size
def __contains__(self, vertex):
for vertex_tp in self.heap_ls:
if vertex == vertex_tp[1]:
return True
return False
def build_heap(self, a_list):
# alist: a list of tuples.
self.current_size = len(a_list)
self.heap_ls = [(0, 0)] + a_list[:]
i = len(a_list) // 2
while i > 0:
self._percolate_down(i)
i -= 1
def decrease_key(self, val, new_key):
done_bool = False
i = 1
new_pos = 0
while not done_bool and i <= self.current_size:
if self.heap_ls[i][1] == val:
done_bool = True
new_pos = i
else:
i += 1
if new_pos > 0:
self.heap_ls[new_pos] = (new_key, val)
self._percolate_up(new_pos)
def main():
bh_min = BinaryHeap()
bh_min.insert((1, 'a'))
print('Insert (1, a): {}'.format(bh_min.heap_ls))
bh_min.insert((3, 'c'))
print('Insert (3, c): {}'.format(bh_min.heap_ls))
bh_min.insert((4, 'b'))
print('Insert (4, b): {}'.format(bh_min.heap_ls))
bh_min.insert((5, 'e'))
print('Insert (5, e): {}'.format(bh_min.heap_ls))
bh_min.insert((2, 'd'))
print('Insert (2, d): {}'.format(bh_min.heap_ls))
print('Find min: {}'.format(bh_min.find_min()))
print('Is empty? {}'.format(bh_min.is_empty()))
print('Size? {}'.format(bh_min.size()))
print('Min: {}'.format(bh_min.delete_min()))
print('Delete min: {}'.format(bh_min.heap_ls))
bh_min = BinaryHeap()
bh_min.build_heap([(1, 'a'), (3, 'c'), (2, 'b')])
print('Build heap: {}'.format(bh_min.heap_ls))
print('c in bh_min: {}'.format('c' in bh_min))
print('d in bh_min: {}'.format('d' in bh_min))
if __name__ == '__main__':
main()
|
Python
| 0.000602
|
@@ -1763,16 +1763,73 @@
t_size%0A%0A
+ def __len__(self):%0A return self.current_size%0A%0A
def
@@ -3489,32 +3489,135 @@
_min.heap_ls))%0A%0A
+ print('len by size(): %7B%7D'.format(bh_min.size()))%0A print('len by len(): %7B%7D'.format(len(bh_min)))%0A
print('c in
|
dda25cdb808259ec2a91cba74273dc6c929af0aa
|
Check README.md and CHANGELOG.md.
|
dscan/plugins/drupal.py
|
dscan/plugins/drupal.py
|
from cement.core import handler, controller
from dscan.plugins import BasePlugin
from dscan.common.update_api import GitRepo
import dscan.common.update_api as ua
import dscan.common.versions
class Drupal(BasePlugin):
plugins_base_url = [
"%ssites/all/modules/%s/",
"%ssites/default/modules/%s/",
"%smodules/contrib/%s/",
"%smodules/%s/"]
themes_base_url = [
"%ssites/all/themes/%s/",
"%ssites/default/themes/%s/",
"%sthemes/%s/"]
forbidden_url = "sites/"
regular_file_url = ["misc/drupal.js", 'core/misc/drupal.js']
module_common_file = "LICENSE.txt"
update_majors = ['6','7','8', '9']
interesting_urls = [
("CHANGELOG.txt", "Default changelog file"),
("user/login", "Default admin"),
]
interesting_module_urls = [
('CHANGELOG.txt', 'Changelog file'),
('changelog.txt', 'Changelog file'),
('CHANGELOG.TXT', 'Changelog file'),
('README.txt', 'README file'),
('readme.txt', 'README file'),
('README.TXT', 'README file'),
('LICENSE.txt', 'License file'),
('API.txt', 'Contains API documentation for the module')
]
class Meta:
label = 'drupal'
@controller.expose(help='drupal related scanning tools')
def drupal(self):
self.plugin_init()
def update_version_check(self):
"""
@return: True if new tags have been made in the github repository.
"""
return ua.github_tags_newer('drupal/drupal/', self.versions_file,
update_majors=self.update_majors)
def update_version(self):
"""
@return: updated VersionsFile
"""
gr, versions_file, new_tags = ua.github_repo_new('drupal/drupal/',
'drupal/drupal', self.versions_file, self.update_majors)
hashes = {}
for version in new_tags:
gr.tag_checkout(version)
hashes[version] = gr.hashes_get(versions_file)
versions_file.update(hashes)
return versions_file
def update_plugins_check(self):
return ua.update_modules_check(self)
def update_plugins(self):
"""
@return: (plugins, themes) a tuple which contains two list of
strings, the plugins and the themes.
"""
plugins_url = 'https://drupal.org/project/project_module?page=%s'
plugins_css = '.node-project-module > h2 > a'
themes_url = 'https://drupal.org/project/project_theme?page=%s'
themes_css = '.node-project-theme > h2 > a'
per_page = 25
plugins = []
for elem in ua.modules_get(plugins_url, per_page, plugins_css):
plugins.append(elem['href'].split("/")[-1])
themes = []
for elem in ua.modules_get(themes_url, per_page, themes_css):
themes.append(elem['href'].split("/")[-1])
return plugins, themes
def load(app=None):
handler.register(Drupal)
|
Python
| 0
|
@@ -898,32 +898,76 @@
angelog file'),%0A
+ ('CHANGELOG.md', 'Changelog file'),%0A
('change
@@ -1071,32 +1071,70 @@
'README file'),%0A
+ ('README.md', 'README file'),%0A
('readme
|
58a71e60932a546fee919ce38b78254f885364f3
|
Update density imports
|
gypsy/stand_density_factor.py
|
gypsy/stand_density_factor.py
|
"""Stand density factor estimators"""
import logging
from gypsy.GYPSYNonSpatial import (
densityAw,
densitySw,
densitySb,
densityPl,
)
LOGGER = logging.getLogger(__name__)
def estimate_sdf_aw(spc, site_index, bhage, density):
'''Main purpose of this function is to estimate SDF for the species
:param str spc: species list
:param float site_index: site index of species Aw
:param float bhage: breast height age of speceis Aw
:param float density: density of species Aw
'''
density_est = 0
sdf = 0
if density <= 0 or bhage <= 0 or site_index <= 0:
return density_est, sdf
if spc[0] in ('Aw', 'Bw', 'Pb', 'A', 'H'):
sdf = density # best SDF guess
tolerance = 0.00001
within_tolerance = False
iter_count = 0
while not within_tolerance:
result = densityAw(sdf, bhage, site_index, ret_detail=True)
k1 = result['k1'] #pylint: disable=invalid-name
k2 = result['k2'] #pylint: disable=invalid-name
density_est = result['density']
if abs(density-density_est) < tolerance:
within_tolerance = True
else:
density_est = (density + density_est) / 2
sdf = density_est * k2 / k1
iter_count += 1
if iter_count == 1500:
LOGGER.warning('Slow convergence')
break
return density_est, sdf
def estimate_sdf_sb(spc, site_index, tage, density):
'''Main purpose of this function is to estimate SDF for the species
:param str spc: species list
:param float site_index: site index of species Sb
:param float tage: total age of species Sb
:param float density: density of species Sb
'''
density_est = 0
sdf = 0
if density > 0 and (tage > 0 or site_index > 0):
if spc[0] in ('Sb', 'Lt', 'La', 'Lw', 'L'):
sdf = density # best SDF guess
tolerance = 0.00001
within_tolerance = False
iter_count = 0
while not within_tolerance:
result = densitySb(sdf, tage, site_index, ret_detail=True)
k1 = result['k1'] #pylint: disable=invalid-name
k2 = result['k2'] #pylint: disable=invalid-name
density_est = result['density']
if abs(density-density_est) < tolerance:
within_tolerance = True
else:
density_est = (density + density_est)/2
sdf = density_est * k2/k1
iter_count += 1
if iter_count == 150:
LOGGER.warning('Slow convergence')
break
return density_est, sdf
def estimate_sdf_sw(spc, site_index, tage, sdfaw, density):
'''Main purpose of this function is to estimate SDF for the species
:param str spc: species list
:param float site_index: site index of species Sw
:param float tage: total age of species Sw
:param float sdfaw: Stand Density Factor of species Aw, this parameter indicates that
the density of Sw depends on the density of Aw
:param float density: density of species Sw
'''
density_est = 0
sdf = 0
if density > 0 and (tage > 0 or site_index > 0):
if spc[0] in ('Sw', 'Se', 'Fd', 'Fb', 'Fa'):
sdf = density # best SDF guess
tolerance = 0.00001
within_tolerance = False
iter_count = 0
while not within_tolerance:
result = densitySw(sdf, sdfaw, tage, site_index, ret_detail=True)
k1 = result['k1'] #pylint: disable=invalid-name
k2 = result['k2'] #pylint: disable=invalid-name
density_est = result['density']
if abs(density-density_est) < tolerance:
within_tolerance = True
else:
density_est = (density + density_est)/2
sdf = density_est * k2/k1
iter_count += 1
if iter_count == 150:
LOGGER.warning('Slow convergence')
break
return density_est, sdf
def estimate_sdf_pl(spc, site_index, tage, sdfaw, sdfsw, sdfsb, density):
'''Main purpose of this function is to estimate SDF for the species
:param str spc: species list
:param float site_index: site index of species Pl
:param float tage: total age of species Pl
:param float sdfaw: Stand Density Factor of species Aw
:param float sdfsw: Stand Density Factor of species Sw
    :param float sdfsb: Stand Density Factor of species Sb
    the SDF parameters above indicate that the density of Pl
    depends on the density of all other species
:param float density: density of species Pl
'''
density_est = 0
sdf = 0
if density > 0 and (tage > 0 or site_index > 0):
if spc[0] in ('P', 'Pl', 'Pj', 'Pa', 'Pf'):
sdf = density # best SDF guess
tolerance = 0.00001
within_tolerance = False
iter_count = 0
while not within_tolerance:
result = densityPl(sdfaw, sdfsw, sdfsb, sdf,
tage, site_index, ret_detail=True)
k1 = result['k1'] #pylint: disable=invalid-name
k2 = result['k2'] #pylint: disable=invalid-name
density_est = result['density']
if abs(density-density_est) < tolerance:
within_tolerance = True
else:
density_est = (density + density_est)/2
sdf = density_est * k2/k1
iter_count += 1
if iter_count == 150:
LOGGER.warning('Slow convergence')
break
return density_est, sdf
|
Python
| 0
|
@@ -62,23 +62,15 @@
psy.
-GYPSYNonSpatial
+density
imp
@@ -83,61 +83,101 @@
+estimate_
density
-A
+_a
w,%0A
+estimate_
density
-S
+_s
w,%0A
+estimate_
density
-S
+_s
b,%0A
+estimate_
density
-P
+_p
l,%0A)
@@ -896,16 +896,26 @@
t =
+estimate_
density
-A
+_a
w(sd
@@ -2160,24 +2160,34 @@
esult =
+estimate_
density
-S
+_s
b(sdf, t
@@ -3626,16 +3626,26 @@
t =
+estimate_
density
-S
+_s
w(sd
@@ -5252,16 +5252,26 @@
t =
+estimate_
density
-P
+_p
l(sd
@@ -5294,16 +5294,26 @@
b, sdf,%0A
+
|
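A note on the GYPSY record above: estimate_sdf_sb, estimate_sdf_sw and estimate_sdf_pl all repeat one numerical pattern, a damped fixed-point iteration that solves density(sdf) = observed density by averaging the target and the current estimate before each SDF update. Below is a minimal, self-contained sketch of that pattern; density_model is a hypothetical stand-in for densitySb/densitySw/densityPl, not the real GYPSY model.

def density_model(sdf, tage, site_index):
    # Hypothetical stand-in for densitySb/densitySw/densityPl: returns the
    # predicted density together with the k1/k2 factors used for the update.
    k1 = 1.0 + 0.01 * tage
    k2 = 1.0 + 0.005 * site_index
    return {'density': sdf * k1 / k2, 'k1': k1, 'k2': k2}

def estimate_sdf(density, tage, site_index, tolerance=0.00001, max_iter=150):
    sdf = density  # best SDF guess, as in the record above
    density_est = 0.0
    for _ in range(max_iter):
        result = density_model(sdf, tage, site_index)
        density_est = result['density']
        if abs(density - density_est) < tolerance:
            break
        # Damping: average the target and the estimate, then update the SDF.
        density_est = (density + density_est) / 2
        sdf = density_est * result['k2'] / result['k1']
    return density_est, sdf

print(estimate_sdf(density=1200.0, tage=40.0, site_index=15.0))

With this toy linear model the update reduces to sdf_next = (sdf_fixed + sdf) / 2, so the error halves each step and the loop converges well inside the 150-iteration cap.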
fbb3df846b3b9f4a86d6238cc5605a8d771ff924
|
add python 3 compatibility
|
dumper/logging_utils.py
|
dumper/logging_utils.py
|
from __future__ import unicode_literals
import logging
class BaseLogger(object):
@classmethod
def get_logger(cls):
logger = logging.getLogger(cls.module)
logger.setLevel('DEBUG')
return logger
@classmethod
def _log(cls, message):
cls.get_logger().debug(message)
@classmethod
def _cache_action(cls, action, path=None, method=None, key=None):
log_string = action
if path:
log_string += ' path "{0}"'.format(path)
if method:
log_string += ' with method "{0}"'.format(method)
if key:
log_string += ' as key "{0}"'.format(key)
cls._log(log_string)
class MiddlewareLogger(BaseLogger):
module = 'dumper.middleware'
@classmethod
def get(cls, key, value, request):
        success_text = 'found' if value else "didn't find"
cls._cache_action(
success_text,
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_get(cls, request):
cls._cache_action(
'skipped getting',
path=request.path,
)
@classmethod
def save(cls, key, request):
cls._cache_action(
'cached',
path=request.path,
method=request.method,
key=key
)
@classmethod
def not_save(cls, request):
cls._cache_action(
'not saving cache for',
path=request.path,
method=request.method,
)
class InvalidationLogger(BaseLogger):
module = 'dumper.invalidation'
@classmethod
def invalidate(cls, path, key):
cls._cache_action(
'invalidated',
path=path,
key=key
)
class SiteLogger(BaseLogger):
module = 'dumper.site'
@classmethod
def register(cls, model):
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('registered {0}.{1}'.format(app_name, model_name))
@classmethod
def invalidate_instance(cls, instance):
instance_name = unicode(instance)
model = instance.__class__
app_name = model._meta.app_label
model_name = model._meta.object_name
cls._log('invalidating instance #{0} "{1}" of {2}.{3}'.format(
instance.pk,
instance_name,
app_name,
model_name
))
|
Python
| 0.000001
|
@@ -49,16 +49,27 @@
logging%0A
+import six%0A
%0A%0Aclass
@@ -2136,22 +2136,28 @@
_name =
-unicod
+six.text_typ
e(instan
|
0f4d2b75cde58f6926636563691182fb5896c894
|
Add docstring to autocorr and ambiguity functions so the axes and peak location of the result is made clear.
|
echolect/core/coding.py
|
echolect/core/coding.py
|
# Copyright 2013 Ryan Volz
# This file is part of echolect.
# Echolect is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Echolect is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with echolect. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from echolect.filtering import filtering
__all__ = ['autocorr', 'ambiguity']
def autocorr(code, nfreq=1):
# special case because matched_doppler does not handle nfreq < len(code)
if nfreq == 1:
acorr = filtering.matched(code, code)
else:
acorr = filtering.matched_doppler(code, nfreq, code)
return acorr
def ambiguity(code, nfreq=1):
acorr = autocorr(code, nfreq)
# normalize so answer at zero delay, zero Doppler is 1
b = len(code)
if nfreq == 1:
acorr = acorr / acorr[b - 1]
else:
acorr = acorr / acorr[0, b - 1]
amb = acorr.real**2 + acorr.imag**2
return amb
|
Python
| 0
|
@@ -800,24 +800,1013 @@
, nfreq=1):%0A
+ %22%22%22Calculate autocorrelation of code for nfreq frequencies.%0A %0A If nfreq == 1, the result is a 1-D array with length that is %0A 2*len(code) - 1. The peak value of sum(abs(code)**2) is located%0A in the middle at index len(code) - 1.%0A %0A If nfreq %3E 1, the result is a 2-D array with the first index%0A corresponding to frequency shift. The code is frequency shifted%0A by normalized frequencies of range(nfreq)/nfreq and correlated%0A with the baseband code. The result acorr%5B0%5D gives the %0A autocorrelation with 0 frequency shift, acorr%5B1%5D with 1/nfreq%0A frequency shift, etc. These frequencies are the same as (and %0A are in the same order as) the FFT frequencies for an nfreq-%0A length FFT.%0A ****Thus, the peak value is at acorr%5B0, len(code) - 1%5D****%0A %0A To relocate the peak to the middle of the result, use%0A np.fft.fftshift(acorr, axes=0)%0A To relocate the peak to the %5B0, 0%5D entry, use%0A np.fft.ifftshift(acorr, axes=1)%0A %0A %22%22%22%0A
# specia
@@ -2055,24 +2055,253 @@
, nfreq=1):%0A
+ %22%22%22Calculate the ambiguity function of code for nfreq frequencies.%0A %0A The ambiguity function is the square of the autocorrelation, %0A normalized so the peak value is 1.%0A %0A See autocorr for details.%0A %0A %22%22%22%0A
acorr =
|
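Side note on the docstrings this commit adds: the peak-location claims are easy to verify with plain NumPy. The following check is illustrative only and uses NumPy directly, not the echolect API.

import numpy as np

code = np.array([1, 1, -1, 1], dtype=complex)  # Barker-4 code
acorr = np.correlate(code, code, mode='full')  # length 2*N - 1

assert len(acorr) == 2 * len(code) - 1
peak = len(code) - 1                           # zero-delay index
assert np.isclose(acorr[peak], np.sum(np.abs(code) ** 2))

# Ambiguity (zero-Doppler cut): squared magnitude, peak normalized to 1.
amb = np.abs(acorr / acorr[peak]) ** 2
assert np.isclose(amb[peak], 1.0)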
006fa9a63913f1a54091ba06eb87cea1f117c55a
|
Version bump for the new package
|
inapp_survey/__init__.py
|
inapp_survey/__init__.py
|
__version__ = '0.1.1'
|
Python
| 0
|
@@ -12,11 +12,11 @@
= '0.1.
-1
+2
'%0A
|
919b9900d7960054fc7eaf67a9d9bc07e325c966
|
Fix fnmatch for /**/ case
|
editorconfig/fnmatch.py
|
editorconfig/fnmatch.py
|
"""Filename matching with shell patterns.
fnmatch(FILENAME, PATTERN) matches according to the local convention.
fnmatchcase(FILENAME, PATTERN) always takes case in account.
The functions operate by translating the pattern into a regular
expression. They cache the compiled regular expressions for speed.
The function translate(PATTERN) returns a regular expression
corresponding to PATTERN. (It does not compile it.)
Based on code from fnmatch.py file distributed with Python 2.6.
Licensed under PSF License (see LICENSE.txt file).
Changes to original fnmatch module:
- translate function supports ``*`` and ``**`` similarly to fnmatch C library
"""
import os
import re
__all__ = ["fnmatch", "fnmatchcase", "translate"]
_cache = {}
LEFT_BRACE = re.compile(r'(?:^|[^\\])\{')
RIGHT_BRACE = re.compile(r'(?:^|[^\\])\}')
NUMERIC_RANGE = re.compile(r'([+-]?\d+)\.\.([+-]?\d+)')
def fnmatch(name, pat):
"""Test whether FILENAME matches PATTERN.
Patterns are Unix shell style:
- ``*`` matches everything except path separator
- ``**`` matches everything
- ``?`` matches any single character
- ``[seq]`` matches any character in seq
- ``[!seq]`` matches any char not in seq
- ``{s1,s2,s3}`` matches any of the strings given (separated by commas)
An initial period in FILENAME is not special.
Both FILENAME and PATTERN are first case-normalized
if the operating system requires it.
If you don't want this, use fnmatchcase(FILENAME, PATTERN).
"""
name = os.path.normpath(name).replace(os.sep, "/")
return fnmatchcase(name, pat)
def cached_translate(pat):
    if pat not in _cache:
res, num_groups = translate(pat)
regex = re.compile(res)
_cache[pat] = regex, num_groups
return _cache[pat]
def fnmatchcase(name, pat):
"""Test whether FILENAME matches PATTERN, including case.
This is a version of fnmatch() which doesn't case-normalize
its arguments.
"""
regex, num_groups = cached_translate(pat)
match = regex.match(name)
if not match:
return False
pattern_matched = True
for (num, (min_num, max_num)) in zip(match.groups(), num_groups):
if num[0] == '0' or not (min_num <= int(num) <= max_num):
pattern_matched = False
break
return pattern_matched
def translate(pat, nested=False):
"""Translate a shell PATTERN to a regular expression.
There is no way to quote meta-characters.
"""
i, n = 0, len(pat) # Current index (i) and length (n) of pattern
brace_level = 0
in_brackets = False
res = ''
escaped = False
matching_braces = (len(LEFT_BRACE.findall(pat)) ==
len(RIGHT_BRACE.findall(pat)))
numeric_groups = []
while i < n:
c = pat[i]
i = i + 1
if c == '*':
j = i
if j < n and pat[j] == '*':
res = res + '.*'
else:
res = res + '[^/]*'
elif c == '?':
res = res + '.'
elif c == '[':
if in_brackets:
res = res + '\\['
else:
j = i
has_slash = False
while j < n and pat[j] != ']':
if pat[j] == '/' and pat[j-1] != '\\':
has_slash = True
break
j += 1
if has_slash:
res = res + '\\[' + pat[i:j+1] + '\\]'
i = j + 2
else:
if i < n and pat[i] in '!^':
i = i + 1
res = res + '[^'
else:
res = res + '['
in_brackets = True
elif c == '-':
if in_brackets:
res = res + c
else:
res = res + '\\' + c
elif c == ']':
res = res + c
in_brackets = False
elif c == '{':
j = i
has_comma = False
while j < n and (pat[j] != '}' or escaped):
if pat[j] == ',' and not escaped:
has_comma = True
break
escaped = pat[j] == '\\' and not escaped
j = j + 1
if not has_comma and j < n:
num_range = NUMERIC_RANGE.match(pat[i:j])
if num_range:
numeric_groups.append(map(int, num_range.groups()))
res = res + "([+-]?\d+)"
else:
inner_res, inner_groups = translate(pat[i:j], nested=True)
res = res + '\\{%s\\}' % (inner_res,)
numeric_groups += inner_groups
i = j + 1
elif matching_braces:
res = res + '(?:'
brace_level += 1
else:
res = res + '\\{'
elif c == ',':
if brace_level > 0 and not escaped:
res = res + '|'
else:
res = res + '\\,'
elif c == '}':
if brace_level > 0 and not escaped:
res = res + ')'
brace_level -= 1
else:
res = res + '\\}'
elif c != '\\':
res = res + re.escape(c)
if c == '\\':
if escaped:
res = res + re.escape(c)
escaped = not escaped
else:
escaped = False
res = res.encode('utf-8')
if not nested:
res = res + '\Z(?ms)'
return res, numeric_groups
|
Python
| 0.000004
|
@@ -5328,16 +5328,189 @@
+ '%5C%5C%7D'%0A
+ elif c == '/':%0A if pat%5Bi:i+3%5D == %22**/%22:%0A res = res + %22(?:/%7C/.*/)%22%0A i += 3%0A else:%0A res = res + '/'%0A
|
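What the added elif c == '/' branch buys: **/ now matches either a bare path separator or any run of intermediate directories. A quick usage sketch of the expected behaviour after this fix (run from a checkout where editorconfig.fnmatch is importable):

from editorconfig.fnmatch import fnmatch

assert fnmatch('a/b.py', 'a/**/b.py')      # "**/" collapses to a single "/"
assert fnmatch('a/x/y/b.py', 'a/**/b.py')  # ... or spans several directories
assert not fnmatch('a/b.txt', 'a/**/b.py')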
63ecfbe4bbe10ba9cb4e2e374e99ac1a78276023
|
fix sql syntax error
|
eheritage/injest/vic.py
|
eheritage/injest/vic.py
|
from sqlalchemy import create_engine, MetaData, text
import json
import MySQLdb, MySQLdb.cursors
conn = MySQLdb.connect(user="vhd", passwd="vhd", db="vhd", cursorclass=MySQLdb.cursors.SSDictCursor, charset="utf8")
cursor = conn.cursor()
engine = create_engine("mysql+mysqldb://vhd:vhd@localhost/vhd?charset=utf8&use_unicode=0")
def test():
everything_query = """
select places.id, places.place_name, places.location, places.vhr_number, places.significance, places.longitude, places.latitude,
act_categories.act_category_name,
addresses.street_number, addresses.street_name, addresses.suburb, addresses.state, addresses.postcode, lga_names.lga_name,
architects.architect_name,
architectural_styles.architectural_style_name,
constructions.construction_start, constructions.construction_end
from places
JOIN act_categories_places on places.id = act_categories_places.place_id
JOIN act_categories on act_categories_places.act_category_id = act_categories.id
JOIN status_names ON places.status_id = status_names.id
JOIN place_owners ON status_names.place_owner_id = place_owners.id
LEFT JOIN addresses on places.id = addresses.place_id
LEFT JOIN lga_names on addresses.lga_name_id = lga_names.id
LEFT JOIN architects_places on places.id = architects_places.place_id
LEFT JOIN architects on architects_places.architect_id = architects.id
LEFT JOIN architectural_styles_places on places.id = architectural_styles_places.place_id
LEFT JOIN architectural_styles on architectural_styles_places.place_id = architectural_styles.id
LEFT JOIN constructions on places.id = constructions.place_id
ORDER BY places.id
LIMIT 1"""
result = engine.execute(everything_query)
for row in result:
print(dict(row))
def get_number_of_places():
"""Retrieve the total number of Victorian Heritage Places"""
result = engine.execute("SELECT COUNT(*) FROM places")
num = result.first()[0]
return num
def get_addresses(place_id):
addresses_q = """SELECT street_number, street_name, suburb, state,
postcode, lga_names.lga_name
FROM addresses a
LEFT JOIN lga_names ON a.lga_name_id = lga_names.id
WHERE place_id = :place_id"""
addresses = engine.execute(text(addresses_q), place_id=place_id)
return [dict(address) for address in addresses]
def get_act_categories(place_id):
act_categories_q = """SELECT act_category_name
FROM act_categories_places acp
JOIN act_categories ac ON acp.act_category_id = ac.id
WHERE place_id = :place_id"""
act_categories = engine.execute(text(act_categories_q), place_id=place_id)
return [act_category[0] for act_category in act_categories]
def get_architects(place_id):
architects_q = """SELECT architect_name
FROM architects_places ap
JOIN architects ON ap.architect_id = architects.id
WHERE place_id = :place_id"""
architects = engine.execute(text(architects_q), place_id=place_id)
return [architect_name[0] for architect_name in architects]
def get_architectural_styles(place_id):
architectural_styles_q = \
"""SELECT architectural_style_name
FROM architectural_styles_places asp
JOIN architectural_styles as ON asp.architectural_style_id = as.id
WHERE place_id = :place_id"""
architectural_styles = engine.execute(text(architectural_styles_q),
place_id=place_id)
return [architectural_style_name[0]
for architectural_style_name in architectural_styles]
def get_item_categories(place_id):
item_categories_q = """SELECT ig.group_name, ic.item_category_name
FROM vhd.item_categories_places icp
LEFT JOIN item_groups ig ON icp.item_group_id = ig.id
LEFT JOIN item_categories ic ON icp.item_category_id = ic.id
WHERE place_id = :place_id
ORDER BY sort_order"""
item_categories = engine.execute(text(item_categories_q), place_id=place_id)
return [{'group': group, 'name': name}
for group, name in item_categories.fetchall()]
def massage_before_indexing(place):
place['state'] = 'VIC'
place['place_id'] = place['id']
place['url'] = u"http://vhd.heritage.vic.gov.au/vhd/heritagevic#detail_places;%d" % place['id']
place['country'] = 'Australia'
place['name'] = place['place_name']
for address in place['addresses']:
address['state'] = 'VIC'
try:
place['geolocation'] = {
'lat': float(place['latitude']),
'lon': float(place['longitude'])
}
except TypeError:
pass
del(place['latitude'])
del(place['longitude'])
def all_places():
"""Generator function for returning all victorian places
"""
places_query = \
"""SELECT places.id, places.place_name, places.location,
places.significance, places.vhr_number, places.longitude,
places.latitude, constructions.construction_start,
constructions.construction_end, places.nat_trust_listing_number,
places.nat_estate_listing_number, status_names.status_name,
place_owners.place_owner_name
FROM places
LEFT JOIN constructions ON places.id = constructions.place_id
LEFT JOIN status_names
ON places.status_id = status_names.id
AND status_names.published_online = 1
LEFT JOIN place_owners
ON status_names.place_owner_id = place_owners.id
"""
result = cursor.execute(places_query)
for place in cursor:
row_id = place['id']
place['addresses'] = get_addresses(row_id)
place['act_categories'] = get_act_categories(row_id)
place['architects'] = get_architects(row_id)
place['architectural_styles'] = get_architectural_styles(row_id)
place['categories'] = get_item_categories(row_id)
massage_before_indexing(place)
# print(json.dumps(place, indent=4))
yield place
#places_table = meta.tables['places']
#results = engine.execute(places_table.select().limit(10))
|
Python
| 0.000073
|
@@ -3443,16 +3443,17 @@
styles a
+r
s ON asp
@@ -3479,16 +3479,17 @@
e_id = a
+r
s.id%0A
|
cc6bc2b9af67c064339371b43795c36ed3e5ddcb
|
use TemplateResponse everywhere
|
ella_galleries/views.py
|
ella_galleries/views.py
|
from django.http import Http404
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.utils.translation import ungettext
from django.utils.cache import patch_vary_headers
from ella.core.views import get_templates_from_publishable
def gallery_item_detail(request, context, item_slug=None):
'''
Returns ``GalleryItem`` object by its slug or first one (given by
``GalleryItem``.``order``) from ``Gallery``.
'''
gallery = context['object']
item_sorted_dict = gallery.items
count = len(item_sorted_dict)
count_str = ungettext('%(count)d object total', '%(count)d objects total',
count) % {'count': count}
next = None
previous = None
if count == 0:
# TODO: log empty gallery
raise Http404()
if item_slug is None:
item = item_sorted_dict.value_for_index(0)
if count > 1:
next = item_sorted_dict.value_for_index(1)
position = 1
else:
try:
item = item_sorted_dict[item_slug]
except KeyError:
raise Http404()
item_index = item_sorted_dict.keyOrder.index(item_slug)
if item_index > 0:
previous = item_sorted_dict.value_for_index(item_index - 1)
if (item_index + 1) < count:
next = item_sorted_dict.value_for_index(item_index + 1)
position = item_index + 1
context.update({
'gallery': gallery,
'item': item,
'item_list' : item_sorted_dict.values(),
'next' : next,
'previous' : previous,
'count' : count,
'count_str' : count_str,
'position' : position,
})
if request.is_ajax():
template_name = "item-ajax.html"
else:
template_name = "item.html"
response = render_to_response(
get_templates_from_publishable(template_name, context['object']),
context,
context_instance=RequestContext(request),
)
patch_vary_headers(response, ('X-Requested-With',))
return response
|
Python
| 0
|
@@ -41,86 +41,49 @@
ngo.
-shortcuts import render_to_response%0Afrom django.template import RequestContext
+template.response import TemplateResponse
%0Afro
@@ -1768,27 +1768,42 @@
e =
-render_to_response(
+TemplateResponse(%0A request,
%0A
@@ -1894,58 +1894,8 @@
xt,%0A
- context_instance=RequestContext(request),%0A
|
63587ab033a0aabd52af6b657600d2d2547f034f
|
Bump release version
|
grove/__init__.py
|
grove/__init__.py
|
##############################################################################
# Copyright 2016-2017 Rigetti Computing
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
__version__ = '1.2.0'
|
Python
| 0
|
@@ -781,13 +781,13 @@
__ = '1.
-2
+3
.0'%0A
|
e0de9a865b731f3f24bc7a42909849abc738217f
|
Increment version
|
karld/_meta.py
|
karld/_meta.py
|
version_info = (0, 2, 0)
version = '.'.join(map(str, version_info))
|
Python
| 0.000002
|
@@ -15,17 +15,17 @@
(0, 2,
-0
+1
)%0Aversio
|
96395ef3cc411e7e5455635604c0bea606ec006c
|
update model.py
|
model.py
|
model.py
|
import threading
class DataValidationError(Exception):
""" Used for an data validation errors when deserializing """
pass
class Customer(object):
def __init__(self, wishlist={}, wishlist_id={}):
self.wishlist = wishlist
self.wishlist_id = wishlist_id
self.index = 0
def create(self, name):
self.wishlist[name] = []
w_id = self.__next_index()
self.wishlist_id[w_id] = name
def add_product(self, name, pid):
if pid == 0:
return
else:
dict = self.wishlist[name]
val = [id for id in dict if id == pid]
if val:
return
else:
dict.append(pid)
def display(self, name):
dict = self.wishlist[name]
return dict
def __next_index(self):
""" Generates the next index in a continual sequence """
self.index += 1
return self.index
class CustomerList(object):
cust_id = {}
def __init__(self, id, name):
self.id = id
self.name = name
self.pid = 0
def deserialize(self, data):
if not isinstance(data, dict):
raise DataValidationError('Invalid wishlist data: body of request contained bad or no data')
if data.has_key('PID'):
self.pid = data['PID']
try:
self.name = data['name']
except KeyError as err:
raise DataValidationError('Invalid wishlist: missing wishlist name')
return
def save(self):
if CustomerList.cust_id.has_key(self.id):
c = CustomerList.cust_id[self.id]
if c.wishlist.has_key(self.name):
c.add_product(self.name, self.pid)
return
else:
c.create(self.name)
c.add_product(self.name, self.pid)
else:
c = Customer({}, {})
c.create(self.name)
c.add_product(self.name, self.pid)
CustomerList.cust_id[self.id] = c
return
def serialize(self):
c = CustomerList.cust_id[self.id]
product_list = c.display(self.name)
return {"Wishlist name": self.name, "Product list": [p for p in product_list]}
@staticmethod
def find(custid):
if CustomerList.cust_id.has_key(custid):
c = CustomerList.cust_id[custid]
return c.wishlist
else:
return None
@staticmethod
def find_wishlist(wishlists,name):
if wishlists.has_key(name):
print (name)
return {"Wishlist name": name, "Product list": [p for p in wishlists[name]]}
else:
return None
@staticmethod
def delete_by_id(custid,wid):
if CustomerList.cust_id.has_key(custid):
c = CustomerList.cust_id[custid]
if c.wishlist_id.has_key(wid):
c.wishlist_id.pop(wid, None)
return True
return False
@staticmethod
def find_by_id(custid,wid):
if CustomerList.cust_id.has_key(custid):
c = CustomerList.cust_id[custid]
if c.wishlist_id.has_key(wid):
name = c.wishlist_id[wid]
return {"Wishlist name": name, "Product list": [p for p in c.wishlist[name]]}
else:
return None
else:
return None
@staticmethod
def update(data,oldName,custid):
c = CustomerList.cust_id[custid]
if(c.wishlist.has_key(oldName)):
product = c.wishlist[oldName]
del c.wishlist[oldName]
new_name = data['name']
c.wishlist[new_name] = product
for key,value in c.wishlist_id.iteritems():
if value == oldName:
index = key
c.wishlist_id[index] = new_name
CustomerList.cust_id[custid] = c
return {"Successfully updated wishlist with new name ": new_name}
@staticmethod
def remove_all():
""" Removes all of the Pets from the database """
CustomerList.cust_id = {}
return CustomerList.cust_id
|
Python
| 0.000001
|
@@ -2105,32 +2105,118 @@
ust_id%5Bself.id%5D%0A
+%09 for k,v in c.wishlist_id.iteritems():%0A%09 if self.name == v:%0A%09 id = k%0A
product_
@@ -2251,32 +2251,42 @@
return %7B
+%22ID%22: id,
%22Wishlist name%22:
@@ -2339,16 +2339,25 @@
t_list%5D%7D
+%0A
%0A%0A @s
@@ -2650,33 +2650,122 @@
e):%0A
- print (name)%0A
+%09%09%09c = CustomerList.cust_id%5Bcustid%5D%0A%09%09%09for k,v in c.wishlist_id.iteritems():%0A%09%09%09%09if name == v:%0A%09%09%09%09%09id = k%0A
@@ -2768,32 +2768,37 @@
+
return %7B
%22Wishlist na
@@ -2777,32 +2777,42 @@
return %7B
+%22ID%22: id,
%22Wishlist name%22:
@@ -2893,32 +2893,41 @@
return None
+%0A
%0A%0A @staticmet
@@ -3449,16 +3449,27 @@
return %7B
+%22ID%22: wid,
%22Wishlis
|
c39ce3485af781e8974a70200baa1f51e5c1633b
|
fix imports
|
gstat/__init__.py
|
gstat/__init__.py
|
Python
| 0.000002
|
@@ -0,0 +1,60 @@
+from gstat import gstat, gstats, gstat_elapsed, gstat_event%0A
|
|
9212f90ae383cae05178585b31cc4ff26e584d28
|
use smaller pixmaps for the toolbar
|
examples/gtkhtml_demo.py
|
examples/gtkhtml_demo.py
|
# $Id: gtkhtml_demo.py,v 1.13 2002/08/28 12:12:14 kjetilja Exp $
import sys
import os
import urllib, urlparse
from gtk import *
from gnome.ui import *
from gtkhtml import *
import cStringIO, threading, Queue, time
import pycurl
# URL history
history = []
# Links for 'forward'
forward = []
# Number of concurrent connections to the web-server
NUM_THREADS = 4
# About
def about(button):
GnomeAbout('GtkHTML Test with PycURL', '',
'License GPL2',
['Original code by Matt Wilson, modified by Kjetil Jacobsen'],
('This is a useless application demonstrating the '
'GtkHTML widget with Python and PycURL.')).show()
# HTML template for reporting internal errors
internal_error = """
<html>
<head>
<title>Error</title>
</head>
<body>
<h1>Error</h1>
<b>%s</b>
</body>
</html>
"""
# Worker threads downloads objects and passes them to the renderer
class WorkerThread(threading.Thread):
def __init__(self, queue, render):
threading.Thread.__init__(self)
self.queue = queue # Download request queue
self.render = render # Render output queue
def run(self):
curl = pycurl.Curl()
curl.setopt(pycurl.FOLLOWLOCATION, 1)
curl.setopt(pycurl.MAXREDIRS, 5)
curl.setopt(pycurl.HTTPHEADER, ["User-Agent: GtkHTML/PycURL demo browser"])
while 1:
url, handle = self.queue.get()
if url == None:
curl.close()
raise SystemExit
b = cStringIO.StringIO()
curl.setopt(pycurl.WRITEFUNCTION, b.write)
curl.setopt(pycurl.URL, url)
try:
curl.perform()
except pycurl.error, msg:
b.write(internal_error % msg[1])
except:
msg = "Error retrieving URL: %s" % url
b.write(internal_error % msg)
# Flag empty documents to the renderer
if b.tell() == 0:
b.close()
b = None
# Enqueue the document on the rendering pipeline
self.render.append((b, handle))
# Main rendering window, handles gtk events and sends requests to worker threads
class HtmlWindow(GtkHTML):
def __init__(self):
GtkHTML.__init__(self)
self.queue = Queue.Queue()
self.render = []
self.threads = []
for num_threads in range(NUM_THREADS):
t = WorkerThread(self.queue, self.render)
t.start()
self.threads.append(t)
def mainquit(self, *args):
# Send a 'terminate' message to the worker threads
for t in self.threads:
t.queue.put((None, None))
mainquit()
def load_url(self, html, url):
t1 = time.time()
self.num_obj = 0
if history: url = urllib.basejoin(history[-1], url)
history.append(url)
html.load_empty()
handle = html.begin()
url = url.strip()
self.request_url(html, url, handle)
self.urlentry.set_text(url)
# Render incoming objects
while self.num_obj > 0:
if len(self.render) == 0:
mainiteration(0)
continue
self.num_obj -= 1
buf, handle = self.render.pop(0)
if buf != None:
html.write(handle, buf.getvalue())
buf.close()
html.end(handle, HTML_STREAM_OK)
t2 = time.time()
self.statusbar.set_text("Done (%.3f seconds)" % (t2-t1))
def submit(self, html, method, path, params):
print 'Submit is not supported yet'
print method, path, params
def request_url(self, html, url, handle):
url = urllib.basejoin(history[-1], url)
self.statusbar.set_text("Requesting URL: %s" % url)
self.queue.put((url, handle))
self.num_obj += 1
def entry_activate(self, entry, html):
url = entry.get_text()
self.load_url(html, url)
del forward[:]
def do_back(self, _b):
forward.append(history[-1])
del history[-1]
url = history[-1]
del history[-1]
self.load_url(html, url)
def do_forward(self, _b):
if len(forward) == 0: return
self.load_url(html, forward[-1])
del forward[-1]
def do_reload(self, _b):
if len(history) == 0: return
url = history[-1]
del history[-1]
self.load_url(html, url)
# Setup windows and menus
html = HtmlWindow()
file_menu = [
UIINFO_ITEM_STOCK('Quit', None, html.mainquit, STOCK_MENU_QUIT),
]
help_menu = [
UIINFO_ITEM_STOCK('About...', None, about, STOCK_MENU_ABOUT),
]
menus = [
UIINFO_SUBTREE('File', file_menu),
UIINFO_SUBTREE('Help', help_menu)
]
toolbar = [
UIINFO_ITEM_STOCK('Back', 'Previous page', html.do_back, STOCK_PIXMAP_BACK),
UIINFO_ITEM_STOCK('Forward', 'Next page', html.do_forward, STOCK_PIXMAP_FORWARD),
UIINFO_ITEM_STOCK('Reload', 'Reload current page', html.do_reload, STOCK_PIXMAP_REFRESH)
]
win = GnomeApp("html_demo", "Python GtkHTML Test")
win.set_wmclass("gtk_html_test", "GtkHTMLTest")
win.connect('delete_event', html.mainquit)
vbox = GtkVBox(spacing=3)
vbox.set_border_width(2)
vbox.show()
win.set_contents(vbox)
entry = GtkEntry()
html.connect('url_requested', html.request_url)
html.connect('link_clicked', html.load_url)
html.connect('submit', html.submit)
entry.connect('activate', html.entry_activate, html)
vbox.pack_start(entry, expand=FALSE)
entry.show()
html.urlentry = entry
html.set_usize(800, 600)
sw = GtkScrolledWindow()
sw.set_policy(POLICY_AUTOMATIC, POLICY_AUTOMATIC)
sw.add(html)
vbox.pack_start(sw)
sep = GtkHSeparator()
vbox.pack_start(sep, expand=FALSE)
status = GtkLabel('')
status.set_justify(JUSTIFY_LEFT)
status.set_alignment(0.0, 0.5)
win.set_statusbar(status)
win.create_menus(menus)
win.create_toolbar(toolbar)
html.statusbar = status
html.load_empty()
win.show_all()
threads_enter()
mainloop()
threads_leave()
|
Python
| 0
|
@@ -21,17 +21,17 @@
py,v 1.1
-3
+4
2002/08
@@ -41,13 +41,13 @@
12:
-12:14
+42:25
kje
@@ -4855,22 +4855,20 @@
, STOCK_
-PIXMAP
+MENU
_BACK),%0A
@@ -4936,22 +4936,20 @@
, STOCK_
-PIXMAP
+MENU
_FORWARD
@@ -5032,14 +5032,12 @@
OCK_
-PIXMAP
+MENU
_REF
|
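The demo's download pipeline, one shared request Queue fanned out to NUM_THREADS pycurl workers with None as the shutdown sentinel, works the same without any GUI. A stripped-down sketch of just that pattern, in the same Python 2 style as the demo; as in the demo, plain list.append is used for results, which is safe under the GIL.

import threading
import Queue
import cStringIO
import pycurl

def worker(queue, results):
    curl = pycurl.Curl()
    curl.setopt(pycurl.FOLLOWLOCATION, 1)
    while 1:
        url = queue.get()
        if url is None:  # shutdown sentinel, as in the demo
            curl.close()
            return
        buf = cStringIO.StringIO()
        curl.setopt(pycurl.WRITEFUNCTION, buf.write)
        curl.setopt(pycurl.URL, url)
        try:
            curl.perform()
            results.append((url, buf.getvalue()))
        except pycurl.error, msg:
            results.append((url, 'error: %s' % (msg,)))

queue, results = Queue.Queue(), []
threads = [threading.Thread(target=worker, args=(queue, results))
           for _ in range(4)]
for t in threads:
    t.start()
for url in ('http://example.com/', 'http://example.org/'):
    queue.put(url)
for t in threads:
    queue.put(None)  # one sentinel per worker
for t in threads:
    t.join()
print len(results), 'downloads finished'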
1acc7e7f32aba2419a564f771e97625671514ace
|
Fix test order when running with nose
|
html5lib/tests/test_parser.py
|
html5lib/tests/test_parser.py
|
import os
import sys
import traceback
import StringIO
import warnings
import re
warnings.simplefilter("error")
from support import html5lib_test_files, TestData, convert, convertExpected
import html5lib
from html5lib import html5parser, treebuilders, constants
treeTypes = {"simpletree":treebuilders.getTreeBuilder("simpletree"),
"DOM":treebuilders.getTreeBuilder("dom")}
#Try whatever etree implementations are available from a list that are
#"supposed" to work
try:
import xml.etree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
try:
import elementtree.ElementTree as ElementTree
treeTypes['ElementTree'] = treebuilders.getTreeBuilder("etree", ElementTree, fullTree=True)
except ImportError:
pass
try:
import xml.etree.cElementTree as cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
try:
import cElementTree
treeTypes['cElementTree'] = treebuilders.getTreeBuilder("etree", cElementTree, fullTree=True)
except ImportError:
pass
try:
try:
import lxml.html as lxml
except ImportError:
import lxml.etree as lxml
treeTypes['lxml'] = treebuilders.getTreeBuilder("lxml", lxml, fullTree=True)
except ImportError:
pass
try:
import BeautifulSoup
treeTypes["beautifulsoup"] = treebuilders.getTreeBuilder("beautifulsoup", fullTree=True)
except ImportError:
pass
#Try whatever dom implementations are available from a list that are
#"supposed" to work
try:
import pxdom
treeTypes["pxdom"] = treebuilders.getTreeBuilder("dom", pxdom)
except ImportError:
pass
#Run the parse error checks
checkParseErrors = False
#XXX - There should just be one function here but for some reason the testcase
#format differs from the treedump format by a single space character
def convertTreeDump(data):
return "\n".join(convert(3)(data).split("\n")[1:])
namespaceExpected = re.compile(r"^(\s*)<(\S+)>", re.M).sub
def runParserTest(innerHTML, input, expected, errors, treeClass,
namespaceHTMLElements):
#XXX - move this out into the setup function
#concatenate all consecutive character tokens into a single token
try:
p = html5parser.HTMLParser(tree = treeClass,
namespaceHTMLElements=namespaceHTMLElements)
except constants.DataLossWarning:
return
try:
if innerHTML:
document = p.parseFragment(input, innerHTML)
else:
try:
document = p.parse(input)
except constants.DataLossWarning:
return
except:
errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
u"\nTraceback:", traceback.format_exc()])
assert False, errorMsg.encode("utf8")
output = convertTreeDump(p.tree.testSerializer(document))
expected = convertExpected(expected)
if namespaceHTMLElements:
expected = namespaceExpected(r"\1<html \2>", expected)
errorMsg = u"\n".join([u"\n\nInput:", input, u"\nExpected:", expected,
u"\nReceived:", output])
assert expected == output, errorMsg.encode("utf8")
errStr = [u"Line: %i Col: %i %s"%(line, col,
constants.E[errorcode] % datavars if isinstance(datavars, dict) else (datavars,)) for
((line,col), errorcode, datavars) in p.errors]
errorMsg2 = u"\n".join([u"\n\nInput:", input,
u"\nExpected errors (" + str(len(errors)) + u"):\n" + u"\n".join(errors),
u"\nActual errors (" + str(len(p.errors)) + u"):\n" + u"\n".join(errStr)])
if checkParseErrors:
assert len(p.errors) == len(errors), errorMsg2.encode("utf-8")
def test_parser():
sys.stdout.write('Testing tree builders '+ " ".join(treeTypes.keys()) + "\n")
for treeName, treeCls in treeTypes.iteritems():
files = html5lib_test_files('tree-construction')
for filename in files:
testName = os.path.basename(filename).replace(".dat","")
tests = TestData(filename, "data")
for index, test in enumerate(tests):
input, errors, innerHTML, expected = [test[key] for key in
'data', 'errors',
'document-fragment',
'document']
if errors:
errors = errors.split("\n")
for namespaceHTMLElements in (True, False):
print input
yield (runParserTest, innerHTML, input, expected, errors, treeCls,
namespaceHTMLElements)
break
|
Python
| 0.000003
|
@@ -145,17 +145,50 @@
st_files
-,
+ as data_files%0Afrom support import
TestDat
@@ -4019,19 +4019,19 @@
sys.std
-out
+err
.write('
@@ -4094,90 +4094,24 @@
n%22)%0A
-%0A for treeName, treeCls in treeTypes.iteritems():%0A files = html5lib_test
+ files = data
_fil
@@ -4134,24 +4134,25 @@
ction')%0A
+%0A
for file
@@ -4170,28 +4170,24 @@
es:%0A
-
testName = o
@@ -4236,28 +4236,24 @@
%22)%0A%0A
-
-
tests = Test
@@ -4275,29 +4275,33 @@
%22data%22)%0A
-%0A
+
+%0A
+
for inde
@@ -4329,20 +4329,16 @@
tests):%0A
-
@@ -4625,20 +4625,16 @@
-
if error
@@ -4632,28 +4632,24 @@
if errors:%0A
-
@@ -4676,24 +4676,25 @@
split(%22%5Cn%22)%0A
+%0A
@@ -4685,36 +4685,79 @@
%22)%0A%0A
-
+for treeName, treeCls in treeTypes.iteritems():
%0A
|
ae3f7266437c8670ec6531fb2f17eec328fcd6ed
|
Fix bug
|
gwv/validators/ids.py
|
gwv/validators/ids.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
from gwv.kagedata import KageData
from gwv.validators import ValidatorClass
filters = {
"alias": {True, False},
"category": {"ids"}
}
_re_idc = re.compile(r"^u2ff[\dab]$")
_re_vars = re.compile(r"-([gtvhmi]|k[pv]?|us?|j[asv]?)?(\d{2})(-(var|itaiji)-\d{3})?(@|$)")
def indexOfFirstKanjiBuhinLine(sname, kage):
"""IDSの最初の漢字を部品としているKageLine(なければNone)を返す"""
for i, sname_i in enumerate(sname):
if _re_idc.match(sname_i):
continue
firstKanji = sname_i
if firstKanji == "cdp":
firstKanji += "-" + sname[i + 1]
for line in kage.lines:
if line.data[0] == 99 and line.data[7].startswith(firstKanji):
return line
return None
return None
class Validator(ValidatorClass):
name = "ids"
def is_invalid(self, name, related, kage, gdata, dump):
# Replace with the entity if the glyph is an alias
if kage.isAlias():
r = dump.get(gdata[19:].split("@")[0], None)
if r:
gdata = r[1]
kage = KageData(gdata)
if not (kage.lines[0].data[0] == 99 and kage.len > 1):
return False
fData = kage.lines[0].data
if fData[4] == fData[6]:
aspect = float("inf")
else:
aspect = abs(float(fData[3] - fData[5]) / (fData[4] - fData[6])) # x/y
sname = name.split("-")
        # names beginning with ⿰⿱, ⿱⿰ etc. make the first component's aspect ratio unpredictable
isComplicated = (sname[1] in ("u2ff0", "u2ff2") and sname[0] in ("u2ff1", "u2ff3")) or \
(sname[1] in ("u2ff1", "u2ff3") and sname[0] in ("u2ff0", "u2ff2"))
m = _re_vars.search(fData[7])
if m:
            firstBuhinType = m.group(2) # henka-henkei (positional variant) suffix code
else:
firstBuhinType = None
if sname[0] in ("u2ff0", "u2ff2"):
# [-01] + [-02] or [-01] + [-01] + [-02]
if firstBuhinType in ("03", "04", "09", "14", "24") and fData[5] - fData[3] > 175.0:
                return [1, fData[7]] # left-right IDS but the first component is a top-bottom one
if firstBuhinType == "02":
                return [2, fData[7]] # left-right IDS but the right component comes first
if not isComplicated and firstBuhinType not in ("01", "08") and aspect > 1.8:
                return [6, [0, gdata[0]]] # left-right IDS but the first component has a wide (landscape) placement
fkline = indexOfFirstKanjiBuhinLine(sname, kage)
if fkline is not None and fkline.line_number != 0:
                return [3, [fkline.line_number, fkline.strdata]] # left-right IDS but the left character does not come first
elif sname[0] in ("u2ff1", "u2ff3"):
# [-03] + [-04] or [-03] + [-03] + [-04]
if firstBuhinType in ("01", "02", "08") and fData[6] - fData[4] > 175.0:
                return [10, fData[7]] # top-bottom IDS but the first component is a left-right one
if firstBuhinType in ("04", "14", "24"):
                return [12, fData[7]] # top-bottom IDS but the bottom component comes first
if not isComplicated and firstBuhinType not in ("03", "09") and aspect < 0.65:
                return [15, [0, gdata[0]]] # top-bottom IDS but the first component has a tall (portrait) placement
fkline = indexOfFirstKanjiBuhinLine(sname, kage)
if fkline is not None and fkline.line_number != 0:
                return [13, [fkline.line_number, fkline.strdata]] # top-bottom IDS but the top character does not come first
elif sname[0] in ("u2ff4", "u2ff5", "u2ff6", "u2ff7", "u2ff8", "u2ff9", "u2ffa"):
# [-05] + [-06]
if firstBuhinType in ("02", "06", "07"):
                return [22, fData[7]] # enclosing IDS but the inner component comes first
fkline = indexOfFirstKanjiBuhinLine(sname, kage)
if fkline is not None and fkline.line_number != 0:
                return [23, [fkline.line_number, fkline.strdata]] # enclosing IDS but the outer character does not come first
elif sname[0] == "u2ffb":
fkline = indexOfFirstKanjiBuhinLine(sname, kage)
if fkline is not None and fkline.line_number != 0:
                return [33, [fkline.line_number, fkline.strdata]] # overlaid IDS but the first character in the IDS does not come first
else:
            return [90, sname[0]] # undefined IDC
|
Python
| 0.000001
|
@@ -2438,32 +2438,45 @@
urn %5B6, %5B0,
-g
+kage.lines%5B0%5D.str
data
-%5B0%5D
%5D%5D # %E5%B7%A6%E5%8F%B3%E3%81%AEIDS
@@ -3176,16 +3176,29 @@
%5B0,
-g
+kage.lines%5B0%5D.str
data
-%5B0%5D
%5D%5D
|
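For reference, the aspect test in the record above is just |dx/dy| over the first line-99 component's coordinates, compared against 1.8 for left-right IDS and 0.65 for top-bottom IDS. A worked micro-check with made-up coordinates:

def aspect(x1, y1, x2, y2):
    # Same computation as in is_invalid: |dx/dy|, infinite when dy is zero.
    if y1 == y2:
        return float('inf')
    return abs(float(x2 - x1) / (y2 - y1))

assert aspect(10, 80, 190, 120) > 1.8   # wide box: flagged for u2ff0/u2ff2
assert aspect(80, 10, 120, 190) < 0.65  # tall box: flagged for u2ff1/u2ff3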
dfeb286810064a2e62d19639320fb95f784c4760
|
fix forward of logit
|
model.py
|
model.py
|
import torch
import torch.nn as nn
from torch.nn import init
import torch.nn.functional as F
from torch import autograd
class CNN_Text(nn.Module):
def __init__(self, args, char_or_word, vectors=None):
super(CNN_Text,self).__init__()
self.args = args
V = args.embed_num
if char_or_word == 'char':
D = args.char_embed_dim
else:
D = args.word_embed_dim
C = args.class_num
Ci = 1
Co = args.kernel_num
if char_or_word == 'char':
Ks = args.char_kernel_sizes
else:
Ks = args.word_kernel_sizes
self.embed = nn.Embedding(V, D, padding_idx=1)
if char_or_word != 'char' and vectors is not None:
self.embed.weight.data = vectors
# print(self.embed.weight.data[100])
# print(self.embed.weight.data.size())
self.convs1 = nn.ModuleList([nn.Conv2d(Ci, Co, (K, D)) for K in Ks])
if char_or_word == 'word':
for layer in self.convs1:
if args.ortho_init == True:
init.orthogonal(layer.weight.data)
else:
layer.weight.data.uniform_(-0.01, 0.01)
layer.bias.data.zero_()
'''
self.conv13 = nn.Conv2d(Ci, Co, (3, D))
self.conv14 = nn.Conv2d(Ci, Co, (4, D))
self.conv15 = nn.Conv2d(Ci, Co, (5, D))
'''
self.dropout = nn.Dropout(args.dropout)
self.fc1 = nn.Linear(len(Ks)*Co, C)
if char_or_word == 'word':
if args.ortho_init == True:
init.orthogonal(self.fc1.weight.data)
else:
init.normal(self.fc1.weight.data)
self.fc1.weight.data.mul_(0.01)
self.fc1.bias.data.zero_()
# print(V, D, C, Ci, Co, Ks, self.convs1, self.fc1)
def conv_and_pool(self, x, conv):
x = F.relu(conv(x)).squeeze(3) #(N,Co,W)
x = F.max_pool1d(x, x.size(2)).squeeze(2)
return x
def forward(self, x):
x = self.confidence(x)
logit = F.log_softmax(self.fc1(x)) # (N,C)
return logit
def confidence(self, x):
x = self.embed(x) # (N,W,D)
if self.args.static and self.args.word_vector:
x = autograd.Variable(x.data)
x = x.unsqueeze(1) # (N,Ci,W,D)
x = [F.relu(conv(x)).squeeze(3) for conv in self.convs1] # [(N,Co,W), ...]*len(Ks)
# print([x_p.size() for x_p in x])
x = [F.max_pool1d(i, i.size(2)).squeeze(2) for i in x] # [(N,Co), ...]*len(Ks)
x = torch.cat(x, 1)
'''
x1 = self.conv_and_pool(x,self.conv13) #(N,Co)
x2 = self.conv_and_pool(x,self.conv14) #(N,Co)
x3 = self.conv_and_pool(x,self.conv15) #(N,Co)
x = torch.cat((x1, x2, x3), 1) # (N,len(Ks)*Co)
'''
x = self.dropout(x) # (N,len(Ks)*Co)
linear_out = self.fc1(x)
return linear_out
class SimpleLogistic(nn.Module):
def __init__(self, args):
super(SimpleLogistic, self).__init__()
self.args = args
self.input_size = self.args.class_num * 2
self.output_size = self.args.class_num
self.layer_num = self.args.layer_num
self.layers = nn.ModuleList([nn.Linear(self.input_size, self.input_size) if x < self.layer_num - 1 else
nn.Linear(self.input_size, self.output_size) for x in range(self.layer_num)])
def forward(self, x1, x2):
x = torch.cat((x1, x2), dim=-1)
for layer in self.layers:
x = layer(x)
return x
class StackingNet(nn.Module):
def __init__(self, args):
super(StackingNet, self).__init__()
self.args = args
self.input_size = self.args.class_num
self.output_size = self.args.class_num
self.layer_num = self.args.layer_num
self.params = nn.ParameterList([nn.Parameter(torch.rand(1)) for i in range(2)])
def forward(self, inputs):
output = 0
for index, input in enumerate(inputs):
output += input * self.params[index].expand(input.size())
output = F.log_softmax(output)
return output
|
Python
| 0.003063
|
@@ -2097,27 +2097,17 @@
softmax(
-self.fc1(x)
+x
) # (N,C
|
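The shape bookkeeping in CNN_Text.confidence is the standard Kim-style convolve-then-max-pool-over-time step. A minimal shape check of that single step; the sizes below are illustrative, not the model's real hyperparameters.

import torch
import torch.nn.functional as F
from torch import nn

N, W, D, Co, K = 8, 20, 50, 100, 3  # batch, words, embed dim, filters, kernel
x = torch.randn(N, 1, W, D)         # (N, Ci=1, W, D)
conv = nn.Conv2d(1, Co, (K, D))     # the kernel spans the full embedding dim

h = F.relu(conv(x)).squeeze(3)             # (N, Co, W-K+1): one score per n-gram
p = F.max_pool1d(h, h.size(2)).squeeze(2)  # (N, Co): max over time

assert p.shape == (N, Co)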
cbbe9d50108747b15864436de01947bc2598b1b3
|
Fix bug
|
WindAdapter/data_provider.py
|
WindAdapter/data_provider.py
|
# -*- coding: utf-8 -*-
import pandas as pd
try:
from WindPy import w
except ImportError:
pass
class WindRunner:
def __init__(self):
try:
w.start()
except NameError:
pass
def __del__(self):
try:
w.stop()
except AttributeError:
pass
except NameError:
pass
class WindDataProvider:
WIND_RUNNER = WindRunner()
def __init__(self):
pass
@staticmethod
def force_throw_err(raw_data, func_name):
if raw_data.ErrorCode != 0:
raise ValueError('{0}: {1}'.format(raw_data.Data[0], func_name))
elif len(raw_data.Data) == 0:
raise ValueError('{0}: empty data returned'.format(func_name))
@staticmethod
def get_universe(index_id, date, output_weight=False):
index_id = index_id.lower()
try:
if index_id == 'fulla':
code = 'a001010100000000'
params = 'sectorid=' + code + ';field=wind_code' if date is None \
else 'date=' + str(date) + ';sectorid=' + code
raw_data = w.wset('sectorconstituent', params)
else:
short_params = 'windcode=' + index_id
params = short_params if date is None else short_params + ';date=' + str(date)
raw_data = w.wset('IndexConstituent', params)
WindDataProvider.force_throw_err(raw_data, 'WindDataProvider.get_universe')
if output_weight:
return pd.DataFrame(data=raw_data.Data[3], index=raw_data.Data[1], columns=['weight'])
else:
return raw_data.Data[1]
except NameError:
pass
@staticmethod
def advance_date(date, unit, freq):
try:
ret = w.tdaysoffset(int(unit) * -1, date, 'period=' + freq)
WindDataProvider.force_throw_err(ret, 'WindDataProvider.advance_date')
return ret.Data[0][0]
except NameError:
pass
@staticmethod
def biz_days_list(start_date, end_date, freq):
try:
dates = w.tdays(start_date, end_date, 'period=' + freq)
WindDataProvider.force_throw_err(dates, 'WindDataProvider.biz_days_list')
return dates.Data[0]
except NameError:
pass
@staticmethod
def query_data(api, sec_id, indicator, extra_params, start_date=None, end_date=None):
if api == 'w.wsd':
ret = eval(api)(sec_id, indicator, start_date, end_date, extra_params)
elif api == 'w.wss':
ret = eval(api)(sec_id, indicator, extra_params)
else:
raise ValueError('WindDataProvider.query_data: unknown type of api')
WindDataProvider.force_throw_err(ret, 'WindDataProvider.query_data')
return ret
|
Python
| 0.000001
|
@@ -808,16 +808,21 @@
id, date
+=None
, output
|
d35f381727d5aa9e11d79cca9fe917527136853c
|
update the method URL (from the new AF website recently announced by Richard -- it looks cleaner)
|
haietmoba-reminder.py
|
haietmoba-reminder.py
|
import sys
from os import path
import webbrowser
from PyQt4 import QtGui, QtCore
if hasattr(sys, 'frozen'):
scriptDir = path.dirname(unicode(sys.executable, sys.getfilesystemencoding()))
else:
scriptDir = path.dirname(unicode(__file__, sys.getfilesystemencoding()))
theQuestion = 'How am I experiencing this moment of being alive?'
methodURL = 'http://actualfreedom.com.au/richard/articles/thismomentofbeingalive.htm'
projectURL = 'http://bitbucket.org/srid/haietmoba-reminder'
welcomeMsg = '''This application will remind you to ask HAIETMOBA every %d minutes. \
For each reminder, answer yourself how you are experiencing this moment of being alive; \
then click one of the buttons depending on how you are generally feeling.
'''
class MainWindow(QtGui.QWidget):
def __init__(self):
QtGui.QMainWindow.__init__(self)
self.resize(1, 1) # adding widgets will expand to fit size
self.setWindowTitle('HAIETMOBA?')
self.setToolTip(theQuestion)
self.setWindowFlags(
QtCore.Qt.Window |
QtCore.Qt.WindowMinimizeButtonHint | # only minimize (no maximize)
QtCore.Qt.WindowStaysOnTopHint)
self.createInterface()
self.gap = 10 # in minutes
self.quitAction = QtGui.QAction("&Quit", self,
triggered=app.quit)
def setGap(self, gap):
"""Set the gap between reminders in minutes"""
self.gap = gap
def show(self):
super(MainWindow, self).show()
self.center()
def createInterface(self):
"""Create the UI elements of our main window"""
# The reason for using three buttons (instead of just one called 'OK')
# is to help prevent the habituation. At least, one has to invest in
# a few thoughts when there are more buttons ("which one to click? ah,
# that requires me to first answer the question!")
good = QtGui.QPushButton(":-&)")
good.setToolTip('Feeling good (generally)')
meh = QtGui.QPushButton(":-&|")
meh.setToolTip('Feeling OK/neutral (generally) -- what is preventing me from feeling good now?')
bad = QtGui.QPushButton(":-&(")
bad.setToolTip('Feeling bad (generally) -- should investigate the issue')
good.clicked.connect(self.receiveAnswer)
meh.clicked.connect(self.receiveAnswer)
bad.clicked.connect(self.receiveAnswer)
# The question itself in a big/bold text
lbl = QtGui.QLabel()
lbl.setText(theQuestion)
lbl.setFont(QtGui.QFont('Verdana', 16, 100))
# Qt layout boilerplate
hbox = QtGui.QHBoxLayout()
hbox.addStretch(1)
hbox.addWidget(good)
hbox.addWidget(meh)
hbox.addWidget(bad)
hbox.addStretch(1)
vbox = QtGui.QVBoxLayout()
vbox.addStretch(1)
vbox.addWidget(lbl)
vbox.addLayout(hbox)
vbox.addStretch(1)
self.setLayout(vbox)
def receiveAnswer(self):
"""On receiving the answer, hide the window till next reminder"""
self.hide()
interval = 1000*60*self.gap
QtCore.QTimer.singleShot(interval, self.show)
def center(self):
"""Center the window on screen"""
screen = QtGui.QDesktopWidget().screenGeometry()
size = self.geometry()
self.move(
(screen.width()-size.width())/2,
(screen.height()-size.height())/2)
def closeEvent(self, event):
reply = QtGui.QMessageBox.question(
self, 'Message', 'Are you sure to quit?',
QtGui.QMessageBox.Yes, QtGui.QMessageBox.No)
if reply == QtGui.QMessageBox.Yes:
event.accept()
app.trayIcon.hide()
else:
event.ignore()
class Application(QtGui.QApplication):
def __init__(self, *args, **kw):
QtGui.QApplication.__init__(self, *args, **kw)
self.icon = QtGui.QIcon(path.join(scriptDir, 'data/icon.png'))
def createInterface(self):
self.mainWindow = MainWindow()
self.mainWindow.setWindowIcon(self.icon)
self.createSystemTrayIcon()
def show(self):
self.mainWindow.show()
self.mainWindow.center()
self.trayIcon.show()
self.trayIcon.showMessage(
'Welcome',
welcomeMsg % self.mainWindow.gap,
QtGui.QSystemTrayIcon.Information,
1000*60)
def quit(self, *a, **k):
super(Application, self).quit()
self.trayIcon.hide()
def createSystemTrayIcon(self):
"""Create a systray icon with a context menu"""
self.trayIcon = QtGui.QSystemTrayIcon(self.icon, self.mainWindow)
# systray context menu
menu = QtGui.QMenu(self.mainWindow)
self.frequency = QtGui.QActionGroup(self.mainWindow)
for (mins, choice) in [(1, 'Every minute'),
(2, 'Every 2 minutes'),
(3, 'Every 3 minutes'),
(4, 'Every 4 minutes'),
(5, 'Every 5 minutes'),
(10, 'Every 10 minutes (recommended)'),
(15, 'Every 15 minutes'),
(20, 'Every 20 minutes'),
(30, 'Every 30 minutes'),
(60, 'Every hour')]:
a = self.frequency.addAction(choice)
a.setCheckable(True)
menu.addAction(a)
def getGapSetter(m):
return lambda: self.mainWindow.setGap(m)
a.triggered.connect(getGapSetter(mins))
if 'recommended' in choice:
a.setChecked(True) # default
self.mainWindow.setGap(mins)
menu.addSeparator()
aboutAction = menu.addAction('About the actualism method')
aboutAction.triggered.connect(lambda: webbrowser.open(methodURL))
aboutAppAction = menu.addAction('Visit the application home page')
aboutAppAction.triggered.connect(lambda: webbrowser.open(projectURL))
menu.addSeparator()
menu.addAction(self.mainWindow.quitAction)
self.trayIcon.setContextMenu(menu)
self.trayIcon.setToolTip(theQuestion)
self.trayIcon.messageClicked.connect(
lambda : webbrowser.open(methodURL))
app = Application(sys.argv)
if not QtGui.QSystemTrayIcon.isSystemTrayAvailable():
QtGui.QMessageBox.critical(None, "Systray",
"I couldn't detect any system tray on this system.")
sys.exit(1)
app.createInterface()
app.show()
sys.exit(app.exec_())
|
Python
| 0
|
@@ -368,16 +368,20 @@
'http://
+www.
actualfr
|
1cb30a296e7fe8e742d68f87a1c403e1269f8206
|
support global_threshold=auto
|
Wrappers/Dials/Spotfinder.py
|
Wrappers/Dials/Spotfinder.py
|
#!/usr/bin/env python
# Spotfinder.py
#
# Copyright (C) 2013 Diamond Light Source, Richard Gildea, Graeme Winter
#
# This code is distributed under the BSD license, a copy of which is
# included in the root directory of this package.
#
# Find spots for autoindexing using the DIALS code; this will probably be
# renamed to Spotfinder at some point.
from __future__ import division
import os
from __init__ import _setup_xia2_environ
_setup_xia2_environ()
from Handlers.Flags import Flags
def Spotfinder(DriverType = None):
'''A factory for SpotfinderWrapper classes.'''
from Driver.DriverFactory import DriverFactory
DriverInstance = DriverFactory.Driver(DriverType)
class SpotfinderWrapper(DriverInstance.__class__):
def __init__(self):
DriverInstance.__class__.__init__(self)
self.set_executable('dials.find_spots')
self._sweep_filename = None
self._input_spot_filename = 'strong.pickle'
self._scan_ranges = []
self._nspots = 0
self._min_spot_size = None
self._kernel_size = None
self._global_threshold = None
self._sigma_strong = None
self._filter_ice_rings = False
self._phil_file = None
return
def set_sweep_filename(self, sweep_filename):
self._sweep_filename = sweep_filename
return
def set_input_spot_filename(self, spot_filename):
self._input_spot_filename = spot_filename
return
def get_spot_filename(self):
return os.path.join(
self.get_working_directory(), self._input_spot_filename)
def set_scan_ranges(self, scan_ranges):
self._scan_ranges = scan_ranges
return
def add_scan_range(self, scan_range):
self._scan_ranges.append(scan_range)
return
def get_nspots(self):
return self._nspots
def set_phil_file(self, phil_file):
self._phil_file = phil_file
return
def set_min_spot_size(self, min_spot_size):
self._min_spot_size = int(min_spot_size)
def set_kernel_size(self, kernel_size):
self._kernel_size = int(kernel_size)
def set_global_threshold(self, global_threshold):
self._global_threshold = float(global_threshold)
def set_sigma_strong(self, sigma_strong):
self._sigma_strong = sigma_strong
def set_filter_ice_rings(self, filter_ice_rings):
self._filter_ice_rings = filter_ice_rings
def run(self):
from Handlers.Streams import Debug
Debug.write('Running dials.find_spots')
self.clear_command_line()
self.add_command_line('input.datablock="%s"' % self._sweep_filename)
self.add_command_line('output.reflections="%s"' % self._input_spot_filename)
nproc = Flags.get_parallel()
self.set_cpu_threads(nproc)
self.add_command_line('nproc=%i' % nproc)
for scan_range in self._scan_ranges:
self.add_command_line('spotfinder.scan_range=%d,%d' % scan_range)
if self._min_spot_size is not None:
self.add_command_line('min_spot_size=%i' % self._min_spot_size)
if self._kernel_size is not None:
self.add_command_line('kernel_size=%i %i' % \
(self._kernel_size, self._kernel_size))
if self._global_threshold is not None:
self.add_command_line('global_threshold=%f' % self._global_threshold)
if self._sigma_strong is not None:
self.add_command_line('sigma_strong=%i' % self._sigma_strong)
if self._filter_ice_rings:
self.add_command_line('ice_rings.filter=%s' % self._filter_ice_rings)
if self._phil_file is not None:
self.add_command_line("%s" % self._phil_file)
self.start()
self.close_wait()
self.check_for_errors()
for record in self.get_all_output():
if record.startswith('Saved') and 'reflections to' in record:
self._nspots = int(record.split()[1])
return
return SpotfinderWrapper()
|
Python
| 0.000208
|
@@ -2156,14 +2156,8 @@
d =
-float(
glob
@@ -2168,17 +2168,16 @@
hreshold
-)
%0A%0A de
@@ -3263,17 +3263,17 @@
eshold=%25
-f
+s
' %25 self
|
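The %f to %s switch in this diff is exactly what lets a non-numeric global_threshold pass through; the companion change in set_global_threshold (dropping the float() wrapper) keeps the raw string intact all the way to the command line. A tiny illustration:

value = 'auto'
print('global_threshold=%s' % value)  # -> global_threshold=auto
try:
    'global_threshold=%f' % value     # the old %f formatting rejects 'auto'
except TypeError as err:
    print('rejected: %s' % err)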
f66cc11e2c9f9e1f9524133f83b33989259e7c33
|
support google bounding box format
|
backend/app.py
|
backend/app.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import sys
import codecs
import json
from bottle import route, run, get, request, response
o8 = codecs.getwriter('utf-8')(sys.stdout)
e8 = codecs.getwriter('utf-8')(sys.stderr)
@route('/api/v1/pois.json')
def pois_v1():
global _pois
categories = request.query.get('categories', None)
if categories is not None:
categories = categories.split(',')
else:
categories = request.query.getall('category')
if len(categories) == 0:
categories = None
bounding_box = None
if request.query.get('bbox', None) is not None:
bounding_box = BoundingBox(request.query.get('bbox', None))
result = []
for poi in _pois:
if categories is not None and poi.category not in categories:
continue
elif categories is not None and poi.category in categories:
if bounding_box is not None and poi.lon > bounding_box.left and poi.lon < bounding_box.right and poi.lat > bounding_box.bottom and poi.lat < bounding_box.top:
result.append(poi)
elif bounding_box is None:
result.append(poi)
elif categories is None:
if bounding_box is not None and poi.lon > bounding_box.left and poi.lon < bounding_box.right and poi.lat > bounding_box.bottom and poi.lat < bounding_box.top:
result.append(poi)
elif bounding_box is None:
result.append(poi)
response.content_type = 'application/json'
return json.dumps([poi.to_dict() for poi in result], ensure_ascii=False)
class BoundingBox(object):
def __init__(self, box):
bbox = box.split(",")
if len(bbox) != 4:
raise ValueError
self.bottom = float(bbox[0])
self.left = float(bbox[1])
self.top = float(bbox[2])
self.right = float(bbox[3])
def __repr__(self):
return "<Bounding Box (%s, %s), (%s, %s)>" % (self.left, self.bottom, self.right, self.top)
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
return u"({0}, {1}), ({2}, {3})".format(self.left, self.bottom, self.right, self.top)
class POI(object):
def __init__(self, lon, lat, title, location, category):
self.lon = lon
self.lat = lat
self.title = title
self.location = location
self.category = category
@classmethod
def from_list(cls, lst):
lon = float(lst[0])
lat = float(lst[1])
title = lst[2]
location = lst[3]
category = lst[4]
return POI(lon, lat, title, location, category)
def to_dict(self):
result = {}
for attr in ['lon', 'lat', 'title', 'location', 'category']:
result[attr] = getattr(self, attr)
return result
def __repr__(self):
return "<POI %s %s>" % (self.category, str(self))
def __str__(self):
return unicode(self).encode('ASCII', 'backslashreplace')
def __unicode__(self):
return u"{0}, {1}".format(self.title, self.location)
def read_pois(file):
result = []
with codecs.open(file, 'r', encoding='utf-8') as f:
for linenum, line in enumerate(f):
parts = [item.strip() for item in line.replace("\n", "").split(",") if len(item) > 0]
# print(u', '.join(parts), file=o8)
if len(parts) < 5 or len(parts) > 7:
print(u"! Unknown format, line {0}: {1}".format(linenum, line.replace("\n", ""), file=e8))
else:
result.append(POI.from_list(parts))
print(u"# Loaded {0} POIs!".format(len(result)), file=e8)
return result
if __name__ == '__main__':
from optparse import OptionParser
parser = OptionParser()
parser.add_option("--host", dest="host",
help="bind to HOST", metavar="HOST", default="localhost")
parser.add_option("--port", dest="port",
help="bind to PORT", metavar="PORT", type="int", default=8022)
parser.add_option("-p", "--poi-file", dest="poi_file",
help="read points of interests from FILE", metavar="FILE", default="../data/curated_sights.csv")
parser.add_option("-d", "--debug",
action="store_true", dest="debug", default=False,
help="print extra debug output")
opts, args = parser.parse_args()
_pois = read_pois(opts.poi_file)
run(host=opts.host, port=opts.port)
|
Python
| 0
|
@@ -664,34 +664,14 @@
b
-ounding_box = BoundingBox(
+box =
requ
@@ -697,16 +697,90 @@
', None)
+.replace(%22(%22, %22%22).replace(%22)%22, %22%22)%0A bounding_box = BoundingBox(bbox
)%0A%0A r
|
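What the added replace calls normalize: a parenthesized bounding box such as '(a,b),(c,d)' is reduced to the plain comma-separated form that BoundingBox already parses. A quick illustration with made-up coordinates:

plain = '60.15,24.90,60.18,24.95'
boxed = '(60.15,24.90),(60.18,24.95)'

assert boxed.replace('(', '').replace(')', '') == plain

bottom, left, top, right = [float(v) for v in plain.split(',')]
assert (bottom, left, top, right) == (60.15, 24.9, 60.18, 24.95)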
cd1b8bf468c8a30c18243f94d6c0115503bff7c6
|
Use invisible target to set paste location in page. Closes #1
|
kitnarchive.py
|
kitnarchive.py
|
#!/usr/bin/python
from redmine import Redmine, ResourceNotFoundError
from redmine.exceptions import BaseRedmineError
from datetime import datetime
from logging import getLogger
from re import split as re_split
from kitnirc.modular import Module
from kitnirc.contrib.admintools import is_admin
_log = getLogger(__name__)
class ArchiveModule(Module):
def __init__(self, *args, **kwargs):
super(ArchiveModule, self).__init__(*args, **kwargs)
config = self.controller.config
if not config.has_section('redmine'):
raise KeyError("No redmine section in configuration file.")
for setting in ['host', 'api_key', 'default_project', 'infobot']:
if not config.has_option('redmine', setting):
raise KeyError("Missing required redmine setting: {}".format(setting))
self.hostname = config.get('redmine', 'host')
self.host = Redmine(self.hostname, key=config.get('redmine', 'api_key'))
self.project = config.get('redmine', 'default_project')
self.infobot = config.get('redmine', 'infobot')
self.waiting = None
self.info = ""
self.requester = ()
self.got_preamble = False
def append_page(self, title, new_text, project_id=None, comment=None, requester=None):
if project_id == None:
project_id = self.project
if comment == None:
if requester:
comment = "Updated by {} at {}'s request.".format(self.controller.config.get('server', 'nick'), requester[0])
else:
comment = "Updated by {}.".format(self.controller.config.get('server', 'nick'))
page = self.host.wiki_page.get(title, project_id=project_id)
page.text += new_text
page.comments = comment
page.save()
def archive(self):
factoids = re_split(r'( or |\|)', self.info)[::2]
formatted_info = "\n\n{} recorded on {} that {} is:\n* ".format(self.controller.config.get('server', 'nick'), datetime.today().date(), self.waiting) + "\n* ".join(factoids)
try:
self.append_page('API_test', formatted_info, requester=self.requester)
self.controller.client.reply(self.requester[1], self.requester[0], "Done! {}/projects/{}/wiki/API_test".format(self.hostname, self.project, self.waiting))
except ResourceNotFoundError:
self.controller.client.reply(self.requester[1], self.requester[0], "Sorry, that wiki page doesn't exist yet.")
self.clear()
def clear(self):
_log.debug("Clearing waiting stuff.")
self.waiting = None
self.requester = ()
self.info = ""
self.got_preamble = False
@Module.handle('PRIVMSG')
def handle_privmsg(self, client, actor, recipient, message):
message = message.strip()
if str(recipient) != client.user.nick:
if message.startswith(client.user.nick):
message = message.split(None, 1)[1]
else:
return
if message == "clear" and is_admin(self.controller, client, actor):
if self.waiting is None:
client.reply(recipient, actor, "I'm not actually trying to archive anything right now.")
else:
client.reply(recipient, actor, "Okay, I'll stop trying to archive {}.".format(self.waiting))
self.clear()
elif message == "source":
client.reply(recipient, actor, "https://github.com/relsqui/archivebot")
elif message == "help":
client.reply(recipient, actor, "Usage: archive <topic>. So far I just post to a test page, as a proof of concept, but stay tuned.")
elif self.waiting and actor == self.infobot and str(recipient) == client.user.nick:
preamble = self.waiting + " =is= "
if self.got_preamble:
if message.startswith("... "):
message = message[4:]
if message.endswith(" ..."):
message = message[:-3]
self.info += (message)
else:
self.info += (message)
self.archive()
elif message.startswith(preamble):
message = message[len(preamble):]
self.got_preamble = True
if message.endswith(" ..."):
message = message[:-3]
self.info += (message)
else:
self.info += (message)
self.archive()
else:
client.reply(self.requester[1], self.requester[0], "{} doesn't seem to know anything about {}.".format(self.infobot.split("!", 1)[0], self.waiting))
self.clear()
elif message.startswith("archive"):
if self.waiting != None:
client.reply(recipient, actor, "Hang on, I'm still archiving {}.".format(self.waiting))
else:
key = message.split(None, 1)[1]
self.waiting = key
self.requester = (actor.split("!", 1)[0], recipient)
infonick = self.infobot.split("!", 1)[0]
self.controller.client.msg(infonick, "{}: literal {}".format(infonick, key))
else:
client.reply(recipient, actor, "?")
module = ArchiveModule
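
# Sketch of the factoid split used in archive(): re_split() keeps the
# separators, so taking every other element ([::2]) drops " or " / "|":
#
#     re_split(r'( or |\|)', "a fish or a bird|a rock")[::2]
#     # -> ['a fish', 'a bird', 'a rock']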
|
Python
| 0
|
@@ -1716,16 +1716,291 @@
ect_id)%0A
+ target = %22%5Cn!%3Ehttps://raw.githubusercontent.com/relsqui/archivebot/master/ArchiveBot-target.png!%22%0A new_text += target%0A if target in page.text:%0A parts = page.text.split(target, 1)%0A page.text = new_text.join(parts)%0A else:%0A
|
2a0e3fe9c83da1d11b892c7c35e367f414329936
|
Update teaching_modules.py
|
src/ensae_teaching_cs/automation/teaching_modules.py
|
src/ensae_teaching_cs/automation/teaching_modules.py
|
# -*- coding: utf-8 -*-
"""
@file
@brief List of modules to maintain for the teachings.
"""
def get_teaching_modules():
"""
List of teachings modules to maintain (CI + documentation).
.. runpython::
:showcode:
from ensae_teaching_cs.automation import get_teaching_modules
print('\\n'.join(sorted(get_teaching_modules())))
"""
return ["pymlbenchmark", "_benchmarks", "ensae_teaching_dl", "machinelearningext",
"lecture_citation", "botadi", "pyquickhelper", "jyquickhelper",
"python3_module_template", "mathenjeu", "pymmails", "pymyinstall",
"pyensae", "pyrsslocal", "pysqllike", "ensae_projects", "ensae_teaching_cs",
"code_beatrix", "actuariat_python", "mlstatpy", "jupytalk", "teachpyx",
"tkinterquickhelper", "cpyquickhelper", "pandas_streaming",
"lightmlboard", "lightmlrestapi", "mlinsights", "pyenbc", "mlprodict",
"papierstat", "sparkouille", "manydataapi", "csharpy", "csharpyml",
"wrapclib", "myblog", "_check_python_install", "onnxcustom"
]
|
Python
| 0.000001
|
@@ -1074,22 +1074,8 @@
all%22
-, %22onnxcustom%22
%0A
|
bc08499fd803278ea502bafdf845dec438f951f3
|
Update range-sum-query-2d-immutable.py
|
Python/range-sum-query-2d-immutable.py
|
Python/range-sum-query-2d-immutable.py
|
# Time: ctor: O(m * n)
# lookup: O(1)
# Space: O(m * n)
#
# Given a 2D matrix matrix, find the sum of the elements inside
# the rectangle defined by its upper left corner (row1, col1)
# and lower right corner (row2, col2).
#
# Range Sum Query 2D
# The above rectangle (with the red border) is defined by
# (row1, col1) = (2, 1) and (row2, col2) = (4, 3),
# which contains sum = 8.
#
# Example:
# Given matrix = [
# [3, 0, 1, 4, 2],
# [5, 6, 3, 2, 1],
# [1, 2, 0, 1, 5],
# [4, 1, 0, 1, 7],
# [1, 0, 3, 0, 5]
# ]
#
# sumRegion(2, 1, 4, 3) -> 8
# sumRegion(1, 1, 2, 2) -> 11
# sumRegion(1, 2, 2, 4) -> 12
# Note:
# You may assume that the matrix does not change.
# There are many calls to sumRegion function.
# You may assume that row1 <= row2 and col1 <= col2.
class NumMatrix(object):
def __init__(self, matrix):
"""
initialize your data structure here.
:type matrix: List[List[int]]
"""
if not matrix:
return
m, n = len(matrix), len(matrix[0])
self.sums = [[0 for _ in xrange(n+1)] for _ in xrange(m+1)]
for i in xrange(1, m+1):
for j in xrange(1, n+1):
self.sums[i][j] = matrix[i-1][j-1]
self.sums[i][j] += self.sums[i][j-1]
for j in xrange(1, n+1):
for i in xrange(1, m+1):
self.sums[i][j] += self.sums[i-1][j]
def sumRegion(self, row1, col1, row2, col2):
"""
sum of elements matrix[(row1,col1)..(row2,col2)], inclusive.
:type row1: int
:type col1: int
:type row2: int
:type col2: int
:rtype: int
"""
return self.sums[row2+1][col2+1] - self.sums[row2+1][col1] - \
self.sums[row1][col2+1] + self.sums[row1][col1]
# Your NumMatrix object will be instantiated and called as such:
# numMatrix = NumMatrix(matrix)
# numMatrix.sumRegion(0, 1, 2, 3)
# numMatrix.sumRegion(1, 2, 3, 4)
|
Python
| 0.000005
|
@@ -979,16 +979,17 @@
return%0A
+%0A
|
27f8b342e1a4bea9c807b005d16f932880bb7136
|
Document utils.setup_readline
|
jedi/utils.py
|
jedi/utils.py
|
"""
Utilities for end-users.
"""
import sys
from jedi import Interpreter
def readline_complete(text, state):
"""
Function to be passed to :func:`readline.set_completer`.
Usage::
import readline
readline.set_completer(readline_complete)
"""
ns = vars(sys.modules['__main__'])
completions = Interpreter(text, [ns]).completions()
try:
return text + completions[state].complete
except IndexError:
return None
def setup_readline():
"""
Install Jedi completer to :mod:`readline`.
"""
try:
import readline
except ImportError:
print("Module readline not available.")
else:
readline.set_completer(readline_complete)
readline.parse_and_bind("tab: complete")
# Default delimiters minus "()":
readline.set_completer_delims(' \t\n`~!@#$%^&*-=+[{]}\\|;:\'",<>/?')
|
Python
| 0.000001
|
@@ -548,16 +548,269 @@
dline%60.%0A
+%0A This function setups :mod:%60readline%60 to use Jedi in Python interactive%0A shell. If you want to use custom %60%60PYTHONSTARTUP%60%60 file, you can call%0A this function like this:%0A%0A %3E%3E%3E from jedi.utils import setup_readline%0A %3E%3E%3E setup_readline()%0A%0A
%22%22%22%0A
|
d2514b01be2bc3ba527eb9dc528309d2c7452ddb
|
Remove additional space after initials
|
mycroft/audio/speech.py
|
mycroft/audio/speech.py
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import re
import time
from threading import Lock
from mycroft.configuration import Configuration
from mycroft.metrics import report_timing, Stopwatch
from mycroft.tts import TTSFactory
from mycroft.util import create_signal, check_for_signal
from mycroft.util.log import LOG
from mycroft.messagebus.message import Message
from mycroft.tts.remote_tts import RemoteTTSTimeoutException
from mycroft.tts.mimic_tts import Mimic
bus = None # Mycroft messagebus connection
config = None
tts = None
tts_hash = None
lock = Lock()
mimic_fallback_obj = None
_last_stop_signal = 0
def _start_listener(message):
"""
Force Mycroft to start listening (as if 'Hey Mycroft' was spoken)
"""
create_signal('startListening')
def handle_speak(event):
"""
Handle "speak" message
"""
config = Configuration.get()
Configuration.init(bus)
global _last_stop_signal
# Get conversation ID
if event.context and 'ident' in event.context:
ident = event.context['ident']
else:
ident = 'unknown'
start = time.time() # Time of speech request
with lock:
stopwatch = Stopwatch()
stopwatch.start()
utterance = event.data['utterance']
if event.data.get('expect_response', False):
# When expect_response is requested, the listener will be restarted
# at the end of the next bit of spoken audio.
bus.once('recognizer_loop:audio_output_end', _start_listener)
# This is a bit of a hack for Picroft. The analog audio on a Pi blocks
# for 30 seconds fairly often, so we don't want to break on periods
# (decreasing the chance of encountering the block). But we will
# keep the split for non-Picroft installs since it give user feedback
# faster on longer phrases.
#
# TODO: Remove or make an option? This is really a hack, anyway,
# so we likely will want to get rid of this when not running on Mimic
if (config.get('enclosure', {}).get('platform') != "picroft" and
len(re.findall('<[^>]*>', utterance)) == 0):
chunks = re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
utterance)
for chunk in chunks:
                # Check if something has aborted the speech
if (_last_stop_signal > start or
check_for_signal('buttonPress')):
# Clear any newly queued speech
tts.playback.clear()
break
try:
mute_and_speak(chunk, ident)
except KeyboardInterrupt:
raise
except Exception:
LOG.error('Error in mute_and_speak', exc_info=True)
else:
mute_and_speak(utterance, ident)
stopwatch.stop()
report_timing(ident, 'speech', stopwatch, {'utterance': utterance,
'tts': tts.__class__.__name__})
def mute_and_speak(utterance, ident):
"""
Mute mic and start speaking the utterance using selected tts backend.
Args:
utterance: The sentence to be spoken
ident: Ident tying the utterance to the source query
"""
global tts_hash
# update TTS object if configuration has changed
if tts_hash != hash(str(config.get('tts', ''))):
global tts
# Stop tts playback thread
tts.playback.stop()
tts.playback.join()
# Create new tts instance
tts = TTSFactory.create()
tts.init(bus)
tts_hash = hash(str(config.get('tts', '')))
LOG.info("Speak: " + utterance)
try:
tts.execute(utterance, ident)
except RemoteTTSTimeoutException as e:
LOG.error(e)
mimic_fallback_tts(utterance, ident)
except Exception as e:
LOG.error('TTS execution failed ({})'.format(repr(e)))
def mimic_fallback_tts(utterance, ident):
global mimic_fallback_obj
# fallback if connection is lost
config = Configuration.get()
tts_config = config.get('tts', {}).get("mimic", {})
lang = config.get("lang", "en-us")
if not mimic_fallback_obj:
mimic_fallback_obj = Mimic(lang, tts_config)
tts = mimic_fallback_obj
LOG.debug("Mimic fallback, utterance : " + str(utterance))
tts.init(bus)
tts.execute(utterance, ident)
def handle_stop(event):
"""
handle stop message
"""
global _last_stop_signal
if check_for_signal("isSpeaking", -1):
_last_stop_signal = time.time()
tts.playback.clear() # Clear here to get instant stop
bus.emit(Message("mycroft.stop.handled", {"by": "TTS"}))
def init(messagebus):
""" Start speech related handlers.
Arguments:
messagebus: Connection to the Mycroft messagebus
"""
global bus
global tts
global tts_hash
global config
bus = messagebus
Configuration.init(bus)
config = Configuration.get()
bus.on('mycroft.stop', handle_stop)
bus.on('mycroft.audio.speech.stop', handle_stop)
bus.on('speak', handle_speak)
bus.on('mycroft.mic.listen', _start_listener)
tts = TTSFactory.create()
tts.init(bus)
tts_hash = config.get('tts')
def shutdown():
if tts:
tts.playback.stop()
tts.playback.join()
if mimic_fallback_obj:
mimic_fallback_obj.playback.stop()
mimic_fallback_obj.playback.join()
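
# Minimal sketch of the chunking regex in handle_speak(): it splits on
# sentence-ending punctuation followed by whitespace, skipping dotted
# abbreviations like "e.g." and title-style ones like "Mr.":
#
#     re.split(r'(?<!\w\.\w.)(?<![A-Z][a-z]\.)(?<=\.|\;|\?)\s',
#              "Hello there. How are you? Fine.")
#     # -> ['Hello there.', 'How are you?', 'Fine.']
#
# (Single initials like "A. Lincoln" still split with this pattern.)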
|
Python
| 0.999956
|
@@ -2695,16 +2695,310 @@
== 0):%0A
+ # Remove any whitespace present after the period,%0A # if a character (only alpha) ends with a period%0A # ex: A. Lincoln -%3E A.Lincoln%0A # so that we don't split at the period%0A utterance = re.sub(r'%5Cb(%5BA-za-z%5D%5B%5C.%5D)(%5Cs+)', r'%5Cg%3C1%3E', utterance)%0A
|
c9dceb4dc83490ab0eebcd3efc9590d3275f53df
|
tidy up messytables-jts integration
|
ktbh/schema.py
|
ktbh/schema.py
|
import unicodecsv
from cStringIO import StringIO
import messytables
import itertools
import slugify
import jsontableschema
from messytables.types import *
from messytables_jts import rowset_as_schema
def censor(dialect):
tmp = dict(dialect)
censored = [
"doublequote",
"lineterminator",
"skipinitialspace"
]
    for key in censored:
        tmp.pop(key)
return tmp
def sabotage(d):
    for k in d:
        if isinstance(d[k], unicode):
            d[k] = d[k].encode('utf-8')
def get_type_of_column(col):
try:
return rowset_as_schema(col)
except:
return "any"
def infer_schema(data, _dialect):
f = StringIO(data)
sabotage(_dialect)
d = unicodecsv.reader(f, dialect=None, **_dialect)
field_names = d.next()
f.seek(0)
dialect = censor(_dialect)
t = messytables.CSVTableSet(f, **dialect).tables[0]
sample = itertools.islice(t, 0, 9)
types = messytables.type_guess(sample)
json_table_schema_types = map(get_type_of_column(t),
types)
slugs = [ slugify.slugify(i) for i in field_names ]
metadata = zip(slugs, field_names, json_table_schema_types)
sch = jsontableschema.JSONTableSchema()
for field_id, label, field_type in metadata:
sch.add_field(field_id=field_id,
label=label,
field_type=field_type)
return sch.as_json()
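
# Hedged usage sketch; the dialect keys are illustrative, but the three
# keys that censor() strips must be present or tmp.pop() raises:
#
#     infer_schema(
#         "name,age\nada,36\nbob,41\n",
#         {"delimiter": u",", "quotechar": u'"', "doublequote": True,
#          "lineterminator": u"\r\n", "skipinitialspace": False},
#     )
#     # -> JSON Table Schema (as a JSON string) describing both columns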
|
Python
| 0
|
@@ -177,32 +177,34 @@
import
-rowset_as_schema
+celltype_as_string
%0A%0Adef ce
@@ -571,24 +571,26 @@
urn
-rowset_as_schema
+celltype_as_string
(col
@@ -1043,11 +1043,8 @@
lumn
-(t)
,%0A
|
39de531241f987daf2f417fd419c7bd63248dd9d
|
Bump version number.
|
kyokai/util.py
|
kyokai/util.py
|
"""
Misc utilities.
"""
import os
import pathlib
VERSION = "1.3.8"
VERSIONT = tuple(map(int, VERSION.split('.')))
HTTP_CODES = {
200: "OK",
201: "Created",
202: "Accepted",
203: "Non-Authoritative Information",
204: "No Content",
205: "Reset Content",
301: "Moved Permanently",
302: "Found",
303: "See Other",
304: "Not Modified",
400: "Bad Request",
401: "Unauthorized",
403: "Forbidden",
404: "Not Found",
405: "Method Not Allowed",
410: "Gone",
413: "Payload Too Large",
429: "Too Many Requests",
500: "Internal Server Error"
}
def static_filename(filename: str) -> str:
"""
Naive static filename implementation, to allow serving static files.
"""
built = ""
p = pathlib.PurePath(filename)
for part in p.parts:
if part != "..":
built += part + os.path.sep
return built[:-1]
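
# Rough sketch of the traversal filter above: ".." parts are dropped
# rather than resolved, so a hostile path degrades harmlessly:
#
#     static_filename("css/../../etc/passwd")
#     # parts: ('css', '..', '..', 'etc', 'passwd')
#     # -> "css/etc/passwd" on POSIX; the static root is never escaped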
|
Python
| 0
|
@@ -60,11 +60,11 @@
%221.
-3.8
+5.0
%22%0AVE
|
34ca71d5db9c1f17d236e5e49471fb6f2a6e1747
|
Implement Paulo tip about router.
|
aldryn_search/router.py
|
aldryn_search/router.py
|
# -*- coding: utf-8 -*-
from django.conf import settings
from cms.utils.i18n import get_current_language
from haystack import routers
from haystack.constants import DEFAULT_ALIAS
class LanguageRouter(routers.BaseRouter):
def for_read(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
def for_write(self, **hints):
language = get_current_language()
if language not in settings.HAYSTACK_CONNECTIONS:
return DEFAULT_ALIAS
return language
|
Python
| 0
|
@@ -59,21 +59,31 @@
rom
-cms
+django
.utils.
-i18
+translatio
n im
@@ -87,31 +87,70 @@
import get_
-current
+language%0Afrom cms.utils.i18n import alias_from
_language%0A%0Af
@@ -319,32 +319,24 @@
guage = get_
-current_
language()%0A
@@ -338,35 +338,79 @@
e()%0A
-if language
+alias = alias_from_language(language)%0A%0A if alias
not in sett
@@ -484,24 +484,21 @@
return
-language
+alias
%0A%0A de
@@ -552,16 +552,8 @@
get_
-current_
lang
@@ -571,19 +571,63 @@
-if language
+alias = alias_from_language(language)%0A%0A if alias
not
@@ -709,17 +709,14 @@
return
-language
+alias
%0A
|
9fa2214b03d3264240b46fe5368b37ffa696f50c
|
Allow test selection according to tags
|
baf/src/baf.py
|
baf/src/baf.py
|
"""The main module of the Bayesian API Fuzzer."""
import sys
from time import time
from fastlog import log
from csv_reader import read_csv_as_dicts
from setup import setup
from cliargs import cli_parser
from fuzzer import run_test
from results import Results
from report_generator import generate_reports
VERSION_MAJOR = 1
VERSION_MINOR = 0
def run_all_loaded_tests(cfg, fuzzer_settings, tests, results):
"""Run all tests read from CSV file."""
i = 1
for test in tests:
log.info("Starting test #{n} with name '{desc}'".format(n=i, desc=test["Name"]))
with log.indent():
run_test(cfg, fuzzer_settings, test, results)
i += 1
def start_tests(cfg, fuzzer_settings, tests, results):
"""Start all tests using the already loaded configuration and fuzzer settings."""
log.info("Run tests")
with log.indent():
if not tests or len(tests) == 0:
log.error("No tests loaded!")
sys.exit(-1)
if len(tests) == 1:
log.success("Loaded 1 test")
else:
log.success("Loaded {n} tests".format(n=len(tests)))
run_all_loaded_tests(cfg, fuzzer_settings, tests, results)
def read_fuzzer_settings(filename):
"""Read fuzzer settings from the CSV file."""
log.info("Read fuzzer settings")
with log.indent():
fuzzer_settings = read_csv_as_dicts(filename)
if len(fuzzer_settings) == 1:
log.success("Loaded 1 setting")
else:
log.success("Loaded {n} settings".format(n=len(fuzzer_settings)))
return fuzzer_settings
def show_version():
"""Show BAF version."""
print("BAF version {major}.{minor}".format(major=VERSION_MAJOR, minor=VERSION_MINOR))
def main():
"""Entry point to the Bayesian API Fuzzer."""
log.setLevel(log.INFO)
cli_arguments = cli_parser.parse_args()
if cli_arguments.version:
show_version()
sys.exit(0)
else:
cfg = setup(cli_arguments)
fuzzer_settings = read_fuzzer_settings("fuzzer_settings.csv")
results = Results()
tests = read_csv_as_dicts(cfg["input_file"])
t1 = time()
start_tests(cfg, fuzzer_settings, tests, results)
t2 = time()
generate_reports(tests, results, cfg, t2 - t1)
if __name__ == "__main__":
# execute only if run as a script
main()
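
# Set-subset semantics used for tag-based test selection (assuming
# parse_tags() returns a set of tag strings): a test runs only when
# every requested tag appears among the test's tags:
#
#     {"smoke"} <= {"smoke", "slow"}         # True  -> run
#     {"smoke", "gpu"} <= {"smoke", "slow"}  # False -> skip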
|
Python
| 0
|
@@ -165,17 +165,29 @@
rt setup
+, parse_tags
%0A
-
from cli
@@ -684,16 +684,605 @@
+= 1%0A%0A%0A
+def run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags):%0A %22%22%22Run tests read from CSV file that are marged by any of tags provided in tags parameter.%22%22%22%0A i = 1%0A for test in tests:%0A test_tags = parse_tags(test%5B%22Tags%22%5D)%0A if tags %3C= test_tags:%0A log.info(%22Starting test #%7Bn%7D with name '%7Bdesc%7D'%22.format(n=i, desc=test%5B%22Name%22%5D))%0A with log.indent():%0A run_test(cfg, fuzzer_settings, test, results)%0A i += 1%0A else:%0A log.info(%22Skipping test #%7Bn%7D with name '%7Bdesc%7D'%22.format(n=i, desc=test%5B%22Name%22%5D))%0A%0A%0A
def star
@@ -1317,32 +1317,38 @@
, tests, results
+, tags
):%0A %22%22%22Start
@@ -1721,24 +1721,49 @@
en(tests)))%0A
+ if not tags:%0A
run_
@@ -1817,16 +1817,106 @@
esults)%0A
+ else:%0A run_tests_with_tags(cfg, fuzzer_settings, tests, results, tags)%0A
%0A%0Adef re
@@ -2866,32 +2866,59 @@
t1 = time()%0A
+ tags = cfg%5B%22tags%22%5D%0A
start_te
@@ -2949,32 +2949,38 @@
, tests, results
+, tags
)%0A t2 = t
|
4c600c15c64f92fb256225ad3f4e85e3ea057976
|
Remove collections import from SMOTEENN
|
imblearn/combine/smote_enn.py
|
imblearn/combine/smote_enn.py
|
"""Class to perform over-sampling using SMOTE and cleaning using ENN."""
from __future__ import print_function
from __future__ import division
from ..over_sampling import SMOTE
from ..under_sampling import EditedNearestNeighbours
from ..base import SamplerMixin
class SMOTEENN(SamplerMixin):
"""Class to perform over-sampling using SMOTE and cleaning using ENN.
Combine over- and under-sampling using SMOTE and Edited Nearest Neighbours.
Parameters
----------
ratio : str or float, optional (default='auto')
If 'auto', the ratio will be defined automatically to balance
the dataset. Otherwise, the ratio is defined as the
number of samples in the minority class over the the number of
samples in the majority class.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by np.random.
k : int, optional (default=5)
Number of nearest neighbours to used to construct synthetic
samples.
m : int, optional (default=10)
Number of nearest neighbours to use to determine if a minority
sample is in danger.
out_step : float, optional (default=0.5)
Step size when extrapolating.
kind_smote : str, optional (default='regular')
The type of SMOTE algorithm to use one of the following
options: 'regular', 'borderline1', 'borderline2', 'svm'.
size_ngh : int, optional (default=3)
Size of the neighbourhood to consider to compute the average
distance to the minority point samples.
kind_sel : str, optional (default='all')
Strategy to use in order to exclude samples.
- If 'all', all neighbours will have to agree with the samples of
interest to not be excluded.
- If 'mode', the majority vote of the neighbours will be used in
order to exclude a sample.
n_jobs : int, optional (default=-1)
The number of threads to open if possible.
Attributes
----------
min_c_ : str or int
The identifier of the minority class.
max_c_ : str or int
The identifier of the majority class.
stats_c_ : dict of str/int : int
        A dictionary in which the number of occurrences of each class is
reported.
X_shape_ : tuple of int
Shape of the data `X` during fitting.
Notes
-----
The method is presented in [1]_.
    This class does not support multi-class.
Examples
--------
>>> from collections import Counter
>>> from sklearn.datasets import make_classification
>>> from imblearn.combine import SMOTEENN
>>> X, y = make_classification(n_classes=2, class_sep=2, weights=[0.1, 0.9],
... n_informative=3, n_redundant=1, flip_y=0,
... n_features=20, n_clusters_per_class=1,
... n_samples=1000, random_state=10)
>>> print('Original dataset shape {}'.format(Counter(y)))
Original dataset shape Counter({1: 900, 0: 100})
>>> sme = SMOTEENN(random_state=42)
>>> X_res, y_res = sme.fit_sample(X, y)
>>> print('Resampled dataset shape {}'.format(Counter(y_res)))
Resampled dataset shape Counter({0: 900, 1: 865})
References
----------
.. [1] G. Batista, R. C. Prati, M. C. Monard. "A study of the behavior of
several methods for balancing machine learning training data," ACM
Sigkdd Explorations Newsletter 6 (1), 20-29, 2004.
"""
def __init__(self, ratio='auto', random_state=None,
k=5, m=10, out_step=0.5, kind_smote='regular',
size_ngh=3, kind_enn='all', n_jobs=-1, **kwargs):
super(SMOTEENN, self).__init__(ratio=ratio)
self.random_state = random_state
self.k = k
self.m = m
self.out_step = out_step
self.kind_smote = kind_smote
self.size_ngh = size_ngh
self.kind_enn = kind_enn
self.n_jobs = n_jobs
self.kwargs = kwargs
from collections import Counter
self.sm = SMOTE(ratio=self.ratio, random_state=self.random_state,
k=self.k, m=self.m, out_step=self.out_step,
kind=self.kind_smote, n_jobs=self.n_jobs,
**self.kwargs)
self.enn = EditedNearestNeighbours(random_state=self.random_state,
size_ngh=self.size_ngh,
kind_sel=self.kind_enn,
n_jobs=self.n_jobs)
def fit(self, X, y):
"""Find the classes statistics before to perform sampling.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
self : object,
Return self.
"""
super(SMOTEENN, self).fit(X, y)
# Fit using SMOTE
self.sm.fit(X, y)
return self
def _sample(self, X, y):
"""Resample the dataset.
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Matrix containing the data which have to be sampled.
y : ndarray, shape (n_samples, )
Corresponding label for each sample in X.
Returns
-------
X_resampled : ndarray, shape (n_samples_new, n_features)
The array containing the resampled data.
y_resampled : ndarray, shape (n_samples_new)
The corresponding label of `X_resampled`
"""
# Transform using SMOTE
X, y = self.sm.sample(X, y)
# Fit and transform using ENN
return self.enn.fit_sample(X, y)
|
Python
| 0
|
@@ -4230,48 +4230,8 @@
rgs%0A
- from collections import Counter%0A
|
e5e523890dd1129402d7a0477468ee47dee3fd91
|
Fix missing part/block conversion.
|
inbox/sendmail/smtp/common.py
|
inbox/sendmail/smtp/common.py
|
from inbox.sendmail.base import generate_attachments, SendError
from inbox.sendmail.smtp.postel import BaseSMTPClient
from inbox.sendmail.smtp.message import create_email, create_reply
class SMTPClient(BaseSMTPClient):
""" SMTPClient for Gmail and other providers. """
def _send_mail(self, db_session, message, smtpmsg):
"""Send the email message."""
# Send it using SMTP:
try:
return self._send(smtpmsg.recipients, smtpmsg.msg)
except SendError as e:
self.log.error(str(e))
raise
def send_new(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email from this user account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_email(self.sender_name, self.email_address,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
def send_reply(self, db_session, draft, recipients):
"""
Send a previously created + saved draft email reply from this user
account.
"""
inbox_uid = draft.inbox_uid
subject = draft.subject
body = draft.sanitized_body
attachments = generate_attachments(draft.attachments)
smtpmsg = create_reply(self.sender_name, self.email_address,
draft.in_reply_to, draft.references,
inbox_uid, recipients, subject, body,
attachments)
return self._send_mail(db_session, draft, smtpmsg)
|
Python
| 0
|
@@ -807,32 +807,86 @@
.sanitized_body%0A
+ blocks = %5Bp.block for p in draft.attachments%5D%0A
attachme
@@ -912,32 +912,21 @@
chments(
-draft.attachment
+block
s)%0A%0A
@@ -1433,32 +1433,86 @@
.sanitized_body%0A
+ blocks = %5Bp.block for p in draft.attachments%5D%0A
attachme
@@ -1538,32 +1538,21 @@
chments(
-draft.attachment
+block
s)%0A%0A
|
d5561c37780fa29db15a076eff0c3faf126c6470
|
handle script files with . in the name properly (Issue #23)
|
jip/parser.py
|
jip/parser.py
|
#!/usr/bin/env python
"""The JIP parser module provides methods to parse tools from scripts.
"""
import os
import re
from collections import defaultdict
from textwrap import dedent
from jip.tools import Block, ScriptTool
# currently supported block types
VALIDATE_BLOCK = "validate"
COMMAND_BLOCK = "command"
SETUP_BLOCK = "setup"
INIT_BLOCK = "init"
PIPELINE_BLOCK = "pipeline"
# currently supported block types as a list
SUPPORTED_BLOCKS = [
SETUP_BLOCK,
INIT_BLOCK,
VALIDATE_BLOCK,
COMMAND_BLOCK,
PIPELINE_BLOCK
]
# the pattern to find opening blocks catching
# #%begin [<type> [<interpreter> [<args>]]]
_begin_block_pattern = re.compile(r'^\s*#%begin\s*'
'(?P<type>\w*)\s*'
'(?P<interpreter>\w*)\s*'
'(?P<args>.*)')
# end block pattern catches
# #%end [<type>]
_end_block_pattern = re.compile(r'^\s*#%end(\s+(?P<type>\w+))?$')
def split_header(lines):
"""Split lines into header and content lines removing the
shebang.
"""
if isinstance(lines, basestring):
lines = lines.split("\n")
header = []
content = []
header_finished = False
# iterate without shebang line
for l in [l for l in lines if (len(l) < 2 or l[0:2] != "#!")]:
if not header_finished \
and (len(l) >= 1 and l[0] == "#") \
and (len(l) < 2 or l[0:2] not in ["#%"]):
header.append(l)
else:
header_finished = True
content.append(l)
return header, content
def parse_block_begin(l, lineno=1):
"""Check if the given line opens a block. If a block is opened,
a new block is created and returned"""
match = _begin_block_pattern.match(l)
if match:
# begin block parsing
m = match.groupdict()
if m['type'] == '':
raise Exception("%d :: #%begin block defined but "
"not type specified!" % (lineno))
if not m['type'] in SUPPORTED_BLOCKS:
raise Exception("%d :: Block type '%s' not supported" %
(lineno, m['type']))
interpreter = m['interpreter'] if m['interpreter'] != '' else None
return m['type'], Block(interpreter=interpreter,
interpreter_args=m['args'],
lineno=lineno)
return None, None
def parse_block_end(l, current_type, lineno=1):
"""Parse end of block and raises an exception in case
the block types do not match.
"""
match = _end_block_pattern.match(l)
if match:
m = match.groupdict()
if m['type'] is not None and \
m['type'] != '' and m['type'] != current_type:
raise Exception("%d :: Block types do not match. Currently open "
"block is '%s' and closing block is "
"'%s'" % (lineno, current_type, m['type']))
return True
return False
def parse_blocks(content, lineno=0):
"""Parse content lines collecting blocks
"""
blocks = defaultdict(list)
current_block = None
current_type = None
anonymous_block = False
for lineno, l in enumerate(content, start=lineno + 1):
if len(l.strip()) == 0:
continue
block_type, new_block = parse_block_begin(l, lineno)
if new_block:
if anonymous_block:
if len(current_block.content) > 0:
blocks[COMMAND_BLOCK].append(current_block)
current_type = None
current_block = None
anonymous_block = False
if current_block:
raise Exception("%d :: Nested blocks are not supported! "
"Currently open block is '%s'" %
(lineno, current_block))
current_block = new_block
current_type = block_type
elif parse_block_end(l, current_type):
if len(current_block.content) > 0:
blocks[current_type].append(current_block)
current_block = None
anonymous_block = False
elif current_block is not None:
current_block.content.append(l)
else:
# create anonymous bash block
current_block = Block(interpreter="bash", lineno=lineno)
current_block.content.append(l)
current_type = COMMAND_BLOCK
anonymous_block = True
if current_block is not None:
# close anonymous blocks
if len(current_block.content) > 0:
blocks[current_type].append(current_block)
return blocks
def _create_docstring(header):
"""Removes the shebang and all initial comment characters before
    it joins and dedents the lines
:param header: header lines
:type header: list of strings
"""
if header and len(header) > 0:
head_of = 0 if not header[0].startswith("#!") else 1
lines = []
for h in header[head_of:]:
if len(h) > 0:
if h[0] == "#":
h = h[1:]
lines.append(h)
return dedent("\n".join(lines))
return ""
def load(content, script_class=None, is_pipeline=False):
lines = content.split("\n")
if not is_pipeline:
if len(lines[0]) > 0:
if re.match(r'^#!/usr/bin/env.*jip.*(-p|--pipeline).*$', lines[0]):
is_pipeline = True
header, content = split_header(lines)
lineno = len(header) + 1
blocks = parse_blocks(content, lineno)
command_block = None
validate_block = None
pipeline_block = None
setup_block = None
init_block = None
if sum([len(b) for b in blocks.values()]) == 0:
raise Exception("No blocks found!")
for block_type, blocks in blocks.iteritems():
if len(blocks) > 1:
raise Exception("Multiple blocks of type %s currently "
"not supported" % (block_type))
if len(blocks) == 1:
if block_type == COMMAND_BLOCK:
command_block = blocks[0]
elif block_type == VALIDATE_BLOCK:
validate_block = blocks[0]
elif block_type == PIPELINE_BLOCK:
pipeline_block = blocks[0]
elif block_type == SETUP_BLOCK:
setup_block = blocks[0]
elif block_type == INIT_BLOCK:
init_block = blocks[0]
docstring = _create_docstring(header)
if script_class is None:
script_class = ScriptTool
if is_pipeline:
pipeline_block = command_block
pipeline_block.interpreter = "python"
command_block = None
return script_class(docstring=docstring,
setup_block=setup_block,
init_block=init_block,
command_block=command_block,
validation_block=validate_block,
pipeline_block=pipeline_block)
def loads(path, script_class=None, is_pipeline=False):
if path is not None and not os.path.exists(path):
raise Exception("Script file not found : %s" % path)
with open(path, 'r') as f:
lines = "\n".join([l.rstrip() for l in f.readlines()])
tool = load(lines, script_class=script_class, is_pipeline=is_pipeline)
tool.path = os.path.abspath(path)
if tool.name is None:
tool.name = os.path.basename(path)
try:
tool.name = tool.name[:tool.name.index('.')]
except:
pass
return tool
raise Exception("Error while loading script from %s" % path)
|
Python
| 0
|
@@ -7560,16 +7560,17 @@
ol.name.
+r
index('.
@@ -7572,16 +7572,34 @@
ex('.')%5D
+.replace(%22.%22, %22_%22)
%0A
|
2808b0dfba0597e09d80eafedfead246779111d9
|
Clean up verbose looking code
|
MOAL/data_structures/trees/binary_trees.py
|
MOAL/data_structures/trees/binary_trees.py
|
# -*- coding: utf-8 -*-
__author__ = """Chris Tabor (dxdstudio@gmail.com)"""
if __name__ == '__main__':
from os import getcwd
from os import sys
sys.path.append(getcwd())
from MOAL.helpers.display import Section
from MOAL.helpers.display import print_h4
from MOAL.helpers.display import cmd_title
from MOAL.data_structures.abstract.tree import Tree
DEBUG = True if __name__ == '__main__' else False
class InvalidChildNodeCount(Exception):
pass
class BinaryTree(Tree):
"""A binary tree is the same as a tree ADT, except each node must have a max
of two child nodes, unless it's a leaf node, in which case it has zero."""
def __setitem__(self, key, val):
if len(val['edges']) > 2:
raise InvalidChildNodeCount(
'Binary Tree cannot have more than two children!')
super(BinaryTree, self).__setitem__(key, val)
def get_left_child(self, node):
return node['edges'][0]
def get_right_child(self, node):
        if len(node['edges']) < 2:
return None
return node['edges'][1]
def is_degenerate(self):
# TODO
pass
def is_pathological(self):
# TODO
return self.is_degenerate()
class BinarySearchTree(BinaryTree):
def __init__(self, *args, **kwargs):
super(BinarySearchTree, self).__init__(*args, **kwargs)
self.rebalance(self.get_root())
def _lt(self, node_a, node_b):
"""Comparator function, which can be used to implement a BST.
        This should be sub-classed and overridden for custom comparisons,
beyond typical integer comparison."""
node_a = self.__getitem__(node_a)
node_b = self.__getitem__(node_b)
if 'val' in node_a and 'val' in node_b:
return node_a['val'] > node_b['val']
else:
return False
def rebalance(self, node):
if len(node['edges']) < 2:
return
if self._lt(node['edges'][0], node['edges'][1]):
list(reversed(node['edges']))
class BifurcatingArborescence(BinaryTree):
"""A hilariously verbose alternative name for a Binary Tree!"""
if DEBUG:
with Section('Binary Tree'):
"""
0 root
/ \
/ \
1 2 interior
/ / \
/ / \
3 4 5 leaves
The tree above is represented in python code below.
"""
data = {
0: {'edges': [1, 2], 'is_root': True},
1: {'edges': [3], 'parent': 0},
2: {'edges': [4, 5], 'parent': 0},
3: {'edges': [], 'parent': 1},
4: {'edges': [], 'parent': 2},
5: {'edges': [], 'parent': 2},
}
btree = BinaryTree(data)
print(btree)
print_h4(
'Binary trees',
desc=('They can have no more than two nodes, '
'so adding new edges that do not conform'
' should throw an error.'))
try:
btree[6] = {'edges': [7, 8, 9], 'parent': 3}
except InvalidChildNodeCount:
cmd_title('Error called successfully', newlines=False)
bst = BinarySearchTree(data)
print(bst)
bst.add_child(5, 6)
bst.add_siblings(5, [10, 11])
print(bst)
|
Python
| 0.998916
|
@@ -1882,39 +1882,40 @@
node):%0A
-if len(
+edges =
node%5B'edges'%5D) %3C
@@ -1911,16 +1911,37 @@
'edges'%5D
+%0A if len(edges
) %3C 2:%0A
@@ -1982,39 +1982,23 @@
_lt(
-node%5B'
edges
-'%5D
%5B0%5D,
-node%5B'
edges
-'%5D
%5B1%5D)
@@ -2025,29 +2025,21 @@
eversed(
-node%5B'
edges
-'%5D
))%0A%0A%0Acla
|
872a96b52061bd9ab3a3178aacf3e3d0be2cc498
|
Make field filter errors ValidationErrors
|
nap/dataviews/fields.py
|
nap/dataviews/fields.py
|
from django.db.models.fields import NOT_PROVIDED
from nap.utils import digattr
class field(property):
'''A base class to compare against.'''
def __get__(self, instance, cls=None):
if instance is None:
return self
return self.fget(instance._obj)
def __set__(self, instance, value):
self.fset(instance._obj, value)
class Field(field):
'''
class V(DataView):
foo = Field('bar', default=1)
'''
def __init__(self, name, default=NOT_PROVIDED, filters=None):
self.name = name
self.default = default
self.filters = filters or []
def __get__(self, instance, cls=None):
if instance is None:
return self
value = getattr(instance._obj, self.name, self.default)
for filt in self.filters:
value = filt.from_python(value)
return value
def __set__(self, instance, value):
for filt in self.filters[::-1]:
value = filt.to_python(value)
setattr(instance._obj, self.name, value)
class DigField(Field):
def __get__(self, instance, cls=None):
if instance is None:
return self
return digattr(instance._obj, self.name, self.default)
def __set__(self, instance):
raise NotImplementedError
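
# Hypothetical usage sketch (DataView and IntFilter shown purely for
# illustration; IntFilter is not part of nap):
#
#     class IntFilter(object):
#         def to_python(self, value):    # inbound coercion
#             return int(value)
#         def from_python(self, value):  # outbound serialisation
#             return str(value)
#
#     class V(DataView):
#         age = Field('age', default=0, filters=[IntFilter()])
#
#     # Reading v.age applies from_python(); assigning v.age = "42"
#     # applies to_python() before setattr() on the wrapped object.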
|
Python
| 0.000008
|
@@ -42,16 +42,57 @@
PROVIDED
+%0Afrom django.forms import ValidationError
%0A%0Afrom n
@@ -853,16 +853,37 @@
ilters:%0A
+ try:%0A
@@ -910,32 +910,131 @@
m_python(value)%0A
+ except (TypeError, ValueError):%0A raise ValidationError('Invalid value')%0A
return v
|
f41bb86dd5263d63172b303a5a3993fc28e612dc
|
fix spelling of "received"
|
django-hq/apps/receiver/submitprocessor.py
|
django-hq/apps/receiver/submitprocessor.py
|
from models import *
import logging
import hashlib
import settings
import traceback
import sys
import os
import string
import uuid
from django.db import transaction
def get_submission_path():
return settings.rapidsms_apps_conf['receiver']['xform_submission_path']
@transaction.commit_on_success
def do_raw_submission(metadata, payload, domain=None, is_resubmission=False):
logging.debug("Begin do_raw_submission()")
transaction = str(uuid.uuid1())
new_submit = Submission()
new_submit.transaction_uuid = transaction
if is_resubmission:
new_submit.submit_ip = metadata['HTTP_ORIGINAL_IP']
new_submit.submit_time = datetime.strptime(metadata['HTTP_TIME_RECEIEVED'], "%Y-%m-%d %H:%M:%S")
else:
if metadata.has_key('HTTP_X_FORWARDED_FOR'):
new_submit.submit_ip = metadata['HTTP_X_FORWARDED_FOR']
elif metadata.has_key('REMOTE_HOST'):
new_submit.submit_ip = metadata['REMOTE_HOST']
else:
new_submit.submit_ip = '127.0.0.1'
if metadata.has_key('HTTP_CONTENT_TYPE'):
content_type = metadata['HTTP_CONTENT_TYPE']
else:
content_type = metadata['CONTENT_TYPE']#"text/xml"
new_submit.raw_header = repr(metadata)
logging.debug("compute checksum")
new_submit.checksum = hashlib.md5(payload).hexdigest()
logging.debug("Get bytes")
#new_submit.bytes_received = int(request.META['HTTP_CONTENT_LENGTH'])
if metadata.has_key('HTTP_CONTENT_LENGTH'):
new_submit.bytes_received = int(metadata['HTTP_CONTENT_LENGTH'])
else:
new_submit.bytes_received = int(metadata['CONTENT_LENGTH'])
try:
newfilename = os.path.join(get_submission_path(),transaction + '.postdata')
logging.debug("try to write file")
fout = open(newfilename, 'w')
fout.write('Content-type: %s\n' % content_type.replace("'newdivider'","newdivider"))
fout.write('Content-length: %s\n\n' % new_submit.bytes_received)
fout.write(payload)
fout.close()
logging.debug("write successful")
new_submit.raw_post = newfilename
except:
logging.error("Unable to write raw post data")
logging.error("Unable to write raw post data: Exception: " + str(sys.exc_info()[0]))
logging.error("Unable to write raw post data: Traceback: " + str(sys.exc_info()[1]))
return '[error]'
#return render_to_response(template_name, context, context_instance=RequestContext(request))
logging.debug("try to write model")
new_submit.domain = domain
new_submit.save()
logging.debug("save to db successful")
return new_submit
|
Python
| 0.999883
|
@@ -733,17 +733,16 @@
ME_RECEI
-E
VED'%5D, %22
|
bbe263e8bd9bb12ccef681d4f21f6b90c89f059d
|
Remove some debug logging
|
flask/test/test_signup.py
|
flask/test/test_signup.py
|
from __future__ import unicode_literals
from test import TestCase
from web import app
from db import session, User
from nose.tools import eq_
class TestSignup(TestCase):
def test_sign_up(self):
app.test_client().post('/', data={'email': 'andrew@lorente.name'})
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',)])
self.visit('/')
self.browser.fill('email', 'joe@lewis.name')
self.browser.find_by_name('go').click()
assert self.browser.is_text_present('Thanks'), 'rude!'
users = session().query(User.email).all()
eq_(users, [('andrew@lorente.name',), ('joe@lewis.name',)])
def test_valid_emails_get_validated(self):
print 'here before visit'
self.visit('/')
print 'here after visit'
self.browser.fill('email', 'eric@holscher.name')
assert self.browser.is_text_present('valid'), "didn't get validated"
def test_invalid_emails_get_yelled_about(self):
self.visit('/')
self.browser.fill('email', 'aghlaghlaghl')
assert self.browser.is_text_present('invalid'), "didn't get yelled at"
|
Python
| 0.000003
|
@@ -735,90 +735,23 @@
-print 'here before visit'%0A self.visit('/')%0A print 'here after visit'
+self.visit('/')
%0A
|
5060eeb34b44158bcd5e8b466650269adae8d194
|
Add getter for grammar rules
|
viper/grammar/grammar.py
|
viper/grammar/grammar.py
|
import viper.lexer as vl
from viper.grammar.languages import *
from os.path import dirname, join
from typing import ClassVar, List
class GrammarToken:
def __init__(self, lexeme_class: ClassVar, text=None):
self._lexeme_class = lexeme_class
self._text = text
def __eq__(self, other):
if isinstance(other, GrammarToken):
return self._lexeme_class == other._lexeme_class
elif isinstance(other, Literal):
return other.value == self
return isinstance(other, self._lexeme_class)
def __str__(self):
if self._text is not None:
return self._text
else:
return f'{self._lexeme_class.__name__}Token'
def __repr__(self):
return str(self)
class GrammarLiteral:
def __init__(self, val: str):
self._val = val
def __eq__(self, other):
if isinstance(other, GrammarLiteral):
return self._val == other._val
if isinstance(other, vl.Lexeme):
return self._val == other.text
return False
def __str__(self):
return f'"{self._val}"'
def __repr__(self):
return str(self)
INDENT = GrammarToken(vl.Indent)
DEDENT = GrammarToken(vl.Dedent)
NEWLINE = GrammarToken(vl.NewLine)
PERIOD = GrammarToken(vl.Period, vl.PERIOD.text)
COMMA = GrammarToken(vl.Comma, vl.COMMA.text)
OPEN_PAREN = GrammarToken(vl.OpenParen, vl.OPEN_PAREN.text)
CLOSE_PAREN = GrammarToken(vl.CloseParen, vl.CLOSE_PAREN.text)
COLON = GrammarToken(vl.Colon, vl.COLON.text)
ARROW = GrammarToken(vl.Arrow, vl.ARROW.text)
NUMBER = GrammarToken(vl.Number)
NAME = GrammarToken(vl.Name)
CLASS = GrammarToken(vl.Class)
OPERATOR = GrammarToken(vl.Operator)
class GrammarParseError(Exception):
def __init__(self, message='', line_no=None):
if line_no is not None:
message = f'[{line_no}]: {message}'
super().__init__(message)
class Grammar:
def __init__(self, grammar_file: str):
self._grammar_dict = {}
self._parse_file(grammar_file)
self.grammar = alt(*self._grammar_dict.values())
def partial_parse(self, lexemes: List[vl.Lexeme], lang=None) -> SPPF:
if lang is None:
lang = self.grammar
return make_sppf(lang, lexemes)
def parse_single(self, lexemes: List[vl.Lexeme]) -> SPPF:
lang = self._grammar_dict['single_line']
return self.partial_parse(lexemes, lang)
def parse_multiple(self, lexemes: List[vl.Lexeme]) -> SPPF:
lang = self._grammar_dict['many_lines']
return self.partial_parse(lexemes, lang)
def _parse_file(self, grammar_file: str):
raw_rules = {}
with open(grammar_file) as gf:
line_no = 0
for line in gf:
line = line.strip()
line_no += 1
if not line:
continue
if line.startswith('#'):
continue
try:
name, raw_rule = self._split_line(line)
except Exception as e:
if len(e.args) > 0:
msg = e.args[0]
else:
msg = ''
raise GrammarParseError(msg, line_no)
raw_rules[name] = raw_rule
self._parse_rules(raw_rules)
@staticmethod
def _split_line(line: str):
name, rule = line.split('::=')
name = name.strip()
if not (name.startswith('<') and name.endswith('>')):
raise ValueError(f"no angle brackets around production name: {name}")
name = name[1:-1]
rule = rule.strip()
return name, rule
def _parse_rules(self, raw_rules):
for name, raw_rule_list in raw_rules.items():
rule_tup = (self._parse_rule(raw_rule) for raw_rule in raw_rule_list.split('|'))
self._grammar_dict[name] = alt(*rule_tup)
def _parse_rule(self, rule: str) -> Language:
raw_tokens = rule.split()
rule_parts = []
for raw_token in raw_tokens:
token = self._parse_token(raw_token)
rule_parts.append(token)
rule = concat(*rule_parts)
return rule
def _parse_token(self, token: str) -> Language:
if token == 'INDENT':
return literal(INDENT)
if token == 'DEDENT':
return literal(DEDENT)
if token == 'NEWLINE':
return literal(NEWLINE)
if token == 'PERIOD' or token == '.':
return literal(PERIOD)
if token == 'COMMA' or token == ',':
return literal(COMMA)
if token == 'OPEN_PAREN' or token == '(':
return literal(OPEN_PAREN)
if token == 'CLOSE_PAREN' or token == ')':
return literal(CLOSE_PAREN)
if token == 'COLON' or token == ':':
return literal(COLON)
if token == 'ARROW' or token == '->':
return literal(ARROW)
if token == 'NAME':
return literal(NAME)
if token == 'NUMBER':
return literal(NUMBER)
if token == 'CLASS':
return literal(CLASS)
if token == 'OPERATOR':
return literal(OPERATOR)
if token.startswith('<') and token.endswith('>'):
return self._make_rule(token[1:-1])
if token.endswith('?'):
subtoken = self._parse_token(token[:-1])
if isinstance(subtoken, RuleLiteral):
return opt(subtoken)
else:
raise GrammarParseError(f"optional wrapping non-rule production: {subtoken}")
if token.endswith('*'):
subtoken = self._parse_token(token[:-1])
if isinstance(subtoken, RuleLiteral):
return rep(subtoken)
else:
raise GrammarParseError(f"repeat-star wrapping non-rule production: {subtoken}")
return literal(GrammarLiteral(token))
def _make_rule(self, rule):
return RuleLiteral(rule, self._grammar_dict)
GRAMMAR_FILE = join(dirname(__file__), 'formal_grammar.bnf')
GRAMMAR = Grammar(GRAMMAR_FILE)
|
Python
| 0.000002
|
@@ -2586,32 +2586,133 @@
lexemes, lang)%0A%0A
+ def get_rule(self, rule: str) -%3E Language:%0A return self._grammar_dict.get(rule, empty())%0A%0A
def _parse_f
|
1a961d34a0c2773853123267f391f861bfe02426
|
fix access to class variable
|
nupic/algorithms/anomaly.py
|
nupic/algorithms/anomaly.py
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""Anomaly-related algorithms."""
import numpy
from nupic.algorithms.anomaly_likelihood import AnomalyLikelihood
class Anomaly(object):
"""basic class that computes anomaly"""
# anomaly modes supported
MODE_PURE = "pure"
MODE_LIKELIHOOD = "likelihood"
MODE_WEIGHTED = "weighted"
  def __init__(self, useTP=None, slidingWindowSize=None, anomalyMode="pure"):
"""
@param (optional) useTP -- tp temporal pooler instance used
@param (optional) slidingWindowSize -- enables moving average on final anomaly score; how many elements are summed up, sliding window size; int >= 0
@param (optional) anomalyMode -- (string) which way to use to compute anomaly; possible values are:
-- "pure" -- the default, how much anomal the value is; float 0..1 where 1=totally unexpected
-- "likelihood" -- uses the anomaly_likelihood code; models probability of receiving this value and anomalyScore; used in Grok
-- "weighted" -- "pure" anomaly weighted by "likelihood" (anomaly * likelihood)
"""
# using TP
self._tp = useTP
if self._tp is not None:
self._prevPredictedColumns = numpy.array([])
# using cumulative anomaly , sliding window
self._windowSize = slidingWindowSize
if self._windowSize is not None:
self._buf = numpy.array([0] * self._windowSize, dtype=numpy.float) #sliding window buffer
self._i = 0 # index pointer to actual position
# mode
self._mode = anomalyMode
    if self._mode == Anomaly.MODE_LIKELIHOOD:
self._likelihood = AnomalyLikelihood() # probabilistic anomaly
def computeAnomalyScore(self, activeColumns, prevPredictedColumnsi, value=None, timestamp=None):
"""Compute the anomaly score as the percent of active columns not predicted.
@param activeColumns: array of active column indices
@param prevPredictedColumns: array of columns indices predicted in previous step (ignored with useTP != None)
@param value: (optional) input value, that is what activeColumns represent; used in anomaly-likelihood
@param timestamp: (optional) date timestamp when the sample occured; used in anomaly-likelihood
@return the computed anomaly score; float 0..1
"""
# using TP provided during init, _prevPredColumns stored internally here
if self._tp is not None:
prevPredictedColumns = self._prevPredictedColumns # override the values passed by parameter with the stored value
self._prevPredictedColumns = self._tp.getOutputData("topDownOut").nonzero()[0]
# 1. here is the 'classic' anomaly score
anomalyScore = Anomaly._pureAnomaly(activeColumns, prevPredictedColumns)
# use probabilistic anomaly
if self._mode == MODE_LIKELIHOOD:
probability = self._likelihood.anomalyProbability(value, anomalyScore, timestamp)
# compute final anomaly based on selected mode
if self._mode == MODE_PURE:
score = anomalyScore
elif self._mode == MODE_LIKELIHOOD:
score = probability
elif self._mode == MODE_WEIGHTED:
score = anomalyScore * probability
else:
raise ValueError("Invalid anomaly mode; only supported modes are: \"pure\", \"likelihood\", \"weighted\"; you used:"+self._mode)
# last, do moving-average if windowSize is set
if self._windowSize is not None:
score = self._movingAverage(score)
return score
def _movingAverage(self, newElement=None):
"""moving average
@param newValue (optional) add a new element before computing the avg
@return moving average of self._windowSize last elements
"""
if self._windowSize is None:
raise RuntimeError("Moving average has not been enabled during __init__ (self._windowSize not set)")
if newElement is not None:
self._buf[self._i]= newElement
#debug print "buf="+str(self._buf)+" i="+str(self._i)+" score="+str(score)
self._i = (self._i + 1) % self._windowSize
return self._buf.sum()/float(self._windowSize) # normalize to 0..1
@staticmethod
def _pureAnomaly(activeColumns, prevPredictedColumns):
"""the pure anomaly score
computed as diff of current active columns and columns predicted from previous round
@param activeColumns: array of active column indices
@param prevPredictedColumns: array of columns indices predicted in previous step
@return anomaly score 0..1 (float)
"""
nActiveColumns = len(activeColumns)
if nActiveColumns > 0:
# Test whether each element of a 1-D array is also present in a second
# array. Sum to get the total # of columns that are active and were
# predicted.
score = numpy.sum(numpy.in1d(activeColumns, prevPredictedColumns))
# Get the percent of active columns that were NOT predicted, that is
# our anomaly score.
score = (nActiveColumns - score) / float(nActiveColumns)
elif len(prevPredictedColumns) > 0:
# There were predicted columns but none active.
score = 1.0
else:
# There were no predicted or active columns.
score = 0.0
return score
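
# Minimal sketch of the circular-buffer moving average above, with
# slidingWindowSize=3 (sums are normalised by the window size):
#
#   a = Anomaly(slidingWindowSize=3)
#   a._movingAverage(0.9)  # buf=[0.9, 0.0, 0.0] -> 0.3
#   a._movingAverage(0.9)  # buf=[0.9, 0.9, 0.0] -> 0.6
#   a._movingAverage(0.3)  # buf=[0.9, 0.9, 0.3] -> 0.7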
|
Python
| 0.000003
|
@@ -3720,32 +3720,40 @@
f self._mode ==
+Anomaly.
MODE_LIKELIHOOD:
@@ -3915,24 +3915,32 @@
lf._mode ==
+Anomaly.
MODE_PURE:%0A
@@ -3980,32 +3980,40 @@
f self._mode ==
+Anomaly.
MODE_LIKELIHOOD:
@@ -4058,24 +4058,32 @@
lf._mode ==
+Anomaly.
MODE_WEIGHTE
|
6d9e8e8831cd08fa358f33f155a760de3ec59f3b
|
document that this file is generated
|
Lib/fontTools/ttLib/tables/__init__.py
|
Lib/fontTools/ttLib/tables/__init__.py
|
def _moduleFinderHint():
"""Dummy function to let modulefinder know what tables may be
dynamically imported. Generated by MetaTools/buildTableList.py.
"""
import B_A_S_E_
import C_F_F_
import D_S_I_G_
import G_D_E_F_
import G_P_O_S_
import G_S_U_B_
import J_S_T_F_
import L_T_S_H_
import O_S_2f_2
import T_S_I_B_
import T_S_I_D_
import T_S_I_J_
import T_S_I_P_
import T_S_I_S_
import T_S_I_V_
import T_S_I__0
import T_S_I__1
import T_S_I__2
import T_S_I__3
import T_S_I__5
import _c_m_a_p
import _c_v_t
import _f_p_g_m
import _g_a_s_p
import _g_l_y_f
import _h_d_m_x
import _h_e_a_d
import _h_h_e_a
import _h_m_t_x
import _k_e_r_n
import _l_o_c_a
import _m_a_x_p
import _n_a_m_e
import _p_o_s_t
import _p_r_e_p
import _v_h_e_a
import _v_m_t_x
|
Python
| 0.000003
|
@@ -1,8 +1,77 @@
+# DON'T EDIT! This file is generated by MetaTools/buildTableList.py.%0A
def _mod
@@ -558,16 +558,33 @@
_S_I__5%0A
+%09import V_O_R_G_%0A
%09import
|
1f977aa5fa28ed1e351f337191291198384abe02
|
Set auth_encryption_key option to be secret
|
heat/common/crypt.py
|
heat/common/crypt.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
from Crypto.Cipher import AES
from oslo_config import cfg
from heat.openstack.common.crypto import utils
auth_opts = [
cfg.StrOpt('auth_encryption_key',
default='notgood but just long enough i think',
help="Encryption key used for authentication info in database.")
]
cfg.CONF.register_opts(auth_opts)
def encrypt(auth_info):
if auth_info is None:
return None, None
sym = utils.SymmetricCrypto()
res = sym.encrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64encode=True)
return 'oslo_decrypt_v1', res
def oslo_decrypt_v1(auth_info):
if auth_info is None:
return None
sym = utils.SymmetricCrypto()
return sym.decrypt(cfg.CONF.auth_encryption_key[:32],
auth_info, b64decode=True)
def heat_decrypt(auth_info):
"""Decrypt function for data that has been encrypted using an older
version of Heat.
Note: the encrypt function returns the function that is needed to
decrypt the data. The database then stores this. When the data is
then retrieved (potentially by a later version of Heat) the decrypt
function must still exist. So whilst it may seem that this function
is not referenced, it will be referenced from the database.
"""
if auth_info is None:
return None
auth = base64.b64decode(auth_info)
iv = auth[:AES.block_size]
cipher = AES.new(cfg.CONF.auth_encryption_key[:32], AES.MODE_CFB, iv)
res = cipher.decrypt(auth[AES.block_size:])
return res
def list_opts():
yield None, auth_opts
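
# Round-trip sketch using only the functions above; the stored method
# name picks the matching decrypt function when data is read back:
#
#     method, ciphertext = encrypt('{"password": "s3cret"}')
#     # method == 'oslo_decrypt_v1'
#     plaintext = oslo_decrypt_v1(ciphertext)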
|
Python
| 0.000001
|
@@ -743,16 +743,44 @@
n_key',%0A
+ secret=True,%0A
@@ -831,12 +831,8 @@
i t
-hink
',%0A
@@ -855,31 +855,27 @@
lp=%22
-Encryption key used for
+Key used to encrypt
aut
@@ -894,16 +894,20 @@
info in
+the
database
@@ -907,16 +907,91 @@
atabase.
+ %22%0A %22Length of this key must be 16, 24 or 32 characters.
%22)%0A%5D%0A%0Acf
|
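A note on the change above: marking the option secret in oslo.config masks its value wherever options are logged, and the diff also trims the default to exactly 32 characters, matching the documented key-length constraint. A minimal sketch of the resulting declaration, reconstructed from the diff rather than quoted from the record:

from oslo_config import cfg

auth_opts = [
    cfg.StrOpt('auth_encryption_key',
               secret=True,  # value is masked when oslo.config logs options
               help='Key used to encrypt auth info in the database. '
                    'Length of this key must be 16, 24 or 32 characters.'),
]
cfg.CONF.register_opts(auth_opts)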
76e22fed42833a1b998dbc37a0a0e62764092de3
|
Use simplejson for older Python versions
|
Roundabound/__init__.py
|
Roundabound/__init__.py
|
# Needed to get with statement working in Python 2.5
from __future__ import with_statement
import glob
import os
import os.path
import time
import logging
from zipfile import ZipFile
class LogRotationError(Exception):
def __init__(self, message):
Exception.__init__(self, message)
def parse_age(arg):
return int(arg) * 24 * 60 * 60;
class RotationSet:
def __init__(self, config, dry_run=False):
self.file_pattern = config['pattern']
self.archive_age = None
self.delete_age = None
self.archive_path = None
self.dry_run = dry_run
if config.has_key('archive_age'):
self.archive_age = parse_age(config['archive_age'])
if config.has_key('archive_path'):
self.archive_path = config['archive_path']
(file_path, file_pattern) = os.path.split(self.file_pattern)
self.archive_file_pattern = \
os.path.join(self.archive_path, file_pattern + ".zip")
else:
self.archive_file_pattern = self.file_pattern + ".zip"
if config.has_key('delete_age'):
self.delete_age = parse_age(config['delete_age'])
def rotate(self):
now = time.time()
for filename in glob.glob(self.file_pattern):
stat = os.stat(filename)
age = now - stat.st_mtime
logging.debug('Examining file %s; age %d' % (filename, age))
if self.archive_age != None and age > self.archive_age:
self._archive(filename)
elif self.delete_age != None and age > self.delete_age:
self._do_action(lambda x: os.remove(filename))
logging.info('Deleted %s' % filename)
for filename in glob.glob(self.archive_file_pattern):
stat = os.stat(filename)
age = now - stat.st_mtime
logging.debug('Examining file %s; age %d' % (filename, age))
if self.delete_age != None and age > self.delete_age:
self._do_action(lambda x: os.remove(filename))
logging.info('Deleted %s' % filename)
def _archive(self, filename):
if self.archive_path != None:
(path, name) = os.path.split(filename)
archived_name = os.path.join(self.archive_path, name + '.zip')
else:
archived_name = filename + '.zip'
self._do_action(lambda x: self._do_archive(filename, archived_name, name))
logging.info('Archived %s to %s' % (filename, archived_name))
def _do_archive(self, filename, archived_name, name):
with ZipFile(archived_name, 'w') as zip_file:
zip_file.write(filename, name)
os.remove(filename)
def _do_action(self, action):
if not self.dry_run:
action(None)
class LogRotate:
def __init__(self, config, dry_run=False):
if not config.has_key('sets'):
raise LogRotationError('Configuration is missing key "sets".')
self.dry_run = dry_run
self.sets = {}
for (set_name, set_config) in config["sets"].items():
self.sets[set_name] = RotationSet(set_config, dry_run)
def rotate(self):
logging.info('Starting log rotation')
if self.dry_run:
logging.warning('Running in dry run mode. No files will be altered.')
for (set_name, rotation_set) in self.sets.items():
logging.info('Rotating %s' % set_name)
try:
rotation_set.rotate()
except Exception, e:
logging.error('Encountered error while rotating set "%s".'
% set_name, exc_info=True)
logging.debug('Log rotation done')
def main(argv):
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--config', default='roundabound.cfg', help='the configuration file to use')
parser.add_argument('--verbosity', default='WARN', help='indicates the verbosity of the output', choices=['DEBUG', 'INFO', 'WARN', 'ERROR'])
parser.add_argument('--dry-run', action='store_true', help='If set, will not actually do anything, just log actions.')
args = parser.parse_args(argv)
logging.basicConfig(level=getattr(logging, args.verbosity))
with open(args.config, 'r') as config_file:
config = json.loads(config_file.read())
logrotate = LogRotate(config, args.dry_run)
logrotate.rotate()
if __name__ == '__main__':
import sys
main(sys.argv)
|
Python
| 0
|
@@ -3835,19 +3835,87 @@
-import json
+try:%0D%0A import json%0D%0A except ImportError:%0D%0A import simplejson%0D%0A
%0D%0A
|
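A sketch of the fallback-import pattern this commit applies. Note that the diff as recorded imports simplejson without rebinding it to the name json, so the later json.loads call would still raise NameError on pre-2.6 interpreters; aliasing, as below, avoids that (a suggested correction, not what the record's diff does):

try:
    import json
except ImportError:
    # Python < 2.6 ships no stdlib json; fall back to the simplejson package
    import simplejson as json

config = json.loads('{"sets": {}}')
print(config)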
57100d99b58263a76f9bf405f7bdc62e8839552e
|
Fix bug with JSON serialization
|
model.py
|
model.py
|
import datetime
import random
from google.appengine.api import memcache
from google.appengine.ext import db
def from_milliseconds(millis):
return datetime.datetime.utcfromtimestamp(millis / 1000)
def to_milliseconds(date_time):
delta = date_time - from_milliseconds(0)
return int(round(delta.total_seconds() * 1000))
class ShardedCounterConfig(db.Expando):
"""
Represents the sharded counter config, that helps us figure out how many shards to use for a sharded counter
__key__ == name property in ShardedCounter
"""
name = db.StringProperty(required=True)
shards = db.IntegerProperty(default=1)
@classmethod
def cache_key(cls, name):
return 'shard_config_' + name
@classmethod
def get_sharded_config(cls, name):
cache_key = ShardedCounterConfig.cache_key(name)
config = memcache.get(cache_key)
if not config:
''' Try fetching from datastore '''
config = ShardedCounterConfig.get_or_insert(name, name=name, shards=20)
memcache.set(cache_key, config, time=86400)
return config
class CrashReport(db.Expando):
"""
Represents an Crash Report item
"""
name = db.StringProperty(required=True) # key_name and not the sharded key name
labels = db.StringListProperty(default=[])
crash = db.TextProperty(required=True)
fingerprint = db.StringProperty(required=True)
date_time = db.DateTimeProperty(required=True, default=datetime.datetime.utcnow())
count = db.IntegerProperty(default=0)
@classmethod
def get_count(cls, name):
total = memcache.get(name)
if total is None:
total = 0
q = CrashReport.all()
q.filter('name = ', name)
for counter in q.run():
total += counter.count
memcache.set(name, str(total))
''' total can be a string (when cached) '''
return int(total)
@classmethod
def add_or_remove(cls, fingerprint, crash, labels=None, is_add=True, delta=1):
key_name = CrashReport.key_name(fingerprint)
config = ShardedCounterConfig.get_sharded_config(key_name)
shards = config.shards
shard_to_use = random.randint(0, shards-1)
shard_key_name = key_name + '_' + str(shard_to_use)
crash_report = CrashReport.get_or_insert(shard_key_name,
name=key_name, crash=crash, fingerprint=fingerprint, labels=labels)
if is_add:
crash_report.count += delta
crash_report.put()
memcache.incr(key_name, delta, initial_value=0)
else:
crash_report.count -= delta
crash_report.put()
memcache.decr(key_name, delta)
return crash_report
@classmethod
def get_crash(cls, fingerprint):
q = CrashReport.all()
q.filter('name =', CrashReport.key_name(fingerprint))
crash_report = q.get()
if not crash_report:
return None
else:
return crash_report
@classmethod
def key_name(cls, name):
return cls.kind() + '_' + name
@classmethod
def to_json(cls, entity):
return {
'key': entity.key(),
'crash': entity.crash,
'labels': entity.labels or list(),
'fingerprint': entity.fingerprint,
'time': to_milliseconds(entity.date_time), # in millis
'count': cls.get_count(entity.name)
}
class Link(object):
"""
Represents a link (essentially contains the url, title and active properties).
"""
def __init__(self, title, url, active=False):
self.title = title
self.url = url
self.active = active
|
Python
| 0.000002
|
@@ -3243,16 +3243,24 @@
'key':
+unicode(
entity.k
@@ -3263,16 +3263,17 @@
ty.key()
+)
,%0A
|
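The fix above wraps entity.key() in unicode() because App Engine datastore Key objects are not JSON serializable. A small self-contained illustration of the failure mode and the fix; the Key class here is a hypothetical stand-in for the datastore type:

import json

class Key(object):
    """Hypothetical stand-in for google.appengine.ext.db.Key."""
    def __str__(self):
        return 'CrashReport_abc123'

entity_key = Key()
# json.dumps({'key': entity_key})            # TypeError: not JSON serializable
print(json.dumps({'key': str(entity_key)}))  # {"key": "CrashReport_abc123"}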
1eeb8588d1a407e91e73749a35e5fb1f6cace196
|
Add option for page to be in no category
|
app/page.py
|
app/page.py
|
from app import app, db, utils
from app.models import User, Page
from flask import Flask, redirect, request
from flask_login import current_user
from flask_wtf import Form
from wtforms import validators, StringField, TextAreaField, HiddenField, SelectField, BooleanField
from flask_wtf.html5 import IntegerField
from wtforms.validators import DataRequired
import datetime, time
choices = [('calendars', 'Calendars'),
('about', 'About Us'),
('academics', 'Academics'),
('students', 'Students'),
('parents', 'Parents'),
('admissions', 'Admissions')]
#custom widget for rendering a TinyMCE input
def TinyMCE(field):
return """ <script src="//cdn.tinymce.com/4/tinymce.min.js"></script>
<script>tinymce.init({
selector:'#editor',
theme: 'modern',
height: 800,
plugins: [
'advlist autolink link image lists charmap print preview hr anchor pagebreak spellchecker',
'searchreplace wordcount visualblocks visualchars code fullscreen insertdatetime media nonbreaking',
'save table contextmenu directionality emoticons template paste textcolor'
],
table_default_attributes: {
class: 'table-condensed'
},
content_css: '/static/css/tinymce.css',
toolbar: 'insertfile undo redo | styleselect | bold italic | alignleft aligncenter alignright alignjustify | bullist numlist outdent indent | link image | print preview media fullpage | forecolor backcolor'
});</script>
<textarea id='editor'> %s </textarea>""" % field._value()
class NewPageForm(Form):
title = StringField('Title:', validators=[validators.Length(min=0,max=1000)])
category = SelectField('Category:', choices=choices)
dividerBelow = BooleanField('Divider below page name in dropdown menu')
index = IntegerField('Ordering index (lower is higher up):')
body = TextAreaField('Body:', validators=[validators.Length(min=0,max=75000)], widget=TinyMCE)
bodyhtml = HiddenField()
def new_page():
form = NewPageForm()
if form.validate_on_submit():
title = form.title.data
body = form.bodyhtml.data
category = form.category.data
dividerBelow = form.dividerBelow.data
index = form.index.data
if len(title) < 1:
form.title.errors.append("This field is required.")
form.body.data = body
return utils.render_with_navbar("newpage.html", form=form, title=title, index=index)
if index<0 or index>100:
form.index.errors.append("Number must be between 0 and 100.")
form.body.data = body
return utils.render_with_navbar("newpage.html", form=form, title=title, index=index)
name = "-".join(title.split(" ")).lower()
newpage = Page(title=title, name=name, category=category, dividerBelow=dividerBelow, index=index, body=body)
db.session.add(newpage)
db.session.commit()
time.sleep(0.5);
return redirect("/page/" + name)
return utils.render_with_navbar("newpage.html", form=form)
def edit_page(page_name):
if not page_name:
return utils.render_with_navbar("404.html"), 404
currentPage = Page.query.filter_by(name=page_name).first()
if not currentPage:
return utils.render_with_navbar("404.html"), 404
title = currentPage.title
bodyhtml = currentPage.body
category = currentPage.category
dividerBelow = currentPage.dividerBelow
index = currentPage.index
form = NewPageForm(category=category, dividerBelow=dividerBelow)
form.body.data = bodyhtml
if form.validate_on_submit():
newtitle = form.title.data
newbody = form.bodyhtml.data
newcategory = form.category.data
newdividerBelow = form.dividerBelow.data
newindex = form.index.data
if len(newtitle) < 1:
form.title.errors.append("This field is required.")
form.body.data = newbody
return utils.render_with_navbar("editpage.html", form=form, title=newtitle, index=newindex)
if index<0 or index>100:
form.index.erros.append("Number must be between 0 and 100.")
form.body.data = newbody
return utils.render_with_navbar("editpage.html", form=form, title=newtitle, index=newindex)
newname = "-".join(newtitle.split(" ")).lower()
if newname != page_name:
page = Page.query.filter_by(name=newname).first()
if page:
form.title.errors.append("A page with this name already exists.")
form.body.data = newbody
return utils.render_with_navbar("editpage.html", form=form, title=newtitle, index=newindex)
currentPage.title = newtitle
currentPage.body = newbody
currentPage.name = newname
currentPage.category = newcategory
currentPage.dividerBelow = newdividerBelow
currentPage.index = newindex
db.session.commit()
time.sleep(0.5)
return redirect("/page/" + newname)
return utils.render_with_navbar("editpage.html", form=form, title=title, index=index)
def delete_page(page_name):
if not page_name:
return utils.render_with_navbar("404.html"), 404
page = Page.query.filter_by(name=page_name)
if not page:
return utils.render_with_navbar("404.html"), 404
page.delete()
db.session.commit()
return redirect("/")
|
Python
| 0
|
@@ -383,16 +383,50 @@
ices = %5B
+('none', 'None'),%0A
('calend
@@ -441,24 +441,24 @@
alendars'),%0A
-
@@ -2006,16 +2006,52 @@
er up):'
+, validators=%5Bvalidators.Optional()%5D
)%0A bo
@@ -2654,16 +2654,17 @@
=index)%0A
+%0A
@@ -2663,32 +2663,43 @@
if index
+ and (index
%3C0 or index%3E100:
@@ -2689,32 +2689,33 @@
x%3C0 or index%3E100
+)
:%0A fo
@@ -2949,32 +2949,315 @@
(%22 %22)).lower()%0A%0A
+ page = Page.query.filter_by(name=name).first()%0A if page:%0A form.title.errors.append(%22A page with this name already exists.%22)%0A form.body.data = body%0A return utils.render_with_navbar(%22newpage.html%22, form=form, title=title, index=index)%0A%0A%0A
newpage
@@ -4556,24 +4556,35 @@
if index
+ and (index
%3C0 or index%3E
@@ -4586,16 +4586,17 @@
ndex%3E100
+)
:%0A
|
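Two things happen in this diff: a 'none' choice is prepended to the category list, and the index field gains validators.Optional() so a blank value no longer blocks submission, which is why the range check grows an `index and` guard. A sketch of the Optional() declaration with plain WTForms, field name taken from the record:

from wtforms import Form, IntegerField, validators

class NewPageForm(Form):
    index = IntegerField('Ordering index (lower is higher up):',
                         validators=[validators.Optional()])

# When the field is submitted blank, Optional() clears any errors and stops
# the validator chain, leaving form.index.data as None -- hence the added
# `if index and (index < 0 or index > 100)` guard in the view code.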
ae7c7d076b1c80457fc491dc390e9861c1559d97
|
fix broken protobuf path am: a46f70c546
|
bazel/deps.bzl
|
bazel/deps.bzl
|
# Copyright (C) 2019 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
load("@bazel_tools//tools/build_defs/repo:git.bzl", "new_git_repository")
# This file must be kept in sync with tools/install-build-deps.
def perfetto_deps():
# Note: this is more recent than the version of protobuf we use in the
# GN and Android builds. This is because older versions of protobuf don't
# support Bazel.
_add_repo_if_not_existing(
http_archive,
name = "com_google_protobuf",
strip_prefix = "protobuf-3.9.0",
url = "https://github.com/google/protobuf/archive/v3.9.0.tar.gz",
sha256 = "2ee9dcec820352671eb83e081295ba43f7a4157181dad549024d7070d079cf65",
)
_add_repo_if_not_existing(
http_archive,
name = "perfetto_dep_sqlite",
url = "https://storage.googleapis.com/perfetto/sqlite-amalgamation-3250300.zip",
sha256 = "2ad5379f3b665b60599492cc8a13ac480ea6d819f91b1ef32ed0e1ad152fafef",
strip_prefix = "sqlite-amalgamation-3250300",
build_file = "//bazel:sqlite.BUILD",
)
_add_repo_if_not_existing(
http_archive,
name = "perfetto_dep_sqlite_src",
url = "https://storage.googleapis.com/perfetto/sqlite-src-3250300.zip",
sha256 = "c7922bc840a799481050ee9a76e679462da131adba1814687f05aa5c93766421",
strip_prefix = "sqlite-src-3250300",
build_file = "//bazel:sqlite.BUILD",
)
_add_repo_if_not_existing(
new_git_repository,
name = "perfetto_dep_linenoise",
remote = "https://fuchsia.googlesource.com/third_party/linenoise.git",
commit = "c894b9e59f02203dbe4e2be657572cf88c4230c3",
build_file = "//bazel:linenoise.BUILD",
shallow_since = "1469784335 +0200",
)
_add_repo_if_not_existing(
new_git_repository,
name = "perfetto_dep_jsoncpp",
remote = "https://github.com/open-source-parsers/jsoncpp",
commit = "6aba23f4a8628d599a9ef7fa4811c4ff6e4070e2", # v1.9.3
build_file = "//bazel:jsoncpp.BUILD",
shallow_since = "1590760226 +0800",
)
_add_repo_if_not_existing(
new_git_repository,
name = "perfetto_dep_zlib",
remote = "https://android.googlesource.com/platform/external/zlib.git",
commit = "dfa0646a03b4e1707469e04dc931b09774968fe6",
build_file = "//bazel:zlib.BUILD",
shallow_since = "1557160162 -0700",
)
# Without this protobuf.bzl fails. This seems a bug in protobuf_deps().
_add_repo_if_not_existing(
http_archive,
name = "bazel_skylib",
sha256 = "bbccf674aa441c266df9894182d80de104cabd19be98be002f6d478aaa31574d",
strip_prefix = "bazel-skylib-2169ae1c374aab4a09aa90e65efe1a3aad4e279b",
url = "https://github.com/bazelbuild/bazel-skylib/archive/2169ae1c374aab4a09aa90e65efe1a3aad4e279b.tar.gz",
)
def _add_repo_if_not_existing(repo_rule, name, **kwargs):
if name not in native.existing_rules():
repo_rule(name = name, **kwargs)
|
Python
| 0.000001
|
@@ -1164,22 +1164,31 @@
hub.com/
-google
+protocolbuffers
/protobu
|
ecf96772ff471f5ef928453b5f6103943a60d752
|
Add property to set params.
|
versus/src/logistic.py
|
versus/src/logistic.py
|
"""
Logistic regression.
"""
import numpy
from theano import config
import theano.tensor as T
from theano import shared
def decode(self, encoding):
""" Decode an encoded model.
:return: LogisticRegression with encodings parameters
"""
pass
class LogisticRegression(object):
"""Multi-class Logistic Regression Class
The logistic regression is fully described by a weight matrix :math:`W`
and bias vector :math:`b`. Classification is done by projecting data
points onto a set of hyperplanes, the distance to which is used to
determine a class membership probability.
"""
def __init__(self, _input, n_in, n_out):
""" Initialize the parameters of the logistic regression
:type input: theano.tensor.TensorType
:param input: symbolic variable that describes the input of the
architecture (one minibatch)
:type n_in: int
:param n_in: number of input units, the dimension of the space in
which the datapoints lie
:type n_out: int
:param n_out: number of output units, the dimension of the space in
which the labels lie
"""
# initialize with 0 the weights W as a matrix of shape (n_in, n_out)
self.W = shared(value=numpy.zeros((n_in, n_out),
dtype=config.floatX),
name='W', borrow=True)
# initialize the baises b as a vector of n_out 0s
self.b = shared(value=numpy.zeros((n_out,),
dtype=config.floatX),
name='b', borrow=True)
# compute vector of class-membership probabilities in symbolic form
self.p_y_given_x = T.nnet.softmax(T.dot(_input, self.W) + self.b)
# compute prediction as class whose probability is maximal in
# symbolic form
self.y_pred = T.argmax(self.p_y_given_x, axis=1)
# parameters of the model
self.params = [self.W, self.b]
def encode(self):
""" Encode the parameters of the model """
pass
def negative_log_likelihood(self, y):
"""Return the mean of the negative log-likelihood of the prediction
of this model under a given target distribution.
.. math::
\frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
\frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
\ell (\theta=\{W,b\}, \mathcal{D})
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
Note: we use the mean instead of the sum so that
the learning rate is less dependent on the batch size
"""
# y.shape[0] is (symbolically) the number of rows in y, i.e.,
# number of examples (call it n) in the minibatch
# T.arange(y.shape[0]) is a symbolic vector which will contain
# [0,1,2,... n-1] T.log(self.p_y_given_x) is a matrix of
# Log-Probabilities (call it LP) with one row per example and
# one column per class LP[T.arange(y.shape[0]),y] is a vector
# v containing [LP[0,y[0]], LP[1,y[1]], LP[2,y[2]], ...,
# LP[n-1,y[n-1]]] and T.mean(LP[T.arange(y.shape[0]),y]) is
# the mean (across minibatch examples) of the elements in v,
# i.e., the mean log-likelihood across the minibatch.
return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
def errors(self, y):
"""Return a float representing the number of errors in the minibatch
over the total number of examples of the minibatch ; zero one
loss over the size of the minibatch
:type y: theano.tensor.TensorType
:param y: corresponds to a vector that gives for each example the
correct label
"""
# check if y has same dimension of y_pred
if y.ndim != self.y_pred.ndim:
raise TypeError('y should have the same shape as self.y_pred',
('y', y.type, 'y_pred', self.y_pred.type))
# check if y is of the correct datatype
if y.dtype.startswith('int'):
# the T.neq operator returns a vector of 0s and 1s, where 1
# represents a mistake in prediction
return T.mean(T.neq(self.y_pred, y))
else:
raise NotImplementedError()
|
Python
| 0
|
@@ -2034,16 +2034,17 @@
self.
+_
params =
@@ -2150,16 +2150,158 @@
pass%0A%0A
+ @property%0A def params(self):%0A return self._params%0A%0A @params.setter%0A def params(self, value):%0A self._params = value%0A
%0A def
|
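The diff renames the attribute to _params and exposes it through a property with a setter. A minimal sketch of the pattern in isolation:

class Model(object):
    def __init__(self):
        self._params = []

    @property
    def params(self):
        return self._params

    @params.setter
    def params(self, value):
        # assignments to m.params are routed through here, so validation
        # or bookkeeping can be added later without changing callers
        self._params = value

m = Model()
m.params = ['W', 'b']
print(m.params)  # ['W', 'b']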
197a0440ddbf31aa87a6a6998b41344be4924076
|
Fix the rows update
|
gui/placeslist.py
|
gui/placeslist.py
|
# -*- coding: utf8 -*-
from PyQt4 import QtGui
from PyQt4 import QtCore
class placesList(QtGui.QTableWidget):
_columns = ('Name', 'Type', 'X', 'Y', 'Locate')
_app = None
_parent = None
def __init__(self, parent, app):
"""
Initialisation of the window, creates the GUI and displays the window.
"""
self._app = app
QtGui.QTableView.__init__(self, parent)
self._parent = parent
self.setColumnCount(len(self._columns))
self.setHorizontalHeaderLabels(self._columns)
self.verticalHeader().setVisible(False)
self.setEditTriggers(QtGui.QAbstractItemView.NoEditTriggers)
self.setSelectionBehavior(QtGui.QAbstractItemView.SelectRows)
self.setSelectionMode(QtGui.QAbstractItemView.SingleSelection)
self.setData()
def setData(self):
for index, row in enumerate(self._app.map.places):
self.insertRow(index)
self.setItem(index, 0, QtGui.QTableWidgetItem(row['name']))
self.setItem(index, 1, QtGui.QTableWidgetItem(self._app.map.getPlaceTypesLabels()[row['type']]))
self.setItem(index, 2, QtGui.QTableWidgetItem(str(row['coordinates'][0])))
self.setItem(index, 3, QtGui.QTableWidgetItem(str(row['coordinates'][1])))
self.setCellWidget(index, 4, QtGui.QPushButton("Locate"))
self.resizeColumnsToContents()
|
Python
| 0
|
@@ -751,16 +751,84 @@
(self):%0A
+%09%09self.clearContents()%0A%09%09nbRowsToInsert = len(self._app.map.places)%0A
%09%09for in
@@ -872,16 +872,57 @@
laces):%0A
+%09%09%09if self.rowCount() %3C nbRowsToInsert:%0A%09
%09%09%09self.
|
29e18edf18c14cd11c8cebd93548eeadbb61b1da
|
Fix biases by using correct shape
|
model.py
|
model.py
|
import tensorflow as tf
def make_weight_variable(name, num_inputs, num_outputs):
return tf.get_variable(
name,
[num_inputs, num_outputs],
initializer=tf.contrib.layers.variance_scaling_initializer()
)
class Model:
def __init__(self, chars, max_steps, lstm_units=250, l1_units=200, l2_units=150,
learning_rate=0.001, l2=0.001):
self.chars = chars
self.max_steps = max_steps
# Define placeholders for training data
self.features = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.labels = tf.placeholder(dtype=tf.int32, shape=[None, max_steps])
self.mask = tf.placeholder(dtype=tf.float32, shape=[None, max_steps])
# Define LSTM layer
features_one_hot = tf.one_hot(self.features, len(chars) + 1, dtype=tf.float32)
lstm_3d, _ = tf.nn.dynamic_rnn(
cell=tf.contrib.rnn.LSTMCell(num_units=lstm_units),
dtype=tf.float32,
inputs=features_one_hot
)
lstm_flat = tf.reshape(lstm_3d, [-1, lstm_units])
# Define first ReLU layer
l1_weights = make_weight_variable("l1-weights", lstm_units, l1_units)
l1_biases = tf.Variable(0.1, name='l1-biases')
layer1 = tf.nn.relu(tf.matmul(lstm_flat, l1_weights) + l1_biases)
# Define second ReLU layer
l2_weights = make_weight_variable("l2-weights", l1_units, l2_units)
l2_biases = tf.Variable(0.1, name='l2-biases')
layer2 = tf.nn.relu(tf.matmul(layer1, l2_weights) + l2_biases)
# Define output layer
out_weights = make_weight_variable("out-weights", l2_units, len(chars) + 1)
out_biases = tf.Variable(0.1, name='out-biases')
self.out_logits = tf.matmul(layer2, out_weights) + out_biases
# Define training objective
loss_flat = tf.nn.sparse_softmax_cross_entropy_with_logits(
labels=tf.reshape(self.labels, [-1]),
logits=self.out_logits
)
loss_flat_masked = loss_flat * tf.reshape(self.mask, [-1])
self.loss = tf.reduce_mean(loss_flat_masked)
weight_vars = [v for v in tf.trainable_variables() if 'bias' not in v.name]
self.l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in weight_vars]) * l2
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
self.train_op = optimizer.minimize(self.loss + self.l2_loss)
|
Python
| 0.0001
|
@@ -1216,35 +1216,66 @@
s = tf.Variable(
-0.1
+tf.constant(0.1, shape=%5Bl1_units%5D)
, name='l1-biase
@@ -1488,35 +1488,66 @@
s = tf.Variable(
-0.1
+tf.constant(0.1, shape=%5Bl2_units%5D)
, name='l2-biase
@@ -1648,24 +1648,57 @@
utput layer%0A
+ out_len = len(chars) + 1%0A
out_
@@ -1753,30 +1753,23 @@
_units,
-len(chars) + 1
+out_len
)%0A
@@ -1795,19 +1795,49 @@
ariable(
-0.1
+tf.constant(0.1, shape=%5Bout_len%5D)
, name='
|
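The bug being fixed: tf.Variable(0.1) creates a single trainable scalar that is broadcast across every unit of the layer, so the network can learn only one shared offset per layer instead of one per unit. The fix builds a vector of the right shape. A TF1-style sketch matching the record's API:

import tensorflow as tf

l1_units = 200

# one trainable scalar, broadcast over all 200 units -- the bug
scalar_bias = tf.Variable(0.1, name='l1-biases')

# 200 independent trainable biases, each initialised to 0.1 -- the fix
vector_bias = tf.Variable(tf.constant(0.1, shape=[l1_units]), name='l1-biases')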
5809fc832340e3ee5d798fa347e4933e874bdb8b
|
Allow older django to find the tests
|
voting/tests/__init__.py
|
voting/tests/__init__.py
|
Python
| 0.000003
|
@@ -0,0 +1,92 @@
+import django%0Aif django.VERSION%5B0%5D == 1 and django.VERSION%5B1%5D %3C 6:%0A from .tests import *%0A
|
|
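The whole new file consists of the version gate shown in the diff: before Django 1.6 the test runner only discovered tests importable from the app's tests package, so the module re-exports them there for old versions only. Decoded from the diff's escaped form for readability (a reconstruction; the record's diff remains authoritative):

import django

if django.VERSION[0] == 1 and django.VERSION[1] < 6:
    from .tests import *  # noqa: F401,F403 -- old runners look here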
b33b6c7a5ae8835514389340ff3152f03f619984
|
Separate prefix and number with an underscore
|
holodeck/settings.py
|
holodeck/settings.py
|
LOGICAL_SHARDS = 8
PHYSICAL_SHARDS = [
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck1',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
{
'ENGINE': 'django.db.backends.sqlite3',
'NAME_PREFIX': 'holodeck2',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
]
|
Python
| 0.998789
|
@@ -119,16 +119,17 @@
holodeck
+_
1',%0A
@@ -301,16 +301,17 @@
holodeck
+_
2',%0A
|
0b38626104d587513db82535bbadc8b5d305f614
|
Fix #659
|
nextcloudappstore/user/forms.py
|
nextcloudappstore/user/forms.py
|
from allauth.account.utils import filter_users_by_email, user_username, \
user_pk_to_url_str
from django import forms
from django.contrib.auth import get_user_model
from django.forms import EmailField, CharField, PasswordInput
from django.utils.translation import ugettext_lazy as _
from snowpenguin.django.recaptcha2.fields import ReCaptchaField
from snowpenguin.django.recaptcha2.widgets import ReCaptchaWidget
class SignupFormRecaptcha(forms.Form):
"""integrate a recaptcha field."""
recaptcha = ReCaptchaField(widget=ReCaptchaWidget())
first_name = CharField(max_length=30, label=_('First name'))
last_name = CharField(max_length=30, label=_('Last name'))
def signup(self, request, user):
user.first_name = self.cleaned_data['first_name']
user.last_name = self.cleaned_data['last_name']
user.save()
class DeleteAccountForm(forms.Form):
email = EmailField(required=True, label=_('Your e-mail address'))
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('user', None)
super().__init__(*args, **kwargs)
def clean_email(self):
email = self.cleaned_data.get('email')
if self.user and self.user.email == email:
return email
else:
raise forms.ValidationError(_(
'The given e-mail address does not match your e-mail address'))
class AccountForm(forms.ModelForm):
passwd = CharField(widget=PasswordInput(), label=_('Confirm password'),
help_text=_('Password is required to prevent '
'unauthorized users from changing your '
'email address and resetting your '
'password. This field does not update your '
'password!'))
class Meta:
model = get_user_model()
fields = ('first_name', 'last_name', 'email')
def clean_email(self):
value = self.cleaned_data['email']
users = filter_users_by_email(value)
if [u for u in users if u.pk != self.instance.pk]:
msg = _(
'This e-mail address is already associated with another '
'account.')
raise forms.ValidationError(msg)
return value
def clean_passwd(self):
value = self.cleaned_data['passwd']
if self.instance.check_password(value):
return value
else:
raise forms.ValidationError(_('Invalid password'))
class CustomResetPasswordForm(forms.Form):
# remove this class once issue #1307 is resolved django-allauth
email = forms.EmailField(
label=_("E-mail"),
required=True,
widget=forms.TextInput(attrs={
"type": "email",
"size": "30",
"placeholder": _("E-mail address"),
})
)
def clean_email(self):
email = self.cleaned_data["email"]
from allauth.account.adapter import get_adapter
email = get_adapter().clean_email(email)
self.users = filter_users_by_email(email)
return self.cleaned_data["email"]
def save(self, request, **kwargs):
from django.contrib.sites.shortcuts import get_current_site
current_site = get_current_site(request)
email = self.cleaned_data["email"]
from django.contrib.auth.tokens import default_token_generator
token_generator = kwargs.get("token_generator",
default_token_generator)
for user in self.users:
temp_key = token_generator.make_token(user)
# save it to the password reset model
# password_reset = PasswordReset(user=user, temp_key=temp_key)
# password_reset.save()
# send the password reset email
from django.urls import reverse
path = reverse("account_reset_password_from_key",
kwargs=dict(uidb36=user_pk_to_url_str(user),
key=temp_key))
from allauth.utils import build_absolute_uri
url = build_absolute_uri(
request, path)
context = {"current_site": current_site,
"user": user,
"password_reset_url": url,
"request": request}
from allauth.account import app_settings
if app_settings.AUTHENTICATION_METHOD \
!= app_settings.AuthenticationMethod.EMAIL:
context['username'] = user_username(user)
from allauth.account.adapter import get_adapter
get_adapter(request).send_mail(
'account/email/password_reset_key',
email,
context)
return self.cleaned_data["email"]
|
Python
| 0
|
@@ -1,28 +1,100 @@
+from allauth.account.forms import EmailAwarePasswordResetTokenGenerator%0A
from allauth.account.utils i
@@ -3172,16 +3172,32 @@
il(email
+, is_active=True
)%0A%0A
@@ -3445,187 +3445,63 @@
-from django.contrib.auth.tokens import default_token_generator%0A token_generator = kwargs.get(%22token_generator%22,%0A default_t
+token_generator = EmailAwarePasswordResetT
oken
-_g
+G
enerator
)%0A%0A
@@ -3496,16 +3496,17 @@
enerator
+(
)%0A%0A
@@ -4444,20 +4444,16 @@
ETHOD %5C%0A
-
|
9246d33429e1940d5a98c3c16e708159437b88fa
|
enable header modification
|
lib/neuroimaging/tools/AnalyzeHeaderTool.py
|
lib/neuroimaging/tools/AnalyzeHeaderTool.py
|
import os, sys
from optparse import OptionParser, Option
from neuroimaging.data import DataSource
from neuroimaging.refactoring.analyze import struct_fields, AnalyzeHeader
##############################################################################
class AnalyzeHeaderTool (OptionParser):
"Command-line tool for getting and setting Analyze header values."
_usage= "%prog [options] <hdrfile>\n"+__doc__
options = (
Option('-a', '--attribute', dest="attname",
help="Get or set this attribute"),
Option('-v', '--value', dest="value",
help="Set attribute to this value"))
#-------------------------------------------------------------------------
def __init__(self, *args, **kwargs):
OptionParser.__init__(self, *args, **kwargs)
self.set_usage(self._usage)
self.add_options(self.options)
#-------------------------------------------------------------------------
def _error(self, message):
print message
self.print_help()
sys.exit(0)
#-------------------------------------------------------------------------
def run(self):
options, args = self.parse_args()
if len(args) != 1: self._error("Please provide a header file name")
filename = args[0]
if not DataSource().exists(filename):
self._error("File not found: %s"%filename)
header = AnalyzeHeader(filename)
attname, value = options.attname, options.value
if attname is not None:
if value is not None:
print "before: %s = %s"%(attname, getattr(header, attname))
setattr(header, attname, value)
print "after: %s = %s"%(attname, getattr(header, attname))
#write back out
else: print "%s = %s"%(attname, getattr(header, attname))
elif value is not None:
self._error("Only provide a value when an attribute is provided")
else: print header
if __name__ == "__main__": AnalyzeHeaderTool().run()
|
Python
| 0
|
@@ -1576,27 +1576,26 @@
%22before: %25s
- =
+%5Ct
%25s%22%25(attname
@@ -1702,19 +1702,18 @@
fter: %25s
- =
+%5Ct
%25s%22%25(att
@@ -1764,23 +1764,37 @@
-#write back out
+header.write(filename+%22.new%22)
%0A
@@ -1817,19 +1817,18 @@
rint %22%25s
- =
+%5Ct
%25s%22%25(att
|
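The tool reads and writes arbitrary header attributes by name via getattr/setattr, and the commit adds a write-back of the modified header to filename + ".new". A self-contained sketch of the get/set mechanics; Header is a hypothetical stand-in for AnalyzeHeader:

class Header(object):
    """Hypothetical stand-in for AnalyzeHeader."""
    datatype = 4

header = Header()
attname, value = 'datatype', 16

print("before: %s\t%s" % (attname, getattr(header, attname)))
setattr(header, attname, value)           # dynamic attribute update by name
print("after: %s\t%s" % (attname, getattr(header, attname)))
# the commit then persists the change: header.write(filename + ".new")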
1117f6e2d51ed6faf37aa2a6deab8a6ff8fa0e5b
|
test for compiling simple command
|
linemode/tests/test_command_list_printer.py
|
linemode/tests/test_command_list_printer.py
|
import unittest
from linemode.drivers.command_list import CommandListPrinter
class TestCommandListPrinter(unittest.TestCase):
pass
|
Python
| 0.000001
|
@@ -52,34 +52,23 @@
import
-CommandListPrinter
+compile
%0A%0A%0Aclass
@@ -119,9 +119,138 @@
-pass
+def test_simple_command(self):%0A program = compile(%5B%0A %22reset%22%0A %5D)%0A self.assertEqual(program, b'reset')
%0A
|
5662a6cb9cd567b5e08398e4e5394f8049f02741
|
Update tests to allow for id in content type json
|
feincms_extensions/tests/test_content_types.py
|
feincms_extensions/tests/test_content_types.py
|
import datetime
from django.test import TestCase
from . import factories
from .models import Dummy
from .. import content_types
class TestJsonRichTextContent(TestCase):
model = Dummy.content_type_for(content_types.JsonRichTextContent)
def test_json(self):
"""A JsonRichTextContent can be rendered to json."""
text = 'Rich Text'
content = self.model(region='body', text=text)
self.assertEqual(content.json(), {
'content_type': 'rich-text',
'html': text,
})
class TestJsonSectionContent(TestCase):
model = Dummy.content_type_for(content_types.JsonSectionContent)
def test_json(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
image_type = 'image'
copyright = 'Incuna'
created = datetime.datetime(year=2015, month=3, day=1)
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
)
content = self.model(
region='body',
title=title,
richtext=richtext,
mediafile=image,
)
expected = {
'content_type': 'section',
'title': title,
'html': richtext,
'mediafile': {
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
},
}
self.assertEqual(content.json(), expected)
def test_json_no_mediafile(self):
"""A JsonSectionContent can be rendered to json."""
title = 'Section 1'
richtext = 'Rich Text'
content = self.model(
region='body',
title=title,
richtext=richtext,
mediafile=None,
)
expected = {
'content_type': 'section',
'title': title,
'html': richtext,
'mediafile': None,
}
self.assertEqual(content.json(), expected)
class TestJsonMediaFileContent(TestCase):
model = Dummy.content_type_for(content_types.JsonMediaFileContent)
def test_json(self):
"""A JsonMediaFileContent can be rendered to json."""
image_type = 'image'
copyright = 'Incuna'
created = datetime.datetime(year=2015, month=3, day=1)
image = factories.MediaFileFactory.build(
type=image_type,
copyright=copyright,
created=created,
)
content = self.model(region='body', mediafile=image)
expected = {
'content_type': 'media-file',
'url': image.file.url,
'type': image_type,
'created': created,
'copyright': copyright,
'file_size': image.file.size,
}
self.assertEqual(content.json(), expected)
|
Python
| 0
|
@@ -342,32 +342,48 @@
t = 'Rich Text'%0A
+ pk = 42%0A
content
@@ -419,16 +419,23 @@
ext=text
+, pk=pk
)%0A
@@ -534,24 +534,46 @@
tml': text,%0A
+ 'id': pk,%0A
%7D)%0A%0A
@@ -877,32 +877,48 @@
ight = 'Incuna'%0A
+ pk = 42%0A
created
@@ -1258,16 +1258,35 @@
=image,%0A
+ pk=pk,%0A
@@ -1340,32 +1340,54 @@
pe': 'section',%0A
+ 'id': pk,%0A
'tit
@@ -1885,16 +1885,32 @@
ch Text'
+%0A pk = 42
%0A%0A
@@ -1925,32 +1925,51 @@
t = self.model(%0A
+ pk=pk,%0A
regi
@@ -2599,32 +2599,48 @@
month=3, day=1)
+%0A pk = 42
%0A%0A image
@@ -2758,32 +2758,51 @@
reated=created,%0A
+ pk=pk,%0A
)%0A
@@ -2870,32 +2870,32 @@
expected = %7B%0A
-
'con
@@ -2920,16 +2920,38 @@
-file',%0A
+ 'id': pk,%0A
|
3f0b19d153360ee5cf1fda1acfa0e4ad846b6c86
|
fix admin.py, remove site on AccountAccess and add it on Provider
|
allaccess/admin.py
|
allaccess/admin.py
|
from django.contrib import admin
from .models import Provider, AccountAccess
class ProviderAdmin(admin.ModelAdmin):
"Admin customization for OAuth providers."
list_display = ('name', 'enabled', )
class AccountAccessAdmin(admin.ModelAdmin):
"Admin customization for accounts."
list_display = (
'__str__', 'provider', 'user', 'created', 'modified', 'site',)
list_filter = ('provider', 'created', 'modified', 'site', )
admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)
|
Python
| 0
|
@@ -199,16 +199,71 @@
nabled',
+ 'site',)%0A list_filter = ('name', 'enabled', 'site',
)%0A%0A%0Acla
@@ -423,32 +423,24 @@
'modified',
- 'site',
)%0A list_f
@@ -478,32 +478,24 @@
'modified',
- 'site',
)%0A%0A%0Aadmin.s
|
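Escaped diffs are hard to read, so as a reconstruction (not a field of the record) the admin module after this change would read roughly as follows: 'site' moves from AccountAccessAdmin to ProviderAdmin, which also gains a list_filter of its own:

from django.contrib import admin
from .models import Provider, AccountAccess

class ProviderAdmin(admin.ModelAdmin):
    "Admin customization for OAuth providers."
    list_display = ('name', 'enabled', 'site',)
    list_filter = ('name', 'enabled', 'site', )

class AccountAccessAdmin(admin.ModelAdmin):
    "Admin customization for accounts."
    list_display = (
        '__str__', 'provider', 'user', 'created', 'modified',)
    list_filter = ('provider', 'created', 'modified', )

admin.site.register(Provider, ProviderAdmin)
admin.site.register(AccountAccess, AccountAccessAdmin)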
0834084e76bf22c7cde39f60876d993afa28f9a6
|
Rewrite candidate to committee linking function
|
analysis/search.py
|
analysis/search.py
|
#!/usr/bin/env python
#import pandas
import sys
import psycopg2
#from sqlalchemy import create_engine
from geo import states
from geo.geolocation import GeoLocation
class SearchLocation:
'Provides search functions for database queries'
def __init__(self, conn_settings):
self.__geodb = "geozipcodes"
self.__db_prefix = conn_settings['db_prefix']
self.__db_user = conn_settings['db_user']
self.__db_password = conn_settings['db_password']
self.__db_host = conn_settings['db_host']
self.__db_port = conn_settings['db_port']
self.__year = conn_settings['year']
try:
self.geo_conn = psycopg2.connect(dbname=self.__db_prefix+self.__geodb, \
user=self.__db_user,
password=self.__db_password,
host=self.__db_host,
port=self.__db_port
)
self.geo_conn.set_client_encoding("UTF8")
self.geo_cur = self.geo_conn.cursor()
self.fec_conn = psycopg2.connect(dbname=self.__db_prefix+str(self.__year), \
user=self.__db_user,
password=self.__db_password,
host=self.__db_host,
port=self.__db_port
)
self.fec_conn.set_client_encoding("UTF8")
self.fec_cur = self.fec_conn.cursor()
except psycopg2.Error:
raise Exception("Did you define database parameters in config?")
def __del__(self):
self.fec_cur.close()
self.geo_cur.close()
self.fec_conn.close()
self.geo_conn.close()
def get_candidate_committees(cands):
if self.__year <= 1998:
cand_comms = {}
for candidate in cands:
linkage_query = "SELECT cmte_id FROM committee_master WHERE cand_id='%s'" % candidate[1]
cand_comms[candidate[0]] = {"cand_id": candidate[1], "comm_ids":[]}
self.fec_cur.execute(linkage_query)
committee_ids = self.fec_cur.fetchall()
for committee_id in committee_ids:
cand_comms[candidate[0]]["comm_ids"].append(committee_id[0])
return cand_comms
if self.__year > 1998:
cand_comms = {}
for candidate in candidates:
linkage_query = "SELECT cmte_id FROM candidate_linkage WHERE cand_id='%s'" % candidate[1]
cand_comms[candidate[0]] = {"cand_id": candidate[1], "comm_ids":[]}
self.fec_cur.execute(linkage_query)
committee_ids = self.fec_cur.fetchall()
for committee_id in committee_ids:
cand_comms[candidate[0]]["comm_ids"].append(committee_id[0])
return cand_comms
def search_names_by_zip(self, parameters):
'Search by zipcode'
try:
zipcode = parameters['zipcode']
distance = parameters['distance']
unit = parameters['unit']
except KeyError:
raise KeyError("Please define zipcode, distance, and unit")
if unit == "miles":
distance = distance/0.62137
zipcode_stmt = "SELECT state, latitude, longitude FROM zipcodes WHERE zip LIKE'%s%%';" % zipcode
self.geo_cur.execute(zipcode_stmt)
state, lat, lon = self.geo_cur.fetchone()
loc = GeoLocation.from_degrees(lat, lon)
SW_loc, NE_loc = loc.bounding_locations(distance)
zipcodes_stmt = "SELECT zip FROM zipcodes WHERE latitude BETWEEN '%s' AND '%s' AND longitude BETWEEN '%s' AND '%s' and state='%s';" % \
(SW_loc.deg_lat, NE_loc.deg_lat, SW_loc.deg_lon, NE_loc.deg_lon, state)
self.geo_cur.execute(zipcodes_stmt)
zipcodes = self.geo_cur.fetchall()
__zipcodes = []
for __zipcode in zipcodes:
__zipcodes.append(__zipcode[0].split(".")[0])
__temp = "SELECT DISTINCT cand_name, cand_id, cand_pty_affiliation, cand_city, cand_st FROM candidate_master WHERE cand_zip in %s ORDER BY cand_name;"
candidates_query = self.fec_cur.mogrify(__temp, (tuple(__zipcodes),))
self.fec_cur.execute(candidates_query)
candidates = self.fec_cur.fetchall()
candidates_committees = get_candidate_committees(candidates)
return candidates, candidates_committees
def search_by_other(self, parameters):
'Search by city, state, or city and state'
try:
search_key = parameters['search']
search_query = parameters['query']
except KeyError:
raise KeyError("Please define search parameter")
if ',' in search_query:
city_key = 'cand_city'
st_key = 'cand_st'
city = search_query.split(', ')[0]
st = search_query.split(', ')[1]
for state in states.states_titles:
if state['name'] == st:
st = state['abbreviation']
query_stmt = "SELECT cand_name, cand_id, cand_pty_affiliation, cand_city, cand_st FROM candidate_master WHERE %s LIKE UPPER('%%%s%%') and %s LIKE UPPER('%%%s%%');" % \
(city_key, city, st_key, st)
else:
for state in states.states_titles:
if state['name'] == search_query:
search_query = state['abbreviation']
query_stmt = "SELECT DISTINCT cand_name, cand_id, cand_pty_affiliation, cand_city, cand_st FROM candidate_master WHERE %s LIKE UPPER('%%%s%%');" % (search_key, search_query)
self.fec_cur.execute(query_stmt)
candidates = self.fec_cur.fetchall()
candidates_committees= get_candidate_committees(candidates)
# return ([(name, cand_id, cand_pty_affiliation, cand_city, cand_st), ...], {cand_name : {cand_id: 'cand_id', comm_ids: [cmte_id]}}
return candidates, candidates_committees
def search_by_name(self, parameters):
try:
name = parameters['name']
except KeyError:
raise KeyError("Please define name")
pass
|
Python
| 0.000003
|
@@ -1666,16 +1666,23 @@
%0A def
+self.__
get_cand
@@ -4070,16 +4070,23 @@
ttees =
+self.__
get_cand
@@ -5311,16 +5311,23 @@
ittees=
+self.__
get_cand
|
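The diff prefixes the helper and its call sites with self.__, turning it into a name-mangled private method (note the def line would still need a plain name after `def` and a self parameter, which this hunk as recorded does not produce). A sketch of how such a private helper is normally declared and invoked:

class SearchLocation(object):
    def __get_candidate_committees(self, cands):
        # mangled to _SearchLocation__get_candidate_committees, so it is
        # only reachable inside the class as self.__get_candidate_committees(...)
        return {name: {'cand_id': cid, 'comm_ids': []} for name, cid in cands}

    def search_by_name(self, cands):
        return self.__get_candidate_committees(cands)

print(SearchLocation().search_by_name([('SMITH, JOHN', 'H0CA00001')]))
# {'SMITH, JOHN': {'cand_id': 'H0CA00001', 'comm_ids': []}}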
ee6d4f50b4a27e9cc8c3b5f8a821a6d9c0cf4f21
|
remove unwanted changes
|
frappe/website/page_renderers/document_page.py
|
frappe/website/page_renderers/document_page.py
|
import frappe
from frappe.model.document import get_controller
from frappe.website.page_renderers.base_template_page import BaseTemplatePage
from frappe.website.utils import build_response
from frappe.website.router import (get_doctypes_with_web_view,
get_page_info_from_web_page_with_dynamic_routes)
class DocumentPage(BaseTemplatePage):
def can_render(self):
'''
Find a document with matching `route` from all doctypes with `has_web_view`=1
'''
if self.search_in_doctypes_with_web_view():
return True
if self.search_web_page_dynamic_routes():
return True
return False
def search_in_doctypes_with_web_view(self):
for doctype in get_doctypes_with_web_view():
filters = dict(route=self.path)
meta = frappe.get_meta(doctype)
condition_field = self.get_condition_field(meta)
if condition_field:
filters[condition_field] = 1
try:
self.docname = frappe.db.get_value(doctype, filters, 'name')
if self.docname:
self.doctype = doctype
return True
except Exception as e:
if not frappe.db.is_missing_column(e):
raise e
def search_web_page_dynamic_routes(self):
d = get_page_info_from_web_page_with_dynamic_routes(self.path)
if d:
self.doctype = 'Web Page'
self.docname = d.name
return True
else:
return False
def render(self):
self.doc = frappe.get_doc(self.doctype, self.docname)
self.init_context()
self.update_context()
self.post_process_context()
html = frappe.get_template(self.template_path).render(self.context)
html = self.add_csrf_token(html)
return build_response(self.path, html, self.http_status_code or 200, self.headers)
def update_context(self):
self.context.doc = self.doc
self.context.update(self.context.doc.as_dict())
self.context.update(self.context.doc.get_page_info())
self.template_path = self.context.template or self.template_path
if not self.template_path:
self.template_path = self.context.doc.meta.get_web_template()
if hasattr(self.doc, "get_context"):
ret = self.doc.get_context(self.context)
if ret:
self.context.update(ret)
for prop in ("no_cache", "sitemap"):
if prop not in self.context:
self.context[prop] = getattr(self.doc, prop, False)
def get_condition_field(self, meta):
condition_field = None
if meta.is_published_field:
condition_field = meta.is_published_field
elif not meta.custom:
controller = get_controller(meta.name)
condition_field = controller.website.condition_field
return condition_field
|
Python
| 0.005417
|
@@ -167,30 +167,26 @@
import
-build_response
+cache_html
%0Afrom fr
@@ -1303,24 +1303,155 @@
nder(self):%0A
+%09%09html = self.get_html()%0A%09%09html = self.add_csrf_token(html)%0A%0A%09%09return self.build_response(html)%0A%0A%09@cache_html%0A%09def get_html(self):%0A
%09%09self.doc =
@@ -1646,126 +1646,19 @@
)%0A%09%09
-html = self.add_csrf_token(html)%0A%0A%09%09return build_response(self.path, html, self.http_status_code or 200, self.headers)
+return html
%0A%0A%09d
|
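The refactor splits render() so the expensive template rendering sits behind a cache_html decorator imported from frappe.website.utils. Frappe's actual decorator is framework-specific; below is a hypothetical stand-in showing the shape of such a memoizing decorator on a bound method:

import functools

def cache_html(render):
    """Hypothetical stand-in for frappe's cache_html decorator."""
    _cache = {}

    @functools.wraps(render)
    def wrapper(self):
        key = (self.doctype, self.docname)
        if key not in _cache:           # render once per (doctype, docname)
            _cache[key] = render(self)
        return _cache[key]
    return wrapper

class DocumentPage(object):
    doctype, docname = 'Web Page', 'home'

    @cache_html
    def get_html(self):
        print('rendering...')           # only runs on a cache miss
        return '<html>home</html>'

page = DocumentPage()
page.get_html()   # prints 'rendering...'
page.get_html()   # served from the cache, no print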
a5f1ad3e47daf3f8db04b605fb13ff3f9f871e3a
|
Divide entire loss by n, not just mll component.
|
gpytorch/mlls/exact_marginal_log_likelihood.py
|
gpytorch/mlls/exact_marginal_log_likelihood.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import math
import torch
from .marginal_log_likelihood import MarginalLogLikelihood
from ..lazy import LazyVariable, NonLazyVariable
from ..likelihoods import GaussianLikelihood
from ..random_variables import GaussianRandomVariable, MultitaskGaussianRandomVariable
from ..variational import MVNVariationalStrategy
class ExactMarginalLogLikelihood(MarginalLogLikelihood):
def __init__(self, likelihood, model):
"""
A special MLL designed for exact inference
Args:
- likelihood: (Likelihood) - the likelihood for the model
- model: (Module) - the exact GP model
"""
if not isinstance(likelihood, GaussianLikelihood):
raise RuntimeError("Likelihood must be Gaussian for exact inference")
super(ExactMarginalLogLikelihood, self).__init__(likelihood, model)
def forward(self, output, target):
if not isinstance(output, GaussianRandomVariable) and not isinstance(output, MultitaskGaussianRandomVariable):
raise RuntimeError("ExactMarginalLogLikelihood can only operate on Gaussian random variables")
if not isinstance(output.covar(), LazyVariable):
output = output.__class__(output.mean(), NonLazyVariable(output.covar()))
mean, covar = self.likelihood(output).representation()
n_data = target.size(-1)
if target.size() != mean.size():
raise RuntimeError(
"Expected target size to equal mean size, but got {} and {}".format(target.size(), mean.size())
)
if isinstance(output, MultitaskGaussianRandomVariable):
if target.ndimension() == 2:
mean = mean.view(-1)
target = target.view(-1)
elif target.ndimension() == 3:
mean = mean.view(mean.size(0), -1)
target = target.view(target.size(0), -1)
# Get log determininat and first part of quadratic form
inv_quad, log_det = covar.inv_quad_log_det(inv_quad_rhs=(target - mean).unsqueeze(-1), log_det=True)
# Add terms for SGPR / when inducing points are learned
trace_diff = torch.zeros_like(inv_quad)
for variational_strategy in self.model.variational_strategies():
if isinstance(variational_strategy, MVNVariationalStrategy):
trace_diff = trace_diff.add(variational_strategy.trace_diff())
trace_diff = trace_diff / self.likelihood.log_noise.exp()
res = -0.5 * sum([inv_quad, log_det, n_data * math.log(2 * math.pi), -trace_diff])
res.div_(n_data)
# Add log probs of priors on the parameters
for _, param, prior in self.named_parameter_priors():
res.add_(prior.log_prob(param).sum())
for _, prior, params, transform in self.named_derived_priors():
res.add_(prior.log_prob(transform(*params)).sum())
return res
|
Python
| 0.000004
|
@@ -2693,33 +2693,8 @@
ff%5D)
-%0A res.div_(n_data)
%0A%0A
@@ -3009,9 +3009,22 @@
turn res
+.div_(n_data)
%0A
|
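The one-line move matters numerically: previously res.div_(n_data) ran before the prior log-probs were added, so the priors entered the loss at full scale; afterwards the division happens on the returned expression, scaling the entire objective by 1/n. A toy illustration with hypothetical magnitudes:

import torch

mll_terms = torch.tensor(10.0)       # hypothetical summed MLL terms
prior_log_prob = torch.tensor(2.0)   # hypothetical parameter-prior term
n_data = 4

before = mll_terms / n_data + prior_log_prob    # tensor(4.5000): priors unscaled
after = (mll_terms + prior_log_prob) / n_data   # tensor(3.0000): whole loss / n

print(before, after)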
87041ab71b2f1c1de4425070a032d03b42045d6d
|
Test step to repeatedly access server API
|
integration-tests/features/steps/server_api.py
|
integration-tests/features/steps/server_api.py
|
"""Basic checks for the server API."""
from behave import given, then, when
import requests
from src.utils import *
from src.authorization_tokens import *
from src.attribute_checks import *
from src.schema_validator import *
@when('I access {url:S}')
def access_url(context, url):
"""Access the service API using the HTTP GET method."""
context.response = requests.get(context.coreapi_url + url)
@when('I access {url:S} with authorization token')
def access_url_with_authorization_token(context, url):
"""Access the service API using the HTTP GET method."""
context.response = requests.get(context.coreapi_url + url,
headers=authorization(context))
@when('I access {url:S} without valid values')
def check_submit_feedback(context, url):
"""Access the submit-feedback API using the HTTP POST method."""
payload = {
"stack_id": "1234-569586048",
"recommendation_type": "companion",
"package_name": "blah-blah",
"feedback_type": True,
"ecosystem": None
}
context.response = requests.post(context.coreapi_url + url,
headers=authorization(context),
data=payload)
@then('I should see {num:d} ecosystems')
def check_ecosystems(context, num):
"""Check if the API call returns correct number of ecosystems."""
ecosystems = context.response.json()['items']
assert len(ecosystems) == num
for e in ecosystems:
# assert that there is 'ecosystem' field in every ecosystem
assert 'ecosystem' in e
@then('I should see {num:d} versions ({versions}), all for {ecosystem}/{package} package')
def check_versions(context, num=0, versions='', ecosystem='', package=''):
"""Check the versions for the selected ecosystems and package."""
versions = split_comma_separated_list(versions)
vrsns = context.response.json()['items']
assert len(vrsns) == num
for v in vrsns:
assert v['ecosystem'] == ecosystem
assert v['package'] == package
assert v['version'] in versions
@then('I should find the endpoint {endpoint} in the list of supported endpoints')
def check_endpoint_in_paths(context, endpoint):
"""Check the existence of given endpoint in the list of all supported endpoints."""
data = context.response.json()
paths = check_and_get_attribute(data, "paths")
assert endpoint in paths, "Cannot find the expected endpoint {e}".format(
e=endpoint)
@then('I should find the schema {schema} version {version} in the list of supported schemas')
def check_schema_existence(context, schema, version, selector=None):
"""Check the existence of given schema."""
data = context.response.json()
if selector is not None:
api_schemas = check_and_get_attribute(data, selector)
schema = check_and_get_attribute(api_schemas, schema)
else:
schema = check_and_get_attribute(data, schema)
check_and_get_attribute(schema, version)
@then('I should find the schema {schema} version {version} in the list of supported schemas '
'for API calls')
def check_schema_existence_api_call(context, schema, version):
"""Check the existence of given schema (API calls)."""
check_schema_existence(context, schema, version, "api")
@then('I should find the schema {schema} version {version} in the list of component analyses '
'schemas')
def check_schema_existence_component_analyses(context, schema, version):
"""Check the existence of given schema (component analyses)."""
check_schema_existence(context, schema, version, "component_analyses")
@then('I should find the schema version {version} in the list of schema versions')
def check_schema_version(context, version):
"""Check the existence of schema version."""
data = context.response.json()
check_and_get_attribute(data, version)
@then('I should find valid schema in the server response')
def check_valid_schema(context):
"""Check if the schema is valid, validation is performed against metaschema."""
data = context.response.json()
validate_schema(data)
|
Python
| 0
|
@@ -84,16 +84,28 @@
requests
+%0Aimport time
%0A%0Afrom s
@@ -410,24 +410,541 @@
rl + url)%0A%0A%0A
+@when('I access the %7Burl:S%7D %7Brepeat_count:d%7D times with %7Bdelay:d%7D seconds delay')%0Adef access_url_repeatedly(context, url, repeat_count, delay):%0A %22%22%22Access the service API using the HTTP GET method repeatedly.%22%22%22%0A context.api_call_results = %5B%5D%0A url = context.coreapi_url + url%0A%0A # repeatedly call REST API endpoint and collect HTTP status codes%0A for i in range(repeat_count):%0A response = requests.get(url)%0A context.api_call_results.append(response.status_code)%0A time.sleep(delay)%0A%0A%0A
@when('I acc
|
77a0d62157ea5b9bc27e7e081676fe7045f83c2e
|
Add admin help handler to admincommands
|
ownbot/admincommands.py
|
ownbot/admincommands.py
|
# -*- coding: utf-8 -*-
"""
Provides the ownbot AdminCommands class.
"""
from telegram.parsemode import ParseMode
from telegram.ext import CommandHandler
from ownbot.auth import requires_usergroup
from ownbot.user import User
from ownbot.usermanager import UserManager
class AdminCommands(object):
"""
Provides admin command handlers for user/group
management.
Args:
dispatcher (telegram.dispatcher): Command dispatcher to register the
admin commands.
"""
def __init__(self, dispatcher):
self.__usermanager = UserManager()
self.__dispatcher = dispatcher
self.__register_handlers()
def __register_handlers(self):
"""Registers the admin commands.
"""
self.__dispatcher.addHandler(
CommandHandler("adminhelp", self.__admin_help)
)
self.__dispatcher.addHandler(
CommandHandler("users", self.__get_users)
)
self.__dispatcher.addHandler(
CommandHandler("adduser", self.__add_user, pass_args=True)
)
self.__dispatcher.addHandler(
CommandHandler("rmuser", self.__rm_user, pass_args=True)
)
def __admin_help(self, bot, update):
"""Command handler function for `adminhelp` command.
Sends a list of all available commands to the
client.
"""
print(update.message.text)
@staticmethod
@requires_usergroup("admin")
def __get_users(bot, update):
"""Command handler function for `users` command.
Sends a list of all currently registered
users.
Args:
bot (telegram.Bot): The bot object.
update (telegram.Update): The sent update.
"""
message = str()
config = UserManager().config
if not config:
message = "No users registered"
bot.sendMessage(chat_id=update.message.chat_id, text=message)
for group, data in config.iteritems():
message += "*{0}*\n".format(group)
if data.get("users"):
message += " verified users:\n"
for user in data.get("users"):
message += " - {0} with id {1}\n" \
.format(user.get("username"),
user.get("id"))
if data.get("unverified"):
message += " unverified users:\n"
for user in data.get("unverified"):
message += " - {0}\n".format(user)
bot.sendMessage(chat_id=update.message.chat_id, text=message,
parse_mode=ParseMode.MARKDOWN)
@staticmethod
@requires_usergroup("admin")
def __add_user(bot, update, args):
"""Command handler function for `adduser` command.
Adds a telegram user to a usergroup.
Args:
bot (telegram.Bot): The bot object.
update (telegram.Update): The sent update.
args (list): The command's arguments.
"""
if len(args) != 2:
message = "Usage: adduser <user> <group>"
bot.sendMessage(chat_id=update.message.chat_id, text=message)
return
username = args[0]
group = args[1]
if not UserManager().add_user(username, group):
message = "The user '{0}' is already in the group '{1}'!" \
.format(username, group)
else:
message = "Added user '{0}' to the group '{1}'." \
.format(username, group)
bot.sendMessage(chat_id=update.message.chat_id, text=message)
@staticmethod
@requires_usergroup("admin")
def __rm_user(bot, update, args):
"""Command handler function for `rmuser` command.
Removes a telegram user from a usergroup.
Args:
bot (telegram.Bot): The bot object.
update (telegram.Update): The sent update.
args (list): The command's arguments.
"""
if len(args) != 2:
message = "Usage: rmuser <user> <group>"
bot.sendMessage(chat_id=update.message.chat_id, text=message)
return
username = args[0]
group = args[1]
if UserManager().rm_user(username, group):
message = "Removed user '{0}' from the group '{1}'.".format(
username, group)
else:
message = "The user '{0}' could not be found in the group '{1}'!"\
.format(username, group)
bot.sendMessage(chat_id=update.message.chat_id, text=message)
|
Python
| 0
|
@@ -199,37 +199,8 @@
oup%0A
-from ownbot.user import User%0A
from
@@ -267,16 +267,58 @@
object):
+ # pylint: disable=too-few-public-methods
%0A %22%22%22
@@ -1218,16 +1218,68 @@
)%0A%0A
+%0A @staticmethod%0A @requires_usergroup(%22admin%22)%0A
def
@@ -1291,22 +1291,16 @@
in_help(
-self,
bot, upd
@@ -1449,53 +1449,445 @@
nt.%0A
- %22%22%22%0A print(update.message.text
+%0A Args:%0A bot (telegram.Bot): The bot object.%0A update (telegram.Update): The sent update.%0A %22%22%22%0A message = %22%22%22%0A*Available Admin Commands*%0A/users - Lists all registered users.%0A/adduser - Adds a user to a group.%0A/rmuser - Removes a user from a group.%0A %22%22%22%0A bot.sendMessage(chat_id=update.message.chat_id, text=message,%0A parse_mode=ParseMode.MARKDOWN
)%0A%0A
|
16110c627100f5fd6bdaaf859ed71559ea17780a
|
Fix push/pull tests
|
jupyterlab_git/tests/test_pushpull.py
|
jupyterlab_git/tests/test_pushpull.py
|
from subprocess import PIPE
from mock import patch, call, Mock
from jupyterlab_git.git import Git
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'pull', '--no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_pull_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', ''.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').pull('test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'pull', '--no-commit'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_fail(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'Authentication failed'.encode('utf-8')),
'returncode': 1
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('test_origin', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'push', 'test_origin', 'HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 1, 'message': 'Authentication failed'} == actual_response
@patch('subprocess.Popen')
@patch('os.environ', {'TEST': 'test'})
def test_git_push_success(mock_subproc_popen):
# Given
process_mock = Mock()
attrs = {
'communicate.return_value': ('output', 'does not matter'.encode('utf-8')),
'returncode': 0
}
process_mock.configure_mock(**attrs)
mock_subproc_popen.return_value = process_mock
# When
actual_response = Git(root_dir='/bin').push('.', 'HEAD:test_master', 'test_curr_path')
# Then
mock_subproc_popen.assert_has_calls([
call(
['git', 'push', '.', 'HEAD:test_master'],
stdout=PIPE,
stderr=PIPE,
cwd='/bin/test_curr_path',
env={'TEST': 'test', 'GIT_TERMINAL_PROMPT': '0'},
),
call().communicate()
])
assert {'code': 0} == actual_response
|
Python
| 0.000686
|
@@ -627,36 +627,30 @@
%5B'git
-', 'pull', '
+ pull
--no-commit'
@@ -1452,20 +1452,14 @@
'git
-', 'pull', '
+ pull
--no
@@ -2282,28 +2282,22 @@
%5B'git
-', 'push', '
+ push
test_ori
@@ -2291,36 +2291,33 @@
push test_origin
-', '
+
HEAD:test_master
@@ -3160,25 +3160,16 @@
'git
-', 'push', '.', '
+ push .
HEAD
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.