| commit (string, 40) | subject (string, 1-3.25k) | old_file (string, 4-311) | new_file (string, 4-311) | old_contents (string, 0-26.3k) | lang (string, 3 classes) | proba (float64, 0-1) | diff (string, 0-7.82k) |
|---|---|---|---|---|---|---|---|
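The diff cells below keep each hunk's content with newlines and table-breaking characters percent-encoded (`%0A` for a newline, `%25` for a literal `%`, `%7C` for `|`, and so on), and the `@@ -N,M +N,M @@` hunk headers appear to give character offsets rather than line numbers. A minimal sketch of recovering a readable hunk body, assuming standard URL percent-encoding; `decode_diff` and the sample cell are illustrative helpers, not part of the dataset itself:

```python
from urllib.parse import unquote

def decode_diff(cell: str) -> str:
    """Decode a percent-encoded diff cell back into plain text.

    %0A becomes a newline, %25 a literal '%', %7C a '|', etc.
    """
    return unquote(cell)

# First hunk body of the first row below; prints the hunk with its
# '+' added-line marker and blank lines restored.
print(decode_diff("sa_util%0A+import csv_util%0A%0A%0Adef im"))
```

Note that `unquote` (rather than `unquote_plus`) is used deliberately, so the literal `+` markers on added lines survive decoding.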
74b8ca3a3e07139545049e6c9a9fb054208473be
|
fix import_csv command
|
munge/cli.py
|
munge/cli.py
|
import argparse
import os.path
import config
import importers
import sa_util
def import_module(args):
for module in args.module:
m = getattr(importers, module)
m.importer(verbose=args.verbose)
view_summaries(args)
sa_util.swap_tables(verbose=args.verbose)
def view_summaries(args, just_views=False):
for module in args.module:
m = getattr(importers, module)
data = getattr(m, 'AUTO_SQL')
if data:
sa_util.build_views_and_summaries(
data, verbose=args.verbose, just_views=just_views
)
def defined_tables():
tables = []
for module in importers.__all__:
m = getattr(importers, module)
tables_fn = getattr(m, 'tables', None)
if tables_fn:
tables += tables_fn()
info = []
info += getattr(m, 'VIEWS_DATA', None) or []
info += getattr(m, 'SUMMARIES_DATA', None) or []
for item in info:
tables.append(item['name'])
return tables
def clean_db(args):
sa_util.clear_temp_objects(verbose=args.verbose)
tables = sorted(list(
set(sa_util.table_view_list())
- set(defined_tables())
- set(sa_util.dependent_objects())
))
print 'Unknown tables'
for table in tables:
print '\t%s' % table
for table in tables:
response = raw_input('Delete table `%s` [No/yes/quit]:' % table)
if response and response.upper()[0] == 'Y':
sa_util.drop_table_or_view(table, verbose=args.verbose)
if response and response.upper()[0] == 'Q':
return
def export_all(verbose=False):
if verbose:
print('Exporting all tables')
from csv_util import dump_all
dump_all(verbose=verbose)
def export_custom(verbose=False):
if verbose:
print('Exporting custom tables')
import custom_output
def db_functions(verbose=False):
if verbose:
print('Creating db functions')
import postgres_functions
def import_csv(args):
verbose = args.verbose
filename = args.filename
tablename = args.tablename
delimiter = args.delimiter
filename = os.path.join(config.DATA_PATH, 'import', filename)
if delimiter == '\\t':
delimiter = '\t'
if not tablename:
tablename = os.path.splitext(os.path.basename(filename))[0]
if verbose:
print('Importing %s' % args.filename)
sa_util.import_single(filename, tablename, encoding=args.encoding,
delimiter=delimiter, verbose=verbose)
sa_util.swap_tables(verbose=verbose)
def webserver(args):
from munge.app import app
app.run(debug=True)
def main():
commands = [
'export_all',
'export_custom',
'web',
'clean_db',
'db_functions',
'swap_temp',
]
parser = argparse.ArgumentParser(
description='Command line interface for munge'
)
parser.add_argument('-v', '--verbose', action='count', default=0)
subparsers = parser.add_subparsers(help='commands', dest='command')
for command in commands:
subparsers.add_parser(command)
import_csv_parser = subparsers.add_parser('import_csv')
import_csv_parser.add_argument("--encoding", default='utf-8')
import_csv_parser.add_argument("--delimiter", default=',')
import_csv_parser.add_argument('--tablename', default=None)
import_csv_parser.add_argument('filename')
module_commands = [
'import',
'views',
'summaries',
]
for command in module_commands:
module_parser = subparsers.add_parser(command)
module_parser.add_argument('module', nargs='*')
args = parser.parse_args()
if args.command == 'export_all':
export_all(verbose=args.verbose)
elif args.command == 'import':
import_module(args)
elif args.command == 'views':
view_summaries(args, just_views=True)
sa_util.swap_tables(verbose=args.verbose)
elif args.command == 'swap_temp':
sa_util.swap_tables(verbose=args.verbose)
elif args.command == 'summaries':
view_summaries(args)
sa_util.swap_tables(verbose=args.verbose)
elif args.command == 'export_custom':
export_custom(verbose=args.verbose)
elif args.command == 'import_csv':
import_csv(args)
elif args.command == 'web':
webserver(args)
elif args.command == 'clean_db':
clean_db(args)
elif args.command == 'db_functions':
db_functions(verbose=args.verbose)
|
Python
| 0.000439
|
@@ -71,16 +71,32 @@
sa_util%0A
+import csv_util%0A
%0A%0Adef im
@@ -1712,21 +1712,16 @@
-from
csv_util
imp
@@ -1720,29 +1720,9 @@
util
- import dump_all%0A
+.
dump
@@ -2393,26 +2393,27 @@
lename)%0A
-sa
+csv
_util.import
|
e35d791a25c1297e5f6477955da075cf66375e35
|
Fix addr2line output parsing wrt ':'
|
src/untrusted/crash_dump/decode_dump.py
|
src/untrusted/crash_dump/decode_dump.py
|
#!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility to decode a crash dump generated by untrusted_crash_dump.[ch]
Currently this produces a simple stack trace.
"""
import json
import optparse
import os
import posixpath
import subprocess
import sys
class CoreDecoder(object):
"""Class to process core dumps."""
def __init__(self, main_nexe, nmf_filename,
addr2line, library_paths, platform):
"""Construct and object to process core dumps.
Args:
main_nexe: nexe to resolve NaClMain references from.
nmf_filename: nmf to resovle references from.
addr2line: path to appropriate addr2line.
library_paths: list of paths to search for libraries.
platform: platform string to use in nmf files.
"""
self.main_nexe = main_nexe
self.nmf_filename = nmf_filename
if nmf_filename == '-':
self.nmf_data = {}
else:
self.nmf_data = json.load(open(nmf_filename))
self.addr2line = addr2line
self.library_paths = library_paths
self.platform = platform
def _SelectModulePath(self, filename):
"""Select which path to get a module from.
Args:
filename: filename of a module (as appears in phdrs).
Returns:
Full local path to the file.
Derived by consulting the manifest.
"""
# For some names try the main nexe.
# NaClMain is the argv[0] setup in sel_main.c
# (null) shows up in chrome.
if self.main_nexe is not None and filename in ['NaClMain', '', '(null)']:
return self.main_nexe
filepart = posixpath.basename(filename)
nmf_entry = self.nmf_data.get('files', {}).get(filepart, {})
nmf_url = nmf_entry.get(self.platform, {}).get('url')
# Try filename directly if not in manifest.
if nmf_url is None:
return filename
# Look for the module relative to the manifest (if any),
# then in other search paths.
paths = []
if self.nmf_filename != '-':
paths.append(os.path.dirname(self.nmf_filename))
paths.extend(self.library_paths)
for path in paths:
pfilename = os.path.join(path, nmf_url)
if os.path.exists(pfilename):
return pfilename
# If nothing else, try the path directly.
return filename
def _DecodeAddressSegment(self, segments, address):
"""Convert an address to a segment relative one, plus filename.
Args:
segments: a list of phdr segments.
address: a process wide code address.
Returns:
A tuple of filename and segment relative address.
"""
for segment in segments:
for phdr in segment['dlpi_phdr']:
start = segment['dlpi_addr'] + phdr['p_vaddr']
end = start + phdr['p_memsz']
if address >= start and address < end:
return (segment['dlpi_name'], address - segment['dlpi_addr'])
return ('(null)', address)
def _Addr2Line(self, segments, address):
"""Use addr2line to decode a code address.
Args:
segments: A list of phdr segments.
address: a code address.
Returns:
A list of dicts containing: function, filename, lineno.
"""
filename, address = self._DecodeAddressSegment(segments, address)
filename = self._SelectModulePath(filename)
if not os.path.exists(filename):
return [{
'function': 'Unknown_function',
'filename': 'unknown_file',
'lineno': -1,
}]
# Use address - 1 to get the call site instead of the line after.
address -= 1
cmd = [
self.addr2line, '-f', '--inlines', '-e', filename, '0x%08x' % address,
]
process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
process_stdout, _ = process.communicate()
assert process.returncode == 0
lines = process_stdout.splitlines()
assert len(lines) % 2 == 0
results = []
for index in xrange(len(lines) / 2):
func = lines[index * 2]
afilename, lineno = lines[index * 2 + 1].split(':', 1)
results.append({
'function': func,
'filename': afilename,
'lineno': int(lineno),
})
return results
def LoadAndDecode(self, core_path):
"""Given a core.json file, load and embellish with decoded addresses.
Args:
core_path: source file containing a dump.
Returns:
An embelished core dump dict (decoded code addresses).
"""
core = json.load(open(core_path))
for frame in core['frames']:
frame['scopes'] = self._Addr2Line(core['segments'], frame['prog_ctr'])
return core
def StackTrace(self, info):
"""Convert a decoded core.json dump to a simple stack trace.
Args:
info: core.json info with decoded code addresses.
Returns:
A list of dicts with filename, lineno, function (deepest first).
"""
trace = []
for frame in info['frames']:
for scope in frame['scopes']:
trace.append(scope)
return trace
def PrintTrace(self, trace, out):
"""Print a trace to a file like object.
Args:
trace: A list of [filename, lineno, function] (deepest first).
out: file like object to output the trace to.
"""
for scope in trace:
out.write('%s at %s:%d\n' % (
scope['function'],
scope['filename'],
scope['lineno']))
def Main(args):
parser = optparse.OptionParser(
usage='USAGE: %prog [options] <core.json>')
parser.add_option('-m', '--main-nexe', dest='main_nexe',
help='nexe to resolve NaClMain references from')
parser.add_option('-n', '--nmf', dest='nmf_filename', default='-',
help='nmf to resolve references from')
parser.add_option('-a', '--addr2line', dest='addr2line',
help='path to appropriate addr2line')
parser.add_option('-L', '--library-path', dest='library_paths',
action='append', default=[],
help='path to search for shared libraries')
parser.add_option('-p', '--platform', dest='platform',
help='platform in a style match nmf files')
options, args = parser.parse_args(args)
if len(args) != 1:
parser.print_help()
sys.exit(1)
decoder = CoreDecoder(
main_nexe=options.main_nexe,
nmf_filename=options.nmf_filename,
addr2line=options.add2line,
library_paths=options.library_paths,
platform=options.platform)
info = decoder.LoadAndDecode(args[0])
trace = decoder.StackTrace(info)
decoder.PrintTrace(trace, sys.stdout)
if __name__ == '__main__':
Main(sys.argv[1:])
|
Python
| 0.000118
|
@@ -4040,16 +4040,17 @@
2 + 1%5D.
+r
split(':
|
85f7b462490fec31c26d8c27bc86d547ae032d91
|
Version 0.1.1
|
wordpress2puput/__init__.py
|
wordpress2puput/__init__.py
|
# -*- coding: utf-8 -*-
__author__ = 'David Valera'
__email__ = 'david.valera.martinez@gmail.com'
__version__ = '0.1'
|
Python
| 0.000001
|
@@ -109,10 +109,12 @@
_ = '0.1
+.1
'%0A
|
5edf75129189fc37ce24ff338821b726b0a7c28a
|
Revert 2c1ee5f..1620020
|
Lesson_3_Problem_Set/05-Fixing_Name/name.py
|
Lesson_3_Problem_Set/05-Fixing_Name/name.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
In this problem set you work with cities infobox data, audit it, come up with a cleaning idea and then clean it up.
In the previous quiz you recognized that the "name" value can be an array (or list in Python terms).
It would make it easier to process and query the data later, if all values for the name
would be in a Python list, instead of being just a string separated with special characters, like now.
Finish the function fix_name(). It will recieve a string as an input, and it has to return a list
of all the names. If there is only one name, the list with have only one item in it, if the name is "NONE",
the list should be empty.
The rest of the code is just an example on how this function can be used
"""
import codecs
import csv
import pprint
CITIES = 'cities.csv'
def fix_name(name):
if name == "NULL" or name == "":
return []
else:
return name.replace('{', '').replace('}','').split('|')
return name
def process_file(filename):
data = []
with open(filename, "r") as f:
reader = csv.DictReader(f)
#skipping the extra matadata
for i in range(3):
l = reader.next()
# processing file
for line in reader:
# calling your function to fix the area value
if "name" in line:
line["name"] = fix_name(line["name"])
data.append(line)
return data
def test():
data = process_file(CITIES)
print "Printing 20 results:"
for n in range(20):
pprint.pprint(data[n]["name"])
assert data[14]["name"] == ['Negtemiut', 'Nightmute']
assert data[3]["name"] == ['Kumhari']
if __name__ == "__main__":
test()
|
Python
| 0
|
@@ -856,132 +856,24 @@
-if name == %22NULL%22 or name == %22%22:%0A return %5B%5D%0A else:%0A return name.replace('%7B', '').replace('%7D','').split('%7C')
+# YOUR CODE HERE
%0A%0A
|
f3702a0f248d4cd1fef3a9ed2e1716b9ed8511a1
|
allow one value in range filter
|
django_filters/filters.py
|
django_filters/filters.py
|
from __future__ import absolute_import
from __future__ import unicode_literals
from datetime import timedelta
from django import forms
from django.db.models import Q
from django.db.models.sql.constants import QUERY_TERMS
from django.utils import six
from django.utils.timezone import now
from django.utils.translation import ugettext_lazy as _
from .fields import RangeField, LookupTypeField, Lookup
__all__ = [
'Filter', 'CharFilter', 'BooleanFilter', 'ChoiceFilter',
'MultipleChoiceFilter', 'DateFilter', 'DateTimeFilter', 'TimeFilter',
'ModelChoiceFilter', 'ModelMultipleChoiceFilter', 'NumberFilter',
'RangeFilter', 'DateRangeFilter', 'AllValuesFilter', 'MethodFilter'
]
LOOKUP_TYPES = sorted(QUERY_TERMS)
class Filter(object):
creation_counter = 0
field_class = forms.Field
def __init__(self, name=None, label=None, widget=None, action=None,
lookup_type='exact', required=False, distinct=False, exclude=False, **kwargs):
self.name = name
self.label = label
if action:
self.filter = action
self.lookup_type = lookup_type
self.widget = widget
self.required = required
self.extra = kwargs
self.distinct = distinct
self.exclude = exclude
self.creation_counter = Filter.creation_counter
Filter.creation_counter += 1
@property
def field(self):
if not hasattr(self, '_field'):
help_text = _('This is an exclusion filter') if self.exclude else ''
if (self.lookup_type is None or
isinstance(self.lookup_type, (list, tuple))):
if self.lookup_type is None:
lookup = [(x, x) for x in LOOKUP_TYPES]
else:
lookup = [
(x, x) for x in LOOKUP_TYPES if x in self.lookup_type]
self._field = LookupTypeField(self.field_class(
required=self.required, widget=self.widget, **self.extra),
lookup, required=self.required, label=self.label, help_text=help_text)
else:
self._field = self.field_class(required=self.required,
label=self.label, widget=self.widget,
help_text=help_text, **self.extra)
return self._field
def filter(self, qs, value):
if isinstance(value, Lookup):
lookup = six.text_type(value.lookup_type)
value = value.value
else:
lookup = self.lookup_type
if value in ([], (), {}, None, ''):
return qs
method = qs.exclude if self.exclude else qs.filter
qs = method(**{'%s__%s' % (self.name, lookup): value})
if self.distinct:
qs = qs.distinct()
return qs
class CharFilter(Filter):
field_class = forms.CharField
class BooleanFilter(Filter):
field_class = forms.NullBooleanField
def filter(self, qs, value):
if value is not None:
return qs.filter(**{self.name: value})
return qs
class ChoiceFilter(Filter):
field_class = forms.ChoiceField
class MultipleChoiceFilter(Filter):
"""
This filter preforms an OR query on the selected options.
"""
field_class = forms.MultipleChoiceField
def filter(self, qs, value):
value = value or ()
if len(value) == len(self.field.choices):
return qs
q = Q()
for v in value:
q |= Q(**{self.name: v})
return qs.filter(q).distinct()
class DateFilter(Filter):
field_class = forms.DateField
class DateTimeFilter(Filter):
field_class = forms.DateTimeField
class TimeFilter(Filter):
field_class = forms.TimeField
class ModelChoiceFilter(Filter):
field_class = forms.ModelChoiceField
class ModelMultipleChoiceFilter(MultipleChoiceFilter):
field_class = forms.ModelMultipleChoiceField
class NumberFilter(Filter):
field_class = forms.DecimalField
class RangeFilter(Filter):
field_class = RangeField
def filter(self, qs, value):
if value:
q = Q()
if value.start:
q |= Q(**{'%s__gt'%self.name:value.start})
if value.stop:
q |= Q(**{'%s__lt'%self.name:value.stop})
return qs.filter(q)
return qs
_truncate = lambda dt: dt.replace(hour=0, minute=0, second=0)
class DateRangeFilter(ChoiceFilter):
options = {
'': (_('Any date'), lambda qs, name: qs.all()),
1: (_('Today'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month,
'%s__day' % name: now().day
})),
2: (_('Past 7 days'), lambda qs, name: qs.filter(**{
'%s__gte' % name: _truncate(now() - timedelta(days=7)),
'%s__lt' % name: _truncate(now() + timedelta(days=1)),
})),
3: (_('This month'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
'%s__month' % name: now().month
})),
4: (_('This year'), lambda qs, name: qs.filter(**{
'%s__year' % name: now().year,
})),
}
def __init__(self, *args, **kwargs):
kwargs['choices'] = [
(key, value[0]) for key, value in six.iteritems(self.options)]
super(DateRangeFilter, self).__init__(*args, **kwargs)
def filter(self, qs, value):
try:
value = int(value)
except (ValueError, TypeError):
value = ''
return self.options[value][1](qs, self.name)
class AllValuesFilter(ChoiceFilter):
@property
def field(self):
qs = self.model._default_manager.distinct()
qs = qs.order_by(self.name).values_list(self.name, flat=True)
self.extra['choices'] = [(o, o) for o in qs]
return super(AllValuesFilter, self).field
class MethodFilter(Filter):
"""
This filter will allow you to run a method that exists on the filterset class
"""
def __init__(self, *args, **kwargs):
# Get the action out of the kwargs
action = kwargs.get('action', None)
# If the action is a string store the action and set the action to our own filter method
# so it can be backwards compatible and work as expected, the parent will still treat it as
# a filter that has an action
self.parent_action = ''
text_types = (str, six.text_type)
if type(action) in text_types:
self.parent_action = str(action)
kwargs.update({
'action': self.filter
})
# Call the parent
super(MethodFilter, self).__init__(*args, **kwargs)
def filter(self, qs, value):
"""
This filter method will act as a proxy for the actual method we want to call.
It will try to find the method on the parent filterset, if not it defaults
to just returning the queryset
"""
parent = getattr(self, 'parent', None)
parent_filter_method = getattr(parent, self.parent_action, None)
if parent_filter_method is not None:
return parent_filter_method(qs, value)
return qs
|
Python
| 0.000001
|
@@ -4149,33 +4149,32 @@
q
-%7C
= Q(**%7B'%25s__gt'%25
@@ -4236,25 +4236,25 @@
q
-%7C
+&
= Q(**%7B'%25s__
|
c91beca414a5216a3fd5f8f5ef1c1643f0aea2f9
|
Tag the rc release
|
oct2py/__init__.py
|
oct2py/__init__.py
|
# -*- coding: utf-8 -*-
# Copyright (c) oct2py developers.
# Distributed under the terms of the MIT License.
"""
Oct2Py is a means to seamlessly call M-files and GNU Octave functions from
Python.
It manages the Octave session for you, sharing data behind the scenes using
MAT files. Usage is as simple as:
.. code-block:: python
>>> import oct2py
>>> oc = oct2py.Oct2Py()
>>> x = oc.zeros(3,3)
>>> print(x, x.dtype.str) # doctest: +SKIP
[[ 0. 0. 0.]
[ 0. 0. 0.]
[ 0. 0. 0.]] <f8
If you want to run legacy m-files, do not have MATLAB(TM), and do not fully
trust a code translator, this is your library.
"""
from __future__ import absolute_import, print_function, division
from .core import Oct2Py
from .io import Struct, Cell, StructArray
from .utils import get_log, Oct2PyError
from .demo import demo
from .speed_check import speed_check
from .thread_check import thread_check
__version__ = '4.0-dev'
__all__ = ['Oct2Py', 'Oct2PyError', 'octave', 'Struct', 'Cell', 'StructArray',
'demo', 'speed_check', 'thread_check', '__version__', 'get_log']
try:
octave = Oct2Py()
except Oct2PyError as e:
print(e)
def kill_octave():
"""Kill all octave instances (cross-platform).
This will restart the "octave" instance. If you have instantiated
Any other Oct2Py objects, you must restart them.
"""
import os
if os.name == 'nt':
os.system('taskkill /im octave /f')
else:
os.system('killall -9 octave')
os.system('killall -9 octave-cli')
octave.restart()
|
Python
| 0
|
@@ -968,12 +968,11 @@
'4.0
--dev
+rc1
'%0D%0A_
|
35801fdc81c4f5f93862b900bba656cfbbf1652c
|
Prepare version 0.1.18
|
djangocms_spa/__init__.py
|
djangocms_spa/__init__.py
|
__version__ = '0.1.17'
|
Python
| 0
|
@@ -17,7 +17,7 @@
.1.1
-7
+8
'%0A
|
42abea9c7c8a8317d85eddd8737fb75312a2576a
|
Update MNIST_CNN.py
|
MNIST_CNN/MNIST_CNN.py
|
MNIST_CNN/MNIST_CNN.py
|
import numpy as np
import time
from Neural_Networks import *
def Read_MNIST(training_set_images, training_set_labels, test_set_images, test_set_labels, number_training, number_test, image, target_output):
# training set images
file = open(training_set_images, 'rb')
for _ in range(4):
file.read(4)
for h in range(number_training):
for j in range(28 * 28):
image[h][j] = int.from_bytes(file.read(1), byteorder='little') / 255
file.close()
# training set labels
file = open(training_set_labels, 'rb')
for _ in range(2):
file.read(4)
for h in range(number_training):
label = int.from_bytes(file.read(1), byteorder='little')
for j in range(10):
target_output[h][j] = float(j == label)
file.close()
# test set images
file = open(test_set_images, 'rb')
for _ in range(4):
file.read(4)
for h in range(number_training, number_training + number_test):
for j in range(28 * 28):
image[h][j] = int.from_bytes(file.read(1), byteorder='little') / 255
file.close()
# test set labels
file = open(test_set_labels, 'rb')
for _ in range(2):
file.read(4)
for h in range(number_training, number_training + number_test):
label = int.from_bytes(file.read(1), byteorder='little')
for j in range(10):
target_output[h][j] = float(j == label)
file.close()
batch_size = 60
decay_rate = 0.977
epsilon = 0.001
learning_rate = 0.005
number_iterations = 50
number_training = 60000
number_test = 10000
# train from scratch
NN = Neural_Networks()
NN.Add_Layer("MNIST", 1, 28, 28)
NN.Add_Layer("BN,ReLU", 24, 24, 24) # batch normalization, ReLU activation
NN.Add_Layer("", 24, 12, 12)
NN.Add_Layer("BN,ReLU", 24, 8, 8) # batch normalization, ReLU activation
NN.Add_Layer("BN,ReLU", 48, 8, 8) # batch normalization, ReLU activation
NN.Add_Layer("", 48, 4, 4)
NN.Add_Layer("BN,ReLU", 192) # batch normalization, ReLU activation
NN.Add_Layer("CE,softmax", 10) # cross-entropy loss, softmax activation
NN.Connect(1, 0, "W") # 5x5 convolution
NN.Connect(2, 1, "P,max") # 2x2 max pooling
NN.Connect(3, 2, "W,DS") # 5x5 depthwise separable convolution
NN.Connect(4, 3, "W") # 1x1 convolution
NN.Connect(5, 4, "P,max") # 2x2 max pooling
NN.Connect(6, 5, "W") # fully connected
NN.Connect(7, 6, "W") # fully connected
NN.Initialize(0, 0.01)
# or load pretrained model
# NN = Neural_Networks("MNIST_CNN.txt")
NN.Set_Number_Threads(int(input("The number of threads : ")))
image = np.zeros((number_training + number_test, 784), dtype='f')
target_output = np.zeros((number_training + number_test, 10), dtype='f')
path = input("path where MNIST handwritten digits dataset is : ")
Read_MNIST(path + "train-images.idx3-ubyte", path + "train-labels.idx1-ubyte", path + "t10k-images.idx3-ubyte", path + "t10k-labels.idx1-ubyte", number_training, number_test, image, target_output)
start = time.time()
for f in range(number_iterations):
score = [0, 0]
loss = NN.Train(batch_size, number_training, image, target_output, learning_rate, epsilon)
h = 0
for i in range(number_training + number_test):
h = h + 1
if h == batch_size or i == number_training + number_test - 1:
_input = np.array(image[i - h + 1])
output = np.zeros((h, 10), dtype='f')
for g in range(1, h):
_input = np.append(_input, image[i - h + g + 1])
NN.Test(h, _input, output)
for g in range(h):
score[0 if i - h + g + 1 < number_training else 1] += int(target_output[i - h + g + 1][np.argmax(output[g])])
h = 0
print('.', end='')
NN.Save("NN.txt")
print('score: {} / {}, {} / {}, loss = {:.6f}, step {}, {:.2f} sec'.format(score[0], number_training, score[1], number_test, loss, f + 1, time.time() - start))
learning_rate *= decay_rate
|
Python
| 0.000004
|
@@ -1627,16 +1627,21 @@
ch_size
+
= 60%0D%0Ade
@@ -1645,24 +1645,29 @@
%0Adecay_rate
+
= 0.977%0D%0Aeps
@@ -1671,16 +1671,24 @@
epsilon
+
= 0.001%0D
@@ -1702,16 +1702,18 @@
ng_rate
+
= 0.005%0D
@@ -1762,16 +1762,16 @@
60000%0D%0A
-
number_t
@@ -1774,16 +1774,20 @@
er_test
+
= 10000%0D
|
104e05f326b7138e524296c049a1860c8c8a8cea
|
document col_to_number
|
MPropulator/helpers.py
|
MPropulator/helpers.py
|
import string
def column_range(start, stop, skip_columns=None):
"""0-indexed function that returns a list of Excel column names, except
for skip_columns
:param start: column index at which you begin iterating
:param stop: column index at which you want to stop iterating
:param skip_columns: column NAMES you'd like to skip
:return: list of Excel column names
"""
if skip_columns is None:
skip_columns = []
if start < 0:
raise ValueError("Start must be >= 0")
if stop < 0:
raise ValueError("Stop must be >= 0")
return [column_name(i + 1) for i in range(start, stop) \
if column_name(i + 1) not in skip_columns]
def column_name(col):
""" 1-indexed function that, given a column number, returns
the Excel column name.
:rtype : string
:param col: the column you want to return
:return: name of the col-th Excel column
"""
assert isinstance(col, int), 'Column must be int'
assert col >= 1, 'Column must be >= 1'
excel_col = str()
div = col
while div:
(div, mod) = divmod(div - 1, 26)
excel_col = chr(mod + ord('A')) + excel_col
return excel_col
def col_to_number(col):
num = 0
for c in col:
if c in string.ascii_letters:
num = num * 26 + (ord(c.upper()) - ord('A')) + 1
else:
raise ValueError("Input had characters other than ASCII letters")
return num
def cell_name(row, col):
""" 0-indexed function that, given a row and column number,
returns the Excel cell name.
:param row: row index
:param col: column index
:return: string
"""
assert isinstance(row, int), 'Row must be int'
assert row >= 0, 'Row index must be >= 0'
assert col >= 0, 'Column index must be >= 0'
return column_name(col + 1) + str(row + 1)
|
Python
| 0.000301
|
@@ -8,16 +8,17 @@
string%0A%0A
+%0A
def colu
@@ -635,10 +635,8 @@
top)
- %5C
%0A
@@ -1187,20 +1187,17 @@
cel_col%0A
-
+%0A
%0Adef col
@@ -1209,24 +1209,295 @@
umber(col):%0A
+ %22%22%220-indexed function that, given a column name, returns%0A the number of that column in excel.%0A%0A col: string consisting of Excel column name%0A return: int that corresponds to excel column name%0A%0A col_to_number(%22A%22) =%3E 0%0A col_to_number(%22AB%22) =%3E 28%0A %22%22%22%0A
num = 0%0A
@@ -1580,17 +1580,16 @@
m * 26 +
-
(ord(c.
@@ -1705,16 +1705,16 @@
tters%22)%0A
-
retu
@@ -1721,16 +1721,17 @@
rn num%0A%0A
+%0A
def cell
|
660a3ccc0bbfedca23a7a317119c9402065c82b6
|
Fix the worksheet option.
|
logss.py
|
logss.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Log a row to a Google Spreadsheet."""
__author__ = 'Dominic Mitchell <dom@happygiraffe.net>'
import getpass
import optparse
import os
import sys
import gdata.spreadsheet.service
class Error(Exception):
pass
def Authenticate(client, username):
# TODO: OAuth. We must be able to do this without a password.
client.ClientLogin(username,
getpass.getpass('Password for %s: ' % username))
def ExtractKey(entry):
# This is what spreadsheetExample seems to do…
return entry.id.text.split('/')[-1]
def FindKeyOfSpreadsheet(client, name):
spreadsheets = client.GetSpreadsheetsFeed()
spreadsheet = [s for s in spreadsheets.entry if s.title.text == name]
if not spreadsheet:
raise Error('Can\'t find spreadsheet named %s', name)
if len(spreadsheet) > 1:
raise Error('More than one spreadsheet named %s', name)
return ExtractKey(spreadsheet[0])
def FindKeyOfWorksheet(client, name):
if name == 'default':
return name
worksheets = client.GetWorksheetsFeed()
worksheet = [w for w in worksheets.entry if w.title.text == name]
if not worksheet:
raise Error('Can\'t find worksheet named %s', name)
if len(worksheet) > 1:
raise Error('Many worksheets named %s', name)
return ExtractKey(worksheet[0])
def ColumnNamesHaveData(cols):
"""Are these just names, or do they have data (:)?"""
return len([c for c in cols if ':' in c]) > 0
def DefineFlags():
usage = u"""usage: %prog [options] [col1:va1 …]"""
desc = """
Log data into a Google Spreadsheet.
With no further arguments, a list of column names will be printed to stdout.
Otherwise, remaining arguments should be of the form `columnname:value'.
One row will be added for each invocation of this program.
If you just specify column names (without a value), then data will be read
from stdin in whitespace delimited form, and mapped to each column name
in order.
"""
parser = optparse.OptionParser(usage=usage, description=desc)
parser.add_option('--debug', dest='debug', action='store_true',
help='Enable debug output', default=False)
parser.add_option('--key', dest='key',
help='The key of the spreadsheet to update '
'(the value of the key= parameter in the URL)')
parser.add_option('--name', dest='name',
help='The name of the spreadsheet to update')
parser.add_option('--worksheet', dest='worksheet',
help='The name of the worksheet to update',
default='default')
parser.add_option('-u', '--username', dest='username',
help='Which username to log in as (default: %default)',
default='%s@gmail.com' % getpass.getuser())
return parser
def main():
parser = DefineFlags()
(opts, args) = parser.parse_args()
if (not opts.name and not opts.key) or (opts.name and opts.key):
parser.error('You must specify either --name or --key')
client = gdata.spreadsheet.service.SpreadsheetsService()
client.debug = opts.debug
client.source = os.path.basename(sys.argv[0])
Authenticate(client, opts.username)
key = opts.key or FindKeyOfSpreadsheet(client, opts.name)
wkey = FindKeyOfWorksheet(client, opts.worksheet)
if len(args) > 1:
cols = args
if ColumnNamesHaveData(cols):
# Data is mixed into column names.
data = dict(c.split(':', 1) for c in cols)
client.InsertRow(data, key, wksht_id=wkey)
else:
# Read from stdin, pipe data to spreadsheet.
for line in sys.stdin:
vals = line.rstrip().split()
data = dict(zip(cols, vals))
client.InsertRow(data, key, wksht_id=wkey)
else:
list_feed = client.GetListFeed(key)
for col in sorted(list_feed.entry[0].custom.keys()):
print col
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000005
|
@@ -961,32 +961,37 @@
orksheet(client,
+ key,
name):%0A if nam
@@ -1058,24 +1058,27 @@
ksheetsFeed(
+key
)%0A workshee
@@ -3269,24 +3269,29 @@
heet(client,
+ key,
opts.worksh
@@ -3761,24 +3761,39 @@
ListFeed(key
+, wksht_id=wkey
)%0A for co
|
c6793f73ba7f86767975ecdc7abdb287265c4733
|
add taggit app
|
meinberlin/settings.py
|
meinberlin/settings.py
|
"""
Django settings for meinberlin project.
Generated by 'django-admin startproject' using Django 1.8.17.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'qid$h1o8&wh#p(j)lifis*5-rf@lbiy8%^3l4x%@b$z(tli@ab'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'wagtail.wagtailforms',
'wagtail.wagtailredirects',
'wagtail.wagtailembeds',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailimages',
'wagtail.wagtailsearch',
'wagtail.wagtailadmin',
'wagtail.wagtailcore',
'wagtail.contrib.wagtailstyleguide',
'rest_framework',
'allauth',
'allauth.account',
'apps.cms',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
ROOT_URLCONF = 'meinberlin.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'meinberlin.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
|
Python
| 0.000001
|
@@ -1418,24 +1418,60 @@
yleguide',%0A%0A
+ 'taggit', # wagtail dependency%0A
'rest_fr
|
50bc10585dddfbabda31b857b9bb4319110ccb8b
|
allow bool values in TEST_OUTPUT_VERBOSE for backwards-compatibility
|
src/xmlrunner/extra/djangotestrunner.py
|
src/xmlrunner/extra/djangotestrunner.py
|
# -*- coding: utf-8 -*-
"""Custom Django test runner that runs the tests using the
XMLTestRunner class.
This script shows how to use the XMLTestRunner in a Django project. To learn
how to configure a custom TestRunner in a Django project, please read the
Django docs website.
"""
from django.conf import settings
try:
# Only available in Django1.3+
# http://docs.djangoproject.com/en/dev/topics/testing/#writing-unit-tests
from django.utils import unittest
except ImportError:
import unittest #we just defeault to the basic unittest
from django.db.models import get_app, get_apps
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.simple import (
build_suite, build_test, DjangoTestSuiteRunner, reorder_suite
)
from django.test.testcases import TestCase
import xmlrunner
class XMLTestRunner(DjangoTestSuiteRunner):
def run_tests(self, test_labels, verbosity=1, interactive=True, extra_tests=[]):
"""
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
setup_test_environment()
settings.DEBUG = False
verbosity = getattr(settings, 'TEST_OUTPUT_VERBOSE', 1)
descriptions = getattr(settings, 'TEST_OUTPUT_DESCRIPTIONS', False)
output = getattr(settings, 'TEST_OUTPUT_DIR', '.')
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, (TestCase,))
old_config = self.setup_databases()
result = xmlrunner.XMLTestRunner(
verbosity=verbosity, descriptions=descriptions,
output=output).run(suite)
self.teardown_databases(old_config)
teardown_test_environment()
return len(result.failures) + len(result.errors)
|
Python
| 0.000176
|
@@ -1758,16 +1758,98 @@
SE', 1)%0A
+ if isinstance(verbosity, bool):%0A verbosity = (1, 2)%5Bverbosity%5D%0A
|
46cca5f2f07625054be7fcead7179fe79f0c41a8
|
Use existing helper function
|
src/zeit/content/volume/browser/form.py
|
src/zeit/content/volume/browser/form.py
|
from zeit.cms.i18n import MessageFactory as _
import gocept.form.grouped
import transaction
import zeit.cms.browser.form
import zeit.cms.interfaces
import zeit.cms.repository.folder
import zeit.cms.repository.interfaces
import zeit.cms.settings.interfaces
import zeit.content.image.interfaces
import zeit.content.volume.interfaces
import zeit.content.volume.volume
import zope.component
import zope.formlib.form
import zope.formlib.interfaces
import zope.interface
import zope.schema
class DuplicateVolumeWarning(Exception):
zope.interface.implements(zope.formlib.interfaces.IWidgetInputError)
def doc(self):
return _(u'A volume with the given name already exists.')
class Base(object):
form_fields = zope.formlib.form.FormFields(
zeit.content.volume.interfaces.IVolume).select(
'product', 'year', 'volume',
'date_digital_published', 'teaserText')
field_groups = (
gocept.form.grouped.Fields(
_('Volume'),
('product', 'year', 'volume',
'date_digital_published', 'teaserText'),
css_class='column-left'),
gocept.form.grouped.RemainingFields(
_('Texts'),
css_class='column-right'),
)
def __init__(self, context, request):
"""Dynamically add fields for `IImageGroup` references from XML config.
We want to define the available references via an XML source, thus we
must read them on the fly and generate the schema fields accordingly.
To store the chosen values, we set `interface` on the field, thus it is
adapted to `IVolumeCovers` which stores the information in the the XML
of `Volume`.
"""
super(Base, self).__init__(context, request)
source = zeit.content.volume.interfaces.VOLUME_COVER_SOURCE(
self.context)
for name in source:
field = zope.schema.Choice(
title=source.title(name), required=False,
source=zeit.content.image.interfaces.imageGroupSource)
field.__name__ = name
field.interface = zeit.content.volume.interfaces.IVolumeCovers
self.form_fields += zope.formlib.form.FormFields(field)
self.field_groups += (gocept.form.grouped.Fields(
_('Covers'), tuple(source), css_class='column-right'),)
class Add(Base, zeit.cms.browser.form.AddForm):
title = _('Add volume')
factory = zeit.content.volume.volume.Volume
checkout = False
def suggestName(self, object):
"""Define __name__ automatically."""
return str(object.volume).rjust(2, '0')
def setUpWidgets(self, *args, **kw):
super(Add, self).setUpWidgets(*args, **kw)
settings = zeit.cms.settings.interfaces.IGlobalSettings(self.context)
if not self.widgets['year'].hasInput():
self.widgets['year'].setRenderedValue(settings.default_year)
if not self.widgets['volume'].hasInput():
self.widgets['volume'].setRenderedValue(settings.default_volume)
def add(self, object):
path = self.volume_location(object)
# The last part of the path is the filename for the volume object.
volume_filename = path[-1]
container = self.create_location(path[:-1], object)
if self._check_duplicate_volume(container, volume_filename):
return
container[volume_filename] = object
self._created_object = container[volume_filename]
self._finished_add = True
def create_location(self, path, object):
repository = zope.component.getUtility(
zeit.cms.repository.interfaces.IRepository)
folder = repository
for elem in path:
if folder.get(elem) is None:
folder[elem] = zeit.cms.repository.folder.Folder()
folder = folder[elem]
return folder
def volume_location(self, object):
location = object.product.location.format(
year=object.year, name=str(object.volume).rjust(2, '0'))
location = location.replace(zeit.cms.interfaces.ID_NAMESPACE, '')
return [x for x in location.split('/') if x]
def _check_duplicate_volume(self, location, name):
if location.get(name) is not None:
transaction.doom()
self.errors = (DuplicateVolumeWarning(),)
self.status = _('There were errors')
self.form_reset = False
return True
return False
class Edit(Base, zeit.cms.browser.form.EditForm):
title = _('Edit volume')
class Display(Base, zeit.cms.browser.form.DisplayForm):
title = _('View volume')
|
Python
| 0.000001
|
@@ -3256,29 +3256,52 @@
r =
-self.create_location(
+zeit.cms.content.add.find_or_create_folder(*
path
@@ -3305,24 +3305,16 @@
ath%5B:-1%5D
-, object
)%0A
@@ -3497,16 +3497,16 @@
lename%5D%0A
+
@@ -3536,378 +3536,8 @@
ue%0A%0A
- def create_location(self, path, object):%0A repository = zope.component.getUtility(%0A zeit.cms.repository.interfaces.IRepository)%0A%0A folder = repository%0A for elem in path:%0A if folder.get(elem) is None:%0A folder%5Belem%5D = zeit.cms.repository.folder.Folder()%0A folder = folder%5Belem%5D%0A%0A return folder%0A%0A
|
192c8bb4b3614a92edb19f71a42dfb3f973d160f
|
Remove unused method - we allow users to override runner parameters and make them immutable / set a default value inside action parameters.
|
st2api/st2api/controllers/v1/actions.py
|
st2api/st2api/controllers/v1/actions.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from mongoengine import ValidationError
from pecan import abort
import six
# TODO: Encapsulate mongoengine errors in our persistence layer. Exceptions
# that bubble up to this layer should be core Python exceptions or
# StackStorm defined exceptions.
from st2api.controllers import resource
from st2api.controllers.v1.actionviews import ActionViewsController
from st2common import log as logging
from st2common.constants.pack import DEFAULT_PACK_NAME
from st2common.exceptions.apivalidation import ValueValidationException
from st2common.models.api.base import jsexpose
from st2common.persistence.action import Action
from st2common.models.api.action import ActionAPI
from st2common.validators.api.misc import validate_not_part_of_system_pack
import st2common.validators.api.action as action_validator
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
class ActionsController(resource.ContentPackResourceController):
"""
Implements the RESTful web endpoint that handles
the lifecycle of Actions in the system.
"""
views = ActionViewsController()
model = ActionAPI
access = Action
supported_filters = {
'name': 'name',
'pack': 'pack'
}
query_options = {
'sort': ['pack', 'name']
}
include_reference = True
@staticmethod
def _validate_action_parameters(action, runnertype_db):
# check if action parameters conflict with those from the supplied runner_type.
conflicts = [p for p in action.parameters.keys() if p in runnertype_db.runner_parameters]
if len(conflicts) > 0:
msg = 'Parameters %s conflict with those inherited from runner_type : %s' % \
(str(conflicts), action.runner_type)
LOG.error(msg)
abort(http_client.CONFLICT, msg)
@jsexpose(body_cls=ActionAPI, status_code=http_client.CREATED)
def post(self, action):
"""
Create a new action.
Handles requests:
POST /actions/
"""
if not hasattr(action, 'pack'):
setattr(action, 'pack', DEFAULT_PACK_NAME)
# Perform validation
validate_not_part_of_system_pack(action)
action_validator.validate_action(action)
# ActionsController._validate_action_parameters(action, runnertype_db)
action_model = ActionAPI.to_model(action)
LOG.debug('/actions/ POST verified ActionAPI object=%s', action)
action_db = Action.add_or_update(action_model)
LOG.debug('/actions/ POST saved ActionDB object=%s', action_db)
extra = {'action_db': action_db}
LOG.audit('Action created. Action.id=%s' % (action_db.id), extra=extra)
action_api = ActionAPI.from_model(action_db)
return action_api
@jsexpose(arg_types=[str], body_cls=ActionAPI)
def put(self, action_ref_or_id, action):
action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)
action_id = action_db.id
if not getattr(action, 'pack', None):
action.pack = action_db.pack
# Perform validation
validate_not_part_of_system_pack(action)
action_validator.validate_action(action)
try:
action_db = ActionAPI.to_model(action)
action_db.id = action_id
action_db = Action.add_or_update(action_db)
except (ValidationError, ValueError) as e:
LOG.exception('Unable to update action data=%s', action)
abort(http_client.BAD_REQUEST, str(e))
return
action_api = ActionAPI.from_model(action_db)
LOG.debug('PUT /actions/ client_result=%s', action_api)
return action_api
@jsexpose(arg_types=[str], status_code=http_client.NO_CONTENT)
def delete(self, action_ref_or_id):
"""
Delete an action.
Handles requests:
POST /actions/1?_method=delete
DELETE /actions/1
DELETE /actions/mypack.myaction
"""
action_db = self._get_by_ref_or_id(ref_or_id=action_ref_or_id)
action_id = action_db.id
try:
validate_not_part_of_system_pack(action_db)
except ValueValidationException as e:
abort(http_client.BAD_REQUEST, str(e))
LOG.debug('DELETE /actions/ lookup with ref_or_id=%s found object: %s',
action_ref_or_id, action_db)
try:
Action.delete(action_db)
except Exception as e:
LOG.error('Database delete encountered exception during delete of id="%s". '
'Exception was %s', action_id, e)
abort(http_client.INTERNAL_SERVER_ERROR, str(e))
return
extra = {'action_db': action_db}
LOG.audit('Action deleted. Action.id=%s' % (action_db.id), extra=extra)
return None
|
Python
| 0.000002
|
@@ -2105,521 +2105,8 @@
ue%0A%0A
- @staticmethod%0A def _validate_action_parameters(action, runnertype_db):%0A # check if action parameters conflict with those from the supplied runner_type.%0A conflicts = %5Bp for p in action.parameters.keys() if p in runnertype_db.runner_parameters%5D%0A if len(conflicts) %3E 0:%0A msg = 'Parameters %25s conflict with those inherited from runner_type : %25s' %25 %5C%0A (str(conflicts), action.runner_type)%0A LOG.error(msg)%0A abort(http_client.CONFLICT, msg)%0A%0A
@@ -2543,87 +2543,8 @@
n)%0A%0A
- # ActionsController._validate_action_parameters(action, runnertype_db)%0A
|
1f044c497b5a25248827b5a50e3d898ae8c7f6af
|
Update local_settings.py
|
src/odontology/odontology/local_settings.py
|
src/odontology/odontology/local_settings.py
|
"""
Django settings for odontology project.
Generated by 'django-admin startproject' using Django 1.9.2.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'h0_%$7qsz@i-^7_&tcl0_xayt1ce46&20d2#xw=jhvn+gfpm45'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
#django-allauth
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'allauth.socialaccount.providers.google',
#pagination
'bootstrap_pagination',
#apps
'core',
'person',
'register',
]
SITE_ID = 1
MIDDLEWARE_CLASSES = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'odontology.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
WSGI_APPLICATION = 'odontology.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'odontology',
'USER': 'nanomolina',
'PASSWORD': 'nano1234',
'HOST': 'localhost',
'PORT': '',
}
}
# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'es-ar'
TIME_ZONE = 'America/Argentina/Cordoba'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_ROOT = os.path.join(BASE_DIR, 'static_F')
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
STATICFILES_DIRS = [
"static",
]
LOGIN_URL = '/accounts/login/'
LOGIN_REDIRECT_URL = '/home/'
SOCIALACCOUNT_PROVIDERS = \
{ 'google':
{ 'SCOPE': ['profile', 'email'],
'AUTH_PARAMS': { 'access_type': 'online'} }}
EMAIL_USE_TLS = True
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_PORT = 587
EMAIL_HOST_USER = DEFAULT_FROM_EMAIL = ''
EMAIL_HOST_PASSWORD = ''
|
Python
| 0.000001
|
@@ -681,58 +681,8 @@
= '
-h0_%25$7qsz@i-%5E7_&tcl0_xayt1ce46&20d2#xw=jhvn+gfpm45
'%0A%0A#
@@ -2865,26 +2865,16 @@
NAME': '
-odontology
',%0A
@@ -2889,18 +2889,8 @@
': '
-nanomolina
',%0A
@@ -2913,16 +2913,8 @@
': '
-nano1234
',%0A
|
48a243d48c9eb2a1165ff4cadf26313c6a3659c1
|
Add back changes which were accidentaly removed in ac123a028f296d49e09dda35efb3bb458fa0d161.
|
st2common/st2common/cmd/install_pack.py
|
st2common/st2common/cmd/install_pack.py
|
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from oslo_config import cfg
from st2common import config
from st2common import log as logging
from st2common.config import do_register_cli_opts
from st2common.script_setup import setup as common_setup
from st2common.util.pack_management import download_pack
from st2common.util.pack_management import get_and_set_proxy_config
from st2common.util.virtualenvs import setup_pack_virtualenv
__all__ = [
'main'
]
LOG = logging.getLogger(__name__)
def _register_cli_opts():
cli_opts = [
cfg.MultiStrOpt('pack', default=None, required=True, positional=True,
help='Name of the pack to install.'),
cfg.BoolOpt('verify-ssl', default=True,
help=('Verify SSL certificate of the Git repo from which the pack is '
'downloaded.')),
cfg.BoolOpt('force', default=False,
help='True to force pack installation and ignore install '
'lock file if it exists.'),
]
do_register_cli_opts(cli_opts)
def main(argv):
_register_cli_opts()
# Parse CLI args, set up logging
common_setup(config=config, setup_db=False, register_mq_exchanges=False,
register_internal_trigger_types=False)
packs = cfg.CONF.pack
verify_ssl = cfg.CONF.verify_ssl
force = cfg.CONF.force
proxy_config = get_and_set_proxy_config()
for pack in packs:
# 1. Download the pack
LOG.info('Installing pack "%s"' % (pack))
result = download_pack(pack=pack, verify_ssl=verify_ssl, force=force,
proxy_config=proxy_config, force_permissions=True)
success = result[2][0]
if success:
LOG.info('Successfully installed pack "%s"' % (pack))
else:
error = result[2][1]
LOG.error('Failed to install pack "%s": %s' % (pack, error))
sys.exit(2)
# 2. Setup pack virtual environment
LOG.info('Setting up virtualenv for pack "%s"' % (pack))
setup_pack_virtualenv(pack_name=pack, update=False, logger=LOG,
proxy_config=proxy_config, use_python3=False,
no_download=True)
LOG.info('Successfully set up virtualenv for pack "%s"' % (pack))
return 0
|
Python
| 0
|
@@ -2430,24 +2430,100 @@
ions=True)%0A%0A
+ # Raw pack name excluding the version%0A pack_name = result%5B1%5D%0A
succ
@@ -2617,32 +2617,37 @@
ck %22%25s%22' %25 (pack
+_name
))%0A else:
@@ -2743,16 +2743,21 @@
%25 (pack
+_name
, error)
@@ -2881,32 +2881,37 @@
ck %22%25s%22' %25 (pack
+_name
))%0A setup
@@ -2941,16 +2941,21 @@
ame=pack
+_name
, update
@@ -3169,16 +3169,21 @@
%25 (pack
+_name
))%0A%0A
|
d9810b8bc00b08275af0df0b213915151f553c3a
|
Fix comments in `ref_doc_generation.py`.
|
doc/ref_doc_generation.py
|
doc/ref_doc_generation.py
|
import logging
import os
from jinja2 import Environment, FileSystemLoader
# Input source directory and input list of directories.
CURRENT_DIR = os.path.dirname(os.path.realpath(__file__))
DO_PROJECT_NAME = 'DO-CV'
DO_SOURCE_DIR = os.path.join(CURRENT_DIR, '../src/DO')
DO_LIBRARIES = sorted(os.walk(DO_SOURCE_DIR).next()[1])
# Input directory containing Jinja2-based templates.
REF_DOC_TEMPLATE_DIR = 'ref_doc_templates'
# Output directory in which we put the reference documentation.
OUTPUT_REF_DOC_DIR = 'source/reference'
logger = logging.getLogger(__name__)
env = Environment(loader=FileSystemLoader(REF_DOC_TEMPLATE_DIR))
def list_source_files(library):
""" Establish the list of header files that constitute the target library.
Parameters
----------
library: str
The library name.
"""
# Get the absolute path of the module.
library_dir_path = os.path.join(DO_SOURCE_DIR, library)
source_files = []
for dir, sub_dirs, files in os.walk(library_dir_path):
logger.info('Exploring directory: {}'.format(dir))
# Get the relative path of the directory.
dir_relpath = os.path.relpath(dir, DO_SOURCE_DIR)
if dir_relpath == '.':
dir_relpath = ''
# Get the list of header files
for file in files:
if file.endswith('.hpp'):
file_relpath = os.path.join(dir_relpath, file)
source_files.append(file_relpath)
logger.info('Appended file: {}'.format(file_relpath))
return source_files
def list_projects_source():
""" Populate the list of projects source for breathe.
`breathe_projects_source` should be of the following form:
breathe_projects_source = {
'DO-CV': (
DO_SOURCE_DIR,
['Core.hpp', 'Core/Timer.hpp', 'Core/Color.hpp', ...
'Graphics.hpp', ...]
)
}
"""
header_files = []
for library in DO_LIBRARIES:
master_header_file = '{}.hpp'.format(library)
modules = list_source_files(library)
header_files.append(master_header_file)
header_files.extend(modules)
breathe_projects_source = {
DO_PROJECT_NAME: (DO_SOURCE_DIR, header_files)
}
return breathe_projects_source
def generate_section(title):
markup = '=' * len(title)
return '\n'.join([title, markup])
def generate_module_doc(library, module):
""" Generate the reference documentation of the module.
library: str
The name of the library.
module: str
The name of the module.
"""
module_dir = os.path.dirname(module)
# Create the directory if necessary.
output_dir = os.path.join(OUTPUT_REF_DOC_DIR, module_dir)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Generate the reference documentation.
template = env.get_template('module.rst')
title, _ = os.path.splitext(os.path.basename(module))
context_data = {
'section': generate_section(title),
'module': os.path.basename(module),
'library': DO_PROJECT_NAME
}
rendered_template = template.render(**context_data)
# Save the rendered documentation to file.
output_file_path = '{}.rst'.format(os.path.join(output_dir, title))
with open(output_file_path, 'w') as output_file:
output_file.write(rendered_template)
def generate_ref_doc(library, module_list):
""" This generates automatically rst files for the reference documentation
of the target library.
Parameters
----------
library: str
The library name.
module_list: list(str)
        The list of modules that constitute the library.
"""
# Render the reference documentation.
template = env.get_template('library.rst')
# The list of documentation files for each module.
doc_files = [os.path.splitext(module)[0] for module in module_list]
context_data = {
'section': generate_section(library),
'library': library,
'modules': doc_files
}
rendered_template = template.render(**context_data)
# Save the rendered documentation to file.
if not os.path.exists(OUTPUT_REF_DOC_DIR):
os.makedirs(OUTPUT_REF_DOC_DIR)
output_file_path = '{}.rst'.format(os.path.join(OUTPUT_REF_DOC_DIR,
library))
with open(output_file_path, 'w') as output_file:
output_file.write(rendered_template)
def generate_ref_doc_toc():
""" Generate the table of contents of the reference documentation.
"""
template = env.get_template('ref_doc_toc.rst')
context_data = {
'libraries': DO_LIBRARIES
}
rendered_template = template.render(**context_data)
with open(os.path.join('source', 'ref_doc_toc.rst'), 'w') as output_file:
output_file.write(rendered_template)
def generate_all_ref_doc():
""" Convenience function to generate all the reference documentation.
"""
# Generate the documentation index.
generate_ref_doc_toc()
for library in DO_LIBRARIES:
# Remove the master header file from the list of modules.
module_list = list_source_files(library)
# Generate documentation index of the library.
generate_ref_doc(library, module_list)
# Generate documentation file for each module of the library.
for module in module_list:
generate_module_doc(library, module)
if __name__ == '__main__':
generate_all_ref_doc()
sources = list_projects_source()
|
Python
| 0
|
@@ -855,30 +855,41 @@
path of the
-module
+library directory
.%0A librar
@@ -1283,24 +1283,25 @@
header files
+.
%0A for
|
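d9810b8 only rewords two comments in list_source_files; decoded, they read:

    # Get the absolute path of the library directory.
    library_dir_path = os.path.join(DO_SOURCE_DIR, library)

and, further down:

        # Get the list of header files.
        for file in files: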
aafa325272649f03a3cd02cb82f2362704142b98
|
Improve exception procedure
|
buzz2weibo.py
|
buzz2weibo.py
|
#!/usr/bin/python
# vim: set fileencoding=utf-8 :
# buzz2weibo
# Copyright 2011 Sun Zhigang
# See LICENSE for details.
from config import *
from urllib2 import urlopen, URLError
from json import load
from activity import *
from weibopy.auth import OAuthHandler
from weibopy.api import API
import os, errno
WEIBO_APP_KEY = '3127127763'
WEIBO_APP_SECRET = '21cc35f55fc8fe73b73162964c0bb415'
# Maximum number of posts to sync per run. Default is 3. Syncing too many in a row gets the account suspended.
WEIBO_MAX_SYNC_COUNT = 3
def post2weibo(api, act):
message = act.content + act.link
if act.geo != '':
geo = act.geo.split(' ')
else:
geo = [None, None]
if act.image != '':
        # Download the image file
try:
u = urlopen(act.image);
data = u.read()
u.close()
except URLError:
            # If the download fails, don't bother retrying; just post it as a plain message
status = api.update_status(status=message, lat=geo[0], long=geo[1])
return
filename = IMAGES_PATH + '/' + act.image_filename
f = open(filename, 'w')
f.write(data)
f.close()
status = api.upload(filename, status=message, lat=geo[0], long=geo[1])
else:
status = api.update_status(status=message, lat=geo[0], long=geo[1])
# Create the images directory
try:
os.makedirs(IMAGES_PATH)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST:
pass
else:
raise
if USE_HTTPS:
prefix = 'https://'
else:
prefix = 'http://'
buzz_url=prefix + 'www.googleapis.com/buzz/v1/activities/' + BUZZ_USERID + '/@public?alt=json'
# Read buzz
fp = urlopen(buzz_url)
#fp = open('buzz.json')
buzz = load(fp)
fp.close()
# Weibo authentication
auth = OAuthHandler(WEIBO_APP_KEY, WEIBO_APP_SECRET)
auth.setToken(WEIBO_TOKEN_KEY, WEIBO_TOKEN_SECRET)
api = API(auth)
# Read the activity ids that have already been synced
synced_ids = set()
try:
fp = open(HISTORY_FILE, 'r')
for line in fp:
synced_ids.add(line.strip())
fp.close()
except IOError as (errno, strerror):
    # If the file doesn't exist, carry on; otherwise re-raise the exception
if errno != 2:
raise
# Start syncing
count = 0
for item in buzz['data']['items']:
    # Parse the buzz item
try:
        # If the source title is "Source Name", handle it with SourceNameActivity
act = eval(item['source']['title'].replace(' ', '') + 'Activity(item)')
except NameError:
        # No matching SourceNameActivity class: NameError lands here and WildcardActivity is the default handler
act = WildcardActivity(item);
    # Sync anything that hasn't been synced yet
if act.id not in synced_ids:
print '-----------------------'
print 'syncing ' + act.id
print item['source']['title']
if act.content != '':
print act.content
if act.link != '':
print act.link
if act.image != '':
print act.image
if act.image_filename != '':
print act.image_filename
if act.geo != '':
print act.geo
if not DEBUG:
post2weibo(api, act)
synced_ids.add(act.id)
            # Write the synced activity ids to the history file
fp = open(HISTORY_FILE, 'w')
for id in synced_ids:
fp.write(id + '\n')
fp.close()
count = count + 1
if count >= WEIBO_MAX_SYNC_COUNT:
break
|
Python
| 0.000015
|
@@ -1283,30 +1283,12 @@
rror
- as exc: # Python %3E2.5
+, e:
%0A
@@ -1292,18 +1292,16 @@
if e
-xc
.errno =
@@ -1897,30 +1897,11 @@
rror
- as (errno, strerror)
+, e
:%0A
@@ -1931,16 +1931,18 @@
%0A if
+e.
errno !=
@@ -1942,17 +1942,28 @@
rrno !=
-2
+errno.ENOENT
:%0A
|
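Decoded, aafa325 moves both handlers to the older comma-based except syntax (the 'as' form needs Python >= 2.6) and compares against errno.ENOENT instead of a bare 2; note the old 'as (errno, strerror)' unpacking also shadowed the errno module, which the new binding avoids:

try:
    os.makedirs(IMAGES_PATH)
except OSError, e:
    if e.errno == errno.EEXIST:
        pass
    else:
        raise

# and, for the history file:
except IOError, e:
    # If the file doesn't exist, carry on; otherwise re-raise the exception
    if e.errno != errno.ENOENT:
        raise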
6ec10208ea799f417b612273485e081b5b07eb21
|
Fix bug where __init__ was accessed for objects that might not have it.
|
doc/sphinxext/numpydoc.py
|
doc/sphinxext/numpydoc.py
|
"""
========
numpydoc
========
Sphinx extension that handles docstrings in the Numpy standard format. [1]
It will:
- Convert Parameters etc. sections to field lists.
- Convert See Also section to a See also entry.
- Renumber references.
- Extract the signature from the docstring, if it can't be determined otherwise.
.. [1] http://projects.scipy.org/numpy/wiki/CodingStyleGuidelines#docstring-standard
"""
import os, re, pydoc
from docscrape_sphinx import get_doc_object, SphinxDocString
from sphinx.util.compat import Directive
import inspect
def mangle_docstrings(app, what, name, obj, options, lines,
reference_offset=[0]):
cfg = dict(use_plots=app.config.numpydoc_use_plots,
show_class_members=app.config.numpydoc_show_class_members)
if what == 'module':
# Strip top title
title_re = re.compile(ur'^\s*[#*=]{4,}\n[a-z0-9 -]+\n[#*=]{4,}\s*',
re.I|re.S)
lines[:] = title_re.sub(u'', u"\n".join(lines)).split(u"\n")
else:
doc = get_doc_object(obj, what, u"\n".join(lines), config=cfg)
lines[:] = unicode(doc).split(u"\n")
if app.config.numpydoc_edit_link and hasattr(obj, '__name__') and \
obj.__name__:
if hasattr(obj, '__module__'):
v = dict(full_name=u"%s.%s" % (obj.__module__, obj.__name__))
else:
v = dict(full_name=obj.__name__)
lines += [u'', u'.. htmlonly::', '']
lines += [u' %s' % x for x in
(app.config.numpydoc_edit_link % v).split("\n")]
# replace reference numbers so that there are no duplicates
references = []
for line in lines:
line = line.strip()
m = re.match(ur'^.. \[([a-z0-9_.-])\]', line, re.I)
if m:
references.append(m.group(1))
# start renaming from the longest string, to avoid overwriting parts
references.sort(key=lambda x: -len(x))
if references:
for i, line in enumerate(lines):
for r in references:
if re.match(ur'^\d+$', r):
new_r = u"R%d" % (reference_offset[0] + int(r))
else:
new_r = u"%s%d" % (r, reference_offset[0])
lines[i] = lines[i].replace(u'[%s]_' % r,
u'[%s]_' % new_r)
lines[i] = lines[i].replace(u'.. [%s]' % r,
u'.. [%s]' % new_r)
reference_offset[0] += len(references)
def mangle_signature(app, what, name, obj, options, sig, retann):
# Do not try to inspect classes that don't define `__init__`
if (inspect.isclass(obj) and
'initializes x; see ' in pydoc.getdoc(obj.__init__)):
return '', ''
if not (callable(obj) or hasattr(obj, '__argspec_is_invalid_')): return
if not hasattr(obj, '__doc__'): return
doc = SphinxDocString(pydoc.getdoc(obj))
if doc['Signature']:
sig = re.sub(u"^[^(]*", u"", doc['Signature'])
return sig, u''
def initialize(app):
try:
app.connect('autodoc-process-signature', mangle_signature)
except:
monkeypatch_sphinx_ext_autodoc()
def setup(app, get_doc_object_=get_doc_object):
global get_doc_object
get_doc_object = get_doc_object_
app.connect('autodoc-process-docstring', mangle_docstrings)
app.connect('builder-inited', initialize)
app.add_config_value('numpydoc_edit_link', None, False)
app.add_config_value('numpydoc_use_plots', None, False)
app.add_config_value('numpydoc_show_class_members', True, True)
# Extra mangling directives
name_type = {
'cfunction': 'function',
'cmember': 'attribute',
'cmacro': 'function',
'ctype': 'class',
'cvar': 'object',
'class': 'class',
'function': 'function',
'attribute': 'attribute',
'method': 'function',
'staticmethod': 'function',
'classmethod': 'function',
}
for name, objtype in name_type.items():
app.add_directive('np-' + name, wrap_mangling_directive(name, objtype))
#------------------------------------------------------------------------------
# Input-mangling directives
#------------------------------------------------------------------------------
from docutils.statemachine import ViewList
def get_directive(name):
from docutils.parsers.rst import directives
try:
return directives.directive(name, None, None)[0]
except AttributeError:
pass
try:
# docutils 0.4
return directives._directives[name]
except (AttributeError, KeyError):
raise RuntimeError("No directive named '%s' found" % name)
def wrap_mangling_directive(base_directive_name, objtype):
base_directive = get_directive(base_directive_name)
if inspect.isfunction(base_directive):
base_func = base_directive
class base_directive(Directive):
required_arguments = base_func.arguments[0]
optional_arguments = base_func.arguments[1]
final_argument_whitespace = base_func.arguments[2]
option_spec = base_func.options
has_content = base_func.content
def run(self):
return base_func(self.name, self.arguments, self.options,
self.content, self.lineno,
self.content_offset, self.block_text,
self.state, self.state_machine)
class directive(base_directive):
def run(self):
env = self.state.document.settings.env
name = None
if self.arguments:
m = re.match(r'^(.*\s+)?(.*?)(\(.*)?', self.arguments[0])
name = m.group(2).strip()
if not name:
name = self.arguments[0]
lines = list(self.content)
mangle_docstrings(env.app, objtype, name, None, None, lines)
self.content = ViewList(lines, self.content.parent)
return base_directive.run(self)
return directive
#------------------------------------------------------------------------------
# Monkeypatch sphinx.ext.autodoc to accept argspecless autodocs (Sphinx < 0.5)
#------------------------------------------------------------------------------
def monkeypatch_sphinx_ext_autodoc():
global _original_format_signature
import sphinx.ext.autodoc
if sphinx.ext.autodoc.format_signature is our_format_signature:
return
print "[numpydoc] Monkeypatching sphinx.ext.autodoc ..."
_original_format_signature = sphinx.ext.autodoc.format_signature
sphinx.ext.autodoc.format_signature = our_format_signature
def our_format_signature(what, obj):
r = mangle_signature(None, what, None, obj, None, None, None)
if r is not None:
return r[0]
else:
return _original_format_signature(what, obj)
|
Python
| 0
|
@@ -2679,16 +2679,57 @@
bj) and%0A
+ (not hasattr(obj, '__init__') or%0A
@@ -2780,16 +2780,17 @@
init__))
+)
:%0A
|
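Reconstructed, 6ec1020 wraps the obj.__init__ access in a hasattr() guard, so classes without their own __init__ no longer raise AttributeError (exact line wrapping approximated):

    # Do not try to inspect classes that don't define `__init__`
    if (inspect.isclass(obj) and
            (not hasattr(obj, '__init__') or
             'initializes x; see ' in pydoc.getdoc(obj.__init__))):
        return '', ''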
c00a12fb593e329cc1fc2d3710cd9e89d0abfa16
|
set production api url
|
c4r/config.py
|
c4r/config.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Server parameters
baseApiUrl = 'http://stage.cloud4rpi.io:3000/api'
|
Python
| 0.000001
|
@@ -82,17 +82,12 @@
http
+s
://
-stage.
clou
|
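Applied, the one-line delta switches the scheme to https and drops the staging host, leaving the port and path untouched:

baseApiUrl = 'https://cloud4rpi.io:3000/api'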
6bed5a89f92e1ef7538058f755fecbdde2476493
|
Add debug output.
|
newsdedup.py
|
newsdedup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""News dedup for Tiny Tiny RSS."""
#
# Copyright (C) 2015 Peter Reuterås
import ConfigParser
import argparse
import logging
import sys
import time
from collections import deque
from fuzzywuzzy import fuzz
from time import gmtime, strftime
from ttrss.client import TTRClient
def read_configuration(config_file):
"""Read configuration file."""
config = ConfigParser.RawConfigParser()
config.read(config_file)
if config.sections() == []:
print "Can't find configuration file."
sys.exit(1)
return config
def init_ttrss(config):
"""Init Tiny tiny RSS API."""
hostname = config.get('ttrss', 'hostname')
username = config.get('ttrss', 'username')
password = config.get('ttrss', 'password')
return TTRClient(hostname, username, password, auto_login=True)
def init_title_queue(config):
"""Init deque queue to store handled titles."""
maxcount = int(config.get('newsdedup', 'maxcount'))
return deque(maxlen=maxcount)
def init_ignore_list(config):
"""Read ignore list from config and store in array."""
ignorestring = config.get('newsdedup', 'ignore')
return ignorestring.split(',')
def learn_last_read(rss, queue, arguments, config):
"""Get maxcount of read RSS and add to queue."""
maxlearn = int(config.get('newsdedup', 'maxcount'))
feeds = rss.get_feeds()
headlines = feeds[3].headlines(view_mode='all_articles', limit=1)
start_id = headlines[0].id - maxlearn - rss.get_unread_count()
learned = 0
while learned < maxlearn:
limit = 200 if maxlearn > 200 else maxlearn
headlines = feeds[3].headlines(
view_mode='all_articles',
since_id=start_id + learned, limit=limit)
for article in headlines:
if not article.unread:
queue.append(article.title)
learned += 1
if arguments.verbose:
print_time_message(arguments, "Learned titles from " + str(learned) + " RSS articles.")
return queue
def compare_to_queue(queue, head, ratio, arguments):
"""Compare current title to all in queue."""
for item in queue:
if fuzz.token_sort_ratio(item, head.title) > ratio:
if arguments.verbose:
print_time_message(arguments, "### Old title: " + item)
print_time_message(arguments, "### New: " + head.feed_title + ": " + head.title)
print_time_message(arguments, "### Ratio:" + fuzz.token_sort_ratio(item, head.title))
return fuzz.token_sort_ratio(item, head.title)
return 0
def handle_known_news(rss, head):
"""Mark read and add stare. Might change in the future."""
rss.update_article(head.id, 1, 0)
rss.mark_read(head.id)
def print_time_message(arguments, message):
"""Print time and message."""
if arguments.daemon:
print message
else:
print strftime("%Y-%m-%d %H:%M:%S:", gmtime()), message
def monitor_rss(rss, queue, ignore_list, arguments, config):
"""Main function to check new rss posts."""
feeds = rss.get_feeds()
headlines = feeds[3].headlines(view_mode='all_articles', limit=1)
start_id = headlines[0].id - rss.get_unread_count()
ratio = int(config.get('newsdedup', 'ratio'))
sleeptime = int(config.get('newsdedup', 'sleep'))
headlines = []
while True:
feeds = rss.get_feeds(unread_only=True)
try:
headlines = feeds[1].headlines(since_id=start_id, view_mode='unread')
except: # pylint: disable=bare-except
print_time_message(arguments, "Exception when trying to get feeds.")
for head in headlines:
if head.id > start_id:
start_id = head.id
if arguments.verbose:
print_time_message(arguments, head.feed_title + ": " + head.title)
if (not head.is_updated) and (not head.feed_id in ignore_list):
if compare_to_queue(queue, head, ratio, arguments) > 0:
handle_known_news(rss, head)
queue.append(head.title)
if arguments.debug:
print_time_message(arguments, "Sleeping.")
time.sleep(sleeptime)
def run(rss_api, title_queue, feed_ignore_list, args, configuration):
"""Main loop."""
while True:
try:
monitor_rss(rss_api, title_queue, feed_ignore_list, args, configuration)
except KeyboardInterrupt:
sys.exit(1)
except: # pylint: disable=bare-except
print_time_message(args, "Exception in monitor_rss.")
def main():
"""Main function to handle arguments."""
parser = argparse.ArgumentParser(
prog='newsdedup',
        description='''This program dedups RSS articles handled by
Tiny tiny RSS.''',
epilog='''Program made by Peter Reuterås, @reuteras on Twitter.
If you find a bug please let me know.''')
parser.add_argument('configFile', metavar='newsdedup.cfg',
default='newsdedup.cfg', nargs='?',
help='Specify configuration file.')
parser.add_argument('-d', '--debug', action="store_true",
help='Debug output (separate from verbose).')
parser.add_argument('-D', '--daemon', action="store_true",
help='Run as daemon.')
parser.add_argument('-q', '--quiet', action="store_true",
help='Quiet, i.e. catch SSL warnings.')
parser.add_argument('-v', '--verbose', action="store_true",
help='Verbose output.')
args = parser.parse_args()
if args.quiet:
logging.captureWarnings(True)
configuration = read_configuration(args.configFile)
rss_api = init_ttrss(configuration)
title_queue = init_title_queue(configuration)
feed_ignore_list = init_ignore_list(configuration)
learn_last_read(rss_api, title_queue, args, configuration)
run(rss_api, title_queue, feed_ignore_list, args, configuration)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -1896,16 +1896,151 @@
ed += 1%0A
+ if arguments.debug:%0A print_time_message(arguments, %22Debug: Learned titles from %22 + str(learned) + %22 RSS articles.%22)%0A
if a
|
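Decoded, 6bed5a8 adds a debug print inside the learning loop of learn_last_read, right after the inner for loop:

    while learned < maxlearn:
        limit = 200 if maxlearn > 200 else maxlearn
        headlines = feeds[3].headlines(
            view_mode='all_articles',
            since_id=start_id + learned, limit=limit)
        for article in headlines:
            if not article.unread:
                queue.append(article.title)
                learned += 1
        if arguments.debug:
            print_time_message(arguments, "Debug: Learned titles from " + str(learned) + " RSS articles.")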
920e75491f3aaa74980e11086cfebe911c2def4b
|
Remove yield from datasets tests
|
statsmodels/datasets/tests/test_data.py
|
statsmodels/datasets/tests/test_data.py
|
import numpy as np
import pandas as pd
import statsmodels.datasets as datasets
from statsmodels.datasets import co2
from statsmodels.datasets.utils import Dataset
def test_co2_python3():
# this failed in pd.to_datetime on Python 3 with pandas <= 0.12.0
dta = co2.load_pandas()
class TestDatasets(object):
@classmethod
def setup_class(cls):
exclude = ['check_internet', 'clear_data_home', 'get_data_home',
'get_rdataset', 'tests', 'utils', 'webuse']
cls.sets = []
for dataset_name in dir(datasets):
if not dataset_name.startswith('_') and dataset_name not in exclude:
cls.sets.append(dataset_name)
def check(self, dataset_name):
dataset = __import__('statsmodels.datasets.' + dataset_name, fromlist=[''])
data = dataset.load()
assert isinstance(data, Dataset)
assert isinstance(data.data, np.recarray)
df_data = dataset.load_pandas()
assert isinstance(data, Dataset)
assert isinstance(df_data.data, pd.DataFrame)
def test_all_datasets(self):
for dataset_name in self.sets:
yield (self.check, dataset_name)
|
Python
| 0
|
@@ -1,12 +1,30 @@
+import importlib%0A%0A
import numpy
@@ -54,53 +54,41 @@
pd%0A
-%0A
import
-statsmodels.datasets as datasets%0Afrom
+nose%0Aimport pytest%0A%0Aimport
sta
@@ -108,19 +108,8 @@
sets
- import co2
%0Afro
@@ -157,213 +157,8 @@
et%0A%0A
-%0Adef test_co2_python3():%0A # this failed in pd.to_datetime on Python 3 with pandas %3C= 0.12.0%0A dta = co2.load_pandas()%0A%0A%0Aclass TestDatasets(object):%0A%0A @classmethod%0A def setup_class(cls):%0A
excl
@@ -229,24 +229,16 @@
-
-
'get_rda
@@ -273,28 +273,20 @@
ebuse'%5D%0A
- cls.
+data
sets = %5B
@@ -287,24 +287,16 @@
ts = %5B%5D%0A
-
for data
@@ -311,16 +311,28 @@
in dir(
+statsmodels.
datasets
@@ -334,24 +334,16 @@
asets):%0A
-
if n
@@ -419,67 +419,180 @@
- cls.sets.append(dataset_name)%0A%0A def check(self,
+datasets.append(dataset_name)%0A%0A%0A# TODO: Remove nottest when nose support is dropped%0A@nose.tools.nottest%0A@pytest.mark.parametrize('dataset_name', datasets)%0Adef test_dataset(
data
@@ -602,28 +602,24 @@
_name):%0A
-
-
dataset = __
@@ -620,18 +620,31 @@
t =
-__
import
-__
+lib.import_module
('st
@@ -682,33 +682,14 @@
name
-, fromlist=%5B''%5D)%0A
+)%0A
-
data
@@ -706,20 +706,16 @@
.load()%0A
-
asse
@@ -735,36 +735,32 @@
(data, Dataset)%0A
-
assert isins
@@ -794,20 +794,16 @@
y)%0A%0A
-
-
df_data
@@ -826,20 +826,16 @@
andas()%0A
-
asse
@@ -840,32 +840,35 @@
sert isinstance(
+df_
data, Dataset)%0A
@@ -866,20 +866,16 @@
ataset)%0A
-
asse
@@ -917,20 +917,61 @@
Frame)%0A%0A
-
+%0A# TODO: Remove when nose support is dropped%0A
def test
@@ -988,23 +988,15 @@
ets(
-self
):%0A
-
-
for
@@ -1006,22 +1006,16 @@
aset
-_name in self.
+ in data
sets
@@ -1028,41 +1028,26 @@
-
- yield (self.check, dataset_name
+test_dataset(dataset
)%0A
|
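920e754 is effectively a rewrite: the yield-based generator test is replaced by a module-level dataset list and a parametrized test, kept nose-compatible during the transition, and the test_co2_python3 check is dropped along the way. Reconstructed from the delta, the new module reads roughly:

import importlib

import numpy as np
import pandas as pd
import nose
import pytest

import statsmodels.datasets
from statsmodels.datasets.utils import Dataset

exclude = ['check_internet', 'clear_data_home', 'get_data_home',
           'get_rdataset', 'tests', 'utils', 'webuse']
datasets = []
for dataset_name in dir(statsmodels.datasets):
    if not dataset_name.startswith('_') and dataset_name not in exclude:
        datasets.append(dataset_name)


# TODO: Remove nottest when nose support is dropped
@nose.tools.nottest
@pytest.mark.parametrize('dataset_name', datasets)
def test_dataset(dataset_name):
    dataset = importlib.import_module('statsmodels.datasets.' + dataset_name)
    data = dataset.load()
    assert isinstance(data, Dataset)
    assert isinstance(data.data, np.recarray)

    df_data = dataset.load_pandas()
    assert isinstance(df_data, Dataset)
    assert isinstance(df_data.data, pd.DataFrame)


# TODO: Remove when nose support is dropped
def test_all_datasets():
    for dataset in datasets:
        test_dataset(dataset)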
97badc176f4a8ac30eb3932359e2e132e36170c4
|
Increase the number of workers
|
docker/gunicorn_config.py
|
docker/gunicorn_config.py
|
import multiprocessing
from os import getenv
bind = '127.0.0.1:8001'
workers = multiprocessing.cpu_count() * 2
timeout = 60
threads = multiprocessing.cpu_count() * 2
max_requests = 1000
max_requests_jitter = 5
# Read the DEBUG setting from env var
try:
if getenv('DOCKER_SAL_DEBUG').lower() == 'true':
errorlog = '/var/log/gunicorn/gunicorn-error.log'
accesslog = '/var/log/gunicorn/gunicorn-access.log'
loglevel = 'info'
except:
pass
|
Python
| 0.672158
|
@@ -102,17 +102,17 @@
unt() *
-2
+3
%0Atimeout
@@ -157,17 +157,17 @@
unt() *
-2
+3
%0Amax_req
@@ -178,10 +178,9 @@
s =
-10
+5
00%0Am
|
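Despite the subject line, the delta touches three settings: workers and threads go from 2x to 3x the CPU count, and max_requests drops from 1000 to 500:

workers = multiprocessing.cpu_count() * 3
timeout = 60
threads = multiprocessing.cpu_count() * 3
max_requests = 500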
80357e07de707b0aa989a9910f6f2d8374d978a7
|
Add some comments about how we're cleaning callables
|
lib/ansible/template/safe_eval.py
|
lib/ansible/template/safe_eval.py
|
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
import sys
from ansible.compat.six import string_types
from ansible.compat.six.moves import builtins
from ansible import constants as C
from ansible.plugins import filter_loader, test_loader
def safe_eval(expr, locals={}, include_exceptions=False):
'''
This is intended for allowing things like:
with_items: a_list_variable
Where Jinja2 would return a string but we do not want to allow it to
call functions (outside of Jinja2, where the env is constrained).
Based on:
http://stackoverflow.com/questions/12523516/using-ast-and-whitelists-to-make-pythons-eval-safe
'''
# define certain JSON types
# eg. JSON booleans are unknown to python eval()
JSON_TYPES = {
'false': False,
'null': None,
'true': True,
}
# this is the whitelist of AST nodes we are going to
# allow in the evaluation. Any node type other than
# those listed here will raise an exception in our custom
# visitor class defined below.
SAFE_NODES = set(
(
ast.Add,
ast.BinOp,
ast.Call,
ast.Compare,
ast.Dict,
ast.Div,
ast.Expression,
ast.List,
ast.Load,
ast.Mult,
ast.Num,
ast.Name,
ast.Str,
ast.Sub,
ast.USub,
ast.Tuple,
ast.UnaryOp,
)
)
# AST node types were expanded after 2.6
if sys.version_info[:2] >= (2, 7):
SAFE_NODES.update(
set(
(ast.Set,)
)
)
# And in Python 3.4 too
if sys.version_info[:2] >= (3, 4):
SAFE_NODES.update(
set(
(ast.NameConstant,)
)
)
filter_list = []
for filter in filter_loader.all():
filter_list.extend(filter.filters().keys())
test_list = []
for test in test_loader.all():
test_list.extend(test.tests().keys())
CALL_WHITELIST = C.DEFAULT_CALLABLE_WHITELIST + filter_list + test_list
class CleansingNodeVisitor(ast.NodeVisitor):
def generic_visit(self, node, inside_call=False):
if type(node) not in SAFE_NODES:
raise Exception("invalid expression (%s)" % expr)
elif isinstance(node, ast.Call):
inside_call = True
elif isinstance(node, ast.Name) and inside_call:
if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
raise Exception("invalid function: %s" % node.id)
# iterate over all child nodes
for child_node in ast.iter_child_nodes(node):
self.generic_visit(child_node, inside_call)
if not isinstance(expr, string_types):
# already templated to a datastructure, perhaps?
if include_exceptions:
return (expr, None)
return expr
cnv = CleansingNodeVisitor()
try:
parsed_tree = ast.parse(expr, mode='eval')
cnv.visit(parsed_tree)
compiled = compile(parsed_tree, expr, 'eval')
result = eval(compiled, JSON_TYPES, dict(locals))
if include_exceptions:
return (result, None)
else:
return result
except SyntaxError as e:
# special handling for syntax errors, we just return
# the expression string back as-is to support late evaluation
if include_exceptions:
return (expr, None)
return expr
except Exception as e:
if include_exceptions:
return (expr, e)
return expr
|
Python
| 0
|
@@ -3230,16 +3230,218 @@
e_call:%0A
+ # Disallow calls to builtin functions that we have not vetted%0A # as safe. Other functions are excluded by setting locals in%0A # the call to eval() later on%0A
@@ -4099,32 +4099,253 @@
, expr, 'eval')%0A
+ # Note: passing our own globals and locals here constrains what%0A # callables (and other identifiers) are recognized. this is in%0A # addition to the filtering of builtins done in CleansingNodeVisitor%0A
result =
|
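80357e0 is comment-only; decoded, the two added notes sit next to the builtin check and the eval() call:

            elif isinstance(node, ast.Name) and inside_call:
                # Disallow calls to builtin functions that we have not vetted
                # as safe. Other functions are excluded by setting locals in
                # the call to eval() later on
                if hasattr(builtins, node.id) and node.id not in CALL_WHITELIST:
                    raise Exception("invalid function: %s" % node.id)

and:

        compiled = compile(parsed_tree, expr, 'eval')
        # Note: passing our own globals and locals here constrains what
        # callables (and other identifiers) are recognized. this is in
        # addition to the filtering of builtins done in CleansingNodeVisitor
        result = eval(compiled, JSON_TYPES, dict(locals))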
15090b84e1c7359c49cb45aec4d9b4d492f855ac
|
Update smb test to include port parameter
|
tests/scoring_engine/engine/checks/test_smb.py
|
tests/scoring_engine/engine/checks/test_smb.py
|
from scoring_engine.engine.basic_check import CHECKS_BIN_PATH
from tests.scoring_engine.engine.checks.check_test import CheckTest
class TestSMBCheck(CheckTest):
check_name = 'SMBCheck'
required_properties = ['share', 'file', 'hash']
properties = {
'share': 'ScoringShare',
'file': 'flag.txt',
'hash': '123456789'
}
accounts = {
'pwnbus': 'pwnbuspass'
}
cmd = CHECKS_BIN_PATH + "/smb_check --host '127.0.0.1' --user 'pwnbus' --pass 'pwnbuspass' --share 'ScoringShare' --file 'flag.txt' --hash '123456789'"
|
Python
| 0
|
@@ -463,16 +463,27 @@
7.0.0.1'
+ --port 100
--user
|
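Applied, the delta splices a --port flag (value 100, as encoded) into the expected command string:

    cmd = CHECKS_BIN_PATH + "/smb_check --host '127.0.0.1' --port 100 --user 'pwnbus' --pass 'pwnbuspass' --share 'ScoringShare' --file 'flag.txt' --hash '123456789'"

The delta shown covers only the command string; any matching 'port' entry in the check properties would have to come from elsewhere in the commit.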
0ce7fa3100ce3d1626519da3f5395622af0e7fdf
|
fix for issue 440
|
tests/window/WINDOW_SET_MOUSE_SYSTEM_CURSOR.py
|
tests/window/WINDOW_SET_MOUSE_SYSTEM_CURSOR.py
|
#!/usr/bin/env python
'''Test that mouse cursor can be set to a platform-dependent image.
Expected behaviour:
One window will be opened. Press the left and right arrow keys to cycle
through the system mouse cursors. The current cursor selected will
be printed to the terminal.
Note that not all cursors are unique on each platform; for example,
if a platform doesn't define a cursor for a given name, a suitable
replacement (e.g., a plain arrow) will be used instead.
Close the window or press ESC to end the test.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id: WINDOW_SET_MOUSE_VISIBLE.py 703 2007-02-28 14:18:00Z Alex.Holkner $'
import unittest
from pyglet import window
from pyglet.window import key
from pyglet.gl import *
class WINDOW_SET_MOUSE_PLATFORM_CURSOR(unittest.TestCase):
i = 0
def on_key_press(self, symbol, modifiers):
names = [
self.w.CURSOR_DEFAULT,
self.w.CURSOR_CROSSHAIR,
self.w.CURSOR_HAND,
self.w.CURSOR_HELP,
self.w.CURSOR_NO,
self.w.CURSOR_SIZE,
self.w.CURSOR_SIZE_UP,
self.w.CURSOR_SIZE_UP_RIGHT,
self.w.CURSOR_SIZE_RIGHT,
self.w.CURSOR_SIZE_DOWN_RIGHT,
self.w.CURSOR_SIZE_DOWN,
self.w.CURSOR_SIZE_DOWN_LEFT,
self.w.CURSOR_SIZE_LEFT,
self.w.CURSOR_SIZE_UP_LEFT,
self.w.CURSOR_SIZE_UP_DOWN,
self.w.CURSOR_SIZE_LEFT_RIGHT,
self.w.CURSOR_TEXT,
self.w.CURSOR_WAIT,
self.w.CURSOR_WAIT_ARROW,
]
if symbol == key.RIGHT:
self.i = (self.i + 1) % len(names)
elif symbol == key.LEFT:
self.i = (self.i - 1) % len(names)
cursor = self.w.get_system_mouse_cursor(names[self.i])
self.w.set_mouse_cursor(cursor)
print 'Set cursor to "%s"' % names[self.i]
return True
def test_set_visible(self):
self.width, self.height = 200, 200
self.w = w = window.Window(self.width, self.height)
w.push_handlers(self)
while not w.has_exit:
glClear(GL_COLOR_BUFFER_BIT)
w.dispatch_events()
w.flip()
w.close()
if __name__ == '__main__':
unittest.main()
|
Python
| 0
|
@@ -1610,16 +1610,79 @@
%5D%0A
+ if symbol == key.ESCAPE:%0A self.w.on_close()%0A
|
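Decoded, 0ce7fa3 adds an Escape handler right after the cursor-name list in on_key_press, so the test window can actually be closed with ESC:

        if symbol == key.ESCAPE:
            self.w.on_close()
        if symbol == key.RIGHT:
            self.i = (self.i + 1) % len(names)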
b3889bbdab80fb502c74b99b61cf36bae112ce2c
|
Add property decorator to getters
|
node/node.py
|
node/node.py
|
from configparser import ConfigParser
from driver import BTRFSDriver
class Node:
"""
# Dummy config example
[bk1-z3.presslabs.net]
ssd = True
"""
def __init__(self, context):
self._conf_path = context['node']['conf_path']
self._driver = BTRFSDriver(context['volume_path'])
self._name, self._labels = '', {}
config = ConfigParser()
config.read(self._conf_path)
try:
self._name = config.sections()[0]
for label, value in config[self._name].iteritems():
self._labels[label] = value
except IndexError:
pass
def get_subvolumes(self):
return self._driver.get_all()
def name(self):
return self._name
def labels(self):
return self._labels
|
Python
| 0
|
@@ -702,16 +702,30 @@
_all()%0A%0A
+ @property%0A
def
@@ -763,16 +763,30 @@
._name%0A%0A
+ @property%0A
def
|
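With the delta applied, the two getters become read-only properties:

    @property
    def name(self):
        return self._name

    @property
    def labels(self):
        return self._labels

Callers then write node.name and node.labels instead of node.name() and node.labels().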
33d81aa4077a656949822a8fe45ef3dd8ebbfe2c
|
add a missing import (#48006)
|
python/paddle/fluid/tests/unittests/xpu/process_group_bkcl.py
|
python/paddle/fluid/tests/unittests/xpu/process_group_bkcl.py
|
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import random
import numpy as np
import sys
import paddle
from paddle.fluid.framework import _test_eager_guard
from paddle.fluid.dygraph.parallel import ParallelEnv
def init_process_group(strategy=None):
nranks = ParallelEnv().nranks
rank = ParallelEnv().local_rank
is_master = True if rank == 0 else False
pg_group = dist.init_parallel_env()
return pg_group.process_group
class TestProcessGroupFp32(unittest.TestCase):
def setUp(self):
paddle.seed(2022)
random.seed(2022)
np.random.seed(2022)
self.config()
def config(self):
self.dtype = "float32"
self.shape = (2, 10, 5)
def test_create_process_group_bkcl(self):
with _test_eager_guard():
device_id = paddle.distributed.ParallelEnv().dev_id
paddle.set_device('xpu:%d' % device_id)
pg = init_process_group()
sys.stdout.write(
"rank {}: size {} name {}\n".format(
pg.rank(), pg.size(), pg.name()
)
)
sys.stdout.write(
"rank {}: test new group api ok\n".format(pg.rank())
)
# test allreduce sum
# rank 0
x = np.random.random(self.shape).astype(self.dtype)
tensor_x = paddle.to_tensor(x)
# rank 1
y = np.random.random(self.shape).astype(self.dtype)
tensor_y = paddle.to_tensor(y)
sum_result = tensor_x + tensor_y
if pg.rank() == 0:
task = dist.all_reduce(tensor_x)
assert np.array_equal(tensor_x, sum_result)
else:
task = dist.all_reduce(tensor_y)
assert np.array_equal(tensor_y, sum_result)
sys.stdout.write(
"rank {}: test allreduce sum api ok\n".format(pg.rank())
)
# TODO
# test allreduce max/min/prod
# test broadcast
# rank 0
x = np.random.random(self.shape).astype(self.dtype)
tensor_x = paddle.to_tensor(x)
# rank 1
y = np.random.random(self.shape).astype(self.dtype)
tensor_y = paddle.to_tensor(y)
broadcast_result = paddle.assign(tensor_x)
if pg.rank() == 0:
# XPU don't support event query by now, so just use sync op here
task = dist.broadcast(tensor_x, 0)
paddle.device.xpu.synchronize()
assert np.array_equal(broadcast_result, tensor_x)
else:
task = dist.broadcast(tensor_y, 0)
paddle.device.xpu.synchronize()
assert np.array_equal(broadcast_result, tensor_y)
sys.stdout.write(
"rank {}: test broadcast api ok\n".format(pg.rank())
)
# test barrier
# rank 0
if pg.rank() == 0:
pg.barrier(device_id)
# rank 1
else:
task = pg.barrier(device_id)
task.wait()
sys.stdout.write("rank {}: test barrier api ok\n".format(pg.rank()))
# test allgather
# rank 0
x = np.random.random(self.shape).astype(self.dtype)
y = np.random.random(self.shape).astype(self.dtype)
tensor_x = paddle.to_tensor(x)
tensor_y = paddle.to_tensor(y)
out_shape = list(self.shape)
out_shape[0] *= 2
out = np.random.random(out_shape).astype(self.dtype)
tensor_out = paddle.to_tensor(out)
if pg.rank() == 0:
task = pg.all_gather(tensor_x, tensor_out)
task.wait()
paddle.device.xpu.synchronize()
# rank 1
else:
tensor_out_list = [
paddle.empty_like(tensor_x),
paddle.empty_like(tensor_x),
]
task = dist.all_gather(tensor_out_list, tensor_y)
paddle.device.xpu.synchronize()
tensor_out = paddle.concat(tensor_out_list)
out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
out_2 = paddle.slice(
tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
)
assert np.array_equal(tensor_x, out_1)
assert np.array_equal(tensor_y, out_2)
sys.stdout.write(
"rank {}: test allgather api ok\n".format(pg.rank())
)
if pg.rank() == 0:
task = pg.all_gather(tensor_x, tensor_out)
task.wait()
paddle.device.xpu.synchronize()
# rank 1
else:
tensor_out_list = []
task = dist.all_gather(tensor_out_list, tensor_y)
paddle.device.xpu.synchronize()
tensor_out = paddle.concat(tensor_out_list)
out_1 = paddle.slice(tensor_out, [0], [0], [out_shape[0] // 2])
out_2 = paddle.slice(
tensor_out, [0], [out_shape[0] // 2], [out_shape[0]]
)
assert np.array_equal(tensor_x, out_1)
assert np.array_equal(tensor_y, out_2)
sys.stdout.write(
"rank {}: test allgather api2 ok\n".format(pg.rank())
)
class TestProcessGroupFp16(TestProcessGroupFp32):
def setUp(self):
paddle.seed(2022)
random.seed(2022)
np.random.seed(2022)
self.config()
def config(self):
self.dtype = "float16"
self.shape = (4, 20, 20)
if __name__ == "__main__":
unittest.main()
|
Python
| 0
|
@@ -679,16 +679,50 @@
paddle%0A
+import paddle.distributed as dist%0A
from pad
|
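The missing import added by #48006 is the dist alias the module already relies on (dist.init_parallel_env, dist.all_reduce, dist.broadcast, ...):

import paddle
import paddle.distributed as dist
from paddle.fluid.framework import _test_eager_guard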
af7a2e59b76a5c404e393a6fc1aeca9517018185
|
Fix peacock crash when filename with unicode exists
|
python/peacock/ExodusViewer/plugins/ExodusFilterProxyModel.py
|
python/peacock/ExodusViewer/plugins/ExodusFilterProxyModel.py
|
#!/usr/bin/env python2
#* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
from PyQt5 import QtWidgets, QtCore
import sys
import os
import re
class ExodusFilterProxyModel(QtCore.QSortFilterProxyModel):
"""
A filename filter for Exodus *.efiles.
"""
def filterAcceptsRow(self, row, parent):
index0 = self.sourceModel().index(row, 0, parent)
filename = str(self.sourceModel().filePath(index0))
if os.path.isdir(filename):
return True
match = re.search(r'(.*?)\.e(-s[0-9]+)', filename)
if not match or filename.endswith('.e'):
return True
else:
return False
if __name__ == "__main__":
qapp = QtWidgets.QApplication(sys.argv)
fd = QtWidgets.QFileDialog()
fd.setFileMode(QtWidgets.QFileDialog.ExistingFiles)
fd.setWindowTitle('Select ExodusII File(s)')
fd.setDirectory('/Users/slauae/projects/gui/tests/chigger/input')
fd.setNameFilter('ExodusII Files (*.e)')
fd.setOption(QtWidgets.QFileDialog.DontUseNativeDialog)
proxy = ExodusFilterProxyModel(fd)
fd.setProxyModel(proxy)
fd.raise_()
fd.exec_()
|
Python
| 0.000002
|
@@ -638,12 +638,8 @@
e =
-str(
self
@@ -669,17 +669,16 @@
(index0)
-)
%0A%0A
|
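The fix is to drop the str() coercion, which raises UnicodeEncodeError for non-ASCII paths under Python 2; the unicode path returned by filePath() is used directly:

        filename = self.sourceModel().filePath(index0)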
814b344082fbce471509c54c683470467dd8f814
|
use env to find python binary
|
main.pyw
|
main.pyw
|
#!/usr/bin/python3
from tkinter import *
from models import *
import logging
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BCM)
appWindow = Tk()
appWindow.title("Virtual Controller")
# Set fullscreen [Not necessary when debugging]
# appWindow.geometry("{0}x{1}+0+0".format(appWindow.winfo_screenwidth(), appWindow.winfo_screenheight()))
appWindow.focus_set() # <-- move focus to this widget
# Binds <Escape> key to quit the program
appWindow.bind("<Escape>", lambda e: e.widget.destroy())
# Removes the title bar and menu bar
appWindow.overrideredirect(True)
# This holds the controller state at any moment
ctrlState = ControllerState()
# The virtual controller can set state flags via the UI
app = VirtualControllerDisplay(appWindow, ctrlState)
# Instantiate the console controller that will send out the state to the console when needed
consoleCtrl = ConsoleController(ctrlState)
# Now loads the GPIO Controller that will set state flags depending on the GPIO inputs
# It needs the app to flash the buttons
gpioCtrl = GPIOController(ctrlState, app)
# Run main loop
appWindow.mainloop()
# Cleanup GPIOs
GPIO.cleanup()
|
Python
| 0.000018
|
@@ -4,16 +4,20 @@
usr/bin/
+env
python3%0A
|
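Applied, the shebang becomes:

#!/usr/bin/env python3

so the interpreter is resolved through PATH instead of being hard-coded at /usr/bin/python3.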
e6547ff12935b86ecf5fa35b78073b3001dd4de0
|
Use single quotes for strings everywhere.
|
mario.py
|
mario.py
|
#!/usr/bin/env python3
# Copyright (c) 2015 Damir Jelić.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import tempfile
import mimetypes
import subprocess
import urllib.request
from urllib.error import HTTPError, URLError
from urllib.parse import urlparse
def github_translation(url):
url = url.replace('github.com', 'raw.githubusercontent.com')
url = url.replace('blob/', '')
return url
VIDEO_STREAMER = 'livestreamer'
VIDEO_URLS = ('youtube.com', 'youtu.be', 'www.youtube.com')
STREAM_URLS = ('www.twitch.tv', 'twitch.tv')
TRANSLATION_RULES = {
'github.com' : github_translation,
}
USER_AGENT = 'Mozilla/5.0 (Windows NT 6.3; rv:36.0) Gecko/20100101 Firefox/36.0'
def launch_command(command, args):
if type(args) is str:
args = [args]
return subprocess.call([command] + args)
def launch_browser(url):
browser = os.getenv('BROWSER')
if not browser:
browser = 'rifle'
return launch_command(browser, url)
def is_terminal():
# Check if stdin, stdout and stderr are connected to a terminal.
return sys.stdout.isatty() and sys.stdin.isatty() and sys.stderr.isatty()
def launch_editor(url):
editor = os.getenv('EDITOR')
if not editor:
editor = 'rifle'
if editor in ('vi', 'vim', 'neovim', 'nano', 'joe') and not is_terminal():
term = os.getenv('TERMCMD')
if term and term.endswith('termite'):
file_name = download_file(url)
if file_name:
return launch_command('termite', ['-e', editor + ' ' + file_name])
else:
return -1
return download_launch_command('rifle', url, '-f t')
return download_launch_command('rifle', url)
def download_file(url):
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', USER_AGENT)]
tmp_dir = tempfile.gettempdir()
try:
with tempfile.NamedTemporaryFile(prefix="plumber-", dir=tmp_dir, delete=False) as f:
f.write(opener.open(url).read())
return f.name
except OSError:
return None
def download_launch_command(command, url, args=None):
file_name = download_file(url)
if not file_name:
return -1
if args:
if type(args) is str:
args = [args] + [file_name]
else:
args = args + [file_name]
else:
args = file_name
return launch_command(command, args)
def handle_mime(url, mime_type):
if mime_type.startswith('video/'):
return launch_command('rifle', url)
elif mime_type.startswith('image/'):
return download_launch_command('rifle', url)
elif mime_type == 'application/pdf':
return download_launch_command('rifle', url)
elif mime_type.startswith('text/') and mime_type != 'text/html':
return launch_editor(url)
else:
return launch_browser(url)
def lookup_content_type(url):
request = urllib.request.Request(url=url, method='HEAD')
try:
request = urllib.request.urlopen(request)
response = request.getheader('Content-Type')
except (HTTPError, URLError):
return None, None
if ';' in response:
content_type, encoding = response.split(';', maxsplit=1)
return content_type, encoding.strip()
return response, None
def find_mime_type(url):
path = url.path
url = url.geturl()
mime_type, encoding = mimetypes.guess_type(url)
if not mime_type:
mime_type, encoding = mimetypes.guess_type(path)
if not mime_type:
mime_type, encoding = lookup_content_type(url)
return mime_type
def main():
if len(sys.argv) != 2:
return -1
url = urlparse(sys.argv[1])
url_string = url.geturl()
if url.netloc in (VIDEO_URLS):
return launch_command('mpv', url_string)
elif url.netloc in (STREAM_URLS):
return launch_command(VIDEO_STREAMER, url_string)
mime_type = find_mime_type(url)
if mime_type and mime_type != "text/html":
for site, rule in TRANSLATION_RULES.items():
if site in url.netloc:
url_string = rule(url_string)
return handle_mime(url_string, mime_type)
else:
return launch_browser(url_string)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -2025,17 +2025,17 @@
fix=
-%22
+'
plumber-
%22, d
@@ -2030,17 +2030,17 @@
plumber-
-%22
+'
, dir=tm
@@ -4088,17 +4088,17 @@
type !=
-%22
+'
text/htm
@@ -4102,9 +4102,9 @@
html
-%22
+'
:%0A
|
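The delta converts the last two double-quoted strings, after which the file uses single quotes throughout:

        with tempfile.NamedTemporaryFile(prefix='plumber-', dir=tmp_dir, delete=False) as f:

and:

    if mime_type and mime_type != 'text/html':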
59c14e1c0d69309c554ddafa5e168115ba05ddfd
|
Update the win/lose determination code
|
match.py
|
match.py
|
'''
The match monitoring module for SaltBot
'''
from bs4 import BeautifulSoup
import requests
import time
from bet import bet_player1
from website import website
class Match:
def __init__(self):
self.id = 0 # TODO: Use SQL to determine this w/ MAX()
self.player1 = None
self.player2 = None
self.duration = None
self.winner = None
self.p1bets = None
self.p2bets = None
self.mybet = None
def start_round(self, mybet, website):
self.player1 = website.get_player1_name()
self.player2 = website.get_player2_name()
self.p1bets = website.get_player1_wagers()
self.p2bets = website.get_player2_wagers()
self.mybet = mybet
def end_round(self, duration, winner):
self.duration = duration
self.winner = winner
def save_round(self):
# TODO: Save to SQL
return
def record_match(session, request):
# Initialize a match
site = website(session, request)
while(True):
# Add a delay to avoid overloading the server
time.sleep(10)
# Update status
prev_status = site.get_betting_status()
prev_balance = site.get_balance()
site.update()
status = site.get_betting_status()
balance = site.get_balance()
if (prev_status == 'locked' and status == 'open'):
if (balance > prev_balance):
print('Our bet wins')
elif (balance < prev_balance):
print('Our bet loses')
else:
print('Money remained the same')
print(site.get_json())
print('\nBetting is now open!')
print('Balance: ' + str(balance))
# Place the bet
bet_player1(session, 500)
elif (prev_status == 'open' and status == 'locked'):
print('The match begins!')
|
Python
| 0
|
@@ -1004,16 +1004,64 @@
request)
+%0A balance_start = None%0A balance_end = None
%0A%0A wh
@@ -1225,50 +1225,8 @@
s()%0A
- prev_balance = site.get_balance()%0A
@@ -1289,45 +1289,8 @@
us()
-%0A balance = site.get_balance()
%0A%0A
@@ -1342,24 +1342,69 @@
== 'open'):%0A
+ balance_end = site.get_balance()%0A
@@ -1418,23 +1418,28 @@
ance
- %3E prev_
+_end %3E
balance
+_start
):%0A
@@ -1504,23 +1504,28 @@
ance
- %3C prev_
+_end %3C
balance
+_start
):%0A
@@ -1756,16 +1756,20 @@
(balance
+_end
))%0A%0A
@@ -1930,10 +1930,57 @@
egins!')
+%0A balance_start = site.get_balance()
%0A%0A
|
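59c14e1 replaces the per-iteration prev_balance/balance pair with balances captured when the match starts and when betting reopens; reconstructed, record_match becomes roughly:

def record_match(session, request):
    # Initialize a match
    site = website(session, request)
    balance_start = None
    balance_end = None

    while(True):
        # Add a delay to avoid overloading the server
        time.sleep(10)

        # Update status
        prev_status = site.get_betting_status()
        site.update()
        status = site.get_betting_status()

        if (prev_status == 'locked' and status == 'open'):
            balance_end = site.get_balance()
            if (balance_end > balance_start):
                print('Our bet wins')
            elif (balance_end < balance_start):
                print('Our bet loses')
            else:
                print('Money remained the same')
            print(site.get_json())

            print('\nBetting is now open!')
            print('Balance: ' + str(balance_end))

            # Place the bet
            bet_player1(session, 500)
        elif (prev_status == 'open' and status == 'locked'):
            print('The match begins!')
            balance_start = site.get_balance()

(Under Python 2 the first comparison still runs with balance_start = None, which silently compares as smaller than any number.)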
8c9b29f0becbef26625e7a2fbe6aaf1d75faeacd
|
fix parameter order
|
lib/exabgp/reactor/network/tcp.py
|
lib/exabgp/reactor/network/tcp.py
|
# encoding: utf-8
"""
setup.py
Created by Thomas Mangin on 2013-07-13.
Copyright (c) 2013-2013 Exa Networks. All rights reserved.
"""
import struct
import socket
import platform
from exabgp.util.errstr import errstr
from exabgp.protocol.family import AFI
from exabgp.reactor.network.error import errno
from .error import NotConnected,BindingError,MD5Error,NagleError,TTLError,AsyncError
def create (afi):
try:
if afi == AFI.ipv4:
io = socket.socket(socket.AF_INET, socket.SOCK_STREAM, socket.IPPROTO_TCP)
if afi == AFI.ipv6:
io = socket.socket(socket.AF_INET6, socket.SOCK_STREAM, socket.IPPROTO_TCP)
try:
io.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except AttributeError:
pass
try:
io.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
except AttributeError:
pass
except socket.error:
raise NotConnected('Could not create socket')
return io
def bind (io,afi,ip):
try:
if afi == AFI.ipv4:
io.bind((ip,0))
if afi == AFI.ipv6:
io.bind((ip,0,0,0))
except socket.error,e:
raise BindingError('Could not bind to local ip %s - %s' % (ip,str(e)))
def connect (io,ip,afi,md5):
try:
if afi == AFI.ipv4:
io.connect((ip,179))
if afi == AFI.ipv6:
io.connect((ip,179,0,0))
except socket.error, e:
if e.errno == errno.EINPROGRESS:
return
if md5:
raise NotConnected('Could not connect to peer %s, check your MD5 password (%s)' % (ip,errstr(e)))
raise NotConnected('Could not connect to peer %s (%s)' % (ip,errstr(e)))
def MD5 (io,ip,afi,md5):
if md5:
os = platform.system()
if os == 'FreeBSD':
if md5 != 'kernel':
raise MD5Error(
'FreeBSD requires that you set your MD5 key via ipsec.conf.\n'
'Something like:\n'
'flush;\n'
'add <local ip> <peer ip> tcp 0x1000 -A tcp-md5 "password";'
)
try:
TCP_MD5SIG = 0x10
io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, 1)
except socket.error,e:
raise MD5Error(
'FreeBSD requires that you rebuild your kernel to enable TCP MD5 Signatures:\n'
'options IPSEC\n'
'options TCP_SIGNATURE\n'
'device crypto\n'
)
elif os == 'Linux':
try:
TCP_MD5SIG = 14
TCP_MD5SIG_MAXKEYLEN = 80
n_port = socket.htons(179)
if afi == AFI.ipv4:
SS_PADSIZE = 120
n_addr = socket.inet_pton(socket.AF_INET, ip)
tcp_md5sig = 'HH4s%dx2xH4x%ds' % (SS_PADSIZE, TCP_MD5SIG_MAXKEYLEN)
md5sig = struct.pack(tcp_md5sig, socket.AF_INET, n_port, n_addr, len(md5), md5)
if afi == AFI.ipv6:
SS_PADSIZE = 100
SIN6_FLOWINFO = 0
SIN6_SCOPE_ID = 0
n_addr = socket.inet_pton(socket.AF_INET6, ip)
tcp_md5sig = 'HHI16sI%dx2xH4x%ds' % (SS_PADSIZE, TCP_MD5SIG_MAXKEYLEN)
md5sig = struct.pack(tcp_md5sig, socket.AF_INET6, n_port, SIN6_FLOWINFO, n_addr, SIN6_SCOPE_ID, len(md5), md5)
io.setsockopt(socket.IPPROTO_TCP, TCP_MD5SIG, md5sig)
except socket.error,e:
raise MD5Error('This linux machine does not support TCP_MD5SIG, you can not use MD5 (%s)' % errstr(e))
else:
raise MD5Error('ExaBGP has no MD5 support for %s' % os)
def nagle (io,ip):
try:
		# disable Nagle's algorithm (no grouping of packets)
io.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except AttributeError:
raise NagleError("Could not disable nagle's algorithm for %s" % ip)
def TTL (io,ip,ttl):
# None (ttl-security unset) or zero (maximum TTL) is the same thing
if ttl:
try:
io.setsockopt(socket.IPPROTO_IP,socket.IP_TTL, 20)
except socket.error,e:
raise TTLError('This OS does not support IP_TTL (ttl-security) for %s (%s)' % (ip,errstr(e)))
def async (io,ip):
try:
io.setblocking(0)
except socket.error, e:
raise AsyncError('could not set socket non-blocking for %s (%s)' % (ip,errstr(e)))
# try:
# try:
# # Linux / Windows
# self.message_size = io.getsockopt(socket.SOL_SOCKET, socket.SO_MAX_MSG_SIZE)
# except AttributeError:
# # BSD
# self.message_size = io.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
# except socket.error, e:
# self.message_size = None
|
Python
| 0.000001
|
@@ -907,17 +907,17 @@
nd (
+ip,
io,afi
-,ip
):%0A%09
|
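The delta only reorders bind()'s parameters, putting the ip first:

def bind (ip,io,afi):

Any call sites presumably change in the same commit, but they are not part of the delta shown.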
67e620716b494f74c9b913b6514463eb4689c590
|
add OMPL_DEBUG and other convenience functions to python bindings
|
py-bindings/ompl/util/__init__.py
|
py-bindings/ompl/util/__init__.py
|
from os.path import abspath, dirname
from ompl import dll_loader
dll_loader('ompl', dirname(abspath(__file__)))
from ompl.util._util import *
|
Python
| 0
|
@@ -135,8 +135,610 @@
mport *%0A
+import inspect%0A%0Adef OMPL_DEBUG(text):%0A c = inspect.currentframe().f_back%0A getOutputHandler().log(text, LogLevel.LOG_DEBUG, c.f_code.co_filename, c.f_lineno)%0Adef OMPL_INFORM(text):%0A c = inspect.currentframe().f_back%0A getOutputHandler().log(text, LogLevel.LOG_INFO, c.f_code.co_filename, c.f_lineno)%0Adef OMPL_WARN(text):%0A c = inspect.currentframe().f_back%0A getOutputHandler().log(text, LogLevel.LOG_WARN, c.f_code.co_filename, c.f_lineno)%0Adef OMPL_ERROR(text):%0A c = inspect.currentframe().f_back%0A getOutputHandler().log(text, LogLevel.LOG_ERROR, c.f_code.co_filename, c.f_lineno)%0A
|
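Decoded, 67e6207 appends four logging helpers that mirror OMPL's C++ OMPL_* macros, using inspect to recover the caller's file name and line number:

import inspect

def OMPL_DEBUG(text):
    c = inspect.currentframe().f_back
    getOutputHandler().log(text, LogLevel.LOG_DEBUG, c.f_code.co_filename, c.f_lineno)
def OMPL_INFORM(text):
    c = inspect.currentframe().f_back
    getOutputHandler().log(text, LogLevel.LOG_INFO, c.f_code.co_filename, c.f_lineno)
def OMPL_WARN(text):
    c = inspect.currentframe().f_back
    getOutputHandler().log(text, LogLevel.LOG_WARN, c.f_code.co_filename, c.f_lineno)
def OMPL_ERROR(text):
    c = inspect.currentframe().f_back
    getOutputHandler().log(text, LogLevel.LOG_ERROR, c.f_code.co_filename, c.f_lineno)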
a0ae834ec44acdbb6b7979a3df9a50659acc1d11
|
Fix broken star import
|
src/pybel_tools/summary/error_summary.py
|
src/pybel_tools/summary/error_summary.py
|
# -*- coding: utf-8 -*-
"""This module contains functions that provide summaries of the errors encountered while parsing a BEL script"""
from collections import Counter, defaultdict
from pybel.constants import ANNOTATIONS
from pybel.parser.parse_exceptions import *
from .node_summary import get_namespaces, get_names_by_namespace
from ..utils import check_has_annotation
__all__ = [
'count_error_types',
'count_naked_names',
'get_naked_names',
'get_incorrect_names_by_namespace',
'get_incorrect_names',
'get_undefined_namespaces',
'get_undefined_namespace_names',
'calculate_incorrect_name_dict',
'calculate_suggestions',
'calculate_error_by_annotation',
'group_errors',
'get_names_including_errors',
'get_names_including_errors_by_namespace',
'get_undefined_annotations',
'get_namespaces_with_incorrect_names',
]
def count_error_types(graph):
"""Counts the occurrence of each type of error in a graph
:param pybel.BELGraph graph: A BEL graph
:return: A Counter of {error type: frequency}
:rtype: collections.Counter
"""
return Counter(e.__class__.__name__ for _, _, e, _ in graph.warnings)
def _naked_names_iter(graph):
"""Iterates over naked name warnings froma graph
:param pybel.BELGraph graph: A BEL graph
:rtype: iter[NakedNameWarning]
"""
for _, _, e, _ in graph.warnings:
if isinstance(e, NakedNameWarning):
yield e.name
def count_naked_names(graph):
"""Counts the frequency of each naked name (names without namespaces)
:param pybel.BELGraph graph: A BEL graph
:return: A Counter from {name: frequency}
:rtype: collections.Counter
"""
return Counter(_naked_names_iter(graph))
def get_naked_names(graph):
"""Gets the set of naked names in the graph
:param pybel.BELGraph graph: A BEL graph
:rtype: set[str]
"""
return set(_naked_names_iter(graph))
def get_namespaces_with_incorrect_names(graph):
"""Returns the set of all namespaces with incorrect names in the graph"""
return {
e.namespace
for _, _, e, _ in graph.warnings
if isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning))
}
def get_incorrect_names_by_namespace(graph, namespace):
"""Returns the set of all incorrect names from the given namespace in the graph
:param pybel.BELGraph graph: A BEL graph
:param str namespace: The namespace to filter by
:return: The set of all incorrect names from the given namespace in the graph
:rtype: set[str]
"""
return {
e.name
for _, _, e, _ in graph.warnings
if isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)) and e.namespace == namespace
}
def get_incorrect_names(graph):
"""Returns the dict of the sets of all incorrect names from the given namespace in the graph
:param pybel.BELGraph graph: A BEL graph
:return: The set of all incorrect names from the given namespace in the graph
:rtype: dict[str,set[str]]
"""
return {
namespace: get_incorrect_names_by_namespace(graph, namespace)
for namespace in get_namespaces(graph)
}
def get_undefined_namespaces(graph):
"""Gets all namespaces that aren't actually defined
:param pybel.BELGraph graph: A BEL graph
:return: The set of all undefined namespaces
:rtype: set[str]
"""
return {
e.namespace
for _, _, e, _ in graph.warnings
if isinstance(e, UndefinedNamespaceWarning)
}
def get_undefined_namespace_names(graph, namespace):
"""Gets the names from a namespace that wasn't actually defined
:param pybel.BELGraph graph: A BEL graph
:param str namespace: The namespace to filter by
:return: The set of all names from the undefined namespace
:rtype: set[str]
"""
return {
e.name
for _, _, e, _ in graph.warnings
if isinstance(e, UndefinedNamespaceWarning) and e.namespace == namespace
}
def get_undefined_annotations(graph):
"""Gets all annotations that aren't actually defined
:param pybel.BELGraph graph: A BEL graph
:return: The set of all undefined annotations
:rtype: set[str]
"""
return {
e.annotation
for _, _, e, _ in graph.warnings
if isinstance(e, UndefinedAnnotationWarning)
}
# FIXME need to change underlying definition and usage of this exception
def get_undefined_annotation_values(graph, annotation):
"""Gets the values from an annotation that wasn't actually defined
:param pybel.BELGraph graph: A BEL graph
    :param str annotation: The annotation to filter by
:return: The set of all values from the undefined annotation
:rtype: set[str]
"""
raise NotImplementedError
# return {e.value for _, _, e, _ in graph.warnings if isinstance(e, UndefinedAnnotationWarning) and e.annotation == annotation}
def calculate_incorrect_name_dict(graph):
"""Groups all of the incorrect identifiers in a dict of {namespace: list of erroneous names}
:param pybel.BELGraph graph: A BEL graph
:return: A dictionary of {namespace: list of erroneous names}
:rtype: dict[str, str]
"""
missing = defaultdict(list)
for line_number, line, e, ctx in graph.warnings:
if not isinstance(e, (MissingNamespaceNameWarning, MissingNamespaceRegexWarning)):
continue
missing[e.namespace].append(e.name)
return dict(missing)
def calculate_error_by_annotation(graph, annotation):
"""Groups the graph by a given annotation and builds lists of errors for each
:param pybel.BELGraph graph: A BEL graph
:param annotation: The annotation to group errors by
:type annotation: str
:return: A dictionary of {annotation value: list of errors}
:rtype: dict[str, list[str]]
"""
results = defaultdict(list)
for line_number, line, e, context in graph.warnings:
if not context or not check_has_annotation(context, annotation):
continue
values = context[ANNOTATIONS][annotation]
if isinstance(values, str):
results[values].append(e.__class__.__name__)
elif isinstance(values, (set, tuple, list)):
for value in values:
results[value].append(e.__class__.__name__)
return dict(results)
def group_errors(graph):
"""Groups the errors together for analysis of the most frequent error
:param pybel.BELGraph graph: A BEL graph
:return: A dictionary of {error string: list of line numbers}
:rtype: dict[str, list[int]]
"""
warning_summary = defaultdict(list)
for ln, _, e, _ in graph.warnings:
warning_summary[str(e)].append(ln)
return dict(warning_summary)
def get_names_including_errors_by_namespace(graph, namespace):
"""Takes the names from the graph in a given namespace and the erroneous names from the same namespace and returns
them together as a unioned set
:param pybel.BELGraph graph: A BEL graph
:param str namespace: The namespace to filter by
:return: The set of all correct and incorrect names from the given namespace in the graph
:rtype: set[str]
"""
return get_names_by_namespace(graph, namespace) | get_incorrect_names_by_namespace(graph, namespace)
def get_names_including_errors(graph):
"""Takes the names from the graph in a given namespace and the erroneous names from the same namespace and returns
them together as a unioned set
:param pybel.BELGraph graph: A BEL graph
:return: The dict of the sets of all correct and incorrect names from the given namespace in the graph
:rtype: dict[str,set[str]]
"""
return {
namespace: get_names_including_errors_by_namespace(graph, namespace)
for namespace in get_namespaces(graph)
}
|
Python
| 0.000003
|
@@ -631,37 +631,8 @@
t',%0A
- 'calculate_suggestions',%0A
|
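The star import broke because __all__ advertised a name the module never defines; the delta deletes exactly that entry, so the list now runs straight from one calculate_* helper to the next:

    'calculate_incorrect_name_dict',
    'calculate_error_by_annotation',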
cdf31e64ad2b951c4c8bd0acfb087b541490746c
|
Add root_path to http.services.Service
|
py/garage/garage/http/services.py
|
py/garage/garage/http/services.py
|
__all__ = [
'ServiceError',
'EndpointNotFound',
'VersionNotSupported',
'Service',
]
import logging
import re
from http import HTTPStatus
from http2 import HttpError
from garage import asserts
LOG = logging.getLogger(__name__)
class ServiceError(Exception):
pass
class EndpointNotFound(ServiceError):
pass
class VersionNotSupported(ServiceError):
pass
class Service:
def __init__(self, name, version):
LOG.info('create service %s version %d', name, version)
self.name = name
self.version = version
self.policies = []
self.endpoints = {}
self.decode = None
self.encode = None
def add_policy(self, policy):
self.policies.append(policy)
def add_endpoint(self, name, endpoint):
LOG.info('register endpoint %s to service %s version %d',
name, self.name, self.version)
name = name.encode('ascii')
asserts.precond(name not in self.endpoints)
self.endpoints[name] = endpoint
async def __call__(self, http_request, http_response):
path = http_request.headers.get(b':path')
if path is None:
raise HttpError(HTTPStatus.BAD_REQUEST)
try:
endpoint = self.dispatch(path)
except EndpointNotFound:
raise HttpError(HTTPStatus.NOT_FOUND)
except VersionNotSupported as e:
# Returning 400 when a request's version is newer is weird,
# but none of other 4xx or 5xx code makes more sense anyway.
# Like, 403? But, could we say we understand a request of
# newer version (premise of a 403)? At least when returning
# 400, we are telling the client that he could modify the
# request (down-version it) and send it again.
raise HttpError(
HTTPStatus.BAD_REQUEST,
reason='unsupported endpoint version',
)
try:
await self.call_endpoint(endpoint, http_request, http_response)
except HttpError:
raise
except Exception:
LOG.exception('err when calling endpoint')
raise HttpError(HTTPStatus.INTERNAL_SERVER_ERROR)
PATTERN_ENDPOINT = re.compile(br'/(\d+)/([\w_\-.]+)')
def dispatch(self, path):
match = self.PATTERN_ENDPOINT.match(path)
if not match:
raise EndpointNotFound(path)
version = int(match.group(1))
endpoint_name = match.group(2)
endpoint = self.endpoints.get(endpoint_name)
if endpoint is None:
raise EndpointNotFound(path)
if self.version < version:
raise VersionNotSupported(version)
return endpoint
async def call_endpoint(self, endpoint, http_request, http_response):
for policy in self.policies:
await policy(http_request.headers)
request = await http_request.body
if request:
if self.decode:
request = await self.decode(http_request.headers, request)
else:
request = None
response = await endpoint(request)
if self.encode:
response = await self.encode(http_request.headers, response)
asserts.postcond(isinstance(response, bytes))
http_response.headers[b':status'] = b'200'
await http_response.write(response)
await http_response.close()
|
Python
| 0.000018
|
@@ -560,16 +560,46 @@
version%0A
+ self.root_path = None%0A
@@ -2349,16 +2349,187 @@
path):%0A
+ if self.root_path:%0A if not path.startswith(self.root_path):%0A raise EndpointNotFound(path)%0A path = path%5Blen(self.root_path):%5D%0A%0A
|
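A hedged sketch of the dispatch path above, folding in the root_path stripping the diff introduces. The endpoint table, service version, and root_path value here are illustrative, not part of garage; LookupError stands in for the module's own exceptions.

import re

PATTERN_ENDPOINT = re.compile(br'/(\d+)/([\w_\-.]+)')

def dispatch(path, endpoints, service_version, root_path=None):
    # Per the diff: strip the configured prefix before matching /<version>/<endpoint>.
    if root_path:
        if not path.startswith(root_path):
            raise LookupError(path)
        path = path[len(root_path):]
    match = PATTERN_ENDPOINT.match(path)
    if not match:
        raise LookupError(path)
    version = int(match.group(1))
    if service_version < version:
        raise LookupError('version %d not supported' % version)
    return endpoints[match.group(2)]

print(dispatch(b'/api/1/echo', {b'echo': 'echo-handler'}, 2, root_path=b'/api'))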
66b6d3648c0a4229048c0f8a63ec410c407f1ba1
|
Fix unittest
|
src/pyscaffold/extensions/no_skeleton.py
|
src/pyscaffold/extensions/no_skeleton.py
|
# -*- coding: utf-8 -*-
"""
Extension that omits the creation of file `skeleton.py`
"""
from ..api import Extension
from ..api import helpers
class NoSkeleton(Extension):
"""Omit creation of skeleton.py"""
def activate(self, actions):
"""Activate extension
Args:
actions (list): list of actions to perform
Returns:
list: updated list of actions
"""
return self.register(
actions,
self.remove_files,
after='define_structure')
def remove_files(self, struct, opts):
"""Remove all skeleton files from structure
Args:
struct (dict): project representation as (possibly) nested
:obj:`dict`.
opts (dict): given options, see :obj:`create_project` for
an extensive list.
Returns:
struct, opts: updated project representation and options
"""
pkgs = opts['qual_pkg'].split('.')
file = [opts['project'], 'src'] + pkgs + ['skeleton.py']
struct = helpers.reject(struct, file)
file = [opts['project'], 'tests', 'test_skeleton.py']
struct = helpers.reject(struct, file)
return struct, opts
|
Python
| 0.00002
|
@@ -957,42 +957,73 @@
-pkgs = opts%5B'qual_pkg'%5D.split('.')
+# Namespace is not yet applied so deleting from package is enough
%0A
@@ -1061,20 +1061,27 @@
src'
-%5D + pkgs + %5B
+, opts%5B'package'%5D,
'ske
|
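helpers.reject is pyscaffold's own API; the following is only a toy equivalent showing the idea of dropping one leaf from a nested structure dict, using the simplified path the diff introduces ('src', opts['package'], 'skeleton.py').

def reject(struct, path):
    # Walk down to the parent dict and drop the leaf if present.
    *parents, leaf = path
    node = struct
    for part in parents:
        node = node.get(part, {})
    node.pop(leaf, None)
    return struct

struct = {'proj': {'src': {'pkg': {'skeleton.py': '...', 'keep.py': '...'}}}}
print(reject(struct, ['proj', 'src', 'pkg', 'skeleton.py']))
# {'proj': {'src': {'pkg': {'keep.py': '...'}}}}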
0062eeaf558a0eb9e8a736baf16932e56546001f
|
Fix silly Python indentation issues.
|
src/query_processing/query_processing.py
|
src/query_processing/query_processing.py
|
# LING 573 Question Answering System
# Code last updated 4/15/14 by Andrea Kahn
#
# This code implements a QueryProcessor for the question answering system.
import sys
import general_classes
import nltk
# TODO: A QueryProcessor should be initialized with the Question object, but should it
# have this question as an attribute, or should it have attributes id, type, q, target that
# match those of the question?
# Advantage of first approach: allows us to change the Question and have the QueryProcessor
# update automatically (not sure we'd ever do this).
# Advantage of second approach: saves typing (access the id with self.id, versus self.question.id).
# A QueryProcessor object has the attribute "question", a Question object.
class QueryProcessor(object):
def __init__(self, question):
self.question = question
# This method returns a set of SearchQuery objects.
def generate_queries(self):
tokenized_q = nltk.word_tokenize(self.question.q)
tokenized_target = nltk.word_tokenize(self.question.target)
# FIXME: Strip out punctuation tokens
# note from Claire: here is a temporary fix
punctuation = ['?','.',',']
search_query = [x for x in tokenized_q if x not in punctuation] + [x for x in tokenized_target if x not in punctuation]
# FIXME: Issue with leading escape character in some questions
return [search_query]
# This method returns an AnswerTemplate object.
def generate_ans_template(self):
# NB: The following if statement should always evaluate as True in our system, but
# its inclusion enables the system to more easily be extended to handle other types
# of questions, for which the text-processing and AnswerTemplate-generation steps
# might be slightly different.
if self.question.type=="FACTOID":
# do some sort of text-processing on the natural-language question and context
# to determine NE type
# generate a corresponding AnswerTemplate object
# return it
return None
else:
sys.stderr.write("Warning: System can only handle \"factoid\" questions\n")
|
Python
| 0.000002
|
@@ -902,18 +902,24 @@
(self):%0A
-%09%09
+
tokenize
@@ -960,18 +960,24 @@
tion.q)%0A
-%09%09
+
tokenize
@@ -1156,18 +1156,24 @@
.',','%5D%0A
-%09%09
+
search_q
@@ -1350,18 +1350,24 @@
stions %0A
-%09%09
+
return %5B
|
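A self-contained sketch of the query-building step above, with str.split standing in for nltk.word_tokenize and string.punctuation widening the temporary ['?', '.', ','] fix; the sample question is made up.

import string

def generate_query(question, target):
    punctuation = set(string.punctuation)
    tokens = question.split() + target.split()  # nltk.word_tokenize stand-in
    return [t for t in tokens if t not in punctuation]

print(generate_query('Who directed this film ?', 'Academy Awards'))
# ['Who', 'directed', 'this', 'film', 'Academy', 'Awards']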
74b9a7b5cba162dba986075e43ab29682a13e55e
|
fix orientation
|
py3status/modules/xrandr_rotate.py
|
py3status/modules/xrandr_rotate.py
|
# -*- coding: utf-8 -*-
"""
Control screen rotation.
Configuration parameters:
cache_timeout: how often to refresh this module.
(default 10)
format: a string that formats the output, can include placeholders.
(default '{icon}')
hide_if_disconnected: a boolean flag to hide the icon when `screen` is
disconnected.
It has no effect unless the `screen` option is also configured.
(default False)
horizontal_icon: a character to represent horizontal rotation.
(default 'H')
horizontal_rotation: a horizontal rotation for xrandr to use.
Available options: 'normal' or 'inverted'.
(default 'normal')
screen: display output name to rotate, as detected by xrandr.
If not provided, all enabled screens will be rotated.
(default None)
vertical_icon: a character to represent vertical rotation.
(default 'V')
vertical_rotation: a vertical rotation for xrandr to use.
Available options: 'left' or 'right'.
(default 'left')
Format placeholders:
{icon} a rotation icon, specified by `horizontal_icon` or `vertical_icon`.
{screen} a screen name, specified by `screen` option or detected
automatically if only one screen is connected, otherwise 'ALL'.
Color options:
color_degraded: Screen is disconnected
color_good: Displayed rotation is active
@author Maxim Baz (https://github.com/maximbaz) @contributor lasers
@license BSD
SAMPLE OUTPUT
{'color': '#00FF00', 'full_text': u'V'}
h
{'color': '#00FF00', 'full_text': u'H'}
"""
class Py3status:
"""
"""
# available configuration parameters
cache_timeout = 10
format = '{icon}'
hide_if_disconnected = False
horizontal_icon = 'H'
horizontal_rotation = 'normal'
screen = None
vertical_icon = 'V'
vertical_rotation = 'left'
def post_config_hook(self):
self.displayed = ''
def _get_all_outputs(self):
outputs = self.py3.command_output(['xrandr']).splitlines()
return [x.split()[0] for x in outputs if ' connected' in x]
def _get_current_rotation_icon(self, all_outputs):
output = self.screen or all_outputs[0]
data = self.py3.command_output(['xrandr']).splitlines()
orientation = ''.join(
[x.split()[3] for x in data if x.startswith(output)])
# xrandr may skip 'normal' so we check if it starts with '(' too
is_horizontal = (orientation.startswith('(') or
orientation in ['normal', 'inverted'])
return self.horizontal_icon if is_horizontal else self.vertical_icon
def _apply(self):
if self.displayed == self.horizontal_icon:
rotation = self.horizontal_rotation
else:
rotation = self.vertical_rotation
outputs = [self.screen] if self.screen else self._get_all_outputs()
for output in outputs:
cmd = 'xrandr --output ' + output + ' --rotate ' + rotation
self.py3.command_run(cmd)
def _switch_selection(self):
if self.displayed == self.horizontal_icon:
self.displayed = self.vertical_icon
else:
self.displayed = self.horizontal_icon
def on_click(self, event):
"""
Click events
- left click & scroll up/down: switch between rotations
- right click: apply selected rotation
"""
button = event['button']
if button in [1, 4, 5]:
self._switch_selection()
elif button == 3:
self._apply()
def xrandr_rotate(self):
all_outputs = self._get_all_outputs()
selected_screen_disconnected = (
self.screen is not None and self.screen not in all_outputs
)
if selected_screen_disconnected and self.hide_if_disconnected:
self.displayed = ''
full_text = ''
else:
if not self.displayed:
self.displayed = self._get_current_rotation_icon(all_outputs)
if len(all_outputs) == 1:
screen = self.screen or all_outputs[0]
else:
screen = 'ALL'
full_text = self.py3.safe_format(self.format,
dict(icon=self.displayed or '?',
screen=screen))
response = {
'cached_until': self.py3.time_in(self.cache_timeout),
'full_text': full_text
}
# coloration
if selected_screen_disconnected and not self.hide_if_disconnected:
response['color'] = self.py3.COLOR_DEGRADED
elif self.displayed == self._get_current_rotation_icon(all_outputs):
response['color'] = self.py3.COLOR_GOOD
return response
if __name__ == "__main__":
"""
Run module in test mode.
"""
from py3status.module_test import module_test
module_test(Py3status)
|
Python
| 0.000004
|
@@ -2140,55 +2140,8 @@
s):%0A
- output = self.screen or all_outputs%5B0%5D%0A
@@ -2213,55 +2213,78 @@
o
-rientation = ''.join(%0A %5Bx.split()%5B3%5D
+utput = self.screen or all_outputs%5B0%5D%0A output_line = ''.join(%5Bx
for
@@ -2320,24 +2320,25 @@
tput)%5D)%0A
+%0A
# xrandr
@@ -2333,250 +2333,199 @@
-# xrandr may skip 'normal' so we check if it starts with '(' too%0A is_horizontal = (orientation.startswith('(') or%0A orientation in %5B'normal', 'inverted'%5D)%0A return self.horizontal_icon if is_horizontal else
+for x in output_line.split():%0A if 'normal' in x or 'inverted' in x:%0A return self.horizontal_icon%0A elif 'left' in x or 'right' in x:%0A return
sel
|
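The diff replaces the '(' heuristic with a token scan of the xrandr output line. This sketch isolates that scan; the sample line, icon defaults, and the fallback to horizontal are assumptions.

def rotation_icon(output_line, horizontal_icon='H', vertical_icon='V'):
    # Scan tokens for a rotation keyword, as in the fixed loop.
    for token in output_line.split():
        if 'normal' in token or 'inverted' in token:
            return horizontal_icon
        elif 'left' in token or 'right' in token:
            return vertical_icon
    return horizontal_icon  # assumption: treat a missing keyword as horizontal

print(rotation_icon('HDMI-1 connected 1920x1080+0+0 left (normal left inverted right)'))  # V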
c6b47431f75675547d54c3b68c07aad76721e513
|
fix srfit test
|
pyiid/tests/test_against_srfit.py
|
pyiid/tests/test_against_srfit.py
|
__author__ = 'christopher'
from pyiid.tests import *
from pyiid.experiments.elasticscatter import ElasticScatter
local_test_atoms = setup_atomic_square()[0] * 3
test_data = tuple(product([local_test_atoms], [None]))
def test_fq_against_srfit():
for value in test_data:
yield check_fq_against_srfit, value
def check_fq_against_srfit(value):
if not srfit:
KnownFailureTest()
# unpack the atoms and experiment
atoms = value[0]
exp = value[1]
# get the pyIID F(Q)
s = ElasticScatter(exp)
# s.set_processor('CPU', 'nxn')
ans1 = s.get_fq(atoms)
# get the SrFit F(Q)
stru = convert_atoms_to_stru(atoms)
srfit_calc = DebyePDFCalculator()
srfit_calc.qmin = s.exp['qmin']
srfit_calc.qmax = s.exp['qmax']
srfit_calc.qstep = s.exp['qbin']
r1, g1 = srfit_calc(stru)
assert_allclose(s.get_scatter_vector(), srfit_calc.qgrid)
ans2 = srfit_calc.fq
stats_check(ans1, ans2, rtol=1e-1, atol=5e-6)
del srfit_calc
assert_allclose(ans1, ans2, rtol=1e-1, atol=5e-6)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[
# '-s',
'--with-doctest',
# '--nocapture',
# '-v'
],
# env={"NOSE_PROCESSES": 1, "NOSE_PROCESS_TIMEOUT": 599},
exit=False)
|
Python
| 0.000001
|
@@ -216,110 +216,33 @@
)%0A%0A%0A
-def test_fq_against_srfit():%0A for value in test_data:%0A yield check_fq_against_srfit, value%0A%0A
+@known_fail_if(not srfit)
%0Adef
@@ -269,61 +269,11 @@
fit(
-value
):%0A
- if not srfit:%0A KnownFailureTest()%0A
@@ -322,16 +322,24 @@
s =
-value%5B0%5D
+local_test_atoms
%0A
@@ -349,16 +349,12 @@
p =
-value%5B1%5D
+None
%0A%0A
|
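The diff swaps the manual srfit check for a known_fail_if decorator. Below is a hedged toy version of such a decorator; the real one presumably raises nose's KnownFailureTest rather than printing and skipping.

import functools

def known_fail_if(condition):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if condition:
                # The real decorator would raise KnownFailureTest here.
                print('known failure, skipping %s' % fn.__name__)
                return None
            return fn(*args, **kwargs)
        return wrapper
    return decorator

@known_fail_if(True)
def check_fq_against_srfit():
    raise AssertionError('never reached when the condition holds')

check_fq_against_srfit()  # prints the skip message instead of failing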
1b108ad916ec0d3acdb582491ab9bcd756e05f41
|
Check existence of updates dir only once
|
pyfarm/master/api/agent_updates.py
|
pyfarm/master/api/agent_updates.py
|
# No shebang line, this module is meant to be imported
#
# Copyright 2014 Ambient Entertainment Gmbh & Co. KG
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agent Updates
-------------
The API allows access to agent update packages, possibly through redirects
"""
import re
import tempfile
from os import makedirs
from os.path import join, isdir, isfile
try:
from httplib import BAD_REQUEST, CREATED, NOT_FOUND
except ImportError: # pragma: no cover
from http.client import BAD_REQUEST, CREATED, NOT_FOUND
from werkzeug.utils import secure_filename
from flask.views import MethodView
from flask import request, g, redirect, send_file
from pyfarm.core.config import read_env
from pyfarm.core.logger import getLogger
from pyfarm.master.utility import jsonify
logger = getLogger("api.agents")
VERSION_REGEX = re.compile("\d+(\.\d+(\.\d+)?)?((-pre\d?)|(-dev\d?)|(-rc?\d?)|"
"(-alpha\d?)|(-beta\d?))?$")
class AgentUpdatesAPI(MethodView):
def put(self, version):
"""
A ``PUT`` to this endpoint will upload a new version of pyfarm-agent to
be used for agent auto-updates. The update must be a zip file.
.. http:put:: /api/v1/agents/updates/<string:version> HTTP/1.1
**Request**
.. sourcecode:: http
PUT /api/v1/agents/updates/1.2.3 HTTP/1.1
Content-Type: application/zip
<binary data>
**Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/json
:statuscode 201: The update was put in place
:statuscode 400: there was something wrong with the request (such as an
invalid version number specified or the mime type not
being application/zip)
"""
if request.mimetype != "application/zip":
return (jsonify(error="Data for agent updates must be "
"application/zip"), BAD_REQUEST)
if not VERSION_REGEX.match(version):
return (jsonify(error="Version is not an acceptable version number"),
BAD_REQUEST)
updates_dir = read_env("PYFARM_AGENT_UPDATES_DIR",
join(tempfile.gettempdir(), "pyfarm-updates"))
if not isdir(updates_dir):
makedirs(updates_dir)
path = join(updates_dir, "pyfarm-agent-%s.zip" % version)
with open(path, "wb+") as zip_file:
zip_file.write(request.data)
return "", CREATED
def get(self, version):
"""
A ``GET`` to this endpoint will return the update package as a zip file
the specified version
.. http:get:: /api/v1/agents/updates/<string:version> HTTP/1.1
**Request**
.. sourcecode:: http
PUT /api/v1/agents/updates/1.2.3 HTTP/1.1
Accept: application/zip
**Response**
.. sourcecode:: http
HTTP/1.1 200 OK
Content-Type: application/zip
<binary data>
:statuscode 200: The update file was found and is returned
:statuscode 301: The update can be found under a different URL
:statuscode 400: there was something wrong with the request (such as an
invalid version number specified or the mime type not
being application/zip)
"""
if not VERSION_REGEX.match(version):
return (jsonify(error="Version is not an acceptable version number"),
BAD_REQUEST)
filename = "pyfarm-agent-%s.zip" % version
updates_webdir = read_env("PYFARM_AGENT_UPDATES_WEBDIR", None)
if updates_webdir:
return redirect(join(updates_webdir, filename))
updates_dir = read_env("PYFARM_AGENT_UPDATES_DIR",
join(tempfile.gettempdir(), "pyfarm-updates"))
update_file = join(updates_dir, filename)
if not isfile(update_file):
return (jsonify(error="Specified update not found"), NOT_FOUND)
return send_file(update_file)
|
Python
| 0
|
@@ -850,16 +850,41 @@
, isfile
+%0Afrom errno import EEXIST
%0A%0Atry:%0A
@@ -1463,16 +1463,233 @@
))?$%22)%0A%0A
+UPDATES_DIR = read_env(%0A %22PYFARM_AGENT_UPDATES_DIR%22, join(tempfile.gettempdir(), %22pyfarm-updates%22))%0A%0Atry:%0A makedirs(UPDATES_DIR)%0Aexcept OSError as e: # pragma: no cover%0A if e.errno != EEXIST:%0A raise%0A%0A
%0Aclass A
@@ -2947,238 +2947,31 @@
-updates_dir = read_env(%22PYFARM_AGENT_UPDATES_DIR%22,%0A join(tempfile.gettempdir(), %22pyfarm-updates%22))%0A if not isdir(updates_dir):%0A makedirs(updates_dir)%0A%0A path = join(updates_dir
+path = join(UPDATES_DIR
, %22p
@@ -4413,169 +4413,32 @@
date
-s_dir = read_env(%22PYFARM_AGENT_UPDATES_DIR%22,%0A join(tempfile.gettempdir(), %22pyfarm-updates%22))%0A update_file = join(updates_dir
+_file = join(UPDATES_DIR
, fi
|
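The diff moves directory creation to import time and tolerates an existing directory. This sketch shows that makedirs/EEXIST idiom on its own; the path mirrors the module's default.

import errno
import os
import tempfile

UPDATES_DIR = os.path.join(tempfile.gettempdir(), "pyfarm-updates")

try:
    os.makedirs(UPDATES_DIR)
except OSError as e:
    # "Already exists" is fine; anything else is a real error.
    if e.errno != errno.EEXIST:
        raise

print(os.path.isdir(UPDATES_DIR))  # True, and safe to run twice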
e6ee5a31177d537bbdf2c60fef9654ac967168cc
|
update every 5 minutes results to irc
|
pyfibot_module/module_nhl_stats.py
|
pyfibot_module/module_nhl_stats.py
|
# -*- coding: utf-8 -*-
from peewee import SqliteDatabase
from eanhlstats.model import *
from eanhlstats.interface import *
import eanhlstats.settings
Team.create_table(True)
Player.create_table(True)
eanhlstats.settings.REGION = 3
import logging
from twisted.internet import reactor
log = logging.getLogger("motionmachine")
result = None
trackchannel = None
trackbot = None
def init(bot):
"""Called when the bot is loaded and on rehash"""
trackbot = None
trackchannel = None
result = None
pp_motion_machine(30)
def pp_motion_machine(delay):
"""
This will execute itself every <delay> seconds
"""
global result
global trackchannel
global trackbot
results = last_game("26")
if results and (results != result) and trackchannel and trackbot:
result = results
trackbot.say(trackchannel, str(result))
reactor.callLater(delay, pp_motion_machine, delay)
def command_trackresults(bot, user, channel, args):
global trackbot
global trackchannel
trackbot = bot
trackchannel = channel
def command_ts(bot, user, channel, args):
if args.strip() != "":
team = get_team(args)
if team:
data = get_team_stats(team)
if not data:
bot.say(channel, 'Error in fetching data for: ' + str(args))
return
bot.say(channel, str(stats_of_team(data)) + ' | ' + str(results_url(team)))
else:
bot.say(channel, 'Team not found: ' + str(args))
def command_ps(bot, user, channel, args):
if args.strip() != "":
if len(args.split('@')) == 2:
team_string = args.split('@')[1]
player_string = args.split('@')[0]
else:
team_string = eanhlstats.settings.DEFAULT_TEAM
player_string = args
team = get_team(team_string)
if team:
player = get_player(player_string, team)
if not player:
bot.say(channel, 'Player ' + str(player_string) + ' not found from team: ' + str(team.name))
return
bot.say(channel, str(stats_of_player(player)))
else:
bot.say(channel, 'Team not found: ' + str(team_string))
def command_switch(bot, user, channel, args):
if eanhlstats.settings.SYSTEM == "PS3":
eanhlstats.settings.SYSTEM = "XBX"
bot.say(channel, 'Switched nhl stats to XBX')
else:
eanhlstats.settings.SYSTEM = "PS3"
bot.say(channel, 'Switched nhl stats to PS3')
def command_top(bot, user, channel, args):
if args.strip() != "":
team = get_team(args)
if team:
players = get_players(team)
if not players:
bot.say(channel, 'No player data found from team: ' + str(team.name))
return
output = top_players(players, 5)
bot.say(channel, str(output))
else:
bot.say(channel, 'Team not found: ' + str(args))
def command_results(bot, user, channel, args):
if args.strip() != "":
team = get_team(args)
if team:
results = last_games(5, team)
if not results:
bot.say(channel, 'No results found for team ' + str(team.name) + ' for today.')
return
bot.say(channel, str(results))
else:
bot.say(channel, 'Team not found: ' + str(args))
def command_find(bot, user, channel, args):
if args.strip() != "":
teams = find_teams_by_abbreviation(args, 10)
if teams:
if len(teams) > 1:
bot.say(channel, str(pretty_print_teams(teams, 10)))
return
elif len(teams) == 1:
data = get_team_stats(teams[0])
if not data:
bot.say(channel, 'Error in fetching data for: ' + str(teams[0].name))
return
bot.say(channel, str(stats_of_team(data)) + ' | ' + str(results_url(teams[0])))
return
bot.say(channel, 'Teams not found with: ' + str(args))
|
Python
| 0
|
@@ -530,10 +530,12 @@
ine(
-30
+60*5
)%0A%0Ad
|
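The module reschedules itself with reactor.callLater. This stdlib sketch shows the same self-rescheduling pattern without Twisted, with the interval bumped to the diff's 60*5 seconds; the poll body is elided.

import threading

def poll(delay):
    # ... fetch the latest result and announce it if it changed ...
    timer = threading.Timer(delay, poll, args=(delay,))
    timer.daemon = True  # don't keep the process alive just for polling
    timer.start()

poll(60 * 5)  # every 5 minutes, as in the diff (previously 30 seconds)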
df5c7cdd46d2d33391bce72acc8b9f4c6de2f708
|
fix inifile passing to halcmd
|
lib/python/machinekit/launcher.py
|
lib/python/machinekit/launcher.py
|
import os
import sys
from time import *
import subprocess
import signal
from machinekit import compat
_processes = []
# ends a running Machinekit session
def end_session():
stop_processes()
stop_realtime()
# checks whether a single command is available or not
def check_command(command):
process = subprocess.Popen('which ' + command, stdout=subprocess.PIPE,
shell=True)
process.wait()
if process.returncode != 0:
print((command + ' not found, check Machinekit installation'))
sys.exit(1)
# checks the whole Machinekit installation
def check_installation():
commands = ['realtime', 'configserver', 'halcmd', 'haltalk', 'webtalk']
for command in commands:
check_command(command)
# checks for a running session and cleans it up if necessary
def cleanup_session():
pids = []
commands = ['configserver', 'halcmd', 'haltalk', 'webtalk', 'rtapi']
process = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
out, err = process.communicate()
for line in out.splitlines():
for command in commands:
if command in line:
pid = int(line.split(None, 1)[0])
pids.append(pid)
if pids != []:
sys.stdout.write("cleaning up leftover session... ")
sys.stdout.flush()
subprocess.check_call('realtime stop', shell=True)
for pid in pids:
try:
os.kill(pid, signal.SIGTERM)
except OSError:
pass
sys.stdout.write('done\n')
# starts a command, waits for termination and checks the output
def check_process(command):
sys.stdout.write("running " + command.split(None, 1)[0] + "... ")
sys.stdout.flush()
subprocess.check_call(command, shell=True)
sys.stdout.write('done\n')
# starts and registers a process
def start_process(command, check=True, wait=1.0):
sys.stdout.write("starting " + command.split(None, 1)[0] + "... ")
sys.stdout.flush()
process = subprocess.Popen(command, shell=True)
process.command = command
if check:
sleep(wait)
process.poll()
if (process.returncode is not None):
raise subprocess.CalledProcessError(process.returncode, command, None)
_processes.append(process)
sys.stdout.write('done\n')
# stops a registered process by its name
def stop_process(command):
for process in _processes:
processCommand = process.command.split(None, 1)[0]
if command == processCommand:
sys.stdout.write('stopping ' + command + '... ')
sys.stdout.flush()
process.kill()
process.wait()
sys.stdout.write('done\n')
# stops all registered processes
def stop_processes():
for process in _processes:
sys.stdout.write('stopping ' + process.command.split(None, 1)[0]
+ '... ')
sys.stdout.flush()
process.terminate()
process.wait()
sys.stdout.write('done\n')
# loads a HAL configuration file
def load_hal_file(filename, ini=None):
sys.stdout.write("loading " + filename + '... ')
sys.stdout.flush()
command = 'halcmd -f ' + filename
if ini is not None:
command += ' -i ' + ini
subprocess.check_call(command, shell=True)
sys.stdout.write('done\n')
# loads a BBIO configuration file
def load_bbio_file(filename):
check_command('config-pin')
sys.stdout.write("loading " + filename + '... ')
sys.stdout.flush()
subprocess.check_call('config-pin -f ' + filename, shell=True)
sys.stdout.write('done\n')
# installs a comp RT component
def install_comp(filename):
install = True
base = os.path.splitext(os.path.basename(filename))[0]
flavor = compat.default_flavor()
moduleDir = compat.get_rtapi_config("RTLIB_DIR")
moduleName = flavor.name + '/' + base + flavor.mod_ext
modulePath = os.path.join(moduleDir, moduleName)
if os.path.exists(modulePath):
compTime = os.path.getmtime(filename)
moduleTime = os.path.getmtime(modulePath)
if (compTime < moduleTime):
install = False
if install is True:
sys.stdout.write("installing " + filename + '... ')
sys.stdout.flush()
if os.access(moduleDir, os.W_OK): # if we have write access we might not need sudo
subprocess.check_call('comp --install ' + filename, shell=True)
else:
subprocess.check_call('sudo comp --install ' + filename, shell=True)
sys.stdout.write('done\n')
# starts realtime
def start_realtime():
sys.stdout.write("starting realtime...")
sys.stdout.flush()
subprocess.check_call('realtime start', shell=True)
sys.stdout.write('done\n')
# stops realtime
def stop_realtime():
sys.stdout.write("stopping realtime... ")
sys.stdout.flush()
subprocess.check_call('realtime stop', shell=True)
sys.stdout.write('done\n')
# rip the Machinekit environment
def rip_environment(path=None, force=False):
if force is False and os.getenv('EMC2_PATH') is not None: # check if already ripped
return
if path is None:
command = None
scriptFilePath = os.environ['HOME'] + '/.bashrc'
if os.path.exists(scriptFilePath):
with open(scriptFilePath) as f: # use the bashrc
content = f.readlines()
for line in content:
if 'rip-environment' in line:
line = line.strip()
if (line[0] == '.'):
command = line
scriptFilePath = os.environ['HOME'] + '/machinekit/scripts/rip-environment'
if os.path.exists(scriptFilePath):
command = '. ' + scriptFilePath
if (command is None):
sys.stderr.write('Unable to rip environment')
sys.exit(1)
else:
command = '. ' + path + '/scripts/rip-environment'
process = subprocess.Popen(command + ' && env',
stdout=subprocess.PIPE,
shell=True)
for line in process.stdout:
(key, _, value) = line.partition('=')
os.environ[key] = value.rstrip()
sys.path.append(os.environ['PYTHONPATH'])
# checks the running processes and exits when exited
def check_processes():
for process in _processes:
process.poll()
if (process.returncode is not None):
_processes.remove(process)
end_session()
if (process.returncode != 0):
sys.exit(1)
else:
sys.exit(0)
# register exit signal handlers
def register_exit_handler():
signal.signal(signal.SIGINT, _exitHandler)
signal.signal(signal.SIGTERM, _exitHandler)
def _exitHandler(signum, frame):
end_session()
sys.exit(0)
# set the Machinekit debug level
def set_debug_level(level):
os.environ['DEBUG'] = str(level)
# set the Machinekit ini
def set_machinekit_ini(ini):
os.environ['MACHINEKIT_INI'] = ini
|
Python
| 0
|
@@ -3194,32 +3194,17 @@
'halcmd
- -f ' + filename
+'
%0A if
@@ -3252,16 +3252,50 @@
' + ini%0A
+ command += ' -f ' + filename%0A
subp
|
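The fix above reorders halcmd's arguments so -i <inifile> precedes -f <halfile>. This sketch just builds the corrected command string; the file names are illustrative.

def halcmd_command(filename, ini=None):
    command = 'halcmd'
    if ini is not None:
        command += ' -i ' + ini  # the inifile must come before the HAL file
    command += ' -f ' + filename
    return command

print(halcmd_command('core.hal', ini='machine.ini'))  # halcmd -i machine.ini -f core.hal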
205483ef0241ee48821a801a97faa4d19121651a
|
check buffer, use own exception
|
pyscreenshot/plugins/gdk3pixbuf.py
|
pyscreenshot/plugins/gdk3pixbuf.py
|
# -*- coding: utf-8 -*-
"""Gdk3-based screenshotting.
Adapted from https://stackoverflow.com/a/37768950/81636, but uses
buffers directly instead of saving intermediate files (which is slow).
"""
import sys
from PIL import Image
from pyscreenshot.plugins.backend import CBackend
from pyscreenshot.util import platform_is_osx
class Gdk3BackendError(Exception):
pass
class Gdk3PixbufWrapper(CBackend):
name = "pygdk3"
childprocess = False
def __init__(self):
pass
def grab(self, bbox=None):
"""Grabs an image directly to a buffer.
:param bbox: Optional tuple or list containing (x1, y1, x2, y2) coordinates
of sub-region to capture.
:return: PIL RGB image
:raises: ValueError, if image data does not have 3 channels (RGB), each with 8
bits.
:rtype: Image
"""
if platform_is_osx():
raise Gdk3BackendError("osx not supported") # TODO
import gi
gi.require_version("Gdk", "3.0")
# gi.require_version('GdkPixbuf', '2.0')
from gi.repository import Gdk
from gi.repository import GdkPixbuf
# read_pixel_bytes: New in version 2.32.
if GdkPixbuf.PIXBUF_MAJOR == 2:
if GdkPixbuf.PIXBUF_MINOR < 32:
raise ValueError(
"GdkPixbuf min supported version: 2.32 current:"
+ GdkPixbuf.PIXBUF_VERSION
)
w = Gdk.get_default_root_window()
if bbox is not None:
g = [bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]]
else:
g = w.get_geometry()
pb = Gdk.pixbuf_get_from_window(w, *g)
if pb.get_bits_per_sample() != 8:
raise ValueError("Expected 8 bits per pixel.")
elif pb.get_n_channels() != 3:
raise ValueError("Expected RGB image.")
# Read the entire buffer into a python bytes object.
# read_pixel_bytes: New in version 2.32.
pixel_bytes = pb.read_pixel_bytes().get_data() # type: bytes
width, height = g[2], g[3]
# Probably for SSE alignment reasons, the pixbuf has extra data in each line.
# The args after "raw" help handle this; see
# http://effbot.org/imagingbook/decoder.htm#the-raw-decoder
return Image.frombytes(
"RGB", (width, height), pixel_bytes, "raw", "RGB", pb.get_rowstride(), 1
)
def backend_version(self):
import gi
return ".".join(map(str, gi.version_info))
|
Python
| 0
|
@@ -1686,16 +1686,87 @@
(w, *g)%0A
+ if not pb:%0A raise Gdk3BackendError(%22empty buffer%22)%0A%0A
@@ -1809,37 +1809,43 @@
raise
-Value
+Gdk3Backend
Error(%22Expected
@@ -1917,29 +1917,35 @@
raise
-Value
+Gdk3Backend
Error(%22Expec
|
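The diff guards against an empty pixbuf and swaps ValueError for the backend's own exception. This sketch reproduces those checks against a minimal fake pixbuf; the two getters only mimic GdkPixbuf's names.

class Gdk3BackendError(Exception):
    pass

class _FakePixbuf:
    def get_bits_per_sample(self):
        return 8
    def get_n_channels(self):
        return 3

def validate_pixbuf(pb):
    if not pb:
        raise Gdk3BackendError("empty buffer")
    if pb.get_bits_per_sample() != 8:
        raise Gdk3BackendError("Expected 8 bits per pixel.")
    if pb.get_n_channels() != 3:
        raise Gdk3BackendError("Expected RGB image.")
    return pb

validate_pixbuf(_FakePixbuf())  # passes
try:
    validate_pixbuf(None)
except Gdk3BackendError as e:
    print(e)  # empty buffer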
6880efb6231cdf8d7dd19f9ef517006e29f7afeb
|
Make documentaion of logpowspec match behaviour
|
python_speech_features/sigproc.py
|
python_speech_features/sigproc.py
|
# This file includes routines for basic signal processing including framing and computing power spectra.
# Author: James Lyons 2012
import decimal
import numpy
import math
import logging
def round_half_up(number):
return int(decimal.Decimal(number).quantize(decimal.Decimal('1'), rounding=decimal.ROUND_HALF_UP))
def framesig(sig,frame_len,frame_step,winfunc=lambda x:numpy.ones((x,))):
"""Frame a signal into overlapping frames.
:param sig: the audio signal to frame.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:returns: an array of frames. Size is NUMFRAMES by frame_len.
"""
slen = len(sig)
frame_len = int(round_half_up(frame_len))
frame_step = int(round_half_up(frame_step))
if slen <= frame_len:
numframes = 1
else:
numframes = 1 + int(math.ceil((1.0*slen - frame_len)/frame_step))
padlen = int((numframes-1)*frame_step + frame_len)
zeros = numpy.zeros((padlen - slen,))
padsignal = numpy.concatenate((sig,zeros))
indices = numpy.tile(numpy.arange(0,frame_len),(numframes,1)) + numpy.tile(numpy.arange(0,numframes*frame_step,frame_step),(frame_len,1)).T
indices = numpy.array(indices,dtype=numpy.int32)
frames = padsignal[indices]
win = numpy.tile(winfunc(frame_len),(numframes,1))
return frames*win
def deframesig(frames,siglen,frame_len,frame_step,winfunc=lambda x:numpy.ones((x,))):
"""Does overlap-add procedure to undo the action of framesig.
:param frames: the array of frames.
:param siglen: the length of the desired signal, use 0 if unknown. Output will be truncated to siglen samples.
:param frame_len: length of each frame measured in samples.
:param frame_step: number of samples after the start of the previous frame that the next frame should begin.
:param winfunc: the analysis window to apply to each frame. By default no window is applied.
:returns: a 1-D signal.
"""
frame_len = round_half_up(frame_len)
frame_step = round_half_up(frame_step)
numframes = numpy.shape(frames)[0]
assert numpy.shape(frames)[1] == frame_len, '"frames" matrix is wrong size, 2nd dim is not equal to frame_len'
indices = numpy.tile(numpy.arange(0,frame_len),(numframes,1)) + numpy.tile(numpy.arange(0,numframes*frame_step,frame_step),(frame_len,1)).T
indices = numpy.array(indices,dtype=numpy.int32)
padlen = (numframes-1)*frame_step + frame_len
if siglen <= 0: siglen = padlen
rec_signal = numpy.zeros((padlen,))
window_correction = numpy.zeros((padlen,))
win = winfunc(frame_len)
for i in range(0,numframes):
window_correction[indices[i,:]] = window_correction[indices[i,:]] + win + 1e-15 #add a little bit so it is never zero
rec_signal[indices[i,:]] = rec_signal[indices[i,:]] + frames[i,:]
rec_signal = rec_signal/window_correction
return rec_signal[0:siglen]
def magspec(frames,NFFT):
"""Compute the magnitude spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the magnitude spectrum of the corresponding frame.
"""
if numpy.shape(frames)[1] > NFFT:
logging.warn('frame length (%d) is greater than FFT size (%d), frame will be truncated. Increase NFFT to avoid.', numpy.shape(frames)[1], NFFT)
complex_spec = numpy.fft.rfft(frames,NFFT)
return numpy.absolute(complex_spec)
def powspec(frames,NFFT):
"""Compute the power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the power spectrum of the corresponding frame.
"""
return 1.0/NFFT * numpy.square(magspec(frames,NFFT))
def logpowspec(frames,NFFT,norm=1):
"""Compute the log power spectrum of each frame in frames. If frames is an NxD matrix, output will be Nx(NFFT/2+1).
:param frames: the array of frames. Each row is a frame.
:param NFFT: the FFT length to use. If NFFT > frame_len, the frames are zero-padded.
:param norm: If norm=1, the log power spectrum is normalised so that the max value (across all frames) is 1.
:returns: If frames is an NxD matrix, output will be Nx(NFFT/2+1). Each row will be the log power spectrum of the corresponding frame.
"""
ps = powspec(frames,NFFT)
ps[ps<=1e-30] = 1e-30
lps = 10*numpy.log10(ps)
if norm:
return lps - numpy.max(lps)
else:
return lps
def preemphasis(signal,coeff=0.95):
"""perform preemphasis on the input signal.
:param signal: The signal to filter.
:param coeff: The preemphasis coefficient. 0 is no filter, default is 0.95.
:returns: the filtered signal.
"""
return numpy.append(signal[0],signal[1:]-coeff*signal[:-1])
|
Python
| 0
|
@@ -4790,17 +4790,17 @@
mes) is
-1
+0
.%0A :r
|
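The doc fix records that normalisation makes the maximum 0 (in dB), not 1, since the code subtracts numpy.max(lps). A quick worked check with made-up power values:

import numpy as np

ps = np.array([[1e-3, 1e-2], [1e-1, 1.0]])
lps = 10 * np.log10(ps)
print((lps - np.max(lps)).max())  # 0.0 -- the normalised maximum across all frames is 0, not 1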
8b34cf20b9cc010d321912433d772ccad8dbdb6f
|
Update MediaWiki version for i18n_family.py from trunk r8823
|
pywikibot/families/i18n_family.py
|
pywikibot/families/i18n_family.py
|
# -*- coding: utf-8 -*-
__version__ = '$Id$'
from pywikibot import family
# The Wikimedia i18n family
class Family(family.Family):
def __init__(self):
family.Family.__init__(self)
self.name = 'i18n'
self.langs = {
'i18n': 'translatewiki.net',
}
def version(self, code):
return "1.17alpha"
|
Python
| 0.000001
|
@@ -345,9 +345,9 @@
%221.1
-7
+8
alph
|
aeeed413830f58c60ab4b05beddf44ab4dba5e36
|
Update views.py
|
chat/views.py
|
chat/views.py
|
import random
import string
from django.db import transaction
from django.shortcuts import render, redirect
import haikunator
from .models import Room
def about(request):
return render(request, "chat/about.html")
def new_room(request):
"""
Randomly create a new room, and redirect to it.
"""
new_room = None
while not new_room:
with transaction.atomic():
label = haikunator.haikunate()
if Room.objects.filter(label=label).exists():
continue
new_room = Room.objects.create(label=label)
return redirect(chat_room, label=label)
def chat_room(request, label):
"""
Room view - show the room, with latest messages.
The template for this view has the WebSocket business to send and stream
messages, so see the template for where the magic happens.
"""
# If the room with the given label doesn't exist, automatically create it
# upon first visit (a la etherpad).
room, created = Room.objects.get_or_create(label=label)
# We want to show the last 50 messages, ordered most-recent-last
messages = (room.messages.order_by('-timestamp')[:1])
return render(request, "chat/room.html", {
'room': room,
'messages': messages,
})
|
Python
| 0
|
@@ -1153,16 +1153,17 @@
amp')%5B:1
+0
%5D)%0A%0A
|
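The query above fetches newest-first ('-timestamp') while the comment wants most-recent-last, so a display layer would slice then reverse. A plain-list sketch of that step; the integers stand in for timestamped rows.

rows = list(range(100))          # oldest .. newest
newest_first = rows[::-1][:10]   # what order_by('-timestamp')[:10] yields
most_recent_last = newest_first[::-1]
print(most_recent_last)          # [90, 91, ..., 99]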
ae996ab0bba11047d6317e18e54e926097b6759d
|
Add other job/mol settings when running jobs
|
chem/utils.py
|
chem/utils.py
|
import time
import bz2
import zipfile
import tarfile
import re
import collections
from models import ErrorReport
from chemtools import gjfwriter
from chemtools import fileparser
from chemtools.constants import KEYWORDS
from chemtools.ml import get_properties_from_decay_with_predictions, \
get_naive_feature_vector, \
get_decay_feature_vector, \
get_decay_distance_correction_feature_vector
from chemtools.mol_name import name_expansion, get_exact_name
from chemtools.interface import get_property_limits
from data.models import DataPoint
from cluster.interface import run_job, run_jobs
from project.utils import StringIO
def get_molecule_warnings(name):
try:
exact_spacers = get_exact_name(name, spacers=True)
error = None
except Exception as e:
exact_spacers = ''
error = str(e)
warn = ErrorReport.objects.filter(molecule=name)
warning = True if warn else None
exact_name = exact_spacers.replace('*', '')
new = not DataPoint.objects.filter(exact_name=exact_name).exists()
return exact_spacers, warning, error, new
def get_multi_molecule_warnings(string):
molecules = name_expansion(string)
new_molecules = collections.OrderedDict()
start = time.time()
for name in molecules:
if time.time() - start > 1:
raise ValueError("The operation has timed out.")
exact_spacer, warning, error, new = get_molecule_warnings(name)
if exact_spacer not in new_molecules:
new_molecules[exact_spacer] = [name, warning, error, new]
return zip(*new_molecules.values())
def get_molecule_info(molecule):
exactspacer, warning, error, new = get_molecule_warnings(molecule)
exactname = exactspacer.replace('*', '')
features = ['', '', '']
homo, lumo, gap = None, None, None
datapoint = None
if not error:
try:
features = [
get_naive_feature_vector(exactspacer),
get_decay_feature_vector(exactspacer),
get_decay_distance_correction_feature_vector(exactspacer),
]
homo, lumo, gap = get_properties_from_decay_with_predictions(
features[1]
)
except ValueError:
# multi core and other non-ML structures
pass
temp = DataPoint.objects.filter(exact_name=exactname,
band_gap__isnull=False).values()
if temp:
datapoint = temp[0]
limits = get_property_limits(molecule)
a = {
"molecule": molecule,
"exact_name": exactname,
"exact_name_spacers": exactspacer,
"features": features,
"datapoint": datapoint,
"homo": homo,
"lumo": lumo,
"band_gap": gap,
"new": new,
"known_errors": warning,
"error_message": error,
"limits": limits,
}
return a
def run_standard_jobs(credential, string, mol_settings, job_settings):
results = {
"worked": [],
"failed": [],
"error": None,
}
try:
results["cluster"] = credential.cluster.name
if not credential.user.is_staff:
results["error"] = "You must be a staff user to submit a job."
return results
except:
results["error"] = "Invalid credential"
results["cluster"] = None
return results
names = []
gjfs = []
for mol in name_expansion(string):
try:
out = gjfwriter.Benzobisazole(mol, **mol_settings)
names.append(mol)
gjfs.append(out.get_gjf())
except Exception as e:
results["failed"].append((mol, str(e)))
continue
if names:
temp = run_jobs(credential, names, gjfs, **job_settings)
results["worked"] = temp["worked"]
results["failed"].extend(temp["failed"])
results["error"] = temp["error"]
return results
def parse_file_list(files):
for f in files:
if f.name.endswith(".zip"):
with zipfile.ZipFile(f, "r") as zfile:
names = [x for x in zfile.namelist() if not x.endswith("/")]
for name in names:
newfile = StringIO(zfile.open(name).read(), name=name)
yield newfile
elif f.name.endswith(".tar.bz2") or f.name.endswith(".tar.gz"):
end = f.name.split(".")[-1]
with tarfile.open(fileobj=f, mode='r:' + end) as tfile:
for name in tfile.getnames():
if tfile.getmember(name).isfile():
newfile = StringIO(tfile.extractfile(name).read(),
name=name)
yield newfile
else:
yield f
def find_sets(files):
logs = []
datasets = []
for f in files:
if f.name.endswith(".log"):
logs.append(f)
else:
datasets.append(f)
logsets = {}
for f in logs:
nums = re.findall(r'n(\d+)', f.name)
if not nums:
continue
num = nums[-1]
name = f.name.replace(".log", '').replace("n%s" % num, '')
if name in logsets.keys():
logsets[name].append((num, f))
else:
logsets[name] = [(num, f)]
return logsets, datasets
def convert_logs(logsets):
converted = []
for key in logsets:
nvals = []
homovals = []
lumovals = []
gapvals = []
for num, log in logsets[key]:
parser = fileparser.Log(log)
nvals.append(num)
homovals.append(parser["HOMO"])
lumovals.append(parser["LUMO"])
gapvals.append(parser["BandGap"])
f = StringIO(name=key)
f.write(', '.join(nvals) + '\n')
f.write(', '.join(homovals) + '\n')
f.write(', '.join(lumovals) + '\n')
f.write(', '.join(gapvals) + '\n')
f.seek(0)
converted.append(f)
return converted
|
Python
| 0
|
@@ -3912,32 +3912,111 @@
%0A%0A if names:%0A
+ settings = %7Bk: v for k,v in job_settings.items()+mol_settings.items()%7D%0A
temp = r
@@ -4050,20 +4050,16 @@
gjfs, **
-job_
settings
|
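The diff merges the two settings dicts with Python 2's list-returning items(). A hedged equivalent that also runs on Python 3, with illustrative keys; note the later dict wins on collisions.

job_settings = {'nproc': 4, 'queue': 'short'}
mol_settings = {'nm': 10, 'queue': 'long'}
settings = {k: v for k, v in list(job_settings.items()) + list(mol_settings.items())}
print(settings)  # {'nproc': 4, 'queue': 'long', 'nm': 10}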
ba326b959a93b860da2b84ddc09250913239e6d0
|
reduce show query precedence
|
chrisjones.py
|
chrisjones.py
|
"""
This module is the brain of the Chris Jones Bot. It handles queries and constructs responses
"""
import requests
import json
import elastic
from elastic import ES_URL
from google_nlp_api import GoogleNlp
import re
from QueryAnalyzer import QueryAnalyzer
import random
from fuzzywuzzy import process
from default_query import DefaultQuery
from default_query import PersonThoughtsQuery
from theater_query import TheaterQuery
from sentiment_query import SentimentQuery
import pdb
class ChrisJones:
"""
This is the main class of the Chris Jones Bot
"""
def __init__(self):
self.query_analyzer = QueryAnalyzer()
self.sentiment_selector = SentimentQuery()
print 'ChrisJones activated'
def respond(self, query):
"""
Main wrapper function for Chris Jones bot. Takes a query and returns a response.
args:
query (string): A text query from the user
return:
response (string): A string (possibly with markdown formatting)
"""
# Get query annotations
annotated_query = self.query_analyzer.annotate(query)
print annotated_query.get_framework()
# TODO - change uses of `query` to annotated_query.query to cut back on the number of arguments we have to pass around
# Route to correct query handler
if (query == 'how do you like your comedy'):
return DefaultQuery().generate_response(query, annotated_query)
elif len(annotated_query.genres) > 0:
# Genre query handler
### 'how do you like your GENRE',
# Write better handler when we get more genre questions
print 'Genre Query'
return DefaultQuery().generate_response(query, annotated_query)
elif len(annotated_query.theaters) > 0:
# Theater-related questions
print 'Theater Query'
router = {
'what was your favorite show at THEATER': lambda x,y: TheaterQuery().generate_response(x, y),
'How has THEATER changed over time': lambda x,y: TheaterQuery().generate_response(x, y),
'I want to go to THEATER. Do you think it is good': lambda x,y: TheaterQuery().generate_response(x, y)
}
# Find the closest question type and use it to access handler
return self.call_handler(router, query, annotated_query)
elif len(annotated_query.shows) > 0:
# Show related question types
print 'Show Query'
router = {
'what did you think of SHOW': lambda x,y: DefaultQuery().generate_response(x, y),
'what do you think is the best SHOW right now': lambda x,y: DefaultQuery().generate_response(x, y),
'what do you think of NOUN in SHOW': lambda x,y: DefaultQuery().generate_response(x, y)
}
# Find the closest question type and use it to access handler
return self.call_handler(router, query, annotated_query)
elif len(annotated_query.people) > 0:
# People-related questions
print 'People Query'
router = {
'what was PERSON best performance': lambda x,y: PersonThoughtsQuery().generate_response_best_performance(x, y),
'do you think PERSON is a good NOUN': lambda x,y: PersonThoughtsQuery().generate_response_good_noun(x, y),
'what do you think of PERSON': lambda x,y: PersonThoughtsQuery().generate_response(x, y)
}
# Find the closest question type and use it to access handler
return self.call_handler(router, query, annotated_query)
elif any(re.search(i, query) != None for i in ['like', 'dislike', 'love', 'hate']):
# TODO - Determine a more satisfying way to kick off this handler, perhaps it should just be more specific
# Sentiment Aggregation query handler
print 'Sentiment Query'
router = {('do you ' + d + ' ' + t):\
(lambda x, y: self.sentiment_selector.generate_response(x, y)) \
for t in ['THEATER', 'SHOW', 'GENRE', 'PERSON'] \
for d in ['like', 'dislike', 'hate', 'love']}
return self.call_handler(router, query, annotated_query)
elif any(re.search(i, query) != None for i in ['Chicago', 'chicago', 'New York', 'NYC']):
# Location and/or Chicago-based questions
print 'Location/Chicago Query'
router = {
'what embodies the essence of chicago theater': lambda x,y: DefaultQuery().generate_response(x, y),
'how is chicago different from New York?': lambda x,y: DefaultQuery().generate_response(x, y)
}
# Find the closest question type and use it to access handler
return self.call_handler(router, query, annotated_query)
else:
print 'Default Query'
### What do you think is good NOUN
return DefaultQuery().generate_response(query, annotated_query)
def call_handler(self, router, query, annotated_query):
"""
Helper function to use with query router dictionary switch statements
args:
router (dictionary): a dictionary with question frameworks as keys and implementations as values
query (string): a string with the user's query
annotated_query (AnnotatedQuery): an AnnotatedQuery corresponding to the user's query
return:
response (string): a response string, perhaps with markdown formatting
"""
question_type = process.extractOne(annotated_query.get_framework(), router.keys())[0]
return router[question_type](query, annotated_query)
if __name__ == '__main__':
# Work on query routing now
cj = ChrisJones()
query = 'How has the Goodman Theatre changed over time'
qa = QueryAnalyzer()
annotated_query = qa.get_keywords(query)
print cj.route_query(query, annotated_query)
|
Python
| 0.999482
|
@@ -2396,624 +2396,8 @@
y)%0A%0A
- elif len(annotated_query.shows) %3E 0:%0A # Show related question types%0A print 'Show Query'%0A router = %7B%0A 'what did you think of SHOW': lambda x,y: DefaultQuery().generate_response(x, y),%0A 'what do you think is the best SHOW right now': lambda x,y: DefaultQuery().generate_response(x, y),%0A 'what do you think of NOUN in SHOW': lambda x,y: DefaultQuery().generate_response(x, y)%0A %7D%0A # Find the closest question type and use it to access handler%0A return self.call_handler(router, query, annotated_query)%0A
%0A%0A
@@ -3674,32 +3674,649 @@
notated_query)%0A%0A
+ elif len(annotated_query.shows) %3E 0:%0A # Show related question types%0A print 'Show Query'%0A router = %7B%0A 'what did you think of SHOW': lambda x,y: DefaultQuery().generate_response(x, y),%0A 'what do you think is the best SHOW right now': lambda x,y: DefaultQuery().generate_response(x, y),%0A 'what do you think of NOUN in SHOW': lambda x,y: DefaultQuery().generate_response(x, y)%0A %7D%0A # Find the closest question type and use it to access handler%0A return self.call_handler(router, query, annotated_query)%0A%0A
elif any
|
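call_handler fuzzy-matches the question framework to the closest router key and dispatches to it. This sketch uses stdlib difflib in place of fuzzywuzzy's process.extractOne; the handlers and query are toys.

import difflib

router = {
    'what do you think of PERSON': lambda q: 'person handler: ' + q,
    'what was your favorite show at THEATER': lambda q: 'theater handler: ' + q,
}
framework = 'what do u think of PERSON'
key = difflib.get_close_matches(framework, list(router.keys()), n=1)[0]
print(router[key](framework))  # person handler: what do u think of PERSON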
91e745e83cb6cfd60bc0a90cb6203d9dd07d28de
|
Implement simple file size listing
|
cloud-fuse.py
|
cloud-fuse.py
|
#
# @file cloud-fuse.py
#
# @brief Main entrypoint into the cloud-fuse software.
#
from __future__ import print_function, absolute_import, division
import logging
import math
import sqlite3
import os
import md5
from errno import ENOENT
from stat import S_IFDIR, S_IFREG
from sys import argv, exit
from time import time
from sqlalchemy import Column, String, Integer, ForeignKey, create_engine
from sqlalchemy.orm import relationship, backref, sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from fuse import FUSE, FuseOSError, Operations, LoggingMixIn, fuse_get_context
# Base from sqlalchemy orm so that we can derive classes from it.
Base = declarative_base()
# Holds information about specific files. Soon to be replaced with a more inode-like system.
class File(Base):
__tablename__ = 'files'
id = Column(Integer, primary_key=True)
path = Column(String)
name = Column(String)
permissions = Column(Integer)
size = Column(Integer)
# Main class passed to fuse - this is where we define the functions that are called by fuse.
class Context(LoggingMixIn, Operations):
# Remove the first character ('/') from path.
#
# @FIXME: Should check that the first character is actually / so that if it is called twice on the same string it does not take two characters off the front.
def preparePath(self, path):
return path[1:]
def listOfFileNames(self):
knownFiles = []
for file in session.query(File).order_by(File.id):
knownFiles.append(file.name)
return knownFiles
def getBlockRoot(self, path):
md5Instance = md5.new()
md5Instance.update(path)
return 'data/files/{}/blocks/'.format(md5Instance.hexdigest())
def listBlocks(self, path):
blockRoot = self.getBlockRoot(path)
blocks = os.listdir(blockRoot)
return len(blocks)
def addFile(self, path):
newFile = File(path=path, name=path, permissions=777, size=0)
session.add(newFile)
session.commit()
return newFile.id
def fileExists(self, path):
print("Checking if {} exists".format(self.preparePath(path)))
fileCountQuery = session.query(File).filter_by(path=self.preparePath(path))
fileCount = fileCountQuery.count()
print("Database query returned {}".format(fileCount))
if fileCount == 0:
print("File Does Not Exist")
return False
elif fileCount == 1:
print("File Exists")
return True
else:
print("Something unexpected happened - we should not have more than one file")
# Return true to stop another duplicate file being added
return True
def getFile(self, path):
return session.query(File).order_by(File.id).filter(File.path == self.preparePath(path)).one()
def removexattr(self, att1, att2):
return 0
def getattr(self, path, fh=None):
uid, gid, pid = fuse_get_context()
if self.preparePath(path) in self.listOfFileNames():
attr = dict(st_mode=(S_IFREG | 0o755), st_nlink=2)
elif path == '/':
attr = dict(st_mode=(S_IFDIR | 0o755), st_nlink=2)
else:
raise FuseOSError(ENOENT)
attr['st_ctime'] = attr['st_mtime'] = time()
return attr
def truncate(self, path, length, fh=None):
blockPath = self.getBlockRoot(path)
print("Deleting all files in: {}".format(blockPath))
for f in os.listdir(blockPath):
os.remove(blockPath+f)
def read(self, path, size, offset, fh):
if not self.preparePath(path) in self.listOfFileNames():
raise RuntimeError('unexpected path: %r' % path)
offsetFromFirstBlock=offset%512
firstBlock=int(math.ceil(offset/512))
numberOfBlocks=int(math.ceil((offsetFromFirstBlock+size)/512))
if numberOfBlocks > self.listBlocks(path) :
numberOfBlocks = self.listBlocks(path)
if offset == 0:
firstBlock = 1
for i in range(firstBlock, firstBlock+numberOfBlocks):
if(i == firstBlock):
bytesToRead=512-offsetFromFirstBlock
offsetForBlock=offsetFromFirstBlock
elif(i == firstBlock+numberOfBlocks):
bytesToRead=512-(512-offsetFromFirstBlock)
offsetForBlock=0
else:
bytesToRead=512
offsetForBlock=0
print("Would read {} bytes from block #{} at offset {}".format(bytesToRead, i, offsetForBlock))
blockPath = self.getBlockRoot(path)
print("Reading {} bytes from {} at offset {}".format(bytesToRead, self.getBlockRoot(path)+str(i), offsetForBlock))
f = open(self.getBlockRoot(path)+str(i), 'r')
f.seek(offsetForBlock)
blockContentsFromOffset = f.read(bytesToRead)
print("Would return: {}".format(blockContentsFromOffset))
return blockContentsFromOffset
def readdir(self, path, fh):
return ['.', '..'] + self.listOfFileNames()
def mkdir(self, path, mode):
print("do nothing")
def create(self, path, mode):
print("Create called")
if not self.fileExists(path):
self.addFile(path[1:])
return self.getFile(path[1:]).id
return os.EEXIST
def open(self, path, flags):
# NOT a real fd - but will do for simple testing
return self.getFile(path).id
def write(self, path, data, offset, fh):
blockPath = self.getBlockRoot(path)
if not os.path.exists(blockPath):
os.makedirs(blockPath)
blockSize = 512
firstBlock = int(math.ceil(offset/512))
firstBlockOffset = int(offset%512)
if offset == 0:
firstBlock = 1
print("Writing data {} of size {} to block {} at offset {}".format(data, len(data), firstBlock, firstBlockOffset))
f = os.open(blockPath+str(firstBlock), os.O_CREAT | os.O_WRONLY)
with os.fdopen(f, 'w') as file_obj:
file_obj.seek(firstBlockOffset)
file_obj.write(data)
return len(data)
if __name__ == '__main__':
if len(argv) != 2:
print('usage: %s <mountpoint>' % argv[0])
exit(1)
logging.basicConfig(level=logging.DEBUG)
engine = create_engine('sqlite:///', echo=True)
sessionMaker = sessionmaker()
sessionMaker.configure(bind=engine)
Base.metadata.create_all(engine)
session = sessionMaker()
fuse = FUSE(Context(), argv[1], ro=False, foreground=True, nothreads=True)
|
Python
| 0.000001
|
@@ -1983,16 +1983,247 @@
locks)%0A%0A
+ def getSizeOfFile(self, path):%0A totalSize = 0%0A blockRoot = self.getBlockRoot(path)%0A%0A for block in os.listdir(blockRoot):%0A totalSize += os.path.getsize(blockRoot+block)%0A%0A return totalSize%0A%0A
def
@@ -3452,32 +3452,66 @@
755), st_nlink=2
+, st_size=self.getSizeOfFile(path)
)%0A elif p
|
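The diff computes a file's apparent size by summing the sizes of its block files. A standalone sketch of that sum, exercised against a throwaway directory with two fake blocks:

import os
import tempfile

def get_size_of_file(block_root):
    # A file's size is the sum of the sizes of its block files.
    return sum(os.path.getsize(os.path.join(block_root, name))
               for name in os.listdir(block_root))

root = tempfile.mkdtemp()
for i, payload in enumerate((b'a' * 512, b'b' * 100), start=1):
    with open(os.path.join(root, str(i)), 'wb') as f:
        f.write(payload)
print(get_size_of_file(root))  # 612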
f66860c2015bb684d3789f3ce7a996e05521960b
|
fix clock pin constraint
|
litex_boards/platforms/zybo_z7.py
|
litex_boards/platforms/zybo_z7.py
|
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2019-2020 Florent Kermarrec <florent@enjoy-digital.fr>
# SPDX-License-Identifier: BSD-2-Clause
from litex.build.generic_platform import *
from litex.build.xilinx import XilinxPlatform, VivadoProgrammer
# IOs ----------------------------------------------------------------------------------------------
_io = [
# Clk / Rst
("clk125", 0, Pins("L16"), IOStandard("LVCMOS33")),
# Leds
("user_led", 0, Pins("M14"), IOStandard("LVCMOS33")),
("user_led", 1, Pins("M15"), IOStandard("LVCMOS33")),
("user_led", 2, Pins("G14"), IOStandard("LVCMOS33")),
("user_led", 3, Pins("D18"), IOStandard("LVCMOS33")),
# Switches
("user_sw", 0, Pins("G15"), IOStandard("LVCMOS33")),
("user_sw", 1, Pins("P15"), IOStandard("LVCMOS33")),
("user_sw", 2, Pins("W13"), IOStandard("LVCMOS33")),
("user_sw", 3, Pins("T16"), IOStandard("LVCMOS33")),
# Buttons
("user_btn", 0, Pins("R18"), IOStandard("LVCMOS33")),
("user_btn", 1, Pins("P16"), IOStandard("LVCMOS33")),
("user_btn", 2, Pins("V16"), IOStandard("LVCMOS33")),
("user_btn", 3, Pins("Y16"), IOStandard("LVCMOS33")),
# Serial
("serial", 0,
Subsignal("tx", Pins("T17")),
Subsignal("rx", Pins("Y17")),
IOStandard("LVCMOS33")
),
]
_ps7_io = [
# PS7
("ps7_clk", 0, Pins(1)),
("ps7_porb", 0, Pins(1)),
("ps7_srstb", 0, Pins(1)),
("ps7_mio", 0, Pins(54)),
("ps7_ddram", 0,
Subsignal("addr", Pins(15)),
Subsignal("ba", Pins(3)),
Subsignal("cas_n", Pins(1)),
Subsignal("ck_n", Pins(1)),
Subsignal("ck_p", Pins(1)),
Subsignal("cke", Pins(1)),
Subsignal("cs_n", Pins(1)),
Subsignal("dm", Pins(4)),
Subsignal("dq", Pins(32)),
Subsignal("dqs_n", Pins(4)),
Subsignal("dqs_p", Pins(4)),
Subsignal("odt", Pins(1)),
Subsignal("ras_n", Pins(1)),
Subsignal("reset_n", Pins(1)),
Subsignal("we_n", Pins(1)),
Subsignal("vrn", Pins(1)),
Subsignal("vrp", Pins(1)),
),
]
_usb_uart_pmod_io = [
# USB-UART PMOD on JB:
# - https://store.digilentinc.com/pmod-usbuart-usb-to-uart-interface/
("usb_uart", 0,
Subsignal("tx", Pins("pmodb:1")),
Subsignal("rx", Pins("pmodb:2")),
IOStandard("LVCMOS33")
),
]
# Connectors ---------------------------------------------------------------------------------------
_connectors = [
("pmoda", "N15 L14 K16 K14 N16 L15 J16 J14"), # XADC
("pmodb", "T20 U20 V20 W20 Y18 Y19 W18 W19"),
("pmodc", "V15 W15 T11 T10 W14 Y14 T12 U12"),
("pmodd", "T14 T15 P14 R14 U14 U15 V17 V18"),
("pmode", "V12 W16 J15 H15 V13 U17 T17 Y17"),
]
# Platform -----------------------------------------------------------------------------------------
class Platform(XilinxPlatform):
default_clk_name = "clk125"
default_clk_period = 1e9/125e6
def __init__(self):
XilinxPlatform.__init__(self, "xc7z010-clg400-1", _io, _connectors, toolchain="vivado")
self.add_extension(_ps7_io)
self.add_extension(_usb_uart_pmod_io)
def create_programmer(self):
return VivadoProgrammer()
def do_finalize(self, fragment):
XilinxPlatform.do_finalize(self, fragment)
self.add_period_constraint(self.lookup_request("clk125", loose=True), 1e9/125e6)
|
Python
| 0
|
@@ -408,11 +408,11 @@
ns(%22
-L16
+K17
%22),
|
4a5d5318d7f965d520b183ea489e64573444d4b8
|
fix csr & reset region
|
litex_boards/targets/redpitaya.py
|
litex_boards/targets/redpitaya.py
|
#!/usr/bin/env python3
#
# This file is part of LiteX-Boards.
#
# Copyright (c) 2020 Gwenhael Goavec-Merou <gwenhael.goavec-merou@trabucayre.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
from migen import *
from litex_boards.platforms import redpitaya
from litex.build.xilinx.vivado import vivado_build_args, vivado_build_argdict
from litex.soc.interconnect import axi
from litex.soc.interconnect import wishbone
from litex.soc.cores.clock import *
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.cores.led import LedChaser
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq, use_ps7_clk=False):
self.rst = Signal()
self.clock_domains.cd_sys = ClockDomain()
# # #
if use_ps7_clk:
assert sys_clk_freq == 125e6
self.comb += ClockSignal("sys").eq(ClockSignal("ps7"))
self.comb += ResetSignal("sys").eq(ResetSignal("ps7") | self.rst)
else:
self.submodules.pll = pll = S7PLL(speedgrade=-1)
self.comb += pll.reset.eq(self.rst)
pll.register_clkin(platform.request(platform.default_clk_name), platform.default_clk_freq)
pll.create_clkout(self.cd_sys, sys_clk_freq)
platform.add_false_path_constraints(self.cd_sys.clk, pll.clkin) # Ignore sys_clk to pll.clkin path created by SoC's rst.
# BaseSoC ------------------------------------------------------------------------------------------
class BaseSoC(SoCCore):
def __init__(self, board, sys_clk_freq=int(100e6), with_led_chaser=True, **kwargs):
platform = redpitaya.Platform(board)
# CRG --------------------------------------------------------------------------------------
use_ps7_clk = (kwargs.get("cpu_type", None) == "zynq7000")
sys_clk_freq = 125e6 if use_ps7_clk else sys_clk_freq
self.submodules.crg = _CRG(platform, sys_clk_freq, use_ps7_clk)
# SoCCore ----------------------------------------------------------------------------------
if kwargs["uart_name"] == "serial":
kwargs["uart_name"] = "usb_uart"
SoCCore.__init__(self, platform, sys_clk_freq, ident="LiteX SoC on Red Pitaya", **kwargs)
# Zynq7000 Integration ---------------------------------------------------------------------
if kwargs.get("cpu_type", None) == "zynq7000":
# Get and set the pre-generated .xci FIXME: change location? add it to the repository?
os.system("wget https://kmf2.trabucayre.com/redpitaya_ps7.txt")
os.makedirs("xci", exist_ok=True)
os.system("cp redpitaya_ps7.txt xci/redpitaya_ps7.xci")
self.cpu.set_ps7_xci("xci/redpitaya_ps7.xci")
# Connect AXI GP0 to the SoC with base address of 0x43c00000 (default one)
wb_gp0 = wishbone.Interface()
self.submodules += axi.AXI2Wishbone(
axi = self.cpu.add_axi_gp_master(),
wishbone = wb_gp0,
base_address = 0x43c00000)
self.bus.add_master(master=wb_gp0)
# Leds -------------------------------------------------------------------------------------
if with_led_chaser:
self.submodules.leds = LedChaser(
pads = platform.request_all("user_led"),
sys_clk_freq = sys_clk_freq)
# Build --------------------------------------------------------------------------------------------
def main():
from litex.soc.integration.soc import LiteXSoCArgumentParser
parser = LiteXSoCArgumentParser(description="LiteX SoC on Red Pitaya")
target_group = parser.add_argument_group(title="Target options")
target_group.add_argument("--build", action="store_true", help="Build design.")
target_group.add_argument("--load", action="store_true", help="Load bitstream.")
target_group.add_argument("--sys-clk-freq", default=100e6, help="System clock frequency.")
target_group.add_argument("--board", default="redpitaya14", help="Board type (redpitaya14 or redpitaya16).")
builder_args(parser)
soc_core_args(parser)
vivado_build_args(parser)
args = parser.parse_args()
soc = BaseSoC(
board = args.board,
sys_clk_freq = int(float(args.sys_clk_freq)),
**soc_core_argdict(args)
)
builder = Builder(soc, **builder_argdict(args))
if args.build:
builder.build(**vivado_build_argdict(args))
if args.load:
prog = soc.platform.create_programmer()
prog.load_bitstream(builder.get_bitstream_filename(mode="sram"), device=1)
if __name__ == "__main__":
main()
|
Python
| 0
|
@@ -494,32 +494,80 @@
c_core import *%0A
+from litex.soc.integration.soc import SoCRegion%0A
from litex.soc.i
@@ -2308,16 +2308,268 @@
b_uart%22%0A
+ if kwargs.get(%22cpu_type%22, None) == %22zynq7000%22:%0A kwargs%5B%22integrated_sram_size%22%5D = 0%0A kwargs%5B%22with_uart%22%5D = False%0A self.mem_map = %7B%0A 'csr': 0x43c0_0000, # Zynq GP0 default%0A %7D%0A
@@ -3529,16 +3529,116 @@
=wb_gp0)
+%0A self.bus.add_region(%22flash%22, SoCRegion(origin=0xFC00_0000, size=0x4_0000, mode=%22rwx%22))
%0A%0A
|
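A note on the diff above: it pins the CSR region at 0x43C0_0000 because that is the default window of the Zynq-7000's AXI GP0 master, and it registers a flash region via SoCRegion. The sketch below is a plain-Python stand-in for such a memory map, not LiteX's API; the region sizes and the GP0_BASE constant are assumptions for illustration.
from dataclasses import dataclass

@dataclass
class Region:                       # simplified stand-in for a SoC memory region
    origin: int
    size: int
    mode: str = "rw"

# hypothetical map mirroring the patch: CSRs at the Zynq GP0 base,
# plus the newly registered flash window
mem_map = {
    "csr":   Region(origin=0x43C0_0000, size=0x1_0000),
    "flash": Region(origin=0xFC00_0000, size=0x4_0000, mode="rwx"),
}

GP0_BASE = 0x43C0_0000              # default Zynq-7000 AXI GP0 window
assert mem_map["csr"].origin == GP0_BASE, "CPU would miss the CSR decoder"
for name, r in mem_map.items():
    print("{:6s} 0x{:08X}-0x{:08X} {}".format(name, r.origin, r.origin + r.size - 1, r.mode))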
97f4e9e40fb67ef9e57321512a533772917cb52b
|
Version 0.6.1
|
src/pymatching/_version.py
|
src/pymatching/_version.py
|
# Copyright 2020 Oscar Higgott
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.6.0"
|
Python
| 0
|
@@ -588,11 +588,11 @@
= %220.6.
-0
+1
%22%0A
|
156fa00da531e575297bea402d0269f85c259b7e
|
use Cholesky decomposition for orthogonal projection
|
src/pymor/algorithms/ei.py
|
src/pymor/algorithms/ei.py
|
# This file is part of the pyMor project (http://www.pymor.org).
# Copyright Holders: Felix Albrecht, Rene Milk, Stephan Rave
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
from __future__ import absolute_import, division, print_function
from numbers import Number
import math as m
import numpy as np
from scipy.linalg import solve_triangular
from pymor.core import getLogger, BasicInterface
from pymor.core.cache import Cachable, cached, DEFAULT_DISK_CONFIG
from pymor.la import VectorArrayInterface
from pymor.operators.ei import EmpiricalInterpolatedOperator
def ei_greedy(evaluations, error_norm=None, target_error=None, max_interpolation_dofs=None,
projection='orthogonal', product=None):
assert projection in ('orthogonal', 'ei')
assert isinstance(evaluations, VectorArrayInterface) or all(isinstance(ev, VectorArrayInterface) for ev in evaluations)
if isinstance(evaluations, VectorArrayInterface):
evaluations = (evaluations,)
logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
interpolation_dofs = np.zeros((0,), dtype=np.int32)
interpolation_matrix = np.zeros((0,0))
collateral_basis = type(next(iter(evaluations))).empty(dim=next(iter(evaluations)).dim)
gramian_inverse = None
max_errs = []
def interpolate(U, ind=None):
coefficients = solve_triangular(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T,
lower=True, unit_diagonal=True).T
# coefficients = np.linalg.solve(interpolation_matrix, U.components(interpolation_dofs, ind=ind).T).T
return collateral_basis.lincomb(coefficients)
# compute the maximum projection error and error vector for the current interpolation data
def projection_error():
max_err = -1.
# precompute gramian_inverse if needed
if projection == 'orthogonal' and len(interpolation_dofs) > 0:
if product is None:
gramian = collateral_basis.gramian()
else:
gramian = product.apply2(collateral_basis, collateral_basis, pairwise=False)
gramian_inverse = np.linalg.inv(gramian)
for AU in evaluations:
if len(interpolation_dofs) > 0:
if projection == 'ei':
AU_interpolated = interpolate(AU)
ERR = AU - AU_interpolated
else:
if product is None:
coefficients = gramian_inverse.dot(collateral_basis.dot(AU, pairwise=False)).T
else:
gramian = product
coefficients = gramian_inverse.dot(product.apply2(collateral_basis, AU, pairwise=False)).T
AU_projected = collateral_basis.lincomb(coefficients)
ERR = AU - AU_projected
else:
ERR = AU
errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
local_max_err_ind = np.argmax(errs)
local_max_err = errs[local_max_err_ind]
if local_max_err > max_err:
max_err = local_max_err
if len(interpolation_dofs) == 0 or projection == 'ei':
new_vec = ERR.copy(ind=local_max_err_ind)
else:
new_vec = AU.copy(ind=local_max_err_ind)
new_vec -= interpolate(AU, ind=local_max_err_ind)
return max_err, new_vec
# main loop
while True:
max_err, new_vec = projection_error()
logger.info('Maximum interpolation error with {} interpolation DOFs: {}'.format(len(interpolation_dofs),
max_err))
if target_error is not None and max_err <= target_error:
logger.info('Target error reached! Stopping extension loop.')
break
# compute new interpolation dof and collateral basis vector
new_dof = new_vec.amax()[0]
if new_dof in interpolation_dofs:
logger.info('DOF {} selected twice for interpolation! Stopping extension loop.'.format(new_dof))
break
new_vec *= 1 / new_vec.components([new_dof])[0]
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
collateral_basis.append(new_vec, remove_from_other=True)
interpolation_matrix = collateral_basis.components(interpolation_dofs).T
max_errs.append(max_err)
triangularity_error = np.max(np.abs(interpolation_matrix - np.tril(interpolation_matrix)))
logger.info('Interpolation matrix is not lower triangular with maximum error of {}'
.format(triangularity_error))
if len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
max_err, _ = projection_error()
logger.info('Final maximum interpolation error with {} interpolation DOFs: {}'.format(
len(interpolation_dofs), max_err))
break
logger.info('')
data = {'errors': max_errs}
return interpolation_dofs, collateral_basis, data
def interpolate_operators(discretization, operator_name, parameter_sample, error_norm=None,
target_error=None, max_interpolation_dofs=None,
projection='orthogonal', product=None):
# This class provides cached evaluations of the operator on the solutions.
# Should be replaced by something simpler in the future.
class EvaluationProvider(BasicInterface, Cachable):
# the following hack is currently necessary to prevent a deadlock in the cache backend ...
from tempfile import gettempdir
from os.path import join
DEFAULT_MEMORY_CONFIG = {"backend": 'LimitedMemory', 'arguments.max_kbytes': 20000}
DISK_CONFIG = {"backend": 'LimitedFile',
"arguments.filename": join(gettempdir(), 'pymor.ei_cache.dbm'),
'arguments.max_keys': 2000}
def __init__(self, discretization, operator, sample):
Cachable.__init__(self, config=self.DEFAULT_MEMORY_CONFIG)
self.discretization = discretization
self.sample = sample
self.operator = operator
@cached
def data(self, k):
mu = self.sample[k]
return self.operator.apply(self.discretization.solve(mu), mu=mu)
def __len__(self):
return len(self.sample)
def __getitem__(self, ind):
if not 0 <= ind < len(self.sample):
raise IndexError
return self.data(ind)
sample = tuple(parameter_sample)
operator = discretization.operators[operator_name]
evaluations = EvaluationProvider(discretization, operator, sample)
dofs, basis, data = ei_greedy(evaluations, error_norm, target_error, max_interpolation_dofs,
projection=projection, product=product)
ei_operator = EmpiricalInterpolatedOperator(operator, dofs, basis)
ei_operators = discretization.operators.copy()
ei_operators[operator_name] = ei_operator
ei_discretization = discretization.with_(operators=ei_operators, name='{}_ei'.format(discretization.name))
data.update({'dofs': dofs, 'basis': basis})
return ei_discretization, data
|
Python
| 0
|
@@ -369,16 +369,39 @@
iangular
+, cho_factor, cho_solve
%0A%0Afrom p
@@ -2235,39 +2235,55 @@
ian_
-inverse = np.linalg.inv(gramian
+cholesky = cho_factor(gramian, overwrite_a=True
)%0A%0A
@@ -2598,36 +2598,44 @@
ients =
-gramian_inverse.dot(
+cho_solve(gramian_cholesky,
collater
@@ -2781,28 +2781,36 @@
s =
-gramian_inverse.dot(
+cho_solve(gramian_cholesky,
prod
|
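The diff above swaps an explicit Gramian inverse for a Cholesky factorization when computing orthogonal-projection coefficients. A minimal NumPy/SciPy sketch of why the two agree while cho_solve avoids forming the inverse; the random basis V and vector u are made-up stand-ins, not pyMOR objects.
import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(0)
V = rng.standard_normal((50, 5))          # collateral basis, vectors as columns
u = rng.standard_normal(50)

gram = V.T @ V                            # SPD Gramian
rhs = V.T @ u

coeffs_inv = np.linalg.inv(gram) @ rhs            # the replaced approach
coeffs_cho = cho_solve(cho_factor(gram), rhs)     # the new approach
assert np.allclose(coeffs_inv, coeffs_cho)

# the orthogonal projection of u onto span(V) is V @ coeffs
print(np.linalg.norm(u - V @ coeffs_cho))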
3da923e052fc28d71c101f8990d413ba59d4bae0
|
Fix media template
|
src/redmill/views/media.py
|
src/redmill/views/media.py
|
# This file is part of Redmill.
#
# Redmill is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Redmill is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with Redmill. If not, see <http://www.gnu.org/licenses/>.
import base64
import json
import os
import flask
import flask.json
from .. import database, models
from . import authenticate, jsonify, request_wants_json
def get(id_):
session = database.Session()
media = session.query(models.Media).get(id_)
if media is None:
flask.abort(404)
else:
if request_wants_json():
return jsonify(media)
else:
filename = os.path.join(
flask.current_app.config["media_directory"],
"{}".format(media.id))
size = os.path.getsize(filename)
prefixes = iter(["", "k", "M", "G", "T", "P", "E", "Z"])
while size >= 1024:
size /= 1024.
prefixes.next()
size = "{} {}B".format(int(size), prefixes.next())
parameters = {
"path": media.album.parents+[media.album, media],
"media": media, "size": size
}
return flask.render_template("media.html", **parameters)
@authenticate()
def post():
session = database.Session()
try:
data = json.loads(flask.request.data)
except:
flask.abort(400)
fields = ["title", "author", "content", "album_id"]
if any(field not in data for field in fields):
flask.abort(400)
content = base64.b64decode(data["content"])
if session.query(models.Album).get(data["album_id"]) is None:
flask.abort(404)
arguments = {
"title": data["title"],
"author": data["author"],
"album_id": data["album_id"],
}
if "keywords" in data:
arguments["keywords"] = data["keywords"]
if "filename" in data:
arguments["filename"] = data["filename"]
else:
arguments["filename"] = database.get_filesystem_path(
data["title"], content)
try:
media = models.Media(**arguments)
session.add(media)
session.commit()
filename = os.path.join(flask.current_app.config["media_directory"], "{}".format(media.id))
with open(filename, "wb") as fd:
fd.write(content)
except Exception as e:
session.rollback()
flask.abort(500, e)
view = __name__.split(".")[-1]
endpoint = "{}.get".format(view)
location = flask.url_for(endpoint, id_=media.id, _method="GET")
return flask.json.dumps(media), 201, { "Location": location }
@authenticate()
def put(id_):
return _update(id_)
@authenticate()
def patch(id_):
return _update(id_)
@authenticate()
def delete(id_):
session = database.Session()
value = session.query(models.Media).get(id_)
if value is None:
flask.abort(404)
else:
try:
filename = os.path.join(
flask.current_app.config["media_directory"],
"{}".format(value.id))
os.remove(filename)
session.delete(value)
except Exception as e:
session.rollback()
flask.abort(500, e)
session.commit()
return "", 204 # No content
def _update(id_):
fields = ["title", "author", "keywords", "filename", "album_id"]
try:
data = json.loads(flask.request.data)
except:
flask.abort(400)
session = database.Session()
item = session.query(models.Media).get(id_)
if item is None:
flask.abort(404)
for field in data:
if field not in fields:
flask.abort(400)
if flask.request.method == "PUT":
if set(data.keys()) != set(fields):
flask.abort(400)
for field, value in data.items():
setattr(item, field, value)
session.commit()
return flask.json.dumps(item)
|
Python
| 0.000002
|
@@ -1194,24 +1194,69 @@
media.id))%0A%0A
+ if os.path.isfile(filename):%0A
@@ -1280,32 +1280,36 @@
tsize(filename)%0A
+
pref
@@ -1373,16 +1373,20 @@
+
+
while si
@@ -1409,24 +1409,28 @@
+
size /= 1024
@@ -1439,32 +1439,36 @@
+
+
prefixes.next()%0A
@@ -1464,24 +1464,28 @@
xes.next()%0A%0A
+
@@ -1534,16 +1534,66 @@
.next())
+%0A else:%0A size = %22(none)%22
%0A%0A
|
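The patch above indents the size computation under an os.path.isfile guard and falls back to "(none)" when the file is missing. Below is a Python 3 rendering of the same helper (the original uses Python 2's prefixes.next()); the function name human_size is an invention for the sketch.
import os

def human_size(path):
    if not os.path.isfile(path):          # the guard the patch adds
        return "(none)"
    size = os.path.getsize(path)
    prefixes = iter(["", "k", "M", "G", "T", "P", "E", "Z"])
    while size >= 1024:
        size /= 1024.0
        next(prefixes)
    return "{} {}B".format(int(size), next(prefixes))

print(human_size(__file__))               # e.g. "4 kB"
print(human_size("/no/such/file"))        # "(none)"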
9e6e2fd1903b8e4deb3b6737d86aadc2627cb4eb
|
add the `__all__` to `_compat.py`.
|
flask_storage/_compat.py
|
flask_storage/_compat.py
|
import sys
try:
from urlparse import urljoin
import urllib2 as http
except ImportError:
from urllib.parse import urljoin
from urllib import request as http
if sys.version_info[0] == 3:
string_type = str
else:
string_type = unicode
def to_bytes(text):
if isinstance(text, string_type):
text = text.encode('utf-8')
return text
|
Python
| 0
|
@@ -251,16 +251,75 @@
icode%0A%0A%0A
+__all__ = %5B'urljoin', 'http', 'string_type', 'to_bytes'%5D%0A%0A%0A
def to_b
|
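For context on the change above: `from module import *` exports the names listed in __all__ when it is defined, otherwise every top-level name that does not start with an underscore. A small self-contained simulation of that rule; the module source here is made up, not flask_storage's.
module_src = """
__all__ = ['urljoin', 'to_bytes']
def urljoin(base, url): return base + url
def to_bytes(text): return text.encode('utf-8')
def _helper(): pass
"""

ns = {}
exec(module_src, ns)
exported = ns.get('__all__') or [n for n in ns if not n.startswith('_')]
print(exported)   # ['urljoin', 'to_bytes'] -- _helper stays hidden from star-imports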
b9873d93c047e0d51986d74251c0378033c425ae
|
print done when finished generating chunks
|
runapp.py
|
runapp.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Runs the development server of the downstream_node app.
# Not for production use.
import argparse
import csv
import time
from flask import Flask, jsonify
from werkzeug.serving import run_simple
from werkzeug.wsgi import DispatcherMiddleware
from datetime import datetime, timedelta
from sqlalchemy import select, engine, update, insert, bindparam, true, func
from downstream_node.startup import app, db
from downstream_node.models import Contract, Address, Token, File, Chunk
from downstream_node import node
from downstream_node.utils import MonopolyDistribution, Distribution
def initdb():
db.create_all()
def cleandb():
# delete expired contracts and files
s = Contract.__table__.delete().where(Contract.cached == true())
db.engine.execute(s)
# and delete unreferenced files
s = File.__table__.delete().where(~File.__table__.c.id.in_(select([Contract.__table__.c.file_id])))
db.engine.execute(s)
def get_available_sizes():
available_sizes_stmt = select([File.__table__.c.size]).select_from(Chunk.__table__.join(File.__table__))
available_sizes_result = db.engine.execute(available_sizes_stmt).fetchall()
available_sizes = [a[0] for a in available_sizes_result]
return available_sizes
def maintain_capacity(min_chunk_size, max_chunk_size, size, base=2):
# maintains a certain size of available chunks
while(1):
available_sizes = get_available_sizes()
available_dist = Distribution(from_list=available_sizes)
# print('Sizes already available: {0}'.format(available_dist))
# print('Total size available: {0}'.format(available_dist.get_total()))
dist = MonopolyDistribution(min_chunk_size, max_chunk_size, size, base)
# print('Desired distribution: {0}'.format(dist))
missing = dist.subtract(available_dist)
# print('Missing: {0}'.format(missing))
missing_list = missing.get_list()
if (len(missing_list) > 0):
print('Generating chunks: {0}'.format(missing_list))
for chunk_size in sorted(missing_list, reverse=True):
generate_chunks(chunk_size)
time.sleep(2)
def generate_chunks(size, number=1):
# generates a test chunk
for i in range(0,number):
node.generate_test_file(size)
def updatewhitelist(path):
with open(path,'r') as f:
r = csv.reader(f)
next(r)
updated=list()
for l in r:
s = Address.__table__.select().where(Address.address == l[0])
result = db.engine.execute(s).first()
if (result is not None):
db.engine.execute(Address.__table__.update().\
where(Address.id == result.id).\
values(crowdsale_balance=int(l[1])))
else:
db.engine.execute(Address.__table__.insert().\
values(address=l[0], crowdsale_balance=l[1]))
result = db.engine.execute(Address.__table__.select().\
where(Address.address == l[0])).first()
updated.append(result.id)
all = db.engine.execute(Address.__table__.select()).fetchall()
for row in all:
if (row.id not in updated):
# also recursively delete all tokens associated with that address
tbd_tokens = db.engine.execute(Token.__table__.select().\
where(Token.address_id == row.id)).fetchall()
for t in tbd_tokens:
# and all contracts associated with that address
db.engine.execute(Contract.__table__.delete().\
where(Contract.token_id == t.id))
db.engine.execute(Token.__table__.delete().\
where(Token.id == t.id))
db.engine.execute(Address.__table__.delete().\
where(Address.id == row.id))
def eval_args(args):
if args.initdb:
initdb()
elif args.cleandb:
cleandb()
elif (args.whitelist is not None):
updatewhitelist(args.whitelist)
elif (args.generate_chunk is not None):
generate_chunks(args.generate_chunk)
elif (args.maintain is not None):
print('Maintaining total size: {0}, min chunk size: {1}, max chunk size: {2}'.format(
args.maintain[2],
args.maintain[0],
args.maintain[1]))
maintain_capacity(int(args.maintain[0]), int(args.maintain[1]), int(args.maintain[2]))
else:
debug_root = Flask(__name__)
debug_root.debug = True
debug_root.add_url_rule('/','index',lambda: jsonify(msg='debugging'))
prefixed_app = DispatcherMiddleware(debug_root, {app.config['APPLICATION_ROOT']:app})
run_simple('localhost', 5000, prefixed_app, use_reloader=True)
def parse_args():
parser = argparse.ArgumentParser('downstream')
parser.add_argument('--initdb', action='store_true')
parser.add_argument('--cleandb', action='store_true')
parser.add_argument('--whitelist', help='updates the white list '
'in the db and exits from a whitelist csv file. each row except '
'the first should be in the format\n'
'"address","crowdsale_balance",...\n'
'and the first row will be skipped.')
parser.add_argument('--generate-chunk', help='Generates a test chunk of '
'specified size.', type=int)
parser.add_argument('--maintain', help='Maintain available chunk capacity. '
'Specify three values (min chunk size, max chunk size, total pre-gen '
'size)', nargs=3)
return parser.parse_args()
def main():
args = parse_args()
eval_args(args)
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -2179,16 +2179,79 @@
k_size)%0A
+ if (len(missing_list) %3E 0):%0A print('Done.')%0A
|
901b3e88704f938dcc090bb93b9818ac7ac994dd
|
Update ipc_lista1.8.py
|
lista1/ipc_lista1.8.py
|
lista1/ipc_lista1.8.py
|
#ipc_lista1.8
#Professor: Jucimar Junior
#Any Mendes Carvalho - 1615310044
#
#
#
#
#Write a program that asks how much you earn per hour and the number of hours worked in the month.
#Calculate and show your total salary for that month.
QntHora = input("Entre com o valor de seu rendimento por hora: ")
hT = input("Entre com a quantidade de horas trabalhadas no mês: ")
Salario = round(QntHora
|
Python
| 0
|
@@ -390,10 +390,11 @@
(QntHora
+*
%0A%0A
|
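The stored file above ends mid-expression and the committed diff only inserts a `*`, so the record stays truncated as-is. Below is a hedged Python 3 completion of what the exercise describes (hourly wage times hours worked); the float conversions, the rounding to two decimals, and the final print are assumptions, with the original variable names kept.
QntHora = float(input("Entre com o valor de seu rendimento por hora: "))
hT = float(input("Entre com a quantidade de horas trabalhadas no mês: "))
Salario = round(QntHora * hT, 2)
print("Salário do mês:", Salario)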
1d8d980a269e643aac645e6432c0b16714f8e86e
|
Update project.
|
examples/monte_carlo.py
|
examples/monte_carlo.py
|
__author__ = "Fernando Crema"
__copyright__ = "Copyright 2007, The Cogent Project"
__credits__ = ["Fernando Crema @FernandoCremaG", "Antonio Jesús Torres @ajtorresd"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Fernando Crema"
__email__ = "fernando.crema@sabermetrics.dev"
__status__ = "Production"
from random import random
def simulation(H, AB, p_h, p_hr, target_hr, target_avg, M=10000):
less_target, ab_target_hr_global = 0, []
for i in range(M):
hr, h_tot, ab_tot, ab_to_target_hr = 0, H, AB, 0
while hr < target_hr:
h_rand = random()
ab_tot += 1
ab_to_target_hr += 1
# Hit from the batter
# P(Hit) = p_h
if h_rand <= p_h:
h_tot += 1
hr_rand = random()
# P(HR | Hit) = p_hr
# HR from the batter
if hr_rand <= p_hr:
hr += 1
# Check if the average is less than target
if h_tot*1.0/ab_tot < target_avg:
less_target += 1
# Check the number of at bats until average target
ab_target_hr_global.append(ab_to_target_hr)
return less_target * 1.0 / M, ab_target_hr_global
if __name__ == "__main__":
H = 2704
AB = 8552
# Case A: Current Slump
p_hr_slump = 1.0/(17)
p_h_slump = 0.295
prob_fail, at_bats = simulation(H, AB, p_h_slump, p_hr_slump, 10, .310)
print("Case A: Current slump 2018-2019")
print("\t Average: {0:.3f} \n\t HR Rate1Hit: {1:.4f} ".format(p_h_slump, p_hr_slump))
print("\t Probability of failing (current performance): {0:.4f}".format(prob_fail))
print("\t Average at bats to achieve goal assuming current performance: {0:.3f}".format(sum(at_bats)/len(at_bats)))
# Case B: Career
p_hr_career = 466*1.0 / 2704
p_h_career = 2704*1.0 / 8552
prob_fail, at_bats = simulation(H, AB, p_h_career, p_hr_career, 10, .310)
print("Case B: Career 2003-2019")
print("\t Average: {0:.3f} \n\t HR rate|Hit: {1:.4f} ".format(p_h_career, p_hr_career))
print("\t Probability of failing (career): {0:.4f}".format(prob_fail))
print("\t Average at bats to achieve goal assuming career performance: {0:.3f}".format(sum(at_bats)/len(at_bats)))
# Case C: Hard Slump
p_hr_hard = 1.0/25.0
p_h_hard = 0.250
prob_fail, at_bats = simulation(H, AB, p_h_hard, p_hr_hard, 10, .310)
print("Case C: Hard slump")
print("\t Average: {0:.3f} \n\t HR Rate|Hit: {1:.4f} ".format(p_h_hard,p_hr_hard))
print("\t Probability of failing (Hard slump): {0:.4f}".format(prob_fail))
print("\t Average at bats to achieve goal assuming hard slump: {0:.3f}".format(sum(at_bats)/len(at_bats)))
|
Python
| 0
|
@@ -56,30 +56,51 @@
t 20
-07
+19
, The
-Cogent
+Sabermetrics Python Package
Project
%22%0A__
@@ -95,16 +95,17 @@
Project
+.
%22%0A__cred
|
84f9f45f984cbf0b4192cae49e51333767bb5576
|
fix runtox.py failure when 'tox' is not available on the current system path
|
runtox.py
|
runtox.py
|
#!/usr/bin/env python
import subprocess
import sys
if __name__ == "__main__":
subprocess.call(["tox",
"-i", "ALL=https://devpi.net/hpk/dev/",
"--develop",] + sys.argv[1:])
|
Python
| 0.000003
|
@@ -94,16 +94,38 @@
s.call(%5B
+sys.executable, %22-m%22,
%22tox%22,%0A
|
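The fix above replaces the bare "tox" executable with sys.executable plus "-m", so the call succeeds whenever the tox package is importable by the running interpreter, even if its console script is not on PATH. A minimal sketch of the pattern, using pip as the demo module since it is more likely installed; swap in "tox" for the real script.
import subprocess
import sys

# resolved via the current interpreter, not via the shell's PATH
rc = subprocess.call([sys.executable, "-m", "pip", "--version"])
print("exit code:", rc)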
bc9c43160a58508e412592b0ab9a0d7f3a35c48c
|
fix regression in folder tests
|
src/adhocracy/adhocracy/folder/test_init.py
|
src/adhocracy/adhocracy/folder/test_init.py
|
import unittest
from pyramid import testing
class ResourcesAutolNamingFolderUnitTest(unittest.TestCase):
def setUp(self):
self.config = testing.setUp()
def tearDown(self):
testing.tearDown()
def _makeOne(self, d=None):
from . import ResourcesAutolNamingFolder
return ResourcesAutolNamingFolder(d)
def test_create(self):
from adhocracy.interfaces import IAutoNamingManualFolder
from zope.interface.verify import verifyObject
inst = self._makeOne()
assert verifyObject(IAutoNamingManualFolder, inst)
def test_next_name_empty(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob) == '0'.zfill(7)
assert inst.next_name(ob) == '1'.zfill(7)
def test_next_name_nonempty(self):
ob = testing.DummyResource()
inst = self._makeOne({'nonintifiable': ob})
assert inst.next_name(ob) == '0'.zfill(7)
def test_next_name_nonempty_intifiable(self):
ob = testing.DummyResource()
inst = self._makeOne({'0000000': ob})
assert inst.next_name(ob).startswith('0'.zfill(7) + '_20')
def test_next_name_empty_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
assert inst.next_name(ob, prefix='prefix') == 'prefix' + '0'.zfill(7)
assert inst.next_name(ob) == '1'.zfill(7)
def test_add(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add('name', ob)
assert 'name' in inst
def test_add_next(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob)
assert '0'.zfill(7) in inst
def test_add_next_prefix(self):
ob = testing.DummyResource()
inst = self._makeOne()
inst.add_next(ob, prefix='prefix')
assert 'prefix' + '0'.zfill(7) in inst
|
Python
| 0
|
@@ -410,38 +410,20 @@
import I
-AutoNamingManualFolder
+Pool
%0A
@@ -534,30 +534,12 @@
ct(I
-AutoNamingManualFolder
+Pool
, in
|
d6e06d4be5c483bdf4aff8032ff22bee5a49be02
|
Fix broken test.
|
backend/sublime/testdata/view_test.py
|
backend/sublime/testdata/view_test.py
|
# coding=utf-8
import sys
import traceback
try:
import sublime
v = sublime.test_window.new_file()
assert v.id() != sublime.test_window.id()
assert sublime.test_window.id() == v.window().id()
assert v.size() == 0
e = v.begin_edit()
v.insert(e, 0, "hellå world")
v.end_edit(e)
assert v.substr(sublime.Region(0, v.size())) == "hellå world"
e = v.begin_edit()
v.insert(e, 0, """abrakadabra
simsalabim
hocus pocus
""")
v.end_edit(e)
assert v.rowcol(20) == (1, 8)
assert v.rowcol(25) == (2, 2)
assert len(v.sel()) == 1
assert len(list(v.sel())) == 1
assert v.settings().get("test", "hello") == "hello"
v.settings().set("test", 10)
assert v.settings().get("test") == 10
assert v.sel()[0] == (46, 46)
v.run_command("move", {"by": "characters", "forward": True})
assert v.sel()[0] == (47, 47)
v.run_command("move", {"by": "characters", "forward": False})
assert v.sel()[0] == (46, 46)
except:
print(sys.exc_info()[1])
traceback.print_exc()
raise
|
Python
| 0
|
@@ -829,11 +829,12 @@
d%22:
-Tru
+Fals
e%7D)%0A
@@ -864,13 +864,13 @@
= (4
-7, 47
+5, 45
)%0A
@@ -917,36 +917,35 @@
rs%22, %22forward%22:
-Fals
+Tru
e%7D)%0A assert v
|
5e4c9f5a82f9a4f505cbb5c11e411ef70bc78db9
|
Bump version
|
metakernel/__init__.py
|
metakernel/__init__.py
|
from ._metakernel import (
MetaKernel, IPythonKernel, register_ipython_magics, get_metakernel)
from . import pexpect
from .replwrap import REPLWrapper, u
from .process_metakernel import ProcessMetaKernel
from .magic import Magic, option
from .parser import Parser
__all__ = ['Magic', 'MetaKernel', 'option']
__version__ = '0.13.0'
del magic, _metakernel, parser, process_metakernel
|
Python
| 0
|
@@ -327,17 +327,17 @@
= '0.13.
-0
+1
'%0A%0Adel m
|
56786beda79bf5a88c73b4899c953b0a83eccaa1
|
edit felix to run trajectory
|
felix/felix.py
|
felix/felix.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# ------------------------------------------------
# Fachhochschule Bielefeld
# Ingenieurwissenschaften und Mathematik
# Ingenieurinformatik - Studienarbeit
# Michel Asmus, Marcel Bernauer, Phil Petschull
# ------------------------------------------------
# project: felix
# main
# ------------------------------------------------
try:
from leg import leg
except Exception as e:
print("Error: Importing leg failed!")
print(e)
try:
from test_felix import run_trajectory
except Exception as e:
print("Error: Importing run_trajectory failed!")
print(e)
from data import robot_data # servo constants
import serial.tools.list_ports # available COM-ports
class robot():
# =======================================
# Public class attributes
# =======================================
# =======================================
# Private methods
# =======================================
# Constructor
def __init__(self):
print("constructing...")
# determine COM-port...
DEVICENAME = self.set_comport()
if DEVICENAME:
print("Working with", DEVICENAME)
DEVICENAME = str(DEVICENAME).encode('utf-8')
else:
print("Aborted port-detection. Exiting...")
return
# Wake up...
self.leg = leg(robot_data[0]["legs"][0], DEVICENAME) # here just robot 0 and its leg 0
# Destructor
def __del__(self):
# safely turn off torque if necessary
if self.leg.torque:
input("Please watch out, hit enter to disable torque...")
self.leg.disable_torque()
print("destructing...")
# safe exit
self.leg.end_communication()
# give away the leg
def get_leg(self):
return self.leg
# get a list of available COM-ports on a win-system
def get_comports(self):
return serial.tools.list_ports.comports()
# interactive choice of COM-port
def set_comport(self):
while True:
print("Determining COM-Ports...")
ports = list(self.get_comports()) # element is like: "COM # - USB Serial Port (COM#)"
if not len(ports):
print("Found 0 COM-Ports :(\n")
if len(input("Enter anything to exit or hit enter to look for again...")):
return False
elif len(ports) == 1:
print("Found 1 COM-Port:", ports[0]) # take the only one
return str(ports[0]).split()[0]
else:
print("Found ", len(ports), " COM-Ports.") # choose it by typing in the index of the port shown
for index, port in enumerate(ports):
print(index, port)
return str(ports[int(input("Please choose by typing in the desired index of the port:"))]).split()[0]
# automatically enable/disable torque
def toggle_torque(self):
if not self.leg.torque:
self.leg.enable_torque()
print("Torque enabled.")
else:
self.leg.disable_torque()
print("Torque disabled.")
# =======================================
# Public methods
# =======================================
# options
def menu(self):
print("\nWelcome to FELIX - Feedback Error Learning with dynamIXel!")
options = {
'e' : "[e]xit programm",
'i' : "[i]nformation about the robot (data.py)",
't' : "[t]oggle torque-activation",
's' : "set movement [s]peed for all servos",
'r' : "[r]ead present position in degrees",
'd' : "move to [d]efault position",
'x' : "e[x]ecute dummy trajectory given in test_felix.py",
'o' : "move [o]ne servo to position given in degrees",
'a' : "move [a]ll servos to destination given in degrees"
}
while True:
print("\n--------------------------------------------")
print("Your Options:")
for option in options.values():
print(option)
print("--------------------------------------------")
choice = input("Please choose: ") # user input
# input processing
if choice == 'e':
break
elif choice == 'i':
for key, value in self.leg.leg_data.items():
print(key, " = ", value)
elif choice == 't':
self.toggle_torque()
elif choice == 's':
self.leg.set_speed_for_all(int(input("Please input speed:")))
pass
#self.leg.set_speed(input("Please input speed (default: 1000):"))
elif choice == 'r':
for servo_id, servo_pos in enumerate(self.leg.get_current_degrees()):
print("> servo", servo_id, "is at %7.3f degree." % servo_pos)
elif choice == 'd':
if self.leg.torque:
offset = 0.005
pos = [0, 0, 90, 90]
for id, pos in enumerate(pos):
print("will move servo", id, "to default position")
self.leg.move_servo_to_degrees(id, pos)
self.leg.test_servo_degree(id, pos, offset)
else: print("Please enable torque first!")
elif choice == 'x':
if self.leg.torque:
run_trajectory(self.leg)
else:
print("Please enable torque first!")
elif choice == 'o':
self.leg.move_servo_to_degrees(int(input("Please input servo-id:")), float(input("Please input position:")))
elif choice == 'a':
if self.leg.torque:
pos=list()
for i in range(self.leg.num_servo):
pos.append(input("Please input position for servo {}: ".format(i)))
self.leg.move_to_deg(pos)
else: print("Please enable torque first!")
#self.leg.move_to_deg([int(x) for x in input("Please input position (default: 0 0 90 90):").split()])
else:
print("Invalid input... Please try again")
# main
def main():
print("Starting FELIX...")
# Wake up...
felix = robot()
# UI
felix.menu()
# jump to main
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -489,20 +489,22 @@
ry:%0A
-from
+import
test_fe
@@ -510,30 +510,8 @@
elix
- import run_trajectory
%0Aexc
@@ -558,30 +558,26 @@
porting
-run_trajectory
+test_felix
failed!
@@ -5549,16 +5549,27 @@
+test_felix.
run_traj
|
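The diff above switches `from test_felix import run_trajectory` to `import test_felix` and qualifies the call as test_felix.run_trajectory(...). With a module import, the attribute is looked up at call time on the module object, which plays better with the try/except import guard. A stdlib illustration of the two binding styles (the math monkeypatch is only for demonstration):
import math                  # module object: math.sqrt resolved on each call
from math import sqrt        # direct binding: the function object is copied once

math.sqrt = lambda x: -1.0   # rebinding the attribute affects module-style calls...
print(math.sqrt(4), sqrt(4)) # ...but not the name bound by the from-import: -1.0 2.0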
d5ec09fe4ad4209c387b1b0da82a412ea83f7658
|
Change module name
|
misp_modules/modules/expansion/__init__.py
|
misp_modules/modules/expansion/__init__.py
|
from . import _vmray # noqa
__all__ = ['vmray_submit', 'asn_history', 'circl_passivedns', 'circl_passivessl',
'countrycode', 'cve', 'dns', 'btc_steroids', 'domaintools', 'eupi',
'farsight_passivedns', 'ipasn', 'passivetotal', 'sourcecache', 'virustotal',
'whois', 'shodan', 'reversedns', 'geoip_country', 'wiki', 'iprep',
'threatminer', 'otx', 'threatcrowd', 'vulndb', 'crowdstrike_falcon',
'yara_syntax_validator', 'hashdd', 'onyphe', 'onyphe_full', 'rbl',
'xforceexchange', 'sigma_syntax_validator', 'stix2_pattern_syntax_validator',
'sigma_queries', 'dbl_spamhaus', 'vulners', 'yara_query', 'macaddress_io',
'intel471']
|
Python
| 0.00002
|
@@ -55,19 +55,18 @@
', '
-asn_history
+bgpranking
', '
|
ac4058c78889f9500d8d61321fa1e3071280c9d6
|
add dump
|
misc/testzookeeper2.py
|
misc/testzookeeper2.py
|
'''
Created on 2016/9/20
:author: hubo
'''
from __future__ import print_function
from vlcp.server import main
from vlcp.event import Client
from vlcp.server.module import Module
from vlcp.config import defaultconfig
from vlcp.protocol.zookeeper import ZooKeeper, ZooKeeperConnectionStateEvent,\
ZooKeeperWatcherEvent
import vlcp.utils.zookeeper as zk
from vlcp.utils.zkclient import ZooKeeperClient, ZooKeeperSessionStateChanged
from vlcp.event.runnable import RoutineContainer
from namedstruct import dump
from pprint import pprint
@defaultconfig
class TestModule(Module):
_default_serverlist = ['tcp://localhost:3181/','tcp://localhost:3182/','tcp://localhost:3183/']
def __init__(self, server):
Module.__init__(self, server)
self.apiroutine = RoutineContainer(self.scheduler)
self.client = ZooKeeperClient(self.apiroutine, self.serverlist)
self.connections.append(self.client)
self.apiroutine.main = self.main
self.routines.append(self.apiroutine)
def watcher(self):
watcher = ZooKeeperWatcherEvent.createMatcher()
while True:
yield (watcher,)
print('WatcherEvent: %r' % (dump(self.apiroutine.event.message),))
def main(self):
def _watch(w):
for m in w.wait(self.apiroutine):
yield m
print('Watcher returns:', self.apiroutine.retvalue)
def _watchall(watchers):
for w in watchers:
if w is not None:
self.apiroutine.subroutine(_watch(w))
self.apiroutine.subroutine(self.watcher(), False, daemon = True)
up = ZooKeeperSessionStateChanged.createMatcher(ZooKeeperSessionStateChanged.CREATED, self.client)
yield (up,)
print('Connection is up: %r' % (self.client,))
for m in self.client.requests([zk.create(b'/vlcptest', b'test'),
zk.getdata(b'/vlcptest', True)], self.apiroutine):
yield m
print(self.apiroutine.retvalue)
pprint(dump(self.apiroutine.retvalue[0]))
_watchall(self.apiroutine.retvalue[3])
for m in self.apiroutine.waitWithTimeout(0.2):
yield m
for m in self.client.requests([zk.delete(b'/vlcptest'),
zk.getdata(b'/vlcptest', watch = True)], self.apiroutine):
yield m
print(self.apiroutine.retvalue)
pprint(dump(self.apiroutine.retvalue[0]))
_watchall(self.apiroutine.retvalue[3])
for m in self.client.requests([zk.multi(
zk.multi_create(b'/vlcptest2', b'test'),
zk.multi_create(b'/vlcptest2/subtest', 'test2')
),
zk.getchildren2(b'/vlcptest2', True)], self.apiroutine):
yield m
print(self.apiroutine.retvalue)
pprint(dump(self.apiroutine.retvalue[0]))
_watchall(self.apiroutine.retvalue[3])
for m in self.client.requests([zk.multi(
zk.multi_delete(b'/vlcptest2/subtest'),
zk.multi_delete(b'/vlcptest2')),
zk.getchildren2(b'/vlcptest2', True)], self.apiroutine):
yield m
print(self.apiroutine.retvalue)
pprint(dump(self.apiroutine.retvalue[0]))
_watchall(self.apiroutine.retvalue[3])
if __name__ == '__main__':
main()
|
Python
| 0.000001
|
@@ -1364,16 +1364,21 @@
urns:',
+dump(
self.api
@@ -1386,32 +1386,33 @@
outine.retvalue)
+)
%0A def _wa
|
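The patch above wraps the logged value in dump(...) so the ZooKeeper reply prints as plain data instead of a raw object repr. namedstruct may not be installed, so the sketch below is a hypothetical equivalent for namedtuple-like objects; dump_like and the Reply type are inventions for the example.
from collections import namedtuple

def dump_like(obj):
    # recursively convert structured objects into printable builtins
    if hasattr(obj, "_asdict"):
        return {k: dump_like(v) for k, v in obj._asdict().items()}
    if isinstance(obj, (list, tuple)):
        return [dump_like(v) for v in obj]
    return obj

Reply = namedtuple("Reply", "zxid children")
print("Watcher returns:", dump_like(Reply(42, [Reply(7, [])])))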
bf7c37081dec2fafc2c45b3cefdf06af027d9c64
|
Use Name tag for reverse resolution if set
|
unbound_ec2/server.py
|
unbound_ec2/server.py
|
from abc import ABCMeta, abstractmethod
import time
from unboundmodule import *
class Server:
"""Abstract server class for serving DNS requests.
Provides functional framework for implementing authoritative or caching servers.
"""
__metaclass__ = ABCMeta
def __init__(self, zone, reverse_zone, ttl, lookup, ip_order):
self.zone = '%s.' % zone.strip('.')
self.reverse_zone = '%s.' % reverse_zone.strip('.')
self.lookup = lookup
self.ttl = ttl
self.ip_order = ip_order
def operate(self, _id, event, qstate, qdata):
"""
This is a main entry point function that will be called from the unbound python script
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
if event in [MODULE_EVENT_NEW, MODULE_EVENT_PASS]:
qname = qstate.qinfo.qname_str
if qstate.qinfo.qtype in [RR_TYPE_A, RR_TYPE_ANY]:
if qname.endswith(self.zone):
return self.handle_request(_id, event, qstate, qdata, getattr(self, 'forward_record'))
if qstate.qinfo.qtype in [RR_TYPE_PTR]:
if qname.endswith(self.reverse_zone):
return self.handle_request(_id, event, qstate, qdata, getattr(self, 'reverse_record'))
return self.handle_pass(_id, event, qstate, qdata)
if event == MODULE_EVENT_MODDONE:
return self.handle_finished(_id, event, qstate, qdata)
return self.handle_error(_id, event, qstate, qdata)
def handle_request(self, _id, event, qstate, qdata, record_function):
"""
Handle requests that match the serving criteria
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
qname = qstate.qinfo.qname_str
msg = self.new_dns_msg(qname)
instances = self.lookup.lookup(qname)
if len(instances) == 0:
log_info('%s not found' % qname)
qstate.return_rcode = RCODE_NXDOMAIN
else:
qstate.return_rcode = RCODE_NOERROR
for instance in instances:
record = record_function(qname, instance).encode("ascii")
msg.answer.append(record)
if not msg.set_return_msg(qstate):
qstate.ext_state[_id] = MODULE_ERROR
return True
qstate.return_msg.rep.security = 2
qstate.ext_state[_id] = MODULE_FINISHED
return True
@abstractmethod
def new_dns_msg(self, qname):
"""
Abstract function for instantiating DNSMessage
:param qname:
:return:
"""
return NotImplemented
def handle_pass(self, _id, event, qstate, qdata):
"""
Pass on the requests that do not match the serving criteria
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
qstate.ext_state[_id] = MODULE_WAIT_MODULE
return True
def handle_finished(self, _id, event, qstate, qdata):
"""
Complete serving the requests that do not match the serving criteria
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
qstate.ext_state[_id] = MODULE_FINISHED
return True
def handle_error(self, _id, event, qstate, qdata):
"""
Serve request error
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
qstate.ext_state[_id] = MODULE_ERROR
return True
def __determine_address(self, instance):
if self.ip_order == 'private':
ordered_address = instance.private_ip_address or instance.ip_address
else:
ordered_address = instance.ip_address or instance.private_ip_address
return (instance.tags.get('Address')
or ordered_address).encode("ascii")
def __determine_name(self, instance):
return '%s.%s.' % (instance.id, self.zone.strip('.'))
def forward_record(self, qname, instance):
return "%s %d IN A %s" % (qname, self.ttl, self.__determine_address(instance))
def reverse_record(self, qname, instance):
return "%s %d IN PTR %s" % (qname, self.ttl, self.__determine_name(instance))
class Authoritative(Server):
"""This server will return non-cached authoritative answers.
"""
def new_dns_msg(self, qname):
"""
Return DNSMessage instance with AA flag set
:param qname:
:return:
"""
return DNSMessage(qname, RR_TYPE_A, RR_CLASS_IN, PKT_QR | PKT_RA | PKT_AA)
class Caching(Server):
"""This server will serve cached answers.
"""
def __init__(self, zone, reverse_zone, ttl, lookup, ip_order):
Server.__init__(self, zone, reverse_zone, ttl, lookup, ip_order)
self.cached_requests = {}
def new_dns_msg(self, qname):
"""
Return DNSMessage instance
:param qname:
:return:
"""
return DNSMessage(qname, RR_TYPE_A, RR_CLASS_IN, PKT_QR | PKT_RA)
def handle_forward(self, _id, event, qstate, qdata):
"""
Apart from the standard Server handle_forward answer, results will be stored in the query and request caches
:param _id:
:param event:
:param qstate:
:param qdata:
:return:
"""
result = Server.handle_forward(self, _id, event, qstate, qdata)
qname = qstate.qinfo.qname_str
if not storeQueryInCache(qstate, qstate.qinfo, qstate.return_msg.rep, 0):
log_warn('Unable to store query in cache. possibly out of memory.')
else:
self.cached_requests[qname.rstrip('.')] = {'time': time.time(), 'qstate': qstate}
return result
|
Python
| 0
|
@@ -4070,60 +4070,238 @@
-return '%25s.%25s.' %25 (instance.id, self.zone.strip('.')
+domain = self.zone.rstrip('.')%0A name = instance.tags%5B'Name'%5D.split(',')%5B0%5D.rstrip('.') if 'Name' in instance.tags else instance.id%0A return '%25s.' %25 (name if domain in name else '%25s.%25s' %25 (name, domain)).encode(%22ascii%22
)%0A%0A
|
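The replacement body of __determine_name above prefers an instance's Name tag (first comma-separated entry) over its id, and only appends the zone when it is not already part of the name. A standalone sketch of that logic with hypothetical inputs; the encode("ascii") from the real diff is dropped here for Python 3 readability.
def determine_name(instance_id, tags, zone):
    domain = zone.rstrip('.')
    name = tags['Name'].split(',')[0].rstrip('.') if 'Name' in tags else instance_id
    return '%s.' % (name if domain in name else '%s.%s' % (name, domain))

print(determine_name('i-0abc123', {'Name': 'web-1'}, 'ec2.internal.'))            # web-1.ec2.internal.
print(determine_name('i-0abc123', {}, 'ec2.internal.'))                           # i-0abc123.ec2.internal.
print(determine_name('i-0abc123', {'Name': 'db.ec2.internal'}, 'ec2.internal.'))  # db.ec2.internal.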
48894b2200d3324525ce3f1056fbd4d3420765e2
|
Make date string sent to Guardian API dynamic.
|
scrape.py
|
scrape.py
|
#!/usr/bin/env python
import argparse
from datetime import datetime
from json import loads
from bs4 import BeautifulSoup
from ebooklib import epub
import requests
def get_todays_news(api_key):
payload = {'api-key': api_key,
'section': 'world',
'from-date': '2015-03-22'}
r = requests.get('http://content.guardianapis.com/search', params=payload)
json = loads(r.text)
articles = [(x['webTitle'], x['webUrl']) for x in json['response']['results']]
return articles
def scrape(uri):
response = requests.get(uri)
soup = BeautifulSoup(response.text)
content = soup.find('div', class_='content__article-body')
filtered_content = content.find_all('p')
processed_content = u''.join([unicode(x) for x in filtered_content])
return processed_content
def make_chapter(title, content):
safe_title = u''.join([x for x in title if x.isalpha() or x.isspace()]).replace(u' ', u'-')
file_name = u'chapter-{}.xhtml'.format(safe_title)
chapter = epub.EpubHtml(title=title, file_name=file_name, lang='en')
chapter.content = u'<h1>{}</h1>{}'.format(title, content)
return chapter
def make_ebook(title, chapters):
book = epub.EpubBook()
book.set_title(title)
book.set_language('en')
date = datetime.now().strftime(u'%A %d %B %Y')
section_name = u'Headlines for {}'.format(date)
book.toc = ((epub.Link(c.file_name, c.title, c.title) for c in chapters),
(epub.Section(section_name), chapters))
for c in chapters:
book.add_item(c)
book.spine = ['nav'] + chapters
book.add_item(epub.EpubNcx())
safe_filename = u''.join([x for x in title if x.isalpha() or x.isspace() or x.isdigit()]).replace(u' ', u'-')
filename = u'{}.epub'.format(safe_filename.lower())
epub.write_epub(filename, book, {})
def main():
parser = argparse.ArgumentParser("Transform news from The Guardian's website into an epub file.")
parser.add_argument('api_key', type=str)
args = parser.parse_args()
uris = get_todays_news(args.api_key)
chapters = []
for title, raw_content in uris:
processed_content = scrape(raw_content)
chapter = make_chapter(title, processed_content)
chapters.append(chapter)
date = datetime.now().strftime(u'%A %d %B %Y')
book_title = u'News for {}'.format(date)
make_ebook(book_title, chapters)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -181,16 +181,25 @@
ys_news(
+section,
api_key)
@@ -200,16 +200,81 @@
i_key):%0A
+ now = datetime.now()%0A api_date = now.strftime('%25Y-%25m-%25d')%0A
payl
@@ -330,15 +330,15 @@
n':
-'world'
+section
,%0A
@@ -367,20 +367,16 @@
e':
-'2015-03-22'
+api_date
%7D%0A
@@ -2124,16 +2124,25 @@
ys_news(
+'world',
args.api
|
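The diff above threads a section parameter through and computes from-date at call time instead of hard-coding '2015-03-22'. The core of the change is a single strftime call producing the ISO-style date string the Guardian API expects; the payload below is a trimmed stand-in for the real request parameters.
from datetime import datetime

api_date = datetime.now().strftime('%Y-%m-%d')   # always today's date
payload = {'section': 'world', 'from-date': api_date}
print(payload)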
770d6a1b390ffd9fbade5d374e7252b265f96762
|
Fix (Ordered)Dict iteritems/itervalues
|
poet/poet.py
|
poet/poet.py
|
#!/usr/bin/env python
""" homebrew-pypi-poet
Invoked like "poet foo" for some package foo **which is presently
installed in sys.path**, determines which packages foo and its dependents
depend on, downloads them from pypi and computes their checksums, and
spits out Homebrew resource stanzas.
"""
from __future__ import print_function
import argparse
from collections import OrderedDict
from hashlib import sha256
import json
import sys
from urllib.request import urlopen
import warnings
import codecs
from jinja2 import Template
import networkx
import pip
import tl.eggdeps.graph
FORMULA_TEMPLATE = Template(
"""class {{ package.name|capitalize }} < Formula
homepage "{{ package.homepage }}"
url "{{ package.url }}"
sha256 "{{ package.checksum }}"
{% if resources %}
{% for resource in resources %}
{% include ResourceTemplate %}
{% endfor %}
{% endif %}
def install
{% if resources %}
ENV.prepend_create_path "PYTHONPATH", libexec/"vendor/lib/python{{ py_version }}/site-packages"
%w[{{ resources|map(attribute='name')|join(' ') }}].each do |r|
resource(r).stage do
system "python", *Language::Python.setup_install_args(libexec/"vendor")
end
end
{% endif %}
ENV.prepend_create_path "PYTHONPATH", libexec/"lib/python{{ py_version }}/site-packages"
system "python", *Language::Python.setup_install_args(libexec)
bin.install Dir[libexec/"bin/*"]
bin.env_script_all_files(libexec/"bin", :PYTHONPATH => ENV["PYTHONPATH"])
end
end
""", trim_blocks=True)
RESOURCE_TEMPLATE = Template(
""" resource "{{ resource.name }}" do
url "{{ resource.url }}"
{{ resource.checksum_type }} "{{ resource.checksum }}"
end
""")
class PackageNotInstalledWarning(UserWarning):
pass
def research_package(name, version=None):
f = urlopen("https://pypi.python.org/pypi/{}/{}/json".
format(name, version or ''))
reader = codecs.getreader("utf-8")
pkg_data = json.load(reader(f))
f.close()
d = {}
d['name'] = pkg_data['info']['name']
d['homepage'] = pkg_data['info'].get('home_page', '')
for url in pkg_data['urls']:
if url['packagetype'] == 'sdist':
d['url'] = url['url']
f = urlopen(url['url'])
d['checksum'] = sha256(f.read()).hexdigest()
d['checksum_type'] = 'sha256'
f.close()
break
return d
def make_graph(pkg):
egg_graph = tl.eggdeps.graph.Graph()
egg_graph.from_specifications(pkg)
# create graph
ignore = ['argparse', 'pip', 'setuptools', 'wsgiref']
G = networkx.DiGraph()
keys = [key for key in egg_graph.keys() if key not in ignore]
G.add_nodes_from(keys)
G.add_edges_from([(k, v) for k in keys for v in egg_graph[k].keys()
if v not in ignore])
# add version attribute
installed_packages = pip.get_installed_distributions()
versions = {package.key: package.version for package in installed_packages}
for package in G.nodes():
try:
G.node[package]['version'] = versions[package]
except KeyError:
warnings.warn("{} is not installed so we cannot compute "
"resources for its dependencies.".format(package),
PackageNotInstalledWarning)
G.node[package]['version'] = None
for package in G.nodes():
package_data = research_package(package, G.node[package]['version'])
G.node[package].update(package_data)
# get the dependency resolution order
deps = networkx.algorithms.dag.topological_sort(G)
deps.reverse()
return OrderedDict([(dep, G.node[dep]) for dep in deps])
def formula_for(package):
nodes = make_graph(package)
resources = [value for key, value in nodes.iteritems()
if key.lower() != package.lower()]
root = nodes[package]
return FORMULA_TEMPLATE.render(package=root,
resources=resources,
py_version="2.7",
ResourceTemplate=RESOURCE_TEMPLATE)
def resources_for(package):
nodes = make_graph(package)
return '\n\n'.join([RESOURCE_TEMPLATE.render(resource=node)
for node in nodes.itervalues()])
def main():
parser = argparse.ArgumentParser(
description='Generate Homebrew resource stanzas for pypi packages '
'and their dependencies.')
actions = parser.add_mutually_exclusive_group()
actions.add_argument(
'--single', '-s', metavar='package', nargs='+',
help='Generate a resource stanza for one or more packages, '
'without considering dependencies.')
actions.add_argument(
'--formula', '-f', metavar='package',
help='Generate a complete formula for a pypi package with its '
'recursive pypi dependencies as resources.')
actions.add_argument(
'--resources', '-r', metavar='package',
help='Generate resource stanzas for a package and its recursive '
'dependencies (default).')
parser.add_argument('package', help=argparse.SUPPRESS, nargs='?')
args = parser.parse_args()
if (args.formula or args.resources) and args.package:
print('--formula and --resources take a single argument.',
file=sys.stderr)
parser.print_usage(sys.stderr)
return 1
if args.formula:
print(formula_for(args.formula))
elif args.single:
for i, package in enumerate(args.single):
data = research_package(package)
print(RESOURCE_TEMPLATE.render(resource=data))
if i != len(args.single)-1:
print()
else:
package = args.resources or args.package
if not package:
parser.print_usage(sys.stderr)
return 1
print(resources_for(package))
return 0
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0
|
@@ -3794,20 +3794,16 @@
n nodes.
-iter
items()%0A
@@ -4281,12 +4281,8 @@
des.
-iter
valu
|
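The fix above drops the iter prefixes: Python 3 removed dict.iteritems() and dict.itervalues(), and the plain items()/values() methods already return lazy view objects. A quick check with made-up data:
nodes = {'requests': {'version': '2.0'}, 'six': {'version': '1.9'}}

for name, meta in nodes.items():                 # py3 equivalent of iteritems()
    print(name, meta['version'])
print([m['version'] for m in nodes.values()])    # py3 equivalent of itervalues()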
1775ed95135f78772e74d576fbe238b677373f3b
|
add support for multiple networks:
|
src/collectors/docker_stats/docker_stats.py
|
src/collectors/docker_stats/docker_stats.py
|
"""
The DockerStatsCollector collects stats from the docker daemon about currently running
containers.
"""
import diamond.collector
from diamond.utils.signals import SIGALRMException
try:
import docker
except ImportError:
docker = None
def env_list_to_dict(env_list):
env_dict = {}
for pair in env_list:
tokens = pair.split("=",1)
env_dict[tokens[0]] = tokens[1]
return env_dict
def sanitize_delim(name, delim):
return ".".join(name.strip(delim).split(delim))
class DockerStatsCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(DockerStatsCollector, self).get_default_config_help()
config_help.update({
'client_url': 'The url to connect to the docker daemon',
'name_from_env': 'If specified, use the named environment variable to populate container name',
'sanitize_slashes': 'Replace slashes in container name with \".\"\'s, defaults to True',
'ecs_mode': 'Enables pulling container name and env from \'tag\' docker label, and using task ARN instead of container id, defaults to False',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DockerStatsCollector, self).get_default_config()
config.update({
'client_url': 'unix://var/run/docker.sock',
'name_from_env': None,
'path': 'docker',
'sanitize_slashes': True,
'ecs_mode': False,
})
return config
def collect(self):
"""
Collect docker stats
"""
# Require docker client lib to get stats
if docker is None:
self.log.error('Unable to import docker')
return None
try:
client = docker.Client(base_url=self.config['client_url'], version='auto')
container_ids = [container['Id'] for container in client.containers()]
for container_id in container_ids:
container = client.inspect_container(container_id)
name = container['Name']
idlabel = container_id[:12]
if self.config['name_from_env']:
# Grab name from environment variable if configured
env_dict = env_list_to_dict(container['Config']['Env'])
name = env_dict.get(self.config['name_from_env'], name)
if self.config['sanitize_slashes']:
name = sanitize_delim(name, "/")
if self.config['ecs_mode']:
labels = container['Config']['Labels']
tag = labels.get('tag', '')
arn = labels.get('com.amazonaws.ecs.task-arn', '')
if arn and tag:
# only grab the first part of the task UUID
parts = arn.split("/")
idlabel = parts[1][:8]
name = sanitize_delim(tag, "--")
metrics_prefix = '.'.join([name, idlabel, "docker"])
stats = client.stats(container_id, True, stream=False)
# CPU Stats
for ix, cpu_time in enumerate(stats['cpu_stats']['cpu_usage']['percpu_usage']):
metric_name = '.'.join([metrics_prefix, 'cpu' + str(ix), 'user'])
self.publish(metric_name,
int(self.derivative(metric_name,
cpu_time / 10000000.0,
diamond.collector.MAX_COUNTER)))
# Total CPU
metric_name = '.'.join([metrics_prefix, 'cpu_total', 'user'])
self.publish(metric_name,
int(self.derivative(metric_name,
stats['cpu_stats']['cpu_usage']['total_usage'] / 10000000.0,
diamond.collector.MAX_COUNTER)))
# Memory Stats
metric_name = '.'.join([metrics_prefix, 'mem', 'rss'])
self.publish(metric_name,
stats['memory_stats']['stats']['total_rss'])
metric_name = '.'.join([metrics_prefix, 'mem', 'limit'])
self.publish(metric_name,
stats['memory_stats']['limit'])
# Network Stats
for stat in [u'rx_bytes', u'tx_bytes']:
self.publish('.'.join([metrics_prefix, 'net', stat]),
stats['network'][stat])
return True
except SIGALRMException as e:
# sigalrm is raised if the collector takes too long
raise e
except Exception as e:
self.log.error("Couldn't collect from docker: %s", e)
return None
|
Python
| 0
|
@@ -3980,24 +3980,204 @@
twork Stats%0A
+ networks = stats.get('networks', %5B%5D)%0A if not networks:%0A networks = %7B'eth0': stats%5B'network'%5D%7D%0A%0A for network_name, network in networks.iteritems():%0A
for
@@ -4208,24 +4208,26 @@
tx_bytes'%5D:%0A
+
se
@@ -4269,16 +4269,30 @@
, 'net',
+ network_name,
stat%5D),
@@ -4315,32 +4315,25 @@
-stats%5B'
+
network
-'%5D
%5Bstat%5D)%0A
|
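The patch above generalizes the network-stats loop: newer Docker daemons report per-interface counters under 'networks', older ones expose a single 'network' block. A self-contained sketch of that fallback with fabricated payloads (note the patch itself still uses Python 2's iteritems()):
def iter_networks(stats):
    networks = stats.get('networks')
    if not networks:                       # legacy single-interface payload
        networks = {'eth0': stats['network']}
    return networks.items()

legacy = {'network': {'rx_bytes': 10, 'tx_bytes': 20}}
modern = {'networks': {'eth0': {'rx_bytes': 1, 'tx_bytes': 2},
                       'eth1': {'rx_bytes': 3, 'tx_bytes': 4}}}

for payload in (legacy, modern):
    for name, counters in sorted(iter_networks(payload)):
        print(name, counters['rx_bytes'], counters['tx_bytes'])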
62095d1f6f711304ab92ad05734395c5fe284a9c
|
Remove dict comprehensions for 2.6 compatibility, fixes #1
|
funky/funky.py
|
funky/funky.py
|
# Python imports
import operator
import time
from functools import wraps, partial
_py_hash = hash
# Constants
LIST_TYPES = (list, tuple, set)
def first(iterable, default=None):
if not iterable:
# Empty iterator (list)
return default
return iterable[0]
def rest(iterable):
return iterable[1:]
def last(iterable):
return iterable[-1]
def get(iterable, index, default=None):
try:
return iterable[index]
except (IndexError, KeyError):
return default
def next(iterable, value, n=1, default=None):
if value in iterable:
index = iterable.index(value)
return get(iterable, index + n, default=default)
return default
def prev(*args, **kwargs):
kwargs['n'] = kwargs.get('n', 1) * -1
return next(*args, **kwargs)
def chainable(method):
@wraps(method)
def f(self, *args, **kwargs):
method(self, *args, **kwargs)
return self
f.is_chainable = True
return f
def list_from_args(args):
"""
Flatten list of args
So as to accept either an array
Or as many arguments
For example:
func(['x', 'y'])
func('x', 'y')
"""
# Empty args
if not args:
return []
# Get argument type
arg_type = type(args[0])
is_list = arg_type in LIST_TYPES
# Check that the arguments are uniform (of the same type)
same_type = all([
isinstance(arg, arg_type)
for arg in args
])
if not same_type:
raise Exception('Expected uniform arguments of same type !')
# Flatten iterables
# ['x', 'y'], ...
if is_list:
args_lists = map(list, args)
flattened_args = sum(args_lists, [])
return flattened_args
# Flatten set
# 'x', 'y'
return list(args)
# Decorator for list_from_args
def arglist(func):
@wraps(func)
def f(*args, **kwargs):
args_list = list_from_args(args)
return func(args_list, **kwargs)
return f
# Decorator for methods
def arglist_method(func):
@wraps(func)
def f(self, *args, **kwargs):
args_list = list_from_args(args)
return func(self, args_list, **kwargs)
return f
class Memoizer(object):
def __init__(self, func):
# Ugly hack but ...
self.is_methodified = False
self.orig_func = func
self.func = func
self.cache = {}
self.class_obj = None
def cache_key(self, args, kwargs):
sorted_kwargs = kwargs.items()
sorted_kwargs.sort()
arg_tuple = (self.class_obj,) + args + tuple(sorted_kwargs)
return hash(arg_tuple)
def has_cache(self, cache_key):
return cache_key in self.cache
def get_cache(self, cache_key):
return self.cache[cache_key]
def set_cache(self, cache_key, value):
self.cache[cache_key] = value
def del_cache(self, cache_key):
del self.cache[cache_key]
def clear(self):
self.cache = {}
def __call__(self, *args, **kwargs):
cache_key = self.cache_key(args, kwargs)
if not self.has_cache(cache_key):
value = self.func(*args, **kwargs)
self.set_cache(cache_key, value)
return self.get_cache(cache_key)
def __get__(self, obj, objtype):
"""Support instance methods."""
# Switch main object
self.class_obj = obj
self.func = partial(self.orig_func, obj)
return self
class TimedMemoizer(Memoizer):
def __init__(self, func, ttl):
self.timestamps = {}
self.ttl = ttl
super(TimedMemoizer, self).__init__(func)
def is_alive(self, cache_key):
time_diff = time.time() - self.timestamps[cache_key]
return time_diff < self.ttl
def has_cache(self, cache_key):
return super(TimedMemoizer, self).has_cache(cache_key) and self.is_alive(cache_key)
def set_cache(self, cache_key, value):
self.timestamps[cache_key] = time.time()
return super(TimedMemoizer, self).set_cache(cache_key, value)
def del_cache(self, cache_key):
        del self.timestamps[cache_key]
return super(TimedMemoizer, self).del_cache(cache_key)
def clear(self):
self.timestamps = {}
return super(TimedMemoizer, self).clear()
# Cache calls
def memoize(func):
"""Cache a functions output for a given set of arguments"""
return Memoizer(func)
def timed_memoize(ttl):
def wrapper(func):
return wraps(func)(TimedMemoizer(func, ttl))
return wrapper
def transform(transform_func):
"""Apply a transformation to a functions return value"""
def decorator(func):
@wraps(func)
def f(*args, **kwargs):
return transform_func(
func(*args, **kwargs)
)
return f
return decorator
def identity(x):
return x
def hash_dict(obj):
return _py_hash(
tuple(
obj.items()
)
)
def hash(obj):
"""Supports hashing dictionaires
"""
if isinstance(obj, dict):
return hash_dict(obj)
return _py_hash(obj)
# Useful functions
def unique(collection, mapper=hash):
return type(collection)({
mapper(v): v for v in collection
}.values())
def true_only(iterable):
return filter(bool, iterable)
def first_true(iterable):
true_values = true_only(iterable)
if true_values:
return true_values[0]
return None
def pluck_single(key, obj):
if isinstance(obj, dict):
return obj.get(key)
return getattr(obj, key, None)
def pluck(collection, attribute_key):
extractor = partial(pluck_single, attribute_key)
return map(extractor, collection)
def subkey(dct, keys):
"""Get an entry from a dict of dicts by the list of keys to 'follow'
"""
key = keys[0]
if len(keys) == 1:
return dct[key]
return subkey(dct[key], keys[1:])
# Useful transforms
negate = transform(operator.not_)
uniquify = transform(unique)
|
Python
| 0
|
@@ -5156,17 +5156,21 @@
ection)(
-%7B
+dict(
%0A
@@ -5170,16 +5170,17 @@
+(
mapper(v
@@ -5184,11 +5184,20 @@
r(v)
-:
+,
v
+)%0A
for
@@ -5217,17 +5217,17 @@
ion%0A
-%7D
+)
.values(
|
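Decoded, the diff above swaps the dict comprehension in unique() for the dict() constructor over a generator of (key, value) pairs, since dict comprehensions only arrived in Python 2.7. A minimal before/after sketch (names follow the source):

# Python 2.7+ form, rejected here for 2.6 compatibility:
#   {mapper(v): v for v in collection}

# Python 2.6-compatible equivalent used by the commit:
def unique(collection, mapper=hash):
    return type(collection)(dict(
        (mapper(v), v)
        for v in collection
    ).values())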
29134a36b3b1d5db12fe4891d1f15191f7f1fa31
|
make collection paths unique to avoid all sorts of mayhem
|
src/compas_blender/utilities/collections.py
|
src/compas_blender/utilities/collections.py
|
import bpy
from typing import List, Text
from compas_blender.utilities import delete_objects
__all__ = [
"create_collection",
"create_collections",
"create_collections_from_path",
"clear_collection",
"clear_collections"
]
def create_collection(name: Text, parent: bpy.types.Collection = None) -> bpy.types.Collection:
"""Create a collection with the given name.
Parameters
----------
name : str
parent : :class:`bpy.types.Collection`, optional
Returns
-------
:class:`bpy.types.Collection`
"""
if not name:
return
collection = bpy.data.collections.get(name) or bpy.data.collections.new(name)
if not parent:
if collection.name not in bpy.context.scene.collection.children:
bpy.context.scene.collection.children.link(collection)
else:
if collection.name not in parent.children:
parent.children.link(collection)
return collection
def create_collections(names: List[Text]) -> List[bpy.types.Collection]:
"""Create multiple collections at once.
Parameters
----------
names : list of str
Returns
-------
list of :class:`bpy.types.Collection`
"""
collections = [create_collection(name) for name in names]
return collections
def create_collections_from_path(path: Text, separator: Text = '::') -> List[bpy.types.Collection]:
"""Create nested collections from a collection path string.
Parameters
----------
path : str
The collection path with collection names separated by the specified separator.
separator : str, optional
Returns
-------
list of :class:`bpy.types.Collection`
"""
names = path.split(separator)
collections = []
parent = None
for name in names:
collection = create_collection(name, parent=parent)
parent = collection
collections.append(collection)
return collections
def clear_collection(name: Text):
"""Clear the objects from a collection."""
objects = list(bpy.data.collections[name].objects)
if objects:
delete_objects(objects)
def clear_collections(collections: List[bpy.types.Collection]):
"""Clear the objects from multiple collections."""
    for collection in collections:
        clear_collection(collection.name)
|
Python
| 0
|
@@ -240,16 +240,246 @@
ns%22%0A%5D%0A%0A%0A
+def collection_path(collection, names=%5B%5D):%0A for parent in bpy.data.collections:%0A if collection.name in parent.children:%0A names.append(parent.name)%0A collection_path(parent, names)%0A return names%0A%0A%0A
def crea
@@ -817,108 +817,286 @@
urn%0A
+%0A
-collection = bpy.data.collections.get(name) or bpy.data.collections.
+if not parent:%0A%0A if name in bpy.data.collections:%0A count = 1%0A newname = f'%7Bname%7D.%7Bcount:04%7D'%0A while newname in bpy.data.collections:%0A count += 1%0A
new
-(
name
-)%0A if not parent:
+ = f'%7Bname%7D.%7Bcount:04%7D'%0A name = newname
%0A
@@ -1092,35 +1092,32 @@
newname%0A
-if
collection.name
@@ -1114,70 +1114,50 @@
tion
-.name not in bpy.context.scene.collection.children:%0A
+ = bpy.data.collections.new(name)%0A
bp
@@ -1152,18 +1152,16 @@
-
bpy.cont
@@ -1217,34 +1217,38 @@
else:%0A
-if
+path =
collection.name
@@ -1246,81 +1246,298 @@
tion
-.name not in parent.children:%0A parent.children.link(collection
+_path(parent)%5B::-1%5D + %5Bparent.name%5D%0A name = %22::%22.join(path) + %22::%22 + name%0A if name not in parent.children:%0A collection = bpy.data.collections.new(name)%0A parent.children.link(collection)%0A else:%0A collection = bpy.data.collections.get(name
)%0A
|
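Decoding the escaped diff above: create_collection now makes root-level names unique by appending a zero-padded counter, and prefixes nested collection names with the full parent path joined by "::". A sketch of the suffix logic alone, assuming Blender's bpy.data.collections mapping (the helper name is ours; the commit inlines this logic):

import bpy

def unique_collection_name(name: str) -> str:
    # Append ".0001", ".0002", ... until the name is unused.
    if name not in bpy.data.collections:
        return name
    count = 1
    newname = f'{name}.{count:04}'
    while newname in bpy.data.collections:
        count += 1
        newname = f'{name}.{count:04}'
    return newname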
886396d69f2109f8fcfe2d92e39d73959f406ccf
|
Add Union type hint
|
src/poliastro/frames/util.py
|
src/poliastro/frames/util.py
|
from typing import Dict
from astropy.coordinates.baseframe import BaseCoordinateFrame
from poliastro.bodies import (
Body,
Earth,
Jupiter,
Mars,
Mercury,
Neptune,
Saturn,
SolarSystemPlanet,
Sun,
Uranus,
Venus,
)
from poliastro.constants import J2000
from .ecliptic import GeocentricMeanEcliptic, HeliocentricEclipticJ2000
from .enums import Planes
from .equatorial import (
GCRS,
HCRS,
JupiterICRS,
MarsICRS,
MercuryICRS,
NeptuneICRS,
SaturnICRS,
UranusICRS,
VenusICRS,
)
from .fixed import (
ITRS,
JupiterFixed,
MarsFixed,
MercuryFixed,
NeptuneFixed,
SaturnFixed,
UranusFixed,
VenusFixed,
)
_FRAME_MAPPING = {
Mercury: {Planes.EARTH_EQUATOR: MercuryICRS, Planes.BODY_FIXED: MercuryFixed},
Venus: {Planes.EARTH_EQUATOR: VenusICRS, Planes.BODY_FIXED: VenusFixed},
Earth: {
Planes.EARTH_EQUATOR: GCRS,
Planes.EARTH_ECLIPTIC: GeocentricMeanEcliptic,
Planes.BODY_FIXED: ITRS,
},
Mars: {Planes.EARTH_EQUATOR: MarsICRS, Planes.BODY_FIXED: MarsFixed},
Jupiter: {Planes.EARTH_EQUATOR: JupiterICRS, Planes.BODY_FIXED: JupiterFixed},
Saturn: {Planes.EARTH_EQUATOR: SaturnICRS, Planes.BODY_FIXED: SaturnFixed},
Uranus: {Planes.EARTH_EQUATOR: UranusICRS, Planes.BODY_FIXED: UranusFixed},
Neptune: {Planes.EARTH_EQUATOR: NeptuneICRS, Planes.BODY_FIXED: NeptuneFixed},
} # type: Dict[SolarSystemPlanet, Dict[Planes, BaseCoordinateFrame]]
_FRAME_MAPPING[Sun] = {
Planes.EARTH_EQUATOR: HCRS,
Planes.EARTH_ECLIPTIC: HeliocentricEclipticJ2000,
} # type: Dict[Body, Dict[Planes, BaseCoordinateFrame]]
def get_frame(attractor, plane, obstime=J2000):
"""Returns an appropriate reference frame from an attractor and a plane.
Available planes are Earth equator (parallel to GCRS) and Earth ecliptic.
The fundamental direction of both is the equinox of epoch (J2000).
An obstime is needed to properly locate the attractor.
Parameters
----------
attractor : ~poliastro.bodies.Body
Body that serves as the center of the frame.
plane : ~poliastro.frames.Planes
Fundamental plane of the frame.
obstime : ~astropy.time.Time
Time of the frame.
"""
try:
frames = _FRAME_MAPPING[attractor]
except KeyError:
raise NotImplementedError(
"Frames for orbits around custom bodies are not yet supported"
)
try:
frame_class = frames[plane]
except KeyError:
raise NotImplementedError(
f"A frame with plane {plane} around body {attractor} is not yet implemented"
)
return frame_class(obstime=obstime)
|
Python
| 0
|
@@ -16,17 +16,24 @@
ort Dict
+, Union
%0A
-
%0Afrom as
@@ -723,24 +723,113 @@
MAPPING = %7B%0A
+ Sun: %7BPlanes.EARTH_EQUATOR: HCRS, Planes.EARTH_ECLIPTIC: HeliocentricEclipticJ2000%7D,%0A
Mercury:
@@ -1536,16 +1536,28 @@
e: Dict%5B
+Union%5BBody,
SolarSys
@@ -1569,176 +1569,9 @@
anet
-, Dict%5BPlanes, BaseCoordinateFrame%5D%5D%0A%0A_FRAME_MAPPING%5BSun%5D = %7B%0A Planes.EARTH_EQUATOR: HCRS,%0A Planes.EARTH_ECLIPTIC: HeliocentricEclipticJ2000,%0A%7D # type: Dict%5BBody
+%5D
, Di
|
54fab9c1cb9e2888f7050392d38a94b4f6546741
|
fix branch reference
|
get_version.py
|
get_version.py
|
"""Return the short version string."""
from mpf._version import __short_version__
print(__short_version__)
|
Python
| 0
|
@@ -81,16 +81,30 @@
_%0Aprint(
+%22%7B%7D.x%22.format(
__short_
@@ -113,9 +113,10 @@
rsion__)
+)
%0A
|
293a2c5a691fc0531f26085884b0ee92117592e5
|
Use _instance_or_null in github3.git
|
github3/git.py
|
github3/git.py
|
# -*- coding: utf-8 -*-
"""
github3.git
===========
This module contains all the classes relating to Git Data.
See also: http://developer.github.com/v3/git/
"""
from __future__ import unicode_literals
from json import dumps
from base64 import b64decode
from .models import GitHubObject, GitHubCore, BaseCommit
from .users import User
from .decorators import requires_auth
class Blob(GitHubObject):
"""The :class:`Blob <Blob>` object.
See also: http://developer.github.com/v3/git/blobs/
"""
def _update_attributes(self, blob):
self._api = blob.get('url', '')
#: Raw content of the blob.
self.content = blob.get('content').encode()
#: Encoding of the raw content.
self.encoding = blob.get('encoding')
#: Decoded content of the blob.
self.decoded = self.content
if self.encoding == 'base64':
self.decoded = b64decode(self.content)
#: Size of the blob in bytes
self.size = blob.get('size')
#: SHA1 of the blob
self.sha = blob.get('sha')
def _repr(self):
return '<Blob [{0:.10}]>'.format(self.sha)
class GitData(GitHubCore):
"""The :class:`GitData <GitData>` object. This isn't directly returned to
the user (developer) ever. This is used to prevent duplication of some
common items among other Git Data objects.
"""
def _update_attributes(self, data):
#: SHA of the object
self.sha = data.get('sha')
self._api = data.get('url', '')
class Commit(BaseCommit):
"""The :class:`Commit <Commit>` object. This represents a commit made in a
repository.
See also: http://developer.github.com/v3/git/commits/
"""
def _update_attributes(self, commit):
super(Commit, self)._update_attributes(commit)
#: dict containing at least the name, email and date the commit was
#: created
self.author = commit.get('author', {}) or {}
# If GH returns nil/None then make sure author is a dict
self._author_name = self.author.get('name', '')
#: dict containing similar information to the author attribute
self.committer = commit.get('committer', {}) or {}
# blank the data if GH returns no data
self._commit_name = self.committer.get('name', '')
#: :class:`Tree <Tree>` the commit belongs to.
self.tree = None
if commit.get('tree'):
self.tree = Tree(commit.get('tree'), self)
def _repr(self):
return '<Commit [{0}:{1}]>'.format(self._author_name, self.sha)
def author_as_User(self):
"""Attempt to return the author attribute as a
:class:`User <github3.users.User>`. No guarantees are made about the
validity of this object, i.e., having a login or created_at object.
"""
return User(self.author, self)
def committer_as_User(self):
"""Attempt to return the committer attribute as a
:class:`User <github3.users.User>` object. No guarantees are made
about the validity of this object.
"""
return User(self.committer, self)
class Reference(GitHubCore):
"""The :class:`Reference <Reference>` object. This represents a reference
created on a repository.
See also: http://developer.github.com/v3/git/refs/
"""
def _update_attributes(self, ref):
self._api = ref.get('url', '')
#: The reference path, e.g., refs/heads/sc/featureA
self.ref = ref.get('ref')
#: :class:`GitObject <GitObject>` the reference points to
self.object = GitObject(ref.get('object', {}))
def _repr(self):
return '<Reference [{0}]>'.format(self.ref)
@requires_auth
def delete(self):
"""Delete this reference.
:returns: bool
"""
return self._boolean(self._delete(self._api), 204, 404)
@requires_auth
def update(self, sha, force=False):
"""Update this reference.
:param str sha: (required), sha of the reference
:param bool force: (optional), force the update or not
:returns: bool
"""
data = {'sha': sha, 'force': force}
json = self._json(self._patch(self._api, data=dumps(data)), 200)
if json:
self._update_attributes(json)
return True
return False
class GitObject(GitData):
"""The :class:`GitObject <GitObject>` object."""
def _update_attributes(self, obj):
super(GitObject, self)._update_attributes(obj)
#: The type of object.
self.type = obj.get('type')
def _repr(self):
return '<Git Object [{0}]>'.format(self.sha)
class Tag(GitData):
"""The :class:`Tag <Tag>` object.
See also: http://developer.github.com/v3/git/tags/
"""
def _update_attributes(self, tag):
super(Tag, self)._update_attributes(tag)
#: String of the tag
self.tag = tag.get('tag')
#: Commit message for the tag
self.message = tag.get('message')
#: dict containing the name and email of the person
self.tagger = tag.get('tagger')
#: :class:`GitObject <GitObject>` for the tag
self.object = GitObject(tag.get('object', {}))
def _repr(self):
return '<Tag [{0}]>'.format(self.tag)
class Tree(GitData):
"""The :class:`Tree <Tree>` object.
See also: http://developer.github.com/v3/git/trees/
"""
def _update_attributes(self, tree):
super(Tree, self)._update_attributes(tree)
#: list of :class:`Hash <Hash>` objects
self.tree = [Hash(t) for t in tree.get('tree', [])]
def _repr(self):
return '<Tree [{0}]>'.format(self.sha)
def recurse(self):
"""Recurse into the tree.
:returns: :class:`Tree <Tree>`
"""
json = self._json(self._get(self._api, params={'recursive': '1'}),
200)
return Tree(json, self) if json else None
class Hash(GitHubObject):
"""The :class:`Hash <Hash>` object.
See also: http://developer.github.com/v3/git/trees/#create-a-tree
"""
def _update_attributes(self, info):
#: Path to file
self.path = info.get('path')
#: File mode
self.mode = info.get('mode')
#: Type of hash, e.g., blob
self.type = info.get('type')
#: Size of hash
self.size = info.get('size')
#: SHA of the hash
self.sha = info.get('sha')
#: URL of this object in the GitHub API
self.url = info.get('url')
def _repr(self):
return '<Hash [{0}]>'.format(self.sha)
|
Python
| 0.000605
|
@@ -5942,42 +5942,42 @@
urn
-Tree(json, self) if json else None
+self._instance_or_null(Tree, json)
%0A%0A%0Ac
|
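The replacement leans on GitHubCore._instance_or_null, defined elsewhere in github3.py. Judging only from the expression it replaces (Tree(json, self) if json else None), the helper behaves roughly like this sketch (signature inferred, not verified against the library):

def _instance_or_null(self, instance_class, json):
    # Build instance_class from the JSON payload, or return None
    # when the API gave back nothing usable.
    if not json:
        return None
    return instance_class(json, self)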
b14c6446ac16798f797f279818ae53adc549323e
|
Clean up wrap.py a bit
|
unnaturalcode/wrap.py
|
unnaturalcode/wrap.py
|
#!/usr/bin/env python
# Copyright 2013 Joshua Charles Campbell, Alex Wilson
#
# This file is part of UnnaturalCode.
#
# UnnaturalCode is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# UnnaturalCode is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with UnnaturalCode. If not, see <http://www.gnu.org/licenses/>.
import re, runpy, sys, traceback
from logging import debug, info, warning, error
print sys.path
name_err_extract = re.compile(r"^name\s+'([^']+)'")
def get_file_line(filename, line):
try:
with open(filename) as f:
			return f.readlines()[line - 1]
except:
return None
try:
runpy.run_path(sys.argv[1])
except SyntaxError as se:
print 'syntax error: {} {}:{}'.format(se.filename, se.lineno - 1,
se.offset)
except NameError as ne:
exctype, _, tb = sys.exc_info()
filename, line, func, text = traceback.extract_tb(tb)[-1]
name = name_err_extract.match(ne.message).group(1)
# note: text has all leading whitespace stripped, so the column
# we find for name will not be quite right.
column = (get_file_line(filename, line) or text).index(name)
print 'name error: {} {}:{}'.format(filename, line, column)
print [m.__file__ for m in sys.modules.values() if hasattr(m, '__file__')] + [sys.argv[1]]
|
Python
| 0
|
@@ -822,21 +822,39 @@
t re
-, runpy, sys,
+%0Aimport runpy%0Aimport sys%0Aimport
tra
@@ -910,16 +910,38 @@
error%0A%0A
+%0Adef main():%0A %0A
print sy
@@ -947,17 +947,25 @@
ys.path%0A
-%0A
+ %0A
name_err
@@ -1008,17 +1008,25 @@
'%5D+)'%22)%0A
-%0A
+ %0A
def get_
@@ -1056,14 +1056,22 @@
e):%0A
+
%09try:%0A
+
%09%09wi
@@ -1094,16 +1094,20 @@
) as f:%0A
+
%09%09%09retur
@@ -1139,16 +1139,20 @@
ne - 1%5D%0A
+
%09except:
@@ -1152,16 +1152,20 @@
except:%0A
+
%09%09return
@@ -1174,14 +1174,26 @@
one%0A
-%0A
+ %0A
try:%0A
+
%09run
@@ -1217,16 +1217,20 @@
rgv%5B1%5D)%0A
+
except S
@@ -1247,16 +1247,20 @@
as se:%0A
+
%09print '
@@ -1318,16 +1318,20 @@
no - 1,%0A
+
%09%09se.off
@@ -1335,16 +1335,20 @@
offset)%0A
+
except N
@@ -1363,16 +1363,20 @@
as ne:%0A
+
%09exctype
@@ -1400,16 +1400,20 @@
_info()%0A
+
%09filenam
@@ -1463,16 +1463,20 @@
tb)%5B-1%5D%0A
+
%09name =
@@ -1519,16 +1519,20 @@
roup(1)%0A
+
%09# note:
@@ -1588,16 +1588,20 @@
column%0A
+
%09# we fi
@@ -1637,16 +1637,20 @@
right.%0A
+
%09column
@@ -1703,16 +1703,20 @@
x(name)%0A
+
%09print '
@@ -1768,17 +1768,25 @@
column)%0A
-%0A
+ %0A
print %5Bm
@@ -1869,8 +1869,46 @@
gv%5B1%5D%5D%0A%0A
+if __name__ == '__main__':%0A main()%0A
|
914a7ae8480875942f6273cf70249f9f9fdf482a
|
Remove unused and unimplemented `retry_on_decode_error` option from modelzoo.util's `load_graphdef`. The option is no longer needed as loading itself will autodetect Exceptions during loading and retry
|
lucid/modelzoo/util.py
|
lucid/modelzoo/util.py
|
# Copyright 2018 The Lucid Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for modelzoo models."""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from google.protobuf.message import DecodeError
import logging
# create logger with module name, e.g. lucid.misc.io.reading
log = logging.getLogger(__name__)
from lucid.misc.io import load
def load_text_labels(labels_path):
return load(labels_path).splitlines()
def load_graphdef(model_url, reset_device=True, retry_on_decode_error=True):
"""Load GraphDef from a binary proto file."""
graph_def = load(model_url)
if reset_device:
for n in graph_def.node:
n.device = ""
return graph_def
def forget_xy(t):
"""Ignore sizes of dimensions (1, 2) of a 4d tensor in shape inference.
This allows using smaller input sizes, which create an invalid graph at higher
layers (for example because a spatial dimension becomes smaller than a conv
filter) when we only use early parts of it.
"""
shape = (t.shape[0], None, None, t.shape[3])
return tf.placeholder_with_default(t, shape)
|
Python
| 0.000001
|
@@ -1134,36 +1134,8 @@
True
-, retry_on_decode_error=True
):%0A
|
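Per the commit message, the retry now happens inside loading itself, which makes the flag dead weight. A hedged sketch of that kind of retry-on-DecodeError wrapper (load_once and invalidate_cache are hypothetical callables, not lucid's API):

from google.protobuf.message import DecodeError

def load_with_retry(url, load_once, invalidate_cache, retries=1):
    # On a DecodeError, assume a truncated cached download:
    # drop the cached copy and fetch once more before giving up.
    for attempt in range(retries + 1):
        try:
            return load_once(url)
        except DecodeError:
            if attempt == retries:
                raise
            invalidate_cache(url)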
da73a9a5c63a3bbeee707dd47cf9a76b03f95819
|
Update SubnetCalculation.py
|
Main/SubnetCalculation.py
|
Main/SubnetCalculation.py
|
class subnet_calculation():
@staticmethod
def verify_IP(start, end, hosts):
container = [start, end]
startIP = start
endIP = end
try:
hosts = int(hosts)
except (TypeError, ValueError) as e:
print("cant convert", e)
count = 0
validNum = ["0","1","2","3","4","5","6","7","8","9"]
isValid = [] # order stored [startIP, endIP, hosts] as either 'valid' or 'not valid'
answer = []
for IP in container:
count = 0
numCount = 0
dotCount = 0
IP_min = 7
IP_max = 15
validNum = ["0","1","2","3","4","5","6","7","8","9"]
IPlist = []
        # The following block splits the IP at the dots and verifies that no
        # octet is greater than 255, then counts the number of dots.
newIP = IP.split(".")
for i in newIP:
try:
for j in i:
if j not in validNum:
answer.append("not valid0 " + str(i))
break
except:
pass
if i == "":
answer.append("not valid1 " + str(i))
break
try:
if int(i) > 255:
answer.append("not valid2 " + str(i))
break
except:
pass
for i in IP:
if i == ".":
dotCount += 1
if dotCount != 3:
answer.append("not valid3 " + str(i))
break
for i in IP:
IPlist.append(str(i))
# reset the counters to 0
count = 0
numCount = 0
dotCount = 0
#this block
for i in range(len(IP)):
if (count == 0 and IPlist[i] not in validNum):
answer.append("not valid4 " + str(i))
break
if (IPlist[i] in validNum and count >= 0):
if numCount > 4 and dotCount == 0:
print(numCount,IPlist[i],dotCount)
answer.append("not valid5 " + str(i))
break
numCount += 1
count += 1
if (IPlist[i] != "."):
numCount += 1
elif (IPlist[i] == "." and count > 0):
try:
if IPlist[i + 1] == ".":
answer.append("not valid6 " + i)
break
except:
pass
dotCount += 1
count +=1
if (IPlist[i] not in validNum and IPlist[i] != "."):
answer.append("not valid7 " + str(i))
break
if dotCount > 3:
answer.append("not valid8 " + str(i))
break
if numCount < 4:
answer.append("not valid9 " + str(i))
break
if (len(IPlist) < IP_min or len(IPlist) > IP_max):
answer.append("not valid10 " + str(i))
break
answer.append("valid")
if isinstance(hosts, int) and hosts > 0 and hosts < 65536:
answer.append("valid")
else:
answer.append("not valid11 " + str(i))
return answer # needs to pass to Verify_Range if passing all previous tests (ex. 127.0.0.1 is not comparable against 192.168.0.1)
@staticmethod
def Verify_Range(start, end):
pass
@staticmethod
def calculateMask(start, end, hosts):
pass
|
Python
| 0
|
@@ -473,16 +473,51 @@
er = %5B%5D%0A
+ %0A # Container Block:
%0A
@@ -924,16 +924,66 @@
it(%22.%22)%0A
+ %0A # newIP Container Block:%0A
|
75be8ed7040cd43aa0a41cba56da48942972ca42
|
Add Tests For Models
|
monkey_pdns/app/tests.py
|
monkey_pdns/app/tests.py
|
from django.test import TestCase
from .views import hello
class View_hello_tests(TestCase):
def test_hello(self):
response = self.client.get('/')
self.assertEqual(response.status_code, 200)
self.assertContains(response, 'Hello World!')
|
Python
| 0
|
@@ -30,33 +30,132 @@
ase%0A
-%0Afrom .views import hello
+from django.contrib.auth.models import User%0Afrom .views import hello%0Afrom .models import Zone, Sub_Zone, Record, Record_Type
%0A%0Acl
@@ -360,8 +360,1642 @@
rld!')%0A%0A
+class Record_Type_Test(TestCase):%0A%0A def setUp(self):%0A Record_Type.objects.create(name=%22A%22)%0A%0A def test_record_type(self):%0A created_a = Record_Type.objects.get(name=%22A%22)%0A self.assertEqual(str(created_a), %22A%22)%0A%0A%0Aclass Zone_Test(TestCase):%0A%0A def setUp(self):%0A Zone.objects.create(name=%22test.domain.com.%22)%0A%0A def test_zone(self):%0A created_zone = Zone.objects.get(name=%22test.domain.com.%22)%0A self.assertEqual(str(created_zone), %22test.domain.com.%22)%0A%0Aclass Sub_Zone_Test(TestCase):%0A%0A def setUp(self):%0A self.user = User.objects.create_user(username=%22test_user%22, password=%22test%22)%0A self.zone = Zone.objects.create(name=%22test.domain.com.%22)%0A Sub_Zone.objects.create(owner=self.user, prefix=%22new%22, super_zone=self.zone)%0A%0A def test_sub_zone(self):%0A created_sub_zone = Sub_Zone.objects.get(owner=self.user, prefix=%22new%22, super_zone=self.zone)%0A self.assertEqual(str(created_sub_zone), %22new.test.domain.com.%22)%0A%0Aclass Record_Test(TestCase):%0A%0A def setUp(self):%0A self.record = Record_Type.objects.create(name=%22A%22)%0A self.user = User.objects.create_user(username=%22test_user%22, password=%22test%22)%0A self.zone = Zone.objects.create(name=%22test.domain.com.%22)%0A self.sub_zone = Sub_Zone.objects.create(owner=self.user, prefix=%22new%22, super_zone=self.zone)%0A Record.objects.create(prefix=%22www%22, type=self.record, zone=self.sub_zone, context=%22140.115.50.58%22)%0A%0A def test_record(self):%0A created_record = Record.objects.get(zone=self.sub_zone)%0A self.assertEqual(str(created_record), %22www.new.test.domain.com. A 140.115.50.58%22)%0A
|
ab819232e0e036709ff6098b2d9f259fc8956ca2
|
add in sentinel support
|
update_school_data.py
|
update_school_data.py
|
import governorhub
import logging
import redis
import os
import loggly.handlers
from datetime import datetime
from similar_schools import update_similar_schools
from dfe_data import update_dfe_data
logging.basicConfig(level=logging.INFO)
# Turn off requests INFO level logging
requests_log = logging.getLogger("requests")
requests_log.setLevel(logging.WARNING)
REDIS_HOST = os.environ.get('REDIS_HOST', '127.0.0.1')
REDIS_PORT = os.environ.get('REDIS_PORT', 6379)
REDIS_PASSWORD = os.environ.get('REDIS_PASSWORD', None)
LOGGLY_TOKEN = os.environ.get('LOGGLY_TOKEN', None)
UPDATE_CHAN = 'or2:school:updatedata:channel'
UPDATE_Q = 'or2:school:updatedataq'
if LOGGLY_TOKEN is not None:
handler = loggly.handlers.HTTPSHandler('https://logs-01.loggly.com/inputs/%s/tag/school-data' % LOGGLY_TOKEN)
logging.getLogger('').addHandler(handler)
governorhub.connect()
School = governorhub.ModelType('school')
def update_school(school):
if getattr(school, 'manualData', False):
logging.warning('School requested that has manual data: %s. Not processing' % school._id)
return
update_similar_schools(school)
update_dfe_data(school)
setattr(school, 'lastRefreshed', datetime.now())
school.save()
def clear_queue(client):
while True:
try:
schoolId = client.lpop(UPDATE_Q)
if schoolId is None:
break
schoolId = schoolId.decode('utf-8')
try:
logging.info('Updating ' + schoolId)
school = School.get(schoolId)
update_school(school)
logging.info('Updated ' + schoolId)
except Exception as ex:
logging.error('Error updating data for school: ' + schoolId)
logging.exception(ex)
except Exception as ex:
logging.exception(ex)
def listen_for_requests():
client = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT, password=REDIS_PASSWORD)
clear_queue(client)
ps = client.pubsub()
ps.subscribe(UPDATE_CHAN)
# Hang until we get a message
try:
for message in ps.listen():
try:
if message['type'] == 'message':
data = message['data'].decode('utf-8')
if data == 'update':
clear_queue(client)
except Exception as ex:
logging.exception(ex)
finally:
ps.close()
if __name__ == '__main__':
listen_for_requests()
|
Python
| 0
|
@@ -40,16 +40,52 @@
t redis%0A
+from redis.sentinel import Sentinel%0A
import o
@@ -552,16 +552,185 @@
', None)
+%0ASENTINEL_HOST = os.environ.get('SENTINEL_HOST', None)%0ASENTINEL_PORT = os.environ.get('SENTINEL_PORT', 26379)%0ASENTINEL_MASTER = os.environ.get('SENTINEL_MASTER', 'base')
%0A%0ALOGGLY
@@ -1967,16 +1967,46 @@
ests():%0A
+ if SENTINEL_HOST is None:%0A
client
@@ -2084,16 +2084,132 @@
ASSWORD)
+%0A else:%0A sentinel = Sentinel(%5B(SENTINEL_HOST, SENTINEL_PORT)%5D)%0A client = sentinel.master_for(SENTINEL_MASTER)
%0A%0A clea
|
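For reference, redis-py's Sentinel client asks the sentinels which node is currently master for a named service and hands back a client that survives failover; a minimal standalone example (host, port and timeout are placeholder values, 'base' is the SENTINEL_MASTER default from the diff):

from redis.sentinel import Sentinel

# Discover the current master for the 'base' service, then use the
# returned client exactly like a plain StrictRedis connection.
sentinel = Sentinel([('127.0.0.1', 26379)], socket_timeout=0.5)
client = sentinel.master_for('base', socket_timeout=0.5)
client.lpop('or2:school:updatedataq')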
c7a90a915af95abeda81de602287df4fcbd1555a
|
improve error message
|
mrec/base_recommender.py
|
mrec/base_recommender.py
|
try:
import cPickle as pickle
except ImportError:
import pickle
import numpy as np
from scipy.sparse import csr_matrix
class BaseRecommender(object):
"""
Minimal interface to be implemented by recommenders, along with
some helper methods. A concrete recommender must implement the
recommend_items() method and should provide its own implementation
of __str__() so that it can be identified when printing results.
Notes
=====
In most cases you should inherit from either
`mrec.mf.recommender.MatrixFactorizationRecommender` or
`mrec.item_similarity.recommender.ItemSimilarityRecommender`
and *not* directly from this class.
These provide more efficient implementations of save(), load()
and the batch methods to recommend items.
"""
def recommend_items(self,dataset,u,max_items=10,return_scores=True):
"""
Recommend new items for a user.
Parameters
==========
dataset : scipy.sparse.csr_matrix
User-item matrix containing known items.
u : int
Index of user for which to make recommendations.
max_items : int
Maximum number of recommended items to return.
return_scores : bool
If true return a score along with each recommended item.
Returns
=======
recs : list
List of (idx,score) pairs if return_scores is True, else
just a list of idxs.
"""
raise NotImplementedError('you must implement recommend_items()')
def save(self,filepath):
"""
Serialize model to file.
Parameters
==========
filepath : str
Filepath to write to, which must have the '.npz' suffix.
Notes
=====
Internally numpy.savez may be used to serialize the model and
this would add the '.npz' suffix to the supplied filepath if
it were not already present, which would most likely cause errors
in client code.
"""
if not filepath.endswith('.npz'):
raise ValueError('filepath must have ".npz" suffix')
archive = self._create_archive()
if archive:
np.savez(filepath,**archive)
else:
pickle.dump(self,open(filepath,'w'))
def _create_archive(self):
"""
Optionally return a dict of fields to be serialized
in a numpy archive: this lets you store arrays efficiently
by separating them from the model itself.
Returns
=======
archive : dict
Fields to serialize, must include the model itself
under the key 'model'.
"""
pass
@staticmethod
def load(filepath):
"""
Load a recommender model from file after it has been serialized with
save().
Parameters
==========
filepath : str
The filepath to read from.
"""
r = np.load(filepath)
if isinstance(r,BaseRecommender):
model = r
else:
model = np.loads(str(r['model']))
model._load_archive(r) # restore any fields serialized separately
return model
    def _load_archive(self, archive):
"""
Load fields from a numpy archive.
Notes
=====
This is called by the static load() method and should be used
to restore the fields returned by _create_archive().
"""
pass
@staticmethod
def read_recommender_description(filepath):
"""
Read a recommender model description from file after it has
been saved by save(), without loading any additional
associated data into memory.
Parameters
----------
filepath : str
The filepath to read from.
"""
r = np.load(filepath,mmap_mode='r')
if isinstance(r,BaseRecommender):
model = r
else:
model = np.loads(str(r['model']))
return str(model)
def __str__(self):
if hasattr(self,'description'):
return self.description
return 'unspecified recommender: you should set self.description or implement __str__()'
def batch_recommend_items(self,dataset,max_items=10,return_scores=True,show_progress=False):
"""
Recommend new items for all users in the training dataset.
Parameters
==========
dataset : scipy.sparse.csr_matrix
User-item matrix containing known items.
max_items : int
Maximum number of recommended items to return.
return_scores : bool
If true return a score along with each recommended item.
show_progress: bool
If true print something to stdout to show progress.
Returns
=======
recs : list of lists
Each entry is a list of (idx,score) pairs if return_scores is True,
else just a list of idxs.
Notes
=====
This provides a default implementation, you will be able to optimize
this for most recommenders.
"""
recs = []
for u in xrange(self.num_users):
if show_progress and u%1000 == 0:
print u,'..',
recs.append(self.recommend_items(dataset,u,max_items,return_scores))
if show_progress:
print
return recs
def range_recommend_items(self,dataset,user_start,user_end,max_items=10,return_scores=True):
"""
Recommend new items for a range of users in the training dataset.
Assumes you've already called fit() to learn the similarity matrix.
Parameters
==========
dataset : scipy.sparse.csr_matrix
User-item matrix containing known items.
user_start : int
Index of first user in the range to recommend.
user_end : int
Index one beyond last user in the range to recommend.
max_items : int
Maximum number of recommended items to return.
return_scores : bool
If true return a score along with each recommended item.
Returns
=======
recs : list of lists
Each entry is a list of (idx,score) pairs if return_scores is True,
else just a list of idxs.
Notes
=====
This provides a default implementation, you will be able to optimize
this for most recommenders.
"""
return [self.recommend_items(dataset,u,max_items,return_scores) for u in xrange(user_start,user_end)]
def _zero_known_item_scores(self,r,train):
"""
Helper function to set predicted scores/ratings for training items
to zero or less, to avoid recommending already known items.
Parameters
==========
r : numpy.ndarray or scipy.sparse.csr_matrix
Predicted scores/ratings.
train : scipy.sparse.csr_matrix
The training user-item matrix, which can include zero-valued entries.
Returns
=======
r_safe : scipy.sparse.csr_matrix
r_safe is equal to r except that r[u,i] <= 0 for all u,i with entries
in train.
"""
col = train.indices
if isinstance(r,csr_matrix):
max_score = r.data.max()
else:
max_score = r.max()
data = max_score * np.ones(col.shape)
# build up the row (user) indices
# - we can't just use row,col = train.nonzero() as this eliminates
# u,i for which train[u,i] has been explicitly set to zero
row = np.zeros(col.shape)
for u in xrange(train.shape[0]):
start,end = train.indptr[u],train.indptr[u+1]
if end > start:
row[start:end] = u
return r - csr_matrix((data,(row,col)),shape=r.shape)
|
Python
| 0.000008
|
@@ -2107,24 +2107,32 @@
eError('
+invalid
filepath
must ha
@@ -2123,16 +2123,21 @@
filepath
+ %7B0%7D,
must ha
@@ -2153,16 +2153,33 @@
suffix'
+.format(filepath)
)%0A%0A
|
493637ace6881defedee22971f3bc39fe9a5bd0a
|
Make it compatible with Bob
|
freesas/test/__init__.py
|
freesas/test/__init__.py
|
#!usr/bin/env python
# coding: utf-8
__author__ = "Jérôme Kieffer"
__license__ = "MIT"
__date__ = "05/09/2017"
__copyright__ = "2015, ESRF"
import unittest
from .test_all import suite
def run():
runner = unittest.TextTestRunner()
return runner.run(suite())
if __name__ == '__main__':
run()
|
Python
| 0.000001
|
@@ -97,18 +97,18 @@
= %22
-05/09
+15/01
/20
+2
1
-7
%22%0A__
@@ -126,16 +126,21 @@
= %222015
+-2021
, ESRF%22%0A
@@ -140,16 +140,27 @@
ESRF%22%0A%0A
+import sys%0A
import u
@@ -208,12 +208,79 @@
run
+_tests
():%0A
+ %22%22%22Run test complete test_suite%22%22%22%0A mysuite = suite()%0A
@@ -322,14 +322,14 @@
-return
+if not
run
@@ -340,17 +340,168 @@
run(
+my
suite
-())%0A
+).wasSuccessful():%0A print(%22Test suite failed%22)%0A return 1%0A else:%0A print(%22Test suite succeeded%22)%0A return 0%0A%0A%0Arun = run_tests
%0A%0Aif
@@ -529,14 +529,30 @@
_':%0A
-run(
+sys.exit(run_tests()
)%0A
|
1697e0a20b14c89cf2db209ef03cb1dc551b14a1
|
Bump version
|
cider/__init__.py
|
cider/__init__.py
|
from .core import Cider
__author__ = "Michael Sanders"
__version__ = "1.0"
__all__ = ['Cider']
|
Python
| 0
|
@@ -70,9 +70,9 @@
%221.
-0
+1
%22%0A__
@@ -84,13 +84,13 @@
= %5B
-'
+%22
Cider
-'
+%22
%5D%0A
|
e0a8f8f6765a071ba71191b6e047b861812ec2f9
|
Update settings.py
|
utilities/settings.py
|
utilities/settings.py
|
import math
#Tilt pot setpoints .158
kMaxDown = .79
kMaxUp = kMaxDown - .590
kTop = kMaxUp + .050
kTopShot = .292
kTopShotAtBase = .281
kBottom = kMaxDown - .050
kShootLevel = .646
kShootAtBase = .528
kShootRamp = .400
kLongShot = .600
class Settings():
"""Robot mapping. Values that are changed often go here."""
#Numbers to be changed through drive station
num_precision_one = 0.80
num_precision_two = 0.50
num_scaling = 1.25
num_macro_timeout = 15
|
Python
| 0.000001
|
@@ -47,10 +47,11 @@
= .
-79
+800
%0AkMa
|
7503e1e505dc70c93713d8848df3cbe1d5a6f44c
|
Improve English tag map. Re #593, #3311
|
spacy/lang/en/tag_map.py
|
spacy/lang/en/tag_map.py
|
# coding: utf8
from __future__ import unicode_literals
from ...symbols import POS, PUNCT, SYM, ADJ, CCONJ, NUM, DET, ADV, ADP, X, VERB
from ...symbols import NOUN, PROPN, PART, INTJ, SPACE, PRON
TAG_MAP = {
".": {POS: PUNCT, "PunctType": "peri"},
",": {POS: PUNCT, "PunctType": "comm"},
"-LRB-": {POS: PUNCT, "PunctType": "brck", "PunctSide": "ini"},
"-RRB-": {POS: PUNCT, "PunctType": "brck", "PunctSide": "fin"},
"``": {POS: PUNCT, "PunctType": "quot", "PunctSide": "ini"},
'""': {POS: PUNCT, "PunctType": "quot", "PunctSide": "fin"},
"''": {POS: PUNCT, "PunctType": "quot", "PunctSide": "fin"},
":": {POS: PUNCT},
"$": {POS: SYM, "Other": {"SymType": "currency"}},
"#": {POS: SYM, "Other": {"SymType": "numbersign"}},
"AFX": {POS: ADJ, "Hyph": "yes"},
"CC": {POS: CCONJ, "ConjType": "coor"},
"CD": {POS: NUM, "NumType": "card"},
"DT": {POS: DET},
"EX": {POS: ADV, "AdvType": "ex"},
"FW": {POS: X, "Foreign": "yes"},
"HYPH": {POS: PUNCT, "PunctType": "dash"},
"IN": {POS: ADP},
"JJ": {POS: ADJ, "Degree": "pos"},
"JJR": {POS: ADJ, "Degree": "comp"},
"JJS": {POS: ADJ, "Degree": "sup"},
"LS": {POS: PUNCT, "NumType": "ord"},
"MD": {POS: VERB, "VerbType": "mod"},
"NIL": {POS: ""},
"NN": {POS: NOUN, "Number": "sing"},
"NNP": {POS: PROPN, "NounType": "prop", "Number": "sing"},
"NNPS": {POS: PROPN, "NounType": "prop", "Number": "plur"},
"NNS": {POS: NOUN, "Number": "plur"},
"PDT": {POS: ADJ, "AdjType": "pdt", "PronType": "prn"},
"POS": {POS: PART, "Poss": "yes"},
"PRP": {POS: PRON, "PronType": "prs"},
"PRP$": {POS: ADJ, "PronType": "prs", "Poss": "yes"},
"RB": {POS: ADV, "Degree": "pos"},
"RBR": {POS: ADV, "Degree": "comp"},
"RBS": {POS: ADV, "Degree": "sup"},
"RP": {POS: PART},
"SP": {POS: SPACE},
"SYM": {POS: SYM},
"TO": {POS: PART, "PartType": "inf", "VerbForm": "inf"},
"UH": {POS: INTJ},
"VB": {POS: VERB, "VerbForm": "inf"},
"VBD": {POS: VERB, "VerbForm": "fin", "Tense": "past"},
"VBG": {POS: VERB, "VerbForm": "part", "Tense": "pres", "Aspect": "prog"},
"VBN": {POS: VERB, "VerbForm": "part", "Tense": "past", "Aspect": "perf"},
"VBP": {POS: VERB, "VerbForm": "fin", "Tense": "pres"},
"VBZ": {
POS: VERB,
"VerbForm": "fin",
"Tense": "pres",
"Number": "sing",
"Person": 3,
},
"WDT": {POS: ADJ, "PronType": "int|rel"},
"WP": {POS: NOUN, "PronType": "int|rel"},
"WP$": {POS: ADJ, "Poss": "yes", "PronType": "int|rel"},
"WRB": {POS: ADV, "PronType": "int|rel"},
"ADD": {POS: X},
"NFP": {POS: PUNCT},
"GW": {POS: X},
"XX": {POS: X},
"BES": {POS: VERB},
"HVS": {POS: VERB},
"_SP": {POS: SPACE},
}
|
Python
| 0
|
@@ -774,19 +774,17 @@
: %7BPOS:
-ADJ
+X
, %22Hyph%22
@@ -1499,19 +1499,19 @@
: %7BPOS:
-ADJ
+DET
, %22AdjTy
@@ -1598,20 +1598,19 @@
: %7BPOS:
-PRON
+DET
, %22PronT
@@ -1633,35 +1633,35 @@
%22PRP$%22: %7BPOS:
-ADJ
+DET
, %22PronType%22: %22p
@@ -2426,27 +2426,27 @@
WDT%22: %7BPOS:
-ADJ
+DET
, %22PronType%22
@@ -2471,27 +2471,27 @@
%22WP%22: %7BPOS:
-NOU
+PRO
N, %22PronType
@@ -2522,19 +2522,19 @@
: %7BPOS:
-ADJ
+DET
, %22Poss%22
|
4f905ac9e6e75975acbda7ad943751da07c409ec
|
Add test for ASCII filenames (#4345)
|
spacy/tests/test_misc.py
|
spacy/tests/test_misc.py
|
# coding: utf-8
from __future__ import unicode_literals
import pytest
import os
import ctypes
from pathlib import Path
from spacy import util
from spacy import prefer_gpu, require_gpu
from spacy.compat import symlink_to, symlink_remove, path2str, is_windows
from spacy._ml import PrecomputableAffine
from subprocess import CalledProcessError
@pytest.fixture
def symlink_target():
return Path("./foo-target")
@pytest.fixture
def symlink():
return Path("./foo-symlink")
@pytest.fixture(scope="function")
def symlink_setup_target(request, symlink_target, symlink):
if not symlink_target.exists():
os.mkdir(path2str(symlink_target))
# yield -- need to cleanup even if assertion fails
# https://github.com/pytest-dev/pytest/issues/2508#issuecomment-309934240
def cleanup():
# Remove symlink only if it was created
if symlink.exists():
symlink_remove(symlink)
os.rmdir(path2str(symlink_target))
request.addfinalizer(cleanup)
@pytest.fixture
def is_admin():
"""Determine if the tests are run as admin or not."""
try:
admin = os.getuid() == 0
except AttributeError:
admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
return admin
@pytest.mark.parametrize("text", ["hello/world", "hello world"])
def test_util_ensure_path_succeeds(text):
path = util.ensure_path(text)
assert isinstance(path, Path)
@pytest.mark.parametrize("package", ["numpy"])
def test_util_is_package(package):
"""Test that an installed package via pip is recognised by util.is_package."""
assert util.is_package(package)
@pytest.mark.parametrize("package", ["thinc"])
def test_util_get_package_path(package):
"""Test that a Path object is returned for a package name."""
path = util.get_package_path(package)
assert isinstance(path, Path)
def test_PrecomputableAffine(nO=4, nI=5, nF=3, nP=2):
model = PrecomputableAffine(nO=nO, nI=nI, nF=nF, nP=nP)
assert model.W.shape == (nF, nO, nP, nI)
tensor = model.ops.allocate((10, nI))
Y, get_dX = model.begin_update(tensor)
assert Y.shape == (tensor.shape[0] + 1, nF, nO, nP)
assert model.d_pad.shape == (1, nF, nO, nP)
dY = model.ops.allocate((15, nO, nP))
ids = model.ops.allocate((15, nF))
ids[1, 2] = -1
dY[1] = 1
assert model.d_pad[0, 2, 0, 0] == 0.0
model._backprop_padding(dY, ids)
assert model.d_pad[0, 2, 0, 0] == 1.0
model.d_pad.fill(0.0)
ids.fill(0.0)
dY.fill(0.0)
ids[1, 2] = -1
ids[1, 1] = -1
ids[1, 0] = -1
dY[1] = 1
assert model.d_pad[0, 2, 0, 0] == 0.0
model._backprop_padding(dY, ids)
assert model.d_pad[0, 2, 0, 0] == 3.0
def test_prefer_gpu():
assert not prefer_gpu()
def test_require_gpu():
with pytest.raises(ValueError):
require_gpu()
def test_create_symlink_windows(
symlink_setup_target, symlink_target, symlink, is_admin
):
"""Test the creation of symlinks on windows. If run as admin or not on windows it should succeed, otherwise a CalledProcessError should be raised."""
assert symlink_target.exists()
if is_admin or not is_windows:
try:
symlink_to(symlink, symlink_target)
assert symlink.exists()
except CalledProcessError as e:
pytest.fail(e)
else:
with pytest.raises(CalledProcessError):
symlink_to(symlink, symlink_target)
assert not symlink.exists()
|
Python
| 0.000006
|
@@ -3432,24 +3432,326 @@
rt not symlink.exists()%0A
+%0A%0Adef test_ascii_filenames():%0A %22%22%22Test that all filenames in the project are ASCII.%0A See: https://twitter.com/_inesmontani/status/1177941471632211968%0A %22%22%22%0A root = Path(__file__).parent.parent%0A for path in root.glob(%22**/*%22):%0A assert all(ord(c) %3C 128 for c in path.name), path.name%0A
|
d372d32808c25a38644e4a3c5c5a33e608ad10cf
|
Remove odoo.api multi as deprecated in Odoo 13.0
|
models/hr_complaint.py
|
models/hr_complaint.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from datetime import date
from odoo import api, fields, models
from openerp.tools.translate import _
from openerp.tools import DEFAULT_SERVER_DATETIME_FORMAT as DF
class HrComplaint(models.Model):
_name = "hr_complaint.complaint"
_description = "Employee Complaint"
_inherit = "mail.thread"
name = fields.Char("Name")
complaint_date = fields.Date("Date of Complaint", required=True, default=date.today())
employee_id = fields.Many2one('hr.employee', "Employee", required=True)
complaint_mode = fields.Selection([
('0', 'Telephone'),
('1', 'Other Verbal'),
('2', 'In writing')], "Method of Complaint", help="This is the mode by which the complaint was received.")
complaint_type = fields.Selection([
('0', 'Formal'),
('1', 'Informal')], "Type of Complaint")
complaint_source = fields.Selection([
('0', 'Client'),
('1', 'Public'),
('2', 'Other Employee'),
('3', 'Management')], "Source of Complaint")
complaint_source_text = fields.Char('Complainant',size=100)
complaint_outline = fields.Text('Outline of Complaint')
complaint_evidence = fields.Text('Evidence')
complaint_witnesses = fields.Text('Witnesses')
recorded_by = fields.Many2one('hr.employee',"Complaint Recorded By",required=True)
complaint_assessment = fields.Selection([
('0', 'Trivial'),
('1', 'Minor'),
('2', 'Moderate'),
('3', 'Serious'),
('4', 'Very Serious')], "Seriousness of complaint")
#allegations questions
stealing = fields.Boolean('Stealing')
fraud = fields.Boolean('Fraud')
violence = fields.Boolean('Threats and/or violence')
safety = fields.Boolean('Safety breaches')
discrimination = fields.Boolean('Discrimination')
#indicator question
policy = fields.Boolean('Breach of contract/policy')
client_contract = fields.Boolean('Breach of the company\'s contracts with clients')
directions = fields.Boolean('Refusal to follow reasonable directions')
performance = fields.Boolean('Unsatisfactory performance')
behaviour = fields.Boolean('Inappropriate behaviour')
property = fields.Boolean('Misuse of company property')
evidence_sufficient = fields.Boolean('Is there sufficient evidence to investigate?')
investigation_required =fields.Selection([
('1', 'Yes - Formal'),
('2', 'Yes - Informal'),
('3', 'No')], 'Does the complaint require investigation?')
complaint_refer = fields.Boolean('Does the complaint have to be referred to another organisation?')
investigation_plan = fields.Text('Investigation methods/plan')
assigned_to = fields.Many2one('hr.employee',"Investigator")
assessor = fields.Many2one('hr.employee',"Assessor")
assess_date = fields.Date('Assessment Completed')
    substantiated = fields.Boolean('Has the complaint been substantiated?')
action_detail = fields.Text('Details')
issues = fields.Text('Issues discovered')
action_taken = fields.Selection([
('1', 'Disciplinary Action'),
('2', 'Formal Warning'),
('3', 'Informal Warning'),
('4', 'Record without warning'),
('5', 'Remedial'),
('6', 'No action')], 'Action to be taken', track_visibility='onchange')
record_employee = fields.Boolean('Record on employee file', track_visibility='onchange')
notes = fields.Text('Notes', track_visibility='onchange')
completed_by = fields.Many2one('hr.employee','Completed By')
state = fields.Selection([
('draft', 'New'),
('insufficient', 'Insufficient Evidence'),
('progress', 'Investigation in progress'),
('wait', 'Waiting for external investigation'),
('unsubstantiated','Unsubstantiated'),
('done', 'Done')], 'Status', required=True, track_visibility='onchange', default='draft')
date_eta = fields.Date('To be completed')
date_close = fields.Date('Date closed')
@api.onchange('employee_id','complaint_date')
def change_complaint(self):
if self.employee_id:
self.name = self.employee_id.name
else:
self.name = ''
if self.complaint_date:
self.name += ' (' + str(self.complaint_date) + ')'
_order = 'complaint_date desc'
class HrEmployee(models.Model):
_inherit="hr.employee"
complaint_count = fields.Integer(compute='_compute_ccount', store=False, string='Complaints')
@api.multi
def _compute_ccount(self):
Complaint = self.env['hr_complaint.complaint']
self.complaint_count = Complaint.search_count([('employee_id','=',self.id)])
|
Python
| 0
|
@@ -5260,20 +5260,8 @@
)%0A%09%0A
-%09@api.multi%0A
%09def
|
0f4d44722bf5162922a7ddbfb00fcd042dc3de9d
|
clean up _restore_cache_key a bit
|
corehq/ex-submodules/casexml/apps/phone/restore_caching.py
|
corehq/ex-submodules/casexml/apps/phone/restore_caching.py
|
import hashlib
import logging
from casexml.apps.phone.const import RESTORE_CACHE_KEY_PREFIX, ASYNC_RESTORE_CACHE_KEY_PREFIX
from dimagi.utils.couch.cache.cache_core import get_redis_default_cache
logger = logging.getLogger(__name__)
def _restore_cache_key(domain, prefix, user_id, version, sync_log_id, device_id):
# to invalidate all restore cache keys, increment the number below
hashable_key = '8-{domain}-{prefix}-{user}-{version}-{sync_log_id}-{device_id}'.format(
domain=domain,
prefix=prefix,
user=user_id,
version=version or '',
sync_log_id=sync_log_id or '',
device_id=device_id or '',
)
return hashlib.md5(hashable_key).hexdigest()
def _restore_payload_path_cache_key(domain, user_id, sync_log_id, device_id):
return _restore_cache_key(
domain=domain,
prefix=RESTORE_CACHE_KEY_PREFIX,
user_id=user_id,
version='2.0',
sync_log_id=sync_log_id,
device_id=device_id,
)
def _async_restore_task_id_cache_key(domain, user_id, sync_log_id, device_id):
return _restore_cache_key(
domain=domain,
prefix=ASYNC_RESTORE_CACHE_KEY_PREFIX,
user_id=user_id,
version=None,
sync_log_id=sync_log_id,
device_id=device_id,
)
class CacheAccessor(object):
cache_key = None
timeout = None
debug_info = None
def get_value(self):
logger.debug('getting {}'.format(self.debug_info))
return get_redis_default_cache().get(self.cache_key)
def set_value(self, value, timeout=None):
logger.debug('setting {}'.format(self.debug_info))
if timeout is None:
timeout = self.timeout
get_redis_default_cache().set(self.cache_key, value, timeout=timeout)
def invalidate(self):
logger.debug('invalidating {}'.format(self.debug_info))
get_redis_default_cache().delete(self.cache_key)
class RestorePayloadPathCache(CacheAccessor):
def __init__(self, domain, user_id, sync_log_id, device_id):
self.cache_key = _restore_payload_path_cache_key(domain, user_id, sync_log_id, device_id)
self.debug_info = ('RestorePayloadPathCache', domain, user_id, sync_log_id, device_id)
class AsyncRestoreTaskIdCache(CacheAccessor):
timeout = 24 * 60 * 60
def __init__(self, domain, user_id, sync_log_id, device_id):
self.cache_key = _async_restore_task_id_cache_key(domain, user_id, sync_log_id, device_id)
self.debug_info = ('AsyncRestoreTaskIdCache', domain, user_id, sync_log_id, device_id)
|
Python
| 0.000001
|
@@ -280,17 +280,8 @@
_id,
- version,
syn
@@ -298,24 +298,24 @@
device_id):%0A
+
# to inv
@@ -397,45 +397,35 @@
= '
-8-%7Bdomain%7D-%7Bprefix%7D-%7Buser%7D-%7Bversion%7D-
+0,%7Bprefix%7D,%7Bdomain%7D,%7Buser%7D,
%7Bsyn
@@ -437,9 +437,9 @@
_id%7D
--
+,
%7Bdev
@@ -527,39 +527,8 @@
id,%0A
- version=version or '',%0A
@@ -856,31 +856,8 @@
id,%0A
- version='2.0',%0A
@@ -1094,32 +1094,32 @@
CHE_KEY_PREFIX,%0A
+
user_id=
@@ -1131,30 +1131,8 @@
id,%0A
- version=None,%0A
|
555e76a62f0ec955932f95bec444e7c360f23241
|
use environment variable to set port
|
server.py
|
server.py
|
from flask import Flask
from flask import jsonify
from flask import render_template
from flask.ext.cors import CORS
from api import api
app = Flask(__name__)
cors = CORS(app)
if __name__ == "__main__":
app.register_blueprint(api)
app.run(debug=True, port=5100)
|
Python
| 0.000001
|
@@ -1,24 +1,35 @@
+import os%0A%0A
from flask import Flask%0A
@@ -121,16 +121,17 @@
t CORS%0A%0A
+%0A
from api
@@ -210,16 +210,64 @@
ain__%22:%0A
+ port = os.environ.setdefault(%22PORT%22, %225000%22)%0A%0A
app.re
@@ -319,10 +319,15 @@
ort=
-5100
+int(port)
)%0A
|
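Decoded, the change reads the port from the environment with a string default and casts it at the call site. A compact standalone equivalent:

import os
from flask import Flask

app = Flask(__name__)

if __name__ == "__main__":
    # setdefault both reads PORT and pins the default back into os.environ;
    # os.environ.get("PORT", "5000") works too if pinning is not needed.
    port = os.environ.setdefault("PORT", "5000")
    app.run(debug=True, port=int(port))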
828c78566879412c6e2cc6981af9fa1adb5bdcf4
|
return result files, not just names
|
server.py
|
server.py
|
import config
import glob
import flask
import os
import json
app = flask.Flask(__name__)
@app.route("/versions/")
def get_recommended_versions():
return flask.jsonify({"versions" : config.recommended_versions})
@app.route("/results", methods=['GET', 'POST'])
def submit_result():
if flask.request.method == "POST":
pass
else:
# look in results directory
results = []
for path in glob.glob(os.path.join(config.results_dir,'[!_]*.json')):
results.append(path)
return flask.jsonify({"result_files":results})
@app.route("/experiments/")
@app.route("/experiments/<name>")
def get_experiment_list(name=None):
experiments = {}
for path in glob.glob(os.path.join(config.experiments_dir,'[!_]*.py')):
# get name of file and path
file_name, ext = os.path.splitext(os.path.basename(path))
experiments[file_name] = path
if name in experiments:
return "Experiment found"
else:
return flask.jsonify({"experiments" : experiments.keys()})
@app.route("/clients/")
@app.route("/clients/<name>")
def get_clients(name=None):
clients = {}
with open(config.clients_file) as clients_fh:
clients = json.load(clients_fh)
if name not in clients:
return flask.jsonify(clients)
else:
        return flask.jsonify(clients[name])
if __name__ == "__main__":
app.run(debug=True)
|
Python
| 0.000001
|
@@ -400,10 +400,10 @@
s =
-%5B%5D
+%7B%7D
%0A
@@ -493,28 +493,209 @@
-results.ap
+# get name of file and path%0A file_name, ext = os.path.splitext(os.path.basename(path))%0A with o
pen
-d
(path)
+ as result_file:%0A results%5Bfile_name%5D = json.load(result_file)
%0A
@@ -728,21 +728,16 @@
%7B%22result
-_file
s%22:resul
@@ -861,17 +861,68 @@
ts = %7B%7D%0A
+ # look for experiments in experiments directory
%0A
-
for
@@ -1130,16 +1130,105 @@
= path%0A%0A
+ if name == None:%0A return flask.jsonify(%7B%22experiments%22 : experiments.keys()%7D)%0A%0A
if n
@@ -1310,59 +1310,30 @@
urn
-flask.jsonify(%7B%22experiments%22 : experiments.keys()%7D)
+%22Experiment not found%22
%0A%0A@a
@@ -1536,22 +1536,15 @@
ame
-not in clients
+== None
:%0A
@@ -1579,24 +1579,39 @@
lients)%0A
+%0A
-else
+if name in clients
:%0A
@@ -1655,17 +1655,48 @@
+else:%0A
-%0A
+return %22Client not found%22%0A
%0Aif
|
531d40748a6b49fa55f54fb834f21f8d1c2cf156
|
Use most compact encoding
|
server.py
|
server.py
|
# -*- coding: utf-8 -*-
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'ericbidelman@chromium.org (Eric Bidelman)'
import json
import logging
import os
import webapp2
from google.appengine.api import memcache
from google.appengine.api import urlfetch
from google.appengine.api import users
import common
import models
import settings
import uma
def normalized_name(val):
return val.lower().replace(' ', '').replace('/', '')
def first_of_milestone(feature_list, milestone, start=0):
for i in xrange(start, len(feature_list)):
f = feature_list[i]
if (str(f['shipped_milestone']) == str(milestone) or
f['impl_status_chrome'] == str(milestone)):
return i
return -1
class MainHandler(common.ContentHandler, common.JSONHandler):
def __get_omaha_data(self):
omaha_data = memcache.get('omaha_data')
if omaha_data is None:
result = urlfetch.fetch('http://omahaproxy.appspot.com/all.json')
if result.status_code == 200:
omaha_data = json.loads(result.content)
memcache.set('omaha_data', omaha_data, time=86400) # cache for 24hrs.
return omaha_data
def __annotate_first_of_milestones(self, feature_list):
try:
omaha_data = self.__get_omaha_data()
win_versions = omaha_data[0]['versions']
for v in win_versions:
s = v.get('version') or v.get('prev_version')
LATEST_VERSION = int(s.split('.')[0])
break
# TODO(ericbidelman) - memcache this calculation as part of models.py
milestones = range(1, LATEST_VERSION + 1)
milestones.reverse()
versions = [
models.IMPLEMENATION_STATUS[models.NO_ACTIVE_DEV],
models.IMPLEMENATION_STATUS[models.PROPOSED],
models.IMPLEMENATION_STATUS[models.IN_DEVELOPMENT],
]
versions.extend(milestones)
versions.append(models.IMPLEMENATION_STATUS[models.NO_LONGER_PURSUING])
last_good_idx = 0
for i, version in enumerate(versions):
idx = first_of_milestone(feature_list, version, start=last_good_idx)
if idx != -1:
feature_list[idx]['first_of_milestone'] = True
last_good_idx = idx
except Exception as e:
logging.error(e)
def __get_feature_list(self):
feature_list = models.Feature.get_chronological() # Memcached
self.__annotate_first_of_milestones(feature_list)
return feature_list
def get(self, path, feature_id=None):
# Default to features page.
# TODO: remove later when we want an index.html
if not path:
return self.redirect('/features')
# Default /metrics to CSS ranking.
# TODO: remove later when we want /metrics/index.html
if path == 'metrics' or path == 'metrics/css':
return self.redirect('/metrics/css/popularity')
# Remove trailing slash from URL and redirect. e.g. /metrics/ -> /metrics
if feature_id == '':
return self.redirect(self.request.path.rstrip('/'))
template_data = {}
if path.startswith('features'):
if path.endswith('.json'): # JSON request.
feature_list = self.__get_feature_list()
return common.JSONHandler.get(self, feature_list, formatted=True)
elif path.endswith('.xml'): # Atom feed request.
filterby = None
category = self.request.get('category', None)
if category is not None:
for k,v in models.FEATURE_CATEGORIES.iteritems():
normalized = normalized_name(v)
if category == normalized:
filterby = ('category =', k)
break
feature_list = models.Feature.get_all( # Memcached
limit=settings.RSS_FEED_LIMIT,
filterby=filterby,
order='-updated')
return self.render_atom_feed('Features', feature_list)
else:
# if settings.PROD:
# feature_list = self.__get_feature_list()
# else:
# result = urlfetch.fetch(
# self.request.scheme + '://' + self.request.host +
# '/static/js/mockdata.json')
# feature_list = json.loads(result.content)
# template_data['features'] = json.dumps(feature_list)
template_data['categories'] = [
(v, normalized_name(v)) for k,v in
models.FEATURE_CATEGORIES.iteritems()]
template_data['IMPLEMENATION_STATUSES'] = [
{'key': k, 'val': v} for k,v in
models.IMPLEMENATION_STATUS.iteritems()]
template_data['VENDOR_VIEWS'] = [
{'key': k, 'val': v} for k,v in
models.VENDOR_VIEWS.iteritems()]
template_data['WEB_DEV_VIEWS'] = [
{'key': k, 'val': v} for k,v in
models.WEB_DEV_VIEWS.iteritems()]
template_data['STANDARDS_VALS'] = [
{'key': k, 'val': v} for k,v in
models.STANDARDIZATION.iteritems()]
elif path.startswith('metrics/css/timeline'):
properties = sorted(uma.CSS_PROPERTY_BUCKETS.items(), key=lambda x:x[1])
template_data['CSS_PROPERTY_BUCKETS'] = json.dumps(properties)
elif path.startswith('metrics/feature/timeline'):
properties = sorted(uma.FEATUREOBSERVER_BUCKETS.items(), key=lambda x:x[1])
template_data['FEATUREOBSERVER_BUCKETS'] = json.dumps(properties)
self.render(data=template_data, template_path=os.path.join(path + '.html'))
# Main URL routes.
routes = [
('/(.*)/([0-9]*)', MainHandler),
('/(.*)', MainHandler),
]
app = webapp2.WSGIApplication(routes, debug=settings.DEBUG)
app.error_handlers[404] = common.handle_404
if settings.PROD and not settings.DEBUG:
app.error_handlers[500] = common.handle_500
|
Python
| 0.999259
|
@@ -4643,28 +4643,65 @@
n.dumps(
-feature_list
+%0A # feature_list, separators=(',',':')
)%0A%0A
@@ -5549,26 +5549,59 @@
n.dumps(
-properties
+%0A properties, separators=(',',':')
)%0A el
@@ -5790,26 +5790,59 @@
n.dumps(
-properties
+%0A properties, separators=(',',':')
)%0A%0A s
|
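The separators tweak in this diff is standard-library behavior: json.dumps defaults to (', ', ': '), so passing separators=(',', ':') strips the padding whitespace and produces the most compact valid encoding. A quick illustration:

import json

data = {'key': [1, 2, 3]}
print(json.dumps(data))                         # {"key": [1, 2, 3]}
print(json.dumps(data, separators=(',', ':')))  # {"key":[1,2,3]}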
0fc2cc8aafc1cf778ecc12439becf8482cb47097
|
resolve imports
|
setapp.py
|
setapp.py
|
import os
import cherrypy
from ws4py.server.cherrypyserver import WebSocketPlugin, WebSocketTool
from app.setutils import Card
import webservices.solitairegame
class SetApp:
homepage = ''
@cherrypy.expose
def index(self):
try:
return open(self.homepage)
except FileNotFoundError:
raise cherrypy.HTTPRedirect('/solitaire', 302)
@staticmethod
def json_to_cards(blob):
return [Card(*[getattr(Card, key)(obj[key])
for key in ['number', 'color', 'shading', 'shape']]) for obj in blob]
if __name__ == '__main__':
base_conf = {
'/': {
'tools.staticdir.root': os.path.abspath(os.getcwd()),
'tools.sessions.on': True,
'tools.trailing_slash.on': False
},
'/game': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')]
},
'/static': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'public'
},
'/bower_components': {
'tools.staticdir.on': True,
'tools.staticdir.dir': 'bower_components'
}
}
mp_conf = base_conf.copy()
mp_conf.update({
'/join': {
'request.dispatch': cherrypy.dispatch.MethodDispatcher(),
'tools.response_headers.on': True,
'tools.response_headers.headers': [('Content-Type', 'application/json')]
}
})
cherrypy.config.update({
'server.socket_host': '0.0.0.0',
'server.socket_port': int(os.environ.get('PORT', 8080)),
})
WebSocketPlugin(cherrypy.engine).subscribe()
cherrypy.tools.websocket = WebSocketTool()
cherrypy.tree.mount(webservices.solitairegame.SolitaireApp(), '/solitaire', base_conf)
cherrypy.tree.mount(MultiplayerApp(), '/multiplayer', mp_conf)
cherrypy.quickstart(SetApp(), '/', base_conf) # needs to be mounted last
cherrypy.engine.start()
cherrypy.engine.block()
|
Python
| 0.000021
|
@@ -155,16 +155,51 @@
iregame%0A
+import webservices.multiplayergame%0A
%0A%0Aclass
@@ -1961,16 +1961,44 @@
e.mount(
+webservices.multiplayergame.
Multipla
|
6b276c384141bd2f2a22523184db349a91849169
|
Determine supported http methods from the classes.
|
massa/api.py
|
massa/api.py
|
# -*- coding: utf-8 -*-
from flask import Blueprint, jsonify, g, request, url_for
from flask.views import MethodView
from .domain import EntityNotFoundError, InvalidInputError
def endpoint(f):
def wrapper(*args, **kwargs):
try:
rv = f(*args, **kwargs)
except EntityNotFoundError as e:
rv = {'message': e.message}, 404
except InvalidInputError as e:
rv = {'message': e.message, 'details': e.details}, 400
msg = [rv, 200, {}]
if isinstance(rv, tuple):
for index, value in enumerate(rv):
msg[index] = value
body, code, headers = msg
response = jsonify(body)
for key, value in headers.iteritems():
response.headers[key] = value
return response, code
return wrapper
def payload():
return request.get_json() or request.form.to_dict()
class ApiView(MethodView):
decorators = [endpoint]
class MeasurementList(ApiView):
def get(self):
service = g.sl('measurement_service')
return {'items': service.find_all()}
def post(self):
service = g.sl('measurement_service')
id = service.create(**payload())
location = url_for('api.measurement_item', id=id, _external=True)
return service.get(id), 201, {'Location': location}
class MeasurementItem(ApiView):
def get(self, id):
service = g.sl('measurement_service')
return service.get(id)
def put(self, id):
service = g.sl('measurement_service')
service.update(id, **payload())
return service.get(id), 200
def delete(self, id):
service = g.sl('measurement_service')
service.delete(id)
return '', 204
bp = Blueprint('api', __name__)
bp.add_url_rule(
'/measurements/',
view_func=MeasurementList.as_view('measurement_list'),
methods=['GET', 'POST']
)
bp.add_url_rule(
'/measurements/<id>',
view_func=MeasurementItem.as_view('measurement_item'),
methods=['GET', 'PUT', 'DELETE']
)
|
Python
| 0
|
@@ -1874,36 +1874,8 @@
'),%0A
- methods=%5B'GET', 'POST'%5D%0A
)%0A%0Ab
@@ -1979,43 +1979,6 @@
'),%0A
- methods=%5B'GET', 'PUT', 'DELETE'%5D%0A
)%0A
|
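Dropping the explicit methods= lists works because Flask derives the allowed methods from the handlers defined on a flask.views.MethodView subclass: the class gains a methods attribute automatically, and add_url_rule falls back to view_func.methods when none are given. A minimal sketch with illustrative names:

from flask import Flask
from flask.views import MethodView

class ItemView(MethodView):
    def get(self, id):
        return 'item %s' % id

    def delete(self, id):
        return '', 204

app = Flask(__name__)
# No methods= needed: ItemView.methods already contains
# 'GET' and 'DELETE', derived from the handlers above.
app.add_url_rule('/items/<id>', view_func=ItemView.as_view('item'))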
fc4a04b72032fdce5447902d705c8e1441e27e70
|
Save history for projects and new makers
|
makerbase/views/api.py
|
makerbase/views/api.py
|
from datetime import datetime
import json
import traceback
from flask import abort, request, Response
from flask.views import MethodView
from flaskext.login import login_required, current_user
from werkzeug.datastructures import MultiDict
from makerbase import app
from makerbase.forms import MakerForm, ProjectForm, ParticipationForm, ProjectAddParticipationForm
from makerbase.models import Robject
from makerbase.models import *
class RobjectView(MethodView):
def dispatch_request(self, *args, **kwargs):
try:
return super(RobjectView, self).dispatch_request(*args, **kwargs)
except Exception, exc:
return Response(json.dumps({
"errors": [traceback.format_exc().split('\n')],
}), 500)
@staticmethod
def json_plus_robjects(obj):
if isinstance(obj, Robject):
return obj.get_api_data()
raise TypeError('%r is not a robject' % obj)
def render(self, obj):
return json.dumps(obj, default=self.json_plus_robjects)
class ResourceView(RobjectView):
def get(self, slug):
obj = self.objclass.get(slug)
if obj is None:
abort(404)
return self.render(obj)
def history_for_post(self, obj, form):
pass
def history_for_put(self, obj, form):
pass
@login_required
def post(self, slug):
obj = self.objclass.get(slug)
if obj is None:
abort(404)
data = json.loads(request.data)
form = self.formclass(MultiDict(data), obj)
if not form.validate():
return Response(json.dumps({
'errors': form.errors,
}), 400)
form.populate_obj(obj)
del obj.reason
self.history_for_post(obj, form)
obj.save()
return self.render(obj)
@login_required
def put(self, slug):
data = json.loads(request.data)
form = self.formclass(MultiDict(data))
if not form.validate():
return Response(json.dumps({
'errors': form.errors,
}), 400)
obj = self.objclass.get(slug)
if obj is None:
obj = self.objclass(slug)
form.populate_obj(obj)
obj.save()
return self.render(obj)
class MakerAPI(ResourceView):
objclass = Maker
formclass = MakerForm
def history_for_post(self, obj, form):
history = History(
action='edit',
reason=form.reason.data,
when=datetime.utcnow().replace(microsecond=0).isoformat(),
)
history.add_link(current_user, tag='user')
history.save()
obj.add_link(history, tag='history')
class ProjectAPI(ResourceView):
objclass = Project
formclass = ProjectForm
class ParticipationAPI(ResourceView):
objclass = Participation
formclass = ParticipationForm
class ProjectPartiesAPI(RobjectView):
def get(self, slug):
proj = Project.get(slug)
if proj is None:
abort(404)
return self.render(list(proj.parties))
@login_required
def post(self, slug):
proj = Project.get(slug)
if proj is None:
abort(404)
data = json.loads(request.data)
form = ProjectAddParticipationForm(MultiDict(data))
if not form.validate():
return Response(json.dumps({
'errors': form.errors,
}), 400)
party = Participation()
form.populate_obj(party)
del party.maker
party.add_link(proj, tag='project')
maker = Maker.get(form.maker.data)
if maker is None:
return Response(json.dumps({
'errors': {
'maker': ['Maker ID is invalid'],
}
}), 400)
party.add_link(maker, tag='maker')
party.save()
maker.add_link(party, tag='participation')
maker.save()
proj.add_link(party, tag='participation')
proj.save()
return self.render(party)
app.add_url_rule('/api/maker/<slug>', view_func=MakerAPI.as_view('api_maker'))
app.add_url_rule('/api/project/<slug>', view_func=ProjectAPI.as_view('api_project'))
app.add_url_rule('/api/project/<slug>/parties', view_func=ProjectPartiesAPI.as_view('api_project_parties'))
app.add_url_rule('/api/participation/<slug>', view_func=ParticipationAPI.as_view('api_participation'))
|
Python
| 0
|
@@ -1217,32 +1217,28 @@
def
+make_
history
-_for_post
(self, o
@@ -1237,32 +1237,40 @@
(self, obj, form
+, action
):%0A pass%0A
@@ -1268,68 +1268,291 @@
-pass%0A%0A def history_for_put(self, obj, form):%0A pass
+history = History(%0A action=action,%0A reason=form.reason.data,%0A when=datetime.utcnow().replace(microsecond=0).isoformat(),%0A )%0A history.add_link(current_user, tag='user')%0A history.save()%0A%0A obj.add_link(history, tag='history')
%0A%0A
@@ -1974,32 +1974,28 @@
self.
+make_
history
-_for_post
(obj, fo
@@ -1996,16 +1996,24 @@
bj, form
+, 'edit'
)%0A
@@ -2448,32 +2448,79 @@
pulate_obj(obj)%0A
+ self.make_history(obj, form, 'create')%0A
obj.save
@@ -2640,344 +2640,8 @@
rm%0A%0A
- def history_for_post(self, obj, form):%0A history = History(%0A action='edit',%0A reason=form.reason.data,%0A when=datetime.utcnow().replace(microsecond=0).isoformat(),%0A )%0A history.add_link(current_user, tag='user')%0A history.save()%0A%0A obj.add_link(history, tag='history')%0A%0A
%0Acla
@@ -2822,24 +2822,85 @@
pationForm%0A%0A
+ def make_history(self, obj, form, action):%0A pass%0A%0A
%0Aclass Proje
|
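The shape of this diff is a template-method refactor: the duplicated history bookkeeping collapses into one make_history(obj, form, action) hook on the base view, the write paths call it with 'edit' or 'create', and a subclass that wants no audit trail (here ParticipationAPI) overrides it with a no-op. A framework-free sketch of the pattern, with illustrative names:

class ResourceView(object):
    def make_history(self, obj, form, action):
        # Default: record an audit entry for every write.
        obj.history.append((action, form['reason']))

    def post(self, obj, form):   # edit an existing object
        self.make_history(obj, form, 'edit')

    def put(self, obj, form):    # create a new object
        self.make_history(obj, form, 'create')

class ParticipationView(ResourceView):
    def make_history(self, obj, form, action):
        pass  # participations carry no history of their own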
323ca8a6d8a9cfc7db2e60b515f09158ec3e0fe6
|
Rename lookup values
|
motor_interface/scripts/motor_interface.py
|
motor_interface/scripts/motor_interface.py
|
#!/usr/bin/env python
import rospy
import numpy
import math
import Adafruit_PCA9685
from vortex_msgs.msg import Float64ArrayStamped
class MotorInterface(object):
def __init__(self):
rospy.init_node('motor_interface', anonymous=False)
self.pub = rospy.Publisher('debug/thruster_pwm', Float64ArrayStamped, queue_size=10)
self.sub = rospy.Subscriber('thruster_forces', Float64ArrayStamped, self.callback)
self.PWM_BITS_PER_PERIOD = 4096.0 # 12 bit PWM
self.FREQUENCY = 249 # Max 400 Hz
self.FREQUENCY_MEASURED = 251.2 # Use this for better precision
self.PERIOD_LENGTH_IN_MICROSECONDS = 1000000.0/self.FREQUENCY_MEASURED
self.THRUST_RANGE_LIMIT = 100
self.ENABLE_RATE_LIMITER = False
self.T100_thrust = rospy.get_param('/thrusters/characteristics/thrust')
self.T100_pulse_width = rospy.get_param('/thrusters/characteristics/pulse_width')
self.num_thrusters = rospy.get_param('/propulsion/thrusters/num')
self.max_rate = rospy.get_param('/thrusters/rate_of_change/max')
self.motor_connection_enabled = rospy.get_param('/motor_interface/motor_connection_enabled')
self.rate_limiting_enabled = rospy.get_param('/motor_interface/rate_limiting_enabled')
self.prev_time = rospy.get_rostime()
self.is_initialized = False
# The setpoint is the desired value (input)
self.thrust_setpoint = numpy.zeros(self.num_thrusters)
# The reference is the output value (rate limited)
self.thrust_reference = numpy.zeros(self.num_thrusters)
# Initialize the PCA9685 using the default address (0x40)
if (self.motor_connection_enabled):
self.pca9685 = Adafruit_PCA9685.PCA9685()
self.pca9685.set_pwm_freq(self.FREQUENCY)
# Initialize outputs to zero newton
neutral_pulse_width = self.microsecs_to_bits(self.thrust_to_microsecs(0))
if (self.motor_connection_enabled):
for i in range(self.num_thrusters):
self.pca9685.set_pwm(i, 0, neutral_pulse_width)
print 'Launching node motor_interface at', self.FREQUENCY, 'Hz'
def callback(self, msg):
if not self.healthy_message(msg):
return
if not self.is_initialized:
self.prev_time = msg.header.stamp
self.is_initialized = True
rospy.loginfo('Initialized motor_interface')
return
curr_time = msg.header.stamp
dt = (curr_time - self.prev_time).to_sec()
if (dt <= 0) and self.rate_limiting_enabled:
rospy.logwarn_throttle(1, 'Motor interface: Zero time difference between messages, ignoring...')
return
self.prev_time = curr_time
thrust_setpoint_list = msg.data
self.thrust_setpoint = thrust_setpoint_list
self.update_reference(dt)
self.set_pwm()
def thrust_to_microsecs(self, thrust):
return numpy.interp(thrust, self.T100_thrust, self.T100_pulse_width)
def microsecs_to_bits(self, microsecs):
duty_cycle_normalized = microsecs / self.PERIOD_LENGTH_IN_MICROSECONDS
return int(round(self.PWM_BITS_PER_PERIOD * duty_cycle_normalized))
def update_reference(self, dt):
if self.rate_limiting_enabled:
rate_of_change = (self.thrust_setpoint - self.thrust_reference)/dt
for i in range(self.num_thrusters):
if rate_of_change[i] > self.max_rate:
self.thrust_reference[i] += dt * self.max_rate
elif rate_of_change[i] < -self.max_rate:
self.thrust_reference[i] -= dt * self.max_rate
else:
self.thrust_reference[i] = self.thrust_setpoint[i]
else:
self.thrust_reference = self.thrust_setpoint
def set_pwm(self):
microsecs = [None]*self.num_thrusters
for i in range(self.num_thrusters):
microsecs[i] = self.thrust_to_microsecs(self.thrust_reference[i])
pwm_bits = self.microsecs_to_bits(microsecs[i])
if (self.motor_connection_enabled):
self.pca9685.set_pwm(i, 0, pwm_bits)
# Publish outputs for debug
debug_msg = Float64ArrayStamped()
debug_msg.header.stamp = rospy.get_rostime()
debug_msg.data = microsecs
self.pub.publish(debug_msg)
def healthy_message(self, msg):
if (len(msg.data) != self.num_thrusters):
rospy.logwarn_throttle(1, 'Motor interface: Wrong number of thrusters, ignoring...')
return False
for t in msg.data:
if math.isnan(t) or math.isinf(t) or (abs(t) > self.THRUST_RANGE_LIMIT):
rospy.logwarn_throttle(1, 'Motor interface: Message out of range, ignoring...')
return False
return True
if __name__ == '__main__':
try:
motor_interface = MotorInterface()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
Python
| 0.000001
|
@@ -820,28 +820,30 @@
self.
-T100
+lookup
_thrust
@@ -907,28 +907,30 @@
self.
-T100
+lookup
_pulse_width
@@ -3074,20 +3074,22 @@
t, self.
-T100
+lookup
_thrust,
@@ -3098,12 +3098,14 @@
elf.
-T100
+lookup
_pul
|
3cc3c22acbdc84124bade7c6f81af016951cb4cd
|
Fix typo in stream_spec.
|
spec/data/stream_spec.py
|
spec/data/stream_spec.py
|
import mock
from data import stream
from spec.mamba import *
with description('stream'):
with it('instantiates without errors'):
expect(calling(stream.Stream)).not_to(raise_error)
with it('subscribes without errors'):
s = stream.Stream()
expect(calling(s.subscribe, mock.Mock())).not_to(raise_error)
with it('publishes to subscrbers'):
s = stream.Stream()
observer = mock.Mock()
s.subscribe(observer)
s.publish_value(1)
expect(observer.on_next.call_args).to(equal(mock.call(1)))
|
Python
| 0
|
@@ -343,16 +343,17 @@
o subscr
+i
bers'):%0A
|
31a8107dafdf20a67360d39d1b11cd1c9070f6b5
|
Make search_children compatible with multi transactional analysis
|
mythril/analysis/modules/external_calls.py
|
mythril/analysis/modules/external_calls.py
|
from z3 import *
from mythril.analysis.ops import *
from mythril.analysis.report import Issue
from mythril.analysis import solver
from mythril.analysis.swc_data import REENTRANCY
import re
import logging
from mythril.laser.ethereum.cfg import JumpType
"""
MODULE DESCRIPTION:
Check for call.value()() to external addresses
"""
MAX_SEARCH_DEPTH = 64
def search_children(statespace, node, start_index=0, depth=0, results=None):
if results is None:
results = []
logging.debug("SEARCHING NODE %d", node.uid)
if depth < MAX_SEARCH_DEPTH:
n_states = len(node.states)
if n_states > start_index:
for j in range(start_index, n_states):
if node.states[j].get_current_instruction()["opcode"] == "SSTORE":
results.append(node.states[j].get_current_instruction()["address"])
children = []
for edge in statespace.edges:
if edge.node_from == node.uid and edge.type != JumpType.Transaction:
children.append(statespace.nodes[edge.node_to])
if len(children):
for node in children:
results += search_children(
statespace, node, depth=depth + 1, results=results
)
return results
calls_visited = []
def execute(statespace):
issues = []
for call in statespace.calls:
state = call.state
address = state.get_current_instruction()["address"]
if call.type == "CALL":
logging.info(
"[EXTERNAL_CALLS] Call to: %s, value = %s, gas = %s"
% (str(call.to), str(call.value), str(call.gas))
)
if (
call.to.type == VarType.SYMBOLIC
and (call.gas.type == VarType.CONCRETE and call.gas.val > 2300)
or (call.gas.type == VarType.SYMBOLIC and "2300" not in str(call.gas))
):
description = "This contract executes a message call to "
target = str(call.to)
user_supplied = False
if "calldata" in target or "caller" in target:
if "calldata" in target:
description += "an address provided as a function argument. "
else:
description += "the address of the transaction sender. "
user_supplied = True
else:
m = re.search(r"storage_([a-z0-9_&^]+)", str(call.to))
if m:
idx = m.group(1)
func = statespace.find_storage_write(
state.environment.active_account.address, idx
)
if func:
description += (
"an address found at storage slot "
+ str(idx)
+ ". "
+ "This storage slot can be written to by calling the function `"
+ func
+ "`. "
)
user_supplied = True
if user_supplied:
description += (
"Generally, it is not recommended to call user-supplied addresses using Solidity's call() construct. "
"Note that attackers might leverage reentrancy attacks to exploit race conditions or manipulate this contract's state."
)
issue = Issue(
contract=call.node.contract_name,
function=call.node.function_name,
address=address,
title="Message call to external contract",
_type="Warning",
description=description,
swc_id=REENTRANCY,
)
else:
description += "to another contract. Make sure that the called contract is trusted and does not execute user-supplied code."
issue = Issue(
contract=call.node.contract_name,
function=call.node.function_name,
address=address,
title="Message call to external contract",
_type="Informational",
description=description,
swc_id=REENTRANCY,
)
issues.append(issue)
if address not in calls_visited:
calls_visited.append(address)
logging.debug(
"[EXTERNAL_CALLS] Checking for state changes starting from "
+ call.node.function_name
)
# Check for SSTORE in remaining instructions in current node & nodes down the CFG
state_change_addresses = search_children(
statespace, call.node, call.state_index + 1, depth=0, results=[]
)
logging.debug(
"[EXTERNAL_CALLS] Detected state changes at addresses: "
+ str(state_change_addresses)
)
if len(state_change_addresses):
for address in state_change_addresses:
description = (
"The contract account state is changed after an external call. "
"Consider that the called contract could re-enter the function before this "
"state change takes place. This can lead to business logic vulnerabilities."
)
issue = Issue(
contract=call.node.contract_name,
function=call.node.function_name,
address=address,
title="State change after external call",
_type="Warning",
description=description,
swc_id=REENTRANCY,
)
issues.append(issue)
return issues
|
Python
| 0.000001
|
@@ -367,16 +367,21 @@
hildren(
+%0A
statespa
@@ -389,16 +389,32 @@
e, node,
+ transaction_id,
start_i
@@ -442,16 +442,17 @@
lts=None
+%0A
):%0A i
@@ -718,16 +718,38 @@
if
+(%0A
node.sta
@@ -802,16 +802,114 @@
%22SSTORE%22
+%0A and node.states%5Bj%5D.current_transaction.id == transaction_id%0A )
:%0A
@@ -1293,32 +1293,32 @@
earch_children(%0A
-
@@ -1338,16 +1338,32 @@
e, node,
+ transaction_id,
depth=d
@@ -5290,50 +5290,205 @@
ace,
- call.node, call.state_index + 1, depth=0,
+%0A call.node,%0A call.state.current_transaction.id,%0A call.state_index + 1,%0A depth=0,%0A
res
@@ -5494,16 +5494,17 @@
sults=%5B%5D
+,
%0A
|
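The key move in this diff, threading a transaction_id through every level of a recursive graph search and filtering matches on it, generalizes beyond mythril. A stripped-down sketch over a toy node graph (all names illustrative):

MAX_SEARCH_DEPTH = 64

def search_children(nodes, edges, node, transaction_id, depth=0, results=None):
    if results is None:
        results = []
    if depth >= MAX_SEARCH_DEPTH:
        return results
    # Only count states belonging to the transaction under analysis.
    for state in nodes[node]:
        if state['opcode'] == 'SSTORE' and state['txid'] == transaction_id:
            results.append(state['address'])
    for src, dst in edges:
        if src == node:
            search_children(nodes, edges, dst, transaction_id, depth + 1, results)
    return results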
e6751c2741d2605713e631b9984cde7b8234433e
|
Remove spurious log statement
|
nbgrader/auth/hubauth.py
|
nbgrader/auth/hubauth.py
|
"""JupyterHub authenticator."""
import requests
import os
import json
from subprocess import check_output
from flask import request, redirect, abort
from IPython.utils.traitlets import Unicode, Int, List, Bool
from nbgrader.html.formgrade import blueprint
from .base import BaseAuth
class HubAuth(BaseAuth):
"""Jupyter hub authenticator."""
graders = List([], config=True, help="List of JupyterHub user names allowed to grade.")
proxy_address = Unicode(config=True, help="Address of the configurable-http-proxy server.")
def _proxy_address_default(self):
return self._ip
proxy_port = Int(8001, config=True, help="Port of the configurable-http-proxy server.")
hub_address = Unicode(config=True, help="Address of the hub server.")
def _hub_address_default(self):
return self._ip
hub_port = Int(8000, config=True, help="Port of the hub server.")
hubapi_address = Unicode(config=True, help="Address of the hubapi server.")
def _hubapi_address_default(self):
return self._ip
hubapi_port = Int(8081, config=True, help="Port of the hubapi server.")
hubapi_cookie = Unicode("jupyter-hub-token", config=True, help="Name of the cookie used by JupyterHub")
notebook_url_prefix = Unicode(None, config=True, allow_none=True, help="""
Relative path of the formgrader with respect to the hub's user base
directory. No trailing slash. i.e. "Documents" or "Documents/notebooks". """)
def _notebook_url_prefix_changed(self, name, old, new):
self.notebook_url_prefix = new.strip('/')
hub_base_url = Unicode(config=True, help="Base URL of the hub server.")
def _hub_base_url_default(self):
return 'http://{}:{}'.format(self.hub_address, self.hub_port)
generate_hubapi_token = Bool(False, config=True, help="""Use `jupyterhub token` as a default
for HubAuth.hubapi_token instead of $JPY_API_TOKEN.""")
hub_db = Unicode(config=True, help="""Path to JupyterHub's database. Only
manditory if `generate_hubapi_token` is True.""")
hubapi_token = Unicode(config=True, help="""JupyterHub API auth token.
Generated by running `jupyterhub token`. If not explicitly set,
nbgrader will use $JPY_API_TOKEN as the API token.""")
def _hubapi_token_default(self):
if self.generate_hubapi_token:
return check_output([
'jupyterhub', 'token', '--db={}'.format(self.hub_db)
]).decode('utf-8').strip()
else:
return os.environ.get('JPY_API_TOKEN', '')
proxy_token = Unicode(config=True, help="""JupyterHub configurable proxy
auth token. If not explicitly set, nbgrader will use
$CONFIGPROXY_AUTH_TOKEN as the API token.""")
def _proxy_token_default(self):
return os.environ.get('CONFIGPROXY_AUTH_TOKEN', '')
remap_url = Unicode(config=True, help="""Suffix appened to
`HubAuth.hub_base_url` to form the full URL to the formgrade server. By
default this is '/hub/{{NbGraderConfig.course_id}}'. Change this if you
plan on running more than one formgrade server behind one JupyterHub
instance.""")
def _remap_url_default(self):
return '/hub/nbgrader/' + self.parent.course_id
def _remap_url_changed(self, name, old, new):
self.remap_url = new.rstrip('/')
def __init__(self, *args, **kwargs):
super(HubAuth, self).__init__(*args, **kwargs)
# Create base URLs for the hub and proxy.
self._hubapi_base_url = 'http://{}:{}'.format(self.hubapi_address, self.hubapi_port)
self._proxy_base_url = 'http://{}:{}'.format(self.proxy_address, self.proxy_port)
# Register self as a route of the configurable-http-proxy and then
# update the base_url to point to the new path.
response = self._proxy_request('/api/routes' + self.remap_url, method='POST', body={
'target': self._base_url
})
if response.status_code != 201:
raise Exception('Error while trying to add JupyterHub route. {}: {}'.format(response.status_code, response.text))
self._base_url = self.hub_base_url + self.remap_url
# Redirect all formgrade request to the correct API method.
self._app.register_blueprint(blueprint, static_url_path=self.remap_url + '/static', url_prefix=self.remap_url, url_defaults={'name': 'hub'})
def authenticate(self):
"""Authenticate a request.
Returns a boolean or flask redirect."""
# If auth cookie doesn't exist, redirect to the login page with
# next set to redirect back to the this page.
if 'jupyter-hub-token' not in request.cookies:
return redirect(self.hub_base_url + '/hub/login?next=' + self.hub_base_url + self.remap_url)
cookie = request.cookies[self.hubapi_cookie]
# Check with the Hub to see if the auth cookie is valid.
response = self._hubapi_request('/hub/api/authorizations/cookie/' + self.hubapi_cookie + '/' + cookie)
if response.status_code == 200:
# Auth information recieved.
data = response.json()
self.log.info(data)
if 'name' in data:
user = data['name']
# Check if the user name is registered as a grader.
if user in self.graders:
self._user = user
return True
else:
self.log.warn('Unauthorized user "%s" attempted to access the formgrader.' % user)
else:
self.log.warn('Malformed response from the JupyterHub auth API.')
abort(500, "Failed to check authorization, malformed response from Hub auth.")
elif response.status_code == 403:
self.log.error("I don't have permission to verify cookies, my auth token may have expired: [%i] %s", response.status_code, response.reason)
abort(500, "Permission failure checking authorization, I may need to be restarted")
elif response.status_code >= 500:
self.log.error("Upstream failure verifying auth token: [%i] %s", response.status_code, response.reason)
abort(502, "Failed to check authorization (upstream problem)")
elif response.status_code >= 400:
self.log.warn("Failed to check authorization: [%i] %s", response.status_code, response.reason)
abort(500, "Failed to check authorization")
else:
# Auth invalid, reauthenticate.
return redirect(self.hub_base_url + '/hub/login?next=' + self.hub_base_url + self.remap_url)
return False
def notebook_server_exists(self):
"""Does the notebook server exist?"""
return True
def get_notebook_url(self, relative_path):
"""Gets the notebook's url."""
if self.notebook_url_prefix is not None:
relative_path = self.notebook_url_prefix + '/' + relative_path
return self.hub_base_url + "/user/{}/notebooks/{}".format(
self._user,
relative_path)
def _hubapi_request(self, *args, **kwargs):
return self._request('hubapi', *args, **kwargs)
def _proxy_request(self, *args, **kwargs):
return self._request('proxy', *args, **kwargs)
def _request(self, service, relative_path, method='GET', body=None):
base_url = getattr(self, '_%s_base_url' % service)
token = getattr(self, '%s_token' % service)
data = body
if isinstance(data, (dict,)):
data = json.dumps(data)
return requests.request(method, base_url + relative_path, headers={
'Authorization': 'token %s' % token
}, data=data)
|
Python
| 0.000004
|
@@ -5171,40 +5171,8 @@
n()%0A
- self.log.info(data)%0A
|
014409ba87895f984a453347bcfb729e157d6be0
|
Use pkg_resources to locate data folder within package
|
marcottievents/base.py
|
marcottievents/base.py
|
import re
import sys
import logging
from contextlib import contextmanager
from sqlalchemy.engine import create_engine
from sqlalchemy.orm.session import Session
from .version import __version__
from etl.ecsv import CSVExtractor
from etl import ETL, MarcottiTransform, MarcottiLoad
logger = logging.getLogger(__name__)
class Marcotti(object):
def __init__(self, config):
logger.info("Marcotti-MLS v{0}: Python {1} on {2}".format(
__version__, sys.version, sys.platform))
logger.info("Opened connection to {0}".format(self._public_db_uri(config.database_uri)))
self.settings = config
self.engine = create_engine(config.database_uri)
self.connection = self.engine.connect()
@staticmethod
def _public_db_uri(uri):
"""
Strip out database username/password from database URI.
:param uri: Database URI string.
:return: Database URI with username/password removed.
"""
return re.sub(r"//.*@", "//", uri)
def create_db(self, base):
logger.info("Creating data models")
base.metadata.create_all(self.connection)
def initial_load(self, lang=None):
"""
Load validation data into database.
:param lang: Language of country names to be loaded ('es' for Spanish or None for English)
"""
with self.create_session() as sess:
etl = ETL(transform=MarcottiTransform, load=MarcottiLoad, session=sess)
csv_obj = CSVExtractor(None)
for entity in ['years', 'seasons']:
logger.info("Loading {}".format(entity.capitalize()))
data = getattr(csv_obj, entity)(self.settings.START_YEAR, self.settings.END_YEAR)
etl.workflow(entity, data)
csv_validation = CSVExtractor('data')
for entity in ['countries', 'modifiers', 'positions', 'surfaces', 'timezones']:
logger.info("Loading {}".format(entity.capitalize()))
if entity == 'countries':
lang_element = [lang] if lang else []
data_file = '{}.csv'.format('-'.join([entity]+lang_element))
else:
data_file = '{}.csv'.format(entity)
data = getattr(csv_validation, entity)(data_file)
etl.workflow(entity, data)
@contextmanager
def create_session(self):
session = Session(self.connection)
logger.info("Create session {0} with {1}".format(
id(session), self._public_db_uri(str(self.engine.url))))
try:
yield session
session.commit()
logger.info("Committing remaining transactions to database")
except Exception as ex:
session.rollback()
logger.exception("Database transactions rolled back")
raise ex
finally:
logger.info("Session {0} with {1} closed".format(
id(session), self._public_db_uri(str(self.engine.url))))
session.close()
class MarcottiConfig(object):
"""
Base configuration class for Marcotti-Events. Contains one method that defines the database URI.
This class is to be subclassed and its attributes defined therein.
"""
@property
def database_uri(self):
if getattr(self, 'DIALECT') == 'sqlite':
uri = r'sqlite://{p.DBNAME}'.format(p=self)
else:
uri = r'{p.DIALECT}://{p.DBUSER}:{p.DBPASSWD}@{p.HOSTNAME}:{p.PORT}/{p.DBNAME}'.format(p=self)
return uri
|
Python
| 0
|
@@ -29,16 +29,37 @@
logging%0A
+import pkg_resources%0A
from con
@@ -1840,14 +1840,66 @@
tor(
+pkg_resources.resource_filename('marcottievents',
'data
-'
+/')
)%0A
|
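pkg_resources.resource_filename resolves a path relative to the installed package rather than the process's working directory, which is what keeps the bundled data/ folder findable after a pip install. A quick sketch (the CSV name is illustrative):

import pkg_resources

# Absolute path to the data/ directory shipped inside the package,
# independent of where the process was started.
data_dir = pkg_resources.resource_filename('marcottievents', 'data/')
countries_csv = pkg_resources.resource_filename('marcottievents', 'data/countries.csv')

On current Pythons, the standard library's importlib.resources.files() is the usual replacement for this setuptools API.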
e2a1830d03df1998e99ec7fae8ce2ef06ae737ab
|
allow dockerfile
|
otto/otto.py
|
otto/otto.py
|
"""ShutIt module. See http://shutit.tk
"""
from shutit_module import ShutItModule
class otto(ShutItModule):
def build(self, shutit):
# Some useful API calls for reference. See shutit's docs for more info and options:
#
# ISSUING BASH COMMANDS
# shutit.send(send,expect=<default>) - Send a command, wait for expect (string or compiled regexp)
# to be seen before continuing. By default this is managed
# by ShutIt with shell prompts.
# shutit.multisend(send,send_dict) - Send a command, dict contains {expect1:response1,expect2:response2,...}
# shutit.send_and_get_output(send) - Returns the output of the sent command
# shutit.send_and_match_output(send, matches)
# - Returns True if any lines in output match any of
# the regexp strings in the matches list
# shutit.send_until(send,regexps) - Send command over and over until one of the regexps seen in the output.
# shutit.run_script(script) - Run the passed-in string as a script
# shutit.install(package) - Install a package
# shutit.remove(package) - Remove a package
# shutit.login(user='root', command='su -')
# - Log user in with given command, and set up prompt and expects.
# Use this if your env (or more specifically, prompt) changes at all,
# eg reboot, bash, ssh
# shutit.logout(command='exit') - Clean up from a login.
#
# COMMAND HELPER FUNCTIONS
# shutit.add_to_bashrc(line) - Add a line to bashrc
# shutit.get_url(fname, locations) - Get a file via url from locations specified in a list
# shutit.get_ip_address() - Returns the ip address of the target
# shutit.command_available(command) - Returns true if the command is available to run
#
# LOGGING AND DEBUG
# shutit.log(msg,add_final_message=False) -
# Send a message to the log. add_final_message adds message to
# output at end of build
# shutit.pause_point(msg='') - Give control of the terminal to the user
# shutit.step_through(msg='') - Give control to the user and allow them to step through commands
#
# SENDING FILES/TEXT
# shutit.send_file(path, contents) - Send file to path on target with given contents as a string
# shutit.send_host_file(path, hostfilepath)
# - Send file from host machine to path on the target
# shutit.send_host_dir(path, hostfilepath)
# - Send directory and contents to path on the target
# shutit.insert_text(text, fname, pattern)
# - Insert text into file fname after the first occurrence of
# regexp pattern.
# shutit.delete_text(text, fname, pattern)
# - Delete text from file fname after the first occurrence of
# regexp pattern.
# shutit.replace_text(text, fname, pattern)
# - Replace text from file fname after the first occurrence of
# regexp pattern.
# ENVIRONMENT QUERYING
# shutit.host_file_exists(filename, directory=False)
# - Returns True if file exists on host
# shutit.file_exists(filename, directory=False)
# - Returns True if file exists on target
# shutit.user_exists(user) - Returns True if the user exists on the target
# shutit.package_installed(package) - Returns True if the package exists on the target
# shutit.set_password(password, user='')
# - Set password for a given user on target
#
# USER INTERACTION
# shutit.get_input(msg,default,valid[],boolean?,ispass?)
# - Get input from user and return output
# shutit.fail(msg) - Fail the program and exit with status 1
#
shutit.install('git')
shutit.install('golang')
shutit.install('golang-golang-x-tools')
shutit.install('build-essential')
shutit.install('zip')
#shutit.install('golang-golang-x-net-dev')
shutit.add_to_bashrc('export GOPATH=/usr/share/go')
shutit.add_to_bashrc('export PATH=$PATH:/usr/lib/go/bin')
shutit.send('export GOPATH=/usr/share/go')
shutit.send('export PATH=$PATH:/usr/lib/go/bin')
shutit.send('git clone https://github.com/hashicorp/otto.git')
shutit.send('cd otto')
shutit.send('go get',check_exit=False)
shutit.send('go get -u github.com/jteeuwen/go-bindata/...')
shutit.send('go get github.com/mitchellh/gox')
shutit.send('cd ..')
shutit.send('mv otto $GOPATH/src')
shutit.send('cd $GOPATH/src/otto')
shutit.send('mkdir -p /usr/share/go/bin')
shutit.send('make bin')
return True
def get_config(self, shutit):
# CONFIGURATION
# shutit.get_config(module_id,option,default=None,boolean=False)
# - Get configuration value, boolean indicates whether the item is
# a boolean type, eg get the config with:
# shutit.get_config(self.module_id, 'myconfig', default='a value')
# and reference in your code with:
# shutit.cfg[self.module_id]['myconfig']
return True
def test(self, shutit):
# For test cycle part of the ShutIt build.
return True
def finalize(self, shutit):
# Any cleanup required at the end.
return True
def is_installed(self, shutit):
return False
def module():
return otto(
'shutit.tk.otto.otto.otto', 789610974.00,
description='',
maintainer='',
delivery_methods=['docker'],
depends=['shutit.tk.setup']
)
|
Python
| 0.000002
|
@@ -5938,16 +5938,29 @@
'docker'
+,'dockerfile'
%5D,%0A%09%09dep
|