| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
d871328e94f804a2a296f4fba44751fa98e498d1
|
Update custom dashboard response
|
rhizome/api/resources/custom_dashboard.py
|
rhizome/api/resources/custom_dashboard.py
|
from tastypie.resources import ALL
from rhizome.api.resources.base_model import BaseModelResource
from rhizome.api.exceptions import DatapointsException
from rhizome.models import CustomDashboard, CustomChart, ChartToDashboard
import json
class CustomDashboardResource(BaseModelResource):
class Meta(BaseModelResource.Meta):
resource_name = 'custom_dashboard'
filtering = {
"id": ALL,
}
always_return_data = True
def get_detail(self, request, **kwargs):
requested_id = kwargs['pk']
bundle = self.build_bundle(request=request)
response_data = CustomDashboard.objects.get(id=requested_id).__dict__
response_data.pop('_state')
chart_data = [c for c in CustomChart.objects\
.filter(charttodashboard__dashboard_id = requested_id).values()]
response_data['charts'] = chart_data
bundle.data = response_data
return self.create_response(request, bundle)
def obj_create(self, bundle, **kwargs):
post_data = bundle.data
user_id = bundle.request.user.id
try:
dash_id = int(post_data['id'])
except KeyError:
dash_id = None
title = post_data['title']
try:
description = post_data['description']
except KeyError:
description = ''
try:
layout = int(post_data['layout'])
except KeyError:
layout = 0
defaults = {
'id': dash_id,
'title': title,
'description': description,
'layout': layout
}
if(CustomDashboard.objects.filter(title=title).count() > 0 and (dash_id is None)):
raise DatapointsException('the custom dashboard "{0}" already exists'.format(title))
dashboard, created = CustomDashboard.objects.update_or_create(id=dash_id, defaults=defaults)
bundle.obj = dashboard
bundle.data['id'] = dashboard.id
## optionally add charts to the dashboard ##
try:
chart_uuids = post_data['chart_uuids']
self.upsert_chart_uuids(dashboard.id, chart_uuids)
except KeyError:
pass
return bundle
def upsert_chart_uuids(self, dashboard_id, chart_uuids):
chart_ids = CustomChart.objects.filter(uuid__in = chart_uuids)\
.values_list('id',flat=True)
batch = [ChartToDashboard(**{
'chart_id': c_id,
'dashboard_id': dashboard_id
}) for c_id in chart_ids]
ChartToDashboard.objects.filter(dashboard_id = dashboard_id).delete()
ChartToDashboard.objects.bulk_create(batch)
def obj_delete_list(self, bundle, **kwargs):
"""
"""
obj_id = int(bundle.request.GET[u'id'])
CustomChart.objects.filter(dashboard_id=obj_id).delete()
CustomDashboard.objects.filter(id=obj_id).delete()
def get_object_list(self, request):
'''
'''
try:
dash_id = request.GET['id']
return CustomDashboard.objects.filter(id=dash_id).values()
except KeyError:
return CustomDashboard.objects.all().values()
|
Python
| 0
|
@@ -2292,24 +2292,105 @@
rt_uuids):

+        if type(chart_uuids) == unicode:
+            chart_uuids = [chart_uuids]
char
|
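Decoded and applied, the hunk guards `upsert_chart_uuids` against a single chart uuid arriving as a bare string (which the `uuid__in` filter would otherwise iterate character by character). A sketch of the resulting method, with indentation inferred and Python 2 `unicode` assumed:

```python
def upsert_chart_uuids(self, dashboard_id, chart_uuids):
    # Normalize a lone uuid string to a one-element list before filtering.
    if type(chart_uuids) == unicode:
        chart_uuids = [chart_uuids]
    chart_ids = CustomChart.objects.filter(uuid__in=chart_uuids)\
        .values_list('id', flat=True)
    batch = [ChartToDashboard(chart_id=c_id, dashboard_id=dashboard_id)
             for c_id in chart_ids]
    # Replace the dashboard's chart links wholesale.
    ChartToDashboard.objects.filter(dashboard_id=dashboard_id).delete()
    ChartToDashboard.objects.bulk_create(batch)
```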
7e13edfea2ee0c055f890fba08fa645141cd2f7d
|
add colourbar
|
helix.py
|
helix.py
|
# Create the data.
from numpy import pi, sin, cos, mgrid
[u,v] = mgrid[-5:5:0.01,0:2*pi+0.1:0.1]
a=2
x = u*cos(v)
y = u*sin(v)
z = a*v
K=-a**2/(u**2 +a**2)**2
from mayavi import mlab
s = mlab.mesh(x, y, z,scalars=K)
mlab.show()
|
Python
| 0.000001
|
@@ -211,16 +211,83 @@
lars=K)
+mlab.colorbar(orientation='horizontal',title='Gaussian Curvature')
mlab.sho
|
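Applied, the script gains a horizontal colorbar just before `mlab.show()`; the full result would read:

```python
# Create the data.
from numpy import pi, sin, cos, mgrid
[u, v] = mgrid[-5:5:0.01, 0:2*pi+0.1:0.1]
a = 2
x = u*cos(v)
y = u*sin(v)
z = a*v
# Gaussian curvature of the helicoid.
K = -a**2/(u**2 + a**2)**2

from mayavi import mlab
s = mlab.mesh(x, y, z, scalars=K)
mlab.colorbar(orientation='horizontal', title='Gaussian Curvature')
mlab.show()
```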
b5146035b7f4ae641a53bb956e9afee62c50c347
|
Change cache directory for vendor LST
|
kotori/vendor/lst/h2m/util.py
|
kotori/vendor/lst/h2m/util.py
|
# -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
import os
from appdirs import user_cache_dir
from kotori.daq.intercom.c import LibraryAdapter, StructRegistryByID
#from kotori.daq.intercom.cffi_adapter import LibraryAdapterCFFI
def setup_h2m_structs_pyclibrary():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapter(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
def setup_h2m_structs_cffi():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapterCFFI(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
setup_h2m_structs = setup_h2m_structs_pyclibrary
|
Python
| 0
|
@@ -301,32 +301,45 @@
cache_dir =
+os.path.join(
user_cache_dir('
@@ -334,37 +334,38 @@
_cache_dir('
-lst', 'elmyra
+kotori'), 'lst
')
    if no
@@ -732,16 +732,29 @@
e_dir =
+os.path.join(
user_cac
@@ -765,21 +765,22 @@
ir('
-lst', 'elmyra
+kotori'), 'lst
')
|
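Both cache-directory assignments end up nesting an `lst` folder under the kotori cache directory instead of using a standalone `lst`/`elmyra` app dir:

```python
# Before: cache_dir = user_cache_dir('lst', 'elmyra')
# After: a per-vendor subdirectory of the kotori cache.
cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
if not os.path.isdir(cache_dir):
    os.makedirs(cache_dir)
```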
e6b11c0c110d0457cc31d7d798a2b35e19a0f56e
|
fix wrong parser
|
slackn/cli.py
|
slackn/cli.py
|
import sys
import logging
from argparse import ArgumentParser
from slackn.core import Queue, Notifier
from slackn.version import version
log = logging.getLogger('slackn')
def get_queue(s):
if ':' in s:
host,port = s.split(':')
else:
host,port = (s, 6379)
return Queue(host,port)
def process():
parser = ArgumentParser(description='slackn_process v%s' % version)
parser.add_argument('--slack-channel',
help='channel to send notifications')
parser.add_argument('--slack-token',
help='channel to send notifications')
parser.add_argument('--redis',
default='127.0.0.1:6379',
help='redis host:port to connect to')
args = parser.parse_args()
queue = get_queue(args.redis)
notifier = Notifier(args.slack_token, args.slack_channel)
for hostname,msgs in queue.dump().items():
notifier.add_host(hostname, msgs)
queue.increment('sent', len(msgs))
notifier.send()
def notify():
common_parser = ArgumentParser(add_help=False)
common_parser.add_argument('--redis',
help='redis host to connect to (127.0.0.1:6379)',
default='127.0.0.1:6379')
parser = ArgumentParser(description='slackn-notify %s' % version,
parents=[common_parser])
subparsers = parser.add_subparsers(description='notification type',
dest='subcommand')
parser_host = subparsers.add_parser('host')
parser_host.add_argument('hostname')
parser_host.add_argument('hoststate')
parser_host.add_argument('hostoutput')
parser_host.add_argument('nagiostype')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_host.add_argument('nagiostype')
args = parser.parse_args()
if not args.subcommand:
print('no notification type provided')
sys.exit(1)
queue = get_queue(args.redis)
notify_args = { k:v for k,v in args.__dict__.items() }
for k in ('redis','subcommand'):
del notify_args[k]
notify_args['type'] = args.subcommand
queue.submit(notify_args)
|
Python
| 0.998471
|
@@ -1979,36 +1979,39 @@
ut')
    parser_
-host
+service
.add_argument('n
|
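The fix repoints the duplicated `nagiostype` argument from the host subparser to the service subparser, so the service block becomes:

```python
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_service.add_argument('nagiostype')  # was mistakenly parser_host
```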
463d044cfa70de6bde04c380c459274acb71a1b6
|
add database
|
hello.py
|
hello.py
|
from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
class NameForm(Form):
name = StringField('What\'s your name?', validators = [Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods = ['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
manager.run()
|
Python
| 0.000001
|
@@ -304,152 +304,874 @@
red
-
-app = Flask(__name__)
-app.config['SECRET_KEY'] = 'hard to guess string'
-
-manager = Manager(app)
-bootstrap = Bootstrap(app)
-moment = Moment(app)
+from flask.ext.sqlalchemy import SQLAlchemy
+
+basedir = os.path.abspath(os.path.dirname(__file__))
+
+app = Flask(__name__)
+app.config['SECRET_KEY'] = 'hard to guess string'
+app.config['SQLALCHEMY_DATABASE_URI'] = \
+'sqlite:///' + os.path.join(basedir, 'data.sqlite')
+app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
+
+db = SQLAlchemy(app)
+
+manager = Manager(app)
+bootstrap = Bootstrap(app)
+moment = Moment(app)
+
+
+class Role(db.Model):
+    __tablename__ = 'roles'
+    id = db.Column(db.Integer, primary_key = True)
+    name = db.Column(db.String(64), unique = True)
+
+    def __repr__(self):
+        return '<Role %r>' % self.name
+
+class User(db.Model):
+    __tablename__ = 'users'
+    id = db.Column(db.Integer, primary_key = True)
+    username = db.Column(db.String(64), unique = True, index = True)
+
+    def __repr__(self):
+        return '<User %r>' % self.username


c
|
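Note the inserted block calls `os.path` although the file as shown never imports `os`, so the commit presumably relies on an import added elsewhere. A minimal runnable sketch of the resulting configuration and models, with the modern `flask_sqlalchemy` import path substituted for the deprecated `flask.ext.sqlalchemy`:

```python
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy  # flask.ext.sqlalchemy in the original

basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True

db = SQLAlchemy(app)


class Role(db.Model):
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)

    def __repr__(self):
        return '<Role %r>' % self.name


class User(db.Model):
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), unique=True, index=True)

    def __repr__(self):
        return '<User %r>' % self.username
```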
b07ada0833c1d8319e946e7444b2f2c1337c15d1
|
Add some debug
|
hello.py
|
hello.py
|
from flask import Flask, render_template, jsonify, request
import pandas as pd
import networkx as nx
import pygraphviz as pgv
import json
import tempfile
import numpy as np
import brewer2mpl
from StringIO import StringIO
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/')
def hello():
return render_template('index.html', name="Julia")
@app.route('/graph', methods=["POST"])
def get_image():
history = StringIO(request.form["history"])
pair_counts, node_totals = get_statistics(history)
G = create_graph(pair_counts[:20], node_totals)
response = {'graph': dot_draw(G, tmp_dir="./tmp")}
return jsonify(response)
def dot_draw(G, prog="circo", tmp_dir="/tmp"):
# Hackiest code :)
tmp_dot = tempfile.mktemp(dir=tmp_dir, suffix=".dot")
tmp_image = tempfile.mktemp(dir=tmp_dir, suffix=".png")
nx.write_dot(G, tmp_dot)
dot_graph = pgv.AGraph(tmp_dot)
dot_graph.draw(tmp_image, prog=prog)
with open(tmp_image) as f:
data = f.read()
return data.encode("base64")
def getwidth(node, node_totals):
count = np.sqrt(node_totals[node])
count /= float(sum(np.sqrt(node_totals)))
count *= 20
count = max(count, 0.3)
count = min(count, 4)
return count
def get_colors(nodes):
n_colors = 8
colors = {}
set2 = brewer2mpl.get_map('Dark2', 'qualitative', n_colors).hex_colors
for i, node in enumerate(nodes):
colors[node] = set2[i % n_colors]
return colors
def create_graph(pair_counts, node_totals):
G = nx.DiGraph()
node_colors = get_colors(list(node_totals.index))
for (frm, to), count in pair_counts.iterrows():
G.add_edge(frm, to, penwidth=float(count) / 8, color=node_colors[frm])
for node in G.nodes():
G.node[node]['width'] = getwidth(node, node_totals)
G.node[node]['height'] = G.node[node]['width']
G.node[node]['color'] = node_colors[node]
G.node[node]['label'] = "%s (%d%%)" % (node, int(node_totals[node] / float(sum(node_totals)) * 100) )
return G
def get_statistics(text):
df = pd.read_csv(text, sep=' ', header=None, names=["row", "command"], index_col="row")
pairs = pd.DataFrame(index=range(len(df) - 1))
pairs['dist'] = df.index[1:].values - df.index[:-1].values
pairs['from'] = df['command'][:-1].values
pairs['to'] = df['command'][1:].values
node_totals = df['command'].value_counts()
close_pairs = pairs[pairs.dist == 1]
pair_counts = close_pairs.groupby(['from', 'to']).aggregate(len).rename(columns= {'dist': 'count'})
pair_counts = pair_counts.sort('count', ascending=False)
return pair_counts, node_totals
if __name__ == "__main__":
app.run(port=5001)
|
Python
| 0.000009
|
@@ -403,24 +403,60 @@
et_image():
+    import os
+    print os.getcwd()
history
|
b7f787e0999055f3bf25ba0621df93973d259ac9
|
use CYAN_URL instead of DATABASE_URL
|
hello.py
|
hello.py
|
from flask import Flask, render_template, jsonify, request, g
import pandas as pd
import networkx as nx
import pygraphviz as pgv
import json
import tempfile
import numpy as np
import brewer2mpl
from StringIO import StringIO
import psycopg2
import urlparse
import os
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/')
def hello():
return render_template('index.html')
@app.route('/display/<num>')
def display_graph(num=None):
cursor = g.conn.cursor()
#try:
print num
cursor.execute("SELECT logfile FROM log WHERE id = %s", (num,));
history = cursor.fetchone()[0]
svg = create_svg(history)
#except:
# g.conn.rollback()
# svg = "Page not found"
return render_template("display_graph.html", svg=svg)
def create_svg(history):
pair_counts, node_totals = get_statistics(StringIO(history))
G = create_graph(pair_counts[pair_counts['count'] >= 3], node_totals)
return dot_draw(G, tmp_dir="./tmp")
@app.route('/graph', methods=["POST"])
def get_image():
history = request.form["history"]
svg = create_svg(history)
row_id = write_to_db(history)
response = jsonify({'graph': svg, 'id': row_id})
return response
def write_to_db(history):
cursor = g.conn.cursor()
query = "INSERT INTO log (logfile) VALUES (%s) RETURNING id;"
try:
cursor.execute(query, (history,))
row_id = cursor.fetchone()[0]
g.conn.commit()
except psycopg2.IntegrityError:
g.conn.rollback()
select_query = "SELECT id FROM log WHERE logfile = %s;"
cursor.execute(select_query, (history,))
row_id = cursor.fetchone()[0]
print "Not adding data -- it already exists"
return row_id
def dot_draw(G, prog="dot", tmp_dir="/tmp"):
# Hackiest code :)
tmp_dot = tempfile.mktemp(dir=tmp_dir, suffix=".dot")
tmp_image = tempfile.mktemp(dir=tmp_dir, suffix=".svg")
nx.write_dot(G, tmp_dot)
dot_graph = pgv.AGraph(tmp_dot)
dot_graph.draw(tmp_image, prog=prog)
with open(tmp_image) as f:
data = f.read()
return data
def getwidth(node, node_totals):
count = np.sqrt(node_totals[node])
count /= float(sum(np.sqrt(node_totals)))
count *= 6
count = max(count, 0.1)
count = min(count, 4)
return count
def get_colors(nodes):
n_colors = 8
colors = {}
set2 = brewer2mpl.get_map('Dark2', 'qualitative', n_colors).hex_colors
for i, node in enumerate(nodes):
colors[node] = set2[i % n_colors]
return colors
def create_graph(pair_counts, node_totals):
G = nx.DiGraph()
node_colors = get_colors(list(node_totals.index))
for (frm, to), count in pair_counts.iterrows():
G.add_edge(frm, to, penwidth=float(count) / 8, color=node_colors[frm])
for node in G.nodes():
G.node[node]['width'] = getwidth(node, node_totals)
G.node[node]['height'] = G.node[node]['width']
G.node[node]['fontsize'] = 10
G.node[node]['color'] = node_colors[node]
G.node[node]['label'] = "%s (%d%%)" % (node, int(node_totals[node] / float(sum(node_totals)) * 100) )
return G
def get_statistics(text):
df = pd.read_csv(text, sep=' ', header=None, names=["row", "command"], index_col="row")
pairs = pd.DataFrame(index=range(len(df) - 1))
pairs['dist'] = df.index[1:].values - df.index[:-1].values
pairs['from'] = df['command'][:-1].values
pairs['to'] = df['command'][1:].values
node_totals = df['command'].value_counts()
close_pairs = pairs[pairs.dist == 1]
pair_counts = close_pairs.groupby(['from', 'to']).aggregate(len).rename(columns= {'dist': 'count'})
pair_counts = pair_counts.sort('count', ascending=False)
return pair_counts, node_totals
@app.before_request
def before():
try:
os.mkdir("tmp")
except OSError:
pass
g.conn = db_connect()
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
def db_connect():
urlparse.uses_netloc.append("postgres")
db_url = os.environ["DATABASE_URL"]
url = urlparse.urlparse(db_url)
conn = psycopg2.connect(
database=url.path[1:],
user=url.username,
password=url.password,
host=url.hostname,
port=url.port
)
return conn
if __name__ == "__main__":
app.run(port=5001)
|
Python
| 0.000001
|
@@ -4078,16 +4078,30 @@
on["
-DATABASE
+HEROKU_POSTGRESQL_CYAN
_URL
|
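Decoded, the one-word swap makes `db_connect` read Heroku's color-named database variable rather than the generic alias; the resulting function:

```python
def db_connect():
    urlparse.uses_netloc.append("postgres")
    # Heroku names attached databases by color; this app uses CYAN.
    db_url = os.environ["HEROKU_POSTGRESQL_CYAN_URL"]
    url = urlparse.urlparse(db_url)
    conn = psycopg2.connect(
        database=url.path[1:],
        user=url.username,
        password=url.password,
        host=url.hostname,
        port=url.port
    )
    return conn
```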
c3febe727bab959fcbc0bc0159389995adca129c
|
Improve debug output. Print register contents in hex.
|
epiphany/test/machine.py
|
epiphany/test/machine.py
|
from pydgin.debug import Debug
from epiphany.machine import State
from epiphany.sim import new_memory
from epiphany.isa import reg_map
from epiphany.utils import bits2float
possible_attributes = "AN AZ AC AV AVS BN BV BIS BVS BUS BZ pc".split()
def new_state(mem=None, debug=Debug(), **args):
if mem is None:
mem = new_memory()
state = State(mem, debug)
for attr in possible_attributes:
if attr in args:
setattr(state, attr, args[attr])
for arg, value in args.items():
if arg in possible_attributes:
continue
elif arg.startswith("rf") and arg[2].isdigit():
index = int(arg[2:])
if index >= 107:
raise ValueError("The Epiphany only has 107 registers cannot set rf[%d]." %
index)
state.set_register(index, value)
elif arg.startswith("rf") and arg[2:] in reg_map:
state.set_register(reg_map[arg[2:]], value)
else:
raise KeyError('No such register: {0}'.format(arg[2:]))
return state
class StateChecker(object):
"""Used only for testing.
check() tests whether registers and flags of interest are equal to a
given other state.
"""
epsilon = 0.0001
def __init__(self, **args):
self.interesting_state = []
for attr in possible_attributes:
if attr in args:
self.interesting_state.append(attr)
setattr(self, attr, args[attr])
self.expected_registers = []
for arg, value in args.items():
if arg in possible_attributes:
continue
elif arg.startswith("rf") and arg[2].isdigit():
index = int(arg[2:])
if index >= 107:
raise ValueError("The Epiphany only has 107 registers cannot set rf[%d]." %
index)
self.expected_registers.append((index, value))
elif arg.startswith("rf") and arg[2:] in reg_map:
self.expected_registers.append((reg_map[arg[2:]], value))
else:
raise KeyError('No such register: {0}'.format(arg[2:]))
def check_flags(self, state):
"""Check all machine flags against an expected state.
"""
for attr in possible_attributes:
if attr in self.interesting_state:
expected = getattr(self, attr)
got = getattr(state, attr)
if expected != got:
raise ValueError("Flag %s differs. Expected: %s got: %s" %
(attr, expected, got))
def check_memory(self, memory, state):
"""Check whether locations in memory are set as expected.
The 'memory' argument should be an iterable containing 3-tuples of
address, size (in number of bytes), and expected value.
"""
if memory is None or memory == []:
return
for (location, size, expected) in memory:
got = state.mem.read(location, size)
if expected != got:
raise ValueError("Memory location %s differs. Expected: %s got: %s" %
(location, expected, got))
def check(self, state, memory=[]):
"""Check all registers and flags against an expected state.
"""
for index, expected in self.expected_registers:
got = state.rf[index]
if index > 63:
reg_name = (key for key, value in reg_map.items() if value==index).next()
else:
reg_name = index
if expected != got:
raise ValueError("Register %s differs. Expected: %s got: %s" %
(reg_name, expected, got))
self.check_flags(state)
self.check_memory(memory, state)
def fp_check(self, state, memory=[]):
"""Check all registers and flags against an expected state.
For registers, convert the contents to a Python float and check that
the state and expected state do not differ by more than self.epsilon.
"""
for index, expected in self.expected_registers:
got = state.rf[index]
if index > 63:
reg_name = (key for key, value in reg_map.items() if value==index).next()
else:
reg_name = index
if abs(bits2float(expected) - bits2float(got)) > self.epsilon:
raise ValueError("Register %s differs by more than %.4f. Expected: %s got: %s" %
(reg_name, self.epsilon,
bits2float(expected), bits2float(got)))
self.check_flags(state)
self.check_memory(memory, state)
|
Python
| 0
|
@@ -3258,24 +3258,28 @@
cation,
+hex(
expected
, got))
@@ -3262,39 +3262,45 @@
on, hex(expected
+)
,
+hex(
got))
+)

    def check(
@@ -3817,24 +3817,28 @@
g_name,
+hex(
expected
, got))
@@ -3829,23 +3829,29 @@
expected
+)
,
+hex(
got))
+)
|
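Applied, both mismatch messages print their values through `hex()`; the resulting raise statements:

```python
# check_memory(), after the change:
raise ValueError("Memory location %s differs. Expected: %s got: %s" %
                 (location, hex(expected), hex(got)))

# check(), after the change:
raise ValueError("Register %s differs. Expected: %s got: %s" %
                 (reg_name, hex(expected), hex(got)))
```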
9de3dacc7c687bc5e4d11a5a334f5ef5cc4d2f37
|
Fix call to genome mapping code
|
rnacentral_pipeline/cli/genome_mapping.py
|
rnacentral_pipeline/cli/genome_mapping.py
|
# -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral import genome_mapping
@click.group('genome-mapping')
def cli():
"""
This group of commands deals with figuring out what data to map as well as
parsing the result into a format for loading.
"""
pass
@cli.command('select-hits')
@click.argument('assembly_id')
@click.argument('hits', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def select_hits(assembly_id, hits, output):
genome_mapping.blat.write_selected(assembly_id, hits, output)
@cli.command('url-for')
@click.option('--host', default='ensembl')
@click.argument('species')
@click.argument('assembly_id')
@click.argument('output', default='-', type=click.File('w'))
def find_remote_url(species, assembly_id, output, host=None):
url = genome_mapping.urls.url_for(species, assembly_id, host=host)
output.write(url)
@cli.command('urls-for')
@click.argument('filename', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def find_remote_urls(filename, output):
genome_mapping.urls.write_urls_for(filename, output)
|
Python
| 0.000002
|
@@ -660,16 +660,72 @@
tral
- import
+.genome_mapping import urls
+from rnacentral_pipeline.rnacentral.
geno
@@ -734,16 +734,28 @@
_mapping
+ import blat


@clic
@@ -1174,31 +1174,16 @@
t):
-genome_mapping.
blat.wri
@@ -1481,31 +1481,16 @@
url =
-genome_mapping.
urls.url
@@ -1744,27 +1744,12 @@
t):
+
-genome_mapping.
urls
|
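The fix imports the two submodules explicitly and drops the now-redundant `genome_mapping.` prefixes at the call sites; the resulting imports and calls:

```python
from rnacentral_pipeline.rnacentral.genome_mapping import urls
from rnacentral_pipeline.rnacentral.genome_mapping import blat

# Call sites after the change:
blat.write_selected(assembly_id, hits, output)
url = urls.url_for(species, assembly_id, host=host)
urls.write_urls_for(filename, output)
```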
6424edf4186236443ba4ec5a1b2ffcc26de7c695
|
add classifications
|
fl/__init__.py
|
fl/__init__.py
|
# encoding=utf-8
from pupa.scrape import Jurisdiction, Organization
from .votes import FlVoteScraper
from .bills import FlBillScraper
from .people import FlPersonScraper
class Florida(Jurisdiction):
division_id = "ocd-division/country:us/state:fl"
classification = "government"
name = "Florida"
url = "http://myflorida.com"
scrapers = {
# "votes": FlVoteScraper,
"bills": FlBillScraper,
"people": FlPersonScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'},
{'name': 'Independent'}]
legislative_sessions = [
{'name': '2011 Regular Session', 'identifier': '2011', },
{'name': '2012 Regular Session', 'identifier': '2012', },
{'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', },
{'name': '2013 Regular Session', 'identifier': '2013', },
{'name': '2014 Regular Session', 'identifier': '2014', },
{'name': '2014 Special Session A', 'identifier': '2014A', },
{'name': '2015 Regular Session', 'identifier': '2015', },
{'name': '2015 Special Session A', 'identifier': '2015A', },
{'name': '2015 Special Session B', 'identifier': '2015B', },
{'name': '2016 Regular Session', 'identifier': '2016', },
]
def get_organizations(self):
legis = Organization(name="Florida Legislature", classification="legislature")
upper = Organization('Florida Senate', classification='upper', parent_id=legis._id)
lower = Organization('Florida House', classification='lower', parent_id=legis._id)
for n in range(1, 41):
upper.add_post(label=str(n), role='Senator')
for n in range(1, 121):
lower.add_post(label=str(n), role='Representative')
yield legis
yield upper
yield lower
|
Python
| 0.000317
|
@@ -539,48 +539,8 @@
ic'}
-,
-               {'name': 'Independent'}
]
@@ -629,16 +629,43 @@
'2011',
+'classification': 'primary'
},
@@ -722,16 +722,43 @@
'2012',
+'classification': 'primary'
},
@@ -832,24 +832,51 @@
': '2012B',
+'classification': 'special'
},
        {
@@ -929,16 +929,43 @@
'2013',
+'classification': 'primary'
},
@@ -1022,16 +1022,43 @@
'2014',
+'classification': 'primary'
},
@@ -1114,24 +1114,51 @@
': '2014A',
+'classification': 'special'
},
        {
@@ -1211,16 +1211,43 @@
'2015',
+'classification': 'primary'
},
@@ -1307,16 +1307,43 @@
2015A',
+'classification': 'special'
},
@@ -1403,16 +1403,43 @@
2015B',
+'classification': 'special'
},
@@ -1496,16 +1496,43 @@
'2016',
+'classification': 'primary'
},
    ]
|
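Applied, the Independent party entry is dropped and every session dict gains a `classification`; the first few entries would read:

```python
parties = [{'name': 'Republican'},
           {'name': 'Democratic'}]

legislative_sessions = [
    {'name': '2011 Regular Session', 'identifier': '2011', 'classification': 'primary'},
    {'name': '2012 Regular Session', 'identifier': '2012', 'classification': 'primary'},
    {'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', 'classification': 'special'},
    # ...remaining regular sessions are 'primary', special sessions 'special'
]
```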
d915b43cea8e4ebb5792daaa4d537d4986fc3d0f
|
document for command decorator
|
flask_slack.py
|
flask_slack.py
|
"""
flask_slack
~~~~~~~~~~~~~~~
Slack extension for Flask.
:copyright: (c) 2014 by VeryCB.
:license: BSD, see LICENSE for more details.
"""
from six import string_types
__all__ = ('Slack',)
__version__ = '0.1.2'
__author__ = 'VeryCB <imcaibin@gmail.com>'
class Slack(object):
def __init__(self, app=None):
self._commands = {}
self.team_id = None
if app:
self.init_app(app)
def init_app(self, app=None):
"""Initialize application configuration"""
config = getattr(app, 'config', app)
self.team_id = config.get('TEAM_ID')
def command(self, command, token, team_id=None, methods=['GET'], **kwargs):
if team_id is None:
team_id = self.team_id
if team_id is None:
raise RuntimeError('TEAM_ID is not found in your configuration!')
def deco(func):
self._commands[(team_id, command)] = (func, token, methods, kwargs)
return func
return deco
def dispatch(self):
from flask import request
method = request.method
data = request.args
if method == 'POST':
data = request.form
token = data.get('token')
team_id = data.get('team_id')
command = data.get('command')
if isinstance(command, string_types):
command = command.strip().lstrip('/')
try:
self.validate(command, token, team_id, method)
except SlackError as e:
return self.response(e.msg)
func, _, _, kwargs = self._commands[(team_id, command)]
kwargs.update(data.to_dict())
return func(**kwargs)
dispatch.methods = ['GET', 'POST']
def validate(self, command, token, team_id, method):
if (team_id, command) not in self._commands:
raise SlackError('Command {0} is not found in team {1}'.format(
command, team_id))
func, _token, methods, kwargs = self._commands[(team_id, command)]
if method not in methods:
raise SlackError('{} request is not allowed'.format(method))
if token != _token:
raise SlackError('Your token {} is invalid'.format(token))
def response(self, text):
from flask import Response
return Response(text, content_type='text/plain; charset=utf-8')
class SlackError(Exception):
def __init__(self, msg):
self.msg = msg
|
Python
| 0.000001
|
@@ -689,24 +689,628 @@
**kwargs):
+        """A decorator used to register a command.
+
+        :param command: the command to register
+        :param token: your command token provided by slack
+        :param team_id: optional. your team_id provided by slack.
+            You can also specify the "TEAM_ID" in app
+            configuration file for one-team project
+        :param methods: optional. HTTP methods which are accepted to
+            execute the command
+        :param kwargs: optional. the optional arguments which will be passed
+            to your register method
+        """
if t
|
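For context, a minimal usage sketch of the extension as defined above; the route, token, and team id are hypothetical placeholders:

```python
from flask import Flask

app = Flask(__name__)
app.config['TEAM_ID'] = 'T0001'  # hypothetical team id, read by init_app()
slack = Slack(app)

@slack.command('hello', token='xxxx-token', methods=['POST'])  # hypothetical token
def hello(**kwargs):
    # dispatch() passes the request's form fields through as kwargs.
    return slack.response('Hello from Flask-Slack!')

# dispatch() looks up (team_id, command) from the incoming request.
app.add_url_rule('/slack', view_func=slack.dispatch)
```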
6e9a0df29ba488a96293e938ed96561ee709fc4b
|
Improve heatmap plotting
|
smps/plots.py
|
smps/plots.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.ticker as mtick
from matplotlib.ticker import ScalarFormatter
import seaborn as sns
default_cmap = sns.cubehelix_palette(8, as_cmap=True)
rc_log = {
'xtick.major.size': 10.0,
'xtick.minor.size': 8.0,
'ytick.major.size': 10.0,
'ytick.minor.size': 8.0,
'xtick.color': '0.0',
'ytick.color': '0.0',
'axes.linewidth': 1.75
}
def heatmap(X, Y, Z, ax=None, kind='log', cbar=True, cmap=default_cmap,
fig_kws=None, cbar_kws=None, **kwargs):
"""
"""
cbar_min = kwargs.pop('cbar_min', Z.min() if Z.min() > 0.0 else 1.)
cbar_max = kwargs.pop('cbar_max', Z.max())
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if cbar_kws is None:
cbar_kws = dict(label='$dN/dlogD_p \; [cm^{-3}]$')
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
im = ax.pcolormesh(X, Y, Z, norm=LogNorm(vmin=cbar_min, vmax=cbar_max), cmap=cmap)
ax.set_ylim([Y.min(), Y.max()])
if kind == 'log':
ax.semilogy()
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_ylabel("$D_p \; [nm]$")
if cbar:
clb = plt.colorbar(im, **cbar_kws)
return ax
def histplot(histogram, bins, ax=None, plot_kws=None, fig_kws=None, **kwargs):
"""Plot the histogram in the form of a bar chart."""
if isinstance(histogram, pd.DataFrame):
histogram = histogram.mean().values
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if plot_kws is None:
plot_kws = dict(alpha=1, edgecolor=None, linewidth=0)
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
ax.bar(left=bins[:, 0], height=histogram, width=bins[:, -1] - bins[:, 0],
align='edge', **plot_kws)
ax.semilogx()
ax.set_xlabel("$D_p \; [\mu m]$")
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.4g"))
return ax
|
Python
| 0
|
@@ -238,16 +238,45 @@
n as sns
+
+from numpy import nan_to_num

defaul
@@ -587,16 +587,43 @@
ar=True,
+ hide_low=True,
cmap=de
@@ -633,28 +633,16 @@
lt_cmap,
-
fig_kws
@@ -806,24 +806,300 @@
, Z.max())

+    # Copy to avoid modifying original data
+    Z_plot = Z.copy()
+
+    if hide_low:
+        # Hide NaN values
+        Z_plot = nan_to_num(Z_plot)
+
+        # Increase values below cbar_min to cbar_min
+        below_min = Z_plot < cbar_min
+        Z_plot[below_min] = cbar_min
+
if fig_k
@@ -1338,16 +1338,21 @@
(X, Y, Z
+_plot
, norm=L
@@ -1388,16 +1388,39 @@
ar_max),
+
cmap=cm
|
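Assembled, the new `hide_low` handling in `heatmap` works on a copy, zeroes NaNs, and clamps sub-minimum values so `LogNorm` has nothing invalid to color:

```python
# Copy to avoid modifying original data
Z_plot = Z.copy()

if hide_low:
    # Hide NaN values
    Z_plot = nan_to_num(Z_plot)

    # Increase values below cbar_min to cbar_min
    below_min = Z_plot < cbar_min
    Z_plot[below_min] = cbar_min

im = ax.pcolormesh(X, Y, Z_plot, norm=LogNorm(vmin=cbar_min, vmax=cbar_max),
                   cmap=cmap)
```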
40d9ceb14c57c109e8f6371b1a4c677fa33e1669
|
Bump base package requirements (#10078)
|
snmp/setup.py
|
snmp/setup.py
|
# (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmp', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=15.7.0'
setup(
name='datadog-snmp',
version=ABOUT['__version__'],
description='The SNMP check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent snmp check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.snmp'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
|
Python
| 0
|
@@ -788,12 +788,12 @@
se>=
-15.7
+21.1
.0'
|
17bb20d7f7b236feb1c2eff9d71420e672c87431
|
Fix typo
|
djchoices/choices.py
|
djchoices/choices.py
|
import re
from django.core.exceptions import ValidationError]
try:
from collections import OrderedDict
except ImportError: # Py2.6, fall back to Django's implementation
from django.utils.datastructures import SortedDict as OrderedDict
try:
from django.utils import six
except ImportError:
import six
__all__ = ["ChoiceItem", "DjangoChoices", "C"]
### Support Functionality (Not part of public API ###
class Labels(dict):
def __getattribute__(self, name):
result = dict.get(self, name, None)
if result is not None:
return result
else:
raise AttributeError("Label for field %s was not found." % name)
def __setattr__(self, name, value):
self[name] = value
### End Support Functionality ###
class ChoiceItem(object):
"""
Describes a choice item. The label is usually the field name so label can
normally be left blank. Set a label if you need characters that are illegal
in a python identifier name (ie: "DVD/Movie").
"""
order = 0
def __init__(self, value=None, label=None, order=None):
self.value = value
if order:
self.order = order
else:
ChoiceItem.order += 1
self.order = ChoiceItem.order
self.label = label
# Shorter convenience alias.
C = ChoiceItem
class DjangoChoicesMeta(type):
"""
Metaclass that writes the choices class.
"""
name_clean = re.compile(r"_+")
def __new__(cls, name, bases, attrs):
class StaticProp(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, objtype):
return self.value
fields = {}
labels = Labels()
values = {}
choices = []
# Get all the fields from parent classes.
parents = [b for b in bases if isinstance(b, DjangoChoicesMeta)]
for kls in parents:
for field_name in kls._fields:
fields[field_name] = kls._fields[field_name]
# Get all the fields from this class.
for field_name in attrs:
val = attrs[field_name]
if isinstance(val, ChoiceItem):
fields[field_name] = val
fields = OrderedDict(sorted(fields.items(), key=lambda x: x[1].order))
for field_name in fields:
val = fields[field_name]
if isinstance(val, ChoiceItem):
if not val.label is None:
label = val.label
else:
label = cls.name_clean.sub(" ", field_name)
val0 = label if val.value is None else val.value
choices.append((val0, label))
attrs[field_name] = StaticProp(val0)
setattr(labels, field_name, label)
values[val.value or label] = label
else:
choices.append((field_name, val.choices))
attrs["choices"] = StaticProp(tuple(choices))
attrs["labels"] = labels
attrs["values"] = values
attrs["_fields"] = fields
return super(DjangoChoicesMeta, cls).__new__(cls, name, bases, attrs)
class DjangoChoices(six.with_metaclass(DjangoChoicesMeta)):
order = 0
choices = ()
labels = Labels()
values = {}
@classmethod
def validator(cls, value):
if value not in cls.values:
raise ValidationError('Select a valid choice. %(value)s is not '
'one of the available choices.')
|
Python
| 0.999999
|
@@ -53,17 +53,16 @@
ionError
-]

try:
|
0a0ebb7dd3267d727e6af598f6d964cd4d73fd69
|
Add TODO for multiple e-mail verification clicks.
|
eduid_signup/utils.py
|
eduid_signup/utils.py
|
from uuid import uuid4
from hashlib import sha256
import datetime
from pyramid.httpexceptions import HTTPInternalServerError
from eduid_signup.i18n import TranslationString as _
from eduid_signup.compat import text_type
def generate_verification_link(request):
code = text_type(uuid4())
link = request.route_url("email_verification_link", code=code)
return (link, code)
def verify_email_code(collection, code):
result = collection.find_and_modify(
{
"code": code,
"verified": False
}, {
"$set": {
"verified": True,
"verified_ts": datetime.utcnow(),
}
},
new=True,
safe=True
)
if result is None:
raise HTTPInternalServerError(_("Your email can't be verified now, "
"try it later"))
return True
def check_email_status(db, email):
"""
Check the email registration status.
If the email doesn't exist in database, then return 'new'.
If exists and it hasn't been verified, then return 'not_verified'.
If exists and it has been verified before, then return 'verified'.
"""
email = db.registered.find_one({'email': email})
if not email:
return 'new'
if email.get('verified', False):
return 'verified'
else:
return 'not_verified'
def generate_auth_token(shared_key, email, nonce, timestamp, generator=sha256):
"""
The shared_key is a secret between the two systems
The public word must must go through form POST or GET
"""
return generator("{0}|{1}|{2}|{3}".format(
shared_key, email, nonce, timestamp)).hexdigest()
|
Python
| 0
|
@@ -717,16 +717,257 @@

    )

+    # XXX need to handle user clicking on confirmation link more than
+    # once gracefully. Should show page saying that e-mail address was
+    # already confirmed, but NOT allow user to auth_token login to
+    # dashboard from that page.
+
if r
|
0019d3a4d512c4b7da2670872cd880bbe76edd80
|
Bump version to 0.4.0
|
arghelper.py
|
arghelper.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 The arghelper developers. All rights reserved.
# Project site: https://github.com/questrail/arghelper
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
"""Provide helper functions for argparse
"""
# Try to future proof code so that it's Python 3.x ready
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Standard module imports
import argparse
import sys
import os
# The version as used in the setup.py
__version__ = '0.3.2'
def extant_file(arg):
"""Facade for extant_item(arg, arg_type="file")
"""
return extant_item(arg, "file")
def extant_dir(arg):
"""Facade for extant_item(arg, arg_type="directory")
"""
return extant_item(arg, "directory")
def extant_item(arg, arg_type):
"""Determine if parser argument is an existing file or directory.
This technique comes from http://stackoverflow.com/a/11541450/95592
and from http://stackoverflow.com/a/11541495/95592
Args:
arg: parser argument containing filename to be checked
arg_type: string of either "file" or "directory"
Returns:
If the file exists, return the filename or directory.
Raises:
If the file does not exist, raise a parser error.
"""
if arg_type == "file":
if not os.path.isfile(arg):
raise argparse.ArgumentError(
None,
"The file {arg} does not exist.".format(arg=arg))
else:
# File exists so return the filename
return arg
elif arg_type == "directory":
if not os.path.isdir(arg):
raise argparse.ArgumentError(
None,
"The directory {arg} does not exist.".format(arg=arg))
else:
# Directory exists so return the directory name
return arg
def parse_config_input_output(args=sys.argv):
"""Parse the args using the config_file, input_dir, output_dir pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Process the input files using the given config')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
parser.add_argument(
'input_dir',
help='Directory containing the input files.',
metavar='DIR', type=extant_dir)
parser.add_argument(
'output_dir',
help='Directory where the output files should be saved.',
metavar='DIR', type=extant_dir)
return parser.parse_args(args[1:])
def parse_config(args=sys.argv):
"""Parse the args using the config_file pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Read in the config file')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
return parser.parse_args(args[1:])
|
Python
| 0.000001
|
@@ -666,11 +666,11 @@
'0.
-3.2
+4.0
'
|
4d0caec3d3c42c7d1e342c934e987d9f602cc26b
|
return value
|
scripts/geodata/openaddresses/formatter.py
|
scripts/geodata/openaddresses/formatter.py
|
import csv
import os
import six
import yaml
from geodata.address_expansions.abbreviations import abbreviate
from geodata.address_expansions.gazetteers import street_types_gazetteer, unit_types_gazetteer
from geodata.address_formatting.formatter import AddressFormatter
from geodata.addresses.components import AddressComponents
from geodata.csv_utils import tsv_string, unicode_csv_reader
this_dir = os.path.realpath(os.path.dirname(__file__))
OPENADDRESSES_PARSER_DATA_CONFIG = os.path.join(this_dir, os.pardir, os.pardir, os.pardir,
'resources', 'parser', 'data_sets', 'openaddresses.yaml')
OPENADDRESS_FORMAT_DATA_TAGGED_FILENAME = 'openaddresses_formatted_addresses_tagged.tsv'
OPENADDRESS_FORMAT_DATA_FILENAME = 'openaddresses_formatted_addresses.tsv'
class OpenAddressesFormatter(object):
def __init__(self, language_rtree):
self.language_rtree = language_rtree
config = yaml.load(open(OPENADDRESSES_PARSER_DATA_CONFIG))
self.config = config['global']
self.country_configs = config['countries']
self.formatter = AddressFormatter()
def get_property(self, key, *configs):
for config in configs:
value = config.get(key, None)
if value is not None:
return value
return None
@staticmethod
def validate_postcode(postcode):
return not all((c == '0' for c in postcode))
openaddresses_validators = {
AddressFormatter.POSTCODE: validate_postcode
}
def formatted_addresses(self, path, configs, tag_components=True):
abbreviate_street_prob = self.get_property('abbreviate_street_probability', *configs)
separate_street_prob = self.get_property('separate_street_probability', *configs) or 0.0
abbreviate_unit_prob = self.get_property('abbreviate_unit_probability', *configs)
separate_unit_prob = self.get_property('separate_unit_probability', *configs) or 0.0
field_map = self.get_property('field_map', *configs)
if not field_map:
return
field_map = {f['field_name']: f['component'] for f in field_map}
f = open(path)
reader = unicode_csv_reader(f)
headers = reader.next()
header_indices = {i: field_map[k] for i, k in enumerate(headers) if k in field_map}
latitude_index = headers.index('LAT')
longitude_index = headers.index('LON')
for row in reader:
try:
latitude = float(row[latitude_index])
longitude = float(row[longitude_index])
except (ValueError, TypeError):
continue
components = {}
for i, key in six.iteritems(header_indices):
value = row[i].strip()
if not value:
continue
validator = self.openaddresses_validators.get(key, None)
if validator is not None and not validator(value):
continue
components[key] = value
if components:
country, candidate_languages, language_props = self.language_rtree.country_and_languages(latitude, longitude)
if not (country and candidate_languages):
continue
if not language:
language = AddressComponents.address_language(components, candidate_languages)
street = components.get(AddressFormatter.ROAD, None)
if street is not None:
street = abbreviate(street_types_gazetteer, street, language,
abbreviate_prob=abbreviate_street_prob,
separate_prob=separate_street_prob)
components[AddressFormatter.ROAD] = street
unit = components.get(AddressFormatter.UNIT, None)
if unit is not None:
unit = abbreviate(unit_types_gazetteer, unit, language,
abbreviate_prob=abbreviate_unit_prob,
separate_prob=separate_unit_prob)
formatted = self.formatter.format_address(components, country,
language=language, tag_components=tag_components)
yield formatted
def build_training_data(self, base_dir, out_dir, tag_components=True):
if tag_components:
formatted_tagged_file = open(os.path.join(out_dir, OPENADDRESS_FORMAT_DATA_TAGGED_FILENAME), 'w')
writer = csv.writer(formatted_tagged_file, 'tsv_no_quote')
else:
formatted_tagged_file = open(os.path.join(out_dir, OPENADDRESS_FORMAT_DATA_FILENAME), 'w')
writer = csv.writer(formatted_tagged_file, 'tsv_no_quote')
i = 0
for country, config in six.iteritems(self.country_configs):
for file_props in config.get('files', []):
filename = file_props['filename']
path = os.path.join(base_dir, country_code, filename)
configs = (file_props, config, self.config)
for formatted_address in self.build_training_data_for_file(path, configs, tag_components=tag_components):
if formatted_address and formatted_address.strip():
continue
formatted_address = tsv_string(formatted_address)
if not formatted_address or not formatted_address.strip():
continue
if tag_components:
row = (language, country, formatted_address)
else:
row = (formatted_address,)
writer.writerow(row)
i += 1
if i % 1000 == 0 and i > 0:
print('did {} formatted addresses'.format(i))
for subdir, subdir_config in six.iteritems(config.get('subdirs', {})):
for file_props in config.get('files', []):
filename = file_props['filename']
path = os.path.join(base_dir, country, subdir, filename)
configs = (file_props, subdir_config, config, self.config)
for formatted_address in self.build_training_data_for_file(path, configs, tag_components=tag_components):
if formatted_address and formatted_address.strip():
continue
formatted_address = tsv_string(formatted_address)
if not formatted_address or not formatted_address.strip():
continue
if tag_components:
row = (language, country, formatted_address)
else:
row = (formatted_address,)
writer.writerow(row)
i += 1
if i % 1000 == 0 and i > 0:
print('did {} formatted addresses'.format(i))
|
Python
| 0.000007
|
@@ -4382,16 +4382,36 @@
yield
+ (language, country,
formatt
@@ -4412,16 +4412,17 @@
ormatted
+)

    de
@@ -5133,13 +5133,8 @@
ntry
-_code
, fi
@@ -5212,32 +5212,51 @@
for
+ language, country,
formatted_addre
@@ -6381,16 +6381,35 @@
for
+ language, country,
formatt
|
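Applied, `formatted_addresses` yields tuples instead of bare strings, the undefined `country_code` becomes `country`, and both consumer loops unpack the tuples; the key lines become:

```python
# formatted_addresses() now yields tuples:
yield (language, country, formatted)

# ...and in build_training_data():
path = os.path.join(base_dir, country, filename)  # country_code -> country
for language, country, formatted_address in self.build_training_data_for_file(
        path, configs, tag_components=tag_components):
    ...
```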
46483b7e551e5180ff36d6892221e4b583f107ac
|
Use HTTPS for oauth/authenticate step.
|
le_social/twitter/views.py
|
le_social/twitter/views.py
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.views import generic
from twitter import Twitter, OAuth, TwitterError
from twitter.oauth_dance import parse_oauth_tokens
class OAuthMixin(object):
consumer_key = None
consumer_secret = None
def get_consumer_key(self):
if self.consumer_key is not None:
return self.consumer_key
if hasattr(settings, 'CONSUMER_KEY'):
return settings.CONSUMER_KEY
else:
raise ImproperlyConfigured("Set settings.CONSUMER_KEY or the "
"consumer_key attribute or "
"implement get_consumer_key")
def get_consumer_secret(self):
if self.consumer_secret is not None:
return self.consumer_secret
if hasattr(settings, 'CONSUMER_SECRET'):
return settings.CONSUMER_SECRET
else:
raise ImproperlyConfigured("Set settings.CONSUMER_SECRET or the "
"consumer_secret attribute or "
"implement get_consumer_secret")
class Authorize(generic.View, OAuthMixin):
"""
A base class for the authorize view. Just sets the request token
in the session and redirects to twitter.
"""
def get(self, request, force_login=False, *args, **kwargs):
callback = self.build_callback()
oauth = OAuth('', '',
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.request_token(oauth_callback=callback))
request.session['request_token'] = (oauth.token, oauth.token_secret)
url = 'http://api.twitter.com/oauth/authenticate?oauth_token=%s' % oauth.token
if force_login:
url += '&force_login=true'
return redirect(url)
def build_callback(self):
""" Override this if you'd like to specify a callback URL"""
return None
class Callback(generic.View, OAuthMixin):
"""
A base class for the return callback. Subclasses must define:
- error(error_msg, exception=None): what to do when
something goes wrong? Must return an HttpResponse
- success(auth): what to do on successful auth? Do
some stuff with the twitter.OAuth object and return
an HttpResponse
"""
def get(self, request, *args, **kwargs):
verifier = request.GET.get('oauth_verifier', None)
if verifier is None:
return self.error('No verifier code')
if not 'request_token' in request.session:
return self.error('No request token found in the session')
request_token = request.session.pop('request_token')
request.session.modified = True
oauth = OAuth(request_token[0], request_token[1],
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
try:
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.access_token(oauth_verifier=verifier))
except TwitterError as e:
return self.error('Failed to get an access token')
return self.success(oauth)
def success(self, auth):
"""
Twitter authentication successful, do some stuff with his key.
"""
raise NotImplementedError("You need to provide an implementation of "
"success(auth)")
def error(self, message, exception=None):
"""
Meh. Something broke.
"""
raise NotImplementedError("You need to provide an implementation of "
"error(message, exception=None)")
|
Python
| 0
|
@@ -1912,16 +1912,17 @@
= 'http
+s
://api.t
|
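The fix is a single character (`+s` after `'http`), so the request token handoff happens over TLS:

```python
url = 'https://api.twitter.com/oauth/authenticate?oauth_token=%s' % oauth.token
```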
f9141964ffa4ed36420b8ba564407c2ca661ac46
|
edit on glitter
|
glitter.py
|
glitter.py
|
from willie.module import commands
import random
@commands('glitter')
def ans(bot, trigger):
bot.reply("*'-.*\(^O^)/*.-'*")
|
Python
| 0
|
@@ -96,12 +96,10 @@
bot.
-repl
+sa
y("*
|
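Decoded, `reply` becomes `say`, so the glitter is posted to the channel rather than addressed as a reply; the whole module after the change:

```python
from willie.module import commands
import random

@commands('glitter')
def ans(bot, trigger):
    bot.say("*'-.*\(^O^)/*.-'*")
```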
39c9e7dd34830eb70ce412c4feb1e5eda66aa08a
|
Add history to console
|
inbox.py
|
inbox.py
|
#!/usr/bin/env python
import argparse
import signal
import sys
import os
import subprocess
from time import sleep
import logging as log
# Make logging prettified
import tornado.options
tornado.options.parse_command_line()
DEFAULT_PORT = 8888
PATH_TO_MONGO_DATABSE = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "database/mongo/")
# Trying to get this to work
def install(args):
print "\033[95mCreating virtualenv...\033[0m"
os.system('virtualenv --clear .')
os.system('virtualenv --distribute --no-site-packages .')
print "\033[95mActivating virtualenv...\033[0m"
os.system('source bin/activate')
print "\033[95mInstalling dependencies...\033[0m"
os.system('pip install -r requirements.txt')
print "\033[95mDone!\033[0m"
start()
def start_mongo():
# Start Mongo
log.info("Starting Mongo. DB at %s" % PATH_TO_MONGO_DATABSE)
if not os.path.exists(PATH_TO_MONGO_DATABSE):
os.makedirs(PATH_TO_MONGO_DATABSE)
args = ['mongod', '--dbpath', PATH_TO_MONGO_DATABSE, '--fork', '--logpath=/tmp/inbox-mongo.log']
mongod_process = subprocess.Popen(args, stdout=subprocess.PIPE, stdin=subprocess.PIPE, stderr=subprocess.STDOUT)
mongod_process.communicate()
sleep(1) # for mongo
def start(args=None):
if not args:
port = DEFAULT_PORT
else:
port = args.port
commit = subprocess.check_output(["git", "describe", "--tags"])
print """
\033[94m Welcome to... \033[0m\033[1;95m
_____ _
|_ _| | |
| | _ __ | |__ _____ __
| | | '_ \| '_ \ / _ \ \/ /
_| |_| | | | |_) | (_) > <
|_____|_| |_|_.__/ \___/_/\_\\ \033[0m
""" + commit + """
Use CTRL-C to stop.
"""
# consider doing this to delete the database
# import shutil
# shutil.rmtree('/db')
try: start_mongo()
except Exception, e:
raise e
stop(None)
# Start Tornado
from server.app import startserver
try:
startserver(port)
except Exception, e:
raise e
stop(None)
def stop(args):
print """
\033[91m Cleaning up...
\033[0m"""
from server.app import stopserver
stopserver()
# Stop mongo
log.info("Stopping Mongo.")
os.system("pkill mongod")
print """
\033[91m Stopped.
\033[0m"""
# os.system("stty echo")
sys.exit(0)
def console(args):
import code
import pymongo
db = pymongo.MongoClient().test
code.interact(local={'db': db},
banner='Python %s on %s\nInbox console'
% (sys.version.replace('\n', ' '), sys.platform))
def signal_handler(signal, frame):
stop(None)
def main():
signal.signal(signal.SIGINT, signal_handler)
parser = argparse.ArgumentParser(description="Inbox App")
subparsers = parser.add_subparsers()
parser_install = subparsers.add_parser('install')
parser_install.set_defaults(func=install)
parser_start = subparsers.add_parser('start')
parser_start.add_argument('--port', help='Port to run the server', required=False, default=DEFAULT_PORT)
parser_start.set_defaults(func=start)
parser_stop = subparsers.add_parser('stop')
parser_stop.set_defaults(func=stop)
parser_console = subparsers.add_parser('console')
parser_console.set_defaults(func=console)
args = parser.parse_args()
args.func(args)
if __name__=="__main__":
main()
|
Python
| 0
|
@@ -2426,67 +2426,467 @@
ort
-code
-    import pymongo
-    db = pymongo.MongoClient().test
+pymongo
+    env = {'db': pymongo.MongoClient().test}
+
+    # Based on http://docs.python.org/2/tutorial/interactive.html
+    # except it's 2013 and we have closures.
+    import atexit
+    import readline
+    import rlcompleter
+    history_path = os.path.expanduser('~/.pyhistory.inbox')
+    if os.path.exists(history_path):
+        readline.read_history_file(history_path)
+    atexit.register(lambda: readline.write_history_file(history_path))
+
+    import code

@@ -2910,18 +2910,11 @@
cal=
-{'db': db}
+env
,
|
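Assembled, the new `console()` keeps a persistent readline history file across sessions; a sketch of the resulting function, assuming the module-level `os` and `sys` imports already in the file (indentation inferred):

```python
def console(args):
    import pymongo
    env = {'db': pymongo.MongoClient().test}

    # Based on http://docs.python.org/2/tutorial/interactive.html
    # except it's 2013 and we have closures.
    import atexit
    import readline
    import rlcompleter
    history_path = os.path.expanduser('~/.pyhistory.inbox')
    if os.path.exists(history_path):
        readline.read_history_file(history_path)
    atexit.register(lambda: readline.write_history_file(history_path))

    import code
    code.interact(local=env,
                  banner='Python %s on %s\nInbox console'
                  % (sys.version.replace('\n', ' '), sys.platform))
```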
4c64f695bfd9b5ce3d3d1352672aab5b0798f353
|
Add instance of nstat_test_run
|
stress_test/test_type.py
|
stress_test/test_type.py
|
# Copyright (c) 2016 Intracom S.A. Telecom Solutions. All rights reserved.
#
# This program and the accompanying materials are made available under the
# terms of the Eclipse Public License v1.0 which accompanies this distribution,
# and is available at http://www.eclipse.org/legal/epl-v10.html
""" Controller Class- All controller-related functionality is here"""
import json
import logging
import os
import time
import stress_test.controller
import stress_test.emulator
import stress_test.nb_generator
import stress_test.test_type
import stress_test.test_run
import sys
import util.file_ops
import util.netutil
import util.process
import queue
class TestType:
def __init__(self, args):
"""
"""
self.nstat_test_type = args.test_type
def load_test_conf(self, args):
""" Loading test configuration for NSTAT experiment. Parsing
configuration options from JSON input file
:param args: ArgumentParser object containing user specified
parameters (i.e test type, controller base directory, generator base
directory) when running NSTAT
:returns: test_config:
:rtype: test_config: python object resulting from a deserialized file
like object containing a json document
:type args: ArgumentParser object
"""
json_conf = {}
with open(args.json_config) as conf_file:
json_conf = json.load(conf_file)
ctrl_base_dir = args.ctrl_base_dir
sb_emu_base_dir = args.sb_emu_base_dir
controller = stress_test.controller.Controller.new(ctrl_base_dir,
json_conf)
sb_emulator = stress_test.emulator.SBEmu.new(sb_emu_base_dir,
json_conf)
'''
if hasattr(args, 'nb_emu_base_dir'):
nb_emu_base_dir = args.nb_emu_base_dir
nb_emulator = stress_test.nb_generator.NBgen(nb_emu_base_dir,
json_conf,
controller,
sb_emulator)
'''
return json_conf
def set_test_log_level(self, args):
"""Setting log level for NSTAT experiment
:param args: ArgumentParser object containing user specified
parameters (i.e test type, controller base directory, generator base
directory) when running NSTAT
:type args: ArgumentParser object
"""
logging_format = '[%(asctime)s %(levelname)7s ] %(message)s'
if args.logging_level == 'INFO':
logging.basicConfig(level=logging.INFO, stream=sys.stdout,
format=logging_format)
elif args.logging_level == 'ERROR':
logging.basicConfig(level=logging.ERROR, stream=sys.stdout,
format=logging_format)
else:
logging.basicConfig(level=logging.DEBUG, stream=sys.stdout,
format=logging_format)
if args.log_file:
open(args.log_file, 'a').close()
file_logging_handler = logging.FileHandler(filename=args.log_file,
mode='w')
if args.logging_level == 'INFO':
file_logging_handler.setLevel(level=logging.INFO)
elif args.logging_level == 'ERROR':
file_logging_handler.setLevel(level=logging.ERROR)
else:
file_logging_handler.setLevel(level=logging.DEBUG)
def test_selector(self, args):
"""
"""
self.set_test_log_level(args)
json_conf = self.load_test_conf(args)
# compose full NSTAT test type depending on cli argument test_type and
# emulator type
sb_emulator_name = json_conf['sb_emulator_name']
nstat_test_type_run = args.test_type + '_' + sb_emulator_name.lower()
# Run the test
if nstat_test_type_run == 'sb_active_scalability_mtcbench':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
stress_test.test_run.sb_active_scalability_cbench_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir)
# sb_active_stability_mtcbench
elif nstat_test_type_run == 'sb_active_stability_mtcbench':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test:{0}'.
format(nstat_test_type_run))
exit()
sb_active_stability_cbench.sb_active_stability_cbench_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir)
elif nstat_test_type_run == 'sb_active_scalability_multinet':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
exit()
oftraf_path = get_oftraf_path()
sb_active_scalability_multinet.sb_active_scalability_multinet_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir,
oftraf_path)
elif nstat_test_type_run == 'sb_idle_scalability_mtcbench':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
exit()
sb_idle_scalability_cbench.sb_idle_scalability_cbench_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir)
elif nstat_test_type_run == 'sb_idle_scalability_multinet':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
exit()
sb_idle_scalability_multinet.sb_idle_scalability_multinet_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir)
elif nstat_test_type_run == 'sb_idle_stability_multinet':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
exit()
oftraf_path = get_oftraf_path()
sb_idle_stability_multinet.sb_idle_stability_multinet_run(
args.json_output,
args.ctrl_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir,
oftraf_path)
elif nstat_test_type_run == 'nb_active_scalability_multinet':
if not args.bypass_test:
logging.info('[nstat_orchestrator] running test: {0}'.
format(nstat_test_type_run))
exit()
nb_active_scalability_multinet.nb_active_scalability_multinet_run(
args.json_output,
args.ctrl_base_dir,
args.nb_gen_base_dir,
args.sb_gen_base_dir,
json_conf,
args.output_dir,
args.logging_level)
else:
logging.error('[nstat_orchestrator] not valid test configuration')
exit(0)
|
Python
| 0.000836
|
@@ -4062,16 +4062,76 @@
.lower()
+%0A nstat_test_run = stress_test.test_run.TestRun(args)
%0A%0A
@@ -4398,28 +4398,22 @@
+n
st
-ress_test.
+at_
test_run
|
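Editor's note: the stray exit() calls in the record above mean most branches never reach their runner, and the dispatch is a long if/elif chain. A minimal sketch of a table-driven alternative, assuming the runner callables named in the original file (signatures vary, e.g. some also take an oftraf_path, so functools.partial could bind the extras):

import logging
import stress_test.test_run as test_run

# Hypothetical dispatch table; keys mirror the composed test-type names.
RUNNERS = {
    'sb_active_scalability_mtcbench':
        test_run.sb_active_scalability_cbench_run,
    # ... one entry per supported test type ...
}

def run_selected_test(name, args, json_conf):
    runner = RUNNERS.get(name)
    if runner is None:
        logging.error('[nstat_orchestrator] invalid test configuration')
        return
    runner(args.json_output, args.ctrl_base_dir, args.sb_gen_base_dir,
           json_conf, args.output_dir)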
3a83ff315db6f34fb8e656309580060cf708b8a1
|
Refactor request body
|
request.py
|
request.py
|
'''
Code adapted from https://westus.dev.cognitive.microsoft.com/docs/services/TextAnalytics.V2.0/operations/56f30ceeeda5650db055a3c9
'''
import http.client, urllib.request, urllib.parse, urllib.error
import script
def main():
'''
Sends a single POST request with a test bit of text.
'''
headers = headers()
params = urllib.parse.urlencode({})
body = {
"documents": [
{
"language": "en",
"id": "1",
"text": "I had a wonderful experience! The rooms were wonderful and the staff were helpful."
}
]
}
try:
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, str(body), headers)
response = conn.getresponse()
data = response.read()
print(data) # score is on a scale from 0 to 1, with 0 being the most negative sentiment and 1 being the most positive sentiment. Includes some metadata.
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def generate_headers():
api_key = script.get_api_key()
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
return headers
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -207,16 +207,35 @@
t script
+%0Aimport numpy as np
%0A%0Adef ma
@@ -385,205 +385,261 @@
-body = %7B%0A %09%22documents%22: %5B%0A %09%09%7B%0A %09%09%22language%22: %22en%22,%0A %09%09%22id%22: %221%22,%0A %09%09%22text%22: %22I had a wonderful experience! The rooms were wonderful and the staff were helpful.%22%0A %09%09%7D%0A %09%5D%0A %7D
+sample_text = 'I had a wonderful experience! The rooms were wonderful and the staff were helpful.' # from default given at https://www.microsoft.com/cognitive-services/en-us/text-analytics-api%0A body = body_from_string_vectors(np.array(%5Bsample_text%5D))
%0A
@@ -1080,16 +1080,16 @@
n as e:%0A
-
%09pri
@@ -1139,16 +1139,417 @@
rror))%0A%0A
+def body_from_string_vectors(vector):%0A '''%0A Takes in a numpy vector of strings, each string representing a separate quote from someone.%0A '''%0A body_documents_list = %5B%5D%0A for string in vector:%0A body_documents_list += %7B%0A 'language': 'en',%0A 'id': '1',%0A 'text': string%0A %7D%0A body = %7B%0A 'documents': %7B%0A body_documents_list%0A %7D%0A %7D%0A%0A
def gene
|
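Editor's note: the body_from_string_vectors added in the diff above has two latent bugs: list += dict extends the list with the dict's keys rather than appending the dict itself, and every document gets id '1' (separately, the old main() calls headers() although only generate_headers() is defined). A corrected sketch with the same payload shape the Text Analytics v2.0 sentiment endpoint expects; the function name here is hypothetical:

def body_from_strings(strings):
    # One document per quote, with unique string ids.
    documents = []
    for i, text in enumerate(strings, start=1):
        documents.append({
            'language': 'en',
            'id': str(i),
            'text': text,
        })
    return {'documents': documents}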
1e16bf4202cc50bc21552e5d5504c58ab792084e
|
REFACTOR: repr of 'Optional' validator now done on the Optional object.
|
strictyaml/validators.py
|
strictyaml/validators.py
|
from ruamel.yaml.comments import CommentedSeq, CommentedMap
from strictyaml.exceptions import YAMLValidationError
from strictyaml.exceptions import raise_exception
from strictyaml.representation import YAML
import sys
if sys.version_info[0] == 3:
unicode = str
class Optional(object):
def __init__(self, key):
self.key = key
class Validator(object):
def __or__(self, other):
return OrValidator(self, other)
def __call__(self, chunk):
return self.validate(chunk)
class OrValidator(Validator):
def __init__(self, validator_a, validator_b):
self._validator_a = validator_a
self._validator_b = validator_b
def validate(self, chunk):
try:
return self._validator_a(chunk)
except YAMLValidationError:
return self._validator_b(chunk)
def __repr__(self):
return u"{0} | {1}".format(repr(self._validator_a), repr(self._validator_b))
class MapPattern(Validator):
def __init__(self, key_validator, value_validator):
self._key_validator = key_validator
self._value_validator = value_validator
def validate(self, chunk):
return_snippet = chunk.contents
if not isinstance(return_snippet, CommentedMap):
raise_exception(
"when expecting a mapping",
"found non-mapping",
chunk,
)
else:
for key, value in chunk.contents.items():
valid_key = self._key_validator(chunk.key(key))
valid_val = self._value_validator(chunk.val(key))
return_snippet[valid_key] = valid_val
del return_snippet[valid_key]
return_snippet[valid_key] = self._value_validator(
chunk.val(key)
)
return YAML(return_snippet, chunk=chunk, validator=self)
def __repr__(self):
return u"MapPattern({0}, {1})".format(
repr(self._key_validator), repr(self._value_validator)
)
class Map(Validator):
def __init__(self, validator):
self._validator = validator
self._validator_dict = {
key.key if isinstance(key, Optional) else key: value for key, value in validator.items()
}
self._required_keys = [key for key in validator.keys() if not isinstance(key, Optional)]
def __repr__(self):
return u"Map({{{0}}})".format(', '.join([
'{0}: {1}'.format(
'Optional("{0}")'.format(key.key) if type(key) is Optional else '"{0}"'.format(key),
repr(value)
) for key, value in self._validator.items()
]))
def validate(self, chunk):
return_snippet = chunk.contents
if type(chunk.contents) != CommentedMap:
raise_exception(
"when expecting a mapping",
"found non-mapping",
chunk,
)
else:
found_keys = set()
for key, value in chunk.contents.items():
if key not in self._validator_dict.keys():
raise_exception(
u"while parsing a mapping",
u"unexpected key not in schema '{0}'".format(unicode(key)),
chunk.key(key)
)
found_keys.add(key)
validator = self._validator_dict[key]
del return_snippet[key]
return_snippet[
YAML(key, chunk=chunk.key(key), validator=validator)
] = validator(chunk.val(key))
if not set(self._required_keys).issubset(found_keys):
raise_exception(
u"while parsing a mapping",
u"required key(s) '{0}' not found".format(
"', '".join(sorted(list(set(self._required_keys).difference(found_keys))))
),
chunk,
)
return YAML(return_snippet, chunk=chunk, validator=self)
class Seq(Validator):
def __init__(self, validator):
self._validator = validator
def __repr__(self):
return "Seq({0})".format(repr(self._validator))
def validate(self, chunk):
return_snippet = chunk.contents
if not isinstance(return_snippet, CommentedSeq):
raise_exception(
"when expecting a sequence",
"found non-sequence",
chunk,
)
else:
for i, item in enumerate(chunk.contents):
return_snippet[i] = self._validator(chunk.index(i))
return YAML(return_snippet, chunk=chunk, validator=self)
class FixedSeq(Validator):
def __init__(self, validators):
self._validators = validators
def __repr__(self):
return "FixedSeq({0})".format(repr(self._validators))
def validate(self, chunk):
return_snippet = chunk.contents
if not isinstance(return_snippet, CommentedSeq):
raise_exception(
"when expecting a sequence of {0} elements".format(len(self._validators)),
"found non-sequence",
chunk,
)
else:
if len(self._validators) != len(chunk.contents):
raise_exception(
"when expecting a sequence of {0} elements".format(len(self._validators)),
"found a sequence of {0} elements".format(len(chunk.contents)),
chunk,
)
for i, item_and_val in enumerate(zip(chunk.contents, self._validators)):
item, validator = item_and_val
return_snippet[i] = validator(chunk.index(i))
return YAML(return_snippet, chunk=chunk, validator=self)
class UniqueSeq(Validator):
def __init__(self, validator):
self._validator = validator
def __repr__(self):
return "UniqueSeq({0})".format(repr(self._validator))
def validate(self, chunk):
return_snippet = chunk.contents
if type(chunk.contents) != CommentedSeq:
raise_exception(
"when expecting a unique sequence",
"found non-sequence",
chunk,
)
else:
existing_items = set()
for i, item in enumerate(chunk.contents):
if item in existing_items:
raise_exception(
"while parsing a sequence",
"duplicate found",
chunk
)
else:
existing_items.add(item)
return_snippet[i] = self._validator(chunk.index(i))
return YAML(return_snippet, chunk=chunk, validator=self)
|
Python
| 0
|
@@ -338,16 +338,91 @@
= key%0A%0A
+ def __repr__(self):%0A return u'Optional(%22%7B0%7D%22)'.format(self.key)%0A
%0Aclass V
@@ -2571,86 +2571,12 @@
-'Optional(%22%7B0%7D%22)'.format(key.key) if type(key) is Optional else '%22%7B0%7D%22'.format
+repr
(key
@@ -2605,16 +2605,17 @@
r(value)
+,
%0A
|
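Editor's note: a subtle consequence of delegating to repr(key) in Map.__repr__ above is that plain string keys are now rendered with Python's default quoting ('name') instead of the explicit double quotes ("name") the old format string produced, so any test asserting the previous output would need updating. For illustration:

repr(Optional('name'))   # -> 'Optional("name")'
repr('name')             # -> "'name'" (the old code emitted '"name"')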
c5d68743bf6392ae5e4c6bd80ed6727bfebf77fd
|
Solve basic/string2.py
|
basic/string2.py
|
basic/string2.py
|
#!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
return
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
return
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
return
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
|
Python
| 0.998948
|
@@ -525,38 +525,132 @@
g(s):%0A
-# +++your code here+++
+result=s%0A if len(s)%3E=3:%0A if s%5B-3:%5D=='ing':%0A result=result+'ly'%0A else :%0A result=result+'ing'
%0A retur
@@ -646,25 +646,31 @@
ng'%0A return
-%0A
+ result
%0A%0A# E. not_b
@@ -959,38 +959,184 @@
d(s):%0A
-# +++your code here+++
+notindex=s.find('not')%0A badindex=s.find('bad')%0A result=s%0A if ((notindex%3E-1) & (badindex%3E-1))&(notindex%3Cbadindex):%0A result=s.replace(s%5Bnotindex:badindex+3%5D,'good')
%0A retur
@@ -1132,24 +1132,31 @@
d')%0A return
+ result
%0A%0A%0A# F. fron
@@ -1542,39 +1542,176 @@
:%0A
-# +++your code here+++%0A return
+ahalf=int(round(len(a)/2.0))%0A bhalf=int(round(len(b)/2.0))%0A afront=a%5B:ahalf%5D%0A aback=a%5Bahalf:%5D%0A bfront=b%5B:bhalf%5D%0A bback=b%5Bbhalf:%5D%0A return afront+bfront+aback+bback
%0A%0A%0A#
|
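Editor's note: the front_back solution above relies on round(), and round(2.5) returns 3 under Python 2 but 2 under Python 3's banker's rounding, so the odd-length split would come out wrong if this Python 2 file were ever ported. A portable sketch of the half-length calculation:

def front_half_len(s):
    # Integer arithmetic: the extra character of an odd-length
    # string goes to the front half.
    return (len(s) + 1) // 2

front_half_len('abcde')   # -> 3 on both Python 2 and Python 3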
6f7a75dd98c1f2cc82fe357a3c7cd2147f70db9d
|
add download_date to API download of single and multiple URLs
|
newsplease/__init__.py
|
newsplease/__init__.py
|
import os
import sys
import urllib
from six.moves import urllib
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from newsplease.pipeline.extractor import article_extractor
from newsplease.crawler.items import NewscrawlerItem
from dotmap import DotMap
from newsplease.pipeline.pipelines import ExtractedInformationStorage
from newsplease.crawler.simple_crawler import SimpleCrawler
class NewsPlease:
"""
Access news-please functionality via this interface
"""
@staticmethod
def from_warc(warc_record):
"""
Extracts relevant information from a WARC record. This function does not invoke scrapy but only uses the article
extractor.
:return:
"""
html = str(warc_record.raw_stream.read())
url = warc_record.rec_headers.get_header('WARC-Target-URI')
download_date = warc_record.rec_headers.get_header('WARC-Date')
article = NewsPlease.from_html(html, url=url, download_date=download_date)
return article
@staticmethod
def from_html(html, url=None, download_date=None):
"""
Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
to extract the publishing date and title.
:param html:
:param url:
:return:
"""
extractor = article_extractor.Extractor(
['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])
title_encoded = ''.encode()
if not url:
url = ''
# if an url was given, we can use that as the filename
filename = urllib.parse.quote_plus(url) + '.json'
item = NewscrawlerItem()
item['spider_response'] = DotMap()
item['spider_response'].body = html
item['url'] = url
item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
item['html_title'] = title_encoded
item['rss_title'] = title_encoded
item['local_path'] = None
item['filename'] = filename
item['download_date'] = download_date
item['modified_date'] = None
item = extractor.extract(item)
tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
# final_article = DotMap(tmp_article)
return final_article
@staticmethod
def from_url(url):
"""
Crawls the article from the url and extracts relevant information.
:param url:
:return: A dict containing all the information of the article. Else, None.
"""
articles = NewsPlease.from_urls([url])
if url in articles.keys():
return articles[url]
else:
return None
@staticmethod
def from_urls(urls):
"""
Crawls articles from the urls and extracts relevant information.
:param urls:
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
results = {}
if len(urls) == 0:
pass
elif len(urls) == 1:
url = urls[0]
html = SimpleCrawler.fetch_url(url)
results[url] = NewsPlease.from_html(html, url)
else:
results = SimpleCrawler.fetch_urls(urls)
for url in results:
results[url] = NewsPlease.from_html(results[url], url)
return results
@staticmethod
def from_file(path):
"""
Crawls articles from the urls and extracts relevant information.
:param path: path to file containing urls (each line contains one URL)
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
urls = list(filter(None, content))
return NewsPlease.from_urls(urls)
|
Python
| 0
|
@@ -1,12 +1,28 @@
+import datetime%0A
import os%0Aim
@@ -3273,16 +3273,94 @@
lts = %7B%7D
+%0A download_date = datetime.datetime.now().strftime('%25Y-%25m-%25d %25H:%25M:%25S')
%0A%0A
@@ -3557,16 +3557,31 @@
tml, url
+, download_date
)%0A
@@ -3673,16 +3673,16 @@
esults:%0A
-
@@ -3742,16 +3742,31 @@
rl%5D, url
+, download_date
)%0A%0A
|
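Editor's note: the diff above captures a single download_date before any fetching starts, so every article in one from_urls batch shares the same stamp. A quick check of the format used (assumption: naive local time, second resolution, no timezone attached):

import datetime
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
# -> e.g. 2017-05-04 13:37:00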
8fa83b41e1c391e33324d496b8a03ebb5e23d6b8
|
call commands with proper arguments
|
bastion/shell.py
|
bastion/shell.py
|
"""
Bastion shell.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
from bastion.filesystem import FileSystem
from bastion.commands import *
from bastion.validators import validate_command
from bastion.validators import validate_mkfs
def accept_input(validator=None):
"""
Accept input and validate it with the validator passed in, if any.
:param validator:
:return: str
"""
try:
if validator is None:
text = raw_input("bastion> ")
else:
text = raw_input("bastion> ")
if not validator(text):
return None
except KeyboardInterrupt:
print("Exitting!")
sys.exit(0)
return text
class Shell(object):
"""
Main class for the shell.
"""
def __init__(self):
self.current_line = ""
self.file_system = self.create_filesystem_object()
self.current_directory = self.file_system.root
@staticmethod
def create_filesystem_object():
"""
Create an instance of the FileSystem.
:return: FileSystem
"""
file_system = FileSystem()
return file_system
def run(self):
"""
Execution loop of the shell.
:return:
"""
while True:
print("Type mkfs to create a new file system.")
text = accept_input(validator=validate_mkfs)
if text is None:
continue
if text == 'mkfs':
MKFS(self).run()
break
while True:
prompt_input = accept_input(validator=validate_command)
if prompt_input is None:
continue
self.parse(prompt_input)
# Parse the next line and call the related command
def parse(self, prompt_input):
"""
Parse a single line of input.
:param prompt_input:
:return:
"""
input_pieces = str.split(prompt_input)
if len(input_pieces) == 0:
print("no") # TODO: tell user to try again
return
cmd_str = input_pieces[0]
# TODO: refactor the length of the input_pieces-1 as number_of_arguments
# Determine command
if cmd_str == 'mkfs':
if len(input_pieces) != 1:
print("no") # TODO: tell user to try again
return
return MKFS(self).run
elif cmd_str == 'open':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Open(self, args).run()
elif cmd_str == 'read':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Read(self, args).run()
elif cmd_str == 'write':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Write(self, args).run()
elif cmd_str == 'seek':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Seek(self, args).run()
elif cmd_str == 'close':
if len(input_pieces) != 2:
print("no") # TODO: tell user to try again
return
return Close(self, args).run()
elif cmd_str == 'mkdir':
if len(input_pieces) != 2:
print("no") # TODO: tell user to try again
return
return MKDIR(self, args).run()
elif cmd_str == 'rmdir':
if len(input_pieces) != 2:
print("no") # TODO: tell user to try again
return
return RMDIR(self, args).run()
elif cmd_str == 'cd':
if len(input_pieces) != 2:
print("no") # TODO: tell user to try again
return
return CD(self, args).run()
elif cmd_str == 'ls':
if len(input_pieces) != 1:
print("no") # TODO: tell user to try again
return
return LS().run()
elif cmd_str == 'cat':
if len(input_pieces) != 2:
print("no") # TODO: tell user to try again
return
return CAT(self, args).run()
elif cmd_str == 'tree':
if len(input_pieces) != 1:
print("no") # TODO: tell user to try again
return
return Tree().run()
elif cmd_str == 'import':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Import(self, args).run()
elif cmd_str == 'export':
if len(input_pieces) != 3:
print("no") # TODO: tell user to try again
return
return Export(self, args).run()
else:
print("no")
# TODO: Tell user to try again
# TODO: Call specific command init with arguments
# file_system, current_directory, and specific command arguments
|
Python
| 0.000015
|
@@ -2232,16 +2232,76 @@
rguments
+%0A # TODO: parse arguments and determine type validity
%0A%0A
@@ -2690,28 +2690,38 @@
Open(self,
-args
+filename, flag
).run()%0A
@@ -2897,28 +2897,32 @@
Read(self,
-args
+fd, size
).run()%0A
@@ -3096,36 +3096,42 @@
urn Write(self,
-args
+fd, string
).run()%0A
@@ -3303,28 +3303,34 @@
Seek(self,
-args
+fd, offset
).run()%0A
@@ -3508,28 +3508,26 @@
Close(self,
-args
+fd
).run()%0A
@@ -3701,36 +3701,39 @@
urn MKDIR(self,
-args
+dirname
).run()%0A
@@ -3907,28 +3907,31 @@
RMDIR(self,
-args
+dirname
).run()%0A
@@ -4103,28 +4103,31 @@
rn CD(self,
-args
+dirname
).run()%0A
@@ -4299,24 +4299,28 @@
rn LS().run(
+self
)%0A el
@@ -4488,28 +4488,32 @@
n CAT(self,
-args
+filename
).run()%0A
@@ -4687,16 +4687,20 @@
rn Tree(
+self
).run()%0A
@@ -4880,36 +4880,49 @@
rn Import(self,
-args
+srcname, destname
).run()%0A
@@ -5106,12 +5106,25 @@
lf,
-args
+srcname, destname
).ru
@@ -5213,135 +5213,4 @@
in%0A%0A
- # TODO: Call specific command init with arguments%0A # file_system, current_directory, and specific command arguments%0A
|
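Editor's note: the diff above renames the constructor arguments, but as its own TODO admits, the values are never actually extracted from input_pieces, so names like filename and flag are undefined at the call sites. A sketch of the missing step for one command, assuming the positional layout implied by the length checks (Open's signature is taken from the diff):

cmd_args = input_pieces[1:]
if cmd_str == 'open':
    if len(cmd_args) != 2:
        print('usage: open <filename> <flag>')
        return
    filename, flag = cmd_args
    return Open(self, filename, flag).run()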
634aa9818875c15c3db0ac0763fc15889936b79e
|
Add a structure test macro to make test writing easier.
|
tests.bzl
|
tests.bzl
|
# Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for running structure tests."""
def _impl(ctx):
ext_run_location = ctx.executable._structure_test.short_path
config_location = ctx.file.config.short_path
load_location = ctx.executable.image.short_path
# docker_build rules always generate an image named 'bazel/$package:$name'.
image_name = "bazel/%s:%s" % (ctx.attr.image.label.package, ctx.attr.image.label.name)
# Generate a shell script to execute ext_run with the correct flags.
test_contents = """\
#!/bin/bash
set -ex
# Execute the image loader script.
%s
# Run the tests.
%s \
-i %s \
-c %s""" % (load_location, ext_run_location, image_name, config_location)
ctx.file_action(
output=ctx.outputs.executable,
content=test_contents
)
return struct(runfiles=ctx.runfiles(files = [
ctx.executable._structure_test,
ctx.executable.image,
ctx.file.config] +
ctx.attr.image.files.to_list() +
ctx.attr.image.data_runfiles.files.to_list()
),
)
structure_test = rule(
attrs = {
"_structure_test": attr.label(
default = Label("//structure_tests:ext_run"),
cfg = "target",
allow_files = True,
executable = True,
),
"image": attr.label(
mandatory = True,
executable = True,
cfg = "target",
),
"config": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
test = True,
implementation = _impl,
)
|
Python
| 0
|
@@ -628,16 +628,94 @@
ts.%22%22%22%0A%0A
+load(%0A %22@io_bazel_rules_docker//docker:docker.bzl%22,%0A %22docker_build%22,%0A)%0A%0A
def _imp
@@ -2256,12 +2256,383 @@
= _impl,%0A)%0A
+%0Adef structure_test_with_files(name, image, config, files):%0A %22%22%22A macro for including extra files inside an image before testing it.%22%22%22%0A child_image_name = %22%25s.child_image%22 %25 name%0A docker_build(%0A name = child_image_name,%0A base = image,%0A files = files,%0A )%0A%0A structure_test(%0A name = name,%0A image = child_image_name,%0A config = config,%0A )%0A
|
f5df42e6049b31b1c147da7160e0595e595c6dbc
|
Add logging to grade
|
grade.py
|
grade.py
|
#Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
import sys
import pickle
import os
import numpy
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
def grade(grader_path,submission,sandbox=None):
results = {'errors': [],'tests': [],'correct': False,'score': 0, 'feedback' : []}
#Try to find and load the model file
try:
grader_data=pickle.load(file(grader_path,"r"))
except:
results['errors'].append("Could not find a valid model file.")
grader_set=EssaySet(type="test")
#Try to add essays to essay set object
try:
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except:
results['errors'].append("Essay could not be added to essay set:{0}".format(submission))
#Try to extract features from submission and assign score via the model
try:
grader_feats=grader_data['extractor'].gen_feats(grader_set)
results['feedback']=grader_data['extractor'].gen_feedback(grader_set)
results['score']=int(grader_data['model'].predict(grader_feats)[0])
except:
results['errors'].append("Could not extract features and score essay.")
#Determine maximum score and correctness of response
max_score=numpy.max(grader_data['model'].classes_)
if results['score']/float(max_score) >= .66:
results['correct']=True
else:
results['correct']=False
return results
|
Python
| 0
|
@@ -411,16 +411,31 @@
rt numpy
+%0Aimport logging
%0A%0Abase_p
@@ -616,16 +616,51 @@
semble%0A%0A
+log = logging.getLogger(__name__)%0A%0A
def grad
@@ -699,16 +699,99 @@
=None):%0A
+ log.debug(%22Grader path: %7B0%7D%5Cn Submission: %7B1%7D%22.format(grader_path,submission))%0A
resu
@@ -1676,32 +1676,33 @@
)%5B0%5D)%0A except
+
:%0A result
|
f96989d067f6fd073d04f96bdf2ae314c9b02d49
|
Use request helper function in LayersScraper
|
uoftscrapers/scrapers/utils/layers.py
|
uoftscrapers/scrapers/utils/layers.py
|
import requests
import json
from . import Scraper
class LayersScraper:
"""A superclass for scraping Layers of the UofT Map.
Map is located at http://map.utoronto.ca
"""
host = 'http://map.utoronto.ca/'
s = requests.Session()
@staticmethod
def get_layers_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Retrieving map layers for %s.' % campus.upper())
headers = {
'Referer': LayersScraper.host
}
html = LayersScraper.s.get('%s%s%s' % (
LayersScraper.host,
'data/map/',
campus
), headers=headers).text
data = json.loads(html)
return data['layers']
@staticmethod
def get_value(entry, val, number=False):
"""Retrieve the desired value from the parsed response dictionary."""
if val in entry.keys():
return entry[val]
else:
return 0 if number else ''
|
Python
| 0
|
@@ -218,35 +218,8 @@
ca/'
-%0A s = requests.Session()
%0A%0A
@@ -419,29 +419,16 @@
ders = %7B
-%0A
'Referer
@@ -448,25 +448,16 @@
per.host
-%0A
%7D%0A
@@ -462,29 +462,23 @@
-html = Layers
+data =
Scraper.
s.ge
@@ -473,18 +473,16 @@
Scraper.
-s.
get('%25s%25
@@ -596,47 +596,21 @@
ders
-).text%0A%0A data = json.loads(html)
+, json=True)%0A
%0A
|
b747da4fe99372e53850a964f450c7b00a4d81c9
|
Add node add/delete, edge del
|
graph.py
|
graph.py
|
class SimpleGraph(object):
"""This is a simple graph program that will allow us
    to implement a graph data structure"""
def __init__(self, dict_graph={}):
self.dict_graph = dict_graph
def node(self):
'''return a list of all nodes in the graph'''
return list(__dict_graph.keys())
def edges():
'''return a list of all edges in the graph'''
return list(add_edge())
def add_node(self, n):
'''adds a new node 'n' to the graph'''
pass
def add_edge(self, n1, n2):
'''adds a new edge to the graph connecting 'n1' and 'n2',
if either n1 or n2 are not already present in the graph,
they should be added.'''
edge = set(edge)
(n1, n2) = tuple(edge)
if n1 in self.dict_graph:
self.dict_graph[n1].append(n2)
else:
self.dict_graph[n1] == [n2]
def del_node(self, n):
'''deletes the node 'n' from the graph,
raises an error if no such node exists'''
pass
def del_edge(self, n1, n2):
'''deletes the edge connecting 'n1' and 'n2' from the graph,
raises an error if no such edge exists'''
pass
def has_node(self, n):
'''True if node 'n' is contained in the graph, False if not.'''
pass
def neighbors(self, n):
'''returns the list of all nodes connected to 'n' by edges,
raises an error if n is not in g'''
pass
def adjacent(self, n1, n2):
'''returns True if there is an edge connecting n1 and n2, False if not,
raises an error if either of the supplied nodes are not in g'''
pass
|
Python
| 0.000001
|
@@ -492,36 +492,96 @@
raph'''%0A
-pass
+if n not in self.dict_graph:%0A self.dict_graph%5Bn%5D = %5B%5D
%0A%0A def add_ed
@@ -766,64 +766,8 @@
'''%0A
- edge = set(edge)%0A (n1, n2) = tuple(edge)%0A
@@ -1019,36 +1019,153 @@
ists'''%0A
-pass
+try:%0A del self.dict_graph%5Bn%5D%0A except KeyError:%0A raise ValueError('That node does not exist')
%0A%0A def del_ed
@@ -1301,36 +1301,163 @@
ists'''%0A
-pass
+try:%0A self.dict_graph%5Bn1%5D.remove(n2)%0A except ValueError:%0A raise ValueError('That edge does not exist')
%0A%0A def has_no
@@ -1541,36 +1541,59 @@
not.'''%0A
-pass
+return n in self.dict_graph
%0A%0A def neighb
@@ -1706,32 +1706,36 @@
if n is not in g
+raph
'''%0A pass
@@ -1730,20 +1730,16 @@
-pass
%0A%0A de
|
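Editor's note: two issues in SimpleGraph survive the diff above: the mutable default argument dict_graph={} is shared by every instance constructed without an argument, and add_edge's else branch uses == (a comparison whose result is discarded) where = was intended. A sketch of both fixes:

class SimpleGraph(object):
    def __init__(self, dict_graph=None):
        # A fresh dict per instance instead of one shared default.
        self.dict_graph = dict_graph if dict_graph is not None else {}

    def add_edge(self, n1, n2):
        if n1 in self.dict_graph:
            self.dict_graph[n1].append(n2)
        else:
            self.dict_graph[n1] = [n2]   # assignment, not the original '=='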
4dbde6b8c33a85508ae9c375fef4d4caabfb4d15
|
add function build_valid_filename
|
nlp/extractors/base.py
|
nlp/extractors/base.py
|
import re
class BaseExtractor(object):
def _extract(self, html):
result =[]
return result
def find_between(self, text, s1, s2=None):
if not s1:
raise Exception('s1 is None!')
pos1 = text.find(s1)
if s2 and pos1 != -1:
pos2 = text.find(s2, pos1)
else:
pos2 = -1
if pos2 != -1 and pos2>pos1:
return text[pos1+len(s1):pos2]
else:
return ''
def extract(self, html):
return self._extract(html)
class BaseRegexExtractor(object):
regex = None
def _extract(self, html, regex=None):
result =[]
if regex == None:
regex = self.regex
if regex == None:
return result
p = re.compile(regex)
result = p.findall(html)
return result
def extract(self, html, regex=None):
return self._extract(html, regex=regex)
|
Python
| 0.00008
|
@@ -46,35 +46,47 @@
def
-_extract
+build_valid_filename
(self,
-html
+text
):%0D%0A
@@ -85,34 +85,222 @@
):%0D%0A
-result =%5B%5D
+dst=text%0D%0A for x in '%5Ct%5Cn%5C':;%22,.%5B%5D()%7B%7D~!@#$%25%5E&*_+-=/%3C%3E?':%0D%0A dst=dst.replace(x,' ')%0D%0A dst=dst.replace(' ','-').replace('--','-').replace('--','-')%0D%0A dst=dst.strip('-')
%0D%0A re
@@ -296,38 +296,37 @@
%0A return
-result
+dst%0D%0A
%0D%0A%0D%0A def find
@@ -686,24 +686,100 @@
eturn ''%0D%0A%0D%0A
+ def _extract(self, html):%0D%0A result =%5B%5D%0D%0A return result%0D%0A%0D%0A
def extr
|
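Editor's note: a quick illustration of the sanitizer added in the diff above. Because it applies exactly two fixed replace('--', '-') passes, a run of five or more consecutive separator characters can still leave a double dash; a while loop or re.sub('-+', '-', dst) would be more robust.

ext = BaseExtractor()
print(ext.build_valid_filename('Hello, World! (v2)'))
# -> Hello-World-v2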
a2837ab778d39e66c6178dae34a3bebdc638061f
|
fix test
|
py/test/testing/test_outcome.py
|
py/test/testing/test_outcome.py
|
import py
import marshal
class TestRaises:
def test_raises(self):
py.test.raises(ValueError, "int('qwe')")
def test_raises_exec(self):
py.test.raises(ValueError, "a,x = []")
def test_raises_syntax_error(self):
py.test.raises(SyntaxError, "qwe qwe qwe")
def test_raises_function(self):
py.test.raises(ValueError, int, 'hello')
def test_importorskip():
from py.__.test.outcome import Skipped
try:
sys = py.test.importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
py.test.raises(Skipped, "py.test.importorskip('alskdj')")
py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
path = py.test.importorskip("py", minversion=".".join(py.__version__))
py.test.raises(Skipped, """
py.test.importorskip("py", minversion="5.0")
""")
except Skipped:
print py.code.ExceptionInfo()
py.test.fail("spurious skip")
def test_pytest_exit():
try:
py.test.exit("hello")
except:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
|
Python
| 0.000001
|
@@ -894,16 +894,93 @@
ion__))%0A
+ mod = py.std.new.module(%22hello123%22)%0A mod.__version__ = %221.3%22 %0A
@@ -1033,34 +1033,40 @@
t.importorskip(%22
-py
+hello123
%22, minversion=%225
|
9b7b65374df1ab8cf9b0d925535d9dd542b3c433
|
Use rounding decimal field in BomItem form
|
InvenTree/part/forms.py
|
InvenTree/part/forms.py
|
"""
Django Forms for interacting with Part objects
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from InvenTree.forms import HelperForm
from InvenTree.helpers import GetExportFormats
from mptt.fields import TreeNodeChoiceField
from django import forms
from django.utils.translation import ugettext as _
from .models import Part, PartCategory, PartAttachment
from .models import BomItem
from .models import PartParameterTemplate, PartParameter
from .models import PartTestTemplate
from common.models import Currency
class PartImageForm(HelperForm):
""" Form for uploading a Part image """
class Meta:
model = Part
fields = [
'image',
]
class EditPartTestTemplateForm(HelperForm):
""" Class for creating / editing a PartTestTemplate object """
class Meta:
model = PartTestTemplate
fields = [
'part',
'test_name',
'description',
'required',
'requires_value',
'requires_attachment',
]
class BomExportForm(forms.Form):
""" Simple form to let user set BOM export options,
before exporting a BOM (bill of materials) file.
"""
file_format = forms.ChoiceField(label=_("File Format"), help_text=_("Select output file format"))
cascading = forms.BooleanField(label=_("Cascading"), required=False, initial=False, help_text=_("Download cascading / multi-level BOM"))
def get_choices(self):
""" BOM export format choices """
return [(x, x.upper()) for x in GetExportFormats()]
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['file_format'].choices = self.get_choices()
class BomValidateForm(HelperForm):
""" Simple confirmation form for BOM validation.
User is presented with a single checkbox input,
to confirm that the BOM for this part is valid
"""
validate = forms.BooleanField(required=False, initial=False, help_text=_('Confirm that the BOM is correct'))
class Meta:
model = Part
fields = [
'validate'
]
class BomUploadSelectFile(HelperForm):
""" Form for importing a BOM. Provides a file input box for upload """
bom_file = forms.FileField(label='BOM file', required=True, help_text=_("Select BOM file to upload"))
class Meta:
model = Part
fields = [
'bom_file',
]
class EditPartAttachmentForm(HelperForm):
""" Form for editing a PartAttachment object """
class Meta:
model = PartAttachment
fields = [
'part',
'attachment',
'comment'
]
class SetPartCategoryForm(forms.Form):
""" Form for setting the category of multiple Part objects """
part_category = TreeNodeChoiceField(queryset=PartCategory.objects.all(), required=True, help_text=_('Select part category'))
class EditPartForm(HelperForm):
""" Form for editing a Part object """
field_prefix = {
'keywords': 'fa-key',
'link': 'fa-link',
'IPN': 'fa-hashtag',
}
deep_copy = forms.BooleanField(required=False,
initial=True,
help_text=_("Perform 'deep copy' which will duplicate all BOM data for this part"),
widget=forms.HiddenInput())
confirm_creation = forms.BooleanField(required=False,
initial=False,
help_text=_('Confirm part creation'),
widget=forms.HiddenInput())
class Meta:
model = Part
fields = [
'deep_copy',
'confirm_creation',
'category',
'name',
'IPN',
'description',
'revision',
'keywords',
'variant_of',
'link',
'default_location',
'default_supplier',
'units',
'minimum_stock',
]
class EditPartParameterTemplateForm(HelperForm):
""" Form for editing a PartParameterTemplate object """
class Meta:
model = PartParameterTemplate
fields = [
'name',
'units'
]
class EditPartParameterForm(HelperForm):
""" Form for editing a PartParameter object """
class Meta:
model = PartParameter
fields = [
'part',
'template',
'data'
]
class EditCategoryForm(HelperForm):
""" Form for editing a PartCategory object """
field_prefix = {
'default_keywords': 'fa-key',
}
class Meta:
model = PartCategory
fields = [
'parent',
'name',
'description',
'default_location',
'default_keywords',
]
class EditBomItemForm(HelperForm):
""" Form for editing a BomItem object """
class Meta:
model = BomItem
fields = [
'part',
'sub_part',
'quantity',
'reference',
'overage',
'note'
]
# Prevent editing of the part associated with this BomItem
widgets = {'part': forms.HiddenInput()}
class PartPriceForm(forms.Form):
""" Simple form for viewing part pricing information """
quantity = forms.IntegerField(
required=True,
initial=1,
help_text=_('Input quantity for price calculation')
)
currency = forms.ModelChoiceField(queryset=Currency.objects.all(), label='Currency', help_text=_('Select currency for price calculation'))
class Meta:
model = Part
fields = [
'quantity',
'currency',
]
|
Python
| 0.000001
|
@@ -199,16 +199,70 @@
tFormats
+%0Afrom InvenTree.fields import RoundingDecimalFormField
%0A%0Afrom m
@@ -5056,32 +5056,106 @@
tem object %22%22%22%0A%0A
+ quantity = RoundingDecimalFormField(max_digits=10, decimal_places=5)%0A%0A
class Meta:%0A
|
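Editor's note: RoundingDecimalFormField is InvenTree's own field and its real implementation may differ, but the intent of the diff above appears to be a DecimalField that quantizes input to the declared decimal_places instead of rejecting it for excess precision. A rough sketch of that idea:

from decimal import Decimal, ROUND_HALF_UP
from django import forms

class RoundingDecimalFormFieldSketch(forms.DecimalField):
    def to_python(self, value):
        value = super().to_python(value)
        if value is not None:
            # Quantize to the declared precision (5 places above).
            value = value.quantize(Decimal(1).scaleb(-self.decimal_places),
                                   rounding=ROUND_HALF_UP)
        return value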
5801c152ea9f83e03a0f6ca8083410104d01df69
|
Check for invalid config object
|
nmi_mysql/nmi_mysql.py
|
nmi_mysql/nmi_mysql.py
|
"""
Custom mysql wrapper for pymysql
Usefull for raw queries and scripting
"""
import re
import logging
import pymysql.cursors
from queue import Queue
class DB(object):
def __init__(self, conf, max_pool_size=20):
self.conf = conf
self.max_pool_size = max_pool_size
self._initialize_pool()
def _initialize_pool(self):
self.pool = Queue(maxsize=self.max_pool_size)
for _ in range(0, self.max_pool_size):
self.pool.put_nowait(Connection(self.conf))
def connect(self):
con = self.pool.get(True)
con.connect()
return con
def close(self, con):
con.close()
self.pool.put_nowait(con)
class Connection(object):
def __init__(self, conf):
self.logger = logging.getLogger('database')
self.host = conf['host']
self.user = conf['user']
self.password = conf['password']
self.db_conn = conf['db']
self.port = int(conf['port'])
self.handle = None
self.connected = False
def __del__(self):
self.close()
def connect(self):
self.logger.info('Trying to connect to mysql database')
try:
con = pymysql.connect(
host=self.host,
user=self.user,
password=self.password,
db=self.db_conn,
port=self.port,
charset='utf8mb4',
cursorclass=pymysql.cursors.DictCursor
)
except Exception as err:
self.logger.error('Failed to connect to db')
self.logger.warn('Error:')
self.logger.info(err)
raise err
self.logger.info('Connection to mysql')
self.connected = True
self.handle = con
return True
def close(self):
try:
if self.connected:
self.handle.close()
self.connected = False
self.handle = None
except Exception as err:
self.logger.warn('Failed to close connection')
self.logger.warn(err)
raise err
return None
def query(self, _query, _params=None):
"""
self.handle holds the connection
_query is the query
_params holds the variables need by the query
"""
result = None
query = _query
if _params:
query = self.generate_query(_query, _params)
try:
with self.handle.cursor() as cursor:
cursor.execute(query, ())
if query.lower().strip().find('select') == 0:
result = list(cursor.fetchall())
else:
result = {
'affected_rows': cursor.rowcount
}
except Exception as err:
self.logger.warn(err)
raise err
self.handle.commit()
return result
def generate_query(self, _query, _params):
query = re.sub('\?', '%s', _query)
if not isinstance(_params, list):
return query % self.to_string(_params)
params = []
values = []
for param in _params:
if isinstance(param, tuple):
values.append('(' + self.to_string(param) + ')')
else:
params.append(self.to_string(param))
if values:
params = ', '.join(values)
query = query % params[1:-1]
else:
query = query % tuple(params)
return query
def to_string(self, temp):
if isinstance(temp, (list, tuple)):
tmp = []
for item in temp:
if isinstance(item, str):
item = item.replace('%', '%%')
tmp.append(self.handle.escape(item))
return ', '.join(tmp)
elif isinstance(temp, dict):
tmp = []
for key in temp:
if isinstance(temp[key], str):
temp[key] = temp[key].replace('%', '%%')
tmp.append(key + ' = ' + self.handle.escape(temp[key]))
return ', '.join(tmp)
elif isinstance(temp, str):
return self.handle.escape(temp.replace('%', '%%'))
else:
return self.handle.escape(temp)
|
Python
| 0.000001
|
@@ -155,16 +155,75 @@
Queue%0A%0A%0A
+CONFIG_KEYS = %5B'host', 'user', 'password', 'db', 'port'%5D%0A%0A%0A
class DB
@@ -281,16 +281,196 @@
ze=20):%0A
+ self.logger = logging.getLogger('database')%0A%0A for c in CONFIG_KEYS:%0A if c not in conf:%0A return self.logger.error('Invalid config object')%0A%0A
|
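Editor's note: logger.error returns None, so the early return added in the diff above silently yields a DB instance whose __init__ never ran to completion; the failure only surfaces later as an AttributeError on first use. A fail-fast sketch, assuming raising is the preferred behavior:

for key in CONFIG_KEYS:
    if key not in conf:
        raise ValueError('Invalid config object: missing %r' % key)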
5fd1e9266a9d4c7d0e06baa61087446eee11dc9c
|
Correct the static URL
|
defivelo/settings/base.py
|
defivelo/settings/base.py
|
"""
Django settings for defivelo project.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import dj_database_url
import pytz
from django.contrib import messages
from . import get_env_variable
from .. import get_project_root_path
gettext = lambda s: s
PROJECT_ROOT = get_project_root_path()
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = get_env_variable('SECRET_KEY', '')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = bool(get_env_variable('DEBUG', False))
ALLOWED_HOSTS = tuple(get_env_variable('ALLOWED_HOSTS', '').splitlines())
# Application definition
UPSTREAM_APPS = (
'bootstrap3',
'registration_bootstrap3',
'django_admin_bootstrapped',
'django.contrib.sites',
'allauth',
'allauth.account',
'allauth.socialaccount',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'parler',
'compressor',
'stronghold',
)
# Project apps tested by jenkins (everything in apps/)
APPS_DIR = os.path.join(PROJECT_ROOT, 'apps')
PROJECT_APPS = tuple(['apps.' + aname
for aname in os.listdir(APPS_DIR)
if os.path.isdir(os.path.join(APPS_DIR, aname))])
INSTALLED_APPS = UPSTREAM_APPS + PROJECT_APPS
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'stronghold.middleware.LoginRequiredMiddleware',
)
ROOT_URLCONF = 'defivelo.urls'
WSGI_APPLICATION = 'defivelo.wsgi.application'
# Bootstrap the admin
DAB_FIELD_RENDERER = 'django_admin_bootstrapped.renderers.BootstrapFieldRenderer'
MESSAGE_TAGS = {
messages.SUCCESS: 'alert-success success',
messages.WARNING: 'alert-warning warning',
messages.ERROR: 'alert-danger error'
}
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, 'defivelo/templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
],
'debug': DEBUG
},
},
]
COMPRESS_PRECOMPILERS = (
('text/x-scss', 'sassc {infile} {outfile}'),
)
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.parse(get_env_variable('DATABASE_URL'))
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
# Internationalization
LANGUAGE_CODE = 'fr'
TIME_ZONE = 'Europe/Zurich'
USE_I18N = True
USE_L10N = True
USE_TZ = True
LANGUAGES = (
('fr', gettext('French')),
('de', gettext('German')),
)
PARLER_LANGUAGES = {
None: (
[{'code': lang[0]} for lang in LANGUAGES]
),
'default': {
}
}
# This allows you to put project-wide translations in the "locale" directory of
# your project
LOCALE_PATHS = (
os.path.join(PROJECT_ROOT, 'locale'),
)
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = get_env_variable('STATIC_URL', '/static/')
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# This is usually not used in a dev env, hence the default value
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = get_env_variable('STATIC_ROOT', '/tmp/static')
STATICFILES_DIRS = (
os.path.join(PROJECT_ROOT, "defivelo/static"),
)
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
"compressor.finders.CompressorFinder",
)
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = get_env_variable('MEDIA_URL', '/media/')
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = get_env_variable('MEDIA_ROOT', '/tmp/static/media')
# Adapt Stronghold for allauth
STRONGHOLD_PUBLIC_NAMED_URLS = ['account_login', 'account_reset_password']
STRONGHOLD_PUBLIC_URLS = [r'^/admin/.*$']
AUTHENTICATION_BACKENDS = (
# Needed to login by username in Django admin, regardless of `allauth`
'django.contrib.auth.backends.ModelBackend',
# `allauth` specific authentication methods, such as login by e-mail
'allauth.account.auth_backends.AuthenticationBackend',
)
ACCOUNT_AUTHENTICATION_METHOD = 'email'
ACCOUNT_EMAIL_VERIFICATION = 'mandatory'
ACCOUNT_EMAIL_REQUIRED = True
ACCOUNT_USERNAME_REQUIRED = False
def defivelo_user_display(u):
if u.first_name and u.last_name:
return u'{first} {last}'.format(first=u.first_name, last=u.last_name)
else:
return u.email
ACCOUNT_USER_DISPLAY = defivelo_user_display
ACCOUNT_ADAPTER = 'defivelo.accounts.NoSignupAccountAdapter'
LOGIN_REDIRECT_URL = '/'
SITE_ID = 1
|
Python
| 0.999823
|
@@ -983,20 +983,16 @@
count',%0A
-
%0A 'dj
@@ -1170,20 +1170,16 @@
files',%0A
-
%0A 'pa
@@ -4665,22 +4665,82 @@
OT',
- '/tmp/static'
+%0A os.path.join(PROJECT_ROOT, 'static_files')
)%0A%0AS
|
f0f1fb06896294f2657083aa7a077d852ea8bb4b
|
add sort order
|
ats/admin.py
|
ats/admin.py
|
from django.contrib import admin
from .models import ProjectWorker
class ProjectWorkerAdmin(admin.ModelAdmin):
list_filter = ['user', 'project', 'job']
admin.site.register(ProjectWorker, ProjectWorkerAdmin)
|
Python
| 0.000088
|
@@ -152,16 +152,58 @@
'job'%5D%0A
+ ordering = %5B'user', 'project', 'job'%5D%0A
%0A%0Aadmin.
|
86c82a4215aeffb3ddf0a195c8a556fc5a32667a
|
fix save method if object is updated
|
nosql_schema/schema.py
|
nosql_schema/schema.py
|
import nosqlite
from fields import Field
from exceptions import ValidationError
import config as base_config
class Schema:
@staticmethod
def get_config():
config = vars(base_config)
if 'DATABASE_PATH' not in config:
config['DATABASE_PATH'] = 'database.db'
return config
def __init__(self, **kwargs):
self.config = Schema.get_config()
self._id = None
attributes = self.__class__.__dict__
# creation by dictionary -> see find / find_one
try:
field_dictionary = kwargs.pop('__dictionary')
if field_dictionary:
setattr(self, '_id', field_dictionary.pop('_id'))
except KeyError:
field_dictionary = None
# set default values, override with passed values, then with __dictionary
for k, v in attributes.iteritems():
if isinstance(v, Field):
setattr(self, k, v.default)
if k in kwargs:
setattr(self, k, kwargs.pop(k))
if field_dictionary and k in field_dictionary:
setattr(self, k, field_dictionary.pop(k))
def save(self):
if not self.__validate():
return False
# convert attributes to document
document = self.__to_dict()
if self._id is not None:
# update
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
collection.update({'_id': document['_id']}, document)
return self._id
else:
# insert
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
document = collection.insert(document)
self._id = document['_id']
return self._id
def delete(self):
document = self.__to_dict()
if '_id' in document:
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
return collection.delete({'_id': document['_id']})
def __validate(self):
attributes = self.__class__.__dict__
document = self.__to_dict()
for k, v in attributes.iteritems():
if isinstance(v, Field):
# workaround for getattr(self, k) as it returns class attribute if value is None?!
value = document.get(k)
if not v.validate(value=value):
raise ValidationError('Invalid value "{0}" for field "{1}"'.format(value, k))
return True
def __to_dict(self):
# remove all undefined attributes and add defined attributes
raw_document = self.__dict__
document = dict()
attributes = self.__class__.__dict__
for k, v in attributes.iteritems():
if isinstance(v, Field):
if k in raw_document:
document[k] = raw_document[k]
else:
# actually not necessary
document[k] = None
if '_id' in raw_document and raw_document['_id'] is not None:
document['_id'] = raw_document['_id']
return document
# class methods
@classmethod
def find(cls, query=None, limit=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
documents = []
results = collection.find(query, limit)
for document in results:
instance = cls(__dictionary=document)
documents.append(instance)
return documents
@classmethod
def find_one(cls, query=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
document = collection.find_one(query)
return cls(__dictionary=document)
|
Python
| 0.000001
|
@@ -1592,34 +1592,8 @@
ate(
-%7B'_id': document%5B'_id'%5D%7D,
docu
|
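Editor's note: the fix above hinges on nosqlite's API. As far as I can tell, Collection.update takes the document itself and matches on its embedded '_id' rather than a (filter, update) pair as in pymongo, which is why the two-argument call misbehaved; treat that signature as an assumption to verify against the nosqlite version in use. The repaired call then reduces to:

with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
    db[self.__class__.__name__].update(document)   # document carries '_id'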
6447899ec344d14fbb78b9a2bbbe8b75451f10f2
|
Set isolation level to repeatable read
|
pyophase/settings_production.py
|
pyophase/settings_production.py
|
"""
This is the settings file used in production.
First, it imports all default settings, then overrides respective ones.
Secrets are stored in and imported from an additional file, not set under version control.
"""
from pyophase import settings_secrets as secrets
from .settings import *
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
ALLOWED_HOSTS = ['.fachschaft.informatik.tu-darmstadt.de', '.d120.de']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'pyophase',
'USER': 'pyophase',
'PASSWORD': secrets.DB_PASSWORD,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
}
}
}
STATIC_URL = '/ophase/static/'
LOGIN_URL = '/ophase/accounts/login/'
MEDIA_URL = '/ophase/media/'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ADMINS = (('pyophase-dev', 'pyophase-dev@fachschaft.informatik.tu-darmstadt.de'),)
SERVER_EMAIL = "pyophase@fachschaft.informatik.tu-darmstadt.de"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.d120.de'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'pyophase'
EMAIL_HOST_PASSWORD = secrets.MAIL_PASSWORD
TUID_FORCE_SERVICE_URL = 'https://www.fachschaft.informatik.tu-darmstadt.de/ophase/sso/login/'
FILE_UPLOAD_PERMISSIONS = 0o644
|
Python
| 0
|
@@ -695,16 +695,66 @@
TABLES'%22
+,%0A 'isolation_level': %22repeatable read%22
%0A
|
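Editor's note: 'isolation_level' in OPTIONS is the documented knob for Django's MySQL backend (added in Django 2.0). One way to confirm what is actually in effect at runtime; note that MySQL 5.7 exposes the variable as tx_isolation while 8.0 renamed it to transaction_isolation:

from django.db import connection

with connection.cursor() as cursor:
    cursor.execute('SELECT @@transaction_isolation')   # @@tx_isolation on 5.7
    print(cursor.fetchone())   # -> ('REPEATABLE-READ',)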
4839121f90934f7e52e51c05d052d27124680be7
|
Remove confusing and useless "\n"
|
pyqode/python/backend/server.py
|
pyqode/python/backend/server.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main server script for a pyqode.python backend. You can directly use this
script in your application if it fits your needs or use it as a starting point
for writing your own server.
::
usage: server.py [-h] [-s [SYSPATH [SYSPATH ...]]] port
positional arguments:
port the local tcp port to use to run the server
optional arguments:
-h, --help show this help message and exit
-s [SYSPATH [SYSPATH ...]], --syspath [SYSPATH [SYSPATH ...]]
"""
import argparse
import sys
if __name__ == '__main__':
"""
Server process' entry point
"""
# setup argument parser and parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("port", help="the local tcp port to use to run "
"the server")
parser.add_argument('-s', '--syspath', nargs='*')
args = parser.parse_args()
# add user paths to sys.path
if args.syspath:
for path in args.syspath:
print('append path %s to sys.path\n' % path)
sys.path.append(path)
from pyqode.core import backend
from pyqode.python.backend.workers import JediCompletionProvider
# setup completion providers
backend.CodeCompletionWorker.providers.append(JediCompletionProvider())
backend.CodeCompletionWorker.providers.append(
backend.DocumentWordsProvider())
# starts the server
backend.serve_forever(args)
|
Python
| 0.000072
|
@@ -1080,10 +1080,8 @@
path
-%5Cn
' %25
|
31852bbf09e4f416f93c7720ecd9eca8cfe32d38
|
Update version
|
pyramid_request_log/__init__.py
|
pyramid_request_log/__init__.py
|
from __future__ import absolute_import
from .config import includeme
__version__ = '0.6'
|
Python
| 0
|
@@ -85,7 +85,7 @@
'0.
-6
+7
'%0A
|
7b18161879473793c0be5b369ca332cbcc458d7d
|
Change wrong import in f2py tool.
|
numscons/tools/f2py.py
|
numscons/tools/f2py.py
|
"""f2py Tool
Tool-specific initialization for f2py.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
import os
from os.path import join as pjoin, dirname as pdirname
import re
import sys
import subprocess
import SCons.Action
import SCons.Defaults
import SCons.Scanner
import SCons.Tool
import SCons.Util
from SCons.Node.FS.default_fs import Entry
# XXX: this whole thing needs cleaning !
def F2pySuffixEmitter(env, source):
return '$F2PYCFILESUFFIX'
def F2pyEmitter(target, source, env):
build_dir = pdirname(str(target[0]))
target.append(Entry(pjoin(build_dir,
_mangle_fortranobject(str(target[0]),
'fortranobject.c'))))
if _is_pyf(str(source[0])):
basename = os.path.splitext(os.path.basename(str(target[0])))
basename = basename[0]
basename = basename.split('module')[0]
target.append(Entry(pjoin(build_dir, '%s-f2pywrappers.f' % basename)))
return (target, source)
def _mangle_fortranobject(targetname, filename):
basename = os.path.splitext(os.path.basename(targetname))[0]
return '%s_%s' % (basename, filename)
def _is_pyf(source_file):
return os.path.splitext(source_file)[1] == '.pyf'
def _f2py_cmd_exec(cmd):
"""Executes a f2py command.
cmd should be a sequence.
The point is to execute in a new process to avoid race issues when using
multible jobs with scons."""
f2py_cmd = [sys.executable, '-c',
'"from numpy.f2py.f2py2e import run_main;run_main(%s)"' % repr(cmd)]
p = subprocess.Popen(" ".join(f2py_cmd), shell = True, stdout = subprocess.PIPE)
for i in p.stdout.readlines():
print i.rstrip('\n')
id, st = os.waitpid(p.pid, 0)
return st
def _pyf2c(target, source, env):
import numpy.f2py
import shutil
# We need filenames from source/target for path handling
target_file_names = [str(i) for i in target]
source_file_names = [str(i) for i in source]
# Get source files necessary for f2py generated modules
d = os.path.dirname(numpy.f2py.__file__)
source_c = pjoin(d,'src','fortranobject.c')
# Copy source files for f2py generated modules in the build dir
build_dir = pdirname(target_file_names[0])
# XXX: blah
if build_dir == '':
build_dir = '.'
try:
shutil.copy(source_c,
pjoin(build_dir,
_mangle_fortranobject(target_file_names[0],
'fortranobject.c')))
except IOError, e:
msg = "Error while copying fortran source files (error was %s)" % str(e)
raise IOError(msg)
basename = os.path.basename(str(target[0]).split('module')[0])
# XXX: handle F2PYOPTIONS being a string instead of a list
if _is_pyf(source_file_names[0]):
# XXX: scons has a way to force buidler to only use one source file
if len(source_file_names) > 1:
raise NotImplementedError("FIXME: multiple source files")
wrapper = pjoin(build_dir, '%s-f2pywrappers.f' % basename)
cmd = env['F2PYOPTIONS'] + [source_file_names[0], '--build-dir', build_dir]
st = _f2py_cmd_exec(cmd)
if not os.path.exists(wrapper):
f = open(wrapper, 'w')
f.close()
else:
cmd = env['F2PYOPTIONS'] + source_file_names + ['--build-dir', build_dir]
# fortran files, we need to give the module name
cmd.extend(['--lower', '-m', basename])
st = _f2py_cmd_exec(cmd)
return 0
def generate(env):
"""Add Builders and construction variables for swig to an Environment."""
import numpy.f2py
d = pdirname(numpy.f2py.__file__)
c_file, cxx_file = SCons.Tool.createCFileBuilders(env)
c_file.suffix['.pyf'] = F2pySuffixEmitter
c_file.add_action('.pyf', SCons.Action.Action(_pyf2c))
c_file.add_emitter('.pyf', F2pyEmitter)
env['F2PYOPTIONS'] = SCons.Util.CLVar('')
env['F2PYBUILDDIR'] = ''
env['F2PYCFILESUFFIX'] = 'module$CFILESUFFIX'
env['F2PYINCLUDEDIR'] = pjoin(d, 'src')
# XXX: adding a scanner using c_file.add_scanner does not work...
expr = '(<)include_file=(\S+)>'
scanner = SCons.Scanner.ClassicCPP("F2PYScan", ".pyf", "F2PYPATH", expr)
env.Append(SCANNERS = scanner)
env['BUILDERS']['F2py'] = SCons.Builder.Builder(action = _pyf2c,
emitter = F2pyEmitter,
suffix = F2pySuffixEmitter)
def exists(env):
try:
import numpy.f2py
st = 1
except ImportError, e:
print "Warning : f2py tool not found, error was %s" % e
st = 0
return st
|
Python
| 0
|
@@ -428,17 +428,24 @@
.Node.FS
-.
+ import
default_
@@ -451,20 +451,8 @@
_fs
-import Entry
%0A%0A#
@@ -647,32 +647,43 @@
target.append(
+default_fs.
Entry(pjoin(buil
@@ -1035,16 +1035,27 @@
.append(
+default_fs.
Entry(pj
|
3c3c452426b7568675028ec9def514eb6d501e35
|
Fix flake8 errors
|
qmxgraph/decoration_contents.py
|
qmxgraph/decoration_contents.py
|
import attr
from qmxgraph.extra_attr_validators import tuple_of
asdict = attr.asdict
_is_int = attr.validators.instance_of(int)
_is_str = attr.validators.instance_of(str)
@attr.s(frozen=True, slots=True)
class Image:
"""
    Represent an image tag that can be embedded into a table's contents.
The image's width and height are required since mxgraph will render the
html in a helper container in order to get the cell's size. To avoid the
cell size to be wrongly calculated we got some options like passing the
image's size explicitly (as done here) or force the user to pre load the
images so when rendering the html the image is already loaded and the
correct size is used.
:ivar str tag:
:ivar str source: The URL to the image, data URIs can be used.
:ivar int width: The desired width for the image.
:ivar int height: The desired height for the image.
"""
tag = attr.ib(default='img', init=False)
src = attr.ib(validator=_is_str)
width = attr.ib(validator=_is_int)
height = attr.ib(validator=_is_int)
@attr.s(frozen=True, slots=True)
class TableData:
"""
Represents the contents of a table's cell when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,Image]] contents: The table cell's contents.
:ivar int colspan: The number of columns the cell should span into.
:ivar int rowspan: The number of rows the cell should span into.
:ivar optional[str] style: A inline style for the element.
"""
tag = attr.ib(default='td', init=False)
contents = attr.ib(validator=tuple_of(str, Image),
convert=tuple)
colspan = attr.ib(default=1, validator=_is_int)
rowspan = attr.ib(default=1, validator=_is_int)
style = attr.ib(default=None, validator=attr.validators.optional(_is_str))
@attr.s(frozen=True, slots=True)
class TableRow:
"""
Represents the contents of a table's row when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,TableData]] contents: The row's cells. Normal `str`
elements will be interpreted as `TableData` elements with all the
default values and it's contents equal to a tuple of one element (the
`str` used).
"""
tag = attr.ib(default='tr', init=False)
contents = attr.ib(validator=tuple_of(str, TableData),
convert=tuple)
@attr.s(frozen=True, slots=True)
class Table:
"""
Represents the contents of a table when inserting or updating a table in
the graph.
:ivar str tag:
:ivar tuple[TableRow] contents: The table rows.
"""
tag = attr.ib(default='table', init=False)
contents = attr.ib(validator=tuple_of(TableRow), convert=tuple)
def contents_after(self, caption):
"""
Useful for testing: truncates the contents after the first row with the given caption and return it as a list.
:rtype: tuple[TableRow]
"""
seen_captions = []
def get_caption(row):
first_row_content = row.contents[0]
if isinstance(first_row_content, TableData):
return first_row_content.contents[0]
return first_row_content
for index, row in enumerate(self.contents):
row_caption = get_caption(row)
if row_caption == caption:
break
seen_captions.append(row_caption)
else:
__tracebackhide__ = True
assert False, '\nCould not find row with caption "{}" in\n{}'.format(caption, seen_captions)
return tuple(self.contents[index + 1:])
|
Python
| 0.000004
|
@@ -2899,16 +2899,24 @@
row with
+%0A
the giv
@@ -3519,21 +3519,13 @@
-assert False,
+msg =
'%5Cn
@@ -3568,16 +3568,46 @@
in%5Cn%7B%7D'
+%0A assert False, msg
.format(
@@ -3678,9 +3678,8 @@
+ 1:%5D)%0A
-%0A
|
314d848288474cd39445cc940711435c04b0be1a
|
version bumped
|
oandapyV20/__init__.py
|
oandapyV20/__init__.py
|
__title__ = "OANDA REST V20 API Wrapper"
__version__ = "0.2.2"
__author__ = "Feite Brekeveld"
__license__ = "MIT"
__copyright__ = "Copyright 2016 Feite Brekeveld"
# Version synonym
VERSION = __version__
# Set default logging handler to avoid "No handler found" warnings.
import logging
try:
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
from .oandapyV20 import API
from .exceptions import V20Error
|
Python
| 0
|
@@ -53,17 +53,17 @@
= %220.2.
-2
+3
%22%0A__auth
|
3d6adf735cc54b0297ff0cd475aabe39de18840c
|
Return an ElementTree object from make_api_call for the xml kind
|
oauth_access/access.py
|
oauth_access/access.py
|
import httplib2
import logging
import socket
import urllib
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.core.urlresolvers import reverse
from django.utils import simplejson as json
from django.contrib.sites.models import Site
import oauth2 as oauth
from oauth_access.utils.anyetree import etree
from oauth_access.utils.loader import load_path_attr
logger = logging.getLogger("oauth_access.access")
class ServiceFail(Exception):
pass
class OAuthAccess(object):
def __init__(self, service):
self.service = service
self.signature_method = oauth.SignatureMethod_HMAC_SHA1()
self.consumer = oauth.Consumer(self.key, self.secret)
@property
def key(self):
return self._obtain_setting("keys", "KEY")
@property
def secret(self):
return self._obtain_setting("keys", "SECRET")
@property
def request_token_url(self):
return self._obtain_setting("endpoints", "request_token")
@property
def access_token_url(self):
return self._obtain_setting("endpoints", "access_token")
@property
def authorize_url(self):
return self._obtain_setting("endpoints", "authorize")
def _obtain_setting(self, k1, k2):
name = "OAUTH_ACCESS_SETTINGS"
service = self.service
try:
return getattr(settings, name)[service][k1][k2]
except AttributeError:
raise ImproperlyConfigured("%s must be defined in settings" % (name,))
except KeyError, e:
key = e.args[0]
if key == service:
raise ImproperlyConfigured("%s must contain '%s'" % (name, service))
elif key == k1:
raise ImproperlyConfigured("%s must contain '%s' for '%s'" % (name, k1, service))
elif key == k2:
raise ImproperlyConfigured("%s must contain '%s' for '%s' in '%s'" % (name, k2, k1, service))
else:
raise
def unauthorized_token(self):
if not hasattr(self, "_unauthorized_token"):
self._unauthorized_token = self.fetch_unauthorized_token()
return self._unauthorized_token
def fetch_unauthorized_token(self):
current_site = Site.objects.get(pk=settings.SITE_ID)
# @@@ http fix
base_url = "http://%s" % (current_site.domain,)
callback_url = reverse("oauth_access_callback", kwargs={
"service": self.service,
})
parameters = {
"oauth_callback": "%s%s" % (base_url, callback_url),
}
client = oauth.Client(self.consumer)
response, content = client.request(self.request_token_url,
method = "POST",
# parameters must be urlencoded (which are then re-decoded by
# and re-encoded by oauth2 -- seems lame)
body = urllib.urlencode(parameters),
)
try:
return oauth.Token.from_string(content)
except KeyError, e:
if e.args[0] == "oauth_token":
raise ServiceFail()
raise
def authorized_token(self, token, verifier=None):
parameters = {}
if verifier:
parameters.update({
"oauth_verifier": verifier,
})
client = oauth.Client(self.consumer, token=token)
response, content = client.request(self.access_token_url,
method = "POST",
# parameters must be urlencoded (which are then re-decoded by
# oauth2 -- seems lame)
body = urllib.urlencode(parameters),
)
return oauth.Token.from_string(content)
def check_token(self, unauth_token, parameters):
token = oauth.Token.from_string(unauth_token)
if token.key == parameters.get("oauth_token", "no_token"):
verifier = parameters.get("oauth_verifier")
return self.authorized_token(token, verifier)
else:
return None
def callback(self, *args, **kwargs):
cb = load_path_attr(self._obtain_setting("endpoints", "callback"))
return cb(*args, **kwargs)
def authorization_url(self, token):
request = oauth.Request.from_consumer_and_token(
self.consumer,
token = token,
http_url = self.authorize_url,
)
request.sign_request(self.signature_method, self.consumer, token)
return request.to_url()
def make_api_call(self, kind, url, token, method="GET", **kwargs):
if isinstance(token, basestring):
token = oauth.Token.from_string(token)
client = oauth.Client(self.consumer, token=token)
response, content = client.request(url, method=method, force_auth_header=True)
if not content:
raise ServiceFail("no content")
logger.debug("response: %r" % response)
logger.debug("content: %r" % content)
if kind == "raw":
return content
elif kind == "json":
try:
return json.loads(content)
except ValueError:
# @@@ might be better to return a uniform cannot parse
# exception and let caller determine if it is service fail
raise ServiceFail("JSON parse error")
elif kind == "xml":
return etree.fromstring(content)
else:
raise Exception("unsupported API kind")
|
Python
| 0.000005
|
@@ -5405,16 +5405,34 @@
return
+etree.ElementTree(
etree.fr
@@ -5440,32 +5440,33 @@
mstring(content)
+)
%0A else:%0A
|
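
The one-character diff above simply wraps the parsed root in etree.ElementTree(...). A minimal standalone sketch of why that matters — the sample XML and the stdlib ElementTree here stand in for whatever anyetree resolves to:

import xml.etree.ElementTree as etree

content = "<user><name>alice</name></user>"

# fromstring() returns only the root Element; it can be searched but
# has no document-level API.
root = etree.fromstring(content)
print(root.find("name").text)      # -> alice

# Wrapping it in an ElementTree adds getroot()/write(), which callers
# of make_api_call may reasonably expect from the "xml" kind.
tree = etree.ElementTree(root)
print(tree.getroot().tag)          # -> user
tree.write("user.xml")             # an Element alone cannot do this
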
6b966f68aab979bb6dab55f08ec7c5b3807295da
|
Update hello.py
|
hello.py
|
hello.py
|
import sqlite3
from flask import Flask
from flask import g
import cf_deployment_tracker
import os
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
# On Bluemix, get the port number from the environment variable VCAP_APP_PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('VCAP_APP_PORT', 8080))
DATABASE = './database.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def init_db():
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.route('/')
def hello_world():
return 'Hello World! I am running on port ' + str(port)
@app.route('/placeOrder')
def placeOrder():
    return app.send_static_file('/placeOrder/index.html')
def insert(table, fields=(), values=()):
db = getattr(g, '_database', None)
cur = db.cursor()
query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table,
', '.join(fields),
', '.join(['?'] * len(values))
)
cur.execute(query, values)
db.commit()
id = cur.lastrowid
cur.close()
return id
@app.route('/CreatePool', methods = ['POST'])
def CreatePool():
db = get_db()
insert("Pools", ("restaurant", "return_time", "num_orders", "pickup_location", "has_arrived"), ("in n out", "1478939164", "5", "room 383", False))
return "Pool Created"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port)
|
Python
| 0.000001
|
@@ -1254,16 +1254,1852 @@
html')%0A%0A
+@app.route('/processOrder', methods = %5B'POST'%5D)%0Adef processOrder():%0A if request.method == 'POST':%0A nonce = request.form%5B'nonce'%5D%0A name = request.form%5B'username'%5D%0A order = request.form%5B'order'%5D%0A cost = int(request.form%5B'cost'%5D)%0A phoneNumber = request.form%5B'phoneNumber'%5D%0A userDetails = %7B'name':name, 'order':order, 'cost':cost, 'phoneNumber':phoneNumber%7D%0A%0A print(userDetails)%0A%0A success = processTransaction(nonce,cost)%0A if success:%0A return app.send_static_file('thankYou.html')%0A else:%0A return app.send_static_file('index.html')%0A%0A %0Adef processTransaction(nonce,cost):%0A api_instance = TransactionApi()%0A # Every payment you process with the SDK must have a unique idempotency key.%0A # If you're unsure whether a particular payment succeeded, you can reattempt%0A # it with the same idempotency key without worrying about double charging%0A # the buyer.%0A idempotency_key = str(uuid.uuid1())%0A%0A # Monetary amounts are specified in the smallest unit of the applicable currency.%0A # This amount is in cents. It's also hard-coded for $1.00, which isn't very useful.%0A amount = %7B'amount':cost, 'currency': 'USD'%7D%0A body = %7B'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount%7D%0A%0A # The SDK throws an exception if a Connect endpoint responds with anything besides%0A # a 200-level HTTP code. This block catches any exceptions that occur from the request.%0A try:%0A # Charge%0A api_response = api_instance.charge(access_token, location_id, body)%0A res = api_response.transaction%0A #Push to Db #TODO%0A return True%0A except ApiException as e:%0A res = %22Exception when calling TransactionApi-%3Echarge: %7B%7D%22.format(e)%0A print(res); #For Debuggig%0A return False%0A%0A%0A
def inse
|
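
The insert() helper above builds a parameterized query; here is a self-contained sketch of the same pattern against an in-memory database (table and values are made up):

import sqlite3

db = sqlite3.connect(":memory:")
db.execute("CREATE TABLE Pools (restaurant TEXT, num_orders INTEGER)")

def insert(db, table, fields=(), values=()):
    # '?' placeholders let sqlite3 bind the values safely; only the
    # table and column names are interpolated, so those must come
    # from trusted code, never from user input.
    query = "INSERT INTO %s (%s) VALUES (%s)" % (
        table, ", ".join(fields), ", ".join(["?"] * len(values)))
    cur = db.execute(query, values)
    db.commit()
    return cur.lastrowid

print(insert(db, "Pools", ("restaurant", "num_orders"), ("in n out", 5)))  # -> 1
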
3c46550771f5dc588ae9a0cf61a6980fd0315e6f
|
Fix e.details handling
|
odlclient/v2/client.py
|
odlclient/v2/client.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
from odlclient.openstack.common.apiclient import client
from odlclient.v2.bridge_domain import BridgeDomain
from odlclient.v2.connection_manager import ConnectionManager
from odlclient.v2.node import NodeManager
from odlclient.v2.ovsdb import OvsdbManager
from odlclient.v2.subnet import SubnetManager
from odlclient.v2.staticroute import StaticRouteManager
LOG = logging.getLogger(__name__)
class HTTPClient(client.HTTPClient):
"""
Modified HTTPClient to take a endpoint and doesn't use X-Auth-Token
"""
user_agent = "odlclient.openstack.common.apiclient"
def __init__(self,
endpoint,
username=None,
password=None,
original_ip=None,
verify=True,
cert=None,
timeout=None,
timings=False,
keyring_saver=None,
debug=False,
user_agent=None,
http=None):
self.endpoint = endpoint
self.username = username
self.password = password
self.original_ip = original_ip
self.timeout = timeout
self.verify = verify
self.cert = cert
self.keyring_saver = keyring_saver
self.debug = debug
self.user_agent = user_agent or self.user_agent
self.times = [] # [("item", starttime, endtime), ...]
self.timings = timings
# requests within the same session can reuse TCP connections from pool
self.http = http or requests.Session()
if self.username and self.password:
self.http.auth = (self.username, self.password)
def client_request(self, client, method, url, **kwargs):
try:
return self.request(
method, self.concat_url(self.endpoint, url), **kwargs)
except Exception as e:
LOG.error("Error from server below:\n%s", e.details)
raise
class Client(client.BaseClient):
def __init__(self, *args, **kw):
super(Client, self).__init__(*args, **kw)
self.bridge_domain = BridgeDomain(self)
self.connection_manager = ConnectionManager(self)
self.nodes = NodeManager(self)
self.ovsdb = OvsdbManager(self)
self.subnets = SubnetManager(self)
self.staticroutes = StaticRouteManager(self)
|
Python
| 0.000003
|
@@ -2547,16 +2547,58 @@
n as e:%0A
+ if hasattr(e, 'details'):%0A
|
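
The fix guards the attribute access with hasattr. A sketch of the failure mode it prevents — ApiError here is a hypothetical stand-in for the apiclient exceptions that do carry .details:

import logging

logging.basicConfig(level=logging.ERROR)
LOG = logging.getLogger("example")

class ApiError(Exception):
    def __init__(self, details):
        super(ApiError, self).__init__(details)
        self.details = details

for exc in (ApiError("409 conflict"), ValueError("connection reset")):
    try:
        raise exc
    except Exception as e:
        # Only some exception types carry .details; without the guard,
        # logging e.details for a ValueError raises AttributeError and
        # masks the original failure.
        if hasattr(e, "details"):
            LOG.error("Error from server below:\n%s", e.details)
        else:
            LOG.error("Error from server below:\n%s", e)
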
791b6720e489353bb5a2b35906dd88f558f26c33
|
Handle NotImplementedError
|
ogn/gateway/process.py
|
ogn/gateway/process.py
|
import logging
from ogn.commands.dbutils import session
from ogn.model import AircraftBeacon, ReceiverBeacon, Location
from ogn.parser import parse, ParseError
logger = logging.getLogger(__name__)
def replace_lonlat_with_wkt(message):
location = Location(message['longitude'], message['latitude'])
message['location_wkt'] = location.to_wkt()
del message['latitude']
del message['longitude']
return message
def message_to_beacon(raw_message, reference_date):
beacon = None
if raw_message[0] != '#':
try:
message = parse(raw_message, reference_date)
if message['aprs_type'] == 'position':
message = replace_lonlat_with_wkt(message)
if message['beacon_type'] == 'aircraft_beacon':
beacon = AircraftBeacon(**message)
elif message['beacon_type'] == 'receiver_beacon':
beacon = ReceiverBeacon(**message)
else:
print("Whoops: what is this: {}".format(message))
except ParseError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error('Drop packet, {}'.format(e.message))
except TypeError as e:
logger.error('TypeError: {}'.format(raw_message))
return beacon
def process_beacon(raw_message, reference_date=None):
beacon = message_to_beacon(raw_message, reference_date)
if beacon is not None:
session.add(beacon)
session.commit()
logger.debug('Received message: {}'.format(raw_message))
|
Python
| 0.000001
|
@@ -1008,32 +1008,170 @@
ormat(message))%0A
+ except NotImplementedError as e:%0A logger.error('Received message: %7B%7D'.format(raw_message))%0A logger.error(e)%0A
except P
|
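
A compact sketch of the pattern this commit applies: the parser signals unsupported input with NotImplementedError, and the consumer logs and drops that one packet instead of letting the loop die (parse here is a stub):

def parse(raw_message):
    raise NotImplementedError("this beacon type is not supported yet")

def message_to_beacon(raw_message):
    try:
        return parse(raw_message)
    except NotImplementedError as e:
        # Without this clause the error escapes message_to_beacon and
        # kills the whole gateway loop; drop just this packet instead.
        print("dropping %r: %s" % (raw_message, e))
        return None

print(message_to_beacon("unsupported beacon"))   # -> None
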
79f545795c864793bbeee3d2f08ff241bdc5c73d
|
fix spacing
|
ogusa/tests/test_SS.py
|
ogusa/tests/test_SS.py
|
import pytest
import json
import pickle
import numpy as np
import os
from ogusa import SS
CUR_PATH = os.path.abspath(os.path.dirname(__file__))
def test_SS_fsolve():
# Test SS.SS_fsolve function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
guesses, params = input_tuple
test_list = SS.SS_fsolve(guesses, params)
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_outputs.pkl'),
'rb') as f:
expected_list = pickle.load(f)
assert(np.allclose(np.array(test_list), np.array(expected_list)))
def test_SS_fsolve_reform():
# Test SS.SS_fsolve_reform function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_reform_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
guesses, params = input_tuple
test_list = SS.SS_fsolve_reform(guesses, params)
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_reform_outputs.pkl'),
'rb') as f:
expected_list = pickle.load(f)
assert(np.allclose(np.array(test_list), np.array(expected_list)))
def test_SS_fsolve_reform_baselinespend():
# Test SS.SS_fsolve_reform_baselinespend function. Provide inputs
# to function and ensure that output returned matches what it has
# been before.
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_reform_baselinespend_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
guesses, params = input_tuple
test_list = SS.SS_fsolve_reform_baselinespend(guesses, params)
with open(os.path.join(CUR_PATH,
'test_io_data/SS_fsolve_reform_baselinespend_outputs.pkl'),
'rb') as f:
expected_list = pickle.load(f)
assert(np.allclose(np.array(test_list), np.array(expected_list)))
def test_SS_solver():
# Test SS.SS_solver function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH,
'test_io_data/SS_solver_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
(b_guess_init, n_guess_init, rss, T_Hss, factor_ss, Yss, params,
baseline, fsolve_flag, baseline_spending) = input_tuple
test_dict = SS.SS_solver(
b_guess_init, n_guess_init, rss, T_Hss, factor_ss, Yss, params,
baseline, fsolve_flag, baseline_spending)
with open(os.path.join(CUR_PATH,
'test_io_data/SS_solver_outputs.pkl'),
'rb') as f:
expected_dict = pickle.load(f)
for k, v in expected_dict.iteritems():
assert(np.allclose(test_dict[k], v))
def test_inner_loop():
# Test SS.inner_loop function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH,
'test_io_data/inner_loop_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
(outer_loop_vars, params, baseline, baseline_spending) = input_tuple
test_tuple = SS.inner_loop(
outer_loop_vars, params, baseline, baseline_spending)
with open(os.path.join(CUR_PATH,
'test_io_data/inner_loop_outputs.pkl'),
'rb') as f:
expected_tuple = pickle.load(f)
for i, v in enumerate(expected_tuple):
assert(np.allclose(test_tuple[i], v))
def test_euler_equation_solver():
    # Test SS.euler_equation_solver function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH,
'test_io_data/euler_eqn_solver_inputs.pkl'),
'rb') as f:
input_tuple = pickle.load(f)
(guesses, params) = input_tuple
test_list = SS.euler_equation_solver(guesses, params)
with open(os.path.join(CUR_PATH,
'test_io_data/euler_eqn_solver_outputs.pkl'),
'rb') as f:
expected_list = pickle.load(f)
assert(np.allclose(np.array(test_list), np.array(expected_list)))
def test_create_steady_state_parameters():
# Test that SS parameters creates same objects with same inputs.
with open(os.path.join(CUR_PATH,
'test_io_data/create_params_inputs.pkl'),
'rb') as f:
input_dict = pickle.load(f)
test_tuple = SS.create_steady_state_parameters(**input_dict)
with open(os.path.join(CUR_PATH,
'test_io_data/create_params_outputs.pkl'),
'rb') as f:
expected_tuple = pickle.load(f)
for i, v in enumerate(expected_tuple):
for i2, v2 in enumerate(v):
try:
assert(all(test_tuple[i][i2]==v2))
except ValueError:
assert((test_tuple[i][i2]==v2).all())
except TypeError:
assert(test_tuple[i][i2]==v2)
@pytest.mark.parametrize('input_path,expected_path',
[('run_SS_open_unbal_inputs.pkl',
'run_SS_open_unbal_outputs.pkl'),
('run_SS_closed_balanced_inputs.pkl',
'run_SS_closed_balanced_outputs.pkl')],
ids=['Open, Unbalanced', 'Closed Balanced'])
def test_run_SS(input_path, expected_path):
# Test SS.run_SS function. Provide inputs to function and
# ensure that output returned matches what it has been before.
with open(os.path.join(CUR_PATH, 'test_io_data', input_path),
'rb') as f:
input_tuple = pickle.load(f)
(income_tax_params, ss_params, iterative_params, chi_params,
small_open_params, baseline, baseline_spending, baseline_dir) =\
input_tuple
test_dict = SS.run_SS(
income_tax_params, ss_params, iterative_params, chi_params,
small_open_params, baseline, baseline_spending, baseline_dir)
with open(os.path.join(CUR_PATH, 'test_io_data', expected_path),
'rb') as f:
expected_dict = pickle.load(f)
for k, v in expected_dict.iteritems():
assert(np.allclose(test_dict[k], v))
|
Python
| 0.000054
|
@@ -5171,18 +5171,20 @@
e%5Bi%5D%5Bi2%5D
+
==
+
v2))%0A
@@ -5252,18 +5252,20 @@
e%5Bi%5D%5Bi2%5D
+
==
+
v2).all(
@@ -5337,18 +5337,20 @@
e%5Bi%5D%5Bi2%5D
+
==
+
v2)%0A%0A%0A@p
|
77733f09717a2b61d5773c14b0b5a357f155dd1e
|
make handler authenticated and change to request body
|
oidenbterm/handlers.py
|
oidenbterm/handlers.py
|
import oide.lib.decorators
import oide.settings as global_settings
import oide.apps.filebrowser.settings as app_settings
from oide.lib.handlers.base import BaseHandler
from oidenbterm.mixins.kernel_mixin import KernelMixin
from terminado import TermSocket
from traitlets import Integer
import json
import tornado.web
import jupyter_client
class AuthTermSocket(TermSocket,BaseHandler):
@oide.lib.decorators.authenticated
def get(self, *args, **kwargs):
return super(AuthTermSocket, self).get(*args, **kwargs)
class KernelHandler(BaseHandler, KernelMixin):
def post(self):
code = self.get_argument("code")
msg_id = self.kernel.execute(code)
# print(msg_id)
res = []
while True:
# print('stuck here')
try:
msg = self.kernel.shell_channel.get_msg(Integer(10, config=True))
# print(msg)
except Empty:
# print('Empty')
pass
# This indicates that something bad happened, as AFAIK this should return...
# self.log.error("Timeout waiting for execute reply")
# raise KnitpyException("Timeout waiting for execute reply.")
if msg['parent_header'].get('msg_id') == msg_id:
# It's finished, and we got our reply, so next look at the results
break
else:
# print('something')
# not our reply
# self.log.debug("Discarding message from a different client: %s" % msg)
continue
# Now look at the results of our code execution and earlier completion requests
        # We handle messages until the kernel indicates it's idle again
status_idle_again = False
while True:
# print('stuck here now')
try:
msg = self.kernel.get_iopub_msg(Integer(10, config=True))
# print('doing something')
except Exception:
# print('Empty')
pass
# There should be at least some messages: we just executed code!
# The only valid time could be when the timeout happened too early (aka long
# running code in the document) -> we handle that below
# self.log.warn("Timeout waiting for expected IOPub output")
break
# print(msg['parent_header'].get('msg_id') != msg_id)
if msg['parent_header'].get('msg_id') != msg_id:
if msg['parent_header'].get(u'msg_type') != u'is_complete_request':
# print('output')
pass
# not an output from our execution and not one of the complete_requests
# self.log.debug("Discarding output from a different client: %s" % msg)
else:
# print('something too')
pass
# complete_requests are ok
continue
# Here we have some message which corresponds to our code execution
msg_type = msg['msg_type']
content = msg['content']
# print('Out')
# The kernel indicates some status: executing -> idle
if msg_type == 'status':
if content['execution_state'] == 'idle':
# When idle, the kernel has executed all input
status_idle_again = True
break
else:
# the "starting execution" messages
continue
elif msg_type == 'clear_output':
# we don't handle that!?
# self.log.debug("Discarding unexpected 'clear_output' message: %s" % msg)
continue
## So, from here on we have a messages with real content
# self.write(content)
res.append(content)
if not status_idle_again:
pass
# self.log.error("Code lines didn't execute in time. Don't use long-running code in "
# "documents or increase the timeout!")
# self.log.error("line(s): %s" % lines)
self.write({'res': res})
|
Python
| 0
|
@@ -575,64 +575,149 @@
-def post(self):%0A code = self.get_argument(
+@oide.lib.decorators.authenticated%0A def post(self):%0A # print self.request.body%0A code = json.loads(self.request.body)%5B
%22code%22
-)
+%5D
%0A
|
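
The diff switches from the form-parameter helper to decoding the raw body, which is what a JSON POST requires. A sketch of just that step, with a literal bytes payload standing in for self.request.body:

import json

# Tornado exposes the raw POST payload as bytes on self.request.body;
# get_argument() only sees form-encoded or query-string parameters,
# so it finds nothing for a "Content-Type: application/json" request.
body = b'{"code": "print(1 + 1)"}'        # stand-in for self.request.body

payload = json.loads(body)                # accepts bytes on python 3
code = payload["code"]
print(code)                               # -> print(1 + 1)
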
5c5f7981905c757cd5a750c2b2d09ea6bc6f1f28
|
Add BoolTypeFactory class
|
dataproperty/_factory.py
|
dataproperty/_factory.py
|
# encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import abc
import six
from .converter import NopConverterCreator
from .converter import IntegerConverterCreator
from .converter import FloatConverterCreator
from .converter import DateTimeConverterCreator
from ._type_checker_creator import NoneTypeCheckerCreator
from ._type_checker_creator import IntegerTypeCheckerCreator
from ._type_checker_creator import FloatTypeCheckerCreator
from ._type_checker_creator import DateTimeTypeCheckerCreator
from ._type_checker_creator import InfinityCheckerCreator
from ._type_checker_creator import NanCheckerCreator
@six.add_metaclass(abc.ABCMeta)
class TypeConverterFactoryInterface(object):
"""
Abstract factory class of type converter.
"""
@abc.abstractproperty
def type_checker_factory(self): # pragma: no cover
pass
@abc.abstractproperty
def value_converter_factory(self): # pragma: no cover
pass
class NoneTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NoneTypeCheckerCreator()
@property
def value_converter_factory(self):
return NopConverterCreator()
class IntegerTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return IntegerTypeCheckerCreator()
@property
def value_converter_factory(self):
return IntegerConverterCreator()
class FloatTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return FloatTypeCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class DateTimeTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return DateTimeTypeCheckerCreator()
@property
def value_converter_factory(self):
return DateTimeConverterCreator()
class InfinityTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return InfinityCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class NanTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NanCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
|
Python
| 0
|
@@ -267,32 +267,76 @@
onverterCreator%0A
+from .converter import BoolConverterCreator%0A
from .converter
@@ -537,32 +537,90 @@
eCheckerCreator%0A
+from ._type_checker_creator import BoolTypeCheckerCreator%0A
from ._type_chec
@@ -2087,32 +2087,271 @@
rterCreator()%0A%0A%0A
+class BoolTypeFactory(TypeConverterFactoryInterface):%0A%0A @property%0A def type_checker_factory(self):%0A return BoolTypeCheckerCreator()%0A%0A @property%0A def value_converter_factory(self):%0A return BoolConverterCreator()%0A%0A%0A
class InfinityTy
|
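
The commit extends an abstract-factory hierarchy with one more concrete pair. A cut-down sketch of the shape, using plain callables where the library has Creator classes (six is assumed installed, matching the code above):

import abc

import six  # the library above targets py2/py3 through six

@six.add_metaclass(abc.ABCMeta)
class TypeFactory(object):
    @abc.abstractproperty
    def type_checker_factory(self):  # pragma: no cover
        pass

    @abc.abstractproperty
    def value_converter_factory(self):  # pragma: no cover
        pass

class BoolTypeFactory(TypeFactory):
    @property
    def type_checker_factory(self):
        return lambda value: isinstance(value, bool)

    @property
    def value_converter_factory(self):
        return bool

factory = BoolTypeFactory()
print(factory.type_checker_factory(True))    # -> True
print(factory.value_converter_factory("x"))  # -> True
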
f8fd1a1b7e0eb2bcbcd64fa6a352862fceea80de
|
Use a two-level danger level.
|
autopilot.py
|
autopilot.py
|
# -*- coding: utf-8 -*-
"""
Autopilots for the DriveIt Gym environment.
@author: Jean-Claude Manoli
"""
import numpy as np
from belief import BeliefTracking
epsilon = 0.05
class Autopilot(object):
def __init__(self, car, other_cars=None):
self.car = car
self.tracker = BeliefTracking(car, other_cars, normalize=True)
self.belief, self.deltas = [], []
self.action = 0
def reset(self, observation):
belief = self.tracker.reset(observation)
self.deltas = np.zeros(np.shape(belief))
self.belief = belief
return belief
def observe(self, observation, dt):
belief = self.tracker.update(self.action, observation, dt)
self.deltas = belief - self.belief
self.belief = belief
return belief
def act(self):
self.action = self._act()
return self.action
def _act(self): raise NotImplementedError
class LookAheadPilot(Autopilot):
def __init__(self, car, other_cars=None,
ky=3.0, kdy=10.0,
kth=3.0, kdth=10.0,
kka=3.0, kdka=-3.0):
super().__init__(car, other_cars)
self.params = ky, kdy, kth, kdth, kka, kdka
def _danger(self, dist, ddist, x):
d, dd = False, 1
if dist[0] < 0.5 and x < 0.0 and x > -1.0:
d = True
for i in range(0, min(3, len(dist))):
dd = min(dd, ddist[i])
if dist[i] < (0.25 if i == 0 else 0.95):
d = True
return d and dd < 0.0
def _act(self):
x, y, th, v, k, kt, ka, *dist = self.belief #pylint: disable=W0612
dx, dy, dth, dv, dk, dkt, dka, *ddist = self.deltas #pylint: disable=W0612
ky, kdy, kth, kdth, kka, kdka = self.params
fy = ky * y + kdy * dy
fth = kth * dth + kdth * dth
fk = kka * (ka - k) + kdka * (dka - k)
f = -fy + fth + fk - k
if f > epsilon: action = 1
elif f < -epsilon: action = 2
else: action = 0
if self._danger(dist, ddist, x):
action += 6
else:
safe_throttle = self.car.specs.safe_turn_speed( \
max(abs(k), abs(ka)), 0.9) / self.car.specs.v_max
if v < safe_throttle - epsilon:
action += 3
elif v > safe_throttle + epsilon:
action += 6
return action
|
Python
| 0.000004
|
@@ -1252,16 +1252,21 @@
d, dd
+, ddd
= False
@@ -1267,17 +1267,26 @@
False,
-1
+False, 1.0
%0A
@@ -1301,17 +1301,17 @@
%5B0%5D %3C 0.
-5
+8
and x %3C
@@ -1324,19 +1324,19 @@
nd x %3E -
-1.0
+0.8
:%0A
@@ -1334,32 +1334,33 @@
8:%0A d
+d
= True%0A
@@ -1411,16 +1411,17 @@
dd
+d
= min(d
@@ -1503,32 +1503,136 @@
d = True%0A
+ if dist%5Bi%5D %3C (0.20 if i == 0 else 0.80):%0A dd = True%0A %0A
return d
@@ -1635,21 +1635,17 @@
rn d
- and dd %3C 0.0
+, dd, ddd
%0A%0A
@@ -2134,18 +2134,28 @@
-if
+d, dd, ddd =
self._d
@@ -2179,52 +2179,9 @@
, x)
-:
%0A
- action += 6%0A else:%0A
@@ -2234,20 +2234,16 @@
peed( %5C%0A
-
@@ -2304,22 +2304,28 @@
- if
+if not d and
v %3C saf
@@ -2350,36 +2350,32 @@
on:%0A
-
action += 3%0A
@@ -2378,25 +2378,48 @@
-
elif
+dd or (d and ddd %3C 0.0) or
v %3E safe
@@ -2431,36 +2431,32 @@
ttle + epsilon:%0A
-
acti
|
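
The diff turns the single boolean into a two-band check (slow-down vs hard-brake) plus a closing-rate signal. An illustrative reimplementation — the thresholds below are made up, not the tuned model values:

def danger(dist, ddist, n=3):
    # outer band -> throttle down, inner band -> hard limit
    outer = inner = False
    closing = 1.0
    for i in range(min(n, len(dist))):
        closing = min(closing, ddist[i])        # most negative delta
        if dist[i] < (0.25 if i == 0 else 0.95):
            outer = True
        if dist[i] < (0.20 if i == 0 else 0.80):
            inner = True
    return outer, inner, closing

print(danger([0.22, 1.20], [-0.01, 0.00]))      # -> (True, False, -0.01)
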
fa9fec30dbc91a1f968661acebc3a0b653781e70
|
Update the actual filename if we are doing Windows safe character replacement.
|
videodownloader/providers/__init__.py
|
videodownloader/providers/__init__.py
|
__license__ = '''
Copyright 2010 Jake Wharton
py-video-downloader is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
py-video-downloader is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General
Public License along with py-video-downloader. If not, see
<http://www.gnu.org/licenses/>.
'''
from datetime import datetime
import os
import re
import sys
import urllib2
__all__ = ['Vimeo', 'YouTube']
IS_WINDOWS = (sys.platform == 'win32' or sys.platform == 'cygwin')
class Provider(object):
HEADERS = {
'User-Agent': 'Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11',
}
def __init__(self, id, title=None, ext='video', format=None, debug=False):
#Save debugging flag immediately
self.debugging = debug
self.id = id
self._debug('Provider', '__init__', 'id', self.id)
self.title = id if title is None else title
self._debug('Provider', '__init__', 'title', self.title)
self.format = format
if self.format is not None:
self._debug('Provider', '__init__', 'format', self.format)
self.fileext = ext
self._debug('Provider', '__init__', 'fileext', self.fileext)
self.filename = title
self._debug('Provider', '__init__', 'filename', self.filename)
def _pre_download(self):
'''
Optional callback which occurs before the download takes place.
'''
self._debug('Provider', '_pre_download', 'No callback supplied.')
def _in_download(self, url):
'''
Optional callback which occurs after the download url is opened by urllib2.
'''
self._debug('Provider', '_in_download', 'No callback supplied.')
def _post_download(self, success):
'''
Optional callback which occurs after the download has finished.
'''
self._debug('Provider', '_post_download', 'No callback supplied.')
def get_download_url(self):
'''
Required to be overriden by implementing class
'''
raise NotImplementedError('Provider did implement "get_download_url()". Cannot download video.')
def run(self):
'''
Download the video.
'''
#Callback
self._debug('Provider', 'run', 'Running pre-download callback.')
self._pre_download()
url = None
out = None
success = False
try:
url = Provider._download(self.get_download_url())
#Callback
self._debug('Provider', 'run', 'Running in-download callback.')
self._in_download(url)
filename = self.filename + self.fileext
#Invalid filename character fix
if IS_WINDOWS:
filename = re.sub(ur'[?\[\]\/\\=+<>:;",*]+', '_', filename, re.UNICODE)
self._debug('Provider', 'run', 'filename', filename)
#Save the stream to the output file
out = open(filename, 'wb')
out.write(url.read())
#We are done therefore success!
success = True
finally:
if out is not None:
out.close()
if url is not None:
url.close()
#Callback
self._debug('Provider', 'run', 'Running post-download callback.')
self._debug('Provider', 'run', 'success', success)
self._post_download(success)
def _debug(self, cls, method, *args):
if self.debugging:
if len(args) == 2:
print '%s %s:%s %s = %s' % (datetime.now(), cls, method, args[0], args[1])
else:
print '%s %s:%s - %s' % (datetime.now(), cls, method, ' '.join(args))
@staticmethod
def _download(url):
return urllib2.urlopen(urllib2.Request(url, headers=Provider.HEADERS))
from vimeo import Vimeo
from youtube import YouTube
|
Python
| 0
|
@@ -3013,60 +3013,8 @@
l)%0A%0A
- filename = self.filename + self.fileext%0A
@@ -3096,16 +3096,21 @@
+self.
filename
@@ -3129,24 +3129,17 @@
'%5B?%5C
-%5B%5C%5D%5C
/%5C%5C
-=+
%3C%3E:
-;%22,*
+%22*%7C
%5D+',
@@ -3144,16 +3144,21 @@
', '_',
+self.
filename
@@ -3171,16 +3171,68 @@
NICODE)%0A
+ filename = self.filename + self.fileext%0A
|
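
The fix stores the sanitized name back on self.filename instead of sanitizing a local computed after the extension was appended. A standalone sketch of the sanitizer; note that the original call passes re.UNICODE positionally into re.sub's count slot, so the keyword form below is the safe spelling:

import re

def windows_safe(name):
    # Replace runs of characters Windows rejects in file names; pass
    # flags by keyword, since re.sub's 4th positional arg is count.
    return re.sub(r'[?/\\<>:"*|]+', "_", name, flags=re.UNICODE)

title = 'clip: "before/after" <v2>?'
safe = windows_safe(title)     # re.sub is pure -> keep the result
print(safe + ".mp4")           # -> clip_ _before_after_ _v2_.mp4
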
27df09cd98d9128d89d9d9d26ee0e89223fbd990
|
document idlerpg's external dependencies
|
libqtile/widget/idlerpg.py
|
libqtile/widget/idlerpg.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .generic_poll_text import GenPollUrl
import datetime
class IdleRPG(GenPollUrl):
"""
A widget for monitoring and displaying IdleRPG stats.
::
# display idlerpg stats for the player 'pants' on freenode's #idlerpg
widget.IdleRPG(url="http://xethron.lolhosting.net/xml.php?player=pants")
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('format', 'IdleRPG: {online} TTL: {ttl}', 'Display format'),
('json', False, 'Not json :)'),
('xml', True, 'Is XML :)'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(IdleRPG.defaults)
def parse(self, body):
formatted = {}
for k, v in body['player'].items():
if k == 'ttl':
formatted[k] = str(datetime.timedelta(seconds=int(v)))
elif k == 'online':
formatted[k] = "online" if v == "1" else "offline"
else:
formatted[k] = v
return self.format.format(**formatted)
|
Python
| 0
|
@@ -1451,16 +1451,110 @@
=pants%22)
+%0A%0A Widget requirements: xmltodict_.%0A%0A .. _xmltodict: https://pypi.org/project/xmltodict/
%0A %22%22%22
|
b736e69a88d3caf288f55216830a37df3b2da57c
|
Generate docs correctly if pexpect is not available.
|
docs/autogen_api.py
|
docs/autogen_api.py
|
#!/usr/bin/env python
"""Script to auto-generate our API docs.
"""
# stdlib imports
import os
import sys
# local imports
sys.path.append(os.path.abspath('sphinxext'))
from apigen import ApiDocWriter
#*****************************************************************************
if __name__ == '__main__':
pjoin = os.path.join
package = 'IPython'
outdir = pjoin('source','api','generated')
docwriter = ApiDocWriter(package,rst_extension='.txt')
# You have to escape the . here because . is a special char for regexps.
# You must do make clean if you change this!
docwriter.package_skip_patterns += [r'\.fixes$',
r'\.external$',
r'\.extensions',
r'\.kernel\.config',
r'\.attic',
r'\.quarantine',
r'\.deathrow',
r'\.config\.default',
r'\.config\.profile',
r'\.frontend',
r'\.gui'
]
docwriter.module_skip_patterns += [ r'\.core\.fakemodule',
# XXX These need fixing, disabling for
# now but we need to figure out why
# they are breaking. Error from sphinx
# for each group copied below
# AttributeError: __abstractmethods__
r'\.core\.component',
r'\.utils\.traitlets',
# AttributeError: __provides__
r'\.kernel\.clusterdir',
r'\.kernel\.configobjfactory',
r'\.kernel\.fcutil',
r'\.kernel\.ipcontrollerapp',
r'\.kernel\.launcher',
r'\.kernel\.task',
r'\.kernel\.winhpcjob',
r'\.testing\.util',
# Keeping these disabled is OK
r'\.cocoa',
r'\.ipdoctest',
r'\.Gnuplot',
r'\.frontend\.process\.winprocess',
r'\.Shell',
]
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'gen',
relative_to = pjoin('source','api')
)
print '%d files written' % len(docwriter.written_modules)
|
Python
| 0.999999
|
@@ -2897,16 +2897,337 @@
%5D%0A
+ %0A # If we don't have pexpect, we can't load irunner, so skip any code that%0A # depends on it%0A try:%0A import pexpect%0A except ImportError:%0A docwriter.module_skip_patterns += %5Br'%5C.lib%5C.irunner',%0A r'%5C.testing%5C.mkdoctests'%5D%0A # Now, generate the outputs%0A
docw
|
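
The guard pattern the diff adds, reduced to its essentials — probe the optional import once and register skips only on failure:

skip_patterns = []

try:
    import pexpect  # noqa: F401 -- probe only, never used directly here
except ImportError:
    # The modules that need pexpect cannot be documented, so exclude
    # them instead of letting the doc build crash.
    skip_patterns += [r"\.lib\.irunner", r"\.testing\.mkdoctests"]

print(skip_patterns)   # empty when pexpect is installed
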
79006fa1fe0bf78d10a1951b2cc20ba5ff245e4b
|
Provide 'self' argument to instance method
|
sklearn/utils/tests/test_metaestimators.py
|
sklearn/utils/tests/test_metaestimators.py
|
from sklearn.utils.metaestimators import if_delegate_has_method
from nose.tools import assert_true
class Prefix(object):
def func():
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
|
Python
| 0.999981
|
@@ -129,16 +129,20 @@
ef func(
+self
):%0A
|
4edd9e21f910e156c5b5a73a66b4d63923b8e86d
|
Make the script Python 3 compatible
|
rsatool.py
|
rsatool.py
|
#!/usr/bin/env python2
import base64, fractions, optparse, random
import gmpy
from pyasn1.codec.der import encoder
from pyasn1.type.univ import *
PEM_TEMPLATE = '-----BEGIN RSA PRIVATE KEY-----\n%s-----END RSA PRIVATE KEY-----\n'
DEFAULT_EXP = 65537
def factor_modulus(n, d, e):
"""
Efficiently recover non-trivial factors of n
See: Handbook of Applied Cryptography
8.2.2 Security of RSA -> (i) Relation to factoring (p.287)
http://www.cacr.math.uwaterloo.ca/hac/
"""
t = (e * d - 1)
s = 0
while True:
quotient, remainder = divmod(t, 2)
if remainder != 0:
break
s += 1
t = quotient
found = False
while not found:
i = 1
a = random.randint(1,n-1)
while i <= s and not found:
c1 = pow(a, pow(2, i-1, n) * t, n)
c2 = pow(a, pow(2, i, n) * t, n)
found = c1 != 1 and c1 != (-1 % n) and c2 == 1
i += 1
p = fractions.gcd(c1-1, n)
q = (n / p)
return p, q
class RSA:
def __init__(self, p=None, q=None, n=None, d=None, e=DEFAULT_EXP):
"""
Initialize RSA instance using primes (p, q)
or modulus and private exponent (n, d)
"""
self.e = e
if p and q:
assert gmpy.is_prime(p), 'p is not prime'
assert gmpy.is_prime(q), 'q is not prime'
self.p = p
self.q = q
elif n and d:
self.p, self.q = factor_modulus(n, d, e)
else:
raise ArgumentError('Either (p, q) or (n, d) must be provided')
self._calc_values()
def _calc_values(self):
self.n = self.p * self.q
phi = (self.p - 1) * (self.q - 1)
self.d = gmpy.invert(self.e, phi)
# CRT-RSA precomputation
self.dP = self.d % (self.p - 1)
self.dQ = self.d % (self.q - 1)
self.qInv = gmpy.invert(self.q, self.p)
def to_pem(self):
"""
Return OpenSSL-compatible PEM encoded key
"""
return PEM_TEMPLATE % base64.encodestring(self.to_der())
def to_der(self):
"""
Return parameters as OpenSSL compatible DER encoded key
"""
seq = Sequence()
for x in [0, self.n, self.e, self.d, self.p, self.q, self.dP, self.dQ, self.qInv]:
seq.setComponentByPosition(len(seq), Integer(x))
return encoder.encode(seq)
def dump(self, verbose):
vars = ['n', 'e', 'd', 'p', 'q']
if verbose:
vars += ['dP', 'dQ', 'qInv']
for v in vars:
self._dumpvar(v)
def _dumpvar(self, var):
val = getattr(self, var)
parts = lambda s, l: '\n'.join([s[i:i+l] for i in xrange(0, len(s), l)])
if len(str(val)) <= 40:
print '%s = %d (%#x)\n' % (var, val, val)
else:
print '%s =' % var
print parts('%x' % val, 80) + '\n'
if __name__ == '__main__':
parser = optparse.OptionParser()
parser.add_option('-p', dest='p', help='prime', type='int')
parser.add_option('-q', dest='q', help='prime', type='int')
parser.add_option('-n', dest='n', help='modulus', type='int')
parser.add_option('-d', dest='d', help='private exponent', type='int')
parser.add_option('-e', dest='e', help='public exponent (default: %d)' % DEFAULT_EXP, type='int', default=DEFAULT_EXP)
parser.add_option('-o', dest='filename', help='output filename')
parser.add_option('-f', dest='format', help='output format (DER, PEM) (default: PEM)', type='choice', choices=['DER', 'PEM'], default='PEM')
parser.add_option('-v', dest='verbose', help='also display CRT-RSA representation', action='store_true', default=False)
try:
(options, args) = parser.parse_args()
if options.p and options.q:
print 'Using (p, q) to initialise RSA instance\n'
rsa = RSA(p=options.p, q=options.q, e=options.e)
elif options.n and options.d:
print 'Using (n, d) to initialise RSA instance\n'
rsa = RSA(n=options.n, d=options.d, e=options.e)
else:
parser.print_help()
parser.error('Either (p, q) or (n, d) needs to be specified')
rsa.dump(options.verbose)
if options.filename:
print 'Saving %s as %s' % (options.format, options.filename)
if options.format == 'PEM':
data = rsa.to_pem()
elif options.format == 'DER':
data = rsa.to_der()
fp = open(options.filename, 'wb')
fp.write(data)
fp.close()
except optparse.OptionValueError, e:
parser.print_help()
parser.error(e.msg)
|
Python
| 0.999675
|
@@ -2046,16 +2046,17 @@
return
+(
PEM_TEMP
@@ -2096,16 +2096,35 @@
o_der())
+.decode()).encode()
%0A%0A de
@@ -2749,17 +2749,16 @@
or i in
-x
range(0,
@@ -2817,25 +2817,25 @@
print
-
+(
'%25s = %25d (%25#
@@ -2857,16 +2857,17 @@
al, val)
+)
%0A
@@ -2890,17 +2890,17 @@
print
-
+(
'%25s =' %25
@@ -2903,16 +2903,17 @@
=' %25 var
+)
%0A
@@ -2922,17 +2922,17 @@
print
-
+(
parts('%25
@@ -2951,16 +2951,17 @@
) + '%5Cn'
+)
%0A%0A%0Aif __
@@ -3852,33 +3852,33 @@
print
-
+(
'Using (p, q) to
@@ -3896,32 +3896,33 @@
RSA instance%5Cn'
+)
%0A rsa
@@ -4022,17 +4022,17 @@
print
-
+(
'Using (
@@ -4066,16 +4066,17 @@
tance%5Cn'
+)
%0A
@@ -4331,17 +4331,17 @@
print
-
+(
'Saving
@@ -4386,16 +4386,17 @@
ilename)
+)
%0A%0A%0A
@@ -4678,17 +4678,19 @@
lueError
-,
+ as
e:%0A
|
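
The diff is long but mechanical; here are the recurring py2 -> py3 edits collected in one runnable sketch (encodebytes stands in for the encodestring used above, which Python 3.9 removed):

import base64

# 1. print is a function
print("hello")

# 2. xrange is gone; range is already lazy
print(["abcdef"[i:i + 2] for i in range(0, 6, 2)])   # -> ['ab', 'cd', 'ef']

# 3. 'except E, e' became 'except E as e'
try:
    int("x")
except ValueError as e:
    print(e)

# 4. base64 yields bytes; decode before %-formatting into a str template
pem_body = base64.encodebytes(b"\x00\x01").decode()
print("-----BEGIN-----\n%s-----END-----" % pem_body)
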
f9f3ca75e8151b1467fddffe390aee6a8fe00259
|
Change configuration for wsgi settings
|
dev_cloud/web_service/wsgi.py
|
dev_cloud/web_service/wsgi.py
|
# -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""
WSGI config for web_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
activate_this = '../.pyenv/bin/activate_this.py '
execfile(activate_this, dict(__file__=activate_this))
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
os.environ["CELERY_LOADER"] = "django"
import djcelery
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
Python
| 0
|
@@ -881,57 +881,89 @@
%22%22%22%0A
-activate_this = '../.pyenv/bin/activate_this.py '
+from core.settings.config import ENVIROMENT_PATH%0A%0Aactivate_this = ENVIROMENT_PATH
%0Aexe
|
f3724421fa859a5970e66353b6a311aa14b866ec
|
Add additional spacing to improve readability
|
labs/lab-5/ex5-1.log.py
|
labs/lab-5/ex5-1.log.py
|
#!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from log_utils import follow
if __name__ == '__main__':
# We are expecting two arguments
# The first is the name of the script
# The second is a path to a log file
if len(sys.argv) == 2:
# Open our file for reading
log_file = open(sys.argv[1], "r")
# Create our iterable function
log_lines = follow(log_file)
# Process the lines as they are appended
for line in log_lines:
# Strip out the new line an print the line
print("{0}".format(line.strip()))
else:
# Incorrect number of arguments
# Output usage to standard out
sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
|
Python
| 0.000003
|
@@ -914,16 +914,17 @@
%5D, %22r%22)%0A
+%0A
@@ -991,16 +991,17 @@
g_file)%0A
+%0A
|
1adb2c16780a2bbdf5a40368a856dc4b6e9df9f6
|
build out initial email example, small change to README
|
example-emailed_daily_operational_report.py
|
example-emailed_daily_operational_report.py
|
import liveengage_data_app as le_api
app = le_api.LiveEngageDataApp(account_number='xx',
keys_and_secrets={
'consumer_key':'xx',
'consumer_secret':'xx',
'token_key':'xx',
'token_secret':'xx'
},
services=[
#'engHistDomain',
'leDataReporting',
'accountConfigReadOnly_skills',
#'accountConfigReadOnly_users',
'accountConfigReadOnly_agentGroups'
])
print(str(app))
# Grab data for the timeframe of the last 24 hours.
# Put the data in only one bucket.
rt_data = app.get_rt_operational_data(minute_timeframe='1440', in_buckets_of='1440')
print(str(rt_data['errors'])) if rt_data['errors'] else print('Main data: ok')
for method in rt_data['success']:
details = method + 'data: '
print(details + str(rt_data['success'][method]['errors'])) if rt_data['success'][method]['errors'] else print(details + 'ok')
agent_groups_data = app.get_agent_groups_data()
print(str(agent_groups_data['errors'])) if agent_groups_data['errors'] else print('ok')
skills_data = app.get_skills_data()
print(str(skills_data['errors'])) if skills_data['errors'] else print('ok')
|
Python
| 0.000361
|
@@ -31,16 +31,1506 @@
le_api%0A%0A
+def get_skills(application):%0A skills_data = application.get_skills_data()%0A print(str(skills_data%5B'errors'%5D)) if skills_data%5B'errors'%5D else print('ok')%0A account_skills = %5B%7B%7D%5D%0A for skill in skills_data:%0A account_skills.append(%7B%0A 'id': skill%5B'id'%5D,%0A 'name': skill%5B'name'%5D,%0A %7D)%0A return account_skills%0A%0Adef get_users(application):%0A users_data = application.get_user_data()%0A print(str(users_data%5B'errors'%5D)) if users_data%5B'errors'%5D else print('ok')%0A account_agents = %5B%7B%7D%5D%0A for user in users_data:%0A account_agents.append(%7B%0A 'fullName': user%5B'fullName'%5D,%0A 'id': user%5B'id'%5D,%0A 'memberOf': user%5B'memberOf'%5D%5B'agentGroupId'%5D,%0A 'skills': user%5B'skillIds'%5D,%0A %7D)%0A return account_agents%0A%0Adef get_groups(application):%0A agent_groups_data = app.get_agent_groups_data()%0A print(str(agent_groups_data%5B'errors'%5D)) if agent_groups_data%5B'errors'%5D else print('ok')%0A account_groups = %5B%7B%7D%5D%0A for group in agent_groups_data:%0A account_groups.append(%7B%0A 'id': group%5B'id'%5D,%0A 'name': group%5B'name'%5D,%0A %7D)%0A return account_groups%0A%0Adef enrich_users_data(groups, skills, users):%0A for user in users:%0A for group in groups:%0A if user%5B'memberOf'%5D == group%5B'id'%5D:%0A user%5B'memberOf'%5D = group%0A for skill in skills:%0A if skill in user%5B'skills'%5D:%0A user%5B'skills'%5D = skill%0A return users%0A%0A
app = le
@@ -2026,32 +2026,33 @@
+#
'accountConfigRe
@@ -2091,33 +2091,32 @@
-#
'accountConfigRe
@@ -2154,32 +2154,33 @@
+#
'accountConfigRe
@@ -2298,18 +2298,16 @@
4 hours.
-%0A#
Put the
@@ -2703,247 +2703,407 @@
ent_
-groups_data = app.get_agent_groups_data()%0Aprint(str(agent_groups_data%5B'errors'%5D)) if agent_groups_data%5B'errors'%5D else print('ok')%0A%0Askills_data = app.
+data = enrich_users_data(%0A get_groups(app), %0A get_skills(app), %0A get_agents(app),%0A )%0A%0A# Need to write functions below this line%0Aagent_csv = build_agent_csv(rt_data%5B'agentactivity'%5D, agent_data)%0Aqueue_csv = build_queue_csv(rt_data%5B'queuehealth'%5D, a
ge
+n
t_
-skills_
data
-(
)%0A
-print(str(skills_data%5B'errors'%5D)) if skills_data%5B'errors'%5D else print('ok')%0A
+eng_activity_csv = build_eng_activity_csv(rt_data%5B'engactivity'%5D, agent_data)%0A%0Amail_reports(%5Bagent_csv, queue_csv, eng_activity_csv%5D)
|
3d4c4784d0ca5c56c9159b62016717596f5f92e9
|
add dbase
|
ostap/io/dbase.py
|
ostap/io/dbase.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
# @file compress_dbase.py
#
# Helper module to use databases
#
# @author Vanya BELYAEV Ivan.Belyaev@itep.ru
# @date 2020-05-16
# =============================================================================
""" Helper module to use databases
"""
# =============================================================================
__author__ = "Vanya BELYAEV Ivan.Belyaev@itep.ru"
__date__ = "2020-05-16"
__version__ = "$Revision:$"
# =============================================================================
__all__ = (
'whichdb' , ## guess database type
'dbopen' , ## open database
)
# =============================================================================
import sys
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger ( 'ostap.io.compress_shelve' )
else : logger = getLogger ( __name__ )
# =============================================================================
if sys.version_info.major < 3 : ## PYTHON2
## for python2 <code>bdsdb</code> is part of the standard library
from anydbm import open as dbopen
from whichdb import whichdb
else : ## PYTHON3
## for python3 <code>bdsdb</code> is not a part of the standard library
## make a try to use <code>bdsdb3</code>
try :
import bdsdb3
except ImportError :
bsddb3 = None
if bdsdb3 :
## <code>bdsdb3</code> is available, try to use it as a defauld database
import dbm, io, struct
# =====================================================================
## Guess which db package to use to open a db file.
#
# Return values:
# - None if the database file can't be read;
# - empty string if the file can be read but can't be recognized
# - the name of the dbm submodule (e.g. "ndbm" or "gnu") if recognized.
#
# Importing the given module may still fail, and opening the
# database using that module may still fail.
#
# - Actually it is a bit extended form of <code>dbm.whichdb</code>
# that accounnt for <code>bdsdb3</code>
def whichdb ( filename ) :
"""Guess which db package to use to open a db file.
Return values:
- None if the database file can't be read;
- empty string if the file can be read but can't be recognized
- the name of the dbm submodule (e.g. 'ndbm' or 'gnu') if recognized.
Importing the given module may still fail, and opening the
database using that module may still fail.
- Actually it is a bit extended form of `dbm.whichdb`
        that accounts for `bsddb3`
"""
## use the standard function
        tst = dbm.whichdb ( filename )
        ## identified or non-existing DB ?
        if tst or tst is None : return tst
## non-identified DB
## check for bsddb magic numbers (from python2)
try :
with io.open ( filename ,'rb' ) as f :
# Read the start of the file -- the magic number
s16 = f.read(16)
except OSError :
return None
s = s16[0:4]
# Return "" if not at least 4 bytes
if len(s) != 4:
return ""
# Convert to 4-byte int in native byte order -- return "" if impossible
try:
( magic, ) = struct.unpack("=l", s)
except struct.error:
return ""
# Check for GNU dbm
if magic in (0x13579ace, 0x13579acd, 0x13579acf):
return "dbm.gnu"
# Check for old Berkeley db hash file format v2
if magic in (0x00061561, 0x61150600):
return "bsddb185"
# Later versions of Berkeley db hash file have a 12-byte pad in
# front of the file type
try:
(magic,) = struct.unpack("=l", s16[-4:])
except struct.error:
return ""
# Check for BSD hash
if magic in (0x00061561, 0x61150600):
return "dbhash"
# Unknown
return ""
# =====================================================================
## Open or create database at path given by *file*.
#
# Optional argument *flag* can be 'r' (default) for read-only access, 'w'
# for read-write access of an existing database, 'c' for read-write access
# to a new or existing database, and 'n' for read-write access to a new
# database.
#
# Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
# only if it doesn't exist; and 'n' always creates a new database.
#
# - Actually it is a bit extended form of <code>dbm.open</code>, that
        #   accounts for <code>bsddb3</code>
def dbopen ( file , flag = 'r' , mode=0o666 ):
"""Open or create database at path given by *file*.
Optional argument *flag* can be 'r' (default) for read-only access, 'w'
for read-write access of an existing database, 'c' for read-write access
to a new or existing database, and 'n' for read-write access to a new
database.
Note: 'r' and 'w' fail if the database doesn't exist; 'c' creates it
only if it doesn't exist; and 'n' always creates a new database.
        - Actually it is a bit extended form of `dbm.open` that accounts for `bsddb3`
"""
result = whichdb ( file ) if 'n' not in flag else None
if result is None :
# db doesn't exist or 'n' flag was specified to create a new db
if 'c' in flag or 'n' in flag:
            # file doesn't exist and the new flag was used, so use bsddb3
return bdsdb3.hasopen ( flag , mode )
raise dbm.error[0] ( "db file '%s' doesn't exist; use 'c' or 'n' flag to create a new db" % file )
        elif result in ( 'bsddb' , 'dbhash' , 'bsddb3' , 'bsddb185' ) :
return bdsdb3.hasopen ( flag , mode )
return dbm.open ( file , flag , mode )
else :
from dbm import open as dbopen
from dbm import whichdb
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
## The END
# =============================================================================
|
Python
| 0.000001
|
@@ -1549,18 +1549,18 @@
import b
-d
s
+d
db3%0D%0A
@@ -1615,18 +1615,18 @@
if b
-d
s
+d
db3 :%0D%0A
@@ -6678,34 +6678,34 @@
return b
-d
s
+d
db3.hasopen ( fl
@@ -6985,18 +6985,18 @@
return b
-d
s
+d
db3.haso
|
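For reference, a minimal sketch of the stdlib calls this wrapper builds on (Python 3 assumed; the database file name is hypothetical):

import dbm

# Create a database ('c' creates it if missing) and write one entry.
with dbm.open('example_db', 'c') as db:  # hypothetical file name
    db[b'key'] = b'value'

# whichdb returns the submodule name (e.g. 'dbm.gnu'), '' if the file is
# readable but unrecognised, or None if it cannot be read at all.
print(dbm.whichdb('example_db'))
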
892ce48164c054079820ff712405375fe9eaaafb
|
remove simple analyzer
|
readthedocs/search/documents.py
|
readthedocs/search/documents.py
|
import logging
from django.conf import settings
from django_elasticsearch_dsl import DocType, Index, fields
from elasticsearch import Elasticsearch
from readthedocs.projects.models import HTMLFile, Project
project_conf = settings.ES_INDEXES['project']
project_index = Index(project_conf['name'])
project_index.settings(**project_conf['settings'])
page_conf = settings.ES_INDEXES['page']
page_index = Index(page_conf['name'])
page_index.settings(**page_conf['settings'])
log = logging.getLogger(__name__)
class RTDDocTypeMixin:
def update(self, *args, **kwargs):
# Hack a fix to our broken connection pooling
# This creates a new connection on every request,
# but actually works :)
log.info('Hacking Elastic indexing to fix connection pooling')
self.using = Elasticsearch(**settings.ELASTICSEARCH_DSL['default'])
super().update(*args, **kwargs)
@project_index.doc_type
class ProjectDocument(RTDDocTypeMixin, DocType):
# Metadata
url = fields.TextField(attr='get_absolute_url')
users = fields.NestedField(
properties={
'username': fields.TextField(),
'id': fields.IntegerField(),
}
)
language = fields.KeywordField()
modified_model_field = 'modified_date'
class Meta:
model = Project
fields = ('name', 'slug', 'description')
ignore_signals = True
@classmethod
def faceted_search(cls, query, user, language=None):
from readthedocs.search.faceted_search import ProjectSearch
kwargs = {
'user': user,
'query': query,
}
if language:
kwargs['filters'] = {'language': language}
return ProjectSearch(**kwargs)
@page_index.doc_type
class PageDocument(RTDDocTypeMixin, DocType):
# Metadata
project = fields.KeywordField(attr='project.slug')
version = fields.KeywordField(attr='version.slug')
path = fields.KeywordField(attr='processed_json.path')
full_path = fields.KeywordField(attr='path')
# Searchable content
title = fields.TextField(attr='processed_json.title')
sections = fields.NestedField(
attr='processed_json.sections',
properties={
'id': fields.KeywordField(),
'title': fields.TextField(),
'content': fields.TextField(),
}
)
domains = fields.NestedField(
properties={
'role_name': fields.KeywordField(),
# For linking to the URL
'anchor': fields.KeywordField(),
# For showing in the search result
'type_display': fields.TextField(),
'docstrings': fields.TextField(analyzer='simple'),
# Simple analyzer breaks on `.`,
# otherwise search results are too strict for this use case
'name': fields.TextField(analyzer='simple'),
}
)
modified_model_field = 'modified_date'
class Meta:
model = HTMLFile
fields = ('commit', 'build')
ignore_signals = True
def prepare_domains(self, html_file):
"""Prepares and returns the values for domains field."""
all_domains = []
try:
domains_qs = html_file.sphinx_domains.exclude(
domain='std',
type__in=['doc', 'label']
).iterator()
all_domains = [
{
'role_name': domain.role_name,
'anchor': domain.anchor,
'type_display': domain.type_display,
'docstrings': html_file.processed_json.get(
'domain_data', {}
).get(domain.anchor, ''),
'name': domain.name,
}
for domain in domains_qs
]
log.debug("[%s] [%s] Total domains for file %s are: %s" % (
html_file.project.slug,
html_file.version.slug,
html_file.path,
len(all_domains),
))
except Exception:
log.exception("[%s] [%s] Error preparing domain data for file %s" % (
html_file.project.slug,
html_file.version.slug,
html_file.path,
))
return all_domains
@classmethod
def faceted_search(
cls, query, user, projects_list=None, versions_list=None,
filter_by_user=True
):
from readthedocs.search.faceted_search import PageSearch
kwargs = {
'user': user,
'query': query,
'filter_by_user': filter_by_user,
}
filters = {}
if projects_list is not None:
filters['project'] = projects_list
if versions_list is not None:
filters['version'] = versions_list
kwargs['filters'] = filters
return PageSearch(**kwargs)
def get_queryset(self):
"""Overwrite default queryset to filter certain files to index."""
queryset = super().get_queryset()
# Do not index files that belong to non sphinx project
# Also do not index certain files
queryset = queryset.internal().filter(
project__documentation_type__contains='sphinx'
)
# TODO: Make this smarter
# This was causing issues excluding some valid user documentation pages
# excluded_files = [
# 'search.html',
# 'genindex.html',
# 'py-modindex.html',
# 'search/index.html',
# 'genindex/index.html',
# 'py-modindex/index.html',
# ]
# for ending in excluded_files:
# queryset = queryset.exclude(path=ending)
return queryset
|
Python
| 0.000001
|
@@ -2690,33 +2690,16 @@
xtField(
-analyzer='simple'
),%0A%0A
|
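The analyzer comment above refers to Elasticsearch's simple analyzer, which lowercases and splits on anything that is not a letter; a rough pure-Python approximation of that tokenization, not the actual ES implementation:

import re

def simple_analyze(text):
    # Approximates ES's simple analyzer: split on non-letters, lowercase.
    return [t.lower() for t in re.split(r'[^a-zA-Z]+', text) if t]

# 'os.path.join' becomes three searchable terms rather than one strict token.
print(simple_analyze('os.path.join'))  # ['os', 'path', 'join']
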
216fabd077d5868eebca5b08c27f9907d023a4d9
|
Add back erroneously removed validation statement
|
exdb/forms.py
|
exdb/forms.py
|
from datetime import datetime
from django import forms
from django.core.exceptions import ValidationError
from django.utils.timezone import now
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe
from django.utils.html import format_html
from django.utils.translation import ugettext as _
from django.forms import ModelForm
from django.utils.timezone import utc
from exdb.models import Experience, ExperienceComment
class SubtypeSelect(forms.SelectMultiple):
def render_option(self, selected_choices, option_value, option_label):
if option_value is None:
option_value = '' # pragma: no cover
option_value = force_text(option_value)
if option_value in selected_choices:
selected_html = mark_safe(' selected="selected"')
else:
selected_html = ''
css_classes = []
choice_dict = {str(c.pk): c for c in self.choices.queryset}
if option_value in choice_dict and not choice_dict[option_value].needs_verification:
css_classes.append('no-verification')
else:
css_classes.append('verification')
return format_html('<option class="{}" value="{}"{}>{}</option>',
' '.join(css_classes),
option_value,
selected_html,
force_text(option_label))
class ExperienceSaveForm(ModelForm):
class Meta:
model = Experience
fields = [
'name',
'type',
'subtypes',
'description',
'goals',
'planners',
'recognition',
'start_datetime',
'end_datetime',
'audience',
'attendance',
'keywords',
'next_approver',
'guest',
'guest_office',
'funds',
'conclusion',
]
widgets = {
'description': forms.Textarea(attrs={'cols': 40, 'rows': 4}),
'goals': forms.Textarea(attrs={'cols': 40, 'rows': 4}),
'subtypes': SubtypeSelect(),
'conclusion': forms.Textarea(attrs={'cols': 40, 'rows': 4}),
}
labels = {
'start_datetime': 'Start Time',
'end_datetime': 'End Time',
'next_approver': 'Supervisor',
'name': 'Title',
}
def __init__(self, *args, **kwargs):
when = kwargs.pop('when', now())
submit = kwargs.pop('submit', None)
super(ExperienceSaveForm, self).__init__(*args, **kwargs)
self.when = when
self.approval_form = submit
class ExperienceSubmitForm(ExperienceSaveForm):
def clean(self):
ex_subtype = self.cleaned_data.get('subtypes')
needs_verification = True
if ex_subtype:
needs_verification = any(x.needs_verification for x in ex_subtype)
min_dt = datetime.min.replace(tzinfo=utc)
max_dt = datetime.max.replace(tzinfo=utc)
# conditions format (validation_check, validation_error),
conditions = (
(not self.cleaned_data.get('description'), ValidationError(_('A description is required'))),
(not self.cleaned_data.get('end_datetime'), ValidationError(_('An end time is required'))),
(not self.cleaned_data.get('start_datetime'), ValidationError(_('A start time is required'))),
(not self.cleaned_data.get('type'), ValidationError(_('The type field is required'))),
(not ex_subtype, ValidationError(_('The subtype field is required'))),
(needs_verification and not self.approval_form and
not self.cleaned_data.get('next_approver'),
ValidationError(_('Please select the supervisor to review this experience'))),
(needs_verification is False and (self.cleaned_data.get('start_datetime', max_dt) > self.when),
ValidationError(_('This experience must have a start date in the past'))),
(needs_verification and (self.cleaned_data.get('start_datetime', max_dt) < self.when),
ValidationError(_('This experience must have a start date in the future'))),
(self.cleaned_data.get('start_datetime', max_dt) >= self.cleaned_data.get('end_datetime', min_dt),
ValidationError(_('Start time must be before end time'))),
(needs_verification is False and (not self.cleaned_data.get(
'attendance') or self.cleaned_data.get('attendance') < 1),
ValidationError(_('An attendance is required'))),
(needs_verification is False and not self.cleaned_data.get('audience'),
ValidationError(_('An audience is required'))),
(needs_verification and self.cleaned_data.get('attendance'),
ValidationError(_('An attendance is not allowed yet'))),
(needs_verification and self.cleaned_data.get('next_approver')
and not self.cleaned_data.get('next_approver').is_hallstaff(),
ValidationError(_('Supervisor must have permissions to approve and deny experiences'))),
(needs_verification is False and not self.cleaned_data.get('conclusion'),
ValidationError(_('A conclusion is required'))),
)
validation_errors = []
for condition, invalid in conditions:
if condition:
validation_errors.append(invalid)
if validation_errors:
raise ValidationError(validation_errors)
# If user passes conclusion and exp needs verification
# Remove the conclusion since the experience hasn't happened yet.
if needs_verification and self.cleaned_data.get('conclusion'):
self.cleaned_data['conclusion'] = ""
return self.cleaned_data
class ExperienceConclusionForm(ModelForm):
class Meta:
model = Experience
fields = ['attendance', 'conclusion']
widgets = {
'conclusion': forms.Textarea(attrs={'cols': 40, 'rows': 4}),
}
def clean(self):
conditions = (
(not self.cleaned_data.get('attendance'), ValidationError(_('There must be an attendance'))),
(self.cleaned_data.get('attendance') and self.cleaned_data.get('attendance') < 0,
ValidationError(_('There cannot be a negative attendance'))),
(not self.cleaned_data.get('conclusion'), ValidationError(_('Please enter a conclusion'))),
)
validation_errors = []
for condition, invalid in conditions:
if condition:
validation_errors.append(invalid)
if validation_errors:
raise ValidationError(validation_errors)
return self.cleaned_data
class ApprovalForm(ModelForm):
message = forms.CharField(widget=forms.Textarea(attrs={'cols': 40, 'rows': 4}))
class Meta:
model = ExperienceComment
exclude = ['experience', 'author', 'timestamp']
def clean(self):
if not self.cleaned_data.get('message'):
raise ValidationError(_('There must be a comment if the Experience is denied.'))
return self.cleaned_data
|
Python
| 0.000001
|
@@ -4110,26 +4110,26 @@
datetime', m
-ax
+in
_dt) %3C self.
@@ -4124,32 +4124,59 @@
_dt) %3C self.when
+ and not self.approval_form
),%0A
|
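The (condition, error) tuples in clean() follow an accumulate-then-raise idiom; a minimal standalone sketch of the same pattern, with invented field names:

def validate(data):
    errors = []
    checks = (
        (not data.get('name'), 'A name is required'),            # hypothetical field
        (data.get('count', 0) < 0, 'Count cannot be negative'),  # hypothetical field
    )
    for failed, message in checks:
        if failed:
            errors.append(message)
    if errors:
        # Raise all collected problems at once instead of failing fast.
        raise ValueError(errors)
    return data

print(validate({'name': 'ok', 'count': 1}))
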
7760765e32b81bad6957d333850293db86a2bbcf
|
Fixes 'Document instance has no attribute 'file_basename''
|
doc2text/__init__.py
|
doc2text/__init__.py
|
import PyPDF2 as pyPdf
import PythonMagick
import os
import mimetypes
import cv2
from .page import Page
acceptable_mime = ["image/bmp", "image/png", "image/tiff", "image/jpg", "video/JPEG", "video/jpeg2000"]
def main():
"""Entry point for the application script"""
print("Call your main application code here")
FileNotAcceptedException = Exception('The filetype is not acceptable. We accept bmp, png, tiff, jpg, jpeg, jpeg2000, and PDF.')
class Document:
def __init__(self):
self.pages = []
self.processed_pages = []
self.page_content = []
self.prepared = False
self.error = None
def read(self, path):
filename, self.file_extension = os.path.splitext(path)
self.path = path
self.filename = os.path.basename(path)
self.mime_type = mimetypes.guess_type(path)
self.file_basepath = os.path.dirname(path)
# If the file is a pdf, split the pdf and prep the pages.
if self.mime_type[0] == "application/pdf":
file_temp = open(self.path, 'rb')
pdf_reader = pyPdf.PdfFileReader(file_temp)
self.num_pages = pdf_reader.numPages
try:
for i in xrange(self.num_pages):
output = pyPdf.PdfFileWriter()
output.addPage(pdf_reader.getPage(i))
path = 'temp.pdf'
im_path = 'temp.png'
with open(path, 'wb') as f:
output.write(f)
im = PythonMagick.Image()
im.density("300")
im.read(path)
im.write(im_path)
orig_im = cv2.imread(im_path, 0)
page = Page(orig_im, i)
self.pages.append(page)
os.remove(path)
os.remove(im_path)
self.prepared = True
except Exception as e:
self.error = e
raise
# If the file is an image, think of it as a 1-page pdf.
elif self.mime_type[0] in acceptable_mime:
self.num_pages = 1
im = PythonMagick.Image()
im.density("300")
im.read(path)
temp_path = os.path.normpath(os.path.join(
self.file_basepath, self.file_basename + '_temp.png'
))
            im.write(temp_path)
orig_im = cv2.imread(temp_path, 0)
os.remove(temp_path)
page = Page(orig_im, 0)
self.pages.append(page)
# Otherwise, out of luck.
else:
print self.mime_type[0]
raise FileNotAcceptedException
def process(self):
for page in self.pages:
new = page
new.crop()
new.deskew()
self.processed_pages.append(new)
def extract_text(self):
if len(self.processed_pages) > 0:
for page in self.processed_pages:
new = page
text = new.extract_text()
self.page_content.append(text)
else:
raise Exception('You must run `process()` first.')
def get_text(self):
if len(self.page_content) > 0:
return "\n".join(self.page_content)
else:
raise Exception('You must run `extract_text()` first.')
def save_pages(self):
# TODO
stuff = stuff
|
Python
| 0.997427
|
@@ -666,24 +666,29 @@
+self.
filename
, self.f
@@ -683,126 +683,140 @@
name
-, self.file_extension = os.path.splitext(path)%0A self.path = path%0A self.filename = os.path.basename(path)
+ = os.path.basename(path)%0A self.file_basename, self.file_extension = os.path.splitext(self.filename)%0A self.path = path
%0A
|
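The fix works because splitext is applied to the basename rather than the full path; a small sketch of the corrected call order (path is hypothetical):

import os

path = '/tmp/docs/report.pdf'       # hypothetical path
filename = os.path.basename(path)   # 'report.pdf'
basename, ext = os.path.splitext(filename)
print(filename, basename, ext)      # report.pdf report .pdf
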
d40f1fe493ec2c71d84ac84f5dc989c68de321ca
|
add version option
|
batch_isp.py
|
batch_isp.py
|
import argparse
from parts import Parts
from pgm_error import PgmError
from operations import Operations
from serial_io import SerialIO
class BatchISP:
def __init__(self):
parser = argparse.ArgumentParser(
description='Linux remake of Atmel\'s BatchISP utility.')
parser.add_argument('-device', type=str, required=True,
help="Device type, ? for list.")
parser.add_argument('-port', type=str,
help="Port/interface to connect.")
parser.add_argument('-hardware', type=str,
help="{ RS232 | TODO }")
parser.add_argument('-operation', type=str, required=True, nargs='*',
help="... ??? TODO")
self._args = parser.parse_args()
self._parser = parser
def _getIOByHardwareName(self, hardware):
if hardware == 'RS232':
if self._args.port is None:
                raise PgmError("Port not specified for RS232")
            return SerialIO(self._args.port)
        else:
            raise PgmError("Unsupported hardware: %s" % hardware)
def run(self):
if self._args.device == '?':
parts = Parts()
print([part.getName() for part in parts.list()])
return 0
try:
part = Parts().getPartByName(self._args.device)
if not self._args.hardware is None:
                hw = self._args.hardware
else:
hw = part.listHardware()
if len(hw) != 1:
                raise PgmError("Cannot determine hardware; select one of: %s" % hw)
hw = hw[0]
io = self._getIOByHardwareName(hw)
operations = Operations(part, io)
for op in self._args.operation:
print(op)
except PgmError as e:
print(e)
exit(1)
|
Python
| 0.000001
|
@@ -590,16 +590,100 @@
ODO %7D%22)%0A
+ parser.add_argument('-version', action='version', version='%25(prog)s 0.0.0')%0A
|
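The added -version flag uses argparse's built-in 'version' action, which prints the string and exits; a self-contained sketch:

from argparse import ArgumentParser

parser = ArgumentParser(prog='batch_isp')
parser.add_argument('-version', action='version', version='%(prog)s 0.0.0')
# Invoking the script with -version prints 'batch_isp 0.0.0' and exits 0;
# parsing an empty argument list below leaves the action untriggered.
args = parser.parse_args([])
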
84fc6f4e05c30e368b869b0e5af80b90db5b0ace
|
Write generic push instructions for the time being
|
Lib/extractor/stream.py
|
Lib/extractor/stream.py
|
# -*- coding: utf-8 -*-
from fontTools.misc.textTools import num2binary
from fontTools.ttLib.tables.ttProgram import streamOpcodeDict, opcodeDict
from io import BytesIO
class InstructionStream(object):
"""
:param program_bytes: The program bytecode.
:type program_bytes: bytes
The instruction stream.
"""
def __init__(self, instruction_processor=None, program_bytes=b""):
self.io = BytesIO(program_bytes)
def rewind(self):
"""
Rewind the instruction pointer to the beginning of the stream.
"""
self.io.seek(0)
def read_byte(self):
"""
Read a byte from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (byte, int).
"""
b = self.io.read(1)
if not b:
return False
return b, int.from_bytes(b, byteorder="big", signed=False)
def read_word(self):
"""
Read a word from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (word, int).
"""
w = self.io.read(2)
if not w:
return False
return w, int.from_bytes(w, byteorder="big", signed=True)
def __repr__(self):
"""
Print the instructions from the bytecode in the current stream starting
at the beginning.
"""
self.rewind()
asm = ""
indent = 0
more = True
while more:
opcode = self.io.read(1)
if opcode:
opcode = int.from_bytes(opcode, byteorder="big", signed=False)
cmd_info = streamOpcodeDict.get(opcode, None)
if cmd_info is None:
cmd_info = opcodeDict.get(opcode, None)
if cmd_info is None:
print(
asm + "\n"
"Illegal opcode 0x%02x at offset 0x%04x."
% (int(opcode), self.io.tell(),)
)
raise KeyError
cmd_name, arg_bits, base_opcode, name = cmd_info
args = []
if cmd_name in ("EIF", "ELSE", "ENDF"):
indent -= 1
if cmd_name in ("NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
# PUSH instructions read their arguments from the stream
if cmd_name.startswith("PUSH"):
# Take number of arguments from the opcode
num_args = opcode - base_opcode + 1
else:
# Take number of arguments from the stream
_, num_args = self.read_byte()
args.append(str(num_args))
if cmd_name.endswith("B"):
for n in range(num_args):
_, i = self.read_byte()
args.append(str(i))
else:
for n in range(num_args):
_, i = self.read_word()
args.append(str(i))
arg_bits = 0 # Don't output bits for push instructions
if arg_bits == 0:
asm += "\n%s%s" % (" " * indent, cmd_name,)
else:
asm += "\n%s%s[%s]" % (
" " * indent,
cmd_name,
num2binary(opcode - base_opcode, arg_bits),
)
if args:
asm += " " + " ".join(args)
if cmd_name in ("ELSE", "FDEF", "IF"):
indent += 1
else:
more = False
return asm.strip()
|
Python
| 0.000002
|
@@ -2731,32 +2731,34 @@
+ #
args.append(str
@@ -2769,16 +2769,90 @@
_args))%0A
+ # TODO: Reactivate when NPUSH can be roundtripped%0A
@@ -3291,16 +3291,96 @@
ructions
+%0A cmd_name = %22push%22 # Write generic push instruction for now
%0A%0A
|
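In TrueType bytecode, PUSHB encodes its argument count in the opcode itself (0xB0 through 0xB7 push 1 to 8 bytes), while NPUSHB reads the count from the stream; a minimal decoder sketch for the PUSHB case:

def decode_pushb(code, pos=0):
    # PUSHB[n]: opcode 0xB0 + n pushes n + 1 following bytes.
    opcode = code[pos]
    assert 0xB0 <= opcode <= 0xB7, 'not a PUSHB opcode'
    n_args = opcode - 0xB0 + 1
    args = list(code[pos + 1: pos + 1 + n_args])
    return args, pos + 1 + n_args

# PUSHB[2] pushing the bytes 10, 20, 30:
print(decode_pushb(bytes([0xB2, 10, 20, 30])))  # ([10, 20, 30], 4)
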
4943fc51157cf9f03102cb58fbd9f73da5e199e9
|
Add additional logic checks in CheckoutLineDelete mutation
|
saleor/graphql/checkout/mutations.py
|
saleor/graphql/checkout/mutations.py
|
import graphene
from django.db import transaction
from ...account.models import Address
from ...checkout import models
from ...checkout.utils import add_variant_to_cart
from ..account.types import AddressInput, User
from ..core.mutations import BaseMutation, ModelMutation
from ..order.mutations.draft_orders import check_lines_quantity
from ..product.types import ProductVariant
from ..utils import get_node, get_nodes
from .types import Checkout, CheckoutLine
class CheckoutLineInput(graphene.InputObjectType):
quantity = graphene.Int(description='quantity')
variant_id = graphene.ID(description='ID of product variant')
class CheckoutCreateInput(graphene.InputObjectType):
lines = graphene.List(
CheckoutLineInput, description='Checkout lines')
email = graphene.String(description='Customer email')
shipping_address = AddressInput(description='Shipping address')
class CheckoutCreate(ModelMutation):
class Arguments:
input = CheckoutCreateInput(
required=True, description='Data required to create Checkout')
class Meta:
description = 'Create a new Checkout'
model = models.Cart
return_field_name = 'checkout'
@classmethod
def clean_input(cls, info, instance, input, errors):
shipping_address = input.pop('shipping_address', None)
cleaned_input = super().clean_input(info, instance, input, errors)
lines = input.pop('lines', None)
if lines:
variant_ids = [line.get('variant_id') for line in lines]
variants = get_nodes(variant_ids, ProductVariant)
quantities = [line.get('quantity') for line in lines]
line_errors = check_lines_quantity(variants, quantities)
if line_errors:
for err in line_errors:
cls.add_error(errors, field=err[0], message=err[1])
else:
cleaned_input['variants'] = variants
cleaned_input['quantities'] = quantities
if shipping_address:
shipping_address = Address(**shipping_address)
cls.clean_instance(shipping_address, errors)
cleaned_input['shipping_address'] = shipping_address
return cleaned_input
@classmethod
def save(cls, info, instance, cleaned_input):
shipping_address = cleaned_input.get('shipping_address')
if shipping_address:
shipping_address.save()
instance.shipping_address = shipping_address
super().save(info, instance, cleaned_input)
instance.save(update_fields=['shipping_address'])
variants = cleaned_input.get('variants')
quantities = cleaned_input.get('quantities')
if variants and quantities:
for variant, quantity in zip(variants, quantities):
add_variant_to_cart(instance, variant, quantity)
class CheckoutLinesAdd(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID', required=True)
lines = graphene.List(
CheckoutLineInput, description='Checkout lines', required=True)
checkout = graphene.Field(Checkout, description='An updated checkout')
class Meta:
description = 'Adds a checkout line to existing checkout'
@classmethod
def mutate(cls, root, info, checkout_id, lines, replace=False):
checkout = get_node(info, checkout_id, only_type=Checkout)
variants, quantities = None, None
errors = []
if lines:
variant_ids = [line.get('variant_id') for line in lines]
variants = cls.get_nodes_or_error(
ids=variant_ids, errors=errors, field='variant_id',
only_type=ProductVariant)
quantities = [line.get('quantity') for line in lines]
line_errors = check_lines_quantity(variants, quantities)
if line_errors:
for err in line_errors:
cls.add_error(errors, field=err[0], message=err[1])
if variants and quantities:
for variant, quantity in zip(variants, quantities):
add_variant_to_cart(checkout, variant, quantity, replace=replace)
return CheckoutLinesAdd(checkout=checkout, errors=errors)
class CheckoutLinesUpdate(CheckoutLinesAdd):
checkout = graphene.Field(Checkout, description='An updated checkout')
class Meta:
description = 'Updates checkout line in existing checkout'
@classmethod
def mutate(cls, root, info, checkout_id, lines):
return super().mutate(root, info, checkout_id, lines, replace=True)
class CheckoutLineDelete(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID', required=True)
line_id = graphene.ID(description='Checkout line ID')
checkout = graphene.Field(Checkout, description='An updated checkout')
class Meta:
description = 'Deletes a checkout line'
@classmethod
def mutate(cls, root, info, checkout_id, line_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
line = cls.get_node_or_error(
info, line_id, errors, 'line_id', only_type=CheckoutLine)
line.delete()
        return CheckoutLineDelete(checkout=checkout, errors=errors)
class CheckoutCustomerAttach(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID', required=True)
customer_id = graphene.ID(description='Customer ID', required=True)
checkout = graphene.Field(Checkout, description='An updated checkout')
@classmethod
def mutate(cls, root, info, checkout_id, customer_id):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
customer = cls.get_node_or_error(
info, customer_id, errors, 'customer_id', only_type=User)
if checkout and customer:
checkout.user = customer
checkout.save(update_fields=['user'])
return CheckoutCustomerAttach(checkout=checkout, errors=errors)
class CheckoutCustomerDetach(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID', required=True)
checkout = graphene.Field(Checkout, description='An updated checkout')
@classmethod
def mutate(cls, root, info, checkout_id):
checkout = get_node(info, checkout_id, only_type=Checkout)
if checkout:
checkout.user = None
checkout.save(update_fields=['user'])
return CheckoutCustomerDetach(checkout=checkout)
class CheckoutShippingAddressUpdate(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID')
shipping_address = AddressInput(description='Shipping address')
checkout = graphene.Field(Checkout, description='An updated checkout')
class Meta:
description = 'Update shipping address in existing Checkout object'
@classmethod
def mutate(cls, root, info, checkout_id, shipping_address):
errors = {}
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
shipping_address = Address(**shipping_address)
if checkout and shipping_address:
with transaction.atomic():
shipping_address.save()
checkout.shipping_address = shipping_address
checkout.save(update_fields=['shipping_address'])
return CheckoutShippingAddressUpdate(checkout=checkout, errors=errors)
class CheckoutEmailUpdate(BaseMutation):
class Arguments:
checkout_id = graphene.ID(description='Checkout ID')
email = graphene.String(required=True, description='email')
checkout = graphene.Field(Checkout, description='An updated checkout')
class Meta:
description = 'Update email address in existing Checkout object'
@classmethod
def mutate(cls, root, info, checkout_id, email):
errors = []
checkout = cls.get_node_or_error(
info, checkout_id, errors, 'checkout_id', only_type=Checkout)
if checkout:
checkout.email = email
checkout.save(update_fields=['email'])
return CheckoutEmailUpdate(checkout=checkout, errors=errors)
|
Python
| 0
|
@@ -5267,24 +5267,78 @@
eckoutLine)%0A
+ if line and line in checkout.lines.all():%0A
line
|
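The added guard deletes a line only when it exists and belongs to the checkout; the same membership check reduced to plain Python, with invented names:

def delete_line(checkout_lines, line):
    # Remove the line only if it is present and attached to this checkout.
    if line is not None and line in checkout_lines:
        checkout_lines.remove(line)
    return checkout_lines

print(delete_line(['line-1', 'line-2'], 'line-2'))  # ['line-1']
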
bd905379bc2e2f6830ee61a094ca8e4cff22ac1c
|
Remove hardcoded pducb factor.
|
behaviors.py
|
behaviors.py
|
from datastructure import EnlargeableArray
from prediction import predict_on_volume
from qrsim.tcpclient import UAVControls
import numpy as np
from numpy.linalg import norm
import numpy.random as rnd
class VelocityTowardsWaypointController(object):
def __init__(self, maxv, max_climb):
self.maxv = maxv
self.max_climb = max_climb
def get_controls(self, noisy_states, targets):
assert len(noisy_states) == len(targets)
controls = UAVControls(len(noisy_states), 'vel')
for uav in xrange(len(noisy_states)):
phi = noisy_states[uav].phi
theta = noisy_states[uav].theta
psi = noisy_states[uav].psi
X = np.matrix(
[[1, 0, 0],
[0, np.cos(phi), -np.sin(phi)],
[0, np.sin(phi), np.cos(phi)]])
Y = np.matrix(
[[np.cos(theta), 0, np.sin(theta)],
[0, 1, 0],
[-np.sin(theta), 0, np.cos(theta)]])
Z = np.matrix(
[[np.cos(psi), -np.sin(psi), 0],
[np.sin(psi), np.cos(psi), 0],
[0, 0, 1]])
world_v = 0.25 * np.array(
[self.maxv, self.maxv, self.max_climb]) * \
(targets[uav] - noisy_states[uav].position)
if norm(world_v[:2]) > self.maxv:
world_v[:2] *= self.maxv / norm(world_v[:2])
world_v[2] = np.clip(world_v[2], -self.max_climb, self.max_climb)
controls.U[uav, :] = np.dot(Z * Y * X, world_v)
return controls
class RandomMovement(object):
def __init__(self, maxv, height):
self.maxv = maxv
self.height = height
def get_controls(self, noisy_states, plume_measurement):
controls = UAVControls(len(noisy_states), 'vel')
for uav in xrange(len(noisy_states)):
# random velocity direction scaled by the max allowed velocity
xy_vel = rnd.rand(2) - 0.5
if norm(xy_vel) != 0:
xy_vel /= norm(xy_vel)
controls.U[uav, :2] = 0.5 * self.maxv * xy_vel
# if the uav is going astray we point it back to the center
p = np.asarray(noisy_states[uav].position[:2])
if norm(p) > 100:
controls.U[uav, :2] = -0.8 * self.maxv * p / norm(p)
# control height
controls.U[uav, 2] = max(-self.maxv, min(
self.maxv,
0.25 * self.maxv * (self.height - noisy_states[uav].z)))
return controls
class ToMaxVariance(object):
def __init__(
self, margin, predictor, grid_resolution, area,
duration_in_steps=1000):
self.margin = margin
self.predictor = predictor
self.grid_resolution = grid_resolution
self.area = area
self.expected_steps = duration_in_steps
self.step = 0
self._controller = VelocityTowardsWaypointController(3, 3)
def get_controls(self, noisy_states, plume_measurement):
if self.step == 0:
self.positions = EnlargeableArray(
(len(noisy_states), 3), self.expected_steps)
self.plume_measurements = EnlargeableArray(
(len(noisy_states),), self.expected_steps)
self.positions.append([s.position for s in noisy_states])
self.plume_measurements.append(plume_measurement)
self.step += 1
if self.positions.data.size // 3 < 2:
b = RandomMovement(3, np.mean(self.get_effective_area()[2]))
return b.get_controls(noisy_states, plume_measurement)
# FIXME remove or do only for scikit learn
#predictor = sklearn.base.clone(self.predictor)
self.predictor.fit(
self.positions.data.reshape((-1, 3)),
self.plume_measurements.data.flatten())
unused, mse, (x, y, z) = predict_on_volume(
self.predictor, self.get_effective_area(), self.grid_resolution)
wp_idx = np.unravel_index(np.argmax(mse), x.shape)
targets = np.array(
len(noisy_states) * [[x[wp_idx], y[wp_idx], z[wp_idx]]])
return self._controller.get_controls(noisy_states, targets)
def get_effective_area(self):
return self.area + np.array([self.margin, -self.margin])
class PDUCB(object):
def __init__(
self, margin, predictor, grid_resolution, area, kappa, gamma,
target_precision, duration_in_steps=1000):
self.margin = margin
self.predictor = predictor
self.grid_resolution = grid_resolution
self.area = area
self.kappa = kappa
self.gamma = gamma
self.target_precision = target_precision
self.expected_steps = duration_in_steps
self.step = 0
self._controller = VelocityTowardsWaypointController(3, 3)
self.targets = None
def __repr__(self):
return self.__class__.__name__ + '(margin=%(margin)r, ' \
'predictor=%(predictor)r, grid_resolution=%(grid_resolution)r, ' \
'area=%(area)r, kappa=%(kappa)r, gamma=%(gamma)r, ' \
'target_precision=%(target_precision)r)' % self.__dict__
def get_controls(self, noisy_states, plume_measurement):
if self.step == 0:
self.positions = EnlargeableArray(
(len(noisy_states), 3), self.expected_steps)
self.plume_measurements = EnlargeableArray(
(len(noisy_states),), self.expected_steps)
self.positions.append([s.position for s in noisy_states])
self.plume_measurements.append(plume_measurement)
self.step += 1
if self.positions.data.size // 3 < 2:
self.targets = np.array([s.position for s in noisy_states])
controls = UAVControls(len(noisy_states), 'vel')
controls.U.fill(0.0)
return controls
if norm(self.targets - noisy_states[0].position) < \
self.target_precision:
# FIXME remove or do only for scikit learn
#predictor = sklearn.base.clone(self.predictor)
self.predictor.fit(
self.positions.data.reshape((-1, 3)),
self.plume_measurements.data.flatten())
pred, mse, (x, y, z) = predict_on_volume(
self.predictor, self.get_effective_area(),
self.grid_resolution)
dist = np.apply_along_axis(
norm, 1, np.column_stack((x.flat, y.flat, z.flat)) -
self.positions.data[-1]).reshape(x.shape)
ducb = 0.15e-12 * np.log(pred + 1e-30) + \
self.kappa * np.sqrt(mse) + self.gamma * dist ** 2
wp_idx = np.unravel_index(np.argmax(ducb), x.shape)
self.targets = np.array(
len(noisy_states) * [[x[wp_idx], y[wp_idx], z[wp_idx]]])
return self._controller.get_controls(noisy_states, self.targets)
def get_effective_area(self):
return self.area + np.array([self.margin, -self.margin])
|
Python
| 0.000021
|
@@ -6580,19 +6580,8 @@
cb =
- 0.15e-12 *
np.
|
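The waypoint score above is a distance-penalized UCB: log of the predicted mean, plus kappa times the predictive standard deviation, plus gamma times squared distance. A numpy sketch of scoring a candidate grid, with all arrays synthetic:

import numpy as np

rng = np.random.default_rng(0)
pred = rng.random(100)        # synthetic plume predictions
mse = rng.random(100)         # synthetic predictive variance
dist = rng.random(100) * 50   # synthetic distances to each grid cell

kappa, gamma = 1.0, 1e-3
ducb = np.log(pred + 1e-30) + kappa * np.sqrt(mse) + gamma * dist ** 2
best = np.argmax(ducb)        # index of the next waypoint candidate
print(best, ducb[best])
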
1ea72ca96f0f43bd80baa9fb41ec930ea02de271
|
fix name error
|
sfa/rspecs/sfa_rspec_converter.py
|
sfa/rspecs/sfa_rspec_converter.py
|
#!/usr/bin/python
from lxml import etree
from StringIO import StringIO
from sfa.util.xrn import *
from sfa.rspecs.sfa_rspec import SfaRSpec
from sfa.rspecs.pg_rspec import PGRSpec
class SfaRSpecConverter:
@staticmethod
def to_pg_rspec(rspec):
if isinstance(rspec, SfaRSpec):
sfa_rspec = rspec
else:
sfa_rspec = SfaRSpec(rspec=rspec)
pg_rspec = PGRSpec()
# get networks
networks = sfa_rspec.get_networks()
for network in networks:
# get nodes
sfa_node_elements = sfa_rspec.get_node_elements(network=network)
for sfa_node_element in sfa_node_elements:
# create node element
node_attrs = {}
node_attrs['exclusive'] = 'false'
node_attrs['component_manager_id'] = network
if sfa_node_element.find('hostname') != None:
node_attrs['component_name'] = sfa_node_element.find('hostname').text
if sfa_node_element.find('urn') != None:
node_attrs['component_id'] = sfa_node_element.find('urn').text
node_element = pg_rspec.add_element('node', node_attrs)
# create node_type element
for hw_type in ['plab-pc', 'pc']:
hdware_type_element = pg_rspec.add_element('hardware_type', {'name': hw_type}, parent=node_element)
# create available element
pg_rspec.add_element('available', {'now': 'true'}, parent=node_element)
            # create location element
# We don't actually associate nodes with a country.
# Set country to "unknown" until we figure out how to make
# sure this value is always accurate.
location = sfa_node_element.find('location')
if location != None:
location_attrs = {}
location_attrs['country'] = locatiton.get('country', 'unknown')
                location_attrs['latitude'] = location.get('latitude', 'None')
location_attrs['longitude'] = location.get('longitude', 'None')
pg_rspec.add_element('location', location_attrs, parent=node_element)
sliver_element = sfa_node_element.find('sliver')
if sliver_element != None:
pg_rspec.add_element('sliver_type', {'name': 'planetlab-vnode'}, parent=node_element)
return pg_rspec.toxml()
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
print SfaRSpecConverter.to_pg_rspec(sys.argv[1])
|
Python
| 0.000024
|
@@ -2005,16 +2005,16 @@
'%5D =
+
locati
-t
on.g
|
c0a075810e3d92295ade789c24d141c1dbba60c4
|
Add support for secret driver in create_secret
|
docker/api/secret.py
|
docker/api/secret.py
|
import base64
import six
from .. import utils
class SecretApiMixin(object):
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
Returns (dict): ID of the newly created secret
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
id (string): Full ID of the secret to remove
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
|
Python
| 0
|
@@ -20,16 +20,38 @@
rt six%0A%0A
+from .. import errors%0A
from ..
@@ -63,16 +63,16 @@
t utils%0A
-
%0A%0Aclass
@@ -177,24 +177,37 @@
labels=None
+, driver=None
):%0A %22
@@ -431,24 +431,175 @@
o the secret
+%0A driver (DriverConfig): A custom driver configuration. If%0A unspecified, the default %60%60internal%60%60 driver will be used
%0A%0A
@@ -936,16 +936,16 @@
labels%0A
-
@@ -947,16 +947,281 @@
%7D%0A%0A
+ if driver is not None:%0A if utils.version_lt(self._version, '1.31'):%0A raise errors.InvalidVersion(%0A 'Secret driver is only available for API version %3E 1.31'%0A )%0A%0A body%5B'Driver'%5D = driver%0A%0A
|
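The secret payload is base64-encoded before being posted to the API; the encoding step in isolation (Python 3; the secret bytes are hypothetical):

import base64

data = b'swordfish'                       # hypothetical secret bytes
encoded = base64.b64encode(data).decode('ascii')
print(encoded)                            # c3dvcmRmaXNo
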
3ee08a354df1f6e008d1d2de6552a0befa948365
|
Call _handle_query_error correctly
|
designate/mdns/handler.py
|
designate/mdns/handler.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Kiall Mac Innes <kiall@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import dns
from oslo.config import cfg
from designate import exceptions
from designate import storage
from designate.openstack.common import log as logging
from designate.i18n import _LE
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class RequestHandler(object):
def __init__(self):
# Get a storage connection
storage_driver = cfg.CONF['service:mdns'].storage_driver
self.storage = storage.get_storage(storage_driver)
def __call__(self, request):
"""
:param request: DNS Request Message
:return: DNS Response Message
"""
context = request.environ['context']
if request.opcode() == dns.opcode.QUERY:
# Currently we expect exactly 1 question in the section
# TSIG places the pseudo records into the additional section.
if (len(request.question) != 1 or
request.question[0].rdclass != dns.rdataclass.IN):
return self._handle_query_error(request, dns.rcode.REFUSED)
q_rrset = request.question[0]
if q_rrset.rdtype == dns.rdatatype.AXFR:
response = self._handle_axfr(context, request)
else:
response = self._handle_record_query(context, request)
else:
# Unhandled OpCode's include STATUS, IQUERY, NOTIFY, UPDATE
response = self._handle_query_error(
context, request, dns.rcode.REFUSED)
return response
def _handle_query_error(self, context, request, rcode):
"""
Construct an error response with the rcode passed in.
:param request: The decoded request from the wire.
:param rcode: The response code to send back
:return: A dns response message with the response code set to rcode
"""
response = dns.message.make_response(request)
response.set_rcode(rcode)
return response
def _convert_to_rrset(self, context, recordset, domain=None):
# Fetch the domain or the config ttl if the recordset ttl is null
if recordset.ttl:
ttl = recordset.ttl
elif domain is not None:
ttl = domain.ttl
else:
domain = self.storage.get_domain(context, recordset.domain_id)
if domain.ttl:
ttl = domain.ttl
else:
ttl = CONF.default_ttl
# construct rdata from all the records
rdata = []
for record in recordset.records:
# dnspython expects data to be a string. convert record
# data from unicode to string
# For MX and SRV records add a priority field.
if recordset.type == "MX" or recordset.type == "SRV":
rdata.append(str.format(
"%d %s" % (record.priority, str(record.data))))
else:
rdata.append(str(record.data))
# Now put the records into dnspython's RRsets
# answer section has 1 RR set. If the RR set has multiple
# records, DNSpython puts each record in a separate answer
# section.
# RRSet has name, ttl, class, type and rdata
# The rdata has one or more records
r_rrset = dns.rrset.from_text_list(
recordset.name, ttl, dns.rdataclass.IN, recordset.type, rdata)
return r_rrset
def _handle_axfr(self, context, request):
response = dns.message.make_response(request)
q_rrset = request.question[0]
# First check if there is an existing zone
# TODO(vinod) once validation is separated from the api,
# validate the parameters
criterion = {'name': q_rrset.name.to_text()}
try:
domain = self.storage.find_domain(context, criterion)
except exceptions.DomainNotFound:
LOG.exception(_LE("got exception while handling axfr request. "
"Question is %(qr)s") % {'qr': q_rrset})
return self._handle_query_error(request, dns.rcode.REFUSED)
r_rrsets = []
# The AXFR response needs to have a SOA at the beginning and end.
criterion = {'domain_id': domain.id, 'type': 'SOA'}
soa_recordsets = self.storage.find_recordsets(context, criterion)
for recordset in soa_recordsets:
r_rrsets.append(self._convert_to_rrset(context, recordset, domain))
# Get all the recordsets other than SOA
criterion = {'domain_id': domain.id, 'type': '!SOA'}
recordsets = self.storage.find_recordsets(context, criterion)
for recordset in recordsets:
r_rrsets.append(self._convert_to_rrset(context, recordset, domain))
# Append the SOA recordset at the end
for recordset in soa_recordsets:
r_rrsets.append(self._convert_to_rrset(context, recordset, domain))
response.set_rcode(dns.rcode.NOERROR)
# TODO(vinod) check if we dnspython has an upper limit on the number
# of rrsets.
response.answer = r_rrsets
# For all the data stored in designate mdns is Authoritative
response.flags |= dns.flags.AA
return response
def _handle_record_query(self, context, request):
"""Handle a DNS QUERY request for a record"""
response = dns.message.make_response(request)
try:
q_rrset = request.question[0]
# TODO(vinod) once validation is separated from the api,
# validate the parameters
criterion = {
'name': q_rrset.name.to_text(),
'type': dns.rdatatype.to_text(q_rrset.rdtype)
}
recordset = self.storage.find_recordset(context, criterion)
r_rrset = self._convert_to_rrset(context, recordset)
response.set_rcode(dns.rcode.NOERROR)
response.answer = [r_rrset]
# For all the data stored in designate mdns is Authoritative
response.flags |= dns.flags.AA
except exceptions.NotFound:
# If an FQDN exists, like www.rackspace.com, but the specific
# record type doesn't exist, like type SPF, then the return code
# would be NOERROR and the SOA record is returned. This tells
# caching nameservers that the FQDN does exist, so don't negatively
# cache it, but the specific record doesn't exist.
#
# If an FQDN doesn't exist with any record type, that is NXDOMAIN.
# However, an authoritative nameserver shouldn't return NXDOMAIN
# for a zone it isn't authoritative for. It would be more
# appropriate for it to return REFUSED. It should still return
# NXDOMAIN if it is authoritative for a domain but the FQDN doesn't
# exist, like abcdef.rackspace.com. Of course, a wildcard within a
# domain would mean that NXDOMAIN isn't ever returned for a domain.
#
            # To simplify things, currently this returns a REFUSED in all cases.
# If zone transfers needs different errors, we could revisit this.
response.set_rcode(dns.rcode.REFUSED)
return response
|
Python
| 0.000126
|
@@ -2053,34 +2053,8 @@
ror(
-%0A context,
requ
@@ -2132,33 +2132,24 @@
_error(self,
- context,
request, rc
@@ -2339,16 +2339,17 @@
end back
+.
%0A
|
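Building the REFUSED reply with dnspython takes two calls on the incoming message; a minimal sketch with the query constructed inline for illustration:

import dns.message
import dns.rcode

query = dns.message.make_query('example.com.', 'A')
response = dns.message.make_response(query)   # mirrors id and question
response.set_rcode(dns.rcode.REFUSED)
print(dns.rcode.to_text(response.rcode()))    # REFUSED
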
346589edf9e331a87ff371e3bf52d7759ec5100a
|
Add the fix for DNS resolver bug in pycurl to allow hostname use
|
benchmark.py
|
benchmark.py
|
#/usr/bin/env/python
import timeit
import time
import string
import argparse
# Import clients, so script fails fast if not available
from pycurl import Curl
try:
from cStringIO import StringIO
except:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import requests, urllib, urllib2, urllib3
def run_test(library, url, cycles, connection_reuse, options, setup_test, run_test, delay=None):
""" Runs a benchmark, showing start & stop
the setup_test is a String.template with $url as an option
the run_test allows for the same
"""
print("START testing {0} performance with {1} cycles and connection reuse {2}".format(library, cycles, connection_reuse))
print("Options: {0}".format(options))
run_cmd = string.Template(run_test).substitute(url=url)
if delay:
run_cmd = run_cmd + "; time.sleep({0})".format(delay)
setup_cmd = string.Template(setup_test).substitute(url=url)
mytime = timeit.timeit(stmt=run_cmd, setup=setup_cmd, number=cycles)
if delay:
mytime = mytime - (delay * cycles)
print("END testing result: {0}".format(mytime))
print(' ')
result = (library, connection_reuse, options, cycles, mytime)
return result
def run_all_benchmarks(url='', cycles=10, delay=None, **kwargs):
results = list()
headers = ('Library','Reuse Connections?','Options', 'Time')
tests = list()
# Library, cnxn_reuse, options, setup, run_stmt
# Requests
tests.append(('requests', False, '',
'import requests',
"r = requests.get('$url')"))
tests.append(('requests', True, '',
"import requests; \
session = requests.Session(); \
r = requests.Request('GET', '$url').prepare()",
"v = session.send(r)"))
# PyCurl
tests.append(('pycurl', True, "Reuse handle, don't save body",
"from pycurl import Curl; \
mycurl=Curl(); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.WRITEFUNCTION, lambda x: None)",
"mycurl.perform()"))
tests.append(('pycurl', True, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.URL, '$url')",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
tests.append(('pycurl', False, "Reuse handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO; \
mycurl=Curl(); \
mycurl.setopt(mycurl.URL, '$url'); \
body = StringIO(); \
mycurl.setopt(mycurl.FORBID_REUSE, 1)",
"body = StringIO(); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
tests.append(('pycurl', False, "New handle, save response to new cStringIO buffer",
"from pycurl import Curl; from cStringIO import StringIO",
"body = StringIO(); \
mycurl=Curl(); \
body = StringIO(); \
mycurl.setopt(mycurl.URL, '$url'); \
mycurl.setopt(mycurl.WRITEFUNCTION, body.write); \
mycurl.perform(); \
val = body.getvalue(); \
body.close()"))
# URLLIB3
tests.append(('urllib3', True, '',
"import urllib3; http_pool = urllib3.PoolManager()",
"body = http_pool.urlopen('GET', '$url').read()"))
# URLLIB2
tests.append(('urllib2', False, '',
"import urllib2",
"body = urllib2.urlopen('$url').read()"))
# URLLIB
tests.append(('urllib', False, '',
"import urllib",
"body = urllib.urlopen('$url').read()"))
for test in tests:
my_result = run_test(test[0], url, cycles, test[1], test[2], test[3], test[4], delay=delay)
results.append((test[0], test[1], test[2], my_result))
if(__name__ == '__main__'):
parser = argparse.ArgumentParser(description="Benchmark different python request frameworks")
parser.add_argument('--url', metavar='u', type=str, default='http://localhost:5000/ping', help="URL to run requests against")
parser.add_argument('--cycles', metavar='c', type=int, default=10000, help="Number of cycles to run")
parser.add_argument('--delay', metavar='d', type=float, help="Delay in seconds between requests")
parser.add_argument('--output-file', metavar='o', type=str, help="NOT YET SUPPORTED: output file to write CSV results to")
args = vars(parser.parse_args())
if args.get('url') is None:
print("No URL supplied, you must supply a URL!")
exit(1)
print args
run_all_benchmarks(**args)
|
Python
| 0
|
@@ -3043,32 +3043,155 @@
ody.close()%22))%0A%0A
+ # The use of global DNS cache avoids a bug on some linux systems with libcurl %0A # playing badly with DNS resolvers%0A
tests.append
@@ -3463,32 +3463,96 @@
URL, '$url'); %5C%0A
+ mycurl.setopt(mycurl.DNS_USE_GLOBAL_CACHE, True); %5C%0A
mycu
|
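The per-request pycurl pattern benchmarked above, written as a standalone fetch; io.BytesIO replaces cStringIO on Python 3 and the endpoint is hypothetical:

import pycurl
from io import BytesIO

body = BytesIO()
c = pycurl.Curl()
c.setopt(c.URL, 'http://localhost:5000/ping')  # hypothetical endpoint
c.setopt(c.WRITEFUNCTION, body.write)          # collect the response body
c.perform()                                    # raises if the host is down
c.close()
print(body.getvalue())
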
dbf3af1de0bbbda178e5bbd1ca0473a83d8cb9b3
|
test triggering travis
|
fabre_test.py
|
fabre_test.py
|
#!/usr/bin/env python
# coding=UTF-8
import sys
sys.exit(0)
|
Python
| 0.000001
|
@@ -40,16 +40,30 @@
port sys
+%0Aimport pytest
%0A%0Asys.ex
|
4cff5b7a14dfda786fef4a869e72095b7d9d83e4
|
correct relative import, d'oh
|
pyjac/performance_tester/__main__.py
|
pyjac/performance_tester/__main__.py
|
import sys
import os
import .performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
parser.add_argument('-nt', '--num_omp_threads',
type=int,
default=12,
required=False,
help='Number of threads to use for OpenMP '
'parallelization of the C codes.'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt, args.num_omp_threads
)
if __name__ == '__main__':
sys.exit(main())
|
Python
| 0.000035
|
@@ -19,16 +19,22 @@
os%0A%0A
+from .
import
-.
perf
|
237e11dcf7d92a54c8deea6703f32817c49f4fa5
|
Add timeout to show name query
|
silence_notifier/communication.py
|
silence_notifier/communication.py
|
import logging
import random
import requests
from slackclient import SlackClient
class Communicator:
"""Class handling communication to Slack, and with Radio REST API."""
channel_mention = "<!channel>"
current_shows_uri = "/v2/sendinger/currentshows"
def __init__(self, slack_client: SlackClient, settings):
self.slack_client = slack_client
self.settings = settings
self.first_message_ts = None
self.username = None
self.userid = None
def send(self, message_type, num_minutes=None, reply_to=None, **kwargs):
"""Send the given message type to Slack.
Args:
message_type: The key of self.settings.messages to use to look up
message texts, from which one is picked at random.
num_minutes: If provided, the value of
self.settings.messages[message_type] is assumed to be a dict.
All items in that dict which have a key less than or equal to
num_minutes are chosen, or all messages from the highest key
which matches that criteria if
settings.messages['warnings_cumulative'] is False.
reply_to: If provided, the message sent will be a reply to the
given message. This should be the dictionary received from the
Slack message event.
kwargs: The text string will have {key} replaced with value for
each key=value given to this method.
"""
possible_unformatted_message = self.settings.messages[message_type]
if num_minutes is not None:
matching_messages = []
max_from_minute = 0
for from_minute, messages in possible_unformatted_message.items():
if from_minute <= num_minutes:
if self.settings.messages['warnings_cumulative']:
matching_messages.extend(messages)
elif from_minute >= max_from_minute:
matching_messages = messages
max_from_minute = from_minute
possible_unformatted_message = matching_messages
unformatted_message = random.choice(possible_unformatted_message)
formatted_message = unformatted_message.format(
channel=self.channel_mention,
**kwargs
)
self.send_custom(formatted_message, reply_to)
def send_custom(self, message, reply_to=None):
"""Send the given message to Slack.
Args:
message: The text to post on Slack.
reply_to: If provided, the message will be sent as a reply to the
message whose information is provided in this argument. Its
format is equal to the dictionary received in message events
from Slack.
"""
other_message_args = {
'channel': self.settings.channel,
}
if reply_to:
if 'thread_ts' in reply_to:
thread_ts = reply_to['thread_ts']
else:
thread_ts = reply_to['ts']
other_message_args['thread_ts'] = thread_ts
other_message_args['channel'] = reply_to['channel']
data = self.slack_client.api_call(
"chat.postMessage",
text=message,
as_user=True,
**other_message_args
)
assert data['ok'], data
if not self.first_message_ts:
self.first_message_ts = data['ts']
def thumb_up_msg(self, received_message):
"""React with :+1: to the given message.
Args:
received_message: The dictionary received from a Slack message event
"""
data = self.slack_client.api_call(
"reactions.add",
name="+1",
timestamp=received_message["ts"],
channel=received_message["channel"]
)
assert data['ok'], data
def get_userid(self):
"""Get the userid of the logged in bot."""
if not self.userid:
self.populate_identity()
return self.userid
def get_username(self):
"""Get the username of the logged in bot."""
if not self.username:
self.populate_identity()
return self.username
def populate_identity(self):
"""Populate username and userid of the logged in bot."""
data = self.slack_client.api_call(
"auth.test"
)
assert data['ok'], data
self.userid = data['user_id']
self.username = data['user']
def get_first_message_ts(self):
"""Get the ts of the first message sent by us in this session."""
return self.first_message_ts
def get_current_show(self):
"""Get the name of the show currently active on radio."""
r = requests.get(self.settings.rr_api + self.current_shows_uri)
try:
r.raise_for_status()
except Exception as e:
logging.exception("Error occurred while retrieving current show")
return "Ukjent (ikke kontakt med pappagorg, eller APIet er nede)"
data = r.json()
try:
return data["current"]["title"]
except KeyError:
logging.error("No show found when trying to obtain current show")
return "Ukjent (ingen sending i autoavvikler)"
|
Python
| 0.000003
|
@@ -4832,24 +4832,41 @@
n radio.%22%22%22%0A
+ try:%0A
r =
@@ -4923,30 +4923,31 @@
hows_uri
-)%0A try:
+, timeout=10.0)
%0A
|
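requests applies no timeout by default, so a hung API call would block the notifier forever; passing timeout= bounds the connect and read phases, as the diff does (host below is hypothetical):

import requests

try:
    # The timeout (in seconds) applies separately to connecting and reading.
    r = requests.get('http://localhost:8000/v2/sendinger/currentshows',
                     timeout=10.0)
    r.raise_for_status()
except requests.exceptions.RequestException:
    print('current show unavailable')
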
61fed70623ec31695c616588477605aa40c9683c
|
Remove extra newline
|
examples/scripts/get-profile-connections.py
|
examples/scripts/get-profile-connections.py
|
#!/usr/bin/env python
###
# (C) Copyright (2012-2016) Hewlett Packard Enterprise Development LP
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
###
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import range
from future import standard_library
standard_library.install_aliases()
import sys
PYTHON_VERSION = sys.version_info[:3]
PY2 = (PYTHON_VERSION[0] == 2)
if PY2:
if PYTHON_VERSION < (2, 7, 9):
raise Exception('Must use Python 2.7.9 or later')
elif PYTHON_VERSION < (3, 4):
raise Exception('Must use Python 3.4 or later')
import hpOneView as hpov
from pprint import pprint
def acceptEULA(con):
# See if we need to accept the EULA before we try to log in
con.get_eula_status()
try:
if con.get_eula_status() is True:
print('EULA display needed')
con.set_eula('no')
except Exception as e:
print('EXCEPTION:')
print(e)
def login(con, credential):
# Login with given credentials
try:
con.login(credential)
except:
print('Login failed')
def output(con, name, profile, report):
if report:
print('\n{0:25} {1:3} {2:9} {3:12} {4:25} {5:17} {6:23} {7:8}'.format('serverProfile', 'cid', 'portId', 'functionType', 'Network', 'mac', 'wwpn', 'boot'))
print('\n{0:25} {1:3} {2:9} {3:12} {4:25} {5:17} {6:23} {7:8}'.format('-------------', '---', '------', '------------', '-------', '---', '----', '----'))
for conn in profile:
network = con.get_by_uri(conn['networkUri'])
wwpn = conn['wwpn'] if conn['wwpn'] else ''
boot = conn['boot']['priority'] if conn['boot']['priority'] else '' # boot priority can be null
print('{0:25} {1:3} {2:9} {3:12} {4:25} {5:17} {6:23} {7:8}'.format(name, conn['id'], conn['portId'], conn['functionType'], network['name'], conn['mac'], wwpn, boot))
else:
for conn in profile:
print('\nGetting Profile Connections List for Profile %s' % name)
pprint(conn)
'''
Get a server profile's connection information. If no profile name is supplied
connection information will be returned for all server profiles.
'''
def get_profile_connections_list(con, srv, name, report):
for profile in srv.get_server_profiles():
if profile['connections']:
if name:
if profile['name'] == name:
output(con, profile['name'], profile['connections'], report)
else:
output(con, profile['name'], profile['connections'], report)
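# Illustrative invocation (hypothetical appliance address and password):
#   ./get-profile-connections.py -a oneview.example.com -p secret -g -r
# prints the tabular report for every profile that has connections.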
def main():
parser = argparse.ArgumentParser(add_help=True,
formatter_class=argparse.RawTextHelpFormatter,
description='''
Display Server Profile Connections
Usage: ''')
parser.add_argument('-a', dest='host', required=True,
help='''
HP OneView Appliance hostname or IP address''')
parser.add_argument('-u', dest='user', required=False,
default='Administrator',
help='''
HP OneView Username''')
parser.add_argument('-p', dest='passwd', required=True,
help='''
HP OneView Password''')
parser.add_argument('-c', dest='cert', required=False,
help='''
Trusted SSL Certificate Bundle in PEM (Base64 Encoded DER) Format''')
parser.add_argument('-y', dest='proxy', required=False,
help='''
Proxy (host:port format''')
parser.add_argument('-r', dest='report',
required=False, action='store_true',
help='''
Format the output using a human readable report format''')
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-n', dest='name',
help='''
Name of the server profile to get''')
group.add_argument('-g', dest='get_all',
action='store_true',
help='''
Get ALL server profiles and exit''')
args = parser.parse_args()
credential = {'userName': args.user, 'password': args.passwd}
con = hpov.connection(args.host)
srv = hpov.servers(con)
net = hpov.networking(con)
sts = hpov.settings(con)
if args.proxy:
con.set_proxy(args.proxy.split(':')[0], args.proxy.split(':')[1])
if args.cert:
con.set_trusted_ssl_bundle(args.cert)
login(con, credential)
acceptEULA(con)
# get a server profile's connection information
get_profile_connections_list(con, srv, args.name, args.report)
if __name__ == '__main__':
import sys
import argparse
sys.exit(main())
# vim:set shiftwidth=4 tabstop=4 expandtab textwidth=79:
|
Python
| 0.000002
|
@@ -2408,34 +2408,32 @@
%0A print('
-%5Cn
%7B0:25%7D %7B1:3%7D
|
959f99f6cce0b627fd803bc280fa20eb0fbeb373
|
Remove test code (#189)
|
terminalone/reports.py
|
terminalone/reports.py
|
# -*- coding: utf-8 -*-
"""Provides reporting data."""
from __future__ import absolute_import, division
import csv
from .config import SERVICE_BASE_PATHS
from .connection import Connection
from .errors import ClientError, T1Error
from .utils import compose
from .vendor import six
from .vendor.six.moves.urllib.parse import unquote, urlencode
from .xmlparser import ParseError, XMLParser
if six.PY3:
decode = True
else:
decode = False
class Report(Connection):
"""Object for pulling reports"""
_fields = {
'dimensions': ','.join,
'end_date': None,
'filter': compose(unquote, urlencode),
'having': compose(unquote, urlencode),
'metrics': ','.join,
'order': ','.join,
'start_date': None,
'time_window': None,
'time_rollup': None,
}
def __init__(self, session, report=None, properties=None, version=None, **kwargs):
super(Report, self).__init__(_create_session=False, **kwargs)
self.session = session
self.parameters = {}
self.version = version
if report is not None:
self.report = report
if properties is not None:
self.set(properties)
elif kwargs:
self.set(kwargs)
def __getattr__(self, attr):
if attr in self.parameters:
return self.parameters[attr]
else:
raise AttributeError(attr)
def __setattr__(self, key, value):
if key in self._fields:
self.parameters[key] = value
else:
super(Report, self).__setattr__(key, value)
def report_uri(self, report):
"""Get report URI stub from metadata info.
e.g. report_uri('https://api.../reporting/v1/std/performance') ->
'performance'
"""
metadata = self.metadata
if not hasattr(self, 'report'):
if report not in metadata['reports']:
raise ClientError('Invalid report')
return metadata['reports'][report]['URI_Data'].rsplit('/', 1)[-1]
else:
return metadata['URI_Data']
def set(self, data):
"""Set properties for report from given dict of properties.
Essentially a merge.
"""
for field, value in six.iteritems(data):
setattr(self, field, value)
def _get(self, path, params=None):
"""Base method customized for the mix of JSON and XML
:param path: str path to hit. Should not start with slash
:param params: dict query string params
"""
if self.version == 'beta':
print("test")
url = '/'.join(['https:/', self.api_base, SERVICE_BASE_PATHS['reports-beta'], path])
else:
url = '/'.join(['https:/', self.api_base, SERVICE_BASE_PATHS['reports'], path])
response = self.session.get(url, params=params, stream=True)
if not response.ok:
try:
result = XMLParser(response.content)
except ParseError as exc:
self.response = response
raise ClientError('Could not parse XML response: {!r}'.format(exc))
raise T1Error(result, None)
return response
@property
def metadata(self):
"""Fetch report metadata.
If no report is given, fetches metadata for all reports.
Caches for future use.
"""
if hasattr(self, '_metadata'):
return self._metadata
if hasattr(self, 'report'):
path = self.report + '/meta'
else:
path = 'meta'
res = self._get(path)
self._metadata = res.json()
return self._metadata
def get(self, as_dict=False):
"""Get report data. Returns tuple (headers, csv.Reader).
If as_dict == True, return (headers, csv.DictReader).
"""
if not hasattr(self, 'report'):
raise ClientError("Can't run get without report!")
params = {}
for key, value in six.iteritems(self.parameters):
if self._fields[key]:
params[key] = self._fields[key](value)
else:
params[key] = value
iter_ = self._get(self.report,
params=params).iter_lines(decode_unicode=decode)
if as_dict:
reader = csv.DictReader(iter_)
headers = reader.fieldnames
else:
reader = csv.reader(iter_)
headers = next(reader)
return headers, reader
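# Hedged usage sketch (assumes an authenticated `session`; the report name
# 'performance' is illustrative):
#   report = Report(session, report='performance',
#                   dimensions=['campaign_id'], metrics=['impressions'])
#   headers, rows = report.get()
#   for row in rows:
#       print(dict(zip(headers, row)))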
|
Python
| 0
|
@@ -2601,34 +2601,8 @@
a':%0A
- print(%22test%22)%0A
|
126305c8bf5b659568a1cb40d731b7df0a75c553
|
Remove --mp-mode option from runtest.py
|
runtest.py
|
runtest.py
|
#!/usr/bin/env python3
import unittest
import sys
from argparse import ArgumentParser
import logging
import threading
from tests.util import GLOBAL
# **********
# Grab Tests
# **********
# FIXME:
# * Test redirect and response.url after redirect
GRAB_TEST_LIST = (
# *** Internal API
"tests.grab_api",
"tests.grab_transport",
"tests.response_class",
"tests.grab_debug", # FIXME: fix tests excluded for urllib3
# *** Response processing
"tests.grab_xml_processing",
"tests.grab_response_body_processing",
"tests.grab_charset",
"tests.grab_redirect",
"tests.grab_defusedxml",
"tests.grab_document",
# *** Network
"tests.grab_get_request",
"tests.grab_post_request",
"tests.grab_request", # FIXME: fix tests excluded for urllib3
"tests.grab_user_agent",
"tests.grab_cookies", # FIXME: fix tests excluded for urllib3
"tests.grab_url_processing",
"tests.grab_timeout",
# *** Refactor
"tests.grab_proxy",
"tests.grab_upload_file",
"tests.grab_limit_option",
"tests.grab_charset_issue",
"tests.grab_pickle", # FIXME: fix tests excluded for urllib3
"tests.proxy",
# *** Extensions
"tests.ext_text",
"tests.ext_rex",
"tests.ext_lxml",
"tests.ext_form",
"tests.ext_doc",
# *** Pycurl Test
"tests.pycurl_cookie",
# *** util.module
"tests.util_module",
"tests.util_log",
# *** grab.export
"tests.util_config",
"tests.script_crawl",
"tests.grab_error",
"tests.grab_deprecated",
"tests.ext_pyquery",
# *** process control
"tests.grab_sigint",
"tests.spider_sigint",
# *** Other things
"tests.raw_server",
"tests.misc",
"tests.lib_weblib",
)
# ************
# Spider Tests
# ************
SPIDER_TEST_LIST = (
"tests.spider_task",
"tests.spider",
"tests.spider_proxy",
"tests.spider_queue",
"tests.spider_misc",
"tests.spider_meta",
"tests.spider_error",
#'tests.spider_data',
"tests.spider_stat",
"tests.spider_multiprocess",
)
def setup_logging():
logging.basicConfig(level=logging.DEBUG)
for name, level in (
("grab.network", logging.INFO),
("tornado.access", logging.ERROR),
("tests.util", logging.INFO),
("grab.util", logging.INFO),
("grab.base", logging.INFO),
("grab.spider.base", logging.INFO),
("grab.spider.parser_pipeline", logging.INFO),
("grab.stat", logging.INFO),
):
logger = logging.getLogger(name)
logger.setLevel(level)
def main():
setup_logging()
parser = ArgumentParser()
parser.add_argument("-t", "--test", help="Run only specified tests")
parser.add_argument("--grab-transport", default="pycurl")
parser.add_argument("--network-service", default="threaded")
parser.add_argument(
"--test-grab",
action="store_true",
default=False,
help="Run tests for Grab::Spider",
)
parser.add_argument(
"--test-spider", action="store_true", default=False, help="Run tests for Grab"
)
parser.add_argument(
"--test-all",
action="store_true",
default=False,
help="Run tests for both Grab and Grab::Spider",
)
parser.add_argument(
"--backend-mongodb",
action="store_true",
default=False,
help="Run extra tests that depends on mongodb",
)
parser.add_argument(
"--backend-redis",
action="store_true",
default=False,
help="Run extra tests that depends on redis",
)
parser.add_argument(
"--mp-mode",
action="store_true",
default=False,
help="Enable multiprocess mode in spider tests",
)
parser.add_argument(
"-v",
"--verbose",
action="store_true",
default=False,
help="Enable verbose logging",
)
opts = parser.parse_args()
GLOBAL["grab_transport"] = opts.grab_transport
GLOBAL["network_service"] = opts.network_service
if opts.backend_mongodb:
GLOBAL["backends"].append("mongodb")
if opts.backend_redis:
GLOBAL["backends"].append("redis")
test_list = []
if opts.test_all:
test_list += GRAB_TEST_LIST
test_list += SPIDER_TEST_LIST
if opts.test_grab:
test_list += GRAB_TEST_LIST
if opts.test_spider:
test_list += SPIDER_TEST_LIST
if opts.test:
test_list += [opts.test]
if opts.verbose:
from grab.spider.base import logger_verbose
logger_verbose.setLevel(logging.DEBUG)
GLOBAL["mp_mode"] = opts.mp_mode
# Check tests integrity
# Ensure that all test modules are imported correctly
for path in test_list:
__import__(path, None, None, ["foo"])
loader = unittest.TestLoader()
suite = unittest.TestSuite()
for path in test_list:
mod_suite = loader.loadTestsFromName(path)
for some_suite in mod_suite:
for test in some_suite:
if not hasattr(test, "backend") or test.backend in GLOBAL["backends"]:
suite.addTest(test)
runner = unittest.TextTestRunner()
result = runner.run(suite)
th_list = list(threading.enumerate())
print("Active threads (%d):" % len(th_list))
for th in th_list:
print("Thread: %s (isAlive:%s)" % (th, th.isAlive()))
if result.wasSuccessful():
sys.exit(0)
else:
sys.exit(1)
if __name__ == "__main__":
main()
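# Illustrative invocations (flags taken from the parser above):
#   ./runtest.py --test-grab --grab-transport pycurl
#   ./runtest.py --test-all --backend-mongodb --backend-redis -v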
|
Python
| 0.000001
|
@@ -3615,169 +3615,8 @@
nt(%0A
- %22--mp-mode%22,%0A action=%22store_true%22,%0A default=False,%0A help=%22Enable multiprocess mode in spider tests%22,%0A )%0A parser.add_argument(%0A
@@ -4445,46 +4445,8 @@
G)%0A%0A
- GLOBAL%5B%22mp_mode%22%5D = opts.mp_mode%0A%0A
|
7596de67f67f5bdc9350067a896dcd4b7b4c7650
|
Stop requiring the path of the users file; only require the name.
|
gobbldygook.py
|
gobbldygook.py
|
#!/usr/bin/env python3
import argparse, csv, os
from course import Course, all_courses, all_labs, getCourse
from student import Student
def argument_parse():
	parser = argparse.ArgumentParser(description="This program works best if you give it some data. However, we have some example stuff to show you anyway.")
parser.add_argument('-l', "--load", default='users/example.yaml')
parser.add_argument('-f', "--find")
parser.add_argument("--demo")
parser.add_argument("--stress")
parser.add_argument("--debug")
return parser
def parse_filename(fname):
filename = fname.name
filename = filename.split('.')[0] # Remove the extension
filename = filename.split('/')[1] # Remove the path separator
start_year, end_year, semester = filename.split(sep='-')
if semester == 's1':
semester = "fall"
elif semester == 's2':
semester = "spring"
elif semester == 'ss1':
semester = "summer session 1"
elif semester == 'ss2':
semester = "summer session 2"
return int(start_year), semester
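# Illustrative mapping (assuming the data/ naming scheme used below):
#   parse_filename(open('data/2012-2013-s1.csv')) -> (2012, 'fall')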
def load_data(filename):
with open(filename) as infile:
year, semester = parse_filename(infile)
if year not in all_courses:
all_courses[year] = {}
if semester not in all_courses[year]:
all_courses[year][semester] = {}
infile.readline() # Remove the csv header line
csvfile = csv.reader(infile)
for row in csvfile:
tmp = Course(data=row)
if tmp.course_status == 'X':
pass
elif tmp.course_type == "Lab":
all_labs[tmp.id] = tmp
else:
all_courses[tmp.id] = tmp
all_courses[year][tmp.id] = tmp
all_courses[year][semester][tmp.id] = tmp
def read_data():
path = 'data/'
for filename in os.listdir(path):
if filename[0] != '.':
load_data(path + filename)
def main():
parser = argument_parse()
args = parser.parse_args()
read_data()
user = Student(filename=args.load)
print(user)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -360,26 +360,15 @@
lt='
-users/
example
-.yaml
')%0A%09
@@ -1811,16 +1811,25 @@
ilename=
+'users/'+
args.loa
@@ -1829,16 +1829,24 @@
rgs.load
++'.yaml'
)%0A%09print
|
d73c6addf064ba7b78c4874a6affc6bac6dfee1f
|
Add image feature detection
|
image.py
|
image.py
|
from __future__ import division
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 match significantly.
Determined through feature matching and comparison: returns True
when the number of good matches exceeds MIN_MATCH_COUNT.
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SURF detector (the variable name `sift` is kept from an earlier SIFT version)
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SURF
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches > MIN_MATCH_COUNT
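# Hedged example (hypothetical image paths):
#   compare('frame_a.jpg', 'frame_b.jpg')  # True once >MIN_MATCH_COUNT good matches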
|
Python
| 0
|
@@ -66,16 +66,20 @@
ort time
+, io
%0Afrom ma
@@ -107,16 +107,48 @@
t as plt
+%0Afrom google.cloud import vision
%0A%0AMIN_MA
@@ -1257,8 +1257,774 @@
H_COUNT%0A
+%0Adef features(img_path,labels=True,logos=True,landmarks=True):%0A %22%22%22%0A Returns a list of features from an image%0A%0A Optionally pass a certainty_threshold value to give a threshold in %5B0,1%5D on how certain%0A Google's identification is.%0A %22%22%22%0A v_c = vision.Client()%0A with io.open(img_path, 'rb') as image_file:%0A content = image_file.read()%0A img = v_c.image(content=content)%0A output = %5B%5D%0A if labels:%0A labels = %5Blabel.description for label in img.detect_labels()%5D%0A output += labels%0A if logos:%0A logos = %5Blogo.description for logo in img.detect_logos()%5D%0A output += logos%0A if landmarks:%0A landmarks = %5Blandmark.description for landmark in img.detect_landmarks()%5D%0A output += landmarks%0A return output%0A
|
377aef17394b2dabd6db7439d3cfcd4e0d54a3c2
|
Allow codata tests to be run as script.
|
scipy/constants/tests/test_codata.py
|
scipy/constants/tests/test_codata.py
|
import warnings
from scipy.constants import find
from numpy.testing import assert_equal
def test_find():
warnings.simplefilter('ignore', DeprecationWarning)
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
|
Python
| 0.000004
|
@@ -82,16 +82,34 @@
rt_equal
+, run_module_suite
%0A%0Adef te
@@ -988,8 +988,59 @@
ime'%5D))%0A
+%0Aif __name__ == %22__main__%22:%0A run_module_suite()%0A
|
67c2e8ba33b5bcc83f0242bece81f604d21939db
|
Fix editing error
|
pytest-profiling/pytest_profiling.py
|
pytest-profiling/pytest_profiling.py
|
from __future__ import absolute_import
import pytest
import os
import cProfile
import pstats
import pipes
import six
import errno
from hashlib import md5
LARGE_FILENAME_HASH_LEN = 8
def clean_filename(s):
forbidden_chars = set('/?<>\:*|"')
return six.text_type("".join(c if c not in forbidden_chars and ord(c) < 127 else '_'
for c in s))
class Profiling(object):
"""Profiling plugin for pytest."""
svg = False
svg_name = None
profs = []
combined = None
def __init__(self, svg):
self.svg = svg
self.profs = []
def pytest_sessionstart(self, session): # @UnusedVariable
try:
os.makedirs("prof")
except OSError:
pass
def pytest_sessionfinish(self, session, exitstatus): # @UnusedVariable
if self.profs:
combined = pstats.Stats(self.profs[0])
for prof in self.profs[1:]:
combined.add(prof)
self.combined = os.path.abspath(os.path.join("prof", "combined.prof"))
combined.dump_stats(self.combined)
if self.svg:
t.append("gprof2dot -f pstats $IN", "f-")
t.append("dot -Tsvg -o $OUT", "-f")
t.copy(self.combined, self.svg_name)
def pytest_terminal_summary(self, terminalreporter):
if self.combined:
terminalreporter.write("Profiling (from {prof}):\n".format(prof=self.combined))
pstats.Stats(self.combined, stream=terminalreporter).strip_dirs().sort_stats('cumulative').print_stats(20)
if self.svg_name:
terminalreporter.write("SVG profile in {svg}.\n".format(svg=self.svg_name))
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
prof_filename = os.path.abspath(os.path.join("prof", clean_filename(item.name) + ".prof"))
prof = cProfile.Profile()
prof.enable()
yield
prof.disable()
try:
prof.dump_stats(prof_filename)
except EnvironmentError as err:
if err.errno != errno.ENAMETOOLONG:
raise
if len(item.name) < LARGE_FILENAME_HASH_LEN:
raise
hash_str = md5(item.name.encode('utf-8')).hexdigest()[:LARGE_FILENAME_HASH_LEN]  # md5 needs bytes on Python 3
prof_filename = os.path.join("prof", hash_str + ".prof")
prof.dump_stats(prof_filename)
self.profs.append(prof_filename)
def pytest_addoption(parser):
"""pytest_addoption hook for profiling plugin"""
group = parser.getgroup('Profiling')
group.addoption("--profile", action="store_true",
help="generate profiling information")
group.addoption("--profile-svg", action="store_true",
help="generate profiling graph (using gprof2dot and dot -Tsvg)")
def pytest_configure(config):
"""pytest_configure hook for profiling plugin"""
profile_enable = any(config.getvalue(x) for x in ('profile', 'profile_svg'))
if profile_enable:
config.pluginmanager.register(Profiling(config.getvalue('profile_svg')))
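# Illustrative invocation (plugin registered via the hooks above):
#   pytest --profile          # dumps per-test .prof files under prof/
#   pytest --profile-svg      # additionally renders the combined SVG graph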
|
Python
| 0.000002
|
@@ -1122,16 +1122,139 @@
lf.svg:%0A
+ self.svg_name = os.path.abspath(os.path.join(%22prof%22, %22combined.svg%22))%0A t = pipes.Template()%0A
|
20d39fe954e9dc62bbe283ad89b7c140529a23df
|
Remove WindowDict.__contains__
|
gorm/window.py
|
gorm/window.py
|
from collections import deque, MutableMapping
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the effective value as
of that revision. Keys should always be revision numbers. Once a key is set, all greater
keys are considered to be in this dict unless the value is ``None``. Keys after that one
aren't "set" until one's value is non-``None`` again.
Optimized for the cases where you look up the same revision repeatedly, or its neighbors.
"""
def seek(self, rev):
"""Arrange the caches in the optimal way for looking up the given revision."""
# TODO: binary search? Perhaps only when one or the other deque is very large?
while self._future and self._future[0][0] <= rev:
self._past.append(self._future.popleft())
while self._past and self._past[-1][0] > rev:
self._future.appendleft(self._past.pop())
def rev_before(self, rev):
"""Return the last rev prior to the given one on which the value changed."""
self.seek(rev)
return self._past[-1][0]
def rev_after(self, rev):
"""Return the next rev after the given one on which the value will change, or None if it never will."""
self.seek(rev)
if self._future:
return self._future[0][0]
def __init__(self, data={}):
self._past = deque(sorted(data.items()))
self._future = deque()
def __iter__(self):
for (rev, v) in self._past:
yield rev
for (rev, v) in self._future:
yield rev
def __contains__(self, k):
if not self._past or self._past[0][0] > k:
return False
self.seek(k)
return self._past[-1][1] is not None
def __len__(self):
return len(self._past) + len(self._future)
def __getitem__(self, rev):
self.seek(rev)
if not self._past:
raise KeyError("Revision {} is before the start of history".format(rev))
if self._past[-1][1] is None:
raise KeyError("No value since revision {}".format(self._past[-1][0]))
return self._past[-1][1]
def __setitem__(self, rev, v):
if not self._past:
self._past.append((rev, v))
elif rev < self._past[0][0]:
self._past.appendleft((rev, v))
elif rev == self._past[0][0]:
self._past[0] = (rev, v)
elif rev == self._past[-1][0]:
self._past[-1] = (rev, v)
elif rev > self._past[-1][0]:
if not self._future or rev < self._future[0][0]:
self._past.append((rev, v))
elif rev == self._future[0][0]:
self._future[0] = (rev, v)
elif rev == self._future[-1][0]:
self._future[-1] = (rev, v)
elif rev > self._future[-1][0]:
self._future.append((rev, v))
else:
self._future.append((rev, v))
inserted = sorted(self._future)
self._future = deque(inserted)
else:
# I was going to implement my own insertion sort here, but I gather Python already
# does that, via Timsort. I wonder if there's a way I can give it a hint, so it doesn't
# have to check for partial ordering? And maybe avoid reconstructing the deque?
self._past.append((rev, v))
inserted = sorted(self._past)
self._past = deque(inserted)
def __delitem__(self, rev):
self.seek(rev)  # bring any entry for rev to the top of _past
name = '_past' if self._past and rev <= self._past[-1][0] else '_future'
stack = getattr(self, name)
waste = deque()
setattr(self, name, waste)
deleted = False
while stack:
(r, v) = stack.popleft()
if r != rev:
waste.append((r, v))
else:
assert not deleted
deleted = True
if not deleted:
raise KeyError("Rev not present: {}".format(rev))
def __repr__(self):
return "WindowDict({})".format(repr(dict(self)))
|
Python
| 0.000055
|
@@ -1678,182 +1678,8 @@
ev%0A%0A
- def __contains__(self, k):%0A if not self._past or self._past%5B0%5D%5B0%5D %3E k:%0A return False%0A self.seek(k)%0A return self._past%5B-1%5D%5B1%5D is not None%0A%0A
|
0220872a2bb0ce8b6d4bb54c4f843a05e15ecd4d
|
Update docs version
|
docs/source/conf.py
|
docs/source/conf.py
|
import os
import sys
import sphinx_rtd_theme
# Source file types:
source_suffix = ['.rst', '.md']
# -*- coding: utf-8 -*-
#
# HiDi documentation build configuration file, created by
# sphinx-quickstart on Mon Apr 10 19:42:17 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinxcontrib.fulltoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.mathjax',
'sphinx.ext.ifconfig',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'HiDi'
copyright = u'2017, Vevo Engineering'
author = u'Vevo Engineering'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'0.0.2'
# The full version, including alpha/beta/rc tags.
release = u'0.0.2'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_rtd_theme"
# Theme path
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'HiDidoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'hidi', u'HiDi Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'HiDi', u'HiDi Documentation',
author, 'HiDi', 'One line description of project.',
'Miscellaneous'),
]
html_sidebars = {'**': [
'globaltoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html'
], }
sidebar_includehidden = False
# Order autodoc docs by source order
autodoc_member_order = 'bysource'
|
Python
| 0
|
@@ -1882,17 +1882,17 @@
= u'0.0.
-2
+3
'%0A# The
@@ -1951,17 +1951,17 @@
= u'0.0.
-2
+3
'%0A%0A# The
|
d2f8e58ef18527f280b1fc4675afda9f679e4c37
|
Make WindowDict.seek() a little bit lazier
|
gorm/window.py
|
gorm/window.py
|
from collections import deque, MutableMapping, ItemsView, ValuesView
class WindowDictItemsView(ItemsView):
def __contains__(self, item):
(rev, v) = item
if self._mapping._past:
if rev < self._mapping._past[0][0]:
return False
elif self._mapping._future:
if rev < self._mapping._future[0][0]:
return False
else:
return False
for mrev, mv in self._mapping._past:
if mrev == rev:
return mv == v
for mrev, mv in self._mapping._future:
if mrev == rev:
return mv == v
return False
def __iter__(self):
yield from self._mapping._past
yield from self._mapping._future
class WindowDictValuesView(ValuesView):
def __contains__(self, value):
for rev, v in self._mapping._past:
if v == value:
return True
for rev, v in self._mapping._future:
if v == value:
return True
return False
def __iter__(self):
for rev, v in self._mapping._past:
yield v
for rev, v in self._mapping._future:
yield v
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the effective value as
of that revision. Keys should always be revision numbers. Once a key is set, all greater
keys are considered to be in this dict unless the value is ``None``. Keys after that one
aren't "set" until one's value is non-``None`` again.
Optimized for the cases where you look up the same revision repeatedly, or its neighbors.
"""
__slots__ = ['_past', '_future']
def seek(self, rev):
"""Arrange the caches in the optimal way for looking up the given revision."""
# TODO: binary search? Perhaps only when one or the other deque is very large?
if self._past and self._future and self._past[-1][0] <= rev < self._future[0][0]:
return
while self._future and self._future[0][0] <= rev:
self._past.append(self._future.popleft())
while self._past and self._past[-1][0] > rev:
self._future.appendleft(self._past.pop())
def rev_before(self, rev):
"""Return the last rev prior to the given one on which the value changed."""
self.seek(rev)
return self._past[-1][0]
def rev_after(self, rev):
"""Return the next rev after the given one on which the value will change, or None if it never will."""
self.seek(rev)
if self._future:
return self._future[0][0]
def items(self):
return WindowDictItemsView(self)
def values(self):
return WindowDictValuesView(self)
def __init__(self, data={}):
self._past = deque(sorted(data.items()))
self._future = deque()
def __iter__(self):
for (rev, v) in self._past:
yield rev
for (rev, v) in self._future:
yield rev
def __len__(self):
return len(self._past) + len(self._future)
def __getitem__(self, rev):
self.seek(rev)
if not self._past:
raise KeyError("Revision {} is before the start of history".format(rev))
ret = self._past[-1][1]
if ret is None:
raise KeyError("Set, then deleted")
return ret
def __setitem__(self, rev, v):
if not self._past and not self._future:
self._past.append((rev, v))
elif rev < self._past[0][0]:
self._past.appendleft((rev, v))
elif rev == self._past[0][0]:
self._past[0] = (rev, v)
elif rev == self._past[-1][0]:
self._past[-1] = (rev, v)
elif rev > self._past[-1][0]:
if not self._future or rev < self._future[0][0]:
self._past.append((rev, v))
elif rev == self._future[0][0]:
self._future[0] = (rev, v)
elif rev == self._future[-1][0]:
self._future[-1] = (rev, v)
elif rev > self._future[-1][0]:
self._future.append((rev, v))
else:
self.seek(rev)
self._past.append((rev, v))
else:
self.seek(rev)
self._past.append((rev, v))
def __delitem__(self, rev):
self.seek(rev)  # bring any entry for rev to the top of _past
name = '_past' if self._past and rev <= self._past[-1][0] else '_future'
stack = getattr(self, name)
waste = deque()
setattr(self, name, waste)
deleted = False
while stack:
(r, v) = stack.popleft()
if r != rev:
waste.append((r, v))
else:
assert not deleted
deleted = True
if not deleted:
raise KeyError("Rev not present: {}".format(rev))
def __repr__(self):
return "WindowDict({})".format(repr(dict(self)))
class WindowDefaultDict(WindowDict):
__slots__ = ['_future', '_past', 'cls', 'args_munger', 'kwargs_munger']
def __init__(self, cls, args_munger=lambda k: tuple(), kwargs_munger=lambda k: {}, data={}):
super(WindowDefaultDict, self).__init__(data)
self.cls = cls
self.args_munger = args_munger
self.kwargs_munger = kwargs_munger
def __getitem__(self, k):
if k in self:
return super(WindowDefaultDict, self).__getitem__(k)
ret = self[k] = self.cls(*self.args_munger(k), **self.kwargs_munger(k))
return ret
class FuturistWindowDict(WindowDict):
def __setitem__(self, rev, v):
if not self._past and not self._future:
self._past.append((rev, v))
if self._future:
self.seek(rev)
if self._future:
raise ValueError("Already have some history after {}".format(rev))
if not self._past or rev > self._past[-1][0]:
self._past.append((rev, v))
elif rev == self._past[-1][0]:
self._past[-1] = (rev, v)
else:
raise ValueError("Already have some history after {} (and my seek function is broken?)".format(rev))
|
Python
| 0.000001
|
@@ -2027,33 +2027,16 @@
ast and
-self._future and
self._pa
@@ -2048,25 +2048,48 @@
%5D%5B0%5D %3C= rev
-%3C
+and (not self._future or
self._futur
@@ -2087,32 +2087,39 @@
lf._future%5B0%5D%5B0%5D
+ %3E rev)
:%0A re
|
2ad3d8df999dcea21f3f12fdf417574effd6ceb6
|
Fix profiling
|
devito/core/autotuning.py
|
devito/core/autotuning.py
|
from __future__ import absolute_import
from collections import OrderedDict
from itertools import combinations
from functools import reduce
from operator import mul
import resource
from devito.logger import info, info_at
from devito.nodes import Iteration
from devito.parameters import configuration
from devito.visitors import FindNodes, FindSymbols
__all__ = ['autotune']
def autotune(operator, arguments, tunable):
"""
Acting as a high-order function, take as input an operator and a list of
operator arguments to perform empirical autotuning. Some of the operator
arguments are marked as tunable.
"""
at_arguments = arguments.copy()
# User-provided output data must not be altered
output = [i.name for i in operator.output]
for k, v in arguments.items():
if k in output:
at_arguments[k] = v.copy()
# Squeeze dimensions to minimize auto-tuning time
iterations = FindNodes(Iteration).visit(operator.body)
dim_mapper = {i.dim.name: i.dim for i in iterations}
squeezable = [i.dim.parent.symbolic_size.name for i in iterations
if i.is_Sequential and i.dim.is_Buffered]
# Attempted block sizes
mapper = OrderedDict([(i.argument.symbolic_size.name, i) for i in tunable])
blocksizes = [OrderedDict([(i, v) for i in mapper])
for v in options['at_blocksize']]
if configuration['autotuning'] == 'aggressive':
blocksizes = more_heuristic_attempts(blocksizes)
# How many temporaries are allocated on the stack?
# Will drop block sizes that might lead to a stack overflow
functions = FindSymbols('symbolics').visit(operator.body +
operator.elemental_functions)
stack_shapes = [i.shape for i in functions if i.is_TensorFunction and i._mem_stack]
stack_space = sum(reduce(mul, i, 1) for i in stack_shapes)*operator.dtype().itemsize
# Note: there is only a single loop over 'blocksize' because only
# square blocks are tested
timings = OrderedDict()
for bs in blocksizes:
illegal = False
for k, v in at_arguments.items():
if k in bs:
val = bs[k]
handle = at_arguments.get(mapper[k].original_dim.symbolic_size.name)
if val <= mapper[k].iteration.end(handle):
at_arguments[k] = val
else:
# Block size cannot be larger than actual dimension
illegal = True
break
elif k in squeezable:
at_arguments[k] = options['at_squeezer']
if illegal:
continue
# Make sure we remain within stack bounds, otherwise skip block size
dim_sizes = {}
for k, v in at_arguments.items():
if k in bs:
dim_sizes[mapper[k].argument.symbolic_size] = bs[k]
elif k in dim_mapper:
dim_sizes[dim_mapper[k].symbolic_size] = v
try:
bs_stack_space = stack_space.xreplace(dim_sizes)
except AttributeError:
bs_stack_space = stack_space
try:
if int(bs_stack_space) > options['at_stack_limit']:
continue
except TypeError:
# We should never get here
info_at("Couldn't determine stack size, skipping block size %s" % str(bs))
continue
# Use AT-specific profiler structs
at_arguments[operator.profiler.structname] = operator.profiler.setup()
operator.cfunction(*list(at_arguments.values()))
elapsed = sum(operator.profiler.timings.values())
timings[tuple(bs.items())] = elapsed
info_at("<%s>: %f" % (','.join('%d' % i for i in bs.values()), elapsed))
try:
best = dict(min(timings, key=timings.get))
info("Auto-tuned block shape: %s" % best)
except ValueError:
info("Auto-tuning request, but couldn't find legal block sizes")
return arguments
# Build the new argument list
tuned = OrderedDict()
for k, v in arguments.items():
tuned[k] = best[k] if k in mapper else v
# Reset the profiling struct
tuned[operator.profiler.structname] = operator.profiler.setup()
return tuned
def more_heuristic_attempts(blocksizes):
handle = []
for blocksize in blocksizes[:3]:
for i in blocksizes:
handle.append(OrderedDict(list(blocksize.items())[:-1] +
[list(i.items())[-1]]))
for blocksize in list(blocksizes):
ncombs = len(blocksize)
for i in range(ncombs):
for j in combinations(blocksize, i+1):
item = [(k, blocksize[k]*2 if k in j else v)
for k, v in blocksize.items()]
handle.append(OrderedDict(item))
unique = []
for i in blocksizes + handle:
if i not in unique:
unique.append(i)
return unique
options = {
'at_squeezer': 3,
'at_blocksize': [8, 16, 24, 32, 40, 64, 128],
'at_stack_limit': resource.getrlimit(resource.RLIMIT_STACK)[0] / 4
}
"""Autotuning options."""
|
Python
| 0.000015
|
@@ -3491,38 +3491,35 @@
erator.profiler.
-struct
+var
name%5D = operator
@@ -4189,16 +4189,62 @@
struct%0A
+ assert operator.profiler.varname in tuned%0A
tune
@@ -4263,22 +4263,19 @@
rofiler.
-struct
+var
name%5D =
|
ef1813af0e70661c9770de69178ce7b8fc8c544f
|
Add proper description of project to texinfo docs.
|
docs/source/conf.py
|
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
import os
import re
import sys
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../src'))
# -- Project information -----------------------------------------------------
project = 'OCSPdash'
copyright = '2018 Scott Colby and Charles Tapley Hoyt'
author = 'Scott Colby and Charles Tapley Hoyt'
# The full version, including alpha/beta/rc tags
release = '0.1.0-dev'
# The short X.Y version
parsed_version = re.match(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)(?:-(?P<release>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?(?:\+(?P<build>[0-9A-Za-z-]+(?:\.[0-9A-Za-z-]+)*))?',
release)
version = parsed_version.expand(r'\g<major>.\g<minor>.\g<patch>')
if parsed_version.group('release'):
tags.add('prerelease')
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
'sphinx_autodoc_typehints',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# Add readthedocs tag if building there
if os.environ.get('READTHEDOCS', None):
tags.add('readthedocs')
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'OCSPdashdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'OCSPdash.tex', 'OCSPdash Documentation',
'Scott Colby and Charles Tapley Hoyt', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'ocspdash', 'OCSPdash Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'OCSPdash', 'OCSPdash Documentation',
author, 'OCSPdash', 'One line description of project.',
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for autodoc extension ------------------------------------------
# This value selects if automatically documented members are sorted
# alphabetical (value 'alphabetical'), by member type (value 'groupwise')
# or by source order (value 'bysource'). The default is alphabetical.
# Note that for source order, the module must be a Python module with the source code available.
autodoc_member_order = 'bysource'
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'https://docs.python.org/3/': None}
# -- Options for todo extension ----------------------------------------------
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
|
Python
| 0
|
@@ -5352,42 +5352,89 @@
h',
-'One line desc
+%22A dashboard for the status of the top certificate autho
ri
-p
ti
-on of project.'
+es' OCSP responders.%22
,%0A
|
0e5118f0ec68ead39392e168efcc1a957162bb3f
|
Fix NIN example
|
examples/imagenet/nin.py
|
examples/imagenet/nin.py
|
import math
from chainer import FunctionSet, Variable
import chainer.functions as F
class NIN(FunctionSet):
"""Network-in-Network example model."""
insize = 227
def __init__(self):
w = math.sqrt(2) # MSRA scaling
super(NIN, self).__init__(
conv1 = F.Convolution2D( 3, 96, 11, wscale=w, stride=4),
conv1a = F.Convolution2D( 96, 96, 1, wscale=w),
conv1b = F.Convolution2D( 96, 96, 1, wscale=w),
conv2 = F.Convolution2D( 96, 256, 5, wscale=w, pad=2),
conv2a = F.Convolution2D( 256, 256, 1, wscale=w),
conv2b = F.Convolution2D( 256, 256, 1, wscale=w),
conv3 = F.Convolution2D( 256, 384, 3, wscale=w, pad=1),
conv3a = F.Convolution2D( 384, 384, 1, wscale=w),
conv3b = F.Convolution2D( 384, 384, 1, wscale=w),
conv4 = F.Convolution2D( 384, 1024, 3, wscale=w, pad=1),
conv4a = F.Convolution2D(1024, 1024, 1, wscale=w),
conv4b = F.Convolution2D(1024, 1000, 1, wscale=w),
)
def forward(self, x_data, y_data, train=True):
x = Variable(x_data, volatile=not train)
t = Variable(y_data, volatile=not train)
h = F.relu(self.conv1(x))
h = F.relu(self.conv1a(h))
h = F.relu(self.conv1b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv2(h))
h = F.relu(self.conv2a(h))
h = F.relu(self.conv2b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv3a(h))
h = F.relu(self.conv3b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.dropout(h, train=train)
h = F.relu(self.conv4(h))
h = F.relu(self.conv4a(h))
h = F.relu(self.conv4b(h))
h = F.average_pooling_2d(h, 6)
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
|
Python
| 0.023101
|
@@ -1826,16 +1826,26 @@
h =
+F.reshape(
F.averag
@@ -1862,16 +1862,42 @@
2d(h, 6)
+, (x_data.shape%5B0%5D, 1000))
%0A
|
e4fbd6f8e13861053a4a29c776ae24b934639fa5
|
fix ports on yaml script
|
tools/scripts/mosaic/gen_yaml.py
|
tools/scripts/mosaic/gen_yaml.py
|
#!/usr/bin/env python3
#
# Create a yaml file for running a mosaic file
# Note: *requires pyyaml*
import argparse
import yaml
def address(port):
return ['127.0.0.1', port]
def create_peers(peers):
res = []
for p in peers:
res += [{'addr':address(p[1])}]
return res
def entity(role, port, peers):
return {'role':role, 'me':address(port), 'peers':create_peers(peers)}
def create_file(num_switches, num_nodes):
peers = []
peers += [('master', 40000)]
peers += [('timer', 40001)]
switch_ports = 50000
for i in range(num_switches):
peers += [('switch', switch_ports + i)]
node_ports = 60000
for i in range(num_nodes):
peers += [('node', node_ports + i)]
# convert to dictionaries
peers2 = []
for p in peers:
peers2 += [entity(p[0], p[1], peers)]
# dump out
print("---")
for i, p in enumerate(peers2):
print(yaml.dump(p, default_flow_style=True))
if i < len(peers2) - 1:
print("---")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--switches", type=int, help="number of switches", dest="num_switches", default=1)
parser.add_argument("-n", "--nodes", type=int, help="number of nodes", dest="num_nodes", default=1)
args = parser.parse_args()
create_file(args.num_switches, args.num_nodes)
if __name__ == '__main__':
main()
|
Python
| 0
|
@@ -537,17 +537,17 @@
s = 5000
-0
+1
%0A for
@@ -642,17 +642,17 @@
s = 6000
-0
+1
%0A for
|
45f60aaedb5a17fb1ce6fd14e6c4136f3411f14d
|
Fix for audio formats without bitrate
|
scanner.py
|
scanner.py
|
# coding: utf-8
import os, os.path
import time, mimetypes
import mutagen
import config, db
def get_mime(ext):
return mimetypes.guess_type('dummy.' + ext, False)[0] or config.get('mimetypes', ext) or 'application/octet-stream'
class Scanner:
def __init__(self, session):
self.__session = session
self.__tracks = db.Track.query.all()
self.__artists = db.Artist.query.all()
self.__folders = db.Folder.query.all()
self.__added_artists = 0
self.__added_albums = 0
self.__added_tracks = 0
self.__deleted_artists = 0
self.__deleted_albums = 0
self.__deleted_tracks = 0
def scan(self, folder):
for root, subfolders, files in os.walk(folder.path):
for f in files:
self.__scan_file(os.path.join(root, f), folder)
folder.last_scan = int(time.time())
def prune(self, folder):
for track in [ t for t in self.__tracks if t.root_folder.id == folder.id and not os.path.exists(t.path) ]:
self.__remove_track(track)
for album in [ album for artist in self.__artists for album in artist.albums if len(album.tracks) == 0 ]:
album.artist.albums.remove(album)
self.__session.delete(album)
self.__deleted_albums += 1
for artist in [ a for a in self.__artists if len(a.albums) == 0 ]:
self.__session.delete(artist)
self.__deleted_artists += 1
self.__cleanup_folder(folder)
def check_cover_art(self, folder):
folder.has_cover_art = os.path.isfile(os.path.join(folder.path, 'cover.jpg'))
for f in folder.children:
self.check_cover_art(f)
def __scan_file(self, path, folder):
tr = filter(lambda t: t.path == path, self.__tracks)
if tr:
tr = tr[0]
if not os.path.getmtime(path) > tr.last_modification:
return
tag = self.__try_load_tag(path)
if not tag:
self.__remove_track(tr)
return
else:
tag = self.__try_load_tag(path)
if not tag:
return
tr = db.Track(path = path, root_folder = folder, folder = self.__find_folder(path, folder))
self.__tracks.append(tr)
self.__added_tracks += 1
tr.disc = self.__try_read_tag(tag, 'discnumber', 1, lambda x: int(x[0].split('/')[0]))
tr.number = self.__try_read_tag(tag, 'tracknumber', 1, lambda x: int(x[0].split('/')[0]))
tr.title = self.__try_read_tag(tag, 'title')
tr.year = self.__try_read_tag(tag, 'date', None, lambda x: int(x[0].split('-')[0]))
tr.genre = self.__try_read_tag(tag, 'genre')
tr.duration = int(tag.info.length)
tr.album = self.__find_album(self.__try_read_tag(tag, 'artist'), self.__try_read_tag(tag, 'album'))
tr.bitrate = tag.info.bitrate / 1000
tr.content_type = get_mime(os.path.splitext(path)[1][1:])
tr.last_modification = os.path.getmtime(path)
def __find_album(self, artist, album):
ar = self.__find_artist(artist)
al = filter(lambda a: a.name == album, ar.albums)
if al:
return al[0]
al = db.Album(name = album, artist = ar)
self.__added_albums += 1
return al
def __find_artist(self, artist):
ar = filter(lambda a: a.name.lower() == artist.lower(), self.__artists)
if ar:
return ar[0]
ar = db.Artist(name = artist)
self.__artists.append(ar)
self.__session.add(ar)
self.__added_artists += 1
return ar
def __find_folder(self, path, folder):
path = os.path.dirname(path)
fold = filter(lambda f: f.path == path, self.__folders)
if fold:
return fold[0]
full_path = folder.path
path = path[len(folder.path) + 1:]
for name in path.split(os.sep):
full_path = os.path.join(full_path, name)
fold = filter(lambda f: f.path == full_path, self.__folders)
if fold:
folder = fold[0]
else:
folder = db.Folder(root = False, name = name, path = full_path, parent = folder)
self.__folders.append(folder)
return folder
def __try_load_tag(self, path):
try:
return mutagen.File(path, easy = True)
except:
return None
def __try_read_tag(self, metadata, field, default = None, transform = lambda x: x[0]):
try:
value = metadata[field]
if not value:
return default
if transform:
value = transform(value)
return value if value else default
except:
return default
def __remove_track(self, track):
track.album.tracks.remove(track)
track.folder.tracks.remove(track)
# As we don't have a track -> playlists relationship, SQLAlchemy doesn't know it has to remove tracks
# from playlists as well, so let's help it
for playlist in db.Playlist.query.filter(db.Playlist.tracks.contains(track)):
playlist.tracks.remove(track)
self.__session.delete(track)
self.__deleted_tracks += 1
def __cleanup_folder(self, folder):
for f in folder.children:
self.__cleanup_folder(f)
if len(folder.children) == 0 and len(folder.tracks) == 0 and not folder.root:
folder.parent = None
self.__session.delete(folder)
def stats(self):
return (self.__added_artists, self.__added_albums, self.__added_tracks), (self.__deleted_artists, self.__deleted_albums, self.__deleted_tracks)
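# Hedged usage sketch (assumes a configured db session and Folder row):
#   s = Scanner(session)
#   s.scan(folder); s.prune(folder); s.check_cover_art(folder)
#   added, deleted = s.stats()  # each is (artists, albums, tracks)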
|
Python
| 0.000003
|
@@ -2522,16 +2522,17 @@
rate =
+(
tag.info
@@ -2539,16 +2539,103 @@
.bitrate
+ if hasattr(tag.info, 'bitrate') else int(os.path.getsize(path) * 8 / tag.info.length))
/ 1000%0A
|
8e664b417d978d040d780dc252418fce087c47f4
|
Fix version option
|
src/htrun/htrun.py
|
src/htrun/htrun.py
|
#
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Greentea Host Tests Runner."""
from multiprocessing import freeze_support
from htrun import init_host_test_cli_params
from htrun.host_tests_runner.host_test_default import DefaultTestSelector
from htrun.host_tests_toolbox.host_functional import handle_send_break_cmd
def main():
"""Drive command line tool 'htrun' which is using DefaultTestSelector.
1. Create DefaultTestSelector object and pass command line parameters.
2. Call default test execution function run() to start test instrumentation.
"""
freeze_support()
result = 0
cli_params = init_host_test_cli_params()
if cli_params.version: # --version
import pkg_resources # part of setuptools
version = pkg_resources.require("htrun")[0].version
print(version)
elif cli_params.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
handle_send_break_cmd(
port=cli_params.port,
disk=cli_params.disk,
reset_type=cli_params.forced_reset_type,
baudrate=cli_params.baud_rate,
verbose=cli_params.verbose,
)
else:
test_selector = DefaultTestSelector(cli_params)
try:
result = test_selector.execute()
# Ensure we don't return a negative value
if result < 0 or result > 255:
result = 1
except (KeyboardInterrupt, SystemExit):
test_selector.finish()
result = 1
raise
else:
test_selector.finish()
return result
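# Illustrative invocations (flags per init_host_test_cli_params; exact option
# names are assumptions): `htrun --version` prints the package version, while
# `htrun -b -p PORT` sends a break command over the given serial port.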
|
Python
| 0.999999
|
@@ -15,16 +15,21 @@
(c) 2021
+-2022
Arm Lim
@@ -852,21 +852,29 @@
equire(%22
-htrun
+greentea-host
%22)%5B0%5D.ve
|
625c70580770b5bb00a64d15e14d15c623db21ee
|
Update urls.py
|
taiga/base/utils/urls.py
|
taiga/base/utils/urls.py
|
import django_sites as sites
URL_TEMPLATE = "{scheme}://{domain}/{path}"
def build_url(path, scheme="http", domain="localhost"):
return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=path.lstrip("/"))
def is_absolute_url(path):
"""Test wether or not `path` is absolute url."""
return path.startswith("http")
def get_absolute_url(path):
"""Return a path as an absolute url."""
if is_absolute_url(path):
return path
site = sites.get_current()
return build_url(path, scheme=site.scheme, domain=site.domain)
|
Python
| 0.000002
|
@@ -325,16 +325,44 @@
(%22http%22)
+ or path.startswith(%22https%22)
%0A%0A%0Adef g
|
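Decoded, the hunk appends or path.startswith("https") to the return of is_absolute_url. Strictly speaking the extra clause is a no-op, since any string beginning with "https" already begins with "http", so observable behaviour is unchanged. A stricter variant would inspect the parsed scheme instead (a sketch, not the project's code):

from urllib.parse import urlparse

def is_absolute_url(path):
    """Test whether `path` is an absolute url by checking its scheme."""
    return urlparse(path).scheme in ("http", "https")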
1656cbd6b62690017af810e795b8a23b3907a1fa
|
bump 1.0.2
|
epubuilder/version.py
|
epubuilder/version.py
|
# coding=utf-8
__version__ = '1.0.1'
|
Python
| 0.000003
|
@@ -28,11 +28,11 @@
= '1.0.
-1
+2
'%0A
|
f69e4c5b18be30b8903a7826cc59b071fce1ba5a
|
Fix octaned8 results capture
|
wlauto/workloads/octaned8/__init__.py
|
wlauto/workloads/octaned8/__init__.py
|
# Copyright 2014-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#pylint: disable=E1101,W0201
import os
import re
from wlauto import Workload, Parameter, Executable
from wlauto.common.resources import File
from wlauto.exceptions import ConfigError
regex_map = {
"Richards": (re.compile(r'Richards: (\d+.*)')),
"DeltaBlue": (re.compile(r'DeltaBlue: (\d+.*)')),
"Crypto": (re.compile(r'Crypto: (\d+.*)')),
"RayTrace": (re.compile(r'RayTrace: (\d+.*)')),
"EarleyBoyer": (re.compile(r'EarleyBoyer: (\d+.*)')),
"RegExp": (re.compile(r'RegExp: (\d+.*)')),
"Splay": (re.compile(r'Splay: (\d+.*)')),
"SplayLatency": (re.compile(r'SplayLatency: (\d+.*)')),
"NavierStokes": (re.compile(r'NavierStokes: (\d+.*)')),
"PdfJS": (re.compile(r'PdfJS: (\d+.*)')),
"Mandreel": (re.compile(r'Mandreel: (\d+.*)')),
"MandreelLatency": (re.compile(r'MandreelLatency: (\d+.*)')),
"Gameboy": (re.compile(r'Gameboy: (\d+.*)')),
"CodeLoad": (re.compile(r'CodeLoad: (\d+.*)')),
"Box2D": (re.compile(r'Box2D: (\d+.*)')),
"zlib": (re.compile(r'zlib: (\d+.*)')),
"Score": (re.compile(r'Score .*: (\d+.*)'))
}
class Octaned8(Workload):
name = 'octaned8'
description = """
Runs the Octane d8 benchmark.
This workload runs d8 binaries built from source and placed in the dependencies folder along
with test assets from https://github.com/chromium/octane which also need to be placed in an
assets folder within the dependencies folder.
Original source from::
https://github.com/v8/v8/wiki/D8%20on%20Android
"""
parameters = [
Parameter('timeout', kind=int, default=120,
description='Timeout, in seconds, for the script run time.'),
]
supported_platforms = ['android']
executables = ['d8', 'natives_blob.bin', 'snapshot_blob.bin']
def initialize(self, context): # pylint: disable=no-self-use
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('mkdir -p {}'.format(assets_dir))
assets_tar = 'octaned8-assets.tar'
fpath = context.resolver.get(File(self, assets_tar))
self.device.push_file(fpath, assets_dir, timeout=300)
self.command = 'cd {}; {} busybox tar -x -f {}'.format(assets_dir, self.device.busybox, assets_tar)
self.output = self.device.execute(self.command, timeout=self.timeout, check_exit_code=False)
for f in self.executables:
binFile = context.resolver.get(Executable(self, self.device.abi, f))
self.device_exe = self.device.install(binFile)
def setup(self, context):
self.logger.info('Copying d8 binaries to device')
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.command = 'cd {}; {}/d8 ./run.js >> {} 2>&1'.format(assets_dir, self.device.binaries_directory, device_file)
def run(self, context):
self.logger.info('Starting d8 tests')
self.output = self.device.execute(self.command, timeout=self.timeout, check_exit_code=False)
def update_result(self, context):
host_file = os.path.join(context.output_directory, 'octaned8.output')
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.device.pull_file(device_file, host_file)
context.add_artifact('octaned8', host_file, 'data')
with open(os.path.join(host_file)) as octaned8_file:
for line in octaned8_file:
for label, regex in regex_map.iteritems():
match = regex.search(line)
if match:
context.result.add_metric(label, float(match.group(1)))
def finalize(self, context):
for f in self.executables:
self.device.uninstall_executable(f)
self.device.execute('rm {}'.format(self.device.path.join(self.device.working_directory, f)))
assets_dir = self.device.path.join(self.device.working_directory, 'assets')
self.device.execute('rm -rf {}'.format(assets_dir))
device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')
self.device.execute('rm {}'.format(device_file))
|
Python
| 0
|
@@ -2146,16 +2146,20 @@
ameter('
+run_
timeout'
@@ -2179,17 +2179,17 @@
efault=1
-2
+8
0,%0A
@@ -2247,23 +2247,22 @@
the
-script run time
+test execution
.'),
@@ -2935,38 +2935,19 @@
elf.
+run_
timeout
-, check_exit_code=False
)%0A%0A
@@ -3657,38 +3657,19 @@
elf.
+run_
timeout
-, check_exit_code=False
)%0A%0A
@@ -3934,68 +3934,8 @@
ile)
-%0A context.add_artifact('octaned8', host_file, 'data')
%0A%0A
@@ -4245,24 +4245,82 @@
group(1)))%0A%0A
+ self.device.execute('rm %7B%7D'.format(device_file))%0A%0A
def fina
@@ -4605,32 +4605,32 @@
tory, 'assets')%0A
+
self.dev
@@ -4677,155 +4677,4 @@
r))%0A
- device_file = self.device.path.join(self.device.working_directory, 'octaned8.output')%0A self.device.execute('rm %7B%7D'.format(device_file))%0A
|
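Reassembled, this diff renames the workload's timeout parameter to run_timeout and raises its default from 120 to 180 seconds, drops the check_exit_code=False arguments from both device.execute calls, removes the add_artifact call, and moves the device-side octaned8.output cleanup from finalize into update_result. The parameter declaration implied by the first three hunks:

from wlauto import Parameter

parameters = [
    Parameter('run_timeout', kind=int, default=180,
              description='Timeout, in seconds, for the test execution.'),
]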
8100a67f4f47ad30d454fe1aa9b90e29300c1516
|
Update score_real script to have some better info/warnings
|
scripts/score_real_classification.py
|
scripts/score_real_classification.py
|
from __future__ import print_function
import sys
from collections import defaultdict
import pprint
def dict_to_string(d):
t = []
for i in d:
x = ":".join([str(i), str(d[i])])
t.append(x)
t = sorted(t, reverse = True, key = lambda x : float(x.split(":")[1]))
return ";".join(t)
if __name__ == "__main__":
lin_match_d = defaultdict(int)
sublin_match_d = defaultdict(int)
match_threshold = 0.005
for line in sys.stdin:
tokens = line.strip().split()
read_len = int(tokens[2].strip().split("/")[1])
hpv_match = int(tokens[2].strip().split("/")[0])
## We get some reads that look like ION Torrent barf - toss those:
if read_len < 50 or hpv_match < 15:
continue
lin_toks = tokens[3].strip().strip(";").split(";")
lin_kmer_counts = [int(i) for i in tokens[5].strip().strip(";").split(";")]
sublin_toks = tokens[4].strip().strip(";").split(";")
sublin_kmer_counts = [int(i) for i in tokens[6].strip().strip(";").split(";")]
l_trip = False
s_trip = False
l_match = ""
for i in range(0, len(lin_toks)):
t = lin_toks[i].split(":")
#if (float(t[1]) > match_threshold) and lin_kmer_counts[i] > 4:
if lin_kmer_counts[i] > 5:
if l_trip:
#l_match = ""
#sys.stderr.write("Read matches to two or more lineages\n" + tokens[0])
break
else:
l_trip = True
l_match = t[0]
s_match = ""
for i in range(0, len(sublin_toks)):
t = sublin_toks[i].split(":")
if sublin_kmer_counts[i] > 2 and float(t[1]) > match_threshold:
#if float(t[1]) > match_threshold:
if s_trip:
#s_match = ""
#sys.stderr.write("Read matches to two or more sublineages" + tokens[0] + "\n")
break
s_trip = True
s_match = t[0]
if l_match is not "" and s_match is not "" and l_match is not s_match[0]:
old = ""
if lin_kmer_counts[0] > 10 and sublin_kmer_counts[1] > 2 and lin_toks[0].split(":")[0] == sublin_toks[1].split(":")[0][0]:
old = s_match
s_match = sublin_toks[1].split(":")[0]
sys.stderr.write("Lin / Sublin mistmatch: " + l_match + " " + old)
sys.stderr.write( " " + old + "->" + s_match + "\n")
else:
s_match = ""
if l_match is not "":
lin_match_d[l_match] += 1
if s_match is not "":
sublin_match_d[s_match] += 1
l_total = 0
s_total = 0
for i in lin_match_d:
l_total += lin_match_d[i]
for i in sublin_match_d:
s_total += sublin_match_d[i]
l_pct_d = defaultdict(float)
s_pct_d = defaultdict(float)
for i in lin_match_d:
l_pct_d[i] = float(lin_match_d[i]) / float(l_total)
for i in sublin_match_d:
s_pct_d[i] = float(sublin_match_d[i]) / float(s_total)
low_read_lins = ""
if l_total < 1000:
low_read_lins = "WARN:low_lineage_counts:" + str(l_total)
else:
low_read_lins = "INFO:low_lineage_counts:" + str(l_total)
low_read_sublins = ""
if s_total < 1000:
low_read_sublins = "WARN:low_sublineage_counts:" + str(s_total)
else:
low_read_sublins = "INFO:low_sublineage_counts:" + str(s_total)
#pprint.pprint(l_pct_d)
#pprint.pprint(s_pct_d)
#pprint.pprint (sublin_match_d)
print( dict_to_string(l_pct_d), dict_to_string(s_pct_d), dict_to_string(sublin_match_d), low_read_lins, low_read_sublins)
|
Python
| 0
|
@@ -3315,20 +3315,16 @@
= %22INFO:
-low_
lineage_
@@ -3511,20 +3511,16 @@
= %22INFO:
-low_
sublinea
|
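Both hunks strip the low_ prefix from the INFO-level labels only, so a sample with ample reads is no longer tagged with a misleading low_..._counts message while the WARN branches keep it. The lineage branch after the change (the sublineage branch is analogous):

if l_total < 1000:
    low_read_lins = "WARN:low_lineage_counts:" + str(l_total)
else:
    low_read_lins = "INFO:lineage_counts:" + str(l_total)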
2847dfd37a2ea728a722db3cd718f85c71c3b136
|
update doc version
|
docs/source/conf.py
|
docs/source/conf.py
|
# -*- coding: utf-8 -*-
#
# python-nvd3 documentation build configuration file, created by
# sphinx-quickstart on Thu Dec 8 12:55:34 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
#import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'python-nvd3'
copyright = u'2013, Arezqui Belaid'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1.0'
# The full version, including alpha/beta/rc tags.
release = '0.1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'python-nvd3doc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'python-nvd3.tex', u'python-nvd3 Documentation',
u'Arezqui Belaid', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'python-nvd3', u'python-nvd3 Documentation',
[u'Arezqui Belaid'], 1)
]
|
Python
| 0
|
@@ -1646,25 +1646,25 @@
ersion = '0.
-1
+2
.0'%0A# The fu
@@ -1718,17 +1718,17 @@
se = '0.
-1
+2
.0'%0A%0A# T
|
84d6be3ef6c832a0dbb3e98a47491bc8ff372603
|
Make log message more verbose.
|
ticket.py
|
ticket.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Implements a subset of session tickets as proposed for TLS in RFC 5077:
https://tools.ietf.org/html/rfc5077
"""
import os
import random
import time
import const
import sys
import pickle
from Crypto.Cipher import AES
from Crypto.Hash import HMAC
from Crypto.Hash import SHA256
import obfsproxy.common.log as logging
import mycrypto
log = logging.get_obfslogger()
# Length of the ticket's name which is used to quickly identify issued tickets.
NAME_LENGTH = 16
# Length of the IV which is used for AES-CBC.
IV_LENGTH = 16
HMAC_KEY_LENGTH = 32
AES_KEY_LENGTH = 16
# Must be a multiple of 16 bytes due to AES' block size.
IDENTIFIER = "ScrambleSuitTicket"
# +------------+------------------+--------------+
# | 16-byte IV | 64-byte E(state) | 32-byte HMAC |
# +------------+------------------+--------------+
HMACKey = None
AESKey = None
creationTime = None
def rotateKeys( ):
log.debug("Rotating session ticket keys.")
global HMACKey
global AESKey
global creationTime
HMACKey = mycrypto.strong_random(HMAC_KEY_LENGTH)
AESKey = mycrypto.strong_random(AES_KEY_LENGTH)
creationTime = int(time.time())
try:
with open(const.DATA_DIRECTORY + const.KEY_STORE, "wb") as fd:
pickle.dump([creationTime, HMACKey, AESKey], fd)
fd.close()
except IOError as e:
log.error("Error opening ticket key file: %s." % e)
def loadKeys( ):
"""Try to load the AES and HMAC key from the key store."""
log.debug("Reading session ticket keys from file.")
global HMACKey
global AESKey
global creationTime
if not os.path.exists(const.DATA_DIRECTORY + const.KEY_STORE):
rotateKeys()
return
try:
with open(const.DATA_DIRECTORY + const.KEY_STORE, "rb") as fd:
creationTime, HMACKey, AESKey = pickle.load(fd)
fd.close()
except IOError as e:
log.error("Error opening ticket key file: %s." % e)
def checkKeys( ):
"""Load the AES and the HMAC key if they are not defined yet. If they are
expired, rotate the keys."""
if (HMACKey is None) or (AESKey is None):
loadKeys()
if (int(time.time()) - creationTime) > const.KEY_ROTATION_TIME:
rotateKeys()
def decryptTicket( ticket ):
"""Verifies the validity, decrypts and finally returns the given potential
ticket as a ProtocolState object. If the ticket is invalid, `None' is
returned."""
assert len(ticket) == const.TICKET_LENGTH
global HMACKey
global AESKey
global creationTime
log.debug("Attempting to verify and decrypt %d-byte ticket." % len(ticket))
checkKeys()
# Verify if HMAC is correct.
hmac = HMAC.new(HMACKey, ticket[0:80], digestmod=SHA256).digest()
if hmac != ticket[80:const.TICKET_LENGTH]:
log.debug("Invalid HMAC. Probably no ticket.")
return None
# Decrypt ticket to obtain state.
aes = AES.new(AESKey, mode=AES.MODE_CBC, IV=ticket[0:16])
plainTicket = aes.decrypt(ticket[16:80])
issueDate = plainTicket[0:10]
identifier = plainTicket[10:28]
masterKey = plainTicket[28:60]
if not (identifier == IDENTIFIER):
log.error("Valid HMAC but invalid identifier. This could be a bug.")
return None
return ProtocolState(masterKey, int(issueDate.encode('hex'), 16))
class ProtocolState( object ):
"""Describes the protocol state of a ScrambleSuit server which is part of a
session ticket. The state can be used to bootstrap a ScrambleSuit session
without the client unlocking the puzzle."""
def __init__( self, masterKey, issueDate=int(time.time()) ):
self.identifier = IDENTIFIER
#self.protocolVersion = None
self.masterKey = masterKey
#self.clientIdentity = None
self.issueDate = issueDate
# Pad to multiple of 16 bytes due to AES' block size.
self.pad = "\0\0\0\0"
def isValid( self ):
"""Returns `True' if the protocol state is valid, i.e., if the life time
has not expired yet. Otherwise, `False' is returned."""
assert self.issueDate
now = int(time.time())
if (now - self.issueDate) > const.SESSION_TICKET_LIFETIME:
log.debug("Ticket is not valid anymore.")
return False
return True
def __repr__( self ):
return self.issueDate + self.identifier + self.masterKey + self.pad
class SessionTicket( object ):
"""Encapsulates a session ticket which can be used by the client to gain
access to a ScrambleSuit server without solving the served puzzle."""
def __init__( self, masterKey ):
"""Initialize a new session ticket which contains `masterKey'. The
parameter `symmTicketKey' is used to encrypt the ticket and
`hmacTicketKey' is used to authenticate the ticket when issued."""
assert len(masterKey) == const.MASTER_KEY_SIZE
checkKeys()
# The random name is used to recognize previously issued tickets.
#self.keyName = mycrypto.weak_random(NAME_LENGTH)
# Initialization vector for AES-CBC.
self.IV = mycrypto.strong_random(IV_LENGTH)
# The server's actual (encrypted) protocol state.
self.state = ProtocolState(masterKey)
# AES and HMAC key to protect the ticket.
self.symmTicketKey = AESKey
self.hmacTicketKey = HMACKey
def issue( self ):
"""Encrypt and authenticate the ticket and return the result which is
ready to be sent over the wire. In particular, the ticket name (for
bookkeeping) as well as the actual encrypted ticket is returned."""
self.state.issueDate = "%d" % time.time()
# Encrypt the protocol state.
aes = AES.new(self.symmTicketKey, mode=AES.MODE_CBC, IV=self.IV)
state = repr(self.state)
assert (len(state) % AES.block_size) == 0
cryptedState = aes.encrypt(state)
# Authenticate ticket name, IV and the encrypted state.
hmac = HMAC.new(self.hmacTicketKey, self.IV + \
cryptedState, digestmod=SHA256).digest()
ticket = self.IV + cryptedState + hmac
log.debug("Returning %d-byte ticket." % (len(self.IV) +
len(cryptedState) + len(hmac)))
return ticket
# Alias class name in order to provide a more intuitive API.
new = SessionTicket
|
Python
| 0.001404
|
@@ -1493,16 +1493,20 @@
et keys
+k_S
from fil
|
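The change itself is a one-word log tweak in loadKeys, naming the keys k_S in the debug message. Worth noting alongside it: the layout comment in the file fixes the ticket at 16 + 64 + 32 = 112 bytes, which is exactly how decryptTicket slices its input. A self-contained sketch of that split, assuming const.TICKET_LENGTH is 112:

IV_LENGTH = 16
STATE_LENGTH = 64
HMAC_LENGTH = 32
TICKET_LENGTH = IV_LENGTH + STATE_LENGTH + HMAC_LENGTH  # 112 bytes

def split_ticket(ticket):
    # Mirrors the slicing in decryptTicket(): | IV | E(state) | HMAC |
    assert len(ticket) == TICKET_LENGTH
    iv = ticket[0:16]
    crypted_state = ticket[16:80]
    hmac_tag = ticket[80:112]
    return iv, crypted_state, hmac_tag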
5adbbc5954c58d01d70726230857379adffed511
|
bump the timeout for instance creation (#3468)
|
spanner/cloud-client/backup_sample_test.py
|
spanner/cloud-client/backup_sample_test.py
|
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.cloud import spanner
import pytest
import random
import string
import backup_sample
def unique_instance_id():
""" Creates a unique id for the database. """
return 'test-instance-{}'.format(''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(5)))
def unique_database_id():
""" Creates a unique id for the database. """
return 'test-db-{}'.format(''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(5)))
def unique_backup_id():
""" Creates a unique id for the backup. """
return 'test-backup-{}'.format(''.join(random.choice(
string.ascii_lowercase + string.digits) for _ in range(5)))
INSTANCE_ID = unique_instance_id()
DATABASE_ID = unique_database_id()
RESTORE_DB_ID = unique_database_id()
BACKUP_ID = unique_backup_id()
@pytest.fixture(scope='module')
def spanner_instance():
spanner_client = spanner.Client()
instance_config = '{}/instanceConfigs/{}'.format(
spanner_client.project_name, 'regional-us-central1')
instance = spanner_client.instance(INSTANCE_ID, instance_config)
op = instance.create()
op.result(30) # block until completion
yield instance
instance.delete()
@pytest.fixture(scope='module')
def database(spanner_instance):
""" Creates a temporary database that is removed after testing. """
db = spanner_instance.database(DATABASE_ID)
db.create()
yield db
db.drop()
def test_create_backup(capsys, database):
backup_sample.create_backup(INSTANCE_ID, DATABASE_ID, BACKUP_ID)
out, _ = capsys.readouterr()
assert BACKUP_ID in out
def test_restore_database(capsys):
backup_sample.restore_database(INSTANCE_ID, RESTORE_DB_ID, BACKUP_ID)
out, _ = capsys.readouterr()
assert (DATABASE_ID + " restored to ") in out
assert (RESTORE_DB_ID + " from backup ") in out
assert BACKUP_ID in out
def test_list_backup_operations(capsys, spanner_instance):
backup_sample.list_backup_operations(INSTANCE_ID, DATABASE_ID)
out, _ = capsys.readouterr()
assert BACKUP_ID in out
assert DATABASE_ID in out
def test_list_backups(capsys, spanner_instance):
backup_sample.list_backups(INSTANCE_ID, DATABASE_ID, BACKUP_ID)
out, _ = capsys.readouterr()
id_count = out.count(BACKUP_ID)
assert id_count == 7
def test_update_backup(capsys):
backup_sample.update_backup(INSTANCE_ID, BACKUP_ID)
out, _ = capsys.readouterr()
assert BACKUP_ID in out
def test_delete_backup(capsys, spanner_instance):
backup_sample.delete_backup(INSTANCE_ID, BACKUP_ID)
out, _ = capsys.readouterr()
assert BACKUP_ID in out
def test_cancel_backup(capsys):
backup_sample.cancel_backup(INSTANCE_ID, DATABASE_ID, BACKUP_ID)
out, _ = capsys.readouterr()
cancel_success = "Backup creation was successfully cancelled." in out
cancel_failure = (
("Backup was created before the cancel completed." in out) and
("Backup deleted." in out)
)
assert cancel_success or cancel_failure
|
Python
| 0
|
@@ -1758,9 +1758,10 @@
ult(
-3
+12
0)
|
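The only change is op.result(30) becoming op.result(120): Spanner instance creation is a long-running operation, and 30 seconds is routinely too short in CI, so the fixture now blocks for up to two minutes before yielding. The relevant lines of the fixture after the change (an excerpt):

op = instance.create()
op.result(120)  # block until the long-running create operation completes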
c8fdcf888f6c34e8396f11b3e7ab3088af59abb6
|
Add tests for slice intersection and sanitization.
|
distarray/tests/test_utils.py
|
distarray/tests/test_utils.py
|
import unittest
from distarray import utils
class TestMultPartitions(unittest.TestCase):
"""
Test the multiplicative parition code.
"""
def test_both_methods(self):
"""
Do the two methods of computing the multiplicative partitions agree?
"""
for s in [2, 3]:
for n in range(2, 512):
self.assertEqual(utils.mult_partitions(n, s),
utils.create_factors(n, s))
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Python
| 0
|
@@ -38,16 +38,87 @@
utils%0A%0A
+from numpy import arange%0Afrom numpy.testing import assert_array_equal%0A%0A
%0Aclass T
@@ -535,16 +535,1202 @@
, s))%0A%0A%0A
+class TestSanitizeIndices(unittest.TestCase):%0A%0A def test_point(self):%0A itype, inds = utils.sanitize_indices(1)%0A self.assertEqual(itype, 'point')%0A self.assertEqual(inds, (1,))%0A%0A def test_slice(self):%0A itype, inds = utils.sanitize_indices(slice(1,10))%0A self.assertEqual(itype, 'view')%0A self.assertEqual(inds, (slice(1,10),))%0A%0A def test_mixed(self):%0A provided = (5, 3, slice(7, 10, 2), 99, slice(1,10))%0A itype, inds = utils.sanitize_indices(provided)%0A self.assertEqual(itype, 'view')%0A self.assertEqual(inds, provided)%0A%0A%0Aclass TestSliceIntersection(unittest.TestCase):%0A%0A def test_containment(self):%0A arr = arange(20)%0A slc = utils.slice_intersection(slice(1,10), slice(2, 4))%0A assert_array_equal(arr%5Bslc%5D, arr%5Bslice(2, 4, 1)%5D)%0A%0A def test_overlapping(self):%0A arr = arange(20)%0A slc = utils.slice_intersection(slice(1,10), slice(4, 15))%0A assert_array_equal(arr%5Bslc%5D, arr%5Bslice(4, 10)%5D)%0A%0A def test_disjoint(self):%0A arr = arange(20)%0A slc = utils.slice_intersection(slice(1,10), slice(11, 15))%0A assert_array_equal(arr%5Bslc%5D, arr%5Bslice(11, 10)%5D)%0A%0A%0A
if __nam
|
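The added tests pin down utils.slice_intersection for unit-step slices: the result spans the overlap of the two ranges, and disjoint inputs yield an empty slice such as slice(11, 10). An implementation consistent with all three cases (hypothetical — the project's own function may differ):

def slice_intersection(s1, s2):
    # Intersect two unit-step slices; disjoint inputs give start > stop,
    # which indexes an empty range.
    return slice(max(s1.start, s2.start), min(s1.stop, s2.stop), 1)

slice_intersection(slice(1, 10), slice(4, 15))   # slice(4, 10, 1)
slice_intersection(slice(1, 10), slice(11, 15))  # slice(11, 10, 1)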
e0c1d52cd66914db237bac5ebcc7b86fa0d4a54d
|
Fix consultant serializer
|
radar/api/serializers/consultants.py
|
radar/api/serializers/consultants.py
|
from cornflake.sqlalchemy_orm import ModelSerializer, ReferenceField
from cornflake import fields
from cornflake import serializers
from cornflake.validators import not_empty, upper, max_length, none_if_blank, optional, lower, email_address
from cornflake.exceptions import ValidationError
from radar.api.serializers.common import GroupField, MetaMixin, PatientMixin
from radar.api.serializers.validators import gmc_number
from radar.database import db
from radar.models.consultants import Consultant, GroupConsultant, Specialty
from radar.models.groups import GROUP_TYPE
from radar.models.patient_consultants import PatientConsultant
class SpecialtySerializer(ModelSerializer):
class Meta(object):
model_class = Specialty
class SpeciailtyField(ReferenceField):
model_class = Specialty
serializer_class = SpecialtySerializer
class ChildGroupConsultantSerializer(MetaMixin, ModelSerializer):
group = GroupField()
class Meta(object):
model_class = GroupConsultant
exclude = ['id', 'consultant_id', 'group_id']
def validate_group(self, group):
if group.type != GROUP_TYPE.HOSPITAL:
raise ValidationError('Must be a hospital.')
return group
class GroupConsultantListSerializer(serializers.ListSerializer):
child = ChildGroupConsultantSerializer()
def validate(self, group_consultants):
groups = set()
for i, group_consultant in enumerate(group_consultants):
group = group_consultant['group']
if group in groups:
raise ValidationError({i: {'group': 'Consultant already in group.'}})
else:
groups.add(group)
return group_consultants
# TODO check GMC number not duplicated
class ConsultantSerializer(ModelSerializer):
first_name = fields.StringField(validators=[not_empty(), upper(), max_length(100)])
last_name = fields.StringField(validators=[not_empty(), upper(), max_length(100)])
email = fields.StringField(required=False, validators=[none_if_blank(), optional(), lower(), email_address()])
telephone_number = fields.StringField(required=False, validators=[none_if_blank(), optional(), max_length(100)])
gmc_number = fields.StringField(required=False, validators=[gmc_number()])
groups = GroupConsultantListSerializer(source='group_consultants')
specialty = SpeciailtyField()
class Meta(object):
model_class = Consultant
def _save(self, instance, data):
instance.first_name = data['first_name']
instance.last_name = data['last_name']
instance.email = data['email']
instance.telephone_number = data['telephone_number']
instance.gmc_number = data['gmc_number']
instance.specialty = data['specialty']
instance.group_consultants = self.fields['groups'].create(data['group_consultants'])
def create(self, data):
instance = Consultant()
self._save(instance, data)
return instance
def update(self, instance, data):
# Unique constraint fails unless we flush the deletes before the inserts
instance.group_consultants = []
db.session.flush()
self._save(instance, data)
return instance
class ChildConsultantSerializer(MetaMixin, ModelSerializer):
specialty = SpeciailtyField()
class Meta(object):
model_class = Consultant
exclude = ['specialty_id']
class ConsultantField(ReferenceField):
model_class = Consultant
serializer_class = ChildConsultantSerializer
class GroupConsultantSerializer(MetaMixin, ModelSerializer):
group = GroupField()
consultant = ConsultantField()
class Meta(object):
model_class = GroupConsultant
exclude = ['group_id', 'consultant_id']
class PatientConsultantSerializer(PatientMixin, MetaMixin, ModelSerializer):
from_date = fields.DateField()
to_date = fields.DateField(required=False)
consultant = ConsultantField()
class Meta(object):
model_class = PatientConsultant
exclude = ['consultant_id']
def validate(self, data):
data = super(PatientConsultantSerializer, self).validate(data)
if data['to_date'] is not None and data['to_date'] < data['from_date']:
raise ValidationError({'to_date': 'Must be on or after from date.'})
return data
|
Python
| 0.000001
|
@@ -1772,32 +1772,43 @@
ltantSerializer(
+MetaMixin,
ModelSerializer)
@@ -2457,16 +2457,51 @@
nsultant
+%0A exclude = %5B'specialty_id'%5D
%0A%0A de
|
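Decoded against the old file, both hunks land in ConsultantSerializer: MetaMixin joins its bases and its Meta gains exclude = ['specialty_id'], bringing it in line with ChildConsultantSerializer further down. The resulting declarations, as an excerpt with the record's own imports (field definitions unchanged):

from cornflake.sqlalchemy_orm import ModelSerializer
from radar.api.serializers.common import MetaMixin
from radar.models.consultants import Consultant

class ConsultantSerializer(MetaMixin, ModelSerializer):
    # ...field definitions as in the original file...

    class Meta(object):
        model_class = Consultant
        exclude = ['specialty_id']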
f9395cb44587248178afd493e78ccf63b6ed7159
|
Add unittest test_obj_make_compatible() for dp objects
|
cyborg/tests/unit/objects/test_device_profile.py
|
cyborg/tests/unit/objects/test_device_profile.py
|
# Copyright 2019 Intel, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from cyborg import objects
from cyborg.tests.unit.db import base
from cyborg.tests.unit.db import utils
from cyborg.tests.unit import fake_device_profile
class TestDeviceProfileObject(base.DbTestCase):
def setUp(self):
super(TestDeviceProfileObject, self).setUp()
self.fake_device_profile = utils.get_test_device_profile()
def test_get_by_name(self):
name = self.fake_device_profile['name']
with mock.patch.object(self.dbapi, 'device_profile_get',
autospec=True) as mock_db_devprof_get:
mock_db_devprof_get.return_value = self.fake_device_profile
obj_devprof = objects.DeviceProfile.get_by_name(self.context, name)
mock_db_devprof_get.assert_called_once_with(self.context, name)
self.assertEqual(self.context, obj_devprof._context)
self.assertEqual(name, obj_devprof.name)
self.assertIn('description', obj_devprof)
def test_get_by_uuid(self):
uuid = self.fake_device_profile['uuid']
with mock.patch.object(self.dbapi, 'device_profile_get_by_uuid',
autospec=True) as mock_db_devprof_get:
mock_db_devprof_get.return_value = self.fake_device_profile
obj_devprof = objects.DeviceProfile.get_by_uuid(self.context, uuid)
mock_db_devprof_get.assert_called_once_with(self.context, uuid)
self.assertEqual(self.context, obj_devprof._context)
self.assertEqual(uuid, obj_devprof.uuid)
self.assertIn('description', obj_devprof)
def test_list(self):
with mock.patch.object(self.dbapi, 'device_profile_list',
autospec=True) as mock_db_devprof_list:
mock_db_devprof_list.return_value = [self.fake_device_profile]
obj_devprofs = objects.DeviceProfile.list(self.context)
self.assertEqual(1, mock_db_devprof_list.call_count)
self.assertEqual(1, len(obj_devprofs))
self.assertIsInstance(obj_devprofs[0], objects.DeviceProfile)
self.assertEqual(self.context, obj_devprofs[0]._context)
self.assertEqual(self.fake_device_profile['name'],
obj_devprofs[0].name)
self.assertEqual(self.fake_device_profile['description'],
obj_devprofs[0].description)
def test_create(self):
api_devprofs = fake_device_profile.get_api_devprofs()
api_devprof = api_devprofs[0]
db_devprofs = fake_device_profile.get_db_devprofs()
db_devprof = db_devprofs[0]
with mock.patch.object(self.dbapi, 'device_profile_create',
autospec=True) as mock_db_devprof_create:
mock_db_devprof_create.return_value = self.fake_device_profile
obj_devprof = objects.DeviceProfile(**api_devprof)
obj_devprof.create(self.context)
mock_db_devprof_create.assert_called_once_with(
self.context, db_devprof)
def test_destroy(self):
uuid = self.fake_device_profile['uuid']
with mock.patch.object(self.dbapi, 'device_profile_get_by_uuid',
autospec=True) as mock_dp_get:
mock_dp_get.return_value = self.fake_device_profile
with mock.patch.object(self.dbapi, 'device_profile_delete',
autospec=True) as m_dp_delete:
m_dp_delete.return_value = None
obj_devprof = objects.DeviceProfile.get_by_uuid(
self.context, uuid)
obj_devprof.destroy(self.context)
m_dp_delete.assert_called_once_with(self.context, uuid)
self.assertEqual(self.context, obj_devprof._context)
def test_update(self):
fake_db_devprofs = fake_device_profile.get_db_devprofs()
fake_obj_devprofs = fake_device_profile.get_obj_devprofs()
db_devprof = fake_db_devprofs[0]
db_devprof['created_at'] = None
db_devprof['updated_at'] = None
with mock.patch.object(self.dbapi, 'device_profile_get_by_uuid',
autospec=True) as mock_dp_get:
mock_dp_get.return_value = db_devprof
uuid = fake_db_devprofs[0]['uuid']
# Start with db_devprofs[0], corr. to fake_obj_devprofs[0]
obj_devprof = objects.DeviceProfile.get_by_uuid(self.context, uuid)
# Change contents to fake_obj_devprofs[1] except uuid
obj_devprof = fake_obj_devprofs[1]
obj_devprof['uuid'] = uuid
with mock.patch.object(self.dbapi, 'device_profile_update',
autospec=True) as mock_dp_update:
mock_dp_update.return_value = db_devprof
obj_devprof.save(self.context)
mock_dp_get.assert_called_once_with(self.context, uuid)
mock_dp_update.assert_called_once()
|
Python
| 0.000009
|
@@ -5606,12 +5606,509 @@
lled_once()%0A
+%0A def test_obj_make_compatible(self):%0A dp_obj = objects.DeviceProfile(description=%22fake description%22)%0A primitive = dp_obj.obj_to_primitive()%0A dp_obj.obj_make_compatible(primitive%5B'cyborg_object.data'%5D, '1.0')%0A self.assertNotIn('description', primitive%5B'cyborg_object.data'%5D)%0A primitive = dp_obj.obj_to_primitive()%0A dp_obj.obj_make_compatible(primitive%5B'cyborg_object.data'%5D, '1.1')%0A self.assertIn('description', primitive%5B'cyborg_object.data'%5D)%0A
|
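The new test drives the oslo.versionedobjects downgrade path: converting a DeviceProfile primitive to version 1.0 must drop description, while 1.1 keeps it. A sketch of the obj_make_compatible method such a test implies, written as it would sit on the DeviceProfile class (hypothetical — the real method lives in cyborg's objects module):

from oslo_utils import versionutils

def obj_make_compatible(self, primitive, target_version):
    """Downgrade a primitive in place for consumers on older versions."""
    super(DeviceProfile, self).obj_make_compatible(primitive, target_version)
    target = versionutils.convert_version_to_tuple(target_version)
    if target < (1, 1):
        # 'description' was added in object version 1.1; strip it for 1.0.
        primitive.pop('description', None)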