commit stringlengths 40 40 | subject stringlengths 1 1.49k | old_file stringlengths 4 311 | new_file stringlengths 4 311 | new_contents stringlengths 1 29.8k | old_contents stringlengths 0 9.9k | lang stringclasses 3 values | proba float64 0 1 |
|---|---|---|---|---|---|---|---|
b5146035b7f4ae641a53bb956e9afee62c50c347 | Change cache directory for vendor LST | kotori/vendor/lst/h2m/util.py | kotori/vendor/lst/h2m/util.py | # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
import os
from appdirs import user_cache_dir
from kotori.daq.intercom.c import LibraryAdapter, StructRegistryByID
#from kotori.daq.intercom.cffi_adapter import LibraryAdapterCFFI
def setup_h2m_structs_pyclibrary():
cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapter(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
def setup_h2m_structs_cffi():
cache_dir = os.path.join(user_cache_dir('kotori'), 'lst')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapterCFFI(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
setup_h2m_structs = setup_h2m_structs_pyclibrary
| # -*- coding: utf-8 -*-
# (c) 2015 Andreas Motl, Elmyra UG <andreas.motl@elmyra.de>
import os
from appdirs import user_cache_dir
from kotori.daq.intercom.c import LibraryAdapter, StructRegistryByID
#from kotori.daq.intercom.cffi_adapter import LibraryAdapterCFFI
def setup_h2m_structs_pyclibrary():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapter(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
def setup_h2m_structs_cffi():
cache_dir = user_cache_dir('lst', 'elmyra')
if not os.path.isdir(cache_dir): os.makedirs(cache_dir)
lib_dir = os.path.join(os.path.dirname(__file__), 'cpp')
library = LibraryAdapterCFFI(u'h2m_structs.h', u'h2m_structs.so', include_path=lib_dir, library_path=lib_dir, cache_path=cache_dir)
struct_registry = StructRegistryByID(library)
return struct_registry
setup_h2m_structs = setup_h2m_structs_pyclibrary
| Python | 0 |
e6b11c0c110d0457cc31d7d798a2b35e19a0f56e | fix wrong parser | slackn/cli.py | slackn/cli.py | import sys
import logging
from argparse import ArgumentParser
from slackn.core import Queue, Notifier
from slackn.version import version
log = logging.getLogger('slackn')
def get_queue(s):
if ':' in s:
host,port = s.split(':')
else:
host,port = (s, 6379)
return Queue(host,port)
def process():
parser = ArgumentParser(description='slackn_process v%s' % version)
parser.add_argument('--slack-channel',
help='channel to send notifications')
parser.add_argument('--slack-token',
help='channel to send notifications')
parser.add_argument('--redis',
default='127.0.0.1:6379',
help='redis host:port to connect to')
args = parser.parse_args()
queue = get_queue(args.redis)
notifier = Notifier(args.slack_token, args.slack_channel)
for hostname,msgs in queue.dump().items():
notifier.add_host(hostname, msgs)
queue.increment('sent', len(msgs))
notifier.send()
def notify():
common_parser = ArgumentParser(add_help=False)
common_parser.add_argument('--redis',
help='redis host to connect to (127.0.0.1:6379)',
default='127.0.0.1:6379')
parser = ArgumentParser(description='slackn-notify %s' % version,
parents=[common_parser])
subparsers = parser.add_subparsers(description='notification type',
dest='subcommand')
parser_host = subparsers.add_parser('host')
parser_host.add_argument('hostname')
parser_host.add_argument('hoststate')
parser_host.add_argument('hostoutput')
parser_host.add_argument('nagiostype')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_service.add_argument('nagiostype')
args = parser.parse_args()
if not args.subcommand:
print('no notification type provided')
sys.exit(1)
queue = get_queue(args.redis)
notify_args = { k:v for k,v in args.__dict__.items() }
for k in ('redis','subcommand'):
del notify_args[k]
notify_args['type'] = args.subcommand
queue.submit(notify_args)
| import sys
import logging
from argparse import ArgumentParser
from slackn.core import Queue, Notifier
from slackn.version import version
log = logging.getLogger('slackn')
def get_queue(s):
if ':' in s:
host,port = s.split(':')
else:
host,port = (s, 6379)
return Queue(host,port)
def process():
parser = ArgumentParser(description='slackn_process v%s' % version)
parser.add_argument('--slack-channel',
help='channel to send notifications')
parser.add_argument('--slack-token',
help='channel to send notifications')
parser.add_argument('--redis',
default='127.0.0.1:6379',
help='redis host:port to connect to')
args = parser.parse_args()
queue = get_queue(args.redis)
notifier = Notifier(args.slack_token, args.slack_channel)
for hostname,msgs in queue.dump().items():
notifier.add_host(hostname, msgs)
queue.increment('sent', len(msgs))
notifier.send()
def notify():
common_parser = ArgumentParser(add_help=False)
common_parser.add_argument('--redis',
help='redis host to connect to (127.0.0.1:6379)',
default='127.0.0.1:6379')
parser = ArgumentParser(description='slackn-notify %s' % version,
parents=[common_parser])
subparsers = parser.add_subparsers(description='notification type',
dest='subcommand')
parser_host = subparsers.add_parser('host')
parser_host.add_argument('hostname')
parser_host.add_argument('hoststate')
parser_host.add_argument('hostoutput')
parser_host.add_argument('nagiostype')
parser_service = subparsers.add_parser('service')
parser_service.add_argument('hostname')
parser_service.add_argument('servicedesc')
parser_service.add_argument('servicestate')
parser_service.add_argument('serviceoutput')
parser_host.add_argument('nagiostype')
args = parser.parse_args()
if not args.subcommand:
print('no notification type provided')
sys.exit(1)
queue = get_queue(args.redis)
notify_args = { k:v for k,v in args.__dict__.items() }
for k in ('redis','subcommand'):
del notify_args[k]
notify_args['type'] = args.subcommand
queue.submit(notify_args)
| Python | 0.998471 |
463d044cfa70de6bde04c380c459274acb71a1b6 | add database | hello.py | hello.py | from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
from flask.ext.sqlalchemy import SQLAlchemy
basedir = os.path.abspath(os.path.dirname(__file__))
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
app.config['SQLALCHEMY_DATABASE_URI'] = \
'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
class Role(db.Model):
__tablename__ = 'roles'
id = db.Column(db.Integer, primary_key = True)
name = db.Column(db.String(64), unique = True)
def __repr__(self):
return '<Role %r>' % self.name
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key = True)
username = db.Column(db.String(64), unique = True, index = True)
def __repr__(self):
return '<User %r>' % self.username
class NameForm(Form):
name = StringField('What\'s your name?', validators = [Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods = ['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
manager.run() | from flask import Flask, render_template, session, redirect, url_for, flash
from flask.ext.script import Manager
from flask.ext.bootstrap import Bootstrap
from flask.ext.moment import Moment
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField
from wtforms.validators import Required
app = Flask(__name__)
app.config['SECRET_KEY'] = 'hard to guess string'
manager = Manager(app)
bootstrap = Bootstrap(app)
moment = Moment(app)
class NameForm(Form):
name = StringField('What\'s your name?', validators = [Required()])
submit = SubmitField('Submit')
@app.errorhandler(404)
def page_not_found(e):
return render_template('404.html'), 404
@app.errorhandler(500)
def internal_server_error(e):
return render_template('500.html'), 500
@app.route('/', methods = ['GET', 'POST'])
def index():
form = NameForm()
if form.validate_on_submit():
old_name = session.get('name')
if old_name is not None and old_name != form.name.data:
flash('Looks like you have changed your name!')
session['name'] = form.name.data
form.name.data = ''
return redirect(url_for('index'))
return render_template('index.html', form=form, name=session.get('name'))
if __name__ == '__main__':
manager.run() | Python | 0.000001 |
b07ada0833c1d8319e946e7444b2f2c1337c15d1 | Add some debug | hello.py | hello.py | from flask import Flask, render_template, jsonify, request
import pandas as pd
import networkx as nx
import pygraphviz as pgv
import json
import tempfile
import numpy as np
import brewer2mpl
from StringIO import StringIO
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/')
def hello():
return render_template('index.html', name="Julia")
@app.route('/graph', methods=["POST"])
def get_image():
import os
print os.getcwd()
history = StringIO(request.form["history"])
pair_counts, node_totals = get_statistics(history)
G = create_graph(pair_counts[:20], node_totals)
response = {'graph': dot_draw(G, tmp_dir="./tmp")}
return jsonify(response)
def dot_draw(G, prog="circo", tmp_dir="/tmp"):
# Hackiest code :)
tmp_dot = tempfile.mktemp(dir=tmp_dir, suffix=".dot")
tmp_image = tempfile.mktemp(dir=tmp_dir, suffix=".png")
nx.write_dot(G, tmp_dot)
dot_graph = pgv.AGraph(tmp_dot)
dot_graph.draw(tmp_image, prog=prog)
with open(tmp_image) as f:
data = f.read()
return data.encode("base64")
def getwidth(node, node_totals):
count = np.sqrt(node_totals[node])
count /= float(sum(np.sqrt(node_totals)))
count *= 20
count = max(count, 0.3)
count = min(count, 4)
return count
def get_colors(nodes):
n_colors = 8
colors = {}
set2 = brewer2mpl.get_map('Dark2', 'qualitative', n_colors).hex_colors
for i, node in enumerate(nodes):
colors[node] = set2[i % n_colors]
return colors
def create_graph(pair_counts, node_totals):
G = nx.DiGraph()
node_colors = get_colors(list(node_totals.index))
for (frm, to), count in pair_counts.iterrows():
G.add_edge(frm, to, penwidth=float(count) / 8, color=node_colors[frm])
for node in G.nodes():
G.node[node]['width'] = getwidth(node, node_totals)
G.node[node]['height'] = G.node[node]['width']
G.node[node]['color'] = node_colors[node]
G.node[node]['label'] = "%s (%d%%)" % (node, int(node_totals[node] / float(sum(node_totals)) * 100) )
return G
def get_statistics(text):
df = pd.read_csv(text, sep=' ', header=None, names=["row", "command"], index_col="row")
pairs = pd.DataFrame(index=range(len(df) - 1))
pairs['dist'] = df.index[1:].values - df.index[:-1].values
pairs['from'] = df['command'][:-1].values
pairs['to'] = df['command'][1:].values
node_totals = df['command'].value_counts()
close_pairs = pairs[pairs.dist == 1]
pair_counts = close_pairs.groupby(['from', 'to']).aggregate(len).rename(columns= {'dist': 'count'})
pair_counts = pair_counts.sort('count', ascending=False)
return pair_counts, node_totals
if __name__ == "__main__":
app.run(port=5001)
| from flask import Flask, render_template, jsonify, request
import pandas as pd
import networkx as nx
import pygraphviz as pgv
import json
import tempfile
import numpy as np
import brewer2mpl
from StringIO import StringIO
app = Flask(__name__)
app.config['DEBUG'] = True
@app.route('/')
def hello():
return render_template('index.html', name="Julia")
@app.route('/graph', methods=["POST"])
def get_image():
history = StringIO(request.form["history"])
pair_counts, node_totals = get_statistics(history)
G = create_graph(pair_counts[:20], node_totals)
response = {'graph': dot_draw(G, tmp_dir="./tmp")}
return jsonify(response)
def dot_draw(G, prog="circo", tmp_dir="/tmp"):
# Hackiest code :)
tmp_dot = tempfile.mktemp(dir=tmp_dir, suffix=".dot")
tmp_image = tempfile.mktemp(dir=tmp_dir, suffix=".png")
nx.write_dot(G, tmp_dot)
dot_graph = pgv.AGraph(tmp_dot)
dot_graph.draw(tmp_image, prog=prog)
with open(tmp_image) as f:
data = f.read()
return data.encode("base64")
def getwidth(node, node_totals):
count = np.sqrt(node_totals[node])
count /= float(sum(np.sqrt(node_totals)))
count *= 20
count = max(count, 0.3)
count = min(count, 4)
return count
def get_colors(nodes):
n_colors = 8
colors = {}
set2 = brewer2mpl.get_map('Dark2', 'qualitative', n_colors).hex_colors
for i, node in enumerate(nodes):
colors[node] = set2[i % n_colors]
return colors
def create_graph(pair_counts, node_totals):
G = nx.DiGraph()
node_colors = get_colors(list(node_totals.index))
for (frm, to), count in pair_counts.iterrows():
G.add_edge(frm, to, penwidth=float(count) / 8, color=node_colors[frm])
for node in G.nodes():
G.node[node]['width'] = getwidth(node, node_totals)
G.node[node]['height'] = G.node[node]['width']
G.node[node]['color'] = node_colors[node]
G.node[node]['label'] = "%s (%d%%)" % (node, int(node_totals[node] / float(sum(node_totals)) * 100) )
return G
def get_statistics(text):
df = pd.read_csv(text, sep=' ', header=None, names=["row", "command"], index_col="row")
pairs = pd.DataFrame(index=range(len(df) - 1))
pairs['dist'] = df.index[1:].values - df.index[:-1].values
pairs['from'] = df['command'][:-1].values
pairs['to'] = df['command'][1:].values
node_totals = df['command'].value_counts()
close_pairs = pairs[pairs.dist == 1]
pair_counts = close_pairs.groupby(['from', 'to']).aggregate(len).rename(columns= {'dist': 'count'})
pair_counts = pair_counts.sort('count', ascending=False)
return pair_counts, node_totals
if __name__ == "__main__":
app.run(port=5001)
| Python | 0.000009 |
9de3dacc7c687bc5e4d11a5a334f5ef5cc4d2f37 | Fix call to genome mapping code | rnacentral_pipeline/cli/genome_mapping.py | rnacentral_pipeline/cli/genome_mapping.py | # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral.genome_mapping import urls
from rnacentral_pipeline.rnacentral.genome_mapping import blat
@click.group('genome-mapping')
def cli():
"""
This group of commands deals with figuring out what data to map as well as
parsing the result into a format for loading.
"""
pass
@cli.command('select-hits')
@click.argument('assembly_id')
@click.argument('hits', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def select_hits(assembly_id, hits, output):
blat.write_selected(assembly_id, hits, output)
@cli.command('url-for')
@click.option('--host', default='ensembl')
@click.argument('species')
@click.argument('assembly_id')
@click.argument('output', default='-', type=click.File('w'))
def find_remote_url(species, assembly_id, output, host=None):
url = urls.url_for(species, assembly_id, host=host)
output.write(url)
@cli.command('urls-for')
@click.argument('filename', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def find_remote_urls(filename, output):
urls.write_urls_for(filename, output)
| # -*- coding: utf-8 -*-
"""
Copyright [2009-2018] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import click
from rnacentral_pipeline.rnacentral import genome_mapping
@click.group('genome-mapping')
def cli():
"""
This group of commands deals with figuring out what data to map as well as
parsing the result into a format for loading.
"""
pass
@cli.command('select-hits')
@click.argument('assembly_id')
@click.argument('hits', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def select_hits(assembly_id, hits, output):
genome_mapping.blat.write_selected(assembly_id, hits, output)
@cli.command('url-for')
@click.option('--host', default='ensembl')
@click.argument('species')
@click.argument('assembly_id')
@click.argument('output', default='-', type=click.File('w'))
def find_remote_url(species, assembly_id, output, host=None):
url = genome_mapping.urls.url_for(species, assembly_id, host=host)
output.write(url)
@cli.command('urls-for')
@click.argument('filename', default='-', type=click.File('r'))
@click.argument('output', default='-', type=click.File('w'))
def find_remote_urls(filename, output):
genome_mapping.urls.write_urls_for(filename, output)
| Python | 0.000002 |
6424edf4186236443ba4ec5a1b2ffcc26de7c695 | add classifications | fl/__init__.py | fl/__init__.py | # encoding=utf-8
from pupa.scrape import Jurisdiction, Organization
from .votes import FlVoteScraper
from .bills import FlBillScraper
from .people import FlPersonScraper
class Florida(Jurisdiction):
division_id = "ocd-division/country:us/state:fl"
classification = "government"
name = "Florida"
url = "http://myflorida.com"
scrapers = {
# "votes": FlVoteScraper,
"bills": FlBillScraper,
"people": FlPersonScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'}]
legislative_sessions = [
{'name': '2011 Regular Session', 'identifier': '2011', 'classification': 'primary'},
{'name': '2012 Regular Session', 'identifier': '2012', 'classification': 'primary'},
{'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', 'classification': 'special'},
{'name': '2013 Regular Session', 'identifier': '2013', 'classification': 'primary'},
{'name': '2014 Regular Session', 'identifier': '2014', 'classification': 'primary'},
{'name': '2014 Special Session A', 'identifier': '2014A', 'classification': 'special'},
{'name': '2015 Regular Session', 'identifier': '2015', 'classification': 'primary'},
{'name': '2015 Special Session A', 'identifier': '2015A', 'classification': 'special'},
{'name': '2015 Special Session B', 'identifier': '2015B', 'classification': 'special'},
{'name': '2016 Regular Session', 'identifier': '2016', 'classification': 'primary'},
]
def get_organizations(self):
legis = Organization(name="Florida Legislature", classification="legislature")
upper = Organization('Florida Senate', classification='upper', parent_id=legis._id)
lower = Organization('Florida House', classification='lower', parent_id=legis._id)
for n in range(1, 41):
upper.add_post(label=str(n), role='Senator')
for n in range(1, 121):
lower.add_post(label=str(n), role='Representative')
yield legis
yield upper
yield lower
| # encoding=utf-8
from pupa.scrape import Jurisdiction, Organization
from .votes import FlVoteScraper
from .bills import FlBillScraper
from .people import FlPersonScraper
class Florida(Jurisdiction):
division_id = "ocd-division/country:us/state:fl"
classification = "government"
name = "Florida"
url = "http://myflorida.com"
scrapers = {
# "votes": FlVoteScraper,
"bills": FlBillScraper,
"people": FlPersonScraper,
}
parties = [{'name': 'Republican'},
{'name': 'Democratic'},
{'name': 'Independent'}]
legislative_sessions = [
{'name': '2011 Regular Session', 'identifier': '2011', },
{'name': '2012 Regular Session', 'identifier': '2012', },
{'name': '2012 Extraordinary Apportionment Session', 'identifier': '2012B', },
{'name': '2013 Regular Session', 'identifier': '2013', },
{'name': '2014 Regular Session', 'identifier': '2014', },
{'name': '2014 Special Session A', 'identifier': '2014A', },
{'name': '2015 Regular Session', 'identifier': '2015', },
{'name': '2015 Special Session A', 'identifier': '2015A', },
{'name': '2015 Special Session B', 'identifier': '2015B', },
{'name': '2016 Regular Session', 'identifier': '2016', },
]
def get_organizations(self):
legis = Organization(name="Florida Legislature", classification="legislature")
upper = Organization('Florida Senate', classification='upper', parent_id=legis._id)
lower = Organization('Florida House', classification='lower', parent_id=legis._id)
for n in range(1, 41):
upper.add_post(label=str(n), role='Senator')
for n in range(1, 121):
lower.add_post(label=str(n), role='Representative')
yield legis
yield upper
yield lower
| Python | 0.000317 |
d915b43cea8e4ebb5792daaa4d537d4986fc3d0f | document for command decorator | flask_slack.py | flask_slack.py | """
flask_slack
~~~~~~~~~~~~~~~
Slack extension for Flask.
:copyright: (c) 2014 by VeryCB.
:license: BSD, see LICENSE for more details.
"""
from six import string_types
__all__ = ('Slack',)
__version__ = '0.1.2'
__author__ = 'VeryCB <imcaibin@gmail.com>'
class Slack(object):
def __init__(self, app=None):
self._commands = {}
self.team_id = None
if app:
self.init_app(app)
def init_app(self, app=None):
"""Initialize application configuration"""
config = getattr(app, 'config', app)
self.team_id = config.get('TEAM_ID')
def command(self, command, token, team_id=None, methods=['GET'], **kwargs):
"""A decorator used to register a command.
:param command: the command to register
:param token: your command token provided by slack
:param team_id: optional. your team_id provided by slack.
You can also specify the "TEAM_ID" in app
configuration file for one-team project
:param methods: optional. HTTP methods which are accepted to
execute the command
:param kwargs: optional. the optional arguments which will be passed
to your register method
"""
if team_id is None:
team_id = self.team_id
if team_id is None:
raise RuntimeError('TEAM_ID is not found in your configuration!')
def deco(func):
self._commands[(team_id, command)] = (func, token, methods, kwargs)
return func
return deco
def dispatch(self):
from flask import request
method = request.method
data = request.args
if method == 'POST':
data = request.form
token = data.get('token')
team_id = data.get('team_id')
command = data.get('command')
if isinstance(command, string_types):
command = command.strip().lstrip('/')
try:
self.validate(command, token, team_id, method)
except SlackError as e:
return self.response(e.msg)
func, _, _, kwargs = self._commands[(team_id, command)]
kwargs.update(data.to_dict())
return func(**kwargs)
dispatch.methods = ['GET', 'POST']
def validate(self, command, token, team_id, method):
if (team_id, command) not in self._commands:
raise SlackError('Command {0} is not found in team {1}'.format(
command, team_id))
func, _token, methods, kwargs = self._commands[(team_id, command)]
if method not in methods:
raise SlackError('{} request is not allowed'.format(method))
if token != _token:
raise SlackError('Your token {} is invalid'.format(token))
def response(self, text):
from flask import Response
return Response(text, content_type='text/plain; charset=utf-8')
class SlackError(Exception):
def __init__(self, msg):
self.msg = msg
| """
flask_slack
~~~~~~~~~~~~~~~
Slack extension for Flask.
:copyright: (c) 2014 by VeryCB.
:license: BSD, see LICENSE for more details.
"""
from six import string_types
__all__ = ('Slack',)
__version__ = '0.1.2'
__author__ = 'VeryCB <imcaibin@gmail.com>'
class Slack(object):
def __init__(self, app=None):
self._commands = {}
self.team_id = None
if app:
self.init_app(app)
def init_app(self, app=None):
"""Initialize application configuration"""
config = getattr(app, 'config', app)
self.team_id = config.get('TEAM_ID')
def command(self, command, token, team_id=None, methods=['GET'], **kwargs):
if team_id is None:
team_id = self.team_id
if team_id is None:
raise RuntimeError('TEAM_ID is not found in your configuration!')
def deco(func):
self._commands[(team_id, command)] = (func, token, methods, kwargs)
return func
return deco
def dispatch(self):
from flask import request
method = request.method
data = request.args
if method == 'POST':
data = request.form
token = data.get('token')
team_id = data.get('team_id')
command = data.get('command')
if isinstance(command, string_types):
command = command.strip().lstrip('/')
try:
self.validate(command, token, team_id, method)
except SlackError as e:
return self.response(e.msg)
func, _, _, kwargs = self._commands[(team_id, command)]
kwargs.update(data.to_dict())
return func(**kwargs)
dispatch.methods = ['GET', 'POST']
def validate(self, command, token, team_id, method):
if (team_id, command) not in self._commands:
raise SlackError('Command {0} is not found in team {1}'.format(
command, team_id))
func, _token, methods, kwargs = self._commands[(team_id, command)]
if method not in methods:
raise SlackError('{} request is not allowed'.format(method))
if token != _token:
raise SlackError('Your token {} is invalid'.format(token))
def response(self, text):
from flask import Response
return Response(text, content_type='text/plain; charset=utf-8')
class SlackError(Exception):
def __init__(self, msg):
self.msg = msg
| Python | 0.000001 |
6e9a0df29ba488a96293e938ed96561ee709fc4b | Improve heatmap plotting | smps/plots.py | smps/plots.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.ticker as mtick
from matplotlib.ticker import ScalarFormatter
import seaborn as sns
from numpy import nan_to_num
default_cmap = sns.cubehelix_palette(8, as_cmap=True)
rc_log = {
'xtick.major.size': 10.0,
'xtick.minor.size': 8.0,
'ytick.major.size': 10.0,
'ytick.minor.size': 8.0,
'xtick.color': '0.0',
'ytick.color': '0.0',
'axes.linewidth': 1.75
}
def heatmap(X, Y, Z, ax=None, kind='log', cbar=True, hide_low=True,
cmap=default_cmap, fig_kws=None, cbar_kws=None, **kwargs):
"""
"""
cbar_min = kwargs.pop('cbar_min', Z.min() if Z.min() > 0.0 else 1.)
cbar_max = kwargs.pop('cbar_max', Z.max())
# Copy to avoid modifying original data
Z_plot = Z.copy()
if hide_low:
# Hide NaN values
Z_plot = nan_to_num(Z_plot)
# Increase values below cbar_min to cbar_min
below_min = Z_plot < cbar_min
Z_plot[below_min] = cbar_min
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if cbar_kws is None:
cbar_kws = dict(label='$dN/dlogD_p \; [cm^{-3}]$')
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
im = ax.pcolormesh(X, Y, Z_plot, norm=LogNorm(vmin=cbar_min, vmax=cbar_max),
cmap=cmap)
ax.set_ylim([Y.min(), Y.max()])
if kind == 'log':
ax.semilogy()
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_ylabel("$D_p \; [nm]$")
if cbar:
clb = plt.colorbar(im, **cbar_kws)
return ax
def histplot(histogram, bins, ax=None, plot_kws=None, fig_kws=None, **kwargs):
"""Plot the histogram in the form of a bar chart."""
if isinstance(histogram, pd.DataFrame):
histogram = histogram.mean().values
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if plot_kws is None:
plot_kws = dict(alpha=1, edgecolor=None, linewidth=0)
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
ax.bar(left=bins[:, 0], height=histogram, width=bins[:, -1] - bins[:, 0],
align='edge', **plot_kws)
ax.semilogx()
ax.set_xlabel("$D_p \; [\mu m]$")
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.4g"))
return ax
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
import matplotlib.ticker as mtick
from matplotlib.ticker import ScalarFormatter
import seaborn as sns
default_cmap = sns.cubehelix_palette(8, as_cmap=True)
rc_log = {
'xtick.major.size': 10.0,
'xtick.minor.size': 8.0,
'ytick.major.size': 10.0,
'ytick.minor.size': 8.0,
'xtick.color': '0.0',
'ytick.color': '0.0',
'axes.linewidth': 1.75
}
def heatmap(X, Y, Z, ax=None, kind='log', cbar=True, cmap=default_cmap,
fig_kws=None, cbar_kws=None, **kwargs):
"""
"""
cbar_min = kwargs.pop('cbar_min', Z.min() if Z.min() > 0.0 else 1.)
cbar_max = kwargs.pop('cbar_max', Z.max())
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if cbar_kws is None:
cbar_kws = dict(label='$dN/dlogD_p \; [cm^{-3}]$')
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
im = ax.pcolormesh(X, Y, Z, norm=LogNorm(vmin=cbar_min, vmax=cbar_max), cmap=cmap)
ax.set_ylim([Y.min(), Y.max()])
if kind == 'log':
ax.semilogy()
ax.yaxis.set_major_formatter(ScalarFormatter())
ax.set_ylabel("$D_p \; [nm]$")
if cbar:
clb = plt.colorbar(im, **cbar_kws)
return ax
def histplot(histogram, bins, ax=None, plot_kws=None, fig_kws=None, **kwargs):
"""Plot the histogram in the form of a bar chart."""
if isinstance(histogram, pd.DataFrame):
histogram = histogram.mean().values
if fig_kws is None:
fig_kws = dict(figsize=(16,8))
if plot_kws is None:
plot_kws = dict(alpha=1, edgecolor=None, linewidth=0)
if ax is None:
plt.figure(**fig_kws)
ax = plt.gca()
ax.bar(left=bins[:, 0], height=histogram, width=bins[:, -1] - bins[:, 0],
align='edge', **plot_kws)
ax.semilogx()
ax.set_xlabel("$D_p \; [\mu m]$")
ax.xaxis.set_major_formatter(mtick.FormatStrFormatter("%.4g"))
return ax
| Python | 0 |
40d9ceb14c57c109e8f6371b1a4c677fa33e1669 | Bump base package requirements (#10078) | snmp/setup.py | snmp/setup.py | # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmp', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=21.1.0'
setup(
name='datadog-snmp',
version=ABOUT['__version__'],
description='The SNMP check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent snmp check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.snmp'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| # (C) Datadog, Inc. 2018-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
from codecs import open # To use a consistent encoding
from os import path
from setuptools import setup
HERE = path.dirname(path.abspath(__file__))
# Get version info
ABOUT = {}
with open(path.join(HERE, 'datadog_checks', 'snmp', '__about__.py')) as f:
exec(f.read(), ABOUT)
# Get the long description from the README file
with open(path.join(HERE, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def get_dependencies():
dep_file = path.join(HERE, 'requirements.in')
if not path.isfile(dep_file):
return []
with open(dep_file, encoding='utf-8') as f:
return f.readlines()
CHECKS_BASE_REQ = 'datadog-checks-base>=15.7.0'
setup(
name='datadog-snmp',
version=ABOUT['__version__'],
description='The SNMP check',
long_description=long_description,
long_description_content_type='text/markdown',
keywords='datadog agent snmp check',
# The project's main homepage.
url='https://github.com/DataDog/integrations-core',
# Author details
author='Datadog',
author_email='packages@datadoghq.com',
# License
license='BSD',
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.8',
],
# The package we're going to ship
packages=['datadog_checks.snmp'],
# Run-time dependencies
install_requires=[CHECKS_BASE_REQ],
extras_require={'deps': get_dependencies()},
# Extra files to ship with the wheel package
include_package_data=True,
)
| Python | 0 |
17bb20d7f7b236feb1c2eff9d71420e672c87431 | Fix typo | djchoices/choices.py | djchoices/choices.py | import re
from django.core.exceptions import ValidationError
try:
from collections import OrderedDict
except ImportError: # Py2.6, fall back to Django's implementation
from django.utils.datastructures import SortedDict as OrderedDict
try:
from django.utils import six
except ImportError:
import six
__all__ = ["ChoiceItem", "DjangoChoices", "C"]
### Support Functionality (Not part of public API ###
class Labels(dict):
def __getattribute__(self, name):
result = dict.get(self, name, None)
if result is not None:
return result
else:
raise AttributeError("Label for field %s was not found." % name)
def __setattr__(self, name, value):
self[name] = value
### End Support Functionality ###
class ChoiceItem(object):
"""
Describes a choice item. The label is usually the field name so label can
normally be left blank. Set a label if you need characters that are illegal
in a python identifier name (ie: "DVD/Movie").
"""
order = 0
def __init__(self, value=None, label=None, order=None):
self.value = value
if order:
self.order = order
else:
ChoiceItem.order += 1
self.order = ChoiceItem.order
self.label = label
# Shorter convenience alias.
C = ChoiceItem
class DjangoChoicesMeta(type):
"""
Metaclass that writes the choices class.
"""
name_clean = re.compile(r"_+")
def __new__(cls, name, bases, attrs):
class StaticProp(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, objtype):
return self.value
fields = {}
labels = Labels()
values = {}
choices = []
# Get all the fields from parent classes.
parents = [b for b in bases if isinstance(b, DjangoChoicesMeta)]
for kls in parents:
for field_name in kls._fields:
fields[field_name] = kls._fields[field_name]
# Get all the fields from this class.
for field_name in attrs:
val = attrs[field_name]
if isinstance(val, ChoiceItem):
fields[field_name] = val
fields = OrderedDict(sorted(fields.items(), key=lambda x: x[1].order))
for field_name in fields:
val = fields[field_name]
if isinstance(val, ChoiceItem):
if not val.label is None:
label = val.label
else:
label = cls.name_clean.sub(" ", field_name)
val0 = label if val.value is None else val.value
choices.append((val0, label))
attrs[field_name] = StaticProp(val0)
setattr(labels, field_name, label)
values[val.value or label] = label
else:
choices.append((field_name, val.choices))
attrs["choices"] = StaticProp(tuple(choices))
attrs["labels"] = labels
attrs["values"] = values
attrs["_fields"] = fields
return super(DjangoChoicesMeta, cls).__new__(cls, name, bases, attrs)
class DjangoChoices(six.with_metaclass(DjangoChoicesMeta)):
order = 0
choices = ()
labels = Labels()
values = {}
@classmethod
def validator(cls, value):
if value not in cls.values:
raise ValidationError('Select a valid choice. %(value)s is not '
'one of the available choices.')
| import re
from django.core.exceptions import ValidationError]
try:
from collections import OrderedDict
except ImportError: # Py2.6, fall back to Django's implementation
from django.utils.datastructures import SortedDict as OrderedDict
try:
from django.utils import six
except ImportError:
import six
__all__ = ["ChoiceItem", "DjangoChoices", "C"]
### Support Functionality (Not part of public API ###
class Labels(dict):
def __getattribute__(self, name):
result = dict.get(self, name, None)
if result is not None:
return result
else:
raise AttributeError("Label for field %s was not found." % name)
def __setattr__(self, name, value):
self[name] = value
### End Support Functionality ###
class ChoiceItem(object):
"""
Describes a choice item. The label is usually the field name so label can
normally be left blank. Set a label if you need characters that are illegal
in a python identifier name (ie: "DVD/Movie").
"""
order = 0
def __init__(self, value=None, label=None, order=None):
self.value = value
if order:
self.order = order
else:
ChoiceItem.order += 1
self.order = ChoiceItem.order
self.label = label
# Shorter convenience alias.
C = ChoiceItem
class DjangoChoicesMeta(type):
"""
Metaclass that writes the choices class.
"""
name_clean = re.compile(r"_+")
def __new__(cls, name, bases, attrs):
class StaticProp(object):
def __init__(self, value):
self.value = value
def __get__(self, obj, objtype):
return self.value
fields = {}
labels = Labels()
values = {}
choices = []
# Get all the fields from parent classes.
parents = [b for b in bases if isinstance(b, DjangoChoicesMeta)]
for kls in parents:
for field_name in kls._fields:
fields[field_name] = kls._fields[field_name]
# Get all the fields from this class.
for field_name in attrs:
val = attrs[field_name]
if isinstance(val, ChoiceItem):
fields[field_name] = val
fields = OrderedDict(sorted(fields.items(), key=lambda x: x[1].order))
for field_name in fields:
val = fields[field_name]
if isinstance(val, ChoiceItem):
if not val.label is None:
label = val.label
else:
label = cls.name_clean.sub(" ", field_name)
val0 = label if val.value is None else val.value
choices.append((val0, label))
attrs[field_name] = StaticProp(val0)
setattr(labels, field_name, label)
values[val.value or label] = label
else:
choices.append((field_name, val.choices))
attrs["choices"] = StaticProp(tuple(choices))
attrs["labels"] = labels
attrs["values"] = values
attrs["_fields"] = fields
return super(DjangoChoicesMeta, cls).__new__(cls, name, bases, attrs)
class DjangoChoices(six.with_metaclass(DjangoChoicesMeta)):
order = 0
choices = ()
labels = Labels()
values = {}
@classmethod
def validator(cls, value):
if value not in cls.values:
raise ValidationError('Select a valid choice. %(value)s is not '
'one of the available choices.')
| Python | 0.999999 |
0a0ebb7dd3267d727e6af598f6d964cd4d73fd69 | Add TODO for multiple e-mail verification clicks. | eduid_signup/utils.py | eduid_signup/utils.py | from uuid import uuid4
from hashlib import sha256
import datetime
from pyramid.httpexceptions import HTTPInternalServerError
from eduid_signup.i18n import TranslationString as _
from eduid_signup.compat import text_type
def generate_verification_link(request):
code = text_type(uuid4())
link = request.route_url("email_verification_link", code=code)
return (link, code)
def verify_email_code(collection, code):
result = collection.find_and_modify(
{
"code": code,
"verified": False
}, {
"$set": {
"verified": True,
"verified_ts": datetime.utcnow(),
}
},
new=True,
safe=True
)
# XXX need to handle user clicking on confirmation link more than
# once gracefully. Should show page saying that e-mail address was
# already confirmed, but NOT allow user to auth_token login to
# dashboard from that page.
if result is None:
raise HTTPInternalServerError(_("Your email can't be verified now, "
"try it later"))
return True
def check_email_status(db, email):
"""
Check the email registration status.
If the email doesn't exist in database, then return 'new'.
If exists and it hasn't been verified, then return 'not_verified'.
If exists and it has been verified before, then return 'verified'.
"""
email = db.registered.find_one({'email': email})
if not email:
return 'new'
if email.get('verified', False):
return 'verified'
else:
return 'not_verified'
def generate_auth_token(shared_key, email, nonce, timestamp, generator=sha256):
"""
The shared_key is a secret between the two systems
The public word must must go through form POST or GET
"""
return generator("{0}|{1}|{2}|{3}".format(
shared_key, email, nonce, timestamp)).hexdigest()
| from uuid import uuid4
from hashlib import sha256
import datetime
from pyramid.httpexceptions import HTTPInternalServerError
from eduid_signup.i18n import TranslationString as _
from eduid_signup.compat import text_type
def generate_verification_link(request):
code = text_type(uuid4())
link = request.route_url("email_verification_link", code=code)
return (link, code)
def verify_email_code(collection, code):
result = collection.find_and_modify(
{
"code": code,
"verified": False
}, {
"$set": {
"verified": True,
"verified_ts": datetime.utcnow(),
}
},
new=True,
safe=True
)
if result is None:
raise HTTPInternalServerError(_("Your email can't be verified now, "
"try it later"))
return True
def check_email_status(db, email):
"""
Check the email registration status.
If the email doesn't exist in database, then return 'new'.
If exists and it hasn't been verified, then return 'not_verified'.
If exists and it has been verified before, then return 'verified'.
"""
email = db.registered.find_one({'email': email})
if not email:
return 'new'
if email.get('verified', False):
return 'verified'
else:
return 'not_verified'
def generate_auth_token(shared_key, email, nonce, timestamp, generator=sha256):
"""
The shared_key is a secret between the two systems
The public word must must go through form POST or GET
"""
return generator("{0}|{1}|{2}|{3}".format(
shared_key, email, nonce, timestamp)).hexdigest()
| Python | 0 |
0019d3a4d512c4b7da2670872cd880bbe76edd80 | Bump version to 0.4.0 | arghelper.py | arghelper.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 The arghelper developers. All rights reserved.
# Project site: https://github.com/questrail/arghelper
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
"""Provide helper functions for argparse
"""
# Try to future proof code so that it's Python 3.x ready
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Standard module imports
import argparse
import sys
import os
# The version as used in the setup.py
__version__ = '0.4.0'
def extant_file(arg):
"""Facade for extant_item(arg, arg_type="file")
"""
return extant_item(arg, "file")
def extant_dir(arg):
"""Facade for extant_item(arg, arg_type="directory")
"""
return extant_item(arg, "directory")
def extant_item(arg, arg_type):
"""Determine if parser argument is an existing file or directory.
This technique comes from http://stackoverflow.com/a/11541450/95592
and from http://stackoverflow.com/a/11541495/95592
Args:
arg: parser argument containing filename to be checked
arg_type: string of either "file" or "directory"
Returns:
If the file exists, return the filename or directory.
Raises:
If the file does not exist, raise a parser error.
"""
if arg_type == "file":
if not os.path.isfile(arg):
raise argparse.ArgumentError(
None,
"The file {arg} does not exist.".format(arg=arg))
else:
# File exists so return the filename
return arg
elif arg_type == "directory":
if not os.path.isdir(arg):
raise argparse.ArgumentError(
None,
"The directory {arg} does not exist.".format(arg=arg))
else:
# Directory exists so return the directory name
return arg
def parse_config_input_output(args=sys.argv):
"""Parse the args using the config_file, input_dir, output_dir pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Process the input files using the given config')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
parser.add_argument(
'input_dir',
help='Directory containing the input files.',
metavar='DIR', type=extant_dir)
parser.add_argument(
'output_dir',
help='Directory where the output files should be saved.',
metavar='DIR', type=extant_dir)
return parser.parse_args(args[1:])
def parse_config(args=sys.argv):
"""Parse the args using the config_file pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Read in the config file')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
return parser.parse_args(args[1:])
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2014-2016 The arghelper developers. All rights reserved.
# Project site: https://github.com/questrail/arghelper
# Use of this source code is governed by a MIT-style license that
# can be found in the LICENSE.txt file for the project.
"""Provide helper functions for argparse
"""
# Try to future proof code so that it's Python 3.x ready
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
# Standard module imports
import argparse
import sys
import os
# The version as used in the setup.py
__version__ = '0.3.2'
def extant_file(arg):
"""Facade for extant_item(arg, arg_type="file")
"""
return extant_item(arg, "file")
def extant_dir(arg):
"""Facade for extant_item(arg, arg_type="directory")
"""
return extant_item(arg, "directory")
def extant_item(arg, arg_type):
"""Determine if parser argument is an existing file or directory.
This technique comes from http://stackoverflow.com/a/11541450/95592
and from http://stackoverflow.com/a/11541495/95592
Args:
arg: parser argument containing filename to be checked
arg_type: string of either "file" or "directory"
Returns:
If the file exists, return the filename or directory.
Raises:
If the file does not exist, raise a parser error.
"""
if arg_type == "file":
if not os.path.isfile(arg):
raise argparse.ArgumentError(
None,
"The file {arg} does not exist.".format(arg=arg))
else:
# File exists so return the filename
return arg
elif arg_type == "directory":
if not os.path.isdir(arg):
raise argparse.ArgumentError(
None,
"The directory {arg} does not exist.".format(arg=arg))
else:
# Directory exists so return the directory name
return arg
def parse_config_input_output(args=sys.argv):
"""Parse the args using the config_file, input_dir, output_dir pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Process the input files using the given config')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
parser.add_argument(
'input_dir',
help='Directory containing the input files.',
metavar='DIR', type=extant_dir)
parser.add_argument(
'output_dir',
help='Directory where the output files should be saved.',
metavar='DIR', type=extant_dir)
return parser.parse_args(args[1:])
def parse_config(args=sys.argv):
"""Parse the args using the config_file pattern
Args:
args: sys.argv
Returns:
The populated namespace object from parser.parse_args().
Raises:
TBD
"""
parser = argparse.ArgumentParser(
description='Read in the config file')
parser.add_argument(
'config_file',
help='Configuration file.',
metavar='FILE', type=extant_file)
return parser.parse_args(args[1:])
| Python | 0.000001 |
46483b7e551e5180ff36d6892221e4b583f107ac | Use HTTPS for oauth/authenticate step. | le_social/twitter/views.py | le_social/twitter/views.py | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.views import generic
from twitter import Twitter, OAuth, TwitterError
from twitter.oauth_dance import parse_oauth_tokens
class OAuthMixin(object):
consumer_key = None
consumer_secret = None
def get_consumer_key(self):
if self.consumer_key is not None:
return self.consumer_key
if hasattr(settings, 'CONSUMER_KEY'):
return settings.CONSUMER_KEY
else:
raise ImproperlyConfigured("Set settings.CONSUMER_KEY or the "
"consumer_key attribute or "
"implement get_consumer_key")
def get_consumer_secret(self):
if self.consumer_secret is not None:
return self.consumer_secret
if hasattr(settings, 'CONSUMER_SECRET'):
return settings.CONSUMER_SECRET
else:
raise ImproperlyConfigured("Set settings.CONSUMER_SECRET or the "
"consumer_secret attribute or "
"implement get_consumer_secret")
class Authorize(generic.View, OAuthMixin):
"""
A base class for the authorize view. Just sets the request token
in the session and redirects to twitter.
"""
def get(self, request, force_login=False, *args, **kwargs):
callback = self.build_callback()
oauth = OAuth('', '',
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.request_token(oauth_callback=callback))
request.session['request_token'] = (oauth.token, oauth.token_secret)
url = 'https://api.twitter.com/oauth/authenticate?oauth_token=%s' % oauth.token
if force_login:
url += '&force_login=true'
return redirect(url)
def build_callback(self):
""" Override this if you'd like to specify a callback URL"""
return None
class Callback(generic.View, OAuthMixin):
"""
A base class for the return callback. Subclasses must define:
- error(error_msg, exception=None): what to do when
something goes wrong? Must return an HttpResponse
- success(auth): what to do on successful auth? Do
some stuff with the twitter.OAuth object and return
an HttpResponse
"""
def get(self, request, *args, **kwargs):
verifier = request.GET.get('oauth_verifier', None)
if verifier is None:
return self.error('No verifier code')
if not 'request_token' in request.session:
return self.error('No request token found in the session')
request_token = request.session.pop('request_token')
request.session.modified = True
oauth = OAuth(request_token[0], request_token[1],
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
try:
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.access_token(oauth_verifier=verifier))
except TwitterError as e:
return self.error('Failed to get an access token')
return self.success(oauth)
def success(self, auth):
"""
Twitter authentication successful, do some stuff with his key.
"""
raise NotImplementedError("You need to provide an implementation of "
"success(auth)")
def error(self, message, exception=None):
"""
Meh. Something broke.
"""
raise NotImplementedError("You need to provide an implementation of "
"error(message, exception=None)")
| from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.shortcuts import redirect
from django.views import generic
from twitter import Twitter, OAuth, TwitterError
from twitter.oauth_dance import parse_oauth_tokens
class OAuthMixin(object):
consumer_key = None
consumer_secret = None
def get_consumer_key(self):
if self.consumer_key is not None:
return self.consumer_key
if hasattr(settings, 'CONSUMER_KEY'):
return settings.CONSUMER_KEY
else:
raise ImproperlyConfigured("Set settings.CONSUMER_KEY or the "
"consumer_key attribute or "
"implement get_consumer_key")
def get_consumer_secret(self):
if self.consumer_secret is not None:
return self.consumer_secret
if hasattr(settings, 'CONSUMER_SECRET'):
return settings.CONSUMER_SECRET
else:
raise ImproperlyConfigured("Set settings.CONSUMER_SECRET or the "
"consumer_secret attribute or "
"implement get_consumer_secret")
class Authorize(generic.View, OAuthMixin):
"""
A base class for the authorize view. Just sets the request token
in the session and redirects to twitter.
"""
def get(self, request, force_login=False, *args, **kwargs):
callback = self.build_callback()
oauth = OAuth('', '',
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.request_token(oauth_callback=callback))
request.session['request_token'] = (oauth.token, oauth.token_secret)
url = 'http://api.twitter.com/oauth/authenticate?oauth_token=%s' % oauth.token
if force_login:
url += '&force_login=true'
return redirect(url)
def build_callback(self):
""" Override this if you'd like to specify a callback URL"""
return None
class Callback(generic.View, OAuthMixin):
"""
A base class for the return callback. Subclasses must define:
- error(error_msg, exception=None): what to do when
something goes wrong? Must return an HttpResponse
- success(auth): what to do on successful auth? Do
some stuff with the twitter.OAuth object and return
an HttpResponse
"""
def get(self, request, *args, **kwargs):
verifier = request.GET.get('oauth_verifier', None)
if verifier is None:
return self.error('No verifier code')
if not 'request_token' in request.session:
return self.error('No request token found in the session')
request_token = request.session.pop('request_token')
request.session.modified = True
oauth = OAuth(request_token[0], request_token[1],
self.get_consumer_key(),
self.get_consumer_secret())
api = Twitter(auth=oauth, secure=True, format='', api_version=None)
try:
(oauth.token, oauth.token_secret) = parse_oauth_tokens(
api.oauth.access_token(oauth_verifier=verifier))
except TwitterError as e:
return self.error('Failed to get an access token')
return self.success(oauth)
def success(self, auth):
"""
Twitter authentication successful, do some stuff with his key.
"""
raise NotImplementedError("You need to provide an implementation of "
"success(auth)")
def error(self, message, exception=None):
"""
Meh. Something broke.
"""
raise NotImplementedError("You need to provide an implementation of "
"error(message, exception=None)")
| Python | 0 |
f9141964ffa4ed36420b8ba564407c2ca661ac46 | edit on glitter | glitter.py | glitter.py | from willie.module import commands
import random
@commands('glitter')
def ans(bot, trigger):
bot.say("*'-.*\(^O^)/*.-'*") | from willie.module import commands
import random
@commands('glitter')
def ans(bot, trigger):
bot.reply("*'-.*\(^O^)/*.-'*") | Python | 0 |
3a83ff315db6f34fb8e656309580060cf708b8a1 | Refactor request body | request.py | request.py | '''
Code adapted from https://westus.dev.cognitive.microsoft.com/docs/services/TextAnalytics.V2.0/operations/56f30ceeeda5650db055a3c9
'''
import http.client, urllib.request, urllib.parse, urllib.error
import script
import numpy as np
def main():
'''
Sends a single POST request with a test bit of text.
'''
headers = headers()
params = urllib.parse.urlencode({})
sample_text = 'I had a wonderful experience! The rooms were wonderful and the staff were helpful.' # from default given at https://www.microsoft.com/cognitive-services/en-us/text-analytics-api
body = body_from_string_vectors(np.array([sample_text]))
try:
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, str(body), headers)
response = conn.getresponse()
data = response.read()
print(data) # score is on a scale from 0 to 1, with 0 being the most negative sentiment and 1 being the most positive sentiment. Includes some metadata.
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def body_from_string_vectors(vector):
'''
Takes in a numpy vector of strings, each string representing a separate quote from someone.
'''
body_documents_list = []
for string in vector:
body_documents_list += {
'language': 'en',
'id': '1',
'text': string
}
body = {
'documents': {
body_documents_list
}
}
def generate_headers():
api_key = script.get_api_key()
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
return headers
if __name__ == '__main__':
main()
| '''
Code adapted from https://westus.dev.cognitive.microsoft.com/docs/services/TextAnalytics.V2.0/operations/56f30ceeeda5650db055a3c9
'''
import http.client, urllib.request, urllib.parse, urllib.error
import script
def main():
'''
Sends a single POST request with a test bit of text.
'''
headers = headers()
params = urllib.parse.urlencode({})
body = {
"documents": [
{
"language": "en",
"id": "1",
"text": "I had a wonderful experience! The rooms were wonderful and the staff were helpful."
}
]
}
try:
conn = http.client.HTTPSConnection('westus.api.cognitive.microsoft.com')
conn.request("POST", "/text/analytics/v2.0/sentiment?%s" % params, str(body), headers)
response = conn.getresponse()
data = response.read()
print(data) # score is on a scale from 0 to 1, with 0 being the most negative sentiment and 1 being the most positive sentiment. Includes some metadata.
conn.close()
except Exception as e:
print("[Errno {0}] {1}".format(e.errno, e.strerror))
def generate_headers():
api_key = script.get_api_key()
headers = {
# Request headers
'Content-Type': 'application/json',
'Ocp-Apim-Subscription-Key': api_key
}
return headers
if __name__ == '__main__':
main()
| Python | 0 |
c5d68743bf6392ae5e4c6bd80ed6727bfebf77fd | Solve basic/string2.py Please enter the commit message for your changes. Lines starting | basic/string2.py | basic/string2.py | #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
result=s
if len(s)>=3:
if s[-3:]=='ing':
result=result+'ly'
else :
result=result+'ing'
return result
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
notindex=s.find('not')
badindex=s.find('bad')
result=s
if ((notindex>-1) & (badindex>-1))&(notindex<badindex):
result=s.replace(s[notindex:badindex+3],'good')
return result
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
ahalf=int(round(len(a)/2.0))
bhalf=int(round(len(b)/2.0))
afront=a[:ahalf]
aback=a[ahalf:]
bfront=b[:bhalf]
bback=b[bhalf:]
return afront+bfront+aback+bback
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| #!/usr/bin/python2.4 -tt
# Copyright 2010 Google Inc.
# Licensed under the Apache License, Version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
# Google's Python Class
# http://code.google.com/edu/languages/google-python-class/
# Additional basic string exercises
# D. verbing
# Given a string, if its length is at least 3,
# add 'ing' to its end.
# Unless it already ends in 'ing', in which case
# add 'ly' instead.
# If the string length is less than 3, leave it unchanged.
# Return the resulting string.
def verbing(s):
# +++your code here+++
return
# E. not_bad
# Given a string, find the first appearance of the
# substring 'not' and 'bad'. If the 'bad' follows
# the 'not', replace the whole 'not'...'bad' substring
# with 'good'.
# Return the resulting string.
# So 'This dinner is not that bad!' yields:
# This dinner is good!
def not_bad(s):
# +++your code here+++
return
# F. front_back
# Consider dividing a string into two halves.
# If the length is even, the front and back halves are the same length.
# If the length is odd, we'll say that the extra char goes in the front half.
# e.g. 'abcde', the front half is 'abc', the back half 'de'.
# Given 2 strings, a and b, return a string of the form
# a-front + b-front + a-back + b-back
def front_back(a, b):
# +++your code here+++
return
# Simple provided test() function used in main() to print
# what each function returns vs. what it's supposed to return.
def test(got, expected):
if got == expected:
prefix = ' OK '
else:
prefix = ' X '
print '%s got: %s expected: %s' % (prefix, repr(got), repr(expected))
# main() calls the above functions with interesting inputs,
# using the above test() to check if the result is correct or not.
def main():
print 'verbing'
test(verbing('hail'), 'hailing')
test(verbing('swiming'), 'swimingly')
test(verbing('do'), 'do')
print
print 'not_bad'
test(not_bad('This movie is not so bad'), 'This movie is good')
test(not_bad('This dinner is not that bad!'), 'This dinner is good!')
test(not_bad('This tea is not hot'), 'This tea is not hot')
test(not_bad("It's bad yet not"), "It's bad yet not")
print
print 'front_back'
test(front_back('abcd', 'xy'), 'abxcdy')
test(front_back('abcde', 'xyz'), 'abcxydez')
test(front_back('Kitten', 'Donut'), 'KitDontenut')
if __name__ == '__main__':
main()
| Python | 0.998948 |
6f7a75dd98c1f2cc82fe357a3c7cd2147f70db9d | add download_date to API download of single and multiple URLs | newsplease/__init__.py | newsplease/__init__.py | import datetime
import os
import sys
import urllib
from six.moves import urllib
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from newsplease.pipeline.extractor import article_extractor
from newsplease.crawler.items import NewscrawlerItem
from dotmap import DotMap
from newsplease.pipeline.pipelines import ExtractedInformationStorage
from newsplease.crawler.simple_crawler import SimpleCrawler
class NewsPlease:
"""
Access news-please functionality via this interface
"""
@staticmethod
def from_warc(warc_record):
"""
Extracts relevant information from a WARC record. This function does not invoke scrapy but only uses the article
extractor.
:return:
"""
html = str(warc_record.raw_stream.read())
url = warc_record.rec_headers.get_header('WARC-Target-URI')
download_date = warc_record.rec_headers.get_header('WARC-Date')
article = NewsPlease.from_html(html, url=url, download_date=download_date)
return article
@staticmethod
def from_html(html, url=None, download_date=None):
"""
Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
to extract the publishing date and title.
:param html:
:param url:
:return:
"""
extractor = article_extractor.Extractor(
['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])
title_encoded = ''.encode()
if not url:
url = ''
# if an url was given, we can use that as the filename
filename = urllib.parse.quote_plus(url) + '.json'
item = NewscrawlerItem()
item['spider_response'] = DotMap()
item['spider_response'].body = html
item['url'] = url
item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
item['html_title'] = title_encoded
item['rss_title'] = title_encoded
item['local_path'] = None
item['filename'] = filename
item['download_date'] = download_date
item['modified_date'] = None
item = extractor.extract(item)
tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
# final_article = DotMap(tmp_article)
return final_article
@staticmethod
def from_url(url):
"""
Crawls the article from the url and extracts relevant information.
:param url:
:return: A dict containing all the information of the article. Else, None.
"""
articles = NewsPlease.from_urls([url])
if url in articles.keys():
return articles[url]
else:
return None
@staticmethod
def from_urls(urls):
"""
Crawls articles from the urls and extracts relevant information.
:param urls:
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
results = {}
download_date = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
if len(urls) == 0:
pass
elif len(urls) == 1:
url = urls[0]
html = SimpleCrawler.fetch_url(url)
results[url] = NewsPlease.from_html(html, url, download_date)
else:
results = SimpleCrawler.fetch_urls(urls)
for url in results:
results[url] = NewsPlease.from_html(results[url], url, download_date)
return results
@staticmethod
def from_file(path):
"""
Crawls articles from the urls and extracts relevant information.
:param path: path to file containing urls (each line contains one URL)
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
urls = list(filter(None, content))
return NewsPlease.from_urls(urls)
| import os
import sys
import urllib
from six.moves import urllib
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
from newsplease.pipeline.extractor import article_extractor
from newsplease.crawler.items import NewscrawlerItem
from dotmap import DotMap
from newsplease.pipeline.pipelines import ExtractedInformationStorage
from newsplease.crawler.simple_crawler import SimpleCrawler
class NewsPlease:
"""
Access news-please functionality via this interface
"""
@staticmethod
def from_warc(warc_record):
"""
Extracts relevant information from a WARC record. This function does not invoke scrapy but only uses the article
extractor.
:return:
"""
html = str(warc_record.raw_stream.read())
url = warc_record.rec_headers.get_header('WARC-Target-URI')
download_date = warc_record.rec_headers.get_header('WARC-Date')
article = NewsPlease.from_html(html, url=url, download_date=download_date)
return article
@staticmethod
def from_html(html, url=None, download_date=None):
"""
Extracts relevant information from an HTML page given as a string. This function does not invoke scrapy but only
uses the article extractor. If you have the original URL make sure to provide it as this helps NewsPlease
to extract the publishing date and title.
:param html:
:param url:
:return:
"""
extractor = article_extractor.Extractor(
['newspaper_extractor', 'readability_extractor', 'date_extractor', 'lang_detect_extractor'])
title_encoded = ''.encode()
if not url:
url = ''
# if an url was given, we can use that as the filename
filename = urllib.parse.quote_plus(url) + '.json'
item = NewscrawlerItem()
item['spider_response'] = DotMap()
item['spider_response'].body = html
item['url'] = url
item['source_domain'] = urllib.parse.urlparse(url).hostname.encode() if url != '' else ''.encode()
item['html_title'] = title_encoded
item['rss_title'] = title_encoded
item['local_path'] = None
item['filename'] = filename
item['download_date'] = download_date
item['modified_date'] = None
item = extractor.extract(item)
tmp_article = ExtractedInformationStorage.extract_relevant_info(item)
final_article = ExtractedInformationStorage.convert_to_class(tmp_article)
# final_article = DotMap(tmp_article)
return final_article
@staticmethod
def from_url(url):
"""
Crawls the article from the url and extracts relevant information.
:param url:
:return: A dict containing all the information of the article. Else, None.
"""
articles = NewsPlease.from_urls([url])
if url in articles.keys():
return articles[url]
else:
return None
@staticmethod
def from_urls(urls):
"""
Crawls articles from the urls and extracts relevant information.
:param urls:
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
results = {}
if len(urls) == 0:
pass
elif len(urls) == 1:
url = urls[0]
html = SimpleCrawler.fetch_url(url)
results[url] = NewsPlease.from_html(html, url)
else:
results = SimpleCrawler.fetch_urls(urls)
for url in results:
results[url] = NewsPlease.from_html(results[url], url)
return results
@staticmethod
def from_file(path):
"""
Crawls articles from the urls and extracts relevant information.
:param path: path to file containing urls (each line contains one URL)
:return: A dict containing given URLs as keys, and extracted information as corresponding values.
"""
with open(path) as f:
content = f.readlines()
content = [x.strip() for x in content]
urls = list(filter(None, content))
return NewsPlease.from_urls(urls)
| Python | 0 |
634aa9818875c15c3db0ac0763fc15889936b79e | Add a structure test macro to make test writing easier. | tests.bzl | tests.bzl | # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for running structure tests."""
load(
"@io_bazel_rules_docker//docker:docker.bzl",
"docker_build",
)
def _impl(ctx):
ext_run_location = ctx.executable._structure_test.short_path
config_location = ctx.file.config.short_path
load_location = ctx.executable.image.short_path
# docker_build rules always generate an image named 'bazel/$package:$name'.
image_name = "bazel/%s:%s" % (ctx.attr.image.label.package, ctx.attr.image.label.name)
# Generate a shell script to execute ext_run with the correct flags.
test_contents = """\
#!/bin/bash
set -ex
# Execute the image loader script.
%s
# Run the tests.
%s \
-i %s \
-c %s""" % (load_location, ext_run_location, image_name, config_location)
ctx.file_action(
output=ctx.outputs.executable,
content=test_contents
)
return struct(runfiles=ctx.runfiles(files = [
ctx.executable._structure_test,
ctx.executable.image,
ctx.file.config] +
ctx.attr.image.files.to_list() +
ctx.attr.image.data_runfiles.files.to_list()
),
)
structure_test = rule(
attrs = {
"_structure_test": attr.label(
default = Label("//structure_tests:ext_run"),
cfg = "target",
allow_files = True,
executable = True,
),
"image": attr.label(
mandatory = True,
executable = True,
cfg = "target",
),
"config": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
test = True,
implementation = _impl,
)
def structure_test_with_files(name, image, config, files):
"""A macro for including extra files inside an image before testing it."""
child_image_name = "%s.child_image" % name
docker_build(
name = child_image_name,
base = image,
files = files,
)
structure_test(
name = name,
image = child_image_name,
config = config,
)
| # Copyright 2017 Google Inc. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Rule for running structure tests."""
def _impl(ctx):
ext_run_location = ctx.executable._structure_test.short_path
config_location = ctx.file.config.short_path
load_location = ctx.executable.image.short_path
# docker_build rules always generate an image named 'bazel/$package:$name'.
image_name = "bazel/%s:%s" % (ctx.attr.image.label.package, ctx.attr.image.label.name)
# Generate a shell script to execute ext_run with the correct flags.
test_contents = """\
#!/bin/bash
set -ex
# Execute the image loader script.
%s
# Run the tests.
%s \
-i %s \
-c %s""" % (load_location, ext_run_location, image_name, config_location)
ctx.file_action(
output=ctx.outputs.executable,
content=test_contents
)
return struct(runfiles=ctx.runfiles(files = [
ctx.executable._structure_test,
ctx.executable.image,
ctx.file.config] +
ctx.attr.image.files.to_list() +
ctx.attr.image.data_runfiles.files.to_list()
),
)
structure_test = rule(
attrs = {
"_structure_test": attr.label(
default = Label("//structure_tests:ext_run"),
cfg = "target",
allow_files = True,
executable = True,
),
"image": attr.label(
mandatory = True,
executable = True,
cfg = "target",
),
"config": attr.label(
mandatory = True,
allow_files = True,
single_file = True,
),
},
executable = True,
test = True,
implementation = _impl,
)
| Python | 0 |
f5df42e6049b31b1c147da7160e0595e595c6dbc | Add logging to grade | grade.py | grade.py | #Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
import sys
import pickle
import os
import numpy
import logging
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
log = logging.getLogger(__name__)
def grade(grader_path,submission,sandbox=None):
log.debug("Grader path: {0}\n Submission: {1}".format(grader_path,submission))
results = {'errors': [],'tests': [],'correct': False,'score': 0, 'feedback' : []}
#Try to find and load the model file
try:
grader_data=pickle.load(file(grader_path,"r"))
except:
results['errors'].append("Could not find a valid model file.")
grader_set=EssaySet(type="test")
#Try to add essays to essay set object
try:
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except:
results['errors'].append("Essay could not be added to essay set:{0}".format(submission))
#Try to extract features from submission and assign score via the model
try:
grader_feats=grader_data['extractor'].gen_feats(grader_set)
results['feedback']=grader_data['extractor'].gen_feedback(grader_set)
results['score']=int(grader_data['model'].predict(grader_feats)[0])
except :
results['errors'].append("Could not extract features and score essay.")
#Determine maximum score and correctness of response
max_score=numpy.max(grader_data['model'].classes_)
if results['score']/float(max_score) >= .66:
results['correct']=True
else:
results['correct']=False
return results
| #Grader called by pyxserver_wsgi.py
#Loads a grader file, which is a dict containing the prompt of the question,
#a feature extractor object, and a trained model.
#Extracts features and runs trained model on the submission to produce a final score.
#Correctness determined by ratio of score to max possible score.
#Requires aspell to be installed and added to the path.
import sys
import pickle
import os
import numpy
base_path = os.path.dirname(__file__)
sys.path.append(base_path)
from essay_set import EssaySet
#Imports needed to unpickle grader data
import feature_extractor
import sklearn.ensemble
def grade(grader_path,submission,sandbox=None):
results = {'errors': [],'tests': [],'correct': False,'score': 0, 'feedback' : []}
#Try to find and load the model file
try:
grader_data=pickle.load(file(grader_path,"r"))
except:
results['errors'].append("Could not find a valid model file.")
grader_set=EssaySet(type="test")
#Try to add essays to essay set object
try:
grader_set.add_essay(str(submission),0)
grader_set.update_prompt(str(grader_data['prompt']))
except:
results['errors'].append("Essay could not be added to essay set:{0}".format(submission))
#Try to extract features from submission and assign score via the model
try:
grader_feats=grader_data['extractor'].gen_feats(grader_set)
results['feedback']=grader_data['extractor'].gen_feedback(grader_set)
results['score']=int(grader_data['model'].predict(grader_feats)[0])
except:
results['errors'].append("Could not extract features and score essay.")
#Determine maximum score and correctness of response
max_score=numpy.max(grader_data['model'].classes_)
if results['score']/float(max_score) >= .66:
results['correct']=True
else:
results['correct']=False
return results
| Python | 0 |
f96989d067f6fd073d04f96bdf2ae314c9b02d49 | Use request helper function in LayersScraper | uoftscrapers/scrapers/utils/layers.py | uoftscrapers/scrapers/utils/layers.py | import requests
import json
from . import Scraper
class LayersScraper:
"""A superclass for scraping Layers of the UofT Map.
Map is located at http://map.utoronto.ca
"""
host = 'http://map.utoronto.ca/'
@staticmethod
def get_layers_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Retrieving map layers for %s.' % campus.upper())
headers = {'Referer': LayersScraper.host}
data = Scraper.get('%s%s%s' % (
LayersScraper.host,
'data/map/',
campus
), headers=headers, json=True)
return data['layers']
@staticmethod
def get_value(entry, val, number=False):
"""Retrieve the desired value from the parsed response dictionary."""
if val in entry.keys():
return entry[val]
else:
return 0 if number else ''
| import requests
import json
from . import Scraper
class LayersScraper:
"""A superclass for scraping Layers of the UofT Map.
Map is located at http://map.utoronto.ca
"""
host = 'http://map.utoronto.ca/'
s = requests.Session()
@staticmethod
def get_layers_json(campus):
"""Retrieve the JSON structure from host."""
Scraper.logger.info('Retrieving map layers for %s.' % campus.upper())
headers = {
'Referer': LayersScraper.host
}
html = LayersScraper.s.get('%s%s%s' % (
LayersScraper.host,
'data/map/',
campus
), headers=headers).text
data = json.loads(html)
return data['layers']
@staticmethod
def get_value(entry, val, number=False):
"""Retrieve the desired value from the parsed response dictionary."""
if val in entry.keys():
return entry[val]
else:
return 0 if number else ''
| Python | 0 |
b747da4fe99372e53850a964f450c7b00a4d81c9 | Add node add/delete, edge del | graph.py | graph.py |
class SimpleGraph(object):
"""This is a simple graph program that will allow us
to impliment a graph data structure"""
def __init__(self, dict_graph={}):
self.dict_graph = dict_graph
def node(self):
'''return a list of all nodes in the graph'''
return list(__dict_graph.keys())
def edges():
'''return a list of all edges in the graph'''
return list(add_edge())
def add_node(self, n):
'''adds a new node 'n' to the graph'''
if n not in self.dict_graph:
self.dict_graph[n] = []
def add_edge(self, n1, n2):
'''adds a new edge to the graph connecting 'n1' and 'n2',
if either n1 or n2 are not already present in the graph,
they should be added.'''
if n1 in self.dict_graph:
self.dict_graph[n1].append(n2)
else:
self.dict_graph[n1] == [n2]
def del_node(self, n):
'''deletes the node 'n' from the graph,
raises an error if no such node exists'''
try:
del self.dict_graph[n]
except KeyError:
raise ValueError('That node does not exist')
def del_edge(self, n1, n2):
'''deletes the edge connecting 'n1' and 'n2' from the graph,
raises an error if no such edge exists'''
try:
self.dict_graph[n1].remove(n2)
except ValueError:
raise ValueError('That edge does not exist')
def has_node(self, n):
'''True if node 'n' is contained in the graph, False if not.'''
return n in self.dict_graph
def neighbors(self, n):
'''returns the list of all nodes connected to 'n' by edges,
raises an error if n is not in graph'''
def adjacent(self, n1, n2):
'''returns True if there is an edge connecting n1 and n2, False if not,
raises an error if either of the supplied nodes are not in g'''
pass
|
class SimpleGraph(object):
"""This is a simple graph program that will allow us
to impliment a graph data structure"""
def __init__(self, dict_graph={}):
self.dict_graph = dict_graph
def node(self):
'''return a list of all nodes in the graph'''
return list(__dict_graph.keys())
def edges():
'''return a list of all edges in the graph'''
return list(add_edge())
def add_node(self, n):
'''adds a new node 'n' to the graph'''
pass
def add_edge(self, n1, n2):
'''adds a new edge to the graph connecting 'n1' and 'n2',
if either n1 or n2 are not already present in the graph,
they should be added.'''
edge = set(edge)
(n1, n2) = tuple(edge)
if n1 in self.dict_graph:
self.dict_graph[n1].append(n2)
else:
self.dict_graph[n1] == [n2]
def del_node(self, n):
'''deletes the node 'n' from the graph,
raises an error if no such node exists'''
pass
def del_edge(self, n1, n2):
'''deletes the edge connecting 'n1' and 'n2' from the graph,
raises an error if no such edge exists'''
pass
def has_node(self, n):
'''True if node 'n' is contained in the graph, False if not.'''
pass
def neighbors(self, n):
'''returns the list of all nodes connected to 'n' by edges,
raises an error if n is not in g'''
pass
def adjacent(self, n1, n2):
'''returns True if there is an edge connecting n1 and n2, False if not,
raises an error if either of the supplied nodes are not in g'''
pass
| Python | 0.000001 |
4dbde6b8c33a85508ae9c375fef4d4caabfb4d15 | add function build_valid_filename | nlp/extractors/base.py | nlp/extractors/base.py | import re
class BaseExtractor(object):
def build_valid_filename(self, text):
dst=text
for x in '\t\n\':;",.[](){}~!@#$%^&*_+-=/<>?':
dst=dst.replace(x,' ')
dst=dst.replace(' ','-').replace('--','-').replace('--','-')
dst=dst.strip('-')
return dst
def find_between(self, text, s1, s2=None):
if not s1:
raise Exception('s1 is None!')
pos1 = text.find(s1)
if s2 and pos1 != -1:
pos2 = text.find(s2, pos1)
else:
pos2 = -1
if pos2 != -1 and pos2>pos1:
return text[pos1+len(s1):pos2]
else:
return ''
def _extract(self, html):
result =[]
return result
def extract(self, html):
return self._extract(html)
class BaseRegexExtractor(object):
regex = None
def _extract(self, html, regex=None):
result =[]
if regex == None:
regex = self.regex
if regex == None:
return result
p = re.compile(regex)
result = p.findall(html)
return result
def extract(self, html, regex=None):
return self._extract(html, regex=regex)
| import re
class BaseExtractor(object):
def _extract(self, html):
result =[]
return result
def find_between(self, text, s1, s2=None):
if not s1:
raise Exception('s1 is None!')
pos1 = text.find(s1)
if s2 and pos1 != -1:
pos2 = text.find(s2, pos1)
else:
pos2 = -1
if pos2 != -1 and pos2>pos1:
return text[pos1+len(s1):pos2]
else:
return ''
def extract(self, html):
return self._extract(html)
class BaseRegexExtractor(object):
regex = None
def _extract(self, html, regex=None):
result =[]
if regex == None:
regex = self.regex
if regex == None:
return result
p = re.compile(regex)
result = p.findall(html)
return result
def extract(self, html, regex=None):
return self._extract(html, regex=regex)
| Python | 0.00008 |
a2837ab778d39e66c6178dae34a3bebdc638061f | fix test | py/test/testing/test_outcome.py | py/test/testing/test_outcome.py |
import py
import marshal
class TestRaises:
def test_raises(self):
py.test.raises(ValueError, "int('qwe')")
def test_raises_exec(self):
py.test.raises(ValueError, "a,x = []")
def test_raises_syntax_error(self):
py.test.raises(SyntaxError, "qwe qwe qwe")
def test_raises_function(self):
py.test.raises(ValueError, int, 'hello')
def test_importorskip():
from py.__.test.outcome import Skipped
try:
sys = py.test.importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
py.test.raises(Skipped, "py.test.importorskip('alskdj')")
py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
path = py.test.importorskip("py", minversion=".".join(py.__version__))
mod = py.std.new.module("hello123")
mod.__version__ = "1.3"
py.test.raises(Skipped, """
py.test.importorskip("hello123", minversion="5.0")
""")
except Skipped:
print py.code.ExceptionInfo()
py.test.fail("spurious skip")
def test_pytest_exit():
try:
py.test.exit("hello")
except:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
|
import py
import marshal
class TestRaises:
def test_raises(self):
py.test.raises(ValueError, "int('qwe')")
def test_raises_exec(self):
py.test.raises(ValueError, "a,x = []")
def test_raises_syntax_error(self):
py.test.raises(SyntaxError, "qwe qwe qwe")
def test_raises_function(self):
py.test.raises(ValueError, int, 'hello')
def test_importorskip():
from py.__.test.outcome import Skipped
try:
sys = py.test.importorskip("sys")
assert sys == py.std.sys
#path = py.test.importorskip("os.path")
#assert path == py.std.os.path
py.test.raises(Skipped, "py.test.importorskip('alskdj')")
py.test.raises(SyntaxError, "py.test.importorskip('x y z')")
py.test.raises(SyntaxError, "py.test.importorskip('x=y')")
path = py.test.importorskip("py", minversion=".".join(py.__version__))
py.test.raises(Skipped, """
py.test.importorskip("py", minversion="5.0")
""")
except Skipped:
print py.code.ExceptionInfo()
py.test.fail("spurious skip")
def test_pytest_exit():
try:
py.test.exit("hello")
except:
excinfo = py.code.ExceptionInfo()
assert excinfo.errisinstance(KeyboardInterrupt)
| Python | 0.000001 |
38dc94240fdecaa0676921d32f749ca31da94c49 | Add unit tests for similarity graph and density estimation utilities. | debacl/test/test_utils.py | debacl/test/test_utils.py | #####################################
## Brian P. Kent
## test_utils.py
## created: 20140529
## updated: 20140712
## Test the DeBaCl utility functions.
#####################################
import unittest
import numpy as np
import scipy.special as spspec
import sys
from debacl import utils as utl
class TestDensityEstimates(unittest.TestCase):
"""
Unit test class for density estimate functions in DeBaCl utilities.
"""
def setUp(self):
# Input parameters
self.r_k = 1.
self.n = 100
self.p = 2
self.k = 5.
# Correct density estimate
unit_ball_volume = np.pi**(self.p/2.) / spspec.gamma(1 + self.p/2.0)
normalizer = self.k / (self.n * unit_ball_volume)
self.fhat = normalizer / (self.r_k**self.p)
def test_knn_density(self):
fhat = utl.knn_density(self.r_k, self.n, self.p, self.k)
self.assertEqual(self.fhat, fhat)
class TestNeighborGraphs(unittest.TestCase):
"""
Unit test class for neighbor graphs.
"""
def setUp(self):
## Make data
n = 5
self.X = np.arange(5).reshape((n, 1))
## Graph parameters
self.k = 3
self.epsilon = 1.01
## Answers
self.knn = {
0: set([0, 1, 2]),
1: set([1, 0, 2]),
2: set([2, 1, 3]),
3: set([3, 2, 4]),
4: set([4, 3, 2])}
self.r_k = np.array([2., 1., 1., 1., 2.])
self.eps_nn = {
0: set([0, 1]),
1: set([1, 0, 2]),
2: set([2, 1, 3]),
3: set([3, 2, 4]),
4: set([4, 3])}
self.edge_list = [(0, 1), (1, 2), (2, 3), (3, 4)]
def test_knn_graph(self):
"""
Test construction of the k-nearest neighbor graph.
"""
knn, r_k = utl.knn_graph(self.X, k=self.k, method='brute-force')
np.testing.assert_array_equal(r_k, self.r_k)
for idx, neighbors in knn.iteritems():
self.assertSetEqual(self.knn[idx], set(neighbors))
def test_epsilon_graph(self):
"""
Test construction of the epsilon-nearest neighbor graph.
"""
eps_nn = utl.epsilon_graph(self.X, self.epsilon)
for idx, neighbors in eps_nn.iteritems():
self.assertSetEqual(self.eps_nn[idx], set(neighbors))
def test_type_conversions(self):
"""
Test conversion between graph representations.
"""
edge_list = utl.adjacency_to_edge_list(self.eps_nn, self_edge=False)
edge_list = sorted([tuple(sorted(x)) for x in edge_list])
for e, ans in zip(edge_list, self.edge_list):
self.assertTupleEqual(e, ans)
| #####################################
## Brian P. Kent
## test_utils.py
## created: 20140529
## updated: 20140529
## Test the DeBaCl utility functions.
#####################################
import unittest
import numpy as np
import scipy.special as spspec
import sys
sys.path.insert(0, '/home/brian/Projects/debacl/DeBaCl/')
from debacl import utils as utl
## Example from the unittest introduction
# class TestSequenceFunctions(unittest.TestCase):
# def setUp(self):
# self.seq = range(10)
# def test_choice(self):
# element = random.choice(self.seq)
# self.assertTrue(element in self.seq)
class TestDensityEstimates(unittest.TestCase):
"""
Unit test class for density estimate functions in DeBaCl utilities.
"""
def setUp(self):
# Input parameters
self.r_k = 1.
self.n = 100
self.p = 2
self.k = 5.
# Correct density estimate
unit_ball_volume = np.pi**(self.p/2.) / spspec.gamma(1 + self.p/2.0)
normalizer = self.k / (self.n * unit_ball_volume)
self.fhat = normalizer / (self.r_k**self.p)
def test_knn_density(self):
fhat = utl.knnDensity(self.r_k, self.n, self.p, self.k)
self.assertEqual(self.fhat, fhat)
class TestNeighborGraphs(unittest.TestCase):
"""
Unit test class for neighbor graphs.
"""
def setUp(self):
pass
def test_knn_graph(self):
pass
def test_epsilon_graph(self):
pass
def test_gaussian_graph(self):
pass
class TestTreeConstructionUtils(unittest.TestCase):
"""
Unit test class for stages of level set tree construction.
"""
def setUp(self):
pass
def test_density_grid(self):
pass
def test_background_assignment(self):
pass | Python | 0 |
f0f1fb06896294f2657083aa7a077d852ea8bb4b | add sort order | ats/admin.py | ats/admin.py | from django.contrib import admin
from .models import ProjectWorker
class ProjectWorkerAdmin(admin.ModelAdmin):
list_filter = ['user', 'project', 'job']
ordering = ['user', 'project', 'job']
admin.site.register(ProjectWorker, ProjectWorkerAdmin)
| from django.contrib import admin
from .models import ProjectWorker
class ProjectWorkerAdmin(admin.ModelAdmin):
list_filter = ['user', 'project', 'job']
admin.site.register(ProjectWorker, ProjectWorkerAdmin)
| Python | 0.000088 |
86c82a4215aeffb3ddf0a195c8a556fc5a32667a | fix save method if object is updated | nosql_schema/schema.py | nosql_schema/schema.py | import nosqlite
from fields import Field
from exceptions import ValidationError
import config as base_config
class Schema:
@staticmethod
def get_config():
config = vars(base_config)
if 'DATABASE_PATH' not in config:
config['DATABASE_PATH'] = 'database.db'
return config
def __init__(self, **kwargs):
self.config = Schema.get_config()
self._id = None
attributes = self.__class__.__dict__
# creation by dictionary -> see find / find_one
try:
field_dictionary = kwargs.pop('__dictionary')
if field_dictionary:
setattr(self, '_id', field_dictionary.pop('_id'))
except KeyError:
field_dictionary = None
# set default values, override with passed values, then with __dictionary
for k, v in attributes.iteritems():
if isinstance(v, Field):
setattr(self, k, v.default)
if k in kwargs:
setattr(self, k, kwargs.pop(k))
if field_dictionary and k in field_dictionary:
setattr(self, k, field_dictionary.pop(k))
def save(self):
if not self.__validate():
return False
# convert attributes to document
document = self.__to_dict()
if self._id is not None:
# update
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
collection.update(document)
return self._id
else:
# insert
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
document = collection.insert(document)
self._id = document['_id']
return self._id
def delete(self):
document = self.__to_dict()
if '_id' in document:
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
return collection.delete({'_id': document['_id']})
def __validate(self):
attributes = self.__class__.__dict__
document = self.__to_dict()
for k, v in attributes.iteritems():
if isinstance(v, Field):
# workaround for getattr(self, k) as it returns class attribute if value is None?!
value = document.get(k)
if not v.validate(value=value):
raise ValidationError('Invalid value "{0}" for field "{1}"'.format(value, k))
return True
def __to_dict(self):
# remove all undefined attributes and add defined attributes
raw_document = self.__dict__
document = dict()
attributes = self.__class__.__dict__
for k, v in attributes.iteritems():
if isinstance(v, Field):
if k in raw_document:
document[k] = raw_document[k]
else:
# actually not necessary
document[k] = None
if '_id' in raw_document and raw_document['_id'] is not None:
document['_id'] = raw_document['_id']
return document
# class methods
@classmethod
def find(cls, query=None, limit=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
documents = []
results = collection.find(query, limit)
for document in results:
instance = cls(__dictionary=document)
documents.append(instance)
return documents
@classmethod
def find_one(cls, query=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
document = collection.find_one(query)
return cls(__dictionary=document)
| import nosqlite
from fields import Field
from exceptions import ValidationError
import config as base_config
class Schema:
@staticmethod
def get_config():
config = vars(base_config)
if 'DATABASE_PATH' not in config:
config['DATABASE_PATH'] = 'database.db'
return config
def __init__(self, **kwargs):
self.config = Schema.get_config()
self._id = None
attributes = self.__class__.__dict__
# creation by dictionary -> see find / find_one
try:
field_dictionary = kwargs.pop('__dictionary')
if field_dictionary:
setattr(self, '_id', field_dictionary.pop('_id'))
except KeyError:
field_dictionary = None
# set default values, override with passed values, then with __dictionary
for k, v in attributes.iteritems():
if isinstance(v, Field):
setattr(self, k, v.default)
if k in kwargs:
setattr(self, k, kwargs.pop(k))
if field_dictionary and k in field_dictionary:
setattr(self, k, field_dictionary.pop(k))
def save(self):
if not self.__validate():
return False
# convert attributes to document
document = self.__to_dict()
if self._id is not None:
# update
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
collection.update({'_id': document['_id']}, document)
return self._id
else:
# insert
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
document = collection.insert(document)
self._id = document['_id']
return self._id
def delete(self):
document = self.__to_dict()
if '_id' in document:
with nosqlite.Connection(self.config['DATABASE_PATH']) as db:
collection_name = self.__class__.__name__
collection = db[collection_name]
return collection.delete({'_id': document['_id']})
def __validate(self):
attributes = self.__class__.__dict__
document = self.__to_dict()
for k, v in attributes.iteritems():
if isinstance(v, Field):
# workaround for getattr(self, k) as it returns class attribute if value is None?!
value = document.get(k)
if not v.validate(value=value):
raise ValidationError('Invalid value "{0}" for field "{1}"'.format(value, k))
return True
def __to_dict(self):
# remove all undefined attributes and add defined attributes
raw_document = self.__dict__
document = dict()
attributes = self.__class__.__dict__
for k, v in attributes.iteritems():
if isinstance(v, Field):
if k in raw_document:
document[k] = raw_document[k]
else:
# actually not necessary
document[k] = None
if '_id' in raw_document and raw_document['_id'] is not None:
document['_id'] = raw_document['_id']
return document
# class methods
@classmethod
def find(cls, query=None, limit=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
documents = []
results = collection.find(query, limit)
for document in results:
instance = cls(__dictionary=document)
documents.append(instance)
return documents
@classmethod
def find_one(cls, query=None):
config = Schema.get_config()
with nosqlite.Connection(config['DATABASE_PATH']) as db:
collection_name = cls.__name__
collection = db[collection_name]
document = collection.find_one(query)
return cls(__dictionary=document)
| Python | 0.000001 |
6447899ec344d14fbb78b9a2bbbe8b75451f10f2 | Set isolation level to reapeatable read | pyophase/settings_production.py | pyophase/settings_production.py | """
This is the settings file used in production.
First, it imports all default settings, then overrides respective ones.
Secrets are stored in and imported from an additional file, not set under version control.
"""
from pyophase import settings_secrets as secrets
from .settings import *
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
ALLOWED_HOSTS = ['.fachschaft.informatik.tu-darmstadt.de', '.d120.de']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'pyophase',
'USER': 'pyophase',
'PASSWORD': secrets.DB_PASSWORD,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
'isolation_level': "repeatable read"
}
}
}
STATIC_URL = '/ophase/static/'
LOGIN_URL = '/ophase/accounts/login/'
MEDIA_URL = '/ophase/media/'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ADMINS = (('pyophase-dev', 'pyophase-dev@fachschaft.informatik.tu-darmstadt.de'),)
SERVER_EMAIL = "pyophase@fachschaft.informatik.tu-darmstadt.de"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.d120.de'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'pyophase'
EMAIL_HOST_PASSWORD = secrets.MAIL_PASSWORD
TUID_FORCE_SERVICE_URL = 'https://www.fachschaft.informatik.tu-darmstadt.de/ophase/sso/login/'
FILE_UPLOAD_PERMISSIONS = 0o644
| """
This is the settings file used in production.
First, it imports all default settings, then overrides respective ones.
Secrets are stored in and imported from an additional file, not set under version control.
"""
from pyophase import settings_secrets as secrets
from .settings import *
SECRET_KEY = secrets.SECRET_KEY
DEBUG = False
ALLOWED_HOSTS = ['.fachschaft.informatik.tu-darmstadt.de', '.d120.de']
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'HOST': 'localhost',
'NAME': 'pyophase',
'USER': 'pyophase',
'PASSWORD': secrets.DB_PASSWORD,
'OPTIONS': {
'init_command': "SET sql_mode='STRICT_TRANS_TABLES'"
}
}
}
STATIC_URL = '/ophase/static/'
LOGIN_URL = '/ophase/accounts/login/'
MEDIA_URL = '/ophase/media/'
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
ADMINS = (('pyophase-dev', 'pyophase-dev@fachschaft.informatik.tu-darmstadt.de'),)
SERVER_EMAIL = "pyophase@fachschaft.informatik.tu-darmstadt.de"
DEFAULT_FROM_EMAIL = SERVER_EMAIL
EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = 'mail.d120.de'
EMAIL_PORT = 587
EMAIL_USE_TLS = True
EMAIL_HOST_USER = 'pyophase'
EMAIL_HOST_PASSWORD = secrets.MAIL_PASSWORD
TUID_FORCE_SERVICE_URL = 'https://www.fachschaft.informatik.tu-darmstadt.de/ophase/sso/login/'
FILE_UPLOAD_PERMISSIONS = 0o644
| Python | 0 |
4839121f90934f7e52e51c05d052d27124680be7 | Remove confusing and useless "\n" | pyqode/python/backend/server.py | pyqode/python/backend/server.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main server script for a pyqode.python backend. You can directly use this
script in your application if it fits your needs or use it as a starting point
for writing your own server.
::
usage: server.py [-h] [-s [SYSPATH [SYSPATH ...]]] port
positional arguments:
port the local tcp port to use to run the server
optional arguments:
-h, --help show this help message and exit
-s [SYSPATH [SYSPATH ...]], --syspath [SYSPATH [SYSPATH ...]]
"""
import argparse
import sys
if __name__ == '__main__':
"""
Server process' entry point
"""
# setup argument parser and parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("port", help="the local tcp port to use to run "
"the server")
parser.add_argument('-s', '--syspath', nargs='*')
args = parser.parse_args()
# add user paths to sys.path
if args.syspath:
for path in args.syspath:
print('append path %s to sys.path' % path)
sys.path.append(path)
from pyqode.core import backend
from pyqode.python.backend.workers import JediCompletionProvider
# setup completion providers
backend.CodeCompletionWorker.providers.append(JediCompletionProvider())
backend.CodeCompletionWorker.providers.append(
backend.DocumentWordsProvider())
# starts the server
backend.serve_forever(args)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Main server script for a pyqode.python backend. You can directly use this
script in your application if it fits your needs or use it as a starting point
for writing your own server.
::
usage: server.py [-h] [-s [SYSPATH [SYSPATH ...]]] port
positional arguments:
port the local tcp port to use to run the server
optional arguments:
-h, --help show this help message and exit
-s [SYSPATH [SYSPATH ...]], --syspath [SYSPATH [SYSPATH ...]]
"""
import argparse
import sys
if __name__ == '__main__':
"""
Server process' entry point
"""
# setup argument parser and parse command line args
parser = argparse.ArgumentParser()
parser.add_argument("port", help="the local tcp port to use to run "
"the server")
parser.add_argument('-s', '--syspath', nargs='*')
args = parser.parse_args()
# add user paths to sys.path
if args.syspath:
for path in args.syspath:
print('append path %s to sys.path\n' % path)
sys.path.append(path)
from pyqode.core import backend
from pyqode.python.backend.workers import JediCompletionProvider
# setup completion providers
backend.CodeCompletionWorker.providers.append(JediCompletionProvider())
backend.CodeCompletionWorker.providers.append(
backend.DocumentWordsProvider())
# starts the server
backend.serve_forever(args)
| Python | 0.000072 |
31852bbf09e4f416f93c7720ecd9eca8cfe32d38 | Update version | pyramid_request_log/__init__.py | pyramid_request_log/__init__.py | from __future__ import absolute_import
from .config import includeme
__version__ = '0.7'
| from __future__ import absolute_import
from .config import includeme
__version__ = '0.6'
| Python | 0 |
3d0bb2ea93d230c6615658be31b604228c06c1b5 | Optimize verison check (#4461) | python/taichi/_version_check.py | python/taichi/_version_check.py | import datetime
import json
import os
import platform
import threading
import uuid
from urllib import request
from taichi._lib import core as _ti_core
def check_version(cur_uuid):
# Check Taichi version for the user.
major = _ti_core.get_version_major()
minor = _ti_core.get_version_minor()
patch = _ti_core.get_version_patch()
version = f'{major}.{minor}.{patch}'
payload = {'version': version, 'platform': '', 'python': ''}
system = platform.system()
if system == 'Linux':
payload['platform'] = 'manylinux1_x86_64'
elif system == 'Windows':
payload['platform'] = 'win_amd64'
elif system == 'Darwin':
if platform.release() < '19.0.0':
payload['platform'] = 'macosx_10_14_x86_64'
elif platform.machine() == 'x86_64':
payload['platform'] = 'macosx_10_15_x86_64'
else:
payload['platform'] = 'macosx_11_0_arm64'
python_version = platform.python_version()
if python_version.startswith('3.6.'):
payload['python'] = 'cp36'
elif python_version.startswith('3.7.'):
payload['python'] = 'cp37'
elif python_version.startswith('3.8.'):
payload['python'] = 'cp38'
elif python_version.startswith('3.9.'):
payload['python'] = 'cp39'
elif python_version.startswith('3.10.'):
payload['python'] = 'cp310'
payload['uuid'] = cur_uuid
# We do not want request exceptions break users' usage of Taichi.
try:
payload = json.dumps(payload)
payload = payload.encode()
req = request.Request('https://metadata.taichi.graphics/check_version',
method='POST')
req.add_header('Content-Type', 'application/json')
with request.urlopen(req, data=payload, timeout=5) as response:
response = json.loads(response.read().decode('utf-8'))
return response
except:
return None
def write_version_info(response, cur_uuid, version_info_path, cur_date):
if response is None:
return
with open(version_info_path, 'w') as f:
f.write((cur_date).strftime('%Y-%m-%d'))
f.write('\n')
if response['status'] == 1:
f.write(response['latest_version'])
else:
f.write('0.0.0')
f.write('\n')
f.write(cur_uuid)
f.write('\n')
def try_check_version():
try:
os.makedirs(_ti_core.get_repo_dir(), exist_ok=True)
version_info_path = os.path.join(_ti_core.get_repo_dir(),
'version_info')
cur_date = datetime.date.today()
if os.path.exists(version_info_path):
with open(version_info_path, 'r') as f:
version_info_file = f.readlines()
last_time = version_info_file[0].rstrip()
cur_uuid = version_info_file[2].rstrip()
if cur_date.strftime('%Y-%m-%d') > last_time:
response = check_version(cur_uuid)
write_version_info(response, cur_uuid, version_info_path,
cur_date)
else:
cur_uuid = str(uuid.uuid4())
response = check_version(cur_uuid)
write_version_info(response, cur_uuid, version_info_path, cur_date)
# Wildcard exception to catch potential file writing errors.
except:
pass
def start_version_check_thread():
skip = os.environ.get("TI_SKIP_VERSION_CHECK")
if skip != 'ON':
# We don't join this thread because we do not wish to block users.
check_version_thread = threading.Thread(target=try_check_version,
daemon=True)
check_version_thread.start()
__all__ = []
| import json
import os
import platform
import threading
from urllib import request
def check_version():
# Check Taichi version for the user.
major = _ti_core.get_version_major()
minor = _ti_core.get_version_minor()
patch = _ti_core.get_version_patch()
version = f'{major}.{minor}.{patch}'
payload = {'version': version, 'platform': '', 'python': ''}
system = platform.system()
if system == 'Linux':
payload['platform'] = 'manylinux1_x86_64'
elif system == 'Windows':
payload['platform'] = 'win_amd64'
elif system == 'Darwin':
if platform.release() < '19.0.0':
payload['platform'] = 'macosx_10_14_x86_64'
elif platform.machine() == 'x86_64':
payload['platform'] = 'macosx_10_15_x86_64'
else:
payload['platform'] = 'macosx_11_0_arm64'
python_version = platform.python_version()
if python_version.startswith('3.6.'):
payload['python'] = 'cp36'
elif python_version.startswith('3.7.'):
payload['python'] = 'cp37'
elif python_version.startswith('3.8.'):
payload['python'] = 'cp38'
elif python_version.startswith('3.9.'):
payload['python'] = 'cp39'
# We do not want request exceptions break users' usage of Taichi.
try:
payload = json.dumps(payload)
payload = payload.encode()
req = request.Request('https://metadata.taichi.graphics/check_version',
method='POST')
req.add_header('Content-Type', 'application/json')
with request.urlopen(req, data=payload, timeout=5) as response:
response = json.loads(response.read().decode('utf-8'))
return response
except:
return None
def try_check_version():
try:
os.makedirs(_ti_core.get_repo_dir(), exist_ok=True)
timestamp_path = os.path.join(_ti_core.get_repo_dir(), 'timestamp')
cur_date = datetime.date.today()
if os.path.exists(timestamp_path):
last_time = ''
with open(timestamp_path, 'r') as f:
last_time = f.readlines()[0].rstrip()
if cur_date.strftime('%Y-%m-%d') > last_time:
response = check_version()
if response is None:
return
with open(timestamp_path, 'w') as f:
f.write((cur_date +
datetime.timedelta(days=7)).strftime('%Y-%m-%d'))
f.write('\n')
if response['status'] == 1:
f.write(response['latest_version'])
else:
f.write('0.0.0')
else:
response = check_version()
if response is None:
return
with open(timestamp_path, 'w') as f:
f.write((cur_date +
datetime.timedelta(days=7)).strftime('%Y-%m-%d'))
f.write('\n')
if response['status'] == 1:
f.write(response['latest_version'])
else:
f.write('0.0.0')
# Wildcard exception to catch potential file writing errors.
except:
pass
def start_version_check_thread():
skip = os.environ.get("TI_SKIP_VERSION_CHECK")
if skip != 'ON':
# We don't join this thread because we do not wish to block users.
check_version_thread = threading.Thread(target=try_check_version,
daemon=True)
check_version_thread.start()
__all__ = []
| Python | 0 |
3c3c452426b7568675028ec9def514eb6d501e35 | Fix flake8 errors | qmxgraph/decoration_contents.py | qmxgraph/decoration_contents.py | import attr
from qmxgraph.extra_attr_validators import tuple_of
asdict = attr.asdict
_is_int = attr.validators.instance_of(int)
_is_str = attr.validators.instance_of(str)
@attr.s(frozen=True, slots=True)
class Image:
"""
Represet an image tag that could be embedded into a table contents.
The image's width and height are required since mxgraph will render the
html in a helper container in order to get the cell's size. To avoid the
cell size to be wrongly calculated we got some options like passing the
image's size explicitly (as done here) or force the user to pre load the
images so when rendering the html the image is already loaded and the
correct size is used.
:ivar str tag:
:ivar str source: The URL to the image, data URIs can be used.
:ivar int width: The desired width for the image.
:ivar int height: The desired height for the image.
"""
tag = attr.ib(default='img', init=False)
src = attr.ib(validator=_is_str)
width = attr.ib(validator=_is_int)
height = attr.ib(validator=_is_int)
@attr.s(frozen=True, slots=True)
class TableData:
"""
Represents the contents of a table's cell when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,Image]] contents: The table cell's contents.
:ivar int colspan: The number of columns the cell should span into.
:ivar int rowspan: The number of rows the cell should span into.
:ivar optional[str] style: A inline style for the element.
"""
tag = attr.ib(default='td', init=False)
contents = attr.ib(validator=tuple_of(str, Image),
convert=tuple)
colspan = attr.ib(default=1, validator=_is_int)
rowspan = attr.ib(default=1, validator=_is_int)
style = attr.ib(default=None, validator=attr.validators.optional(_is_str))
@attr.s(frozen=True, slots=True)
class TableRow:
"""
Represents the contents of a table's row when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,TableData]] contents: The row's cells. Normal `str`
elements will be interpreted as `TableData` elements with all the
default values and it's contents equal to a tuple of one element (the
`str` used).
"""
tag = attr.ib(default='tr', init=False)
contents = attr.ib(validator=tuple_of(str, TableData),
convert=tuple)
@attr.s(frozen=True, slots=True)
class Table:
"""
Represents the contents of a table when inserting or updating a table in
the graph.
:ivar str tag:
:ivar tuple[TableRow] contents: The table rows.
"""
tag = attr.ib(default='table', init=False)
contents = attr.ib(validator=tuple_of(TableRow), convert=tuple)
def contents_after(self, caption):
"""
Useful for testing: truncates the contents after the first row with
the given caption and return it as a list.
:rtype: tuple[TableRow]
"""
seen_captions = []
def get_caption(row):
first_row_content = row.contents[0]
if isinstance(first_row_content, TableData):
return first_row_content.contents[0]
return first_row_content
for index, row in enumerate(self.contents):
row_caption = get_caption(row)
if row_caption == caption:
break
seen_captions.append(row_caption)
else:
__tracebackhide__ = True
msg = '\nCould not find row with caption "{}" in\n{}'
assert False, msg.format(caption, seen_captions)
return tuple(self.contents[index + 1:])
| import attr
from qmxgraph.extra_attr_validators import tuple_of
asdict = attr.asdict
_is_int = attr.validators.instance_of(int)
_is_str = attr.validators.instance_of(str)
@attr.s(frozen=True, slots=True)
class Image:
"""
Represet an image tag that could be embedded into a table contents.
The image's width and height are required since mxgraph will render the
html in a helper container in order to get the cell's size. To avoid the
cell size to be wrongly calculated we got some options like passing the
image's size explicitly (as done here) or force the user to pre load the
images so when rendering the html the image is already loaded and the
correct size is used.
:ivar str tag:
:ivar str source: The URL to the image, data URIs can be used.
:ivar int width: The desired width for the image.
:ivar int height: The desired height for the image.
"""
tag = attr.ib(default='img', init=False)
src = attr.ib(validator=_is_str)
width = attr.ib(validator=_is_int)
height = attr.ib(validator=_is_int)
@attr.s(frozen=True, slots=True)
class TableData:
"""
Represents the contents of a table's cell when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,Image]] contents: The table cell's contents.
:ivar int colspan: The number of columns the cell should span into.
:ivar int rowspan: The number of rows the cell should span into.
:ivar optional[str] style: A inline style for the element.
"""
tag = attr.ib(default='td', init=False)
contents = attr.ib(validator=tuple_of(str, Image),
convert=tuple)
colspan = attr.ib(default=1, validator=_is_int)
rowspan = attr.ib(default=1, validator=_is_int)
style = attr.ib(default=None, validator=attr.validators.optional(_is_str))
@attr.s(frozen=True, slots=True)
class TableRow:
"""
Represents the contents of a table's row when inserting or updating a
table in the graph.
:ivar str tag:
:ivar tuple[union[str,TableData]] contents: The row's cells. Normal `str`
elements will be interpreted as `TableData` elements with all the
default values and it's contents equal to a tuple of one element (the
`str` used).
"""
tag = attr.ib(default='tr', init=False)
contents = attr.ib(validator=tuple_of(str, TableData),
convert=tuple)
@attr.s(frozen=True, slots=True)
class Table:
"""
Represents the contents of a table when inserting or updating a table in
the graph.
:ivar str tag:
:ivar tuple[TableRow] contents: The table rows.
"""
tag = attr.ib(default='table', init=False)
contents = attr.ib(validator=tuple_of(TableRow), convert=tuple)
def contents_after(self, caption):
"""
Useful for testing: truncates the contents after the first row with the given caption and return it as a list.
:rtype: tuple[TableRow]
"""
seen_captions = []
def get_caption(row):
first_row_content = row.contents[0]
if isinstance(first_row_content, TableData):
return first_row_content.contents[0]
return first_row_content
for index, row in enumerate(self.contents):
row_caption = get_caption(row)
if row_caption == caption:
break
seen_captions.append(row_caption)
else:
__tracebackhide__ = True
assert False, '\nCould not find row with caption "{}" in\n{}'.format(caption, seen_captions)
return tuple(self.contents[index + 1:])
| Python | 0.000004 |
6b966f68aab979bb6dab55f08ec7c5b3807295da | Update hello.py | hello.py | hello.py | import sqlite3
from flask import Flask
from flask import g
import cf_deployment_tracker
import os
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
# On Bluemix, get the port number from the environment variable VCAP_APP_PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('VCAP_APP_PORT', 8080))
DATABASE = './database.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def init_db():
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.route('/')
def hello_world():
return 'Hello World! I am running on port ' + str(port)
@app.route('/placeOrder')
def placeOrder():
return return app.send_static_file('/placeOrder/index.html')
@app.route('/processOrder', methods = ['POST'])
def processOrder():
if request.method == 'POST':
nonce = request.form['nonce']
name = request.form['username']
order = request.form['order']
cost = int(request.form['cost'])
phoneNumber = request.form['phoneNumber']
userDetails = {'name':name, 'order':order, 'cost':cost, 'phoneNumber':phoneNumber}
print(userDetails)
success = processTransaction(nonce,cost)
if success:
return app.send_static_file('thankYou.html')
else:
return app.send_static_file('index.html')
def processTransaction(nonce,cost):
api_instance = TransactionApi()
# Every payment you process with the SDK must have a unique idempotency key.
# If you're unsure whether a particular payment succeeded, you can reattempt
# it with the same idempotency key without worrying about double charging
# the buyer.
idempotency_key = str(uuid.uuid1())
# Monetary amounts are specified in the smallest unit of the applicable currency.
# This amount is in cents. It's also hard-coded for $1.00, which isn't very useful.
amount = {'amount':cost, 'currency': 'USD'}
body = {'idempotency_key': idempotency_key, 'card_nonce': nonce, 'amount_money': amount}
# The SDK throws an exception if a Connect endpoint responds with anything besides
# a 200-level HTTP code. This block catches any exceptions that occur from the request.
try:
# Charge
api_response = api_instance.charge(access_token, location_id, body)
res = api_response.transaction
#Push to Db #TODO
return True
except ApiException as e:
res = "Exception when calling TransactionApi->charge: {}".format(e)
print(res); #For Debuggig
return False
def insert(table, fields=(), values=()):
db = getattr(g, '_database', None)
cur = db.cursor()
query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table,
', '.join(fields),
', '.join(['?'] * len(values))
)
cur.execute(query, values)
db.commit()
id = cur.lastrowid
cur.close()
return id
@app.route('/CreatePool', methods = ['POST'])
def CreatePool():
db = get_db()
insert("Pools", ("restaurant", "return_time", "num_orders", "pickup_location", "has_arrived"), ("in n out", "1478939164", "5", "room 383", False))
return "Pool Created"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port)
| import sqlite3
from flask import Flask
from flask import g
import cf_deployment_tracker
import os
# Emit Bluemix deployment event
cf_deployment_tracker.track()
app = Flask(__name__)
# On Bluemix, get the port number from the environment variable VCAP_APP_PORT
# When running this app on the local machine, default the port to 8080
port = int(os.getenv('VCAP_APP_PORT', 8080))
DATABASE = './database.db'
def get_db():
db = getattr(g, '_database', None)
if db is None:
db = g._database = sqlite3.connect(DATABASE)
return db
@app.teardown_appcontext
def close_connection(exception):
db = getattr(g, '_database', None)
if db is not None:
db.close()
def init_db():
with app.app_context():
db = get_db()
with app.open_resource('schema.sql', mode='r') as f:
db.cursor().executescript(f.read())
db.commit()
def query_db(query, args=(), one=False):
cur = get_db().execute(query, args)
rv = cur.fetchall()
cur.close()
return (rv[0] if rv else None) if one else rv
@app.route('/')
def hello_world():
return 'Hello World! I am running on port ' + str(port)
@app.route('/placeOrder')
def placeOrder():
return return app.send_static_file('/placeOrder/index.html')
def insert(table, fields=(), values=()):
db = getattr(g, '_database', None)
cur = db.cursor()
query = 'INSERT INTO %s (%s) VALUES (%s)' % (
table,
', '.join(fields),
', '.join(['?'] * len(values))
)
cur.execute(query, values)
db.commit()
id = cur.lastrowid
cur.close()
return id
@app.route('/CreatePool', methods = ['POST'])
def CreatePool():
db = get_db()
insert("Pools", ("restaurant", "return_time", "num_orders", "pickup_location", "has_arrived"), ("in n out", "1478939164", "5", "room 383", False))
return "Pool Created"
if __name__ == '__main__':
app.run(host='0.0.0.0', port=port)
| Python | 0.000001 |
3c46550771f5dc588ae9a0cf61a6980fd0315e6f | Fix e.details handling | odlclient/v2/client.py | odlclient/v2/client.py | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
from odlclient.openstack.common.apiclient import client
from odlclient.v2.bridge_domain import BridgeDomain
from odlclient.v2.connection_manager import ConnectionManager
from odlclient.v2.node import NodeManager
from odlclient.v2.ovsdb import OvsdbManager
from odlclient.v2.subnet import SubnetManager
from odlclient.v2.staticroute import StaticRouteManager
LOG = logging.getLogger(__name__)
class HTTPClient(client.HTTPClient):
"""
Modified HTTPClient to take a endpoint and doesn't use X-Auth-Token
"""
user_agent = "odlclient.openstack.common.apiclient"
def __init__(self,
endpoint,
username=None,
password=None,
original_ip=None,
verify=True,
cert=None,
timeout=None,
timings=False,
keyring_saver=None,
debug=False,
user_agent=None,
http=None):
self.endpoint = endpoint
self.username = username
self.password = password
self.original_ip = original_ip
self.timeout = timeout
self.verify = verify
self.cert = cert
self.keyring_saver = keyring_saver
self.debug = debug
self.user_agent = user_agent or self.user_agent
self.times = [] # [("item", starttime, endtime), ...]
self.timings = timings
# requests within the same session can reuse TCP connections from pool
self.http = http or requests.Session()
if self.username and self.password:
self.http.auth = (self.username, self.password)
def client_request(self, client, method, url, **kwargs):
try:
return self.request(
method, self.concat_url(self.endpoint, url), **kwargs)
except Exception as e:
if hasattr(e, 'details'):
LOG.error("Error from server below:\n%s", e.details)
raise
class Client(client.BaseClient):
def __init__(self, *args, **kw):
super(Client, self).__init__(*args, **kw)
self.bridge_domain = BridgeDomain(self)
self.connection_manager = ConnectionManager(self)
self.nodes = NodeManager(self)
self.ovsdb = OvsdbManager(self)
self.subnets = SubnetManager(self)
self.staticroutes = StaticRouteManager(self)
| # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import requests
from odlclient.openstack.common.apiclient import client
from odlclient.v2.bridge_domain import BridgeDomain
from odlclient.v2.connection_manager import ConnectionManager
from odlclient.v2.node import NodeManager
from odlclient.v2.ovsdb import OvsdbManager
from odlclient.v2.subnet import SubnetManager
from odlclient.v2.staticroute import StaticRouteManager
LOG = logging.getLogger(__name__)
class HTTPClient(client.HTTPClient):
"""
Modified HTTPClient to take a endpoint and doesn't use X-Auth-Token
"""
user_agent = "odlclient.openstack.common.apiclient"
def __init__(self,
endpoint,
username=None,
password=None,
original_ip=None,
verify=True,
cert=None,
timeout=None,
timings=False,
keyring_saver=None,
debug=False,
user_agent=None,
http=None):
self.endpoint = endpoint
self.username = username
self.password = password
self.original_ip = original_ip
self.timeout = timeout
self.verify = verify
self.cert = cert
self.keyring_saver = keyring_saver
self.debug = debug
self.user_agent = user_agent or self.user_agent
self.times = [] # [("item", starttime, endtime), ...]
self.timings = timings
# requests within the same session can reuse TCP connections from pool
self.http = http or requests.Session()
if self.username and self.password:
self.http.auth = (self.username, self.password)
def client_request(self, client, method, url, **kwargs):
try:
return self.request(
method, self.concat_url(self.endpoint, url), **kwargs)
except Exception as e:
LOG.error("Error from server below:\n%s", e.details)
raise
class Client(client.BaseClient):
def __init__(self, *args, **kw):
super(Client, self).__init__(*args, **kw)
self.bridge_domain = BridgeDomain(self)
self.connection_manager = ConnectionManager(self)
self.nodes = NodeManager(self)
self.ovsdb = OvsdbManager(self)
self.subnets = SubnetManager(self)
self.staticroutes = StaticRouteManager(self)
| Python | 0.000003 |
791b6720e489353bb5a2b35906dd88f558f26c33 | Handle NotImplementedError | ogn/gateway/process.py | ogn/gateway/process.py | import logging
from ogn.commands.dbutils import session
from ogn.model import AircraftBeacon, ReceiverBeacon, Location
from ogn.parser import parse, ParseError
logger = logging.getLogger(__name__)
def replace_lonlat_with_wkt(message):
location = Location(message['longitude'], message['latitude'])
message['location_wkt'] = location.to_wkt()
del message['latitude']
del message['longitude']
return message
def message_to_beacon(raw_message, reference_date):
beacon = None
if raw_message[0] != '#':
try:
message = parse(raw_message, reference_date)
if message['aprs_type'] == 'position':
message = replace_lonlat_with_wkt(message)
if message['beacon_type'] == 'aircraft_beacon':
beacon = AircraftBeacon(**message)
elif message['beacon_type'] == 'receiver_beacon':
beacon = ReceiverBeacon(**message)
else:
print("Whoops: what is this: {}".format(message))
except NotImplementedError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error(e)
except ParseError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error('Drop packet, {}'.format(e.message))
except TypeError as e:
logger.error('TypeError: {}'.format(raw_message))
return beacon
def process_beacon(raw_message, reference_date=None):
beacon = message_to_beacon(raw_message, reference_date)
if beacon is not None:
session.add(beacon)
session.commit()
logger.debug('Received message: {}'.format(raw_message))
| import logging
from ogn.commands.dbutils import session
from ogn.model import AircraftBeacon, ReceiverBeacon, Location
from ogn.parser import parse, ParseError
logger = logging.getLogger(__name__)
def replace_lonlat_with_wkt(message):
location = Location(message['longitude'], message['latitude'])
message['location_wkt'] = location.to_wkt()
del message['latitude']
del message['longitude']
return message
def message_to_beacon(raw_message, reference_date):
beacon = None
if raw_message[0] != '#':
try:
message = parse(raw_message, reference_date)
if message['aprs_type'] == 'position':
message = replace_lonlat_with_wkt(message)
if message['beacon_type'] == 'aircraft_beacon':
beacon = AircraftBeacon(**message)
elif message['beacon_type'] == 'receiver_beacon':
beacon = ReceiverBeacon(**message)
else:
print("Whoops: what is this: {}".format(message))
except ParseError as e:
logger.error('Received message: {}'.format(raw_message))
logger.error('Drop packet, {}'.format(e.message))
except TypeError as e:
logger.error('TypeError: {}'.format(raw_message))
return beacon
def process_beacon(raw_message, reference_date=None):
beacon = message_to_beacon(raw_message, reference_date)
if beacon is not None:
session.add(beacon)
session.commit()
logger.debug('Received message: {}'.format(raw_message))
| Python | 0.000001 |
77733f09717a2b61d5773c14b0b5a357f155dd1e | make handler authenticated and change to request body | oidenbterm/handlers.py | oidenbterm/handlers.py | import oide.lib.decorators
import oide.settings as global_settings
import oide.apps.filebrowser.settings as app_settings
from oide.lib.handlers.base import BaseHandler
from oidenbterm.mixins.kernel_mixin import KernelMixin
from terminado import TermSocket
from traitlets import Integer
import json
import tornado.web
import jupyter_client
class AuthTermSocket(TermSocket,BaseHandler):
@oide.lib.decorators.authenticated
def get(self, *args, **kwargs):
return super(AuthTermSocket, self).get(*args, **kwargs)
class KernelHandler(BaseHandler, KernelMixin):
@oide.lib.decorators.authenticated
def post(self):
# print self.request.body
code = json.loads(self.request.body)["code"]
msg_id = self.kernel.execute(code)
# print(msg_id)
res = []
while True:
# print('stuck here')
try:
msg = self.kernel.shell_channel.get_msg(Integer(10, config=True))
# print(msg)
except Empty:
# print('Empty')
pass
# This indicates that something bad happened, as AFAIK this should return...
# self.log.error("Timeout waiting for execute reply")
# raise KnitpyException("Timeout waiting for execute reply.")
if msg['parent_header'].get('msg_id') == msg_id:
# It's finished, and we got our reply, so next look at the results
break
else:
# print('something')
# not our reply
# self.log.debug("Discarding message from a different client: %s" % msg)
continue
# Now look at the results of our code execution and earlier completion requests
# We handle messages until the kernel indicates it's ide again
status_idle_again = False
while True:
# print('stuck here now')
try:
msg = self.kernel.get_iopub_msg(Integer(10, config=True))
# print('doing something')
except Exception:
# print('Empty')
pass
# There should be at least some messages: we just executed code!
# The only valid time could be when the timeout happened too early (aka long
# running code in the document) -> we handle that below
# self.log.warn("Timeout waiting for expected IOPub output")
break
# print(msg['parent_header'].get('msg_id') != msg_id)
if msg['parent_header'].get('msg_id') != msg_id:
if msg['parent_header'].get(u'msg_type') != u'is_complete_request':
# print('output')
pass
# not an output from our execution and not one of the complete_requests
# self.log.debug("Discarding output from a different client: %s" % msg)
else:
# print('something too')
pass
# complete_requests are ok
continue
# Here we have some message which corresponds to our code execution
msg_type = msg['msg_type']
content = msg['content']
# print('Out')
# The kernel indicates some status: executing -> idle
if msg_type == 'status':
if content['execution_state'] == 'idle':
# When idle, the kernel has executed all input
status_idle_again = True
break
else:
# the "starting execution" messages
continue
elif msg_type == 'clear_output':
# we don't handle that!?
# self.log.debug("Discarding unexpected 'clear_output' message: %s" % msg)
continue
## So, from here on we have a messages with real content
# self.write(content)
res.append(content)
if not status_idle_again:
pass
# self.log.error("Code lines didn't execute in time. Don't use long-running code in "
# "documents or increase the timeout!")
# self.log.error("line(s): %s" % lines)
self.write({'res': res})
| import oide.lib.decorators
import oide.settings as global_settings
import oide.apps.filebrowser.settings as app_settings
from oide.lib.handlers.base import BaseHandler
from oidenbterm.mixins.kernel_mixin import KernelMixin
from terminado import TermSocket
from traitlets import Integer
import json
import tornado.web
import jupyter_client
class AuthTermSocket(TermSocket,BaseHandler):
@oide.lib.decorators.authenticated
def get(self, *args, **kwargs):
return super(AuthTermSocket, self).get(*args, **kwargs)
class KernelHandler(BaseHandler, KernelMixin):
def post(self):
code = self.get_argument("code")
msg_id = self.kernel.execute(code)
# print(msg_id)
res = []
while True:
# print('stuck here')
try:
msg = self.kernel.shell_channel.get_msg(Integer(10, config=True))
# print(msg)
except Empty:
# print('Empty')
pass
# This indicates that something bad happened, as AFAIK this should return...
# self.log.error("Timeout waiting for execute reply")
# raise KnitpyException("Timeout waiting for execute reply.")
if msg['parent_header'].get('msg_id') == msg_id:
# It's finished, and we got our reply, so next look at the results
break
else:
# print('something')
# not our reply
# self.log.debug("Discarding message from a different client: %s" % msg)
continue
# Now look at the results of our code execution and earlier completion requests
# We handle messages until the kernel indicates it's ide again
status_idle_again = False
while True:
# print('stuck here now')
try:
msg = self.kernel.get_iopub_msg(Integer(10, config=True))
# print('doing something')
except Exception:
# print('Empty')
pass
# There should be at least some messages: we just executed code!
# The only valid time could be when the timeout happened too early (aka long
# running code in the document) -> we handle that below
# self.log.warn("Timeout waiting for expected IOPub output")
break
# print(msg['parent_header'].get('msg_id') != msg_id)
if msg['parent_header'].get('msg_id') != msg_id:
if msg['parent_header'].get(u'msg_type') != u'is_complete_request':
# print('output')
pass
# not an output from our execution and not one of the complete_requests
# self.log.debug("Discarding output from a different client: %s" % msg)
else:
# print('something too')
pass
# complete_requests are ok
continue
# Here we have some message which corresponds to our code execution
msg_type = msg['msg_type']
content = msg['content']
# print('Out')
# The kernel indicates some status: executing -> idle
if msg_type == 'status':
if content['execution_state'] == 'idle':
# When idle, the kernel has executed all input
status_idle_again = True
break
else:
# the "starting execution" messages
continue
elif msg_type == 'clear_output':
# we don't handle that!?
# self.log.debug("Discarding unexpected 'clear_output' message: %s" % msg)
continue
## So, from here on we have a messages with real content
# self.write(content)
res.append(content)
if not status_idle_again:
pass
# self.log.error("Code lines didn't execute in time. Don't use long-running code in "
# "documents or increase the timeout!")
# self.log.error("line(s): %s" % lines)
self.write({'res': res})
| Python | 0 |
5c5f7981905c757cd5a750c2b2d09ea6bc6f1f28 | Add BoolTypeFactory class | dataproperty/_factory.py | dataproperty/_factory.py | # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import abc
import six
from .converter import NopConverterCreator
from .converter import IntegerConverterCreator
from .converter import FloatConverterCreator
from .converter import BoolConverterCreator
from .converter import DateTimeConverterCreator
from ._type_checker_creator import NoneTypeCheckerCreator
from ._type_checker_creator import IntegerTypeCheckerCreator
from ._type_checker_creator import FloatTypeCheckerCreator
from ._type_checker_creator import BoolTypeCheckerCreator
from ._type_checker_creator import DateTimeTypeCheckerCreator
from ._type_checker_creator import InfinityCheckerCreator
from ._type_checker_creator import NanCheckerCreator
@six.add_metaclass(abc.ABCMeta)
class TypeConverterFactoryInterface(object):
"""
Abstract factory class of type converter.
"""
@abc.abstractproperty
def type_checker_factory(self): # pragma: no cover
pass
@abc.abstractproperty
def value_converter_factory(self): # pragma: no cover
pass
class NoneTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NoneTypeCheckerCreator()
@property
def value_converter_factory(self):
return NopConverterCreator()
class IntegerTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return IntegerTypeCheckerCreator()
@property
def value_converter_factory(self):
return IntegerConverterCreator()
class FloatTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return FloatTypeCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class DateTimeTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return DateTimeTypeCheckerCreator()
@property
def value_converter_factory(self):
return DateTimeConverterCreator()
class BoolTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return BoolTypeCheckerCreator()
@property
def value_converter_factory(self):
return BoolConverterCreator()
class InfinityTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return InfinityCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class NanTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NanCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
| # encoding: utf-8
"""
.. codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>
"""
from __future__ import absolute_import
import abc
import six
from .converter import NopConverterCreator
from .converter import IntegerConverterCreator
from .converter import FloatConverterCreator
from .converter import DateTimeConverterCreator
from ._type_checker_creator import NoneTypeCheckerCreator
from ._type_checker_creator import IntegerTypeCheckerCreator
from ._type_checker_creator import FloatTypeCheckerCreator
from ._type_checker_creator import DateTimeTypeCheckerCreator
from ._type_checker_creator import InfinityCheckerCreator
from ._type_checker_creator import NanCheckerCreator
@six.add_metaclass(abc.ABCMeta)
class TypeConverterFactoryInterface(object):
"""
Abstract factory class of type converter.
"""
@abc.abstractproperty
def type_checker_factory(self): # pragma: no cover
pass
@abc.abstractproperty
def value_converter_factory(self): # pragma: no cover
pass
class NoneTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NoneTypeCheckerCreator()
@property
def value_converter_factory(self):
return NopConverterCreator()
class IntegerTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return IntegerTypeCheckerCreator()
@property
def value_converter_factory(self):
return IntegerConverterCreator()
class FloatTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return FloatTypeCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class DateTimeTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return DateTimeTypeCheckerCreator()
@property
def value_converter_factory(self):
return DateTimeConverterCreator()
class InfinityTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return InfinityCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
class NanTypeFactory(TypeConverterFactoryInterface):
@property
def type_checker_factory(self):
return NanCheckerCreator()
@property
def value_converter_factory(self):
return FloatConverterCreator()
| Python | 0 |
f8fd1a1b7e0eb2bcbcd64fa6a352862fceea80de | Use two level danger level. | autopilot.py | autopilot.py | # -*- coding: utf-8 -*-
"""
Autopilots for the DriveIt Gym environment.
@author: Jean-Claude Manoli
"""
import numpy as np
from belief import BeliefTracking
epsilon = 0.05
class Autopilot(object):
def __init__(self, car, other_cars=None):
self.car = car
self.tracker = BeliefTracking(car, other_cars, normalize=True)
self.belief, self.deltas = [], []
self.action = 0
def reset(self, observation):
belief = self.tracker.reset(observation)
self.deltas = np.zeros(np.shape(belief))
self.belief = belief
return belief
def observe(self, observation, dt):
belief = self.tracker.update(self.action, observation, dt)
self.deltas = belief - self.belief
self.belief = belief
return belief
def act(self):
self.action = self._act()
return self.action
def _act(self): raise NotImplementedError
class LookAheadPilot(Autopilot):
def __init__(self, car, other_cars=None,
ky=3.0, kdy=10.0,
kth=3.0, kdth=10.0,
kka=3.0, kdka=-3.0):
super().__init__(car, other_cars)
self.params = ky, kdy, kth, kdth, kka, kdka
def _danger(self, dist, ddist, x):
d, dd, ddd = False, False, 1.0
if dist[0] < 0.8 and x < 0.0 and x > -0.8:
dd = True
for i in range(0, min(3, len(dist))):
ddd = min(dd, ddist[i])
if dist[i] < (0.25 if i == 0 else 0.95):
d = True
if dist[i] < (0.20 if i == 0 else 0.80):
dd = True
return d, dd, ddd
def _act(self):
x, y, th, v, k, kt, ka, *dist = self.belief #pylint: disable=W0612
dx, dy, dth, dv, dk, dkt, dka, *ddist = self.deltas #pylint: disable=W0612
ky, kdy, kth, kdth, kka, kdka = self.params
fy = ky * y + kdy * dy
fth = kth * dth + kdth * dth
fk = kka * (ka - k) + kdka * (dka - k)
f = -fy + fth + fk - k
if f > epsilon: action = 1
elif f < -epsilon: action = 2
else: action = 0
d, dd, ddd = self._danger(dist, ddist, x)
safe_throttle = self.car.specs.safe_turn_speed( \
max(abs(k), abs(ka)), 0.9) / self.car.specs.v_max
if not d and v < safe_throttle - epsilon:
action += 3
elif dd or (d and ddd < 0.0) or v > safe_throttle + epsilon:
action += 6
return action
| # -*- coding: utf-8 -*-
"""
Autopilots for the DriveIt Gym environment.
@author: Jean-Claude Manoli
"""
import numpy as np
from belief import BeliefTracking
epsilon = 0.05
class Autopilot(object):
def __init__(self, car, other_cars=None):
self.car = car
self.tracker = BeliefTracking(car, other_cars, normalize=True)
self.belief, self.deltas = [], []
self.action = 0
def reset(self, observation):
belief = self.tracker.reset(observation)
self.deltas = np.zeros(np.shape(belief))
self.belief = belief
return belief
def observe(self, observation, dt):
belief = self.tracker.update(self.action, observation, dt)
self.deltas = belief - self.belief
self.belief = belief
return belief
def act(self):
self.action = self._act()
return self.action
def _act(self): raise NotImplementedError
class LookAheadPilot(Autopilot):
def __init__(self, car, other_cars=None,
ky=3.0, kdy=10.0,
kth=3.0, kdth=10.0,
kka=3.0, kdka=-3.0):
super().__init__(car, other_cars)
self.params = ky, kdy, kth, kdth, kka, kdka
def _danger(self, dist, ddist, x):
d, dd = False, 1
if dist[0] < 0.5 and x < 0.0 and x > -1.0:
d = True
for i in range(0, min(3, len(dist))):
dd = min(dd, ddist[i])
if dist[i] < (0.25 if i == 0 else 0.95):
d = True
return d and dd < 0.0
def _act(self):
x, y, th, v, k, kt, ka, *dist = self.belief #pylint: disable=W0612
dx, dy, dth, dv, dk, dkt, dka, *ddist = self.deltas #pylint: disable=W0612
ky, kdy, kth, kdth, kka, kdka = self.params
fy = ky * y + kdy * dy
fth = kth * dth + kdth * dth
fk = kka * (ka - k) + kdka * (dka - k)
f = -fy + fth + fk - k
if f > epsilon: action = 1
elif f < -epsilon: action = 2
else: action = 0
if self._danger(dist, ddist, x):
action += 6
else:
safe_throttle = self.car.specs.safe_turn_speed( \
max(abs(k), abs(ka)), 0.9) / self.car.specs.v_max
if v < safe_throttle - epsilon:
action += 3
elif v > safe_throttle + epsilon:
action += 6
return action
| Python | 0.000004 |
27df09cd98d9128d89d9d9d26ee0e89223fbd990 | document idlerpg's external dependencies | libqtile/widget/idlerpg.py | libqtile/widget/idlerpg.py | # -*- coding: utf-8 -*-
# Copyright (c) 2016 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .generic_poll_text import GenPollUrl
import datetime
class IdleRPG(GenPollUrl):
"""
A widget for monitoring and displaying IdleRPG stats.
::
# display idlerpg stats for the player 'pants' on freenode's #idlerpg
widget.IdleRPG(url="http://xethron.lolhosting.net/xml.php?player=pants")
Widget requirements: xmltodict_.
.. _xmltodict: https://pypi.org/project/xmltodict/
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('format', 'IdleRPG: {online} TTL: {ttl}', 'Display format'),
('json', False, 'Not json :)'),
('xml', True, 'Is XML :)'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(IdleRPG.defaults)
def parse(self, body):
formatted = {}
for k, v in body['player'].items():
if k == 'ttl':
formatted[k] = str(datetime.timedelta(seconds=int(v)))
elif k == 'online':
formatted[k] = "online" if v == "1" else "offline"
else:
formatted[k] = v
return self.format.format(**formatted)
| # -*- coding: utf-8 -*-
# Copyright (c) 2016 Tycho Andersen
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from . import base
from .generic_poll_text import GenPollUrl
import datetime
class IdleRPG(GenPollUrl):
"""
A widget for monitoring and displaying IdleRPG stats.
::
# display idlerpg stats for the player 'pants' on freenode's #idlerpg
widget.IdleRPG(url="http://xethron.lolhosting.net/xml.php?player=pants")
"""
orientations = base.ORIENTATION_HORIZONTAL
defaults = [
('format', 'IdleRPG: {online} TTL: {ttl}', 'Display format'),
('json', False, 'Not json :)'),
('xml', True, 'Is XML :)'),
]
def __init__(self, **config):
GenPollUrl.__init__(self, **config)
self.add_defaults(IdleRPG.defaults)
def parse(self, body):
formatted = {}
for k, v in body['player'].items():
if k == 'ttl':
formatted[k] = str(datetime.timedelta(seconds=int(v)))
elif k == 'online':
formatted[k] = "online" if v == "1" else "offline"
else:
formatted[k] = v
return self.format.format(**formatted)
| Python | 0 |
b736e69a88d3caf288f55216830a37df3b2da57c | Generate docs correctly if pexpect is not available. | docs/autogen_api.py | docs/autogen_api.py | #!/usr/bin/env python
"""Script to auto-generate our API docs.
"""
# stdlib imports
import os
import sys
# local imports
sys.path.append(os.path.abspath('sphinxext'))
from apigen import ApiDocWriter
#*****************************************************************************
if __name__ == '__main__':
pjoin = os.path.join
package = 'IPython'
outdir = pjoin('source','api','generated')
docwriter = ApiDocWriter(package,rst_extension='.txt')
# You have to escape the . here because . is a special char for regexps.
# You must do make clean if you change this!
docwriter.package_skip_patterns += [r'\.fixes$',
r'\.external$',
r'\.extensions',
r'\.kernel\.config',
r'\.attic',
r'\.quarantine',
r'\.deathrow',
r'\.config\.default',
r'\.config\.profile',
r'\.frontend',
r'\.gui'
]
docwriter.module_skip_patterns += [ r'\.core\.fakemodule',
# XXX These need fixing, disabling for
# now but we need to figure out why
# they are breaking. Error from sphinx
# for each group copied below
# AttributeError: __abstractmethods__
r'\.core\.component',
r'\.utils\.traitlets',
# AttributeError: __provides__
r'\.kernel\.clusterdir',
r'\.kernel\.configobjfactory',
r'\.kernel\.fcutil',
r'\.kernel\.ipcontrollerapp',
r'\.kernel\.launcher',
r'\.kernel\.task',
r'\.kernel\.winhpcjob',
r'\.testing\.util',
# Keeping these disabled is OK
r'\.cocoa',
r'\.ipdoctest',
r'\.Gnuplot',
r'\.frontend\.process\.winprocess',
r'\.Shell',
]
# If we don't have pexpect, we can't load irunner, so skip any code that
# depends on it
try:
import pexpect
except ImportError:
docwriter.module_skip_patterns += [r'\.lib\.irunner',
r'\.testing\.mkdoctests']
# Now, generate the outputs
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'gen',
relative_to = pjoin('source','api')
)
print '%d files written' % len(docwriter.written_modules)
| #!/usr/bin/env python
"""Script to auto-generate our API docs.
"""
# stdlib imports
import os
import sys
# local imports
sys.path.append(os.path.abspath('sphinxext'))
from apigen import ApiDocWriter
#*****************************************************************************
if __name__ == '__main__':
pjoin = os.path.join
package = 'IPython'
outdir = pjoin('source','api','generated')
docwriter = ApiDocWriter(package,rst_extension='.txt')
# You have to escape the . here because . is a special char for regexps.
# You must do make clean if you change this!
docwriter.package_skip_patterns += [r'\.fixes$',
r'\.external$',
r'\.extensions',
r'\.kernel\.config',
r'\.attic',
r'\.quarantine',
r'\.deathrow',
r'\.config\.default',
r'\.config\.profile',
r'\.frontend',
r'\.gui'
]
docwriter.module_skip_patterns += [ r'\.core\.fakemodule',
# XXX These need fixing, disabling for
# now but we need to figure out why
# they are breaking. Error from sphinx
# for each group copied below
# AttributeError: __abstractmethods__
r'\.core\.component',
r'\.utils\.traitlets',
# AttributeError: __provides__
r'\.kernel\.clusterdir',
r'\.kernel\.configobjfactory',
r'\.kernel\.fcutil',
r'\.kernel\.ipcontrollerapp',
r'\.kernel\.launcher',
r'\.kernel\.task',
r'\.kernel\.winhpcjob',
r'\.testing\.util',
# Keeping these disabled is OK
r'\.cocoa',
r'\.ipdoctest',
r'\.Gnuplot',
r'\.frontend\.process\.winprocess',
r'\.Shell',
]
docwriter.write_api_docs(outdir)
docwriter.write_index(outdir, 'gen',
relative_to = pjoin('source','api')
)
print '%d files written' % len(docwriter.written_modules)
| Python | 0.999999 |
79006fa1fe0bf78d10a1951b2cc20ba5ff245e4b | Provide 'self' argument to instance method | sklearn/utils/tests/test_metaestimators.py | sklearn/utils/tests/test_metaestimators.py | from sklearn.utils.metaestimators import if_delegate_has_method
from nose.tools import assert_true
class Prefix(object):
def func(self):
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
| from sklearn.utils.metaestimators import if_delegate_has_method
from nose.tools import assert_true
class Prefix(object):
def func():
pass
class MockMetaEstimator(object):
"""This is a mock meta estimator"""
a_prefix = Prefix()
@if_delegate_has_method(delegate="a_prefix")
def func(self):
"""This is a mock delegated function"""
pass
def test_delegated_docstring():
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.__dict__['func'].__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator.func.__doc__))
assert_true("This is a mock delegated function"
in str(MockMetaEstimator().func.__doc__))
| Python | 0.999981 |
f9f3ca75e8151b1467fddffe390aee6a8fe00259 | Change configuration for wsgi settings | dev_cloud/web_service/wsgi.py | dev_cloud/web_service/wsgi.py | # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""
WSGI config for web_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
from core.settings.config import ENVIROMENT_PATH
activate_this = ENVIROMENT_PATH
execfile(activate_this, dict(__file__=activate_this))
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
os.environ["CELERY_LOADER"] = "django"
import djcelery
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| # -*- coding: utf-8 -*-
# @COPYRIGHT_begin
#
# Copyright [2015] Michał Szczygieł, M4GiK Software
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @COPYRIGHT_end
"""
WSGI config for web_service project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""
activate_this = '../.pyenv/bin/activate_this.py '
execfile(activate_this, dict(__file__=activate_this))
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "core.settings.prod")
os.environ["CELERY_LOADER"] = "django"
import djcelery
djcelery.setup_loader()
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| Python | 0 |
f3724421fa859a5970e66353b6a311aa14b866ec | Add additional spacing to improve readability | labs/lab-5/ex5-1.log.py | labs/lab-5/ex5-1.log.py | #!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from log_utils import follow
if __name__ == '__main__':
# We are expecting two arguments
# The first is the name of the script
# The second is a path to a log file
if len(sys.argv) == 2:
# Open our file for reading
log_file = open(sys.argv[1], "r")
# Create our iterable function
log_lines = follow(log_file)
# Process the lines as they are appended
for line in log_lines:
# Strip out the new line an print the line
print("{0}".format(line.strip()))
else:
# Incorrect number of arguments
# Output usage to standard out
sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
| #!/usr/bin/python
#
# Copyright 2016 BMC Software, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
from log_utils import follow
if __name__ == '__main__':
# We are expecting two arguments
# The first is the name of the script
# The second is a path to a log file
if len(sys.argv) == 2:
# Open our file for reading
log_file = open(sys.argv[1], "r")
# Create our iterable function
log_lines = follow(log_file)
# Process the lines as they are appended
for line in log_lines:
# Strip out the new line an print the line
print("{0}".format(line.strip()))
else:
# Incorrect number of arguments
# Output usage to standard out
sys.stderr.write("usage: {0} <path>\n".format(os.path.basename(sys.argv[0])))
| Python | 0.000003 |
1adb2c16780a2bbdf5a40368a856dc4b6e9df9f6 | build out initial email example, small change to README | example-emailed_daily_operational_report.py | example-emailed_daily_operational_report.py | import liveengage_data_app as le_api
def get_skills(application):
skills_data = application.get_skills_data()
print(str(skills_data['errors'])) if skills_data['errors'] else print('ok')
account_skills = [{}]
for skill in skills_data:
account_skills.append({
'id': skill['id'],
'name': skill['name'],
})
return account_skills
def get_users(application):
users_data = application.get_user_data()
print(str(users_data['errors'])) if users_data['errors'] else print('ok')
account_agents = [{}]
for user in users_data:
account_agents.append({
'fullName': user['fullName'],
'id': user['id'],
'memberOf': user['memberOf']['agentGroupId'],
'skills': user['skillIds'],
})
return account_agents
def get_groups(application):
agent_groups_data = app.get_agent_groups_data()
print(str(agent_groups_data['errors'])) if agent_groups_data['errors'] else print('ok')
account_groups = [{}]
for group in agent_groups_data:
account_groups.append({
'id': group['id'],
'name': group['name'],
})
return account_groups
def enrich_users_data(groups, skills, users):
for user in users:
for group in groups:
if user['memberOf'] == group['id']:
user['memberOf'] = group
for skill in skills:
if skill in user['skills']:
user['skills'] = skill
return users
app = le_api.LiveEngageDataApp(account_number='xx',
keys_and_secrets={
'consumer_key':'xx',
'consumer_secret':'xx',
'token_key':'xx',
'token_secret':'xx'
},
services=[
#'engHistDomain',
'leDataReporting',
#'accountConfigReadOnly_skills',
'accountConfigReadOnly_users',
#'accountConfigReadOnly_agentGroups'
])
print(str(app))
# Grab data for the timeframe of the last 24 hours. Put the data in only one bucket.
rt_data = app.get_rt_operational_data(minute_timeframe='1440', in_buckets_of='1440')
print(str(rt_data['errors'])) if rt_data['errors'] else print('Main data: ok')
for method in rt_data['success']:
details = method + 'data: '
print(details + str(rt_data['success'][method]['errors'])) if rt_data['success'][method]['errors'] else print(details + 'ok')
agent_data = enrich_users_data(
get_groups(app),
get_skills(app),
get_agents(app),
)
# Need to write functions below this line
agent_csv = build_agent_csv(rt_data['agentactivity'], agent_data)
queue_csv = build_queue_csv(rt_data['queuehealth'], agent_data)
eng_activity_csv = build_eng_activity_csv(rt_data['engactivity'], agent_data)
mail_reports([agent_csv, queue_csv, eng_activity_csv]) | import liveengage_data_app as le_api
app = le_api.LiveEngageDataApp(account_number='xx',
keys_and_secrets={
'consumer_key':'xx',
'consumer_secret':'xx',
'token_key':'xx',
'token_secret':'xx'
},
services=[
#'engHistDomain',
'leDataReporting',
'accountConfigReadOnly_skills',
#'accountConfigReadOnly_users',
'accountConfigReadOnly_agentGroups'
])
print(str(app))
# Grab data for the timeframe of the last 24 hours.
# Put the data in only one bucket.
rt_data = app.get_rt_operational_data(minute_timeframe='1440', in_buckets_of='1440')
print(str(rt_data['errors'])) if rt_data['errors'] else print('Main data: ok')
for method in rt_data['success']:
details = method + 'data: '
print(details + str(rt_data['success'][method]['errors'])) if rt_data['success'][method]['errors'] else print(details + 'ok')
agent_groups_data = app.get_agent_groups_data()
print(str(agent_groups_data['errors'])) if agent_groups_data['errors'] else print('ok')
skills_data = app.get_skills_data()
print(str(skills_data['errors'])) if skills_data['errors'] else print('ok')
| Python | 0.000361 |
7760765e32b81bad6957d333850293db86a2bbcf | Fixes 'Document instance has no attribute 'file_basename'' | doc2text/__init__.py | doc2text/__init__.py | import PyPDF2 as pyPdf
import PythonMagick
import os
import mimetypes
import cv2
from .page import Page
acceptable_mime = ["image/bmp", "image/png", "image/tiff", "image/jpg", "video/JPEG", "video/jpeg2000"]
def main():
"""Entry point for the application script"""
print("Call your main application code here")
FileNotAcceptedException = Exception('The filetype is not acceptable. We accept bmp, png, tiff, jpg, jpeg, jpeg2000, and PDF.')
class Document:
def __init__(self):
self.pages = []
self.processed_pages = []
self.page_content = []
self.prepared = False
self.error = None
def read(self, path):
self.filename = os.path.basename(path)
self.file_basename, self.file_extension = os.path.splitext(self.filename)
self.path = path
self.mime_type = mimetypes.guess_type(path)
self.file_basepath = os.path.dirname(path)
# If the file is a pdf, split the pdf and prep the pages.
if self.mime_type[0] == "application/pdf":
file_temp = open(self.path, 'rb')
pdf_reader = pyPdf.PdfFileReader(file_temp)
self.num_pages = pdf_reader.numPages
try:
for i in xrange(self.num_pages):
output = pyPdf.PdfFileWriter()
output.addPage(pdf_reader.getPage(i))
path = 'temp.pdf'
im_path = 'temp.png'
with open(path, 'wb') as f:
output.write(f)
im = PythonMagick.Image()
im.density("300")
im.read(path)
im.write(im_path)
orig_im = cv2.imread(im_path, 0)
page = Page(orig_im, i)
self.pages.append(page)
os.remove(path)
os.remove(im_path)
self.prepared = True
except Exception as e:
self.error = e
raise
# If the file is an image, think of it as a 1-page pdf.
elif self.mime_type[0] in acceptable_mime:
self.num_pages = 1
im = PythonMagick.Image()
im.density("300")
im.read(path)
temp_path = os.path.normpath(os.path.join(
self.file_basepath, self.file_basename + '_temp.png'
))
img.write(temp_path)
orig_im = cv2.imread(temp_path, 0)
os.remove(temp_path)
page = Page(orig_im, 0)
self.pages.append(page)
# Otherwise, out of luck.
else:
print self.mime_type[0]
raise FileNotAcceptedException
def process(self):
for page in self.pages:
new = page
new.crop()
new.deskew()
self.processed_pages.append(new)
def extract_text(self):
if len(self.processed_pages) > 0:
for page in self.processed_pages:
new = page
text = new.extract_text()
self.page_content.append(text)
else:
raise Exception('You must run `process()` first.')
def get_text(self):
if len(self.page_content) > 0:
return "\n".join(self.page_content)
else:
raise Exception('You must run `extract_text()` first.')
def save_pages(self):
# TODO
stuff = stuff
| import PyPDF2 as pyPdf
import PythonMagick
import os
import mimetypes
import cv2
from .page import Page
acceptable_mime = ["image/bmp", "image/png", "image/tiff", "image/jpg", "video/JPEG", "video/jpeg2000"]
def main():
"""Entry point for the application script"""
print("Call your main application code here")
FileNotAcceptedException = Exception('The filetype is not acceptable. We accept bmp, png, tiff, jpg, jpeg, jpeg2000, and PDF.')
class Document:
def __init__(self):
self.pages = []
self.processed_pages = []
self.page_content = []
self.prepared = False
self.error = None
def read(self, path):
filename, self.file_extension = os.path.splitext(path)
self.path = path
self.filename = os.path.basename(path)
self.mime_type = mimetypes.guess_type(path)
self.file_basepath = os.path.dirname(path)
# If the file is a pdf, split the pdf and prep the pages.
if self.mime_type[0] == "application/pdf":
file_temp = open(self.path, 'rb')
pdf_reader = pyPdf.PdfFileReader(file_temp)
self.num_pages = pdf_reader.numPages
try:
for i in xrange(self.num_pages):
output = pyPdf.PdfFileWriter()
output.addPage(pdf_reader.getPage(i))
path = 'temp.pdf'
im_path = 'temp.png'
with open(path, 'wb') as f:
output.write(f)
im = PythonMagick.Image()
im.density("300")
im.read(path)
im.write(im_path)
orig_im = cv2.imread(im_path, 0)
page = Page(orig_im, i)
self.pages.append(page)
os.remove(path)
os.remove(im_path)
self.prepared = True
except Exception as e:
self.error = e
raise
# If the file is an image, think of it as a 1-page pdf.
elif self.mime_type[0] in acceptable_mime:
self.num_pages = 1
im = PythonMagick.Image()
im.density("300")
im.read(path)
temp_path = os.path.normpath(os.path.join(
self.file_basepath, self.file_basename + '_temp.png'
))
img.write(temp_path)
orig_im = cv2.imread(temp_path, 0)
os.remove(temp_path)
page = Page(orig_im, 0)
self.pages.append(page)
# Otherwise, out of luck.
else:
print self.mime_type[0]
raise FileNotAcceptedException
def process(self):
for page in self.pages:
new = page
new.crop()
new.deskew()
self.processed_pages.append(new)
def extract_text(self):
if len(self.processed_pages) > 0:
for page in self.processed_pages:
new = page
text = new.extract_text()
self.page_content.append(text)
else:
raise Exception('You must run `process()` first.')
def get_text(self):
if len(self.page_content) > 0:
return "\n".join(self.page_content)
else:
raise Exception('You must run `extract_text()` first.')
def save_pages(self):
# TODO
stuff = stuff
| Python | 0.997427 |
d40f1fe493ec2c71d84ac84f5dc989c68de321ca | add version option | batch_isp.py | batch_isp.py | import argparse
from parts import Parts
from pgm_error import PgmError
from operations import Operations
from serial_io import SerialIO
class BatchISP:
def __init__(self):
parser = argparse.ArgumentParser(
description='Linux remake of Atmel\'s BatchISP utility.')
parser.add_argument('-device', type=str, required=True,
help="Device type, ? for list.")
parser.add_argument('-port', type=str,
help="Port/interface to connect.")
parser.add_argument('-hardware', type=str,
help="{ RS232 | TODO }")
parser.add_argument('-version', action='version', version='%(prog)s 0.0.0')
parser.add_argument('-operation', type=str, required=True, nargs='*',
help="... ??? TODO")
self._args = parser.parse_args()
self._parser = parser
def _getIOByHardwareName(self, hardware):
if hardware == 'RS232':
if self._args.port is None:
raise PrgError("Port not specified for RS232")
return SerialIO(self._args.port)
else:
raise PrgError("Unsupported hardware: %s" % hardware)
def run(self):
if self._args.device == '?':
parts = Parts()
print([part.getName() for part in parts.list()])
return 0
try:
part = Parts().getPartByName(self._args.device)
if not self._args.hardware is None:
hw = sef._args.hardware
else:
hw = part.listHardware()
if len(hw) != 1:
raise PrgError("Cannot determine hardware select one of: %s" % hw)
hw = hw[0]
io = self._getIOByHardwareName(hw)
operations = Operations(part, io)
for op in self._args.operation:
print(op)
except PgmError as e:
print(e)
exit(1)
| import argparse
from parts import Parts
from pgm_error import PgmError
from operations import Operations
from serial_io import SerialIO
class BatchISP:
def __init__(self):
parser = argparse.ArgumentParser(
description='Linux remake of Atmel\'s BatchISP utility.')
parser.add_argument('-device', type=str, required=True,
help="Device type, ? for list.")
parser.add_argument('-port', type=str,
help="Port/interface to connect.")
parser.add_argument('-hardware', type=str,
help="{ RS232 | TODO }")
parser.add_argument('-operation', type=str, required=True, nargs='*',
help="... ??? TODO")
self._args = parser.parse_args()
self._parser = parser
def _getIOByHardwareName(self, hardware):
if hardware == 'RS232':
if self._args.port is None:
raise PrgError("Port not specified for RS232")
return SerialIO(self._args.port)
else:
raise PrgError("Unsupported hardware: %s" % hardware)
def run(self):
if self._args.device == '?':
parts = Parts()
print([part.getName() for part in parts.list()])
return 0
try:
part = Parts().getPartByName(self._args.device)
if not self._args.hardware is None:
hw = sef._args.hardware
else:
hw = part.listHardware()
if len(hw) != 1:
raise PrgError("Cannot determine hardware select one of: %s" % hw)
hw = hw[0]
io = self._getIOByHardwareName(hw)
operations = Operations(part, io)
for op in self._args.operation:
print(op)
except PgmError as e:
print(e)
exit(1)
| Python | 0.000001 |
84fc6f4e05c30e368b869b0e5af80b90db5b0ace | Write generic push instructions for the time being | Lib/extractor/stream.py | Lib/extractor/stream.py | # -*- coding: utf-8 -*-
from fontTools.misc.textTools import num2binary
from fontTools.ttLib.tables.ttProgram import streamOpcodeDict, opcodeDict
from io import BytesIO
class InstructionStream(object):
"""
:param program_bytes: The program bytecode.
:type program_bytes: bytes
The instruction stream.
"""
def __init__(self, instruction_processor=None, program_bytes=b""):
self.io = BytesIO(program_bytes)
def rewind(self):
"""
Rewind the instruction pointer to the beginning of the stream.
"""
self.io.seek(0)
def read_byte(self):
"""
Read a byte from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (byte, int).
"""
b = self.io.read(1)
if not b:
return False
return b, int.from_bytes(b, byteorder="big", signed=False)
def read_word(self):
"""
Read a word from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (word, int).
"""
w = self.io.read(2)
if not w:
return False
return w, int.from_bytes(w, byteorder="big", signed=True)
def __repr__(self):
"""
Print the instructions from the bytecode in the current stream starting
at the beginning.
"""
self.rewind()
asm = ""
indent = 0
more = True
while more:
opcode = self.io.read(1)
if opcode:
opcode = int.from_bytes(opcode, byteorder="big", signed=False)
cmd_info = streamOpcodeDict.get(opcode, None)
if cmd_info is None:
cmd_info = opcodeDict.get(opcode, None)
if cmd_info is None:
print(
asm + "\n"
"Illegal opcode 0x%02x at offset 0x%04x."
% (int(opcode), self.io.tell(),)
)
raise KeyError
cmd_name, arg_bits, base_opcode, name = cmd_info
args = []
if cmd_name in ("EIF", "ELSE", "ENDF"):
indent -= 1
if cmd_name in ("NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
# PUSH instructions read their arguments from the stream
if cmd_name.startswith("PUSH"):
# Take number of arguments from the opcode
num_args = opcode - base_opcode + 1
else:
# Take number of arguments from the stream
_, num_args = self.read_byte()
# args.append(str(num_args))
# TODO: Reactivate when NPUSH can be roundtripped
if cmd_name.endswith("B"):
for n in range(num_args):
_, i = self.read_byte()
args.append(str(i))
else:
for n in range(num_args):
_, i = self.read_word()
args.append(str(i))
arg_bits = 0 # Don't output bits for push instructions
cmd_name = "push" # Write generic push instruction for now
if arg_bits == 0:
asm += "\n%s%s" % (" " * indent, cmd_name,)
else:
asm += "\n%s%s[%s]" % (
" " * indent,
cmd_name,
num2binary(opcode - base_opcode, arg_bits),
)
if args:
asm += " " + " ".join(args)
if cmd_name in ("ELSE", "FDEF", "IF"):
indent += 1
else:
more = False
return asm.strip()
| # -*- coding: utf-8 -*-
from fontTools.misc.textTools import num2binary
from fontTools.ttLib.tables.ttProgram import streamOpcodeDict, opcodeDict
from io import BytesIO
class InstructionStream(object):
"""
:param program_bytes: The program bytecode.
:type program_bytes: bytes
The instruction stream.
"""
def __init__(self, instruction_processor=None, program_bytes=b""):
self.io = BytesIO(program_bytes)
def rewind(self):
"""
Rewind the instruction pointer to the beginning of the stream.
"""
self.io.seek(0)
def read_byte(self):
"""
Read a byte from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (byte, int).
"""
b = self.io.read(1)
if not b:
return False
return b, int.from_bytes(b, byteorder="big", signed=False)
def read_word(self):
"""
Read a word from the instruction stream and advance the instruction
pointer. Returns the value as a tuple of (word, int).
"""
w = self.io.read(2)
if not w:
return False
return w, int.from_bytes(w, byteorder="big", signed=True)
def __repr__(self):
"""
Print the instructions from the bytecode in the current stream starting
at the beginning.
"""
self.rewind()
asm = ""
indent = 0
more = True
while more:
opcode = self.io.read(1)
if opcode:
opcode = int.from_bytes(opcode, byteorder="big", signed=False)
cmd_info = streamOpcodeDict.get(opcode, None)
if cmd_info is None:
cmd_info = opcodeDict.get(opcode, None)
if cmd_info is None:
print(
asm + "\n"
"Illegal opcode 0x%02x at offset 0x%04x."
% (int(opcode), self.io.tell(),)
)
raise KeyError
cmd_name, arg_bits, base_opcode, name = cmd_info
args = []
if cmd_name in ("EIF", "ELSE", "ENDF"):
indent -= 1
if cmd_name in ("NPUSHB", "NPUSHW", "PUSHB", "PUSHW"):
# PUSH instructions read their arguments from the stream
if cmd_name.startswith("PUSH"):
# Take number of arguments from the opcode
num_args = opcode - base_opcode + 1
else:
# Take number of arguments from the stream
_, num_args = self.read_byte()
args.append(str(num_args))
if cmd_name.endswith("B"):
for n in range(num_args):
_, i = self.read_byte()
args.append(str(i))
else:
for n in range(num_args):
_, i = self.read_word()
args.append(str(i))
arg_bits = 0 # Don't output bits for push instructions
if arg_bits == 0:
asm += "\n%s%s" % (" " * indent, cmd_name,)
else:
asm += "\n%s%s[%s]" % (
" " * indent,
cmd_name,
num2binary(opcode - base_opcode, arg_bits),
)
if args:
asm += " " + " ".join(args)
if cmd_name in ("ELSE", "FDEF", "IF"):
indent += 1
else:
more = False
return asm.strip()
| Python | 0.000002 |
1ea72ca96f0f43bd80baa9fb41ec930ea02de271 | fix name error | sfa/rspecs/sfa_rspec_converter.py | sfa/rspecs/sfa_rspec_converter.py | #!/usr/bin/python
from lxml import etree
from StringIO import StringIO
from sfa.util.xrn import *
from sfa.rspecs.sfa_rspec import SfaRSpec
from sfa.rspecs.pg_rspec import PGRSpec
class SfaRSpecConverter:
@staticmethod
def to_pg_rspec(rspec):
if isinstance(rspec, SfaRSpec):
sfa_rspec = rspec
else:
sfa_rspec = SfaRSpec(rspec=rspec)
pg_rspec = PGRSpec()
# get networks
networks = sfa_rspec.get_networks()
for network in networks:
# get nodes
sfa_node_elements = sfa_rspec.get_node_elements(network=network)
for sfa_node_element in sfa_node_elements:
# create node element
node_attrs = {}
node_attrs['exclusive'] = 'false'
node_attrs['component_manager_id'] = network
if sfa_node_element.find('hostname') != None:
node_attrs['component_name'] = sfa_node_element.find('hostname').text
if sfa_node_element.find('urn') != None:
node_attrs['component_id'] = sfa_node_element.find('urn').text
node_element = pg_rspec.add_element('node', node_attrs)
# create node_type element
for hw_type in ['plab-pc', 'pc']:
hdware_type_element = pg_rspec.add_element('hardware_type', {'name': hw_type}, parent=node_element)
# create available element
pg_rspec.add_element('available', {'now': 'true'}, parent=node_element)
# create locaiton element
# We don't actually associate nodes with a country.
# Set country to "unknown" until we figure out how to make
# sure this value is always accurate.
location = sfa_node_element.find('location')
if location != None:
location_attrs = {}
location_attrs['country'] = location.get('country', 'unknown')
location_attrs['latitude'] = location.get('latitiue', 'None')
location_attrs['longitude'] = location.get('longitude', 'None')
pg_rspec.add_element('location', location_attrs, parent=node_element)
sliver_element = sfa_node_element.find('sliver')
if sliver_element != None:
pg_rspec.add_element('sliver_type', {'name': 'planetlab-vnode'}, parent=node_element)
return pg_rspec.toxml()
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
print SfaRSpecConverter.to_pg_rspec(sys.argv[1])
| #!/usr/bin/python
from lxml import etree
from StringIO import StringIO
from sfa.util.xrn import *
from sfa.rspecs.sfa_rspec import SfaRSpec
from sfa.rspecs.pg_rspec import PGRSpec
class SfaRSpecConverter:
@staticmethod
def to_pg_rspec(rspec):
if isinstance(rspec, SfaRSpec):
sfa_rspec = rspec
else:
sfa_rspec = SfaRSpec(rspec=rspec)
pg_rspec = PGRSpec()
# get networks
networks = sfa_rspec.get_networks()
for network in networks:
# get nodes
sfa_node_elements = sfa_rspec.get_node_elements(network=network)
for sfa_node_element in sfa_node_elements:
# create node element
node_attrs = {}
node_attrs['exclusive'] = 'false'
node_attrs['component_manager_id'] = network
if sfa_node_element.find('hostname') != None:
node_attrs['component_name'] = sfa_node_element.find('hostname').text
if sfa_node_element.find('urn') != None:
node_attrs['component_id'] = sfa_node_element.find('urn').text
node_element = pg_rspec.add_element('node', node_attrs)
# create node_type element
for hw_type in ['plab-pc', 'pc']:
hdware_type_element = pg_rspec.add_element('hardware_type', {'name': hw_type}, parent=node_element)
# create available element
pg_rspec.add_element('available', {'now': 'true'}, parent=node_element)
# create locaiton element
# We don't actually associate nodes with a country.
# Set country to "unknown" until we figure out how to make
# sure this value is always accurate.
location = sfa_node_element.find('location')
if location != None:
location_attrs = {}
location_attrs['country'] = locatiton.get('country', 'unknown')
location_attrs['latitude'] = location.get('latitiue', 'None')
location_attrs['longitude'] = location.get('longitude', 'None')
pg_rspec.add_element('location', location_attrs, parent=node_element)
sliver_element = sfa_node_element.find('sliver')
if sliver_element != None:
pg_rspec.add_element('sliver_type', {'name': 'planetlab-vnode'}, parent=node_element)
return pg_rspec.toxml()
if __name__ == '__main__':
import sys
if len(sys.argv) > 1:
print SfaRSpecConverter.to_pg_rspec(sys.argv[1])
| Python | 0.000024 |
c0a075810e3d92295ade789c24d141c1dbba60c4 | Add support for secret driver in create_secret | docker/api/secret.py | docker/api/secret.py | import base64
import six
from .. import errors
from .. import utils
class SecretApiMixin(object):
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None, driver=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
driver (DriverConfig): A custom driver configuration. If
unspecified, the default ``internal`` driver will be used
Returns (dict): ID of the newly created secret
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
if driver is not None:
if utils.version_lt(self._version, '1.31'):
raise errors.InvalidVersion(
'Secret driver is only available for API version > 1.31'
)
body['Driver'] = driver
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
id (string): Full ID of the secret to remove
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
| import base64
import six
from .. import utils
class SecretApiMixin(object):
@utils.minimum_version('1.25')
def create_secret(self, name, data, labels=None):
"""
Create a secret
Args:
name (string): Name of the secret
data (bytes): Secret data to be stored
labels (dict): A mapping of labels to assign to the secret
Returns (dict): ID of the newly created secret
"""
if not isinstance(data, bytes):
data = data.encode('utf-8')
data = base64.b64encode(data)
if six.PY3:
data = data.decode('ascii')
body = {
'Data': data,
'Name': name,
'Labels': labels
}
url = self._url('/secrets/create')
return self._result(
self._post_json(url, data=body), True
)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def inspect_secret(self, id):
"""
Retrieve secret metadata
Args:
id (string): Full ID of the secret to remove
Returns (dict): A dictionary of metadata
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
return self._result(self._get(url), True)
@utils.minimum_version('1.25')
@utils.check_resource('id')
def remove_secret(self, id):
"""
Remove a secret
Args:
id (string): Full ID of the secret to remove
Returns (boolean): True if successful
Raises:
:py:class:`docker.errors.NotFound`
if no secret with that ID exists
"""
url = self._url('/secrets/{0}', id)
res = self._delete(url)
self._raise_for_status(res)
return True
@utils.minimum_version('1.25')
def secrets(self, filters=None):
"""
List secrets
Args:
filters (dict): A map of filters to process on the secrets
list. Available filters: ``names``
Returns (list): A list of secrets
"""
url = self._url('/secrets')
params = {}
if filters:
params['filters'] = utils.convert_filters(filters)
return self._result(self._get(url, params=params), True)
| Python | 0 |
dbf3af1de0bbbda178e5bbd1ca0473a83d8cb9b3 | test triggering travis | fabre_test.py | fabre_test.py | #!/usr/bin/env python
# coding=UTF-8
import sys
import pytest
sys.exit(0)
| #!/usr/bin/env python
# coding=UTF-8
import sys
sys.exit(0)
| Python | 0.000001 |
4cff5b7a14dfda786fef4a869e72095b7d9d83e4 | correct relative import, d'oh | pyjac/performance_tester/__main__.py | pyjac/performance_tester/__main__.py | import sys
import os
from . import performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
parser.add_argument('-nt', '--num_omp_threads',
type=int,
default=12,
required=False,
help='Number of threads to use for OpenMP '
'parallelization of the C codes.'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt, args.num_omp_threads
)
if __name__ == '__main__':
sys.exit(main())
| import sys
import os
import .performance_tester as pt
from argparse import ArgumentParser
def main(args=None):
if args is None:
# command line arguments
parser = ArgumentParser(description='performance_tester.py: '
'tests pyJac performance'
)
parser.add_argument('-w', '--working_directory',
type=str,
default='performance',
help='Directory storing the mechanisms / data.'
)
parser.add_argument('-uoo', '--use_old_opt',
action='store_true',
default=False,
required=False,
help='If True, allows performance_tester to use '
'any old optimization files found'
)
parser.add_argument('-nt', '--num_omp_threads',
type=int,
default=12,
required=False,
help='Number of threads to use for OpenMP '
'parallelization of the C codes.'
)
args = parser.parse_args()
pt.performance_tester(os.path.dirname(os.path.abspath(pt.__file__)),
args.working_directory,
args.use_old_opt, args.num_omp_threads
)
if __name__ == '__main__':
sys.exit(main())
| Python | 0.000035 |
7596de67f67f5bdc9350067a896dcd4b7b4c7650 | Stop requiring the path of the users file; only require the name. | gobbldygook.py | gobbldygook.py | #!/usr/bin/env python3
import argparse, csv, os
from course import Course, all_courses, all_labs, getCourse
from student import Student
def argument_parse():
parser = argparse.ArgumentParser(description="This program works best if you give it some data. However, we have some example stuff to show you anyway.)")
parser.add_argument('-l', "--load", default='example')
parser.add_argument('-f', "--find")
parser.add_argument("--demo")
parser.add_argument("--stress")
parser.add_argument("--debug")
return parser
def parse_filename(fname):
filename = fname.name
filename = filename.split('.')[0] # Remove the extension
filename = filename.split('/')[1] # Remove the path seperator
start_year, end_year, semester = filename.split(sep='-')
if semester == 's1':
semester = "fall"
elif semester == 's2':
semester = "spring"
elif semester == 'ss1':
semester = "summer session 1"
elif semester == 'ss2':
semester = "summer session 2"
return int(filename[0:4]), semester
def load_data(filename):
with open(filename) as infile:
year, semester = parse_filename(infile)
if year not in all_courses:
all_courses[year] = {}
if semester not in all_courses[year]:
all_courses[year][semester] = {}
infile.readline() # Remove the csv header line
csvfile = csv.reader(infile)
for row in csvfile:
tmp = Course(data=row)
if tmp.course_status == 'X':
pass
elif tmp.course_type == "Lab":
all_labs[tmp.id] = tmp
else:
all_courses[tmp.id] = tmp
all_courses[year][tmp.id] = tmp
all_courses[year][semester][tmp.id] = tmp
def read_data():
path = 'data/'
for filename in os.listdir(path):
if filename[0] is not '.':
load_data(path + filename)
def main():
parser = argument_parse()
args = parser.parse_args()
read_data()
user = Student(filename='users/'+args.load+'.yaml')
print(user)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
import argparse, csv, os
from course import Course, all_courses, all_labs, getCourse
from student import Student
def argument_parse():
parser = argparse.ArgumentParser(description="This program works best if you give it some data. However, we have some example stuff to show you anyway.)")
parser.add_argument('-l', "--load", default='users/example.yaml')
parser.add_argument('-f', "--find")
parser.add_argument("--demo")
parser.add_argument("--stress")
parser.add_argument("--debug")
return parser
def parse_filename(fname):
filename = fname.name
filename = filename.split('.')[0] # Remove the extension
filename = filename.split('/')[1] # Remove the path seperator
start_year, end_year, semester = filename.split(sep='-')
if semester == 's1':
semester = "fall"
elif semester == 's2':
semester = "spring"
elif semester == 'ss1':
semester = "summer session 1"
elif semester == 'ss2':
semester = "summer session 2"
return int(filename[0:4]), semester
def load_data(filename):
with open(filename) as infile:
year, semester = parse_filename(infile)
if year not in all_courses:
all_courses[year] = {}
if semester not in all_courses[year]:
all_courses[year][semester] = {}
infile.readline() # Remove the csv header line
csvfile = csv.reader(infile)
for row in csvfile:
tmp = Course(data=row)
if tmp.course_status == 'X':
pass
elif tmp.course_type == "Lab":
all_labs[tmp.id] = tmp
else:
all_courses[tmp.id] = tmp
all_courses[year][tmp.id] = tmp
all_courses[year][semester][tmp.id] = tmp
def read_data():
path = 'data/'
for filename in os.listdir(path):
if filename[0] is not '.':
load_data(path + filename)
def main():
parser = argument_parse()
args = parser.parse_args()
read_data()
user = Student(filename=args.load)
print(user)
if __name__ == '__main__':
main()
| Python | 0 |
d73c6addf064ba7b78c4874a6affc6bac6dfee1f | Add image feature detection | image.py | image.py | from __future__ import division
import numpy as np
import cv2
import time, io
from matplotlib import pyplot as plt
from google.cloud import vision
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 differ signficiantly
Determined through feature matching and comparison
(the number of good matches must be greater than MIN_MATCH_COUNT)
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SIFT detector
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches>MIN_MATCH_COUNT
def features(img_path,labels=True,logos=True,landmarks=True):
"""
Returns a list of features from an image
Optionally pass a certainty_threshold value to give a threshold in [0,1] on how certain
Google's identification is.
"""
v_c = vision.Client()
with io.open(img_path, 'rb') as image_file:
content = image_file.read()
img = v_c.image(content=content)
output = []
if labels:
labels = [label.description for label in img.detect_labels()]
output += labels
if logos:
logos = [logo.description for logo in img.detect_logos()]
output += logos
if landmarks:
landmarks = [landmark.description for landmark in img.detect_landmarks()]
output += landmarks
return output
| from __future__ import division
import numpy as np
import cv2
import time
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 200
# only using match count right now
MIN_MATCH_RATIO = .2
def compare(img1_name, img2_name):
"""
Return whether img1 and img2 differ signficiantly
Determined through feature matching and comparison
(the number of good matches must be greater than MIN_MATCH_COUNT)
"""
img1 = cv2.imread(img1_name)
img2 = cv2.imread(img2_name)
# Initiate SIFT detector
sift = cv2.xfeatures2d.SURF_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1,None)
kp2, des2 = sift.detectAndCompute(img2,None)
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
search_params = dict(checks = 50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1,des2,k=2)
# count the number of good matches
num_good_matches = 0
for m,n in matches:
if m.distance < 0.7*n.distance:
num_good_matches += 1
print('Number of good features matched: ' + str(num_good_matches))
return num_good_matches>MIN_MATCH_COUNT
| Python | 0 |
377aef17394b2dabd6db7439d3cfcd4e0d54a3c2 | Allow codata tests to be run as script. | scipy/constants/tests/test_codata.py | scipy/constants/tests/test_codata.py |
import warnings
from scipy.constants import find
from numpy.testing import assert_equal, run_module_suite
def test_find():
warnings.simplefilter('ignore', DeprecationWarning)
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
if __name__ == "__main__":
run_module_suite()
|
import warnings
from scipy.constants import find
from numpy.testing import assert_equal
def test_find():
warnings.simplefilter('ignore', DeprecationWarning)
keys = find('weak mixing', disp=False)
assert_equal(keys, ['weak mixing angle'])
keys = find('qwertyuiop', disp=False)
assert_equal(keys, [])
keys = find('natural unit', disp=False)
assert_equal(keys, sorted(['natural unit of velocity',
'natural unit of action',
'natural unit of action in eV s',
'natural unit of mass',
'natural unit of energy',
'natural unit of energy in MeV',
'natural unit of momentum',
'natural unit of momentum in MeV/c',
'natural unit of length',
'natural unit of time']))
| Python | 0.000004 |
67c2e8ba33b5bcc83f0242bece81f604d21939db | Fix editing error | pytest-profiling/pytest_profiling.py | pytest-profiling/pytest_profiling.py | from __future__ import absolute_import
import pytest
import os
import cProfile
import pstats
import pipes
import six
import errno
from hashlib import md5
LARGE_FILENAME_HASH_LEN = 8
def clean_filename(s):
forbidden_chars = set('/?<>\:*|"')
return six.text_type("".join(c if c not in forbidden_chars and ord(c) < 127 else '_'
for c in s))
class Profiling(object):
"""Profiling plugin for pytest."""
svg = False
svg_name = None
profs = []
combined = None
def __init__(self, svg):
self.svg = svg
self.profs = []
def pytest_sessionstart(self, session): # @UnusedVariable
try:
os.makedirs("prof")
except OSError:
pass
def pytest_sessionfinish(self, session, exitstatus): # @UnusedVariable
if self.profs:
combined = pstats.Stats(self.profs[0])
for prof in self.profs[1:]:
combined.add(prof)
self.combined = os.path.abspath(os.path.join("prof", "combined.prof"))
combined.dump_stats(self.combined)
if self.svg:
self.svg_name = os.path.abspath(os.path.join("prof", "combined.svg"))
t = pipes.Template()
t.append("gprof2dot -f pstats $IN", "f-")
t.append("dot -Tsvg -o $OUT", "-f")
t.copy(self.combined, self.svg_name)
def pytest_terminal_summary(self, terminalreporter):
if self.combined:
terminalreporter.write("Profiling (from {prof}):\n".format(prof=self.combined))
pstats.Stats(self.combined, stream=terminalreporter).strip_dirs().sort_stats('cumulative').print_stats(20)
if self.svg_name:
terminalreporter.write("SVG profile in {svg}.\n".format(svg=self.svg_name))
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
prof_filename = os.path.abspath(os.path.join("prof", clean_filename(item.name) + ".prof"))
prof = cProfile.Profile()
prof.enable()
yield
prof.disable()
try:
prof.dump_stats(prof_filename)
except EnvironmentError as err:
if err.errno != errno.ENAMETOOLONG:
raise
if len(item.name) < LARGE_FILENAME_HASH_LEN:
raise
hash_str = md5(item.name).hexdigest()[:LARGE_FILENAME_HASH_LEN]
prof_filename = os.path.join("prof", hash_str + ".prof")
prof.dump_stats(prof_filename)
self.profs.append(prof_filename)
def pytest_addoption(parser):
"""pytest_addoption hook for profiling plugin"""
group = parser.getgroup('Profiling')
group.addoption("--profile", action="store_true",
help="generate profiling information")
group.addoption("--profile-svg", action="store_true",
help="generate profiling graph (using gprof2dot and dot -Tsvg)")
def pytest_configure(config):
"""pytest_configure hook for profiling plugin"""
profile_enable = any(config.getvalue(x) for x in ('profile', 'profile_svg'))
if profile_enable:
config.pluginmanager.register(Profiling(config.getvalue('profile_svg')))
| from __future__ import absolute_import
import pytest
import os
import cProfile
import pstats
import pipes
import six
import errno
from hashlib import md5
LARGE_FILENAME_HASH_LEN = 8
def clean_filename(s):
forbidden_chars = set('/?<>\:*|"')
return six.text_type("".join(c if c not in forbidden_chars and ord(c) < 127 else '_'
for c in s))
class Profiling(object):
"""Profiling plugin for pytest."""
svg = False
svg_name = None
profs = []
combined = None
def __init__(self, svg):
self.svg = svg
self.profs = []
def pytest_sessionstart(self, session): # @UnusedVariable
try:
os.makedirs("prof")
except OSError:
pass
def pytest_sessionfinish(self, session, exitstatus): # @UnusedVariable
if self.profs:
combined = pstats.Stats(self.profs[0])
for prof in self.profs[1:]:
combined.add(prof)
self.combined = os.path.abspath(os.path.join("prof", "combined.prof"))
combined.dump_stats(self.combined)
if self.svg:
t.append("gprof2dot -f pstats $IN", "f-")
t.append("dot -Tsvg -o $OUT", "-f")
t.copy(self.combined, self.svg_name)
def pytest_terminal_summary(self, terminalreporter):
if self.combined:
terminalreporter.write("Profiling (from {prof}):\n".format(prof=self.combined))
pstats.Stats(self.combined, stream=terminalreporter).strip_dirs().sort_stats('cumulative').print_stats(20)
if self.svg_name:
terminalreporter.write("SVG profile in {svg}.\n".format(svg=self.svg_name))
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_call(self, item):
prof_filename = os.path.abspath(os.path.join("prof", clean_filename(item.name) + ".prof"))
prof = cProfile.Profile()
prof.enable()
yield
prof.disable()
try:
prof.dump_stats(prof_filename)
except EnvironmentError as err:
if err.errno != errno.ENAMETOOLONG:
raise
if len(item.name) < LARGE_FILENAME_HASH_LEN:
raise
hash_str = md5(item.name).hexdigest()[:LARGE_FILENAME_HASH_LEN]
prof_filename = os.path.join("prof", hash_str + ".prof")
prof.dump_stats(prof_filename)
self.profs.append(prof_filename)
def pytest_addoption(parser):
"""pytest_addoption hook for profiling plugin"""
group = parser.getgroup('Profiling')
group.addoption("--profile", action="store_true",
help="generate profiling information")
group.addoption("--profile-svg", action="store_true",
help="generate profiling graph (using gprof2dot and dot -Tsvg)")
def pytest_configure(config):
"""pytest_configure hook for profiling plugin"""
profile_enable = any(config.getvalue(x) for x in ('profile', 'profile_svg'))
if profile_enable:
config.pluginmanager.register(Profiling(config.getvalue('profile_svg')))
| Python | 0.000002 |
20d39fe954e9dc62bbe283ad89b7c140529a23df | Remove WindowDict.__contains__ | gorm/window.py | gorm/window.py | from collections import deque, MutableMapping
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the effective value as
of that revision. Keys should always be revision numbers. Once a key is set, all greater
keys are considered to be in this dict unless the value is ``None``. Keys after that one
aren't "set" until one's value is non-``None`` again.
Optimized for the cases where you look up the same revision repeatedly, or its neighbors.
"""
def seek(self, rev):
"""Arrange the caches in the optimal way for looking up the given revision."""
# TODO: binary search? Perhaps only when one or the other deque is very large?
while self._future and self._future[0][0] <= rev:
self._past.append(self._future.popleft())
while self._past and self._past[-1][0] > rev:
self._future.appendleft(self._past.pop())
def rev_before(self, rev):
"""Return the last rev prior to the given one on which the value changed."""
self.seek(rev)
return self._past[-1][0]
def rev_after(self, rev):
"""Return the next rev after the given one on which the value will change, or None if it never will."""
self.seek(rev)
if self._future:
return self._future[0][0]
def __init__(self, data={}):
self._past = deque(sorted(data.items()))
self._future = deque()
def __iter__(self):
for (rev, v) in self._past:
yield rev
for (rev, v) in self._future:
yield rev
def __len__(self):
return len(self._past) + len(self._future)
def __getitem__(self, rev):
self.seek(rev)
if not self._past:
raise KeyError("Revision {} is before the start of history".format(rev))
if self._past[-1][1] is None:
raise KeyError("No value since revision {}".format(self._past[-1][0]))
return self._past[-1][1]
def __setitem__(self, rev, v):
if not self._past:
self._past.append((rev, v))
elif rev < self._past[0][0]:
self._past.appendleft((rev, v))
elif rev == self._past[0][0]:
self._past[0] = (rev, v)
elif rev == self._past[-1][0]:
self._past[-1] = (rev, v)
elif rev > self._past[-1][0]:
if not self._future or rev < self._future[0][0]:
self._past.append((rev, v))
elif rev == self._future[0][0]:
self._future[0][0] = (rev, v)
elif rev == self._future[-1][0]:
self._future[-1][0] = (rev, v)
elif rev > self._future[-1][0]:
self._future.append((rev, v))
else:
self._future.append((rev, v))
inserted = sorted(self._future)
self._future = deque(inserted)
else:
# I was going to implement my own insertion sort here, but I gather Python already
# does that, via Timsort. I wonder if there's a way I can give it a hint, so it doesn't
# have to check for partial ordering? And maybe avoid reconstructing the deque?
self._past.append((rev, v))
inserted = sorted(self._past)
self._past = deque(inserted)
def __delitem__(self, rev):
name = '_past' if rev <= self._rev else '_future'
stack = getattr(self, name)
waste = deque()
setattr(self, name, waste)
deleted = False
while stack:
(r, v) = stack.popleft()
if r != rev:
waste.append((r, v))
else:
assert not deleted
deleted = True
if not deleted:
raise KeyError("Rev not present: {}".format(rev))
def __repr__(self):
return "WindowDict({})".format(repr(dict(self)))
| from collections import deque, MutableMapping
class WindowDict(MutableMapping):
"""A dict that keeps every value that a variable has had over time.
Look up a revision number in this dict and it will give you the effective value as
of that revision. Keys should always be revision numbers. Once a key is set, all greater
keys are considered to be in this dict unless the value is ``None``. Keys after that one
aren't "set" until one's value is non-``None`` again.
Optimized for the cases where you look up the same revision repeatedly, or its neighbors.
"""
def seek(self, rev):
"""Arrange the caches in the optimal way for looking up the given revision."""
# TODO: binary search? Perhaps only when one or the other deque is very large?
while self._future and self._future[0][0] <= rev:
self._past.append(self._future.popleft())
while self._past and self._past[-1][0] > rev:
self._future.appendleft(self._past.pop())
def rev_before(self, rev):
"""Return the last rev prior to the given one on which the value changed."""
self.seek(rev)
return self._past[-1][0]
def rev_after(self, rev):
"""Return the next rev after the given one on which the value will change, or None if it never will."""
self.seek(rev)
if self._future:
return self._future[0][0]
def __init__(self, data={}):
self._past = deque(sorted(data.items()))
self._future = deque()
def __iter__(self):
for (rev, v) in self._past:
yield rev
for (rev, v) in self._future:
yield rev
def __contains__(self, k):
if not self._past or self._past[0][0] > k:
return False
self.seek(k)
return self._past[-1][1] is not None
def __len__(self):
return len(self._past) + len(self._future)
def __getitem__(self, rev):
self.seek(rev)
if not self._past:
raise KeyError("Revision {} is before the start of history".format(rev))
if self._past[-1][1] is None:
raise KeyError("No value since revision {}".format(self._past[-1][0]))
return self._past[-1][1]
def __setitem__(self, rev, v):
if not self._past:
self._past.append((rev, v))
elif rev < self._past[0][0]:
self._past.appendleft((rev, v))
elif rev == self._past[0][0]:
self._past[0] = (rev, v)
elif rev == self._past[-1][0]:
self._past[-1] = (rev, v)
elif rev > self._past[-1][0]:
if not self._future or rev < self._future[0][0]:
self._past.append((rev, v))
elif rev == self._future[0][0]:
self._future[0][0] = (rev, v)
elif rev == self._future[-1][0]:
self._future[-1][0] = (rev, v)
elif rev > self._future[-1][0]:
self._future.append((rev, v))
else:
self._future.append((rev, v))
inserted = sorted(self._future)
self._future = deque(inserted)
else:
# I was going to implement my own insertion sort here, but I gather Python already
# does that, via Timsort. I wonder if there's a way I can give it a hint, so it doesn't
# have to check for partial ordering? And maybe avoid reconstructing the deque?
self._past.append((rev, v))
inserted = sorted(self._past)
self._past = deque(inserted)
def __delitem__(self, rev):
name = '_past' if rev <= self._rev else '_future'
stack = getattr(self, name)
waste = deque()
setattr(self, name, waste)
deleted = False
while stack:
(r, v) = stack.popleft()
if r != rev:
waste.append((r, v))
else:
assert not deleted
deleted = True
if not deleted:
raise KeyError("Rev not present: {}".format(rev))
def __repr__(self):
return "WindowDict({})".format(repr(dict(self)))
| Python | 0.000055 |
0e5118f0ec68ead39392e168efcc1a957162bb3f | Fix NIN example | examples/imagenet/nin.py | examples/imagenet/nin.py | import math
from chainer import FunctionSet, Variable
import chainer.functions as F
class NIN(FunctionSet):
"""Network-in-Network example model."""
insize = 227
def __init__(self):
w = math.sqrt(2) # MSRA scaling
super(NIN, self).__init__(
conv1 = F.Convolution2D( 3, 96, 11, wscale=w, stride=4),
conv1a = F.Convolution2D( 96, 96, 1, wscale=w),
conv1b = F.Convolution2D( 96, 96, 1, wscale=w),
conv2 = F.Convolution2D( 96, 256, 5, wscale=w, pad=2),
conv2a = F.Convolution2D( 256, 256, 1, wscale=w),
conv2b = F.Convolution2D( 256, 256, 1, wscale=w),
conv3 = F.Convolution2D( 256, 384, 3, wscale=w, pad=1),
conv3a = F.Convolution2D( 384, 384, 1, wscale=w),
conv3b = F.Convolution2D( 384, 384, 1, wscale=w),
conv4 = F.Convolution2D( 384, 1024, 3, wscale=w, pad=1),
conv4a = F.Convolution2D(1024, 1024, 1, wscale=w),
conv4b = F.Convolution2D(1024, 1000, 1, wscale=w),
)
def forward(self, x_data, y_data, train=True):
x = Variable(x_data, volatile=not train)
t = Variable(y_data, volatile=not train)
h = F.relu(self.conv1(x))
h = F.relu(self.conv1a(h))
h = F.relu(self.conv1b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv2(h))
h = F.relu(self.conv2a(h))
h = F.relu(self.conv2b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv3a(h))
h = F.relu(self.conv3b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.dropout(h, train=train)
h = F.relu(self.conv4(h))
h = F.relu(self.conv4a(h))
h = F.relu(self.conv4b(h))
h = F.reshape(F.average_pooling_2d(h, 6), (x_data.shape[0], 1000))
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
| import math
from chainer import FunctionSet, Variable
import chainer.functions as F
class NIN(FunctionSet):
"""Network-in-Network example model."""
insize = 227
def __init__(self):
w = math.sqrt(2) # MSRA scaling
super(NIN, self).__init__(
conv1 = F.Convolution2D( 3, 96, 11, wscale=w, stride=4),
conv1a = F.Convolution2D( 96, 96, 1, wscale=w),
conv1b = F.Convolution2D( 96, 96, 1, wscale=w),
conv2 = F.Convolution2D( 96, 256, 5, wscale=w, pad=2),
conv2a = F.Convolution2D( 256, 256, 1, wscale=w),
conv2b = F.Convolution2D( 256, 256, 1, wscale=w),
conv3 = F.Convolution2D( 256, 384, 3, wscale=w, pad=1),
conv3a = F.Convolution2D( 384, 384, 1, wscale=w),
conv3b = F.Convolution2D( 384, 384, 1, wscale=w),
conv4 = F.Convolution2D( 384, 1024, 3, wscale=w, pad=1),
conv4a = F.Convolution2D(1024, 1024, 1, wscale=w),
conv4b = F.Convolution2D(1024, 1000, 1, wscale=w),
)
def forward(self, x_data, y_data, train=True):
x = Variable(x_data, volatile=not train)
t = Variable(y_data, volatile=not train)
h = F.relu(self.conv1(x))
h = F.relu(self.conv1a(h))
h = F.relu(self.conv1b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv2(h))
h = F.relu(self.conv2a(h))
h = F.relu(self.conv2b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.relu(self.conv3(h))
h = F.relu(self.conv3a(h))
h = F.relu(self.conv3b(h))
h = F.max_pooling_2d(h, 3, stride=2)
h = F.dropout(h, train=train)
h = F.relu(self.conv4(h))
h = F.relu(self.conv4a(h))
h = F.relu(self.conv4b(h))
h = F.average_pooling_2d(h, 6)
return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
| Python | 0.023101 |
e4fbd6f8e13861053a4a29c776ae24b934639fa5 | fix ports on yaml script | tools/scripts/mosaic/gen_yaml.py | tools/scripts/mosaic/gen_yaml.py | #!/usr/bin/env python3
#
# Create a yaml file for running a mosaic file
# Note: *requires pyyaml*
import argparse
import yaml
def address(port):
return ['127.0.0.1', port]
def create_peers(peers):
res = []
for p in peers:
res += [{'addr':address(p[1])}]
return res
def entity(role, port, peers):
return {'role':role, 'me':address(port), 'peers':create_peers(peers)}
def create_file(num_switches, num_nodes):
peers = []
peers += [('master', 40000)]
peers += [('timer', 40001)]
switch_ports = 50001
for i in range(num_switches):
peers += [('switch', switch_ports + i)]
node_ports = 60001
for i in range(num_nodes):
peers += [('node', node_ports + i)]
# convert to dictionaries
peers2 = []
for p in peers:
peers2 += [entity(p[0], p[1], peers)]
# dump out
print("---")
for i, p in enumerate(peers2):
print(yaml.dump(p, default_flow_style=True))
if i < len(peers2) - 1:
print("---")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--switches", type=int, help="number of switches", dest="num_switches", default=1)
parser.add_argument("-n", "--nodes", type=int, help="number of nodes", dest="num_nodes", default=1)
args = parser.parse_args()
create_file(args.num_switches, args.num_nodes)
if __name__ == '__main__':
main()
| #!/usr/bin/env python3
#
# Create a yaml file for running a mosaic file
# Note: *requires pyyaml*
import argparse
import yaml
def address(port):
return ['127.0.0.1', port]
def create_peers(peers):
res = []
for p in peers:
res += [{'addr':address(p[1])}]
return res
def entity(role, port, peers):
return {'role':role, 'me':address(port), 'peers':create_peers(peers)}
def create_file(num_switches, num_nodes):
peers = []
peers += [('master', 40000)]
peers += [('timer', 40001)]
switch_ports = 50000
for i in range(num_switches):
peers += [('switch', switch_ports + i)]
node_ports = 60000
for i in range(num_nodes):
peers += [('node', node_ports + i)]
# convert to dictionaries
peers2 = []
for p in peers:
peers2 += [entity(p[0], p[1], peers)]
# dump out
print("---")
for i, p in enumerate(peers2):
print(yaml.dump(p, default_flow_style=True))
if i < len(peers2) - 1:
print("---")
def main():
parser = argparse.ArgumentParser()
parser.add_argument("-s", "--switches", type=int, help="number of switches", dest="num_switches", default=1)
parser.add_argument("-n", "--nodes", type=int, help="number of nodes", dest="num_nodes", default=1)
args = parser.parse_args()
create_file(args.num_switches, args.num_nodes)
if __name__ == '__main__':
main()
| Python | 0 |
8e664b417d978d040d780dc252418fce087c47f4 | Fix version option | src/htrun/htrun.py | src/htrun/htrun.py | #
# Copyright (c) 2021-2022 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Greentea Host Tests Runner."""
from multiprocessing import freeze_support
from htrun import init_host_test_cli_params
from htrun.host_tests_runner.host_test_default import DefaultTestSelector
from htrun.host_tests_toolbox.host_functional import handle_send_break_cmd
def main():
"""Drive command line tool 'htrun' which is using DefaultTestSelector.
1. Create DefaultTestSelector object and pass command line parameters.
2. Call default test execution function run() to start test instrumentation.
"""
freeze_support()
result = 0
cli_params = init_host_test_cli_params()
if cli_params.version: # --version
import pkg_resources # part of setuptools
version = pkg_resources.require("greentea-host")[0].version
print(version)
elif cli_params.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
handle_send_break_cmd(
port=cli_params.port,
disk=cli_params.disk,
reset_type=cli_params.forced_reset_type,
baudrate=cli_params.baud_rate,
verbose=cli_params.verbose,
)
else:
test_selector = DefaultTestSelector(cli_params)
try:
result = test_selector.execute()
# Ensure we don't return a negative value
if result < 0 or result > 255:
result = 1
except (KeyboardInterrupt, SystemExit):
test_selector.finish()
result = 1
raise
else:
test_selector.finish()
return result
| #
# Copyright (c) 2021 Arm Limited and Contributors. All rights reserved.
# SPDX-License-Identifier: Apache-2.0
#
"""Greentea Host Tests Runner."""
from multiprocessing import freeze_support
from htrun import init_host_test_cli_params
from htrun.host_tests_runner.host_test_default import DefaultTestSelector
from htrun.host_tests_toolbox.host_functional import handle_send_break_cmd
def main():
"""Drive command line tool 'htrun' which is using DefaultTestSelector.
1. Create DefaultTestSelector object and pass command line parameters.
2. Call default test execution function run() to start test instrumentation.
"""
freeze_support()
result = 0
cli_params = init_host_test_cli_params()
if cli_params.version: # --version
import pkg_resources # part of setuptools
version = pkg_resources.require("htrun")[0].version
print(version)
elif cli_params.send_break_cmd: # -b with -p PORT (and optional -r RESET_TYPE)
handle_send_break_cmd(
port=cli_params.port,
disk=cli_params.disk,
reset_type=cli_params.forced_reset_type,
baudrate=cli_params.baud_rate,
verbose=cli_params.verbose,
)
else:
test_selector = DefaultTestSelector(cli_params)
try:
result = test_selector.execute()
# Ensure we don't return a negative value
if result < 0 or result > 255:
result = 1
except (KeyboardInterrupt, SystemExit):
test_selector.finish()
result = 1
raise
else:
test_selector.finish()
return result
| Python | 0.999999 |
625c70580770b5bb00a64d15e14d15c623db21ee | Update urls.py | taiga/base/utils/urls.py | taiga/base/utils/urls.py | import django_sites as sites
URL_TEMPLATE = "{scheme}://{domain}/{path}"
def build_url(path, scheme="http", domain="localhost"):
return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=path.lstrip("/"))
def is_absolute_url(path):
"""Test wether or not `path` is absolute url."""
return path.startswith("http") or path.startswith("https")
def get_absolute_url(path):
"""Return a path as an absolute url."""
if is_absolute_url(path):
return path
site = sites.get_current()
return build_url(path, scheme=site.scheme, domain=site.domain)
| import django_sites as sites
URL_TEMPLATE = "{scheme}://{domain}/{path}"
def build_url(path, scheme="http", domain="localhost"):
return URL_TEMPLATE.format(scheme=scheme, domain=domain, path=path.lstrip("/"))
def is_absolute_url(path):
"""Test wether or not `path` is absolute url."""
return path.startswith("http")
def get_absolute_url(path):
"""Return a path as an absolute url."""
if is_absolute_url(path):
return path
site = sites.get_current()
return build_url(path, scheme=site.scheme, domain=site.domain)
| Python | 0.000002 |
1656cbd6b62690017af810e795b8a23b3907a1fa | bump 1.0.2 | epubuilder/version.py | epubuilder/version.py | # coding=utf-8
__version__ = '1.0.2'
| # coding=utf-8
__version__ = '1.0.1'
| Python | 0.000003 |
c8fdcf888f6c34e8396f11b3e7ab3088af59abb6 | Add tests for slice intersection and sanitization. | distarray/tests/test_utils.py | distarray/tests/test_utils.py | import unittest
from distarray import utils
from numpy import arange
from numpy.testing import assert_array_equal
class TestMultPartitions(unittest.TestCase):
"""
Test the multiplicative parition code.
"""
def test_both_methods(self):
"""
Do the two methods of computing the multiplicative partitions agree?
"""
for s in [2, 3]:
for n in range(2, 512):
self.assertEqual(utils.mult_partitions(n, s),
utils.create_factors(n, s))
class TestSanitizeIndices(unittest.TestCase):
def test_point(self):
itype, inds = utils.sanitize_indices(1)
self.assertEqual(itype, 'point')
self.assertEqual(inds, (1,))
def test_slice(self):
itype, inds = utils.sanitize_indices(slice(1,10))
self.assertEqual(itype, 'view')
self.assertEqual(inds, (slice(1,10),))
def test_mixed(self):
provided = (5, 3, slice(7, 10, 2), 99, slice(1,10))
itype, inds = utils.sanitize_indices(provided)
self.assertEqual(itype, 'view')
self.assertEqual(inds, provided)
class TestSliceIntersection(unittest.TestCase):
def test_containment(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(2, 4))
assert_array_equal(arr[slc], arr[slice(2, 4, 1)])
def test_overlapping(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(4, 15))
assert_array_equal(arr[slc], arr[slice(4, 10)])
def test_disjoint(self):
arr = arange(20)
slc = utils.slice_intersection(slice(1,10), slice(11, 15))
assert_array_equal(arr[slc], arr[slice(11, 10)])
if __name__ == '__main__':
unittest.main(verbosity=2)
| import unittest
from distarray import utils
class TestMultPartitions(unittest.TestCase):
"""
Test the multiplicative parition code.
"""
def test_both_methods(self):
"""
Do the two methods of computing the multiplicative partitions agree?
"""
for s in [2, 3]:
for n in range(2, 512):
self.assertEqual(utils.mult_partitions(n, s),
utils.create_factors(n, s))
if __name__ == '__main__':
unittest.main(verbosity=2)
| Python | 0 |
c54fb802932717b417ed2e189da85b500c8e06b8 | Update rasa_core/policies/mapping_policy.py | rasa_core/policies/mapping_policy.py | rasa_core/policies/mapping_policy.py | import logging
import os
from typing import Any, List, Text, Optional
from rasa_core.actions.action import (ACTION_LISTEN_NAME, ACTION_RESTART_NAME,
ACTION_BACK_NAME)
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies.policy import Policy
from rasa_core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class MappingPolicy(Policy):
"""Policy which maps intents directly to actions.
Intents can be assigned actions in the domain file which are to be
executed whenever the intent is detected. This policy takes precedence over
any other policy."""
def __init__(self, priority: int = 5) -> None:
"""Create a new Mapping policy."""
super(MappingPolicy, self).__init__(priority=priority)
def train(self, *args, **kwargs) -> None:
"""Does nothing. This policy is deterministic."""
pass
def predict_action_probabilities(self, tracker: DialogueStateTracker,
domain: Domain) -> List[float]:
"""Predicts the assigned action.
If the current intent is assigned to an action that action will be
predicted with the highest probability of all policies. If it is not
the policy will predict zero for every action."""
prediction = [0.0] * domain.num_actions
if tracker.latest_action_name == ACTION_LISTEN_NAME:
intent = tracker.latest_message.intent.get('name')
action = domain.intent_properties.get(intent, {}).get('triggers')
if action:
idx = domain.index_for_action(action)
if idx is None:
logger.warning("MappingPolicy tried to predict unkown "
"action '{}'.".format(action))
else:
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'restart':
idx = domain.index_for_action(ACTION_RESTART_NAME)
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'back':
idx = domain.index_for_action(ACTION_BACK_NAME)
prediction[idx] = 1
return prediction
def persist(self, *args) -> None:
"""Does nothing since there is no data to be saved."""
pass
@classmethod
def load(cls, *args) -> 'MappingPolicy':
"""Just returns the class since there is no data to be loaded."""
return cls()
| import logging
import os
from typing import Any, List, Text, Optional
from rasa_core.actions.action import (ACTION_LISTEN_NAME, ACTION_RESTART_NAME,
ACTION_BACK_NAME)
from rasa_core import utils
from rasa_core.domain import Domain
from rasa_core.policies.policy import Policy
from rasa_core.trackers import DialogueStateTracker
logger = logging.getLogger(__name__)
class MappingPolicy(Policy):
"""Policy which maps intents directly to actions.
Intents can be assigned actions in the domain file which are to be
executed whenever the intent is detected. This policy takes precedence over
any other policy."""
def __init__(self, priority: Optional[int] = 5) -> None:
"""Create a new Mapping policy."""
super(MappingPolicy, self).__init__(priority=priority)
def train(self, *args, **kwargs) -> None:
"""Does nothing. This policy is deterministic."""
pass
def predict_action_probabilities(self, tracker: DialogueStateTracker,
domain: Domain) -> List[float]:
"""Predicts the assigned action.
If the current intent is assigned to an action that action will be
predicted with the highest probability of all policies. If it is not
the policy will predict zero for every action."""
prediction = [0.0] * domain.num_actions
if tracker.latest_action_name == ACTION_LISTEN_NAME:
intent = tracker.latest_message.intent.get('name')
action = domain.intent_properties.get(intent, {}).get('triggers')
if action:
idx = domain.index_for_action(action)
if idx is None:
logger.warning("MappingPolicy tried to predict unkown "
"action '{}'.".format(action))
else:
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'restart':
idx = domain.index_for_action(ACTION_RESTART_NAME)
prediction[idx] = 1
elif tracker.latest_message.intent.get('name') == 'back':
idx = domain.index_for_action(ACTION_BACK_NAME)
prediction[idx] = 1
return prediction
def persist(self, *args) -> None:
"""Does nothing since there is no data to be saved."""
pass
@classmethod
def load(cls, *args) -> 'MappingPolicy':
"""Just returns the class since there is no data to be loaded."""
return cls()
| Python | 0 |
673b123b147b99f49357b02c227b1d34ae653485 | Set up some realistic defaults. | bootstrap.py | bootstrap.py | """This script will bootstrap the database to a minimal level for usage.
You will be left with the following buildings:
* Town Hall (homely).
* Farm (requires Town Hall).
* Stable (requires Farm)
You will be left with the following land features:
* Mine (provides gold)
* Quarry (provides stone)
* Lake (provides water)
* Forest (provides wood)
* Field (provides food)
You will be left with the following mobiles:
* Peasant (provided by Town Hall).
Peasants can build town halls and farms, and can exploit wood, and gold.
* Farmer (provided by Farm)
Farmers can build farms and stables, and can exploit food, water, and wood.
* Scout (provided by Stable)
"""
import os.path
from server.db import BuildingType, FeatureType, MobileType, dump
from server.db.util import _filename as fn
def main():
if os.path.isfile(fn):
return print('Refusing to continue with existing database file.')
town_hall = BuildingType(
name='Town Hall', homely=True, gold=15, wood=30, stone=10
)
farm = BuildingType(
name='Farm', depends=town_hall, gold=5, wood=5, stone=1
)
stable = BuildingType(
name='Stable', depends=farm, wood=30, stone=15, gold=30
)
for thing in (town_hall, farm, stable):
thing.save()
peasant = MobileType(name='Peasant', wood=1, gold=1)
farmer = MobileType(name='Farmer', food=1, water=1)
scout = MobileType(name='Scout', stone=1)
for thing in (peasant, farmer, scout):
thing.save()
peasant.add_building(town_hall)
peasant.add_building(farm)
town_hall.add_recruit(peasant, food=1, water=1, gold=3).save()
farmer.add_building(farm)
farm.add_recruit(farmer, food=2, gold=4, water=2)
stable.add_recruit(scout, food=4, water=5, gold=6)
FeatureType(name='Mine', gold=1).save()
FeatureType(name='Quarry', stone=1).save()
FeatureType(name='Lake', water=1).save()
FeatureType(name='Forest', wood=1).save()
FeatureType(name='Field', food=1).save()
dump()
print('Done.')
if __name__ == '__main__':
main()
| """This script will bootstrap the database to a minimal level for usage.
You will be left with the following buildings:
* Town Hall (homely).
* Farm (requires Town Hall).
* Stable (requires Farm)
You will be left with the following land features:
* Mine (provides gold)
* Quarry (provides stone)
* Lake (provides water)
* Forest (provides wood)
* Field (provides food)
You will be left with the following mobiles:
* Peasant (provided by Town Hall).
Peasants can build town halls and farms, and can exploit wood, and gold.
* Farmer (provided by Farm)
Farmers can build farms and stables, and can exploit food, water, and wood.
* Scout (provided by Stable)
"""
import os.path
from server.db import BuildingType, FeatureType, MobileType, dump
from server.db.util import _filename as fn
def main():
if os.path.isfile(fn):
return print('Refusing to continue with existing database file.')
town_hall = BuildingType(name='Town Hall', homely=True)
farm = BuildingType(name='Farm', depends=town_hall)
stable = BuildingType(name='Stable', depends=farm)
for thing in (town_hall, farm, stable):
thing.save()
peasant = MobileType(name='Peasant', wood=1, gold=1)
peasant.save()
for t in (town_hall, farm):
t.builders.append(peasant)
town_hall.add_recruit(peasant, food=1, water=1, gold=1).save()
farmer = MobileType(name='Farmer', food=1, water=1, wood=1)
farmer.save()
for t in (farm, stable):
t.builders.append(farmer)
farm.add_recruit(farmer, food=2, gold=2, water=2)
scout = MobileType(name='Scout', stone=1)
scout.save()
stable.add_recruit(scout, food=4, water=5, gold=3)
FeatureType(name='Mine', gold=1).save()
FeatureType(name='Quarry', stone=1).save()
FeatureType(name='Lake', water=1).save()
FeatureType(name='Forest', wood=1).save()
FeatureType(name='Field', food=1).save()
dump()
print('Done.')
if __name__ == '__main__':
main()
| Python | 0 |
208b6cf99d90494df9a0f6d66a0ea3669ff5fe66 | remove get, add ls and rm | dog/ext/config.py | dog/ext/config.py | import logging
from discord.ext import commands
from dog import Cog
log = logging.getLogger(__name__)
class Config(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.permitted_keys = [
'woof_response'
]
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def config(self, ctx):
""" Manages server-specific configuration for the bot. """
@config.command(name='set')
async def config_set(self, ctx, name: str, value: str):
""" Sets a config field for this server. """
if name not in self.permitted_keys:
await ctx.send('That configuration value is not allowed.')
return
await self.bot.redis.set(f'{ctx.guild.id}:{name}', value)
await ctx.send('\N{OK HAND SIGN}')
@config.command(name='permitted')
async def config_permitted(self, ctx):
""" Views permitted configuration keys. """
await ctx.send(', '.join(self.permitted_keys))
@config.command(name='is_set')
async def config_is_set(self, ctx, name: str):
""" Checks if a configuration key is set. """
is_set = await self.bot.config_is_set(ctx.guild, name)
await ctx.send('Yes, it is set.' if is_set else 'No, it is not set.')
@config.command(name='list', aliases=['ls'])
async def config_list(self, ctx):
""" Lists set configuration keys for this server. """
keys = [k.decode().split(':')[1] for k in await self.bot.redis.keys(f'{ctx.guild.id}:*')]
await ctx.send('Set configuration keys in this server: ' + ', '.join(keys))
@config.command(name='remove', aliases=['rm', 'del'])
async def config_remove(self, ctx, name: str):
""" Removes a config field for this server. """
await self.bot.redis.delete(f'{ctx.guild.id}:{name}')
await ctx.send('\N{OK HAND SIGN}')
def setup(bot):
bot.add_cog(Config(bot))
| import logging
from discord.ext import commands
from dog import Cog
log = logging.getLogger(__name__)
class Config(Cog):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.permitted_keys = [
'woof_response'
]
@commands.group()
@commands.guild_only()
@commands.has_permissions(manage_guild=True)
async def config(self, ctx):
""" Manages server-specific configuration for the bot. """
@config.command(name='set')
async def config_set(self, ctx, name: str, value: str):
""" Sets a config field for this server. """
if name not in self.permitted_keys:
await ctx.send('That configuration value is not allowed.')
return
await self.bot.redis.set(f'{ctx.guild.id}:{name}', value)
await ctx.send('\N{OK HAND SIGN}')
@config.command(name='permitted')
async def config_permitted(self, ctx):
""" Views permitted configuration keys. """
await ctx.send(', '.join(self.permitted_keys))
@config.command(name='is_set')
async def config_is_set(self, ctx, name: str):
""" Checks if a configuration key is set. """
is_set = await self.bot.config_is_set(ctx.guild, name)
await ctx.send('Yes, it is set.' if is_set else 'No, it is not set.')
@config.command(name='get')
async def config_get(self, ctx, name: str):
""" Fetches a config field for this server. """
result = await self.bot.redis.get(f'{ctx.guild.id}:{name}')
if result is not None:
result = result.decode()
else:
result = '`<nothing>`'
await ctx.send(f'`{name}`: {result}')
def setup(bot):
bot.add_cog(Config(bot))
| Python | 0.000002 |
546f1188444365a365dc1dd7a81c2ffc974cf8b2 | change of documentation in the param vector class | ParamVector.py | ParamVector.py | class ParamVector(object):
"""
This class represents the vectors that defines a firewall
a ParamVector is a class that represents a vector that defines a firewall.
we have our indicator functions that should get parameters, lets call these functions g1...gn
for each gi we can say that there is a vector (ai1,...,aim) of scalars. so we can represent every firewall Fl
as the sum of fi^gi where fi is a mutate function.
so we can think about a vector of different sized vectors, where every vector i is:
fi, ai1,...aim
"""
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
pass | class ParamVector(object):
"""
This class represents the vectors that defines a firewall
a ParamVector is a class that represents a vector that defines a firewall.
we have our indicator functions that should get parameters, lets call these functions g1...gn
for each gi we can say that there is a vector (ai1,...,aim) of scalars. so we can represent every firewall Fl
as the sum of Fi^gi where Fi is a mutate function.
so we can think about a vector of different sized vectors, where every vector i is:
fi, ai1,...aim
"""
# the functions that can be used to mutate a ParamVector, instances of ProbabilityFunction
mutate_functions = []
@staticmethod
def generate_random_data():
"""
creates a ParamVector with random data
:return: an instance of ParamVector that is defined using random data
"""
pass | Python | 0 |
1c7317ea85206541c8d518a3fc6cb338ad6873d3 | Fix requires_auth decorator | fickle/api.py | fickle/api.py | import os
from functools import wraps
import flask
from flask import request, json
USERNAME = 'fickle'
def Response(data, status = 200):
body = json.dumps(data)
return flask.Response(body, status = status, mimetype = 'application/json')
def SuccessResponse(dataset_id = None):
return Response({ 'success': True, 'id': dataset_id })
def ErrorResponse(status = 400):
return Response({ 'success': False }, status = status)
def check_auth(username, password):
setting = os.environ.get('FICKLE_PASSWORD')
if setting:
return username == USERNAME and password == setting
else:
return True
def requires_auth(f):
if not bool(os.environ.get('FICKLE_PASSWORD')):
return f
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return ErrorResponse(403)
return f(*args, **kwargs)
return decorated
def API(name, backend):
app = flask.Flask(name)
app.config.from_object(name)
@app.route('/')
@requires_auth
def api_root():
return SuccessResponse(backend.dataset_id)
@app.route('/load', methods=['POST'])
@requires_auth
def api_load():
backend.load(request.json)
return SuccessResponse(backend.dataset_id)
@app.route('/fit', methods=['POST'])
@requires_auth
def api_fit():
if not backend.loaded():
return ErrorResponse()
backend.fit()
return SuccessResponse(backend.dataset_id)
@app.route('/validate', methods=['POST'])
@requires_auth
def api_validate():
if not backend.loaded():
return ErrorResponse()
data = backend.validate()
return Response(data)
@app.route('/predict', methods=['POST'])
@requires_auth
def api_predict():
if not backend.trained():
return ErrorResponse()
data = backend.predict(request.json).tolist()
return Response(data)
return app
| import os
from functools import wraps
import flask
from flask import request, json
USERNAME = 'fickle'
def Response(data, status = 200):
body = json.dumps(data)
return flask.Response(body, status = status, mimetype = 'application/json')
def SuccessResponse(dataset_id = None):
return Response({ 'success': True, 'id': dataset_id })
def ErrorResponse(status = 400):
return Response({ 'success': False }, status = status)
def check_auth(username, password):
setting = os.environ.get('FICKLE_PASSWORD')
if setting:
return username == USERNAME and password == setting
else:
return True
def requires_auth(f):
@wraps(f)
def decorated(*args, **kwargs):
auth = request.authorization
if not auth or not check_auth(auth.username, auth.password):
return ErrorResponse(403)
return f(*args, **kwargs)
return decorated
def API(name, backend):
app = flask.Flask(name)
app.config.from_object(name)
@app.route('/')
@requires_auth
def api_root():
return SuccessResponse(backend.dataset_id)
@app.route('/load', methods=['POST'])
@requires_auth
def api_load():
backend.load(request.json)
return SuccessResponse(backend.dataset_id)
@app.route('/fit', methods=['POST'])
@requires_auth
def api_fit():
if not backend.loaded():
return ErrorResponse()
backend.fit()
return SuccessResponse(backend.dataset_id)
@app.route('/validate', methods=['POST'])
@requires_auth
def api_validate():
if not backend.loaded():
return ErrorResponse()
data = backend.validate()
return Response(data)
@app.route('/predict', methods=['POST'])
@requires_auth
def api_predict():
if not backend.trained():
return ErrorResponse()
data = backend.predict(request.json).tolist()
return Response(data)
return app
| Python | 0.000001 |
9437e024b1e1630e06d1b05972eb9049af442be0 | fix bad copy/paste | build_all.py | build_all.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
# list of projects
progs = [
{'path': 'apps/EHD', 'travis': True},
{'path': 'apps/fractal/cpp', 'travis': False},
{'path': 'apps/GenMAI', 'travis': True},
{'path': 'apps/md5', 'travis': True},
{'path': 'apps/minibarreTE', 'travis': True},
{'path': 'cmake/findGMM', 'travis': True},
{'path': 'cmake/findMKL', 'travis': False},
{'path': 'langtests/cpp11', 'travis': True},
{'path': 'langtests/exporttpl', 'travis': True},
{'path': 'langtests/singleton', 'travis': True},
{'path': 'metafor/arbre', 'travis': True},
{'path': 'metafor/drmeta', 'travis': True},
{'path': 'metafor/mailsph', 'travis': False},
{'path': 'sandbox/fortran', 'travis': True},
{'path': 'sandbox/fortranc', 'travis': True},
{'path': 'student/dcm1', 'travis': False},
{'path': 'student/dcm2', 'travis': True},
{'path': 'student/lejeune', 'travis': True},
{'path': 'student/mico', 'travis': True},
{'path': 'student/ndh', 'travis': True},
]
def getArgs():
# parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--travis", help="run only travis tests",
action="store_true")
args = parser.parse_args()
return args
def build_one(basedir, p):
"""build project 'p'
"""
args = getArgs()
fullpath = os.path.join(basedir, *(p['path'].split('/')))
if(p['travis'] or not args.travis):
print '=> running build.py in', fullpath
os.chdir(fullpath)
subprocess.call(['python', 'build.py'])
def build_all(basedir):
"""build everything in 'basedir'
"""
for p in progs:
build_one(basedir, p)
def rm_builds(basedir):
"""remove all 'build' directories in 'basedir'
"""
import shutil
for path, subdirs, files in os.walk(basedir):
for name in subdirs:
if name == 'build':
fullname = os.path.join(path, name)
print 'removing', fullname
shutil.rmtree(fullname)
if __name__ == "__main__":
basedir = os.path.abspath(os.path.dirname(__file__))
rm_builds(basedir)
build_all(basedir)
| #! /usr/bin/env python
# -*- coding: utf-8 -*-
import os
import subprocess
# list of projects
progs = [
{'path': 'apps/EHD', 'travis': True},
{'path': 'apps/fractal/cpp', 'travis': False},
{'path': 'apps/GenMAI', 'travis': True},
{'path': 'apps/md5', 'travis': True},
{'path': 'apps/minibarreTE', 'travis': True},
{'path': 'cmake/findGMM', 'travis': True},
{'path': 'cmake/findMKL', 'travis': False},
{'path': 'langtests/cpp11', 'travis': True},
{'path': 'langtests/exporttpl', 'travis': True},
{'path': 'langtests/singleton', 'travis': True},
{'path': 'metafor/arbre', 'travis': True},
{'path': 'metafor/drmeta', 'travis': True},
{'path': 'metafor/mailsph', 'travis': False},
{'path': 'sandbox/fortran', 'travis': True},
{'path': 'sandbox/fortranc', 'travis': True},
{'path': 'student/dcm1', 'travis': False},
{'path': 'student/dcm2', 'travis': True},
{'path': 'student/lejeune', 'lejeune': True},
{'path': 'student/mico', 'travis': True},
{'path': 'student/ndh', 'travis': True},
]
def getArgs():
# parse args
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--travis", help="run only travis tests",
action="store_true")
args = parser.parse_args()
return args
def build_one(basedir, p):
"""build project 'p'
"""
args = getArgs()
fullpath = os.path.join(basedir, *(p['path'].split('/')))
if(p['travis'] or not args.travis):
print '=> running build.py in', fullpath
os.chdir(fullpath)
subprocess.call(['python', 'build.py'])
def build_all(basedir):
"""build everything in 'basedir'
"""
for p in progs:
build_one(basedir, p)
def rm_builds(basedir):
"""remove all 'build' directories in 'basedir'
"""
import shutil
for path, subdirs, files in os.walk(basedir):
for name in subdirs:
if name == 'build':
fullname = os.path.join(path, name)
print 'removing', fullname
shutil.rmtree(fullname)
if __name__ == "__main__":
basedir = os.path.abspath(os.path.dirname(__file__))
rm_builds(basedir)
build_all(basedir)
| Python | 0.000009 |
a8cb15b1983c48547edfeb53bfb63245f7e7c892 | Revert "log integrations with zabbix through pyzabbix" | dbaas_zabbix/__init__.py | dbaas_zabbix/__init__.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
from pyzabbix import ZabbixAPI
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
credentials = kwargs['credentials']
del kwargs['databaseinfra']
del kwargs['credentials']
zabbix_api = ZabbixAPI
if kwargs.get('zabbix_api'):
zabbix_api = kwargs.get('zabbix_api')
del kwargs['zabbix_api']
dbaas_api = DatabaseAsAServiceApi(databaseinfra, credentials)
return ProviderFactory.factory(dbaas_api, zabbix_api=zabbix_api, **kwargs)
| #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import sys
from dbaas_zabbix.dbaas_api import DatabaseAsAServiceApi
from dbaas_zabbix.provider_factory import ProviderFactory
from pyzabbix import ZabbixAPI
stream = logging.StreamHandler(sys.stdout)
stream.setLevel(logging.DEBUG)
log = logging.getLogger('pyzabbix')
log.addHandler(stream)
log.setLevel(logging.DEBUG)
def factory_for(**kwargs):
databaseinfra = kwargs['databaseinfra']
credentials = kwargs['credentials']
del kwargs['databaseinfra']
del kwargs['credentials']
zabbix_api = ZabbixAPI
if kwargs.get('zabbix_api'):
zabbix_api = kwargs.get('zabbix_api')
del kwargs['zabbix_api']
dbaas_api = DatabaseAsAServiceApi(databaseinfra, credentials)
return ProviderFactory.factory(dbaas_api, zabbix_api=zabbix_api, **kwargs)
| Python | 0 |
b3c1b3b66d1c720172e731d1bfc44cfb44c992a3 | Revert of [Android] Re-enable content_browsertests on main waterfall. (https://codereview.chromium.org/132403005/) | build/android/pylib/gtest/gtest_config.py | build/android/pylib/gtest/gtest_config.py | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_browsertests',
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'cc_unittests',
'components_unittests',
'content_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
| # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Configuration file for android gtest suites."""
# Add new suites here before upgrading them to the stable list below.
EXPERIMENTAL_TEST_SUITES = [
'content_gl_tests',
]
# Do not modify this list without approval of an android owner.
# This list determines which suites are run by default, both for local
# testing and on android trybots running on commit-queue.
STABLE_TEST_SUITES = [
'android_webview_unittests',
'base_unittests',
'cc_unittests',
'components_unittests',
'content_unittests',
'gl_tests',
'gpu_unittests',
'ipc_tests',
'media_unittests',
'net_unittests',
'sql_unittests',
'sync_unit_tests',
'ui_unittests',
'unit_tests',
'webkit_compositor_bindings_unittests',
'webkit_unit_tests',
'breakpad_unittests',
'sandbox_linux_unittests',
'content_browsertests',
]
WEBRTC_CHROMIUM_TEST_SUITES = [
'content_browsertests',
]
WEBRTC_NATIVE_TEST_SUITES = [
'audio_decoder_unittests',
'common_audio_unittests',
'common_video_unittests',
'modules_tests',
'modules_unittests',
'neteq_unittests',
'system_wrappers_unittests',
'test_support_unittests',
'tools_unittests',
'video_engine_core_unittests',
'voice_engine_unittests',
]
| Python | 0.000251 |
3812403655153e86a8b0e1ac68c9b15e69d6a4e3 | Update BUILD_OSS to 4770. | src/data/version/mozc_version_template.bzl | src/data/version/mozc_version_template.bzl | # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 28
# BUILD number used for the OSS version.
BUILD_OSS = 4770
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 11
| # Copyright 2010-2021, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR = 2
MINOR = 28
# BUILD number used for the OSS version.
BUILD_OSS = 4750
# Number to be increased. This value may be replaced by other tools.
BUILD = BUILD_OSS
# Represent the platform and release channel.
REVISION = 100
REVISION_MACOS = REVISION + 1
# This version represents the version of Mozc IME engine (converter, predictor,
# etc.). This version info is included both in the Mozc server and in the Mozc
# data set file so that the Mozc server can accept only the compatible version
# of data set file. The engine version must be incremented when:
# * POS matcher definition and/or conversion models were changed,
# * New data are added to the data set file, and/or
# * Any changes that loose data compatibility are made.
ENGINE_VERSION = 24
# This version is used to manage the data version and is included only in the
# data set file. DATA_VERSION can be incremented without updating
# ENGINE_VERSION as long as it's compatible with the engine.
# This version should be reset to 0 when ENGINE_VERSION is incremented.
DATA_VERSION = 11
| Python | 0 |
b6d747599661f3ce19b4d2f6ea9f80ec9839a2d8 | Update couchm.reactor.py | resources/reactors/couchm.reactor.py | resources/reactors/couchm.reactor.py | #!/usr/bin/python
import argparse
import mosquitto
#from pushover import PushoverClient
import os, sys
import urllib2
import json, base64
import ConfigParser
#Posting data to couchDB
def post(doc):
global config
url = 'http://%(server)s/%(database)s/_design/energy_data/_update/measurement' % config
# print url
request = urllib2.Request(url, data=json.dumps(doc))
auth = base64.encodestring('%(user)s:%(password)s' % config).replace('\n', '')
request.add_header('Authorization', 'Basic ' + auth)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'POST'
urllib2.urlopen(request, timeout=1)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='host', default="localhost", help='MQTT host send results to')
parser.add_argument('-t', dest='topic', default="", help='MQTT topic to process')
parser.add_argument('-m', dest='message', default="", help='MQTT message to process')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#Load config file...
ConfigFile = path + "/couchm.cfg"
try:
f = open(ConfigFile,"r")
f.close()
except:
print "Please provide a valid config file! In the same folder as the couchDB script!"
exit(1)
#Read config file.
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(ConfigFile)
#Load basic config.
config = {}
config["user"] = config.get("CouchDB","user")
config["password"] = config.get("CouchDB","password")
config["server"] = config.get("CouchDB","server")
config["database"] = config.get("CouchDB","database")
source = config.get("CouchM","source")
if args.message[0] == '"':
args.message = args.message[1:]
if args.message[-1] == '"':
args.message = args.message[:-1]
data = json.loads(args.message)
#Post data to couchm
post({
"source": source,
"timestamp": str(data["time"]),
"ElectricPower": str(data["value"]),
"ElectricEnergy": str(0),
"PowerThreshold": str(1),
"ElectricPowerUnoccupied": "0",
"ElectricEnergyOccupied": "0",
"ElectricEnergyUnoccupied": "0"
})
| #!/usr/bin/python
import argparse
import mosquitto
#from pushover import PushoverClient
import os, sys
import urllib2
import json, base64
#Posting data to couchDB
def post(doc):
global config
url = 'http://%(server)s/%(database)s/_design/energy_data/_update/measurement' % config
# print url
request = urllib2.Request(url, data=json.dumps(doc))
auth = base64.encodestring('%(user)s:%(password)s' % config).replace('\n', '')
request.add_header('Authorization', 'Basic ' + auth)
request.add_header('Content-Type', 'application/json')
request.get_method = lambda: 'POST'
urllib2.urlopen(request, timeout=1)
return
if __name__ == '__main__':
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument('-h', dest='host', default="localhost", help='MQTT host send results to')
parser.add_argument('-t', dest='topic', default="", help='MQTT topic to process')
parser.add_argument('-m', dest='message', default="", help='MQTT message to process')
args = parser.parse_args()
#Where am I
path = os.path.abspath(os.path.dirname(sys.argv[0]))
#Load config file...
ConfigFile = path + "/couchm.cfg"
try:
f = open(ConfigFile,"r")
f.close()
except:
print "Please provide a valid config file! In the same folder as the couchDB script!"
exit(1)
#Read config file.
config = ConfigParser.RawConfigParser(allow_no_value=True)
config.read(ConfigFile)
#Load basic config.
config = {}
config["user"] = config.get("CouchDB","user")
config["password"] = config.get("CouchDB","password")
config["server"] = config.get("CouchDB","server")
config["database"] = config.get("CouchDB","database")
source = config.get("CouchM","source")
if args.message[0] == '"':
args.message = args.message[1:]
if args.message[-1] == '"':
args.message = args.message[:-1]
data = json.loads(args.message)
#Post data to couchm
post({
"source": source,
"timestamp": str(data["time"]),
"ElectricPower": str(data["value"]),
"ElectricEnergy": str(0),
"PowerThreshold": str(1),
"ElectricPowerUnoccupied": "0",
"ElectricEnergyOccupied": "0",
"ElectricEnergyUnoccupied": "0"
})
| Python | 0 |
ad5df2624e19779f264443f9ec779ab8a018baaa | Add socket based fallback approach in get_local_ip | catt/util.py | catt/util.py | import ipaddress
import json
import socket
import tempfile
import time
from pathlib import Path
import click
import ifaddr
def warning(msg):
click.secho("Warning: ", fg="red", nl=False, err=True)
click.echo("{}.".format(msg), err=True)
def echo_json(data_dict):
click.echo(json.dumps(data_dict, indent=4, default=str))
def guess_mime(path):
# source: https://developers.google.com/cast/docs/media
extension = Path(path).suffix.lower()
extensions = {
".mp4": "video/mp4",
".m4a": "audio/mp4",
".mp3": "audio/mp3",
".mpa": "audio/mpeg",
".webm": "video/webm",
".mkv": "video/x-matroska",
".bmp": "image/bmp",
".jpg": "image/jpeg",
".gif": "image/gif",
".png": "image/png",
".webp": "image/web",
}
return extensions.get(extension, "video/mp4")
def hunt_subtitles(video):
"""Searches for subtitles in the current folder"""
video_path = Path(video)
video_path_stem_lower = video_path.stem.lower()
for entry_path in video_path.parent.iterdir():
if entry_path.is_dir():
continue
if entry_path.stem.lower().startswith(video_path_stem_lower) and entry_path.suffix.lower() in [".vtt", ".srt"]:
return str(entry_path.resolve())
return None
def create_temp_file(content):
with tempfile.NamedTemporaryFile(mode="w+b", suffix=".vtt", delete=False) as tfile:
tfile.write(content.encode())
return tfile.name
def human_time(seconds: int):
return time.strftime("%H:%M:%S", time.gmtime(seconds))
def get_local_ip(host):
"""
The primary ifaddr based approach, tries to guess the local ip from the cc ip,
by comparing the subnet of ip-addresses of all the local adapters to the subnet of the cc ip.
This should work on all platforms, but requires the catt box and the cc to be on the same subnet.
As a fallback we use a socket based approach, that does not suffer from this limitation, but
might not work on all platforms.
"""
host_ipversion = type(ipaddress.ip_address(host))
for adapter in ifaddr.get_adapters():
for adapter_ip in adapter.ips:
aip = adapter_ip.ip[0] if isinstance(adapter_ip.ip, tuple) else adapter_ip.ip
try:
if not isinstance(ipaddress.ip_address(aip), host_ipversion):
continue
except ValueError:
continue
ipt = [(ip, adapter_ip.network_prefix) for ip in (aip, host)]
catt_net, cc_net = [ipaddress.ip_network("{0}/{1}".format(*ip), strict=False) for ip in ipt]
if catt_net == cc_net:
return aip
else:
continue
return [
(s.connect(("8.8.8.8", 53)), s.getsockname()[0], s.close())
for s in [socket.socket(socket.AF_INET, socket.SOCK_DGRAM)]
][0][1]
def is_ipaddress(device):
try:
ipaddress.ip_address(device)
except ValueError:
return False
else:
return True
| import ipaddress
import json
import tempfile
import time
from pathlib import Path
import click
import ifaddr
def warning(msg):
click.secho("Warning: ", fg="red", nl=False, err=True)
click.echo("{}.".format(msg), err=True)
def echo_json(data_dict):
click.echo(json.dumps(data_dict, indent=4, default=str))
def guess_mime(path):
# source: https://developers.google.com/cast/docs/media
extension = Path(path).suffix.lower()
extensions = {
".mp4": "video/mp4",
".m4a": "audio/mp4",
".mp3": "audio/mp3",
".mpa": "audio/mpeg",
".webm": "video/webm",
".mkv": "video/x-matroska",
".bmp": "image/bmp",
".jpg": "image/jpeg",
".gif": "image/gif",
".png": "image/png",
".webp": "image/web",
}
return extensions.get(extension, "video/mp4")
def hunt_subtitles(video):
"""Searches for subtitles in the current folder"""
video_path = Path(video)
video_path_stem_lower = video_path.stem.lower()
for entry_path in video_path.parent.iterdir():
if entry_path.is_dir():
continue
if entry_path.stem.lower().startswith(video_path_stem_lower) and entry_path.suffix.lower() in [".vtt", ".srt"]:
return str(entry_path.resolve())
return None
def create_temp_file(content):
with tempfile.NamedTemporaryFile(mode="w+b", suffix=".vtt", delete=False) as tfile:
tfile.write(content.encode())
return tfile.name
def human_time(seconds: int):
return time.strftime("%H:%M:%S", time.gmtime(seconds))
def get_local_ip(host):
for adapter in ifaddr.get_adapters():
for adapter_ip in adapter.ips:
aip = adapter_ip.ip[0] if isinstance(adapter_ip.ip, tuple) else adapter_ip.ip
try:
if not isinstance(ipaddress.ip_address(host), type(ipaddress.ip_address(aip))):
raise ValueError
except ValueError:
continue
ipt = [(ip, adapter_ip.network_prefix) for ip in (aip, host)]
catt_net, cc_net = [ipaddress.ip_network("{0}/{1}".format(*ip), strict=False) for ip in ipt]
if catt_net == cc_net:
return aip
else:
continue
def is_ipaddress(device):
try:
ipaddress.ip_address(device)
except ValueError:
return False
else:
return True
| Python | 0 |
7a2fd7bbdaed3ffda3cb8740d38e5f3e88dd8ce8 | add name for the thread | update.py | update.py | from time import time
from app import db
import argparse
from jobs import update_registry
from util import elapsed
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
examples of calling this:
# update everything
python update.py Person.refresh --limit 10 --chunk 5 --rq
# update one thing not using rq
python update.py Package.test --id 0000-1111-2222-3333
"""
def parse_update_optional_args(parser):
# just for updating lots
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=10, type=int, help="how many to take off db at once")
parser.add_argument('--after', nargs="?", type=str, help="minimum id or id start, ie 0000-0001")
parser.add_argument('--rq', action="store_true", default=False, help="do jobs in this thread")
parser.add_argument('--order', action="store_true", default=True, help="order them")
parser.add_argument('--append', action="store_true", default=False, help="append, dont' clear queue")
parser.add_argument('--name', nargs="?", type=str, help="name for the thread")
# just for updating one
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update")
parser.add_argument('--doi', nargs="?", type=str, help="doi of the one thing you want to update")
# parse and run
parsed_args = parser.parse_args()
return parsed_args
def run_update(parsed_args):
update = update_registry.get(parsed_args.fn)
start = time()
#convenience method for handling an doi
if parsed_args.doi:
from publication import Crossref
from util import clean_doi
my_pub = db.session.query(Crossref).filter(Crossref.id==clean_doi(parsed_args.doi)).first()
parsed_args.id = my_pub.id
print u"Got database hit for this doi: {}".format(my_pub.id)
update.run(**vars(parsed_args))
db.session.remove()
print "finished update in {} secconds".format(elapsed(start))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# for everything
parser.add_argument('fn', type=str, help="what function you want to run")
parsed_args = parse_update_optional_args(parser)
run_update(parsed_args)
| from time import time
from app import db
import argparse
from jobs import update_registry
from util import elapsed
# needs to be imported so the definitions get loaded into the registry
import jobs_defs
"""
examples of calling this:
# update everything
python update.py Person.refresh --limit 10 --chunk 5 --rq
# update one thing not using rq
python update.py Package.test --id 0000-1111-2222-3333
"""
def parse_update_optional_args(parser):
# just for updating lots
parser.add_argument('--limit', "-l", nargs="?", type=int, help="how many jobs to do")
parser.add_argument('--chunk', "-ch", nargs="?", default=10, type=int, help="how many to take off db at once")
parser.add_argument('--after', nargs="?", type=str, help="minimum id or id start, ie 0000-0001")
parser.add_argument('--rq', action="store_true", default=False, help="do jobs in this thread")
parser.add_argument('--order', action="store_true", default=True, help="order them")
parser.add_argument('--append', action="store_true", default=False, help="append, dont' clear queue")
# just for updating one
parser.add_argument('--id', nargs="?", type=str, help="id of the one thing you want to update")
parser.add_argument('--doi', nargs="?", type=str, help="doi of the one thing you want to update")
# parse and run
parsed_args = parser.parse_args()
return parsed_args
def run_update(parsed_args):
update = update_registry.get(parsed_args.fn)
start = time()
#convenience method for handling an doi
if parsed_args.doi:
from publication import Crossref
from util import clean_doi
my_pub = db.session.query(Crossref).filter(Crossref.id==clean_doi(parsed_args.doi)).first()
parsed_args.id = my_pub.id
print u"Got database hit for this doi: {}".format(my_pub.id)
update.run(**vars(parsed_args))
db.session.remove()
print "finished update in {} secconds".format(elapsed(start))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Run stuff.")
# for everything
parser.add_argument('fn', type=str, help="what function you want to run")
parsed_args = parse_update_optional_args(parser)
run_update(parsed_args)
| Python | 0 |
ce625cb3c6769e859d47b3bcc90bc772b7d92a3e | use default widget many2one in tree view | partner_multi_relation_tabs/tablib/tab.py | partner_multi_relation_tabs/tablib/tab.py | # Copyright 2014-2018 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import logging
from lxml import etree
from odoo import _
from odoo.osv.orm import transfer_modifiers_to_node
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
NAME_PREFIX = 'relation_ids_tab'
class Tab(object):
"""Encapsulate the information on a tab in the database."""
def __init__(self, tab_record):
"""Create tab from tab_record.
In this version tab_record can be assumed to be a partner.relation.tab.
"""
self.tab_record = tab_record
self.name = tab_record.code
def get_fieldname(self):
return '%s_%s' % (NAME_PREFIX, self.tab_record.id)
def get_visible_fieldname(self):
return '%s_visible' % self.get_fieldname()
def create_page(self):
tab_page = etree.Element('page')
self._set_page_attrs(tab_page)
field = etree.Element(
'field',
name=self.get_fieldname(),
context='{'
'"default_this_partner_id": id,'
'"default_tab_id": %d,'
'"active_test": False}' % self.tab_record.id)
tab_page.append(field)
tree = etree.Element('tree', editable='bottom')
field.append(tree)
# Now add fields for the editable tree view in the tab.
type_field = etree.Element(
'field',
name='type_selection_id',
widget='many2one')
type_field.set('domain', repr([('tab_id', '=', self.tab_record.id)]))
type_field.set('options', repr({'no_create': True}))
tree.append(type_field)
other_partner_field = etree.Element(
'field',
string=_('Partner'),
name='other_partner_id',
widget='many2one')
other_partner_field.set('options', repr({'no_create': True}))
tree.append(other_partner_field)
tree.append(etree.Element('field', name='date_start'))
tree.append(etree.Element('field', name='date_end'))
return tab_page
def _set_page_attrs(self, tab_page):
tab_page.set('string', self.tab_record.name)
attrs = {'invisible': [(self.get_visible_fieldname(), '=', False)]}
tab_page.set('attrs', repr(attrs))
transfer_modifiers_to_node(attrs, tab_page)
def compute_visibility(self, partner):
"""Compute visibility, dependent on partner and conditions."""
tab = self.tab_record
if tab.partner_ids:
return partner in tab.partner_ids
if tab.contact_type:
is_company_tab = tab.contact_type == 'c'
if partner.is_company != is_company_tab:
return False
if tab.partner_category_id:
if tab.partner_category_id not in partner.category_id:
return False
return True
| # Copyright 2014-2018 Therp BV <https://therp.nl>.
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
import logging
from lxml import etree
from odoo import _
from odoo.osv.orm import transfer_modifiers_to_node
_logger = logging.getLogger(__name__) # pylint: disable=invalid-name
NAME_PREFIX = 'relation_ids_tab'
class Tab(object):
"""Encapsulate the information on a tab in the database."""
def __init__(self, tab_record):
"""Create tab from tab_record.
In this version tab_record can be assumed to be a partner.relation.tab.
"""
self.tab_record = tab_record
self.name = tab_record.code
def get_fieldname(self):
return '%s_%s' % (NAME_PREFIX, self.tab_record.id)
def get_visible_fieldname(self):
return '%s_visible' % self.get_fieldname()
def create_page(self):
tab_page = etree.Element('page')
self._set_page_attrs(tab_page)
field = etree.Element(
'field',
name=self.get_fieldname(),
context='{'
'"default_this_partner_id": id,'
'"default_tab_id": %d,'
'"active_test": False}' % self.tab_record.id)
tab_page.append(field)
tree = etree.Element('tree', editable='bottom')
field.append(tree)
# Now add fields for the editable tree view in the tab.
type_field = etree.Element(
'field',
name='type_selection_id',
widget='many2one_clickable')
type_field.set('domain', repr([('tab_id', '=', self.tab_record.id)]))
type_field.set('options', repr({'no_create': True}))
tree.append(type_field)
other_partner_field = etree.Element(
'field',
string=_('Partner'),
name='other_partner_id',
widget='many2one_clickable')
other_partner_field.set('options', repr({'no_create': True}))
tree.append(other_partner_field)
tree.append(etree.Element('field', name='date_start'))
tree.append(etree.Element('field', name='date_end'))
return tab_page
def _set_page_attrs(self, tab_page):
tab_page.set('string', self.tab_record.name)
attrs = {'invisible': [(self.get_visible_fieldname(), '=', False)]}
tab_page.set('attrs', repr(attrs))
transfer_modifiers_to_node(attrs, tab_page)
def compute_visibility(self, partner):
"""Compute visibility, dependent on partner and conditions."""
tab = self.tab_record
if tab.partner_ids:
return partner in tab.partner_ids
if tab.contact_type:
is_company_tab = tab.contact_type == 'c'
if partner.is_company != is_company_tab:
return False
if tab.partner_category_id:
if tab.partner_category_id not in partner.category_id:
return False
return True
| Python | 0.000001 |
0b54c244e6e4b745a678fe69fc1be7c16850203d | Fix a mistake. | python/distutils/example_without_dependency/setup.py | python/distutils/example_without_dependency/setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyNurseryRhymesDemo
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nursery_rhymes import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
PACKAGES = ['nursery_rhymes']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='jd.jdhp@gmail.com',
maintainer='Jeremie DECOCK',
maintainer_email='jd.jdhp@gmail.com',
name='nursery_rhymes',
description='A snippet to show how to install a project with setuptools',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts = ["rowyourboat"],
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PyNurseryRhymesDemo
# The MIT License
#
# Copyright (c) 2010,2015 Jeremie DECOCK (http://www.jdhp.org)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from nursery_rhymes import __version__ as VERSION
from distutils.core import setup
# See : http://pypi.python.org/pypi?%3Aaction=list_classifiers
CLASSIFIERS = ['Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX :: OS Independent',
'Programming Language :: Python :: 3',
'Topic :: Software Development :: Libraries']
PACKAGES = ['nursery_rhymes']
README_FILE = 'README.rst'
def get_long_description():
with open(README_FILE, 'r') as fd:
desc = fd.read()
return desc
setup(author='Jeremie DECOCK',
author_email='jd.jdhp@gmail.com',
maintainer='Jeremie DECOCK',
maintainer_email='jd.jdhp@gmail.com',
name='nursery_rhymes',
description='A snippet to show how to install a project with setuptools',
long_description=get_long_description(),
url='http://www.jdhp.org/',
download_url='http://www.jdhp.org/',# where the package may be downloaded
scripts = ["rowyourboat"],
classifiers=CLASSIFIERS,
#license='MIT license', # Useless if license is already in CLASSIFIERS
packages=PACKAGES,
version=VERSION)
| Python | 0.003448 |
d0a42e06baa46f1f9455fbfbe16a0ba3f16b4b61 | Fix CellView has no set_completion method | gaphor/adapters/profiles/metaclasseditor.py | gaphor/adapters/profiles/metaclasseditor.py | """
Metaclass item editors.
"""
from builtins import object
from zope import component
from gi.repository import Gtk
from zope.interface import implementer
from gaphor import UML
from gaphor.adapters.propertypages import create_hbox_label, EventWatcher
from gaphor.core import _, transactional
from gaphor.diagram import items
from gaphor.ui.interfaces import IPropertyPage
def _issubclass(c, b):
try:
return issubclass(c, b)
except TypeError:
return False
@implementer(IPropertyPage)
class MetaclassNameEditor(object):
"""
Metaclass name editor. Provides editable combo box entry with
predefined list of names of UML classes.
"""
order = 10
NAME_LABEL = _("Name")
CLASSES = list(
sorted(
n
for n in dir(UML)
if _issubclass(getattr(UML, n), UML.Element) and n != "Stereotype"
)
)
def __init__(self, item):
self.item = item
self.size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
self.watcher = EventWatcher(item.subject)
def construct(self):
page = Gtk.VBox()
subject = self.item.subject
if not subject:
return page
hbox = create_hbox_label(self, page, self.NAME_LABEL)
model = Gtk.ListStore(str)
for c in self.CLASSES:
model.append([c])
cb = Gtk.ComboBox.new_with_model_and_entry(model)
completion = Gtk.EntryCompletion()
completion.set_model(model)
completion.set_minimum_key_length(1)
completion.set_text_column(0)
cb.get_child().set_completion(completion)
entry = cb.get_child()
entry.set_text(subject and subject.name or "")
hbox.pack_start(cb, True, True, 0)
page.default = entry
# monitor subject.name attribute
changed_id = entry.connect("changed", self._on_name_change)
def handler(event):
if event.element is subject and event.new_value is not None:
entry.handler_block(changed_id)
entry.set_text(event.new_value)
entry.handler_unblock(changed_id)
self.watcher.watch("name", handler).register_handlers()
entry.connect("destroy", self.watcher.unregister_handlers)
page.show_all()
return page
@transactional
def _on_name_change(self, entry):
self.item.subject.name = entry.get_text()
component.provideAdapter(
MetaclassNameEditor, adapts=[items.MetaclassItem], name="Properties"
)
# vim:sw=4:et:ai
| """
Metaclass item editors.
"""
from builtins import object
from zope import component
from gi.repository import Gtk
from zope.interface import implementer
from gaphor import UML
from gaphor.adapters.propertypages import create_hbox_label, EventWatcher
from gaphor.core import _, transactional
from gaphor.diagram import items
from gaphor.ui.interfaces import IPropertyPage
def _issubclass(c, b):
try:
return issubclass(c, b)
except TypeError:
return False
@implementer(IPropertyPage)
class MetaclassNameEditor(object):
"""
Metaclass name editor. Provides editable combo box entry with
predefined list of names of UML classes.
"""
order = 10
NAME_LABEL = _("Name")
CLASSES = list(
sorted(
n
for n in dir(UML)
if _issubclass(getattr(UML, n), UML.Element) and n != "Stereotype"
)
)
def __init__(self, item):
self.item = item
self.size_group = Gtk.SizeGroup.new(Gtk.SizeGroupMode.HORIZONTAL)
self.watcher = EventWatcher(item.subject)
def construct(self):
page = Gtk.VBox()
subject = self.item.subject
if not subject:
return page
hbox = create_hbox_label(self, page, self.NAME_LABEL)
model = Gtk.ListStore(str)
for c in self.CLASSES:
model.append([c])
cb = Gtk.ComboBox(model=model)
completion = Gtk.EntryCompletion()
completion.set_model(model)
completion.set_minimum_key_length(1)
completion.set_text_column(0)
cb.get_child().set_completion(completion)
entry = cb.get_child()
entry.set_text(subject and subject.name or "")
hbox.pack_start(cb, True, True, 0)
page.default = entry
# monitor subject.name attribute
changed_id = entry.connect("changed", self._on_name_change)
def handler(event):
if event.element is subject and event.new_value is not None:
entry.handler_block(changed_id)
entry.set_text(event.new_value)
entry.handler_unblock(changed_id)
self.watcher.watch("name", handler).register_handlers()
entry.connect("destroy", self.watcher.unregister_handlers)
page.show_all()
return page
@transactional
def _on_name_change(self, entry):
self.item.subject.name = entry.get_text()
component.provideAdapter(
MetaclassNameEditor, adapts=[items.MetaclassItem], name="Properties"
)
# vim:sw=4:et:ai
| Python | 0 |
6c11b9cc9b213928e32d883d4f557f7421da6802 | Add kamerstukken to dossier API | document/api.py | document/api.py | from rest_framework import serializers, viewsets
from document.models import Document, Kamerstuk, Dossier
class DossierSerializer(serializers.HyperlinkedModelSerializer):
documents = serializers.HyperlinkedRelatedField(read_only=True,
view_name='document-detail',
many=True)
kamerstukken = serializers.HyperlinkedRelatedField(read_only=True,
view_name='kamerstuk-detail',
many=True)
class Meta:
model = Dossier
fields = ('id', 'dossier_id', 'title', 'kamerstukken', 'documents')
class DossierViewSet(viewsets.ModelViewSet):
queryset = Dossier.objects.all()
serializer_class = DossierSerializer
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = ('id', 'dossier', 'raw_type', 'raw_title', 'publisher', 'date_published', 'document_url')
class DocumentViewSet(viewsets.ModelViewSet):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
class KamerstukSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Kamerstuk
fields = ('id', 'document', 'id_main', 'id_sub', 'type_short', 'type_long')
class KamerstukViewSet(viewsets.ModelViewSet):
queryset = Kamerstuk.objects.all()
serializer_class = KamerstukSerializer
| from rest_framework import serializers, viewsets
from document.models import Document, Kamerstuk, Dossier
class DossierSerializer(serializers.HyperlinkedModelSerializer):
documents = serializers.HyperlinkedRelatedField(read_only=True,
view_name='document-detail',
many=True)
class Meta:
model = Dossier
fields = ('id', 'dossier_id', 'title', 'documents')
class DossierViewSet(viewsets.ModelViewSet):
queryset = Dossier.objects.all()
serializer_class = DossierSerializer
class DocumentSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Document
fields = ('id', 'dossier', 'raw_type', 'raw_title', 'publisher', 'date_published', 'document_url')
class DocumentViewSet(viewsets.ModelViewSet):
queryset = Document.objects.all()
serializer_class = DocumentSerializer
class KamerstukSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Kamerstuk
fields = ('id', 'document', 'id_main', 'id_sub', 'type_short', 'type_long')
class KamerstukViewSet(viewsets.ModelViewSet):
queryset = Kamerstuk.objects.all()
serializer_class = KamerstukSerializer
| Python | 0 |
51e35e88597d2c34905222cd04a46a2a840c0d92 | Refactor Poly ABC | dimod/core/polysampler.py | dimod/core/polysampler.py | # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
import abc
from six import add_metaclass
from dimod.core.composite import Composite
from dimod.higherorder.polynomial import BinaryPolynomial
__all__ = 'PolySampler', 'ComposedPolySampler'
@add_metaclass(abc.ABCMeta)
class PolySampler:
"""Sampler supports binary polynomials.
Binary polynomials are an extension of binary quadratic models that allow
higher-order interactions.
"""
@abc.abstractmethod
def sample_poly(self, polynomial, **kwargs):
"""Sample from a higher-order polynomial."""
pass
def sample_hising(self, h, J, **kwargs):
return self.sample_poly(BinaryPolynomial.from_hising(h, J), **kwargs)
def sample_hubo(self, H, **kwargs):
return self.sample_poly(BinaryPolynomial.from_hubo(H), **kwargs)
class ComposedPolySampler(PolySampler, Composite):
pass
| # Copyright 2019 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ============================================================================
import abc
from six import add_metaclass
__all__ = 'PolySampler',
@add_metaclass(abc.ABCMeta)
class PolySampler:
"""Sampler/Composite supports binary polynomials.
Binary polynomials are an extension of binary quadratic models that allow
higher-order interactions.
"""
@abc.abstractmethod
def sample_poly(self, polynomial, **kwargs):
"""Sample from a higher-order polynomial."""
pass
| Python | 0 |
81cd0b74e611532f8421d0dfb22266cd789a5a6a | add in oauth_keys to dev (bug 858813) | solitude/settings/sites/dev/db.py | solitude/settings/sites/dev/db.py | """private_base will be populated from puppet and placed in this directory"""
import logging
import dj_database_url
import private_base as private
from solitude.settings import base
from django_sha2 import get_password_hashers
ADMINS = ()
ALLOWED_HOSTS = ['payments-dev.allizom.org', 'localhost']
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
HMAC_KEYS = private.HMAC_KEYS
PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
LOG_LEVEL = logging.DEBUG
SECRET_KEY = private.SECRET_KEY
SENTRY_DSN = private.SENTRY_DSN
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
SYSLOG_TAG = 'http_app_payments_dev'
TEMPLATE_DEBUG = DEBUG
# Solitude specific settings.
AES_KEYS = private.AES_KEYS
CLEANSED_SETTINGS_ACCESS = True
CLIENT_OAUTH_KEYS = private.CLIENT_OAUTH_KEYS
PAYPAL_PROXY = private.PAYPAL_PROXY
PAYPAL_URL_WHITELIST = ('https://marketplace-dev.allizom.org',)
BANGO_PROXY = private.BANGO_PROXY
SITE_URL = 'https://payments-dev.allizom.org'
| """private_base will be populated from puppet and placed in this directory"""
import logging
import dj_database_url
import private_base as private
from solitude.settings import base
from django_sha2 import get_password_hashers
ADMINS = ()
ALLOWED_HOSTS = ['payments-dev.allizom.org', 'localhost']
DATABASES = {}
DATABASES['default'] = dj_database_url.parse(private.DATABASES_DEFAULT_URL)
DATABASES['default']['ENGINE'] = 'django.db.backends.mysql'
DATABASES['default']['OPTIONS'] = {'init_command': 'SET storage_engine=InnoDB'}
DEBUG = False
DEBUG_PROPAGATE_EXCEPTIONS = False
HMAC_KEYS = private.HMAC_KEYS
PASSWORD_HASHERS = get_password_hashers(base.BASE_PASSWORD_HASHERS, HMAC_KEYS)
LOG_LEVEL = logging.DEBUG
SECRET_KEY = private.SECRET_KEY
SENTRY_DSN = private.SENTRY_DSN
STATSD_HOST = private.STATSD_HOST
STATSD_PORT = private.STATSD_PORT
STATSD_PREFIX = private.STATSD_PREFIX
SYSLOG_TAG = 'http_app_payments_dev'
TEMPLATE_DEBUG = DEBUG
# Solitude specific settings.
AES_KEYS = private.AES_KEYS
CLEANSED_SETTINGS_ACCESS = True
CLIENT_JWT_KEYS = private.CLIENT_JWT_KEYS
PAYPAL_PROXY = private.PAYPAL_PROXY
PAYPAL_URL_WHITELIST = ('https://marketplace-dev.allizom.org',)
BANGO_PROXY = private.BANGO_PROXY
SITE_URL = 'https://payments-dev.allizom.org'
| Python | 0 |
6e3ad1b462a95a5d51ab7b56e475b334fed32260 | add "--style=symbol_index" for outputting symbox index | shotglass/app/management/commands/show.py | shotglass/app/management/commands/show.py | import collections
import os
import sys
from django.core.management.base import BaseCommand
from django.db.models import Avg, Max, Sum
from app.models import SourceFile, Symbol
def show_file_index(projects):
FORMAT = '{name:20} {path:50} {num_lines:>5}'
print FORMAT.format(name='NAME', path='PATH', num_lines="LINES")
for project in projects:
# pylint: disable=no-member
files = SourceFile.objects.filter(
project=project).order_by('path')
for file_ in files:
print FORMAT.format(**vars(file_))
def show_dir_index(projects):
"""
for each directory, output total number of source lines
"""
FORMAT = '{dir_path:50} {num_lines:>5}'
for project in projects:
data = SourceFile.objects.filter(
project=project).values_list('path', 'num_lines')
count = collections.Counter()
for path,num_lines in data:
count[os.path.dirname(path)] = num_lines
for dir_path,num_lines in sorted(count.iteritems()):
print FORMAT.format(**locals())
def show_symbol_index(projects):
FORMAT = '{0:30} {1}:{2}'
def fun_symbol(sym):
return sym.name[0] != '_'
for project in projects:
# pylint: disable=no-member
proj_symbols = Symbol.objects.filter(
source_file__project=project)
fun_symbols = proj_symbols.exclude(label__startswith='_').order_by('label')
for symbol in fun_symbols:
print FORMAT.format(symbol.label, symbol.source_file.path, symbol.line_number)
def show_summary(projects):
HEADER = '{:30} {:>9} {:>6} {:>7} {:>10} {:>9}'.format(
'project', 'files', 'avglen', 'maxlen', 'total', 'symbols')
FORMAT = (
'{project:30}'
' {num_files:9,}'
' {avg_length:6,}'
' {max_length:7,}'
' {total_length:10,}'
' {num_symbols:9,}')
print HEADER
for project in projects:
proj_qs = SourceFile.objects.filter(project=project)
num_files = proj_qs.count()
avg_length = int(proj_qs.aggregate(Avg('num_lines')).values()[0])
max_length = proj_qs.aggregate(Max('num_lines')).values()[0]
total_length = proj_qs.aggregate(Sum('num_lines')).values()[0]
proj_symbols = Symbol.objects.filter(source_file__project=project)
num_symbols = proj_symbols.count()
print FORMAT.format(**locals())
class Command(BaseCommand):
help = 'beer'
def add_arguments(self, parser):
parser.add_argument('projects', nargs='*')
parser.add_argument('--style', default='summary')
# if 0: # XX V1
# parser.add_argument('--index', action="store_true")
# else:
# parser.add_argument('--dirindex',
# action="store_true",
# help="show source lines per directory")
# parser.add_argument('--index', default=True)
def handle(self, *args, **options):
all_projects = SourceFile.projects()
projects = options['projects']
if not projects:
print('PROJECTS: {}'.format(', '.join(all_projects)))
print('or "all"')
return
if projects == ['all']:
projects = all_projects
try:
style_fname = 'show_{}'.format(options['style'])
infofunc = globals()[style_fname]
except KeyError:
sys.exit("{}: unknown style".format(options['style']))
infofunc(projects)
# if options['dirindex']:
# show_dir_index(projects)
# elif options['index']:
# show_file_index(projects)
# else:
# show_summary(projects)
| import collections
import os
import sys
from django.core.management.base import BaseCommand
from django.db.models import Avg, Max, Sum
from app.models import SourceFile, Symbol
def show_file_index(projects):
FORMAT = '{name:20} {path:50} {num_lines:>5}'
print FORMAT.format(name='NAME', path='PATH', num_lines="LINES")
for project in projects:
# pylint: disable=no-member
files = SourceFile.objects.filter(
project=project).order_by('path')
for file_ in files:
print FORMAT.format(**vars(file_))
def show_dir_index(projects):
"""
for each directory, output total number of source lines
"""
FORMAT = '{dir_path:50} {num_lines:>5}'
for project in projects:
data = SourceFile.objects.filter(
project=project).values_list('path', 'num_lines')
count = collections.Counter()
for path,num_lines in data:
count[os.path.dirname(path)] = num_lines
for dir_path,num_lines in sorted(count.iteritems()):
print FORMAT.format(**locals())
# XX V1
def show_symbol_index(projects):
FORMAT = '{name:30} {path}:{line_number}'
def fun_symbol(sym):
return sym.name[0] != '_'
for project in projects:
# pylint: disable=no-member
symbols = SourceLine.objects.filter(
project=project).order_by('name')
for symbol in filter(fun_symbol, symbols):
print FORMAT.format(**symbol.__dict__)
def show_summary(projects):
HEADER = '{:30} {:>9} {:>6} {:>7} {:>10} {:>9}'.format(
'project', 'files', 'avglen', 'maxlen', 'total', 'symbols')
FORMAT = (
'{project:30}'
' {num_files:9,}'
' {avg_length:6,}'
' {max_length:7,}'
' {total_length:10,}'
' {num_symbols:9,}')
print HEADER
for project in projects:
proj_qs = SourceFile.objects.filter(project=project)
num_files = proj_qs.count()
avg_length = int(proj_qs.aggregate(Avg('num_lines')).values()[0])
max_length = proj_qs.aggregate(Max('num_lines')).values()[0]
total_length = proj_qs.aggregate(Sum('num_lines')).values()[0]
proj_symbols = Symbol.objects.filter(source_file__project=project)
num_symbols = proj_symbols.count()
print FORMAT.format(**locals())
class Command(BaseCommand):
help = 'beer'
def add_arguments(self, parser):
parser.add_argument('projects', nargs='*')
parser.add_argument('--style', default='summary')
# if 0: # XX V1
# parser.add_argument('--index', action="store_true")
# else:
# parser.add_argument('--dirindex',
# action="store_true",
# help="show source lines per directory")
# parser.add_argument('--index', default=True)
def handle(self, *args, **options):
all_projects = SourceFile.projects()
projects = options['projects']
if not projects:
print('PROJECTS: {}'.format(', '.join(all_projects)))
print('or "all"')
return
if projects == ['all']:
projects = all_projects
try:
style_fname = 'show_{}'.format(options['style'])
infofunc = globals()[style_fname]
except KeyError:
sys.exit("{}: unknown style".format(options['style']))
infofunc(projects)
# if options['dirindex']:
# show_dir_index(projects)
# elif options['index']:
# show_file_index(projects)
# else:
# show_summary(projects)
| Python | 0.000487 |
701bb76a49ec88ba1352ca85e75c43e5f204ab73 | remove commented code commited by mistake. | depsolver/solver/core.py | depsolver/solver/core.py | import six
from depsolver.errors \
import \
DepSolverError
from depsolver.compat \
import \
OrderedDict
from depsolver.bundled.traitlets \
import \
HasTraits, Instance
from depsolver.solver.decisions \
import \
DecisionsSet
from depsolver.operations \
import \
Install, Remove, Update
from depsolver.pool \
import \
Pool
from depsolver.repository \
import \
Repository
from depsolver.request \
import \
Request
from depsolver.solver.policy \
import \
DefaultPolicy
from depsolver.solver.rules_generator \
import \
RulesGenerator
# FIXME: the php model for this class is pretty broken as many attributes are
# initialized outside the ctor. Fix this.
class Solver(HasTraits):
policy = Instance(DefaultPolicy)
pool = Instance(Pool)
installed_repository = Instance(Repository)
def __init__(self, pool, installed_repository, **kw):
policy = DefaultPolicy()
super(Solver, self).__init__(self, policy=policy, pool=pool,
installed_repository=installed_repository, **kw)
def solve(self, request):
decision, rules = self._prepare_solver(request)
self._make_assertion_rules_decisions(decisions, rules)
return decisions
def _setup_install_map(self, jobs):
installed_map = OrderedDict()
for package in self.installed_repository.iter_packages():
installed_map[package.id] = package
for job in jobs:
if job.job_type == "update":
raise NotImplementedError()
elif job.job_type == "upgrade":
raise NotImplementedError()
elif job.job_type == "install":
if len(job.packages) < 1:
raise NotImplementedError()
return installed_map
def _prepare_solver(self, request):
installed_map = self._setup_install_map(request.jobs)
decisions = DecisionsSet(self.pool)
watch_graph = RulesWatchGraph()
rules_generator = RulesGenerator(self.pool, request, installed_map)
rules = list(rules_generator.iter_rules())
return decisions, rules
def _make_assertion_rules_decisions(self, decisions, rules):
decision_start = len(decisions) - 1
rule_index = 0
while rule_index < len(rules):
rule = rules[rule_index]
rule_index += 1
if not rule.is_assertion or not rule.enabled:
continue
literals = rule.literals
literal = literals[0]
if not decisions.is_decided(abs(literal)):
decisions.decide(literal, 1, rule)
continue;
if decisions.satisfy(literal):
continue
if rule.rule_type == "learnt":
rule.enable = False
continue
raise NotImplementedError()
| import six
from depsolver.errors \
import \
DepSolverError
from depsolver.compat \
import \
OrderedDict
from depsolver.bundled.traitlets \
import \
HasTraits, Instance
from depsolver.solver.decisions \
import \
DecisionsSet
from depsolver.operations \
import \
Install, Remove, Update
from depsolver.pool \
import \
Pool
from depsolver.repository \
import \
Repository
from depsolver.request \
import \
Request
from depsolver.solver.policy \
import \
DefaultPolicy
from depsolver.solver.rules_generator \
import \
RulesGenerator
# FIXME: the php model for this class is pretty broken as many attributes are
# initialized outside the ctor. Fix this.
class Solver(HasTraits):
policy = Instance(DefaultPolicy)
pool = Instance(Pool)
installed_repository = Instance(Repository)
def __init__(self, pool, installed_repository, **kw):
policy = DefaultPolicy()
super(Solver, self).__init__(self, policy=policy, pool=pool,
installed_repository=installed_repository, **kw)
def solve(self, request):
decision, rules = self._prepare_solver(request)
self._make_assertion_rules_decisions(decisions, rules)
return decisions
def _setup_install_map(self, jobs):
installed_map = OrderedDict()
for package in self.installed_repository.iter_packages():
installed_map[package.id] = package
for job in jobs:
if job.job_type == "update":
raise NotImplementedError()
elif job.job_type == "upgrade":
raise NotImplementedError()
elif job.job_type == "install":
if len(job.packages) < 1:
raise NotImplementedError()
return installed_map
def _prepare_solver(self, request):
installed_map = self._setup_install_map(request.jobs)
decisions = DecisionsSet(self.pool)
watch_graph = RulesWatchGraph()
rules_generator = RulesGenerator(self.pool, request, installed_map)
rules = list(rules_generator.iter_rules())
return decisions, rules
def _make_assertion_rules_decisions(self, decisions, rules):
decision_start = len(decisions) - 1
rule_index = 0
while rule_index < len(rules):
rule = rules[rule_index]
rule_index += 1
if not rule.is_assertion or not rule.enabled:
#print "\trule {} is an assertion or disabled".format(rule)
continue
literals = rule.literals
literal = literals[0]
#print "\tlooking at literal {}".format(literal)
if not decisions.is_decided(abs(literal)):
decisions.decide(literal, 1, rule)
continue;
if decisions.satisfy(literal):
continue
if rule.rule_type == "learnt":
rule.enable = False
continue
raise NotImplementedError()
| Python | 0 |
b068180ef61b3e865bc0eb325e7722ddffa72bce | Add OTP_VERIFY_URL default val | droll/settings.py | droll/settings.py | """
Django settings for droll project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import sys
import random
import string
import dj_database_url
from . import utils
env = utils.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.get('SECRET_KEY') or ''.join(random.choice(
''.join([string.ascii_letters,
string.digits,
string.punctuation])) for _ in range(50))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.get_bool('DEBUG')
TESTING = 'test' in sys.argv
PRODUCTION = not DEBUG and not TESTING
if PRODUCTION:
MAILTO = env.get('MAILTO')
if MAILTO:
ADMINS = (('Admin', MAILTO), )
DEFAULT_FROM_EMAIL = env.get('DEFAULT_FROM_EMAIL') or 'webmaster@localhost'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'access',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
if DEBUG and not TESTING:
INSTALLED_APPS += (
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'droll.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'droll.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = env.get('LANGUAGE_CODE') or 'en-us'
TIME_ZONE = env.get('TIME_ZONE') or 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'access.User'
OTP_VERIFY_URL = '/access/otp/'
OTP_SESSION_FLAG_NAME = 'otp_verified'
| """
Django settings for droll project.
Generated by 'django-admin startproject' using Django 1.8.13.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
import os
import sys
import random
import string
import dj_database_url
from . import utils
env = utils.Env()
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = env.get('SECRET_KEY') or ''.join(random.choice(
''.join([string.ascii_letters,
string.digits,
string.punctuation])) for _ in range(50))
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = env.get_bool('DEBUG')
TESTING = 'test' in sys.argv
PRODUCTION = not DEBUG and not TESTING
if PRODUCTION:
MAILTO = env.get('MAILTO')
if MAILTO:
ADMINS = (('Admin', MAILTO), )
DEFAULT_FROM_EMAIL = env.get('DEFAULT_FROM_EMAIL') or 'webmaster@localhost'
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'access',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
if DEBUG and not TESTING:
INSTALLED_APPS += (
'django_extensions',
)
MIDDLEWARE_CLASSES += (
'debug_toolbar.middleware.DebugToolbarMiddleware',
)
ROOT_URLCONF = 'droll.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'droll.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': dj_database_url.config()
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = env.get('LANGUAGE_CODE') or 'en-us'
TIME_ZONE = env.get('TIME_ZONE') or 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'access.User'
OTP_SESSION_FLAG_NAME = 'otp_verified'
| Python | 0.000005 |
3422c60553c6cd1a746e7c6e39a3e2ac707b0cf7 | return only mean predictions for cost and omit variance | robo/acquisition/EnvEntropySearch.py | robo/acquisition/EnvEntropySearch.py | '''
Created on Jun 8, 2015
@author: Aaron Klein
'''
import emcee
import numpy as np
from robo.acquisition.LogEI import LogEI
from robo.acquisition.EntropyMC import EntropyMC
from scipy import stats
class EnvEntropySearch(EntropyMC):
'''
classdocs
'''
def __init__(self, model, cost_model, X_lower, X_upper, compute_incumbent, is_env_variable, n_representer=10, n_hals_vals=100, n_func_samples=100, **kwargs):
self.cost_model = cost_model
self.n_dims = X_lower.shape[0]
self.is_env_variable = is_env_variable
super(EnvEntropySearch, self).__init__(model, X_lower, X_upper, compute_incumbent, Nb=n_representer, Nf=n_func_samples, Np=n_hals_vals)
def update(self, model, cost_model):
self.cost_model = cost_model
super(EnvEntropySearch, self).update(model)
def __call__(self, X, derivative=False):
# Predict the costs for this configuration
cost = self.cost_model.predict(X)[0]
# Compute fantasized pmin
new_pmin = self.change_pmin_by_innovation(X, self.f)
# Compute acquisition value
H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))
loss = np.array([[-H_new + H_old]])
acquisition_value = loss / cost
return acquisition_value
def update_representer_points(self):
#TODO: We might want to start the sampling of the representer points from the incumbent here? Or maybe from a sobel grid?
super(EnvEntropySearch, self).update_representer_points()
# Project representer points to subspace
self.zb[:, self.is_env_variable == 1] = self.X_upper[self.is_env_variable == 1]
| '''
Created on Jun 8, 2015
@author: Aaron Klein
'''
import emcee
import numpy as np
from robo.acquisition.LogEI import LogEI
from robo.acquisition.EntropyMC import EntropyMC
from scipy import stats
class EnvEntropySearch(EntropyMC):
'''
classdocs
'''
def __init__(self, model, cost_model, X_lower, X_upper, compute_incumbent, is_env_variable, n_representer=10, n_hals_vals=100, n_func_samples=100, **kwargs):
self.cost_model = cost_model
self.n_dims = X_lower.shape[0]
self.is_env_variable = is_env_variable
super(EnvEntropySearch, self).__init__(model, X_lower, X_upper, compute_incumbent, Nb=n_representer, Nf=n_func_samples, Np=n_hals_vals)
def update(self, model, cost_model):
self.cost_model = cost_model
super(EnvEntropySearch, self).update(model)
def compute(self, X, derivative=False):
# Predict the costs for this configuration
cost = self.cost_model.predict(X)
# Compute fantasized pmin
new_pmin = self.change_pmin_by_innovation(X, self.f)
# Compute acquisition value
H_old = np.sum(np.multiply(self.pmin, (self.logP + self.lmb)))
H_new = np.sum(np.multiply(new_pmin, (np.log(new_pmin) + self.lmb)))
loss = np.array([[-H_new + H_old]])
acquisition_value = loss / cost
return acquisition_value
def update_representer_points(self):
#TODO: We might want to start the sampling of the representer points from the incumbent here? Or maybe from a sobel grid?
super(EnvEntropySearch, self).update_representer_points()
# Project representer points to subspace
self.zb[:, self.is_env_variable == 1] = self.X_upper[self.is_env_variable == 1]
| Python | 0.000001 |
2d8c8cce8885b24ac1766912ee7bd1897900ae0c | fix up Comment model | dwitter/models.py | dwitter/models.py | from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_delete
def get_sentinel_user():
users = get_user_model().objects
return users.get_or_create(username='[deleted]', is_active=False)[0]
@receiver(pre_delete, sender=User)
def soft_delete_user_dweets(instance, **kwargs):
for dweet in Dweet.objects.filter(_author=instance):
dweet.delete()
class NotDeletedDweetManager(models.Manager):
def get_queryset(self):
base_queryset = super(NotDeletedDweetManager, self).get_queryset()
return base_queryset.filter(deleted=False)
class Dweet(models.Model):
code = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey("self", on_delete=models.DO_NOTHING,
null=True, blank=True)
likes = models.ManyToManyField(User, related_name="liked")
hotness = models.FloatField(default=1.0)
deleted = models.BooleanField(default=False)
_author = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
@property
def author(self):
return self._author or get_sentinel_user()
@author.setter
def author(self, value):
self._author = value
objects = NotDeletedDweetManager()
with_deleted = models.Manager()
def delete(self):
self.deleted = True
self.save()
def __unicode__(self):
return 'd/' + str(self.id) + ' (' + self.author.username + ')'
class Meta:
ordering = ('-posted',)
class Comment(models.Model):
text = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey(Dweet, on_delete=models.CASCADE,
related_name="comments")
_author = models.ForeignKey(User, on_delete=models.CASCADE)
@property
def author(self):
return self._author
@author.setter
def author(self, value):
self._author = value
def __unicode__(self):
return ('c/' +
str(self.id) +
' (' +
self.author.username +
') to ' +
str(self.reply_to))
class Meta:
ordering = ('-posted',)
| from django.db import models
from django.contrib.auth import get_user_model
from django.contrib.auth.models import User
from django.dispatch import receiver
from django.db.models.signals import pre_delete
def get_sentinel_user():
users = get_user_model().objects
return users.get_or_create(username='[deleted]', is_active=False)[0]
@receiver(pre_delete, sender=User)
def soft_delete_user_dweets(instance, **kwargs):
for dweet in Dweet.objects.filter(_author=instance):
dweet.delete()
class NotDeletedDweetManager(models.Manager):
def get_queryset(self):
base_queryset = super(NotDeletedDweetManager, self).get_queryset()
return base_queryset.filter(deleted=False)
class Dweet(models.Model):
code = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey("self", on_delete=models.DO_NOTHING,
null=True, blank=True)
likes = models.ManyToManyField(User, related_name="liked")
hotness = models.FloatField(default=1.0)
deleted = models.BooleanField(default=False)
_author = models.ForeignKey(User, on_delete=models.SET_NULL,
null=True, blank=True)
@property
def author(self):
return self._author or get_sentinel_user()
@author.setter
def author(self, value):
self._author = value
objects = NotDeletedDweetManager()
with_deleted = models.Manager()
def delete(self):
self.deleted = True
self.save()
def __unicode__(self):
return 'd/' + str(self.id) + ' (' + self.author.username + ')'
class Meta:
ordering = ('-posted',)
class Comment(models.Model):
text = models.TextField()
posted = models.DateTimeField()
reply_to = models.ForeignKey(Dweet, on_delete=models.CASCADE,
related_name="comments")
author = models.ForeignKey(User, on_delete=models.CASCADE)
def __unicode__(self):
return ('c/' +
str(self.id) +
' (' +
self.author.username +
') to ' +
str(self.reply_to))
class Meta:
ordering = ('-posted',)
| Python | 0.000001 |
3c87476c4d0861638ff7c3d6950377c75d3057dd | read true positions | streams/io/core.py | streams/io/core.py | # coding: utf-8
""" Code for helping to select stars from the nearby Sgr wraps. """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import gc
# Third-party
import h5py
import numpy as np
import numexpr
import astropy.units as u
from astropy.io import ascii
from astropy.table import vstack, Table, Column
import astropy.coordinates as coord
# Project
from ..coordinates.frame import heliocentric, galactocentric
from ..dynamics import Particle, ObservedParticle, Orbit
__all__ = ["read_table", "read_hdf5"]
def read_table(filename, expr=None, N=None):
_table = np.genfromtxt(filename, names=True)
if expr is not None:
idx = numexpr.evaluate(str(expr), _table)
_table = _table[idx]
if N is not None and N > 0:
np.random.shuffle(_table)
_table = _table[:min(N,len(_table))]
return _table
def read_hdf5(h5file):
""" Read particles and satellite from a given HDF5 file. """
return_dict = dict()
with h5py.File(h5file, "r") as f:
try:
ptcl = f["particles"]
satl = f["satellite"]
except KeyError:
raise ValueError("Invalid HDF5 file. Missing 'particles' or "
"'satellite' group.")
if "error" in ptcl.keys():
p = ObservedParticle(ptcl["data"].value.T, ptcl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
return_dict["true_particles"] = Particle(ptcl["true_data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
else:
p = Particle(ptcl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
return_dict["particles"] = p
if "error" in satl.keys():
s = ObservedParticle(satl["data"].value.T, satl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
return_dict["true_satellite"] = Particle(satl["true_data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
else:
s = Particle(satl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
s.m = satl["m"].value
s.v_disp = satl["v_disp"].value
return_dict["satellite"] = s
if "simulation" in f.keys():
return_dict["t1"] = float(f["simulation"]["t1"].value)
return_dict["t2"] = float(f["simulation"]["t2"].value)
return return_dict | # coding: utf-8
""" Code for helping to select stars from the nearby Sgr wraps. """
from __future__ import division, print_function
__author__ = "adrn <adrn@astro.columbia.edu>"
# Standard library
import os, sys
import gc
# Third-party
import h5py
import numpy as np
import numexpr
import astropy.units as u
from astropy.io import ascii
from astropy.table import vstack, Table, Column
import astropy.coordinates as coord
# Project
from ..coordinates.frame import heliocentric, galactocentric
from ..dynamics import Particle, ObservedParticle, Orbit
__all__ = ["read_table", "read_hdf5"]
def read_table(filename, expr=None, N=None):
_table = np.genfromtxt(filename, names=True)
if expr is not None:
idx = numexpr.evaluate(str(expr), _table)
_table = _table[idx]
if N is not None and N > 0:
np.random.shuffle(_table)
_table = _table[:min(N,len(_table))]
return _table
def read_hdf5(h5file):
""" Read particles and satellite from a given HDF5 file. """
return_dict = dict()
with h5py.File(h5file, "r") as f:
try:
ptcl = f["particles"]
satl = f["satellite"]
except KeyError:
raise ValueError("Invalid HDF5 file. Missing 'particles' or "
"'satellite' group.")
if "error" in ptcl.keys():
p = ObservedParticle(ptcl["data"].value.T, ptcl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
else:
p = Particle(ptcl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in ptcl["units"]])
p.tub = ptcl["tub"].value
return_dict["particles"] = p
if "error" in satl.keys():
s = ObservedParticle(satl["data"].value.T, satl["error"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
else:
s = Particle(satl["data"].value.T,
frame=heliocentric,
units=[u.Unit(x) for x in satl["units"]])
s.m = satl["m"].value
s.v_disp = satl["v_disp"].value
return_dict["satellite"] = s
if "simulation" in f.keys():
return_dict["t1"] = float(f["simulation"]["t1"].value)
return_dict["t2"] = float(f["simulation"]["t2"].value)
return return_dict | Python | 0.000938 |
c98662bf577afa1dcf1b847193dd2e856a90e864 | Fix flopped windows comment | examples/app-two-programs.py | examples/app-two-programs.py | # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import gl, app, gloo
vertex = """
attribute vec2 a_position;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
gl_PointSize = 30.0;
}
"""
fragment1 = """
void main() {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
"""
fragment2 = """
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
program1 = gloo.Program(vertex, fragment1) # blue on the right
program1['a_position'] = np.zeros((1,2),dtype=np.float32) + 0.5
program2 = gloo.Program(vertex, fragment2) # red on the left
program2['a_position'] = np.zeros((1,2),dtype=np.float32) - 0.5
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
program1.draw(gl.GL_POINTS)
program2.draw(gl.GL_POINTS)
app.run()
| # -----------------------------------------------------------------------------
# Copyright (c) 2009-2016 Nicolas P. Rougier. All rights reserved.
# Distributed under the (new) BSD License.
# -----------------------------------------------------------------------------
import numpy as np
from glumpy import gl, app, gloo
vertex = """
attribute vec2 a_position;
void main() {
gl_Position = vec4(a_position, 0.0, 1.0);
gl_PointSize = 30.0;
}
"""
fragment1 = """
void main() {
gl_FragColor = vec4(0.0, 0.0, 1.0, 1.0);
}
"""
fragment2 = """
void main() {
gl_FragColor = vec4(1.0, 0.0, 0.0, 1.0);
}
"""
program1 = gloo.Program(vertex, fragment1) # blue on the left
program1['a_position'] = np.zeros((1,2),dtype=np.float32) + 0.5
program2 = gloo.Program(vertex, fragment2) # red on the right
program2['a_position'] = np.zeros((1,2),dtype=np.float32) - 0.5
window = app.Window()
@window.event
def on_draw(dt):
window.clear()
program1.draw(gl.GL_POINTS)
program2.draw(gl.GL_POINTS)
app.run()
| Python | 0 |
818f1431fe67120967f385ee090d06c1038e48c4 | Add project level imports so users don't have to worry about the module names. | dimensionful/__init__.py | dimensionful/__init__.py | from units import Unit
from quantity import Quantity
from common_units import *
from constants import *
| Python | 0 | |
8c37a2b8e8c80fd9ad62d707a6669ea60d7b106c | Update unit tests with new estimator class interface | sklearn/linear_model/tests/test_ransac.py | sklearn/linear_model/tests/test_ransac.py | import numpy as np
from numpy.testing import assert_equal, assert_raises
from sklearn import linear_model
np.random.seed(1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_,
dtype=np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5,
is_data_valid=is_data_valid)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
return False
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5,
is_model_valid=is_model_valid)
estimator = linear_model.LinearRegression()
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5, max_trials=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5, max_trials=11)
assert ransac_estimator.n_trials_ is None
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 11
def test_ransac_stop_n_inliers():
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5,
stop_n_inliers=2)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
def test_ransac_stop_n_score():
base_estimator = linear_model.LinearRegression()
ransac_estimator = linear_model.RANSAC(base_estimator, 2, 5,
stop_score=0)
ransac_estimator.fit(X, y)
assert ransac_estimator.n_trials_ == 1
if __name__ == "__main__":
np.testing.run_module_suite()
| import numpy as np
from numpy.testing import assert_equal, assert_raises
from sklearn import linear_model
from sklearn.utils import ransac
np.random.seed(1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
# Estimate parameters of corrupted data
_, inlier_mask = ransac(X, y, linear_model.LinearRegression(), 2, 5)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(inlier_mask, dtype=np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(inlier_mask, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
estimator = linear_model.LinearRegression()
assert_raises(ValueError, ransac, X, y, estimator, 2, 5,
is_data_valid=is_data_valid)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
return False
estimator = linear_model.LinearRegression()
assert_raises(ValueError, ransac, X, y, estimator, 2, 5,
is_model_valid=is_model_valid)
def test_ransac_max_trials():
estimator = linear_model.LinearRegression()
assert_raises(ValueError, ransac, X, y, estimator, 2, 5, max_trials=0)
assert ransac(X, y, estimator, 2, 5, max_trials=11)[0] == 11
def test_ransac_stop_n_inliers():
estimator = linear_model.LinearRegression()
assert ransac(X, y, estimator, 2, 5, stop_n_inliers=2)[0] == 1
def test_ransac_stop_n_score():
estimator = linear_model.LinearRegression()
assert ransac(X, y, estimator, 2, 5, stop_score=0)[0] == 1
if __name__ == "__main__":
np.testing.run_module_suite()
| Python | 0 |
c713aa4953063cb6e64ecaaaed464f2a441482bb | Add testing scaffolding. | lilkv/testsuite/test_basic.py | lilkv/testsuite/test_basic.py | # -*- coding: utf-8 -*-
"""
lilkv.testsuite.basic
Test lilkv basic functionality.
"""
from lilkv.testsuite import LilKVTestCase
from lilkv.keyspace import Keyspace
from lilkv.columnfamily import ColumnFamily
from lilkv.column import Column
class BasicTests(LilKVTestCase):
"""Baseclass for testing out the application.
"""
def test_keyspace_creation(self):
ks = Keyspace("Test Keyspace")
self.assert_in("Test Keyspace", Keyspace.KEYSPACES)
def test_columnfamily_creation(self):
ks = Keyspace("Test Keyspace")
ks.create_columnfamily("daily_visitors")
self.assert_in("daily_visitors", ks.columnfamilies)
def test_adding_data(self):
pass
def test_reading_data(self):
pass
def test_deleting_data(self):
pass
| # -*- coding: utf-8 -*-
"""
lilkv.testsuite.basic
Test lilkv basic functionality.
"""
from lilkv.testsuite import LilKVTestCase
from lilkv.keyspace import Keyspace
from lilkv.columnfamily import ColumnFamily
from lilkv.column import Column
class BasicTests(LilKVTestCase):
"""Baseclass for testing out the application.
"""
def test_keyspace_creation(self):
ks = Keyspace("Test Keyspace")
self.assert_in("Test Keyspace", Keyspace.KEYSPACES)
def test_columnfamily_creation(self):
ks = Keyspace("Test Keyspace")
ks.create_columnfamily("daily_visitors")
self.assert_in("daily_visitors", ks.columnfamilies)
| Python | 0 |
573d1e2498467da357a79bb865683e162e16eb14 | increment version to 0.13.1 | gym/version.py | gym/version.py | VERSION = '0.13.1'
| VERSION = '0.13.0'
| Python | 0.000683 |
d83e8f8702755766c1c15a35297b40d25051d55e | Bump version | gym/version.py | gym/version.py | VERSION = '0.4.9'
| VERSION = '0.4.8'
| Python | 0 |
3f6d5aa5ee18462ff3a9a0e366ea48f508c93a58 | move to target | character.py | character.py | import pyglet # noqa
from pyglet.gl import * # noqa
from utility import load_image, mainbatches, window_width, window_height, calc_vel_xy # noqa
from collide import * # noqa
import random
import math
# from controller import Controller
def mean(inp):
return sum(inp) / float(len(inp))
green_sprite = pyglet.image.SolidColorImagePattern(color=(30, 255, 30, 255))
blue_sprite = pyglet.image.SolidColorImagePattern(color=(30, 30, 255, 255))
class Character(object):
def __init__(self, assets):
self.assets = assets
self.sprites = [pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
500, 500, batch=mainbatches[2])
]
for i in range(9):
self.make_sprite()
self.target = None
# self.collision = SpriteCollision(self.sprite)
# self.controller = Controller(self)
def update(self):
if not self.target:
self.target = (random.randint(50, window_width - 50), random.randint(50, window_height - 50))
print self.target
print self.sprite_mean()
print "###"
i = random.choice(self.sprites)
ret = calc_vel_xy(self.target[0], self.target[1], i.x, i.y, 3)
i.x += ret[0]
i.y += ret[1]
self.random_move()
m = self.sprite_mean()
for i in self.sprites:
if math.hypot(m[0] - i.x, m[1] - i.y) > 10:
ret = calc_vel_xy(m[0], m[1], i.x, i.y, 3)
i.x += ret[0]
i.y += ret[1]
def cleanup(self):
try:
self.sprite.delete()
except:
pass
try:
self.assets.modules['characters'].remove(self)
except:
pass
try:
del self
except:
pass
def random_move(self):
v = random.choice(self.sprites)
v.x += random.randint(-5, 5)
v.y += random.randint(-5, 5)
def sprite_mean(self):
x = mean([m.x for m in self.sprites])
y = mean([m.x for m in self.sprites])
return (x, y)
def make_sprite(self):
m = self.sprite_mean()
self.sprites.append(
pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
m[0], m[1], batch=mainbatches[2]) # noqa
)
| import pyglet # noqa
from pyglet.gl import * # noqa
from utility import load_image, mainbatches, window_width, window_height, calc_vel_xy # noqa
from collide import * # noqa
import random
import math
# from controller import Controller
def mean(inp):
return sum(inp) / float(len(inp))
green_sprite = pyglet.image.SolidColorImagePattern(color=(30, 255, 30, 255))
blue_sprite = pyglet.image.SolidColorImagePattern(color=(30, 30, 255, 255))
class Character(object):
def __init__(self, assets):
self.assets = assets
self.sprites = [pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
500, 500, batch=mainbatches[2])
]
for i in range(9):
self.make_sprite()
# self.collision = SpriteCollision(self.sprite)
# self.controller = Controller(self)
def update(self):
self.random_move()
m = self.sprite_mean()
for i in self.sprites:
if math.hypot(m[0] - i.x, m[1] - i.y) > 10:
ret = calc_vel_xy(m[0], m[1], i.x, i.y, 3)
i.x += ret[0]
i.y += ret[1]
def cleanup(self):
try:
self.sprite.delete()
except:
pass
try:
self.assets.modules['characters'].remove(self)
except:
pass
try:
del self
except:
pass
def random_move(self):
v = random.choice(self.sprites)
v.x += random.randint(-5, 5)
v.y += random.randint(-5, 5)
def sprite_mean(self):
x = mean([m.x for m in self.sprites])
y = mean([m.x for m in self.sprites])
return (x, y)
def make_sprite(self):
m = self.sprite_mean()
self.sprites.append(
pyglet.sprite.Sprite(pyglet.image.create(10, 10, green_sprite),
m[0], m[1], batch=mainbatches[2]) # noqa
)
| Python | 0 |
bb9aafe090d71c2a25eb3f3a6d591a205dbb7e5e | bump to 0.9.5 | dvc/__init__.py | dvc/__init__.py | """
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
VERSION = '0.9.5'
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' and os.getenv('TRAVIS_TAG', '') == '':
# Dynamically update version
try:
import git
repo = git.Repo(os.curdir, search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
VERSION = '{}+{}{}'.format(VERSION, short_sha, dirty)
except:
pass
__version__ = VERSION
| """
DVC
----
Make your data science projects reproducible and shareable.
"""
import os
VERSION = '0.9.4'
if os.getenv('APPVEYOR_REPO_TAG', '').lower() != 'true' and os.getenv('TRAVIS_TAG', '') == '':
# Dynamically update version
try:
import git
repo = git.Repo(os.curdir, search_parent_directories=True)
sha = repo.head.object.hexsha
short_sha = repo.git.rev_parse(sha, short=6)
dirty = '.mod' if repo.is_dirty() else ''
VERSION = '{}+{}{}'.format(VERSION, short_sha, dirty)
except:
pass
__version__ = VERSION
| Python | 0 |
3aac2716972c49eb3b1b688cb1fad89ce690ca58 | fix incorrect empty list condition | filter_log.py | filter_log.py | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 ciel <ciel@cieldeMBP>
#
# Distributed under terms of the MIT license.
"""
filter git log
"""
import codecs
from call_cmd import call
import config
def filter_log(last_commit):
commit_valid = call('git -C {} cat-file -e '.format(config.config_dic['project_path']) + last_commit)[0]
if commit_valid != 0:
return '无'
git_logs_cmd = '''git -C {} log --pretty=\"%s\" {}..HEAD'''.format(config.config_dic['project_path'], last_commit)
logs = call(git_logs_cmd)
log_has_prefix = []
prefix = config.config_dic['filter_log']['prefix']
if not prefix:
prefix = '['
for line in logs[1].split("\n"):
if line.startswith(prefix):
log_has_prefix.append(line)
if not log_has_prefix:
return '无'
log_file = '{}log.txt'.format(config.config_dic['builds_path'])
with codecs.open(log_file, 'w', 'UTF-8') as f:
for line in log_has_prefix:
f.write('{}\n'.format(line))
with codecs.open(log_file, 'r+', 'UTF-8') as f:
flip_cmd = "sed '1!G;h;$!d' " + log_file
res = call(flip_cmd)
f.write(res[1])
with codecs.open(log_file, 'r+', 'UTF-8') as f:
add_num_cmd = """awk '{printf NR"."" "}1' """ + log_file
res = call(add_num_cmd)
f.write(res[1])
with codecs.open(log_file, 'r', 'UTF-8') as f:
return f.read()
def msg_with_intall_info(last_commit, build):
build_info = config.config_dic['build'][build]
log = filter_log(last_commit)
msg = '更新日志:' + '\n\n' + log + '\n\n' + '安装地址:' + build_info['download_url']
return msg | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © 2017 ciel <ciel@cieldeMBP>
#
# Distributed under terms of the MIT license.
"""
filter git log
"""
import codecs
from call_cmd import call
import config
def filter_log(last_commit):
commit_valid = call('git -C {} cat-file -e '.format(config.config_dic['project_path']) + last_commit)[0]
if commit_valid != 0:
return '无'
git_logs_cmd = '''git -C {} log --pretty=\"%s\" {}..HEAD'''.format(config.config_dic['project_path'], last_commit)
logs = call(git_logs_cmd)
log_has_prefix = []
prefix = config.config_dic['filter_log']['prefix']
if not prefix:
prefix = '['
for line in logs[1].split("\n"):
if line.startswith(prefix):
log_has_prefix.append(line)
if log_has_prefix.count:
return '无'
log_file = '{}log.txt'.format(config.config_dic['builds_path'])
with codecs.open(log_file, 'w', 'UTF-8') as f:
for line in log_has_prefix:
f.write('{}\n'.format(line))
with codecs.open(log_file, 'r+', 'UTF-8') as f:
flip_cmd = "sed '1!G;h;$!d' " + log_file
res = call(flip_cmd)
f.write(res[1])
with codecs.open(log_file, 'r+', 'UTF-8') as f:
add_num_cmd = """awk '{printf NR"."" "}1' """ + log_file
res = call(add_num_cmd)
f.write(res[1])
with codecs.open(log_file, 'r', 'UTF-8') as f:
return f.read()
def msg_with_intall_info(last_commit, build):
build_info = config.config_dic['build'][build]
log = filter_log(last_commit)
msg = '更新日志:' + '\n\n' + log + '\n\n' + '安装地址:' + build_info['download_url']
return msg | Python | 0.999117 |
ee795da3215374f30005c9daa42de6f9d581580f | Make finglonger output a bit easier to read | finglonger.py | finglonger.py | #!/usr/bin/python
import os
import sys
import subprocess
import tempfile
import yaml
def validate_config(config):
environment = config.get('environment')
if environment is None:
print "No environment set, set one in config.yaml "
sys.exit(1)
def validate_environment(config):
if os.path.isfile("envs/" + config['environment'] + "/tasks.yaml"):
pass
else:
print "Tasks file not found, are you in the right directory?"
sys.exit(1)
def git_cmd(command):
p = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def process_task(task):
print "Finglongering..."
print task['name']
temp, temp_name = tempfile.mkstemp()
print temp_name
f = os.fdopen(temp, 'w')
f.write(task['shell'])
f.close()
os.chmod(temp_name, 0755)
p = subprocess.Popen(["/bin/bash", temp_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
print out
print err
if __name__ == "__main__":
config_file = os.environ['HOME'] + "/.config/finglonger/config.yaml"
if os.path.isfile(config_file):
with open(config_file) as f:
config = yaml.load(f.read())
else:
print "Config file not found: {0}".format(config_file)
sys.exit(1)
validate_config(config)
validate_environment(config)
git_cmd('git checkout master')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
master_tasks = yaml.load(f.read())
git_cmd('git checkout done')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
done_tasks = yaml.load(f.read())
git_cmd('git checkout master')
print "Tasks on master", len(master_tasks)
print "Tasks on done", len(done_tasks)
print "Tasks to do", len(master_tasks) - len(done_tasks)
for i in done_tasks:
master_tasks.remove(i)
for task in master_tasks:
process_task(task['task'])
git_cmd('git checkout done')
git_cmd('git merge master')
git_cmd('git push origin done')
git_cmd('git checkout master')
| #!/usr/bin/python
import os
import sys
import subprocess
import tempfile
import yaml
def validate_config(config):
environment = config.get('environment')
if environment is None:
print "No environment set, set one in config.yaml "
sys.exit(1)
def validate_environment(config):
if os.path.isfile("envs/" + config['environment'] + "/tasks.yaml"):
pass
else:
print "Tasks file not found, are you in the right directory?"
sys.exit(1)
def git_cmd(command):
p = subprocess.Popen(command.split(' '),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
return out, err
def process_task(task):
print "Finglongering..."
print task['name']
temp, temp_name = tempfile.mkstemp()
print temp_name
f = os.fdopen(temp, 'w')
f.write(task['shell'])
f.close()
os.chmod(temp_name, 0755)
p = subprocess.Popen(["/bin/bash", temp_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
print out
print err
if __name__ == "__main__":
config_file = os.environ['HOME'] + "/.config/finglonger/config.yaml"
if os.path.isfile(config_file):
with open(config_file) as f:
config = yaml.load(f.read())
else:
print "Config file not found: {0}".format(config_file)
sys.exit(1)
validate_config(config)
validate_environment(config)
git_cmd('git checkout master')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
master_tasks = yaml.load(f.read())
git_cmd('git checkout done')
with open("envs/" + config['environment'] + "/tasks.yaml") as f:
done_tasks = yaml.load(f.read())
git_cmd('git checkout master')
print len(master_tasks)
print len(done_tasks)
for i in done_tasks:
master_tasks.remove(i)
for task in master_tasks:
process_task(task['task'])
git_cmd('git checkout done')
git_cmd('git merge master')
git_cmd('git push origin done')
git_cmd('git checkout master')
| Python | 0.000005 |
cb4f022fb1fe0780eb2e37c8fdc8ff6a4409115c | Test and implementation for !ptr+offset loading | ostester/yamlreader.py | ostester/yamlreader.py | import collections.abc
from io import StringIO
import yaml
import ast
def parse(file):
return yaml.safe_load(file)
def parse_from_string(string):
return parse(StringIO(string))
class Zeros(collections.abc.Sequence):
"""
Represents a zeroed region of memory in C
>>> yaml.load("!zeros 5")
Zeros(5)
>>> yaml.dump(Zeros(3))
"!zeros '3'\\n"
>>> list(Zeros(7))
[0, 0, 0, 0, 0, 0, 0]
>>> Zeros(3)[-3]
0
>>> Zeros(3)[-2]
0
>>> Zeros(4)[1:3]
[0, 0]
"""
yaml_tag='!zeros'
def __init__(self, len):
self.len = len
@staticmethod
def from_yaml_loader(loader, node):
return Zeros(int(node.value))
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Zeros.yaml_tag, str(data.len))
def __getitem__(self, key):
if isinstance(key, slice):
return [0 for key in range(*key.indices(self.len))]
elif key > self.len-1 or key < -self.len:
raise IndexError('Zeros index out of range')
return 0
def __len__(self):
return self.len
def __repr__(self):
return 'Zeros({})'.format(repr(self.len))
yaml.add_representer(Zeros, Zeros.yaml_representer)
yaml.add_constructor(Zeros.yaml_tag, Zeros.from_yaml_loader)
class Pointer():
"""
Represents a pointer into an array.
>>> yaml.load('!ptr value')
Pointer('value')
>>> yaml.load('!ptr array+3')
Pointer('array', offset=3)
>>> yaml.dump(Pointer("value"))
"!ptr 'value'\\n"
"""
yaml_tag = '!ptr'
def __init__(self, data, offset=0):
self.data = data
self.offset = int(offset)
@staticmethod
def from_yaml_loader(loader, node):
args = map(str.strip, node.value.split('+'))
return Pointer(*args)
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Pointer.yaml_tag, data.data)
def __repr__(self):
if not self.offset:
format_str = 'Pointer({})'
else:
format_str = 'Pointer({}, offset={})'
return format_str.format(repr(self.data), self.offset)
yaml.add_representer(Pointer, Pointer.yaml_representer)
yaml.add_constructor(Pointer.yaml_tag, Pointer.from_yaml_loader)
def transform(yaml):
pass
| import collections.abc
from io import StringIO
import yaml
import ast
def parse(file):
return yaml.safe_load(file)
def parse_from_string(string):
return parse(StringIO(string))
class Zeros(collections.abc.Sequence):
"""
Represents a zeroed region of memory in C
>>> yaml.load("!zeros 5")
Zeros(5)
>>> yaml.dump(Zeros(3))
"!zeros '3'\\n"
>>> list(Zeros(7))
[0, 0, 0, 0, 0, 0, 0]
>>> Zeros(3)[-3]
0
>>> Zeros(3)[-2]
0
>>> Zeros(4)[1:3]
[0, 0]
"""
yaml_tag='!zeros'
def __init__(self, len):
self.len = len
@staticmethod
def from_yaml_loader(loader, node):
return Zeros(int(node.value))
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Zeros.yaml_tag, str(data.len))
def __getitem__(self, key):
if isinstance(key, slice):
return [0 for key in range(*key.indices(self.len))]
elif key > self.len-1 or key < -self.len:
raise IndexError('Zeros index out of range')
return 0
def __len__(self):
return self.len
def __repr__(self):
return 'Zeros({})'.format(repr(self.len))
yaml.add_representer(Zeros, Zeros.yaml_representer)
yaml.add_constructor(Zeros.yaml_tag, Zeros.from_yaml_loader)
class Pointer():
"""
Represents a pointer into an array.
>>> yaml.load('!ptr value')
Pointer('value')
>>> yaml.dump(Pointer("value"))
"!ptr 'value'\\n"
"""
yaml_tag = '!ptr'
def __init__(self, data, offset=0):
self.data = data
self.offset = offset
@staticmethod
def from_yaml_loader(loader, node):
return Pointer(node.value)
@staticmethod
def yaml_representer(dumper, data):
return dumper.represent_scalar(Pointer.yaml_tag, data.data)
def __repr__(self):
return 'Pointer({})'.format(repr(self.data))
yaml.add_representer(Pointer, Pointer.yaml_representer)
yaml.add_constructor(Pointer.yaml_tag, Pointer.from_yaml_loader)
def transform(yaml):
pass
| Python | 0 |
fe9e6e0335716333c7cbdf6d11a737125551dc5f | Use get_terminal_size directly from shutil on newer Python version. | halo/_utils.py | halo/_utils.py | # -*- coding: utf-8 -*-
"""Utilities for Halo library.
"""
import codecs
import platform
import six
from sys import version_info
if version_info >= (3, 3):
from shutil import get_terminal_size
else:
from backports.shutil_get_terminal_size import get_terminal_size
from colorama import init
from termcolor import colored
init(autoreset=True)
def is_supported():
"""Check whether operating system supports main symbols or not.
Returns
-------
boolean
Whether operating system supports main symbols or not
"""
os_arch = platform.system()
if os_arch != 'Windows':
return True
return False
def get_environment():
"""Get the environment in which halo is running
Returns
-------
str
Environment name
"""
try:
from IPython import get_ipython
except ImportError:
return 'terminal'
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole
return 'jupyter'
elif shell == 'TerminalInteractiveShell': # Terminal running IPython
return 'ipython'
else:
return 'terminal' # Other type (?)
except NameError:
return 'terminal'
def colored_frame(frame, color):
"""Color the frame with given color and returns.
Parameters
----------
frame : str
Frame to be colored
color : str
Color to be applied
Returns
-------
str
Colored frame
"""
return colored(frame, color, attrs=['bold'])
def is_text_type(text):
"""Check if given parameter is a string or not
Parameters
----------
text : *
Parameter to be checked for text type
Returns
-------
bool
Whether parameter is a string or not
"""
if isinstance(text, six.text_type) or isinstance(text, six.string_types):
return True
return False
def decode_utf_8_text(text):
"""Decode the text from utf-8 format
Parameters
----------
text : str
String to be decoded
Returns
-------
str
Decoded string
"""
try:
return codecs.decode(text, 'utf-8')
except:
return text
def get_terminal_columns():
"""Determine the amount of available columns in the terminal
Returns
-------
int
Terminal width
"""
terminal_size = get_terminal_size()
# If column size is 0 either we are not connected
# to a terminal or something else went wrong. Fallback to 80.
if terminal_size.columns == 0:
return 80
else:
return terminal_size.columns
| # -*- coding: utf-8 -*-
"""Utilities for Halo library.
"""
import codecs
import platform
import six
from backports.shutil_get_terminal_size import get_terminal_size
from colorama import init
from termcolor import colored
init(autoreset=True)
def is_supported():
"""Check whether operating system supports main symbols or not.
Returns
-------
boolean
Whether operating system supports main symbols or not
"""
os_arch = platform.system()
if os_arch != 'Windows':
return True
return False
def get_environment():
"""Get the environment in which halo is running
Returns
-------
str
Environment name
"""
try:
from IPython import get_ipython
except ImportError:
return 'terminal'
try:
shell = get_ipython().__class__.__name__
if shell == 'ZMQInteractiveShell': # Jupyter notebook or qtconsole
return 'jupyter'
elif shell == 'TerminalInteractiveShell': # Terminal running IPython
return 'ipython'
else:
return 'terminal' # Other type (?)
except NameError:
return 'terminal'
def colored_frame(frame, color):
"""Color the frame with given color and returns.
Parameters
----------
frame : str
Frame to be colored
color : str
Color to be applied
Returns
-------
str
Colored frame
"""
return colored(frame, color, attrs=['bold'])
def is_text_type(text):
"""Check if given parameter is a string or not
Parameters
----------
text : *
Parameter to be checked for text type
Returns
-------
bool
Whether parameter is a string or not
"""
if isinstance(text, six.text_type) or isinstance(text, six.string_types):
return True
return False
def decode_utf_8_text(text):
"""Decode the text from utf-8 format
Parameters
----------
text : str
String to be decoded
Returns
-------
str
Decoded string
"""
try:
return codecs.decode(text, 'utf-8')
except:
return text
def get_terminal_columns():
"""Determine the amount of available columns in the terminal
Returns
-------
int
Terminal width
"""
terminal_size = get_terminal_size()
# If column size is 0 either we are not connected
# to a terminal or something else went wrong. Fallback to 80.
if terminal_size.columns == 0:
return 80
else:
return terminal_size.columns
| Python | 0 |
3329b260fbea858dcfe3f6f6a9ff365467352d1f | optimize sum_lines for time consuming | fileprocess/filesline.py | fileprocess/filesline.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2017-09-15 15:33
#
# Filename: filesline.py
#
# Description: All Rights Are Reserved
#
"""
import sys
import os
from dirlist import DirList
class FilesLine(DirList):
"""generate the line number of files located in directory
"""
def __init__(self, directory):
super(FilesLine, self).__init__(directory)
self.filesline = 0
def sum_lines(self):
# TODO(edony): optimize algorithm of sum_lines method
filesname = []
for item_dir in self.dirlist.keys():
for item_file in self.dirlist[item_dir][1]:
filesname.append(item_dir + '/' + item_file)
for filename in filesname:
with open(filename, 'rb') as filebuf:
self.filesline += len(filebuf.readlines())
return self.filesline
if __name__ == "__main__":
import time
tmp = DirList('/Users/edony/coding/toolkitem')
#print(tmp.dirlist)
#print(sys.path)
#print(os.path.split(os.path.realpath(__file__)))
tmp1 = FilesLine('/Users/edony/coding/toolkitem')
print(tmp1.dirlist)
print(time.time())
tmp1.sum_lines()
print(time.time())
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
r"""
# .---. .-----------
# / \ __ / ------
# / / \( )/ ----- (`-') _ _(`-') <-. (`-')_
# ////// '\/ ` --- ( OO).-/( (OO ).-> .-> \( OO) ) .->
# //// / // : : --- (,------. \ .'_ (`-')----. ,--./ ,--/ ,--.' ,-.
# // / / / `\/ '-- | .---' '`'-..__)( OO).-. ' | \ | | (`-')'.' /
# // //..\\ (| '--. | | ' |( _) | | | | . '| |)(OO \ /
# ============UU====UU==== | .--' | | / : \| |)| | | |\ | | / /)
# '//||\\` | `---. | '-' / ' '-' ' | | \ | `-/ /`
# ''`` `------' `------' `-----' `--' `--' `--'
# ######################################################################################
#
# Author: edony - edonyzpc@gmail.com
#
# twitter : @edonyzpc
#
# Last modified: 2017-09-15 15:33
#
# Filename: filesline.py
#
# Description: All Rights Are Reserved
#
"""
import sys
import os
from dirlist import DirList
class FilesLine(DirList):
"""generate the line number of files located in directory
"""
def __init__(self, directory):
super(FilesLine, self).__init__(directory)
self.filesline = 0
def sum_lines(self):
pass
if __name__ == "__main__":
tmp = DirList('/Users/edony/coding/toolkitem')
#print(tmp.dirlist)
#print(sys.path)
#print(os.path.split(os.path.realpath(__file__)))
tmp1 = FilesLine('/Users/edony/coding/toolkitem')
print(tmp1.dirlist)
| Python | 0.012633 |
e1f1f0ca797b639a730e8804dbd5595ad0f395e0 | Add docstring for module.py | package_name/module.py | package_name/module.py | """
Module provides a simple cubic_rectification function.
"""
import numpy as np
def cubic_rectification(x):
'''
Returns the rectified value of the cube of X.
If X is positive, this is the cube of X, if X is negative it is 0.
'''
return np.maximum(0, x**3)
| import numpy as np
def cubic_rectification(x):
'''
Returns the rectified value of the cube of X.
If X is positive, this is the cube of X, if X is negative it is 0.
'''
return np.maximum(0, x**3)
| Python | 0.000001 |
d71c0745a4032ce60dd506e91665e46c4c98271f | Update forwarder_ZMQ_Server.py | ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py | ProBot_Server/Midi_Device/forwarder_ZMQ_Server.py | #!/usr/bin/python
import zmq
def main():
print "\nProBot's ZMQ Server is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| #!/usr/bin/python
import zmq
def main():
print "\nServer for ProBot is running..."
try:
context = zmq.Context(1)
# Socket facing clients
frontend = context.socket(zmq.SUB)
frontend.bind("tcp://*:5559")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:5560")
zmq.device(zmq.FORWARDER, frontend, backend)
except Exception, e:
print e
print "bringing down zmq device"
finally:
pass
frontend.close()
backend.close()
context.term()
if __name__ == "__main__":
main()
| Python | 0.000001 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.